code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
import gym
from nuro_arm import RobotArm
class GridWorldEnv(gym.Env):
    """A simple grid-world gym environment backed by a (simulated or real) robot arm.

    The agent occupies a cell on an ``n_grids`` x ``n_grids`` grid; each step
    the robot hand is moved to the workspace position of the current cell.
    Reaching the far corner yields reward 1 and ends the episode.
    """

    def __init__(self,
                 mode='sim',
                 n_grids=10,
                 seed=None,
                 ):
        """
        Args:
            mode: mode string forwarded to RobotArm (e.g. 'sim').
            n_grids: number of grid cells per side.
            seed: RNG seed forwarded to gym seeding.
        """
        self.seed(seed)
        self.n_grids = n_grids
        self.observation_space = gym.spaces.Box(0, n_grids, (2,), dtype=int)
        self.robot = RobotArm(mode)
        # workspace extents (presumably meters) mapped onto the grid, at a fixed height
        x_range = np.linspace(0.1, 0.3, self.n_grids)
        y_range = np.linspace(-0.1, 0.1, self.n_grids)
        z_val = 0.05
        self.grid_positions = np.dstack(np.meshgrid(x_range, y_range, [z_val]))
        # move in cardinal directions and noop
        # BUGFIX: the fifth delta was a duplicate (0, 1); it must be (0, -1)
        # so all four cardinal moves are available.
        self.action_deltas = ((0, 0), (1, 0), (0, 1), (-1, 0), (0, -1))
        self.action_space = gym.spaces.Discrete(5)
        self.goal = np.array((self.n_grids-1, self.n_grids-1))

    def reset(self):
        """Reset the agent to the fixed start cell (1, 1) and return the observation."""
        self.state = np.array((1, 1))
        return self.get_obs()

    def step(self, a):
        """Apply action ``a`` and return ``(obs, reward, done, info)``.

        Reward is truthy only when the goal cell is reached; the episode
        terminates at the same time.
        """
        assert self.action_space.contains(a)
        new_state = np.add(self.state, self.action_deltas[a])
        # clamp to the grid so moves off the edge become no-ops in that axis
        self.state = np.clip(new_state, 0, self.n_grids-1)
        self.move_hand_to_state(self.state)
        obs = self.get_obs()
        reward = (self.state == self.goal).all()
        done = reward
        info = {}
        return obs, reward, done, info

    def get_obs(self):
        """Return a copy of the current grid coordinates."""
        return self.state.copy()

    def move_hand_to_state(self, state):
        """Move the robot hand to the workspace position of grid cell ``state``.

        NOTE(review): ``grid_positions`` comes from ``np.meshgrid`` with the
        default 'xy' indexing, so axis 0 varies with y — confirm the
        ``[state[0], state[1]]`` index order is intended.
        """
        pos = self.grid_positions[state[0], state[1]]
        self.robot.move_hand_to(pos)
| [
"numpy.clip",
"numpy.add",
"gym.spaces.Discrete",
"gym.spaces.Box",
"numpy.array",
"numpy.linspace",
"nuro_arm.RobotArm",
"numpy.meshgrid"
] | [((309, 352), 'gym.spaces.Box', 'gym.spaces.Box', (['(0)', 'n_grids', '(2,)'], {'dtype': 'int'}), '(0, n_grids, (2,), dtype=int)\n', (323, 352), False, 'import gym\n'), ((375, 389), 'nuro_arm.RobotArm', 'RobotArm', (['mode'], {}), '(mode)\n', (383, 389), False, 'from nuro_arm import RobotArm\n'), ((409, 444), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.3)', 'self.n_grids'], {}), '(0.1, 0.3, self.n_grids)\n', (420, 444), True, 'import numpy as np\n'), ((463, 499), 'numpy.linspace', 'np.linspace', (['(-0.1)', '(0.1)', 'self.n_grids'], {}), '(-0.1, 0.1, self.n_grids)\n', (474, 499), True, 'import numpy as np\n'), ((748, 770), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(5)'], {}), '(5)\n', (767, 770), False, 'import gym\n'), ((792, 838), 'numpy.array', 'np.array', (['(self.n_grids - 1, self.n_grids - 1)'], {}), '((self.n_grids - 1, self.n_grids - 1))\n', (800, 838), True, 'import numpy as np\n'), ((878, 894), 'numpy.array', 'np.array', (['(1, 1)'], {}), '((1, 1))\n', (886, 894), True, 'import numpy as np\n'), ((1016, 1057), 'numpy.add', 'np.add', (['self.state', 'self.action_deltas[a]'], {}), '(self.state, self.action_deltas[a])\n', (1022, 1057), True, 'import numpy as np\n'), ((1079, 1118), 'numpy.clip', 'np.clip', (['new_state', '(0)', '(self.n_grids - 1)'], {}), '(new_state, 0, self.n_grids - 1)\n', (1086, 1118), True, 'import numpy as np\n'), ((561, 599), 'numpy.meshgrid', 'np.meshgrid', (['x_range', 'y_range', '[z_val]'], {}), '(x_range, y_range, [z_val])\n', (572, 599), True, 'import numpy as np\n')] |
# Load pickled data
import pickle
import numpy as np
def load(filename):
    """Read a pickled dataset and return (features, labels).

    The pickle must hold a dict with 'features' (N, H, W) and 'labels';
    features are reshaped to (N, H, W, 1) to add a channel axis.
    """
    with open(filename, mode='rb') as handle:
        payload = pickle.load(handle)
    features, labels = payload['features'], payload['labels']
    channel_shape = (features.shape[0], features.shape[1], features.shape[2], 1)
    features = np.reshape(features, channel_shape)
    return features, labels
def load_train():
    """Load the augmented training split."""
    path = '/home/ans5k/work/CarND-Traffic-Sign-Classifier-Project/traffic-signs-data/augmented-train.p'
    return load(path)
def load_valid():
    """Load the grayscale validation split."""
    path = '/home/ans5k/work/CarND-Traffic-Sign-Classifier-Project/traffic-signs-data/gray-valid.p'
    return load(path)
def load_test():
    """Load the grayscale test split."""
    path = '/home/ans5k/work/CarND-Traffic-Sign-Classifier-Project/traffic-signs-data/gray-test.p'
    return load(path)
| [
"pickle.load",
"numpy.reshape"
] | [((212, 286), 'numpy.reshape', 'np.reshape', (['inputs', '(inputs.shape[0], inputs.shape[1], inputs.shape[2], 1)'], {}), '(inputs, (inputs.shape[0], inputs.shape[1], inputs.shape[2], 1))\n', (222, 286), True, 'import numpy as np\n'), ((130, 144), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (141, 144), False, 'import pickle\n')] |
import splat.simulate as spsim
import splat.evolve as spev
from .config import DATA_FOLDER, POLYNOMIALS, EVOL_MODELS_FOLDER, FIGURES
from .tools import teff_to_spt, teff_from_spt
from .abs_mags import get_abs_mag, get_teff_from_mag, get_teff_from_mag_ignore_unc
#import pymc3 as pm
from scipy.interpolate import griddata
#import theano.tensor as tt
#from theano.compile.ops import as_op
import astropy.units as u
import numba
import pandas as pd
import numpy as np
#use splat for no
import splat
import splat.empirical as spe
def read_bintemplates():
    """Load the precomputed binary lookup table.

    Returns [primary_spts, secondary_spts, system_spts] as arrays taken from
    the pickled DataFrame's prim/sec/sys columns.
    """
    table = pd.read_pickle(DATA_FOLDER+'/binary_lookup_table.pkl.gz')
    return [table.prim.values, table.sec.values, table.sys.values]
def get_system_type(pr, sc, interpolators):
    """
    use the lookup table to get a spectral type for the binary
    using a linear interpolation to avoid nans

    pr: primary type (float, M0=10)
    sc: secondary type (float, M0=10)
    interpolators: (3, N) array of floats (0: primary, 1: secondary, 2: system)

    Note: entries of ``sc`` that are NaN are overwritten in place with the
    corresponding primary type before interpolating.
    """
    # replace missing secondaries with the primary type (in-place, as before)
    missing = np.isnan(sc)
    sc[missing] = pr[missing]
    # linear interpolation over the (primary, secondary) lookup grid
    lookup_points = np.array([interpolators[0], interpolators[1]]).T
    return griddata(lookup_points, interpolators[-1], (pr, sc), method='linear')
def evolutionary_model_interpolator(mass, age, model):
    """
    Evolutionary model interpolator

    Interpolates temperature and luminosity on a (log mass, log age) grid
    read from the named model's CSV table.

    input: mass, age (arrays or scalars, solar masses / Gyr)
    model: model name (case-insensitive, e.g. 'baraffe2003')
    """
    table_path = EVOL_MODELS_FOLDER+'//'+model.lower()+'.csv'
    grid = pd.read_csv(table_path)
    # saumon2008 tabulates several cloud treatments; keep only the hybrid one
    if model == 'saumon2008':
        grid = grid[grid.cloud == 'hybrid']
    # interpolate in log space for mass, age and temperature
    log_teff = np.log10(grid.temperature.values)
    lumns = grid.luminosity.values
    log_mass = np.log10(grid.mass.values)
    log_age = np.log10(grid.age.values)
    grid_points = np.array([log_mass, log_age]).T
    query = (np.log10(mass), np.log10(age))
    teffs = griddata(grid_points, log_teff, query, method='linear')
    lumn = griddata(grid_points, lumns, query, method='linear')
    return {'mass': mass*u.Msun, 'age': age*u.Gyr, 'temperature': 10**teffs*u.Kelvin,
            'luminosity': lumn*u.Lsun}
def simulate_spts(**kwargs):
    """
    Simulate parameters from mass function,
    mass ratio distribution and age distribution

    Keyword arguments (all optional):
        recompute (bool): if True, draw a fresh sample and pickle it;
            otherwise read the cached pickle.
        name (str): evolutionary model name (default 'baraffe2003').
        nsample: number of single systems to draw.
        range (list): [mass_min, mass_max, age_min, age_max] for the draws.
        filename (str): override the cache-file path.

    Returns a dict with single/primary/secondary evolutionary parameters,
    their spectral types, and the combined-light binary spectral types.
    """
    recompute=kwargs.get('recompute', False)
    model_name=kwargs.get('name','baraffe2003')
    #use hybrid models that predit the T dwarf bump for Saumon Models
    if model_name=='saumon2008':
        cloud='hybrid'
    else:
        cloud=False
    #automatically set maxima and minima to avoid having too many nans
    #mass age and age, min, max
    #all masses should be 0.01
    # NOTE(review): acceptable_values (and cloud) are defined but never used
    # below -- the caller-supplied 'range' kwarg is what is actually applied.
    acceptable_values={'baraffe2003': [0.01, 0.1, 0.01, 8.0],
    'marley2019': [0.01, 0.08, 0.001, 8.0], 'saumon2008':[0.01, 0.09, 0.003, 8.0],
    'phillips2020':[0.01, 0.075, 0.001, 8.0 ],'burrows2001':[0.01, 0.075, 10, 12]}
    fname=kwargs.get('filename', DATA_FOLDER+'/mass_age_spcts_with_bin{}.pkl'.format(model_name))
    filename=fname
    if recompute:
        nsim = kwargs.get('nsample', 1e5)
        ranges=kwargs.get('range', None)
        # masses for singles [this can be done with pymc but nvm]
        m_singles = spsim.simulateMasses(nsim,range=[ranges[0], ranges[1]],distribution='power-law',alpha=0.6)
        #ages for singles
        ages_singles= spsim.simulateAges(nsim,range=[ranges[2], ranges[3]], distribution='uniform')
        #parameters for binaries
        #binrs=simulate_binary(int(nsim), [ranges[0], ranges[1]], [ranges[2], ranges[3]])
        qs=spsim.simulateMassRatios(nsim,distribution='power-law',q_range=[0.1,1.0],gamma=4)
        m_prims = spsim.simulateMasses(nsim,range=[ranges[0], ranges[1]],distribution='power-law',alpha=0.6)
        m_sec=m_prims*qs
        ages_bin= spsim.simulateAges(nsim,range=[ranges[2], ranges[3]], distribution='uniform')
        #single_evol=spev.modelParameters(mass=m_singles,age=ages_singles, set=model_name, cloud=cloud)
        single_evol=evolutionary_model_interpolator(m_singles, ages_singles, model_name)
        #primary_evol=spev.modelParameters(mass=binrs[0],age=binrs[-1], set=model_name, cloud=cloud)
        primary_evol=evolutionary_model_interpolator(m_prims,ages_bin, model_name)
        #secondary_evol=spev.modelParameters(mass=binrs[1],age=binrs[-1], set=model_name, cloud=cloud)
        secondary_evol=evolutionary_model_interpolator(m_sec,ages_bin, model_name)
        #save luminosities
        #temperatures
        teffs_singl =single_evol['temperature'].value
        teffs_primar=primary_evol['temperature'].value
        teffs_second=secondary_evol['temperature'].value
        #spectraltypes
        spts_singl =teff_to_spt(teffs_singl)
        #the singles will be fine, remove nans from systems
        spt_primar=teff_to_spt(teffs_primar)
        spt_second=teff_to_spt(teffs_second)
        # round component types to whole subtypes before the lookup-table join
        xy=np.vstack([np.round(np.array(spt_primar), decimals=0), np.round(np.array(spt_second), decimals=0)]).T
        spt_binr=get_system_type(xy[:,0], xy[:,1], read_bintemplates())
        values={ 'sing_evol': single_evol, 'sing_spt':spts_singl,
                'prim_evol': primary_evol, 'prim_spt':spt_primar,
                'sec_evol': secondary_evol, 'sec_spt': spt_second,
                'binary_spt': spt_binr }
        import pickle
        with open(filename, 'wb') as file:
            pickle.dump(values,file)
    else:
        values=pd.read_pickle(filename)
    return values
def get_mag_from_luminosity(lumn, bc, log=False):
    """Convert a luminosity to a magnitude using bolometric correction ``bc``.

    If ``log`` is True, ``lumn`` is a linear luminosity and its log10 is
    taken first; otherwise ``lumn`` is used as-is (already logarithmic).
    4.74 is the solar bolometric magnitude zero-point.
    """
    if log:
        lumn = np.log10(lumn)
    return -2.5*lumn+4.74-bc
def fillipazzo_bolometric_correction(spt, filt='2MASS_J', mask=None):
    """
    Bolometric correction from the Filippazzo et al. (2015) relations.

    spt: numeric spectral type (scalar or array-like)
    filt: filter name key into splat's BC relations
    mask: boolean array; masked entries are left as NaN (vector case only)
    """
    #for float
    if isinstance(spt, (np.floating, float, int)):
        return spe.typeToBC(spt, filt, ref='filippazzo2015')
    #vectorized solution, masking things outside the range
    ref='filippazzo2015'
    spt=np.array(spt)
    res=np.ones_like(spt)*np.nan
    if mask is None:
        mask=np.zeros_like(spt).astype(bool)
    relation = splat.SPT_BC_RELATIONS[ref]
    # evaluate the polynomial relation, then scatter by its fit uncertainty
    bc = np.polyval(relation['filters'][filt]['coeff'], spt-relation['sptoffset'])
    bc_error = relation['filters'][filt]['fitunc']
    rands=np.random.normal(bc, bc_error)
    np.place(res, ~mask, rands)
    return res
def make_systems(bfraction=0.2, recompute=False, model='baraffe2003',
    mass_age_range=[0.01, 0.1, 0., 8.0], nsample=5e5, return_singles=False, **kwargs):
    """
    Assemble a population of single stars plus unresolved binaries and scale
    it to the local luminosity function.

    bfraction: unresolved-binary fraction of the final population.
    recompute / model / mass_age_range / nsample: forwarded to simulate_spts.
    return_singles: NOTE(review) — accepted but never used in this body.
    Returns a pandas DataFrame with per-system parameters plus
    'scale', 'scale_unc' and 'scale_times_model' columns from
    scale_to_local_lf.
    """
    # NOTE(review): mass_age_range is a mutable default argument; it is only
    # read here, but callers should not mutate the returned default.
    #quick but dirty
    if 'filename' in kwargs:
        mods=simulate_spts(name=model,
            recompute=recompute, range=mass_age_range,\
            nsample=nsample, filename= kwargs.get('filename', ''))
    else:
        mods=simulate_spts(name=model,
            recompute=recompute, range=mass_age_range,\
            nsample=nsample)
    #singles
    singles=mods['sing_evol']
    #singles['abs_2MASS_J']= get_abs_mag(mods['sing_spt'], '2MASS J')[0]
    #bolometric corrections for 2MASS J
    #bcs_sings=fillipazzo_bolometric_correction(mods['sing_spt'], filt='2MASS_J',
    #        mask=None)
    #singles['bolometric_cor_2MASS_J']=bcs_sings
    #singles['abs_2MASS_J']=get_mag_from_luminosity(singles['luminosity'].value,\
    #    bcs_sings, log=False)
    singles['is_binary']= np.zeros_like(mods['sing_spt']).astype(bool)
    singles['spt']=mods['sing_spt']
    singles['prim_spt']=mods['sing_spt']
    singles['sec_spt']=np.ones_like(mods['sing_spt'])*np.nan
    #binary
    binaries={}
    binaries['age']=mods['prim_evol']['age']
    binaries['mass']=mods['prim_evol']['mass']+mods['sec_evol']['mass']
    binaries['pri_mass']=mods['prim_evol']['mass']
    binaries['sec_mass']=mods['sec_evol']['mass']
    # combined light: sum component luminosities in linear space
    binaries['luminosity']=np.log10(10**(mods['prim_evol']['luminosity']).value+\
        10**(mods['sec_evol']['luminosity']).value)
    #binaries['temperature']=mods['prim_evol']['temperature']
    # scatter the lookup-table system type by 0.3 subtypes
    binaries['spt']=np.random.normal(mods['binary_spt'], 0.3)
    binaries['prim_spt']=mods['prim_spt']
    binaries['sec_spt']=mods['sec_spt']
    binaries['prim_luminosity']=10**(mods['prim_evol']['luminosity']).value
    binaries['sec_luminosity']=10**(mods['sec_evol']['luminosity']).value
    binaries['is_binary']=np.ones_like(mods['sec_spt']).astype(bool)
    #bolometric corrections for 2MASS J
    #bcs_bins=fillipazzo_bolometric_correction(binaries['spt'], filt='2MASS_J',
    #        mask=None)
    #binaries['bolometric_cor_2MASS_J']=bcs_bins
    #magnitudes ugh
    """
    ignore 2mass photometry
    js_singles, j_single_unc=get_abs_mag(mods['sing_spt'],'2MASS J')
    hs_singles, h_single_unc=get_abs_mag(mods['sing_spt'],'2MASS H')
    singles['abs_2MASS_J']=np.random.normal(js_singles, j_single_unc)
    singles['abs_2MASS_H']=np.random.normal(hs_singles, h_single_unc)
    js_primns, junc_prims=get_abs_mag(mods['prim_spt'], '2MASS J')
    js_prims_to_use=np.random.normal(js_primns, junc_prims)
    hs_primns, hunc_prims=get_abs_mag(mods['prim_spt'], '2MASS H')
    hs_prims_to_use=np.random.normal(hs_primns, junc_prims)
    js_secs, junc_secs=get_abs_mag(mods['sec_spt'], '2MASS J')
    js_secs_to_use=np.random.normal(js_secs, junc_secs)
    hs_secs, hunc_secs=get_abs_mag(mods['sec_spt'], '2MASS H')
    hs_secs_to_use=np.random.normal(hs_secs, hunc_secs)
    #print (np.isnan(js_prims_to_us).any())
    binaries['abs_2MASS_J']= -2.5*np.log10(10**(-0.4*js_prims_to_use)+ 10**(-0.4*js_secs_to_use))
    binaries['abs_2MASS_H']= -2.5*np.log10(10**(-0.4*hs_prims_to_use)+ 10**(-0.4*hs_secs_to_use))
    """
    #assign teff from absolute mag
    #binaries['temperature']=get_teff_from_mag_ignore_unc(binaries['abs_2MASS_H'])
    binaries['temperature']=teff_from_spt(binaries['spt'])
    #binaries['temperature']=
    #compute numbers to choose based on binary fraction
    ndraw= int(len(mods['sing_spt'])/(1-bfraction))-int(len(mods['sing_spt']))
    #ndraw=int(len(mods['sing_spt'])* bfraction)
    #random list of binaries to choose
    random_int=np.random.choice(np.arange(len(binaries['spt'])), ndraw)
    chosen_binaries={}
    for k in binaries.keys():
        chosen_binaries[k]=binaries[k][random_int]
    #add scale to the local lf
    res=pd.concat([pd.DataFrame(singles), pd.DataFrame(chosen_binaries)])
    scl=scale_to_local_lf(res.temperature.values)
    #print (scl
    res['scale']=scl[0]
    res['scale_unc']=scl[1]
    res['scale_times_model']=scl[-1]
    #combine the to dictionaries
    return res
def scale_to_local_lf(teffs):
"""
scale a teff distribution to the local lf
"""
kirkpatrick2020LF={'bin_center': np.array([ 525, 675, 825, 975, 1125, 1275, 1425, 1575, 1725, 1875, 2025]),
'values': np.array([4.24, 2.8 , 1.99, 1.72, 1.11, 1.95, 0.94, 0.81, 0.78, 0.5 , 0.72]),
'unc': np.array([0.7 , 0.37, 0.32, 0.3 , 0.25, 0.3 , 0.22, 0.2 , 0.2 , 0.17, 0.18])}
binedges= np.append(kirkpatrick2020LF['bin_center']-75, kirkpatrick2020LF['bin_center'][-1]+75)
#bools=np.logical_and(teffs <= binedges[-1], teffs >= binedges[0])
#print (binedges[0], binedges[-1])
preds=np.histogram(teffs, bins=binedges, normed=False)[0]
obs=np.array(kirkpatrick2020LF['values'])
unc=np.array(kirkpatrick2020LF['unc'])
obs_monte_carlo= np.random.normal(obs, unc, (10000, len(obs)))
pred_monte= np.ones_like(obs_monte_carlo)*(preds)
unc_monte= np.ones_like(obs_monte_carlo)*(unc)
scale=(np.nansum((obs_monte_carlo*pred_monte)/(unc_monte**2), axis=1)\
/np.nansum(((pred_monte**2)/(unc_monte**2)), axis=1))*(10**-3)
res=[np.nanmedian(scale), np.nanstd(scale), \
np.sum(preds*np.nanmedian(scale))]
return res
def make_systems_nocombined_light(**kwargs):
"""
choose a random sets of primaries and secondaries
and a sample of single systems based off a preccomputed-evolutionary model grid
and an unresolved binary fraction
"""
#recompute for different evolutionary models
model=kwargs.get('model_name', 'baraffe2003')
binary_fraction=kwargs.get('bfraction', 0.2)
model_vals=simulate_spts(name=model, **kwargs)
#nbin= int(len(model_vals['sing_spt'])*binary_fraction) #number of binaries
#ndraw= int(len(model_vals['sing_spt'])/(1-binary_fraction))-int(len(model_vals['sing_spt']))
ndraw=int(len(model_vals['sing_spt'])* binary_fraction)
nans=np.isnan(model_vals['binary_spt'])
choices={'spt': np.random.choice(model_vals['binary_spt'][~nans], ndraw),
'teff': np.random.choice(model_vals['prim_evol']['temperature'].value[~nans], ndraw),
'age': np.random.choice(model_vals['prim_evol']['age'].value[~nans],ndraw),
'mass': np.random.choice(model_vals['prim_evol']['mass'].value[~nans]+model_vals['sec_evol']['mass'].value[~nans],ndraw)}
vs={'system_spts': np.concatenate([model_vals['sing_spt'], choices['spt']]),
'system_teff': np.concatenate([(model_vals['sing_evol']['temperature']).value, choices['teff']]),
'system_age': np.concatenate([(model_vals['sing_evol']['age']).value, choices['age']]),
'system_mass': np.concatenate([(model_vals['sing_evol']['mass']).value, choices['mass']])}
return vs | [
"numpy.log10",
"pandas.read_csv",
"numpy.array",
"pandas.read_pickle",
"numpy.histogram",
"numpy.place",
"numpy.polyval",
"numpy.concatenate",
"pandas.DataFrame",
"numpy.random.normal",
"splat.simulate.simulateMassRatios",
"numpy.nanstd",
"numpy.random.choice",
"numpy.isnan",
"numpy.nans... | [((561, 620), 'pandas.read_pickle', 'pd.read_pickle', (["(DATA_FOLDER + '/binary_lookup_table.pkl.gz')"], {}), "(DATA_FOLDER + '/binary_lookup_table.pkl.gz')\n", (575, 620), True, 'import pandas as pd\n'), ((1178, 1245), 'scipy.interpolate.griddata', 'griddata', (['interpoints', 'interpolators[-1]', '(pr, sc)'], {'method': '"""linear"""'}), "(interpoints, interpolators[-1], (pr, sc), method='linear')\n", (1186, 1245), False, 'from scipy.interpolate import griddata\n'), ((1498, 1525), 'pandas.read_csv', 'pd.read_csv', (['model_filename'], {}), '(model_filename)\n', (1509, 1525), True, 'import pandas as pd\n'), ((1732, 1774), 'numpy.log10', 'np.log10', (['evolutiomodel.temperature.values'], {}), '(evolutiomodel.temperature.values)\n', (1740, 1774), True, 'import numpy as np\n'), ((1879, 1914), 'numpy.log10', 'np.log10', (['evolutiomodel.mass.values'], {}), '(evolutiomodel.mass.values)\n', (1887, 1914), True, 'import numpy as np\n'), ((1928, 1962), 'numpy.log10', 'np.log10', (['evolutiomodel.age.values'], {}), '(evolutiomodel.age.values)\n', (1936, 1962), True, 'import numpy as np\n'), ((8202, 8305), 'numpy.log10', 'np.log10', (["(10 ** mods['prim_evol']['luminosity'].value + 10 ** mods['sec_evol'][\n 'luminosity'].value)"], {}), "(10 ** mods['prim_evol']['luminosity'].value + 10 ** mods[\n 'sec_evol']['luminosity'].value)\n", (8210, 8305), True, 'import numpy as np\n'), ((8387, 8428), 'numpy.random.normal', 'np.random.normal', (["mods['binary_spt']", '(0.3)'], {}), "(mods['binary_spt'], 0.3)\n", (8403, 8428), True, 'import numpy as np\n'), ((11390, 11484), 'numpy.append', 'np.append', (["(kirkpatrick2020LF['bin_center'] - 75)", "(kirkpatrick2020LF['bin_center'][-1] + 75)"], {}), "(kirkpatrick2020LF['bin_center'] - 75, kirkpatrick2020LF[\n 'bin_center'][-1] + 75)\n", (11399, 11484), True, 'import numpy as np\n'), ((11661, 11698), 'numpy.array', 'np.array', (["kirkpatrick2020LF['values']"], {}), "(kirkpatrick2020LF['values'])\n", (11669, 11698), True, 
'import numpy as np\n'), ((11707, 11741), 'numpy.array', 'np.array', (["kirkpatrick2020LF['unc']"], {}), "(kirkpatrick2020LF['unc'])\n", (11715, 11741), True, 'import numpy as np\n'), ((12920, 12954), 'numpy.isnan', 'np.isnan', (["model_vals['binary_spt']"], {}), "(model_vals['binary_spt'])\n", (12928, 12954), True, 'import numpy as np\n'), ((1055, 1067), 'numpy.isnan', 'np.isnan', (['sc'], {}), '(sc)\n', (1063, 1067), True, 'import numpy as np\n'), ((1072, 1084), 'numpy.isnan', 'np.isnan', (['sc'], {}), '(sc)\n', (1080, 1084), True, 'import numpy as np\n'), ((1119, 1165), 'numpy.array', 'np.array', (['[interpolators[0], interpolators[1]]'], {}), '([interpolators[0], interpolators[1]])\n', (1127, 1165), True, 'import numpy as np\n'), ((1979, 2008), 'numpy.array', 'np.array', (['[valuesm, valuesag]'], {}), '([valuesm, valuesag])\n', (1987, 2008), True, 'import numpy as np\n'), ((3394, 3492), 'splat.simulate.simulateMasses', 'spsim.simulateMasses', (['nsim'], {'range': '[ranges[0], ranges[1]]', 'distribution': '"""power-law"""', 'alpha': '(0.6)'}), "(nsim, range=[ranges[0], ranges[1]], distribution=\n 'power-law', alpha=0.6)\n", (3414, 3492), True, 'import splat.simulate as spsim\n'), ((3533, 3611), 'splat.simulate.simulateAges', 'spsim.simulateAges', (['nsim'], {'range': '[ranges[2], ranges[3]]', 'distribution': '"""uniform"""'}), "(nsim, range=[ranges[2], ranges[3]], distribution='uniform')\n", (3551, 3611), True, 'import splat.simulate as spsim\n'), ((3746, 3835), 'splat.simulate.simulateMassRatios', 'spsim.simulateMassRatios', (['nsim'], {'distribution': '"""power-law"""', 'q_range': '[0.1, 1.0]', 'gamma': '(4)'}), "(nsim, distribution='power-law', q_range=[0.1, 1.0],\n gamma=4)\n", (3770, 3835), True, 'import splat.simulate as spsim\n'), ((3846, 3944), 'splat.simulate.simulateMasses', 'spsim.simulateMasses', (['nsim'], {'range': '[ranges[0], ranges[1]]', 'distribution': '"""power-law"""', 'alpha': '(0.6)'}), "(nsim, range=[ranges[0], ranges[1]], distribution=\n 
'power-law', alpha=0.6)\n", (3866, 3944), True, 'import splat.simulate as spsim\n'), ((3980, 4058), 'splat.simulate.simulateAges', 'spsim.simulateAges', (['nsim'], {'range': '[ranges[2], ranges[3]]', 'distribution': '"""uniform"""'}), "(nsim, range=[ranges[2], ranges[3]], distribution='uniform')\n", (3998, 4058), True, 'import splat.simulate as spsim\n'), ((5604, 5628), 'pandas.read_pickle', 'pd.read_pickle', (['filename'], {}), '(filename)\n', (5618, 5628), True, 'import pandas as pd\n'), ((5995, 6040), 'splat.empirical.typeToBC', 'spe.typeToBC', (['spt', 'filt'], {'ref': '"""filippazzo2015"""'}), "(spt, filt, ref='filippazzo2015')\n", (6007, 6040), True, 'import splat.empirical as spe\n'), ((6151, 6164), 'numpy.array', 'np.array', (['spt'], {}), '(spt)\n', (6159, 6164), True, 'import numpy as np\n'), ((6291, 6408), 'numpy.polyval', 'np.polyval', (["splat.SPT_BC_RELATIONS[ref]['filters'][filt]['coeff']", "(spt - splat.SPT_BC_RELATIONS[ref]['sptoffset'])"], {}), "(splat.SPT_BC_RELATIONS[ref]['filters'][filt]['coeff'], spt -\n splat.SPT_BC_RELATIONS[ref]['sptoffset'])\n", (6301, 6408), True, 'import numpy as np\n'), ((6500, 6530), 'numpy.random.normal', 'np.random.normal', (['bc', 'bc_error'], {}), '(bc, bc_error)\n', (6516, 6530), True, 'import numpy as np\n'), ((6548, 6575), 'numpy.place', 'np.place', (['res', '(~mask)', 'rands'], {}), '(res, ~mask, rands)\n', (6556, 6575), True, 'import numpy as np\n'), ((7881, 7911), 'numpy.ones_like', 'np.ones_like', (["mods['sing_spt']"], {}), "(mods['sing_spt'])\n", (7893, 7911), True, 'import numpy as np\n'), ((11116, 11188), 'numpy.array', 'np.array', (['[525, 675, 825, 975, 1125, 1275, 1425, 1575, 1725, 1875, 2025]'], {}), '([525, 675, 825, 975, 1125, 1275, 1425, 1575, 1725, 1875, 2025])\n', (11124, 11188), True, 'import numpy as np\n'), ((11208, 11282), 'numpy.array', 'np.array', (['[4.24, 2.8, 1.99, 1.72, 1.11, 1.95, 0.94, 0.81, 0.78, 0.5, 0.72]'], {}), '([4.24, 2.8, 1.99, 1.72, 1.11, 1.95, 0.94, 0.81, 0.78, 0.5, 
0.72])\n', (11216, 11282), True, 'import numpy as np\n'), ((11297, 11368), 'numpy.array', 'np.array', (['[0.7, 0.37, 0.32, 0.3, 0.25, 0.3, 0.22, 0.2, 0.2, 0.17, 0.18]'], {}), '([0.7, 0.37, 0.32, 0.3, 0.25, 0.3, 0.22, 0.2, 0.2, 0.17, 0.18])\n', (11305, 11368), True, 'import numpy as np\n'), ((11596, 11644), 'numpy.histogram', 'np.histogram', (['teffs'], {'bins': 'binedges', 'normed': '(False)'}), '(teffs, bins=binedges, normed=False)\n', (11608, 11644), True, 'import numpy as np\n'), ((11830, 11859), 'numpy.ones_like', 'np.ones_like', (['obs_monte_carlo'], {}), '(obs_monte_carlo)\n', (11842, 11859), True, 'import numpy as np\n'), ((11884, 11913), 'numpy.ones_like', 'np.ones_like', (['obs_monte_carlo'], {}), '(obs_monte_carlo)\n', (11896, 11913), True, 'import numpy as np\n'), ((12099, 12118), 'numpy.nanmedian', 'np.nanmedian', (['scale'], {}), '(scale)\n', (12111, 12118), True, 'import numpy as np\n'), ((12120, 12136), 'numpy.nanstd', 'np.nanstd', (['scale'], {}), '(scale)\n', (12129, 12136), True, 'import numpy as np\n'), ((12980, 13036), 'numpy.random.choice', 'np.random.choice', (["model_vals['binary_spt'][~nans]", 'ndraw'], {}), "(model_vals['binary_spt'][~nans], ndraw)\n", (12996, 13036), True, 'import numpy as np\n'), ((13058, 13134), 'numpy.random.choice', 'np.random.choice', (["model_vals['prim_evol']['temperature'].value[~nans]", 'ndraw'], {}), "(model_vals['prim_evol']['temperature'].value[~nans], ndraw)\n", (13074, 13134), True, 'import numpy as np\n'), ((13156, 13224), 'numpy.random.choice', 'np.random.choice', (["model_vals['prim_evol']['age'].value[~nans]", 'ndraw'], {}), "(model_vals['prim_evol']['age'].value[~nans], ndraw)\n", (13172, 13224), True, 'import numpy as np\n'), ((13245, 13365), 'numpy.random.choice', 'np.random.choice', (["(model_vals['prim_evol']['mass'].value[~nans] + model_vals['sec_evol'][\n 'mass'].value[~nans])", 'ndraw'], {}), "(model_vals['prim_evol']['mass'].value[~nans] + model_vals[\n 'sec_evol']['mass'].value[~nans], 
ndraw)\n", (13261, 13365), True, 'import numpy as np\n'), ((13384, 13440), 'numpy.concatenate', 'np.concatenate', (["[model_vals['sing_spt'], choices['spt']]"], {}), "([model_vals['sing_spt'], choices['spt']])\n", (13398, 13440), True, 'import numpy as np\n'), ((13471, 13550), 'numpy.concatenate', 'np.concatenate', (["[model_vals['sing_evol']['temperature'].value, choices['teff']]"], {}), "([model_vals['sing_evol']['temperature'].value, choices['teff']])\n", (13485, 13550), True, 'import numpy as np\n'), ((13581, 13651), 'numpy.concatenate', 'np.concatenate', (["[model_vals['sing_evol']['age'].value, choices['age']]"], {}), "([model_vals['sing_evol']['age'].value, choices['age']])\n", (13595, 13651), True, 'import numpy as np\n'), ((13683, 13755), 'numpy.concatenate', 'np.concatenate', (["[model_vals['sing_evol']['mass'].value, choices['mass']]"], {}), "([model_vals['sing_evol']['mass'].value, choices['mass']])\n", (13697, 13755), True, 'import numpy as np\n'), ((2055, 2069), 'numpy.log10', 'np.log10', (['mass'], {}), '(mass)\n', (2063, 2069), True, 'import numpy as np\n'), ((2071, 2084), 'numpy.log10', 'np.log10', (['age'], {}), '(age)\n', (2079, 2084), True, 'import numpy as np\n'), ((2148, 2162), 'numpy.log10', 'np.log10', (['mass'], {}), '(mass)\n', (2156, 2162), True, 'import numpy as np\n'), ((2164, 2177), 'numpy.log10', 'np.log10', (['age'], {}), '(age)\n', (2172, 2177), True, 'import numpy as np\n'), ((5554, 5579), 'pickle.dump', 'pickle.dump', (['values', 'file'], {}), '(values, file)\n', (5565, 5579), False, 'import pickle\n'), ((6177, 6194), 'numpy.ones_like', 'np.ones_like', (['spt'], {}), '(spt)\n', (6189, 6194), True, 'import numpy as np\n'), ((7736, 7767), 'numpy.zeros_like', 'np.zeros_like', (["mods['sing_spt']"], {}), "(mods['sing_spt'])\n", (7749, 7767), True, 'import numpy as np\n'), ((8688, 8717), 'numpy.ones_like', 'np.ones_like', (["mods['sec_spt']"], {}), "(mods['sec_spt'])\n", (8700, 8717), True, 'import numpy as np\n'), ((10726, 10747), 
'pandas.DataFrame', 'pd.DataFrame', (['singles'], {}), '(singles)\n', (10738, 10747), True, 'import pandas as pd\n'), ((10749, 10778), 'pandas.DataFrame', 'pd.DataFrame', (['chosen_binaries'], {}), '(chosen_binaries)\n', (10761, 10778), True, 'import pandas as pd\n'), ((11942, 12006), 'numpy.nansum', 'np.nansum', (['(obs_monte_carlo * pred_monte / unc_monte ** 2)'], {'axis': '(1)'}), '(obs_monte_carlo * pred_monte / unc_monte ** 2, axis=1)\n', (11951, 12006), True, 'import numpy as np\n'), ((12018, 12069), 'numpy.nansum', 'np.nansum', (['(pred_monte ** 2 / unc_monte ** 2)'], {'axis': '(1)'}), '(pred_monte ** 2 / unc_monte ** 2, axis=1)\n', (12027, 12069), True, 'import numpy as np\n'), ((12190, 12209), 'numpy.nanmedian', 'np.nanmedian', (['scale'], {}), '(scale)\n', (12202, 12209), True, 'import numpy as np\n'), ((5731, 5745), 'numpy.log10', 'np.log10', (['lumn'], {}), '(lumn)\n', (5739, 5745), True, 'import numpy as np\n'), ((6241, 6259), 'numpy.zeros_like', 'np.zeros_like', (['spt'], {}), '(spt)\n', (6254, 6259), True, 'import numpy as np\n'), ((5093, 5113), 'numpy.array', 'np.array', (['spt_primar'], {}), '(spt_primar)\n', (5101, 5113), True, 'import numpy as np\n'), ((5137, 5157), 'numpy.array', 'np.array', (['spt_second'], {}), '(spt_second)\n', (5145, 5157), True, 'import numpy as np\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
from enum import Enum
import numpy as np
from mmcv.utils import is_str
class Color(Enum):
    """An enum that defines common colors.

    Contains red, green, blue, cyan, yellow, magenta, white and black.
    Values are 3-tuples in BGR channel order (0-255), matching the tuples
    returned by :func:`color_val`.
    """
    red = (0, 0, 255)
    green = (0, 255, 0)
    blue = (255, 0, 0)
    cyan = (255, 255, 0)
    yellow = (0, 255, 255)
    magenta = (255, 0, 255)
    white = (255, 255, 255)
    black = (0, 0, 0)
def color_val(color):
    """Convert various input to color tuples.

    Args:
        color (:obj:`Color`/str/tuple/int/ndarray): Color inputs

    Returns:
        tuple[int]: A tuple of 3 integers indicating BGR channels.
    """
    if is_str(color):
        # look the name up in the Color enum
        return Color[color].value
    if isinstance(color, Color):
        return color.value
    if isinstance(color, tuple):
        assert len(color) == 3
        assert all(0 <= channel <= 255 for channel in color)
        return color
    if isinstance(color, int):
        assert 0 <= color <= 255
        return (color,) * 3
    if isinstance(color, np.ndarray):
        assert color.ndim == 1 and color.size == 3
        assert np.all((color >= 0) & (color <= 255))
        return tuple(color.astype(np.uint8))
    raise TypeError(f'Invalid type for color: {type(color)}')
| [
"numpy.all",
"mmcv.utils.is_str"
] | [((706, 719), 'mmcv.utils.is_str', 'is_str', (['color'], {}), '(color)\n', (712, 719), False, 'from mmcv.utils import is_str\n'), ((1180, 1217), 'numpy.all', 'np.all', (['((color >= 0) & (color <= 255))'], {}), '((color >= 0) & (color <= 255))\n', (1186, 1217), True, 'import numpy as np\n')] |
"""Lexical mapping of ontology classes
The core data structure used here is a Mapping Graph. This is a
networkx Graph object (i.e. singly labeled, non-directional) that
connects lexically mapped nodes between two ontologies.
Edge Properties
---------------
idpair: (string,string)
the pair of identifiers mapped
score: number
Number between 0 and 100 indicating strength of match based on multiple criteria
synonyms: (Synonym,Synonym)
pair of Synonym objects (including primary labels) used to create mapping
simscores: (number, number)
Semantic similarity A to B and B to A respectively.
Note that false positives or negatives in the ancestors or descendants in the xref graph will lead to bias in these scores.
reciprocal_score: int
A number between 0 and 4 that indicates whether this was a reciprocal best match (RBM), with additional gradation based on whether
ties are included. We distinguish between a true BM and a tied BM. 4 indicates true RBM. 1 indicates reciprocal tied BM (ie both are tied BMs). 2 indicates a combo of
a true BM and a tied BM.
Note that ties are less likely if semantic similarity is considered in the match.
"""
import networkx as nx
from networkx.algorithms import strongly_connected_components
import logging
import re
from ontobio.ontol import Synonym, Ontology
from collections import defaultdict
import pandas as pd
import numpy as np
import math
from marshmallow import Schema, fields, pprint, post_load
LABEL_OR_EXACT = 'label_or_exact'
logger = logging.getLogger(__name__)
def logit(p):
    """Return the base-2 log-odds of probability ``p``."""
    odds = p / (1 - p)
    return math.log2(odds)
def inv_logit(w):
    """Inverse of :func:`logit`: map a base-2 log-odds weight back to a probability."""
    return 1.0 / (1.0 + 2.0 ** (-w))
def default_wsmap():
    """
    Default word to normalized synonym list

    Stopwords ('a', 'of', 'the') and the empty token map to '';
    lowercase roman numerals i..xx map to their decimal strings.
    """
    wsmap = {'a': '', 'of': '', 'the': '', '': ''}
    romans = ['i', 'ii', 'iii', 'iv', 'v', 'vi', 'vii', 'viii', 'ix', 'x',
              'xi', 'xii', 'xiii', 'xiv', 'xv', 'xvi', 'xvii', 'xviii', 'xix', 'xx']
    for value, numeral in enumerate(romans, start=1):
        wsmap[numeral] = str(value)
    return wsmap
class LexicalMapEngine():
"""
generates lexical matches between pairs of ontology classes
"""
SCORE='score'
LEXSCORE='lexscore'
SIMSCORES='simscores'
CONDITIONAL_PR='cpr'
def __init__(self, wsmap=default_wsmap(), config=None):
"""
Arguments
---------
wdmap: dict
maps words to normalized synonyms.
config: dict
A configuration conforming to LexicalMapConfigSchema
"""
# maps label or syn value to Synonym object
self.lmap = {}
# maps node id to synonym objects
self.smap = {}
self.wsmap = wsmap
self.npattern = re.compile('[\W_]+')
self.exclude_obsolete = True
self.ontology_pairs = None
self.id_to_ontology_map = defaultdict(list)
self.merged_ontology = Ontology()
self.config = config if config is not None else {}
self.stats = {}
def index_ontologies(self, onts):
logger.info('Indexing: {}'.format(onts))
for ont in onts:
self.index_ontology(ont)
    def index_ontology(self, ont):
        """
        Adds an ontology to the index
        This iterates through all labels and synonyms in the ontology, creating an index
        """
        self.merged_ontology.merge([ont])
        syns = ont.all_synonyms(include_label=True)
        include_id = self._is_meaningful_ids()
        logger.info("Include IDs as synonyms: {}".format(include_id))
        if include_id:
            # treat each node's own identifier (or URI fragment) as a label synonym
            for n in ont.nodes():
                v = n
                # Get fragment: strip everything up to the final '/' and then '#'
                if v.startswith('http'):
                    v = re.sub('.*/','',v)
                    v = re.sub('.*#','',v)
                syns.append(Synonym(n, val=v, pred='label'))
        logger.info("Indexing {} syns in {}".format(len(syns),ont))
        logger.info("Distinct lexical values: {}".format(len(self.lmap.keys())))
        for syn in syns:
            self.index_synonym(syn, ont)
        # remember which ontologies each node id belongs to
        for nid in ont.nodes():
            self.id_to_ontology_map[nid].append(ont)
def label(self, nid):
return self.merged_ontology.label(nid)
    def index_synonym(self, syn, ont):
        """
        Index a synonym
        Typically not called from outside this object; called by `index_ontology`
        """
        if not syn.val:
            if syn.pred == 'label':
                if not self._is_meaningful_ids():
                    if not ont.is_obsolete(syn.class_id):
                        pass
                        #logger.error('Use meaningful ids if label not present: {}'.format(syn))
            else:
                logger.warning("Incomplete syn: {}".format(syn))
            # synonyms without a value cannot be indexed
            return
        if self.exclude_obsolete and ont.is_obsolete(syn.class_id):
            return
        syn.ontology = ont
        prefix,_ = ont.prefix_fragment(syn.class_id)
        v = syn.val
        caps_match = re.match('[A-Z]+',v)
        if caps_match:
            # if the leading run of capitals covers at least a third of the
            # string, assume abbreviation
            # NOTE(review): an earlier comment here claimed "> 75%", but the code
            # checks len(v)/3 — documenting what the code actually does
            if caps_match.span()[1] >= len(v)/3:
                syn.is_abbreviation(True)
        # chebi 'synonyms' are often not real synonyms
        # https://github.com/ebi-chebi/ChEBI/issues/3294
        if not re.match('.*[a-zA-Z]',v):
            if prefix != 'CHEBI':
                logger.warning('Ignoring suspicous synonym: {}'.format(syn))
            return
        v = self._standardize_label(v)
        # TODO: do this once ahead of time
        # copy the global word-synonym map, then overlay ontology-specific synsets
        wsmap = {}
        for w,s in self.wsmap.items():
            wsmap[w] = s
        for ss in self._get_config_val(prefix,'synsets',[]):
            # TODO: weights
            wsmap[ss['synonym']] = ss['word']
        nv = self._normalize_label(v, wsmap)
        # always index the standardized form; additionally index the normalized
        # form (with reduced confidence) when it differs and is permitted
        self._index_synonym_val(syn, v)
        nweight = self._get_config_val(prefix, 'normalized_form_confidence', 0.8)
        if nweight > 0 and not syn.is_abbreviation():
            if nv != v:
                nsyn = Synonym(syn.class_id,
                                val=syn.val,
                                pred=syn.pred,
                                lextype=syn.lextype,
                                ontology=ont,
                                confidence=syn.confidence * nweight)
                self._index_synonym_val(nsyn, nv)
def _index_synonym_val(self, syn, v):
lmap = self.lmap
smap = self.smap
cid = syn.class_id
if v not in lmap:
lmap[v] = []
lmap[v].append(syn)
if cid not in smap:
smap[cid] = []
smap[cid].append(syn)
def _standardize_label(self, v):
# Add spaces separating camelcased strings
v = re.sub('([a-z])([A-Z])',r'\1 \2',v)
# always use lowercase when comparing
# we may want to make this configurable in future
v = v.lower()
return v
def _normalize_label(self, s, wsmap):
"""
normalized form of a synonym
"""
toks = []
for tok in list(set(self.npattern.sub(' ', s).split(' '))):
if tok in wsmap:
tok=wsmap[tok]
if tok != "":
toks.append(tok)
toks.sort()
return " ".join(toks)
def _get_config_val(self, prefix, k, default=None):
v = None
for oc in self.config.get('ontology_configurations', []):
if prefix == oc.get('prefix', ''):
v = oc.get(k, None)
if v is None:
v = self.config.get(k, None)
if v is None:
v = default
return v
    def _is_meaningful_ids(self):
        # True if ontology IDs carry lexical meaning and should be indexed as synonyms
        return self.config.get('meaningful_ids', False)
    def find_equiv_sets(self):
        # the lexical index itself maps each shared value to the synonyms using it
        return self.lmap
def get_xref_graph(self):
"""
Generate mappings based on lexical properties and return as nx graph.
Algorithm
~~~~~~~~~
- A dictionary is stored between ref:`Synonym` values and synonyms. See ref:`index_synonym`.
Note that Synonyms include the primary label
- Each key in the dictionary is examined to determine if there exist two Synonyms from
different ontology classes
This avoids N^2 pairwise comparisons: instead the time taken is linear
After initial mapping is made, additional scoring is performed on each mapping
Edge properties
~~~~~~~~~~~~~~~
The return object is a nx graph, connecting pairs of ontology classes.
Edges are annotated with metadata about how the match was found:
syns: pair
pair of `Synonym` objects, corresponding to the synonyms for the two nodes
score: int
score indicating strength of mapping, between 0 and 100
Returns
-------
Graph
nx graph (bidirectional)
"""
# initial graph; all matches
g = nx.MultiDiGraph()
# lmap collects all syns by token
items = self.lmap.items()
logger.info("collecting initial xref graph, items={}".format(len(items)))
i = 0
sum_nsyns = 0
n_skipped = 0
has_self_comparison = False
if self.ontology_pairs:
for (o1id,o2id) in self.ontology_pairs:
if o1id == o2id:
has_self_comparison = True
for (v,syns) in items:
sum_nsyns += len(syns)
i += 1
if i % 1000 == 1:
logger.info('{}/{} lexical items avgSyns={}, skipped={}'.format(i,len(items), sum_nsyns/len(items), n_skipped))
if len(syns) < 2:
n_skipped += 1
next
if len(syns) > 10:
logger.info('Syns for {} = {}'.format(v,len(syns)))
for s1 in syns:
s1oid = s1.ontology.id
s1cid = s1.class_id
for s2 in syns:
# optimization step: although this is redundant with _is_comparable,
# we avoid inefficient additional calls
if s1oid == s2.ontology.id and not has_self_comparison:
next
if s1cid != s2.class_id:
if self._is_comparable(s1,s2):
g.add_edge(s1.class_id, s2.class_id, syns=(s1,s2))
logger.info("getting best supporting synonym pair for each match")
# graph of best matches
xg = nx.Graph()
for i in g.nodes():
for j in g.neighbors(i):
best = 0
bestm = None
for m in g.get_edge_data(i,j).values():
(s1,s2) = m['syns']
score = self._combine_syns(s1,s2)
if score > best:
best = score
bestm = m
syns = bestm['syns']
xg.add_edge(i, j,
score=best,
lexscore=best,
syns=syns,
idpair=(i,j))
self.score_xrefs_by_semsim(xg)
self.assign_best_matches(xg)
if self.merged_ontology.xref_graph is not None:
self.compare_to_xrefs(xg, self.merged_ontology.xref_graph)
else:
logger.error("No xref graph for merged ontology")
logger.info("finished xref graph")
return xg
# true if syns s1 and s2 should be compared.
# - if ontology_pairs is set, then only consider (s1,s2) if their respective source ontologies are in the list of pairs
# - otherwise compare all classes, but only in one direction
def _is_comparable(self, s1, s2):
if s1.class_id == s2.class_id:
return False
if self.ontology_pairs is not None:
#logger.debug('TEST: {}{} in {}'.format(s1.ontology.id, s2.ontology.id, self.ontology_pairs))
return (s1.ontology.id, s2.ontology.id) in self.ontology_pairs
else:
return s1.class_id < s2.class_id
def _blanket(self, nid):
nodes = set()
for ont in self.id_to_ontology_map[nid]:
nodes.update(ont.ancestors(nid))
nodes.update(ont.descendants(nid))
return list(nodes)
    def score_xrefs_by_semsim(self, xg, ont=None):
        """
        Given an xref graph (see ref:`get_xref_graph`), this will adjust scores based on
        the semantic similarity of matches.

        Mutates `xg` in place: sets the SIMSCORES edge attribute and scales SCORE.
        `ont` is only used for logging here.
        """
        logger.info("scoring xrefs by semantic similarity for {} nodes in {}".format(len(xg.nodes()), ont))
        for (i,j,d) in xg.edges(data=True):
            pfx1 = self._id_to_ontology(i)
            pfx2 = self._id_to_ontology(j)
            ancs1 = self._blanket(i)
            ancs2 = self._blanket(j)
            # directional similarity in both directions
            s1,_,_ = self._sim(xg, ancs1, ancs2, pfx1, pfx2)
            s2,_,_ = self._sim(xg, ancs2, ancs1, pfx2, pfx1)
            # combine via probabilistic (noisy) OR
            s = 1 - ((1-s1) * (1-s2))
            logger.debug("Score {} x {} = {} x {} = {} // {}".format(i,j,s1,s2,s, d))
            xg[i][j][self.SIMSCORES] = (s1,s2)
            xg[i][j][self.SCORE] *= s
def _sim(self, xg, ancs1, ancs2, pfx1, pfx2):
"""
Compare two lineages
"""
xancs1 = set()
for a in ancs1:
if a in xg:
# TODO: restrict this to neighbors in single ontology
for n in xg.neighbors(a):
pfx = self._id_to_ontology(n)
if pfx == pfx2:
xancs1.add(n)
logger.debug('SIM={}/{} ## {}'.format(len(xancs1.intersection(ancs2)), len(xancs1), xancs1.intersection(ancs2), xancs1))
n_shared = len(xancs1.intersection(ancs2))
n_total = len(xancs1)
return (1+n_shared) / (1+n_total), n_shared, n_total
# given an ontology class id,
# return map keyed by ontology id, value is a list of (score, ext_class_id) pairs
def _neighborscores_by_ontology(self, xg, nid):
xrefmap = defaultdict(list)
for x in xg.neighbors(nid):
score = xg[nid][x][self.SCORE]
for ont in self.id_to_ontology_map[x]:
xrefmap[ont.id].append( (score,x) )
return xrefmap
# normalize direction
def _dirn(self, edge, i, j):
if edge['idpair'] == (i,j):
return 'fwd'
elif edge['idpair'] == (j,i):
return 'rev'
else:
return None
    def _id_to_ontology(self, id):
        # map a class id to its ontology prefix, e.g. 'UBERON:0000001' -> 'UBERON'
        return self.merged_ontology.prefix(id)
    #onts = self.id_to_ontology_map[id]
    #if len(onts) > 1:
    #    logger.warning(">1 ontology for {}".format(id))
    def compare_to_xrefs(self, xg1, xg2):
        """
        Compares a base xref graph with another one

        For each edge (i,j) in `xg1`, counts how many xrefs in `xg2` link i (resp. j)
        to the same target ontology, then annotates the edge in `xg1` with
        left_novel / right_novel (no such xref exists) and
        left_consistent / right_consistent (the exact same mapping exists).
        """
        ont = self.merged_ontology
        for (i,j,d) in xg1.edges(data=True):
            ont_left = self._id_to_ontology(i)
            ont_right = self._id_to_ontology(j)
            # NOTE(review): unique_lr / unique_rl are computed but never used
            unique_lr = True
            num_xrefs_left = 0
            same_left = False
            if i in xg2:
                for j2 in xg2.neighbors(i):
                    ont_right2 = self._id_to_ontology(j2)
                    if ont_right2 == ont_right:
                        unique_lr = False
                        num_xrefs_left += 1
                        if j2 == j:
                            same_left = True
            unique_rl = True
            num_xrefs_right = 0
            same_right = False
            if j in xg2:
                for i2 in xg2.neighbors(j):
                    ont_left2 = self._id_to_ontology(i2)
                    if ont_left2 == ont_left:
                        unique_rl = False
                        num_xrefs_right += 1
                        if i2 == i:
                            same_right = True
            # annotate using the normalized (idpair) orientation
            (x,y) = d['idpair']
            xg1[x][y]['left_novel'] = num_xrefs_left==0
            xg1[x][y]['right_novel'] = num_xrefs_right==0
            xg1[x][y]['left_consistent'] = same_left
            xg1[x][y]['right_consistent'] = same_right
def assign_best_matches(self, xg):
"""
For each node in the xref graph, tag best match edges
"""
logger.info("assigning best matches for {} nodes".format(len(xg.nodes())))
for i in xg.nodes():
xrefmap = self._neighborscores_by_ontology(xg, i)
for (ontid,score_node_pairs) in xrefmap.items():
score_node_pairs.sort(reverse=True)
(best_score,best_node) = score_node_pairs[0]
logger.info("BEST for {}: {} in {} from {}".format(i, best_node, ontid, score_node_pairs))
edge = xg[i][best_node]
dirn = self._dirn(edge, i, best_node)
best_kwd = 'best_' + dirn
if len(score_node_pairs) == 1 or score_node_pairs[0] > score_node_pairs[1]:
edge[best_kwd] = 2
else:
edge[best_kwd] = 1
for (score,j) in score_node_pairs:
edge_ij = xg[i][j]
dirn_ij = self._dirn(edge_ij, i, j)
edge_ij['cpr_'+dirn_ij] = score / sum([s for s,_ in score_node_pairs])
for (i,j,edge) in xg.edges(data=True):
# reciprocal score is set if (A) i is best for j, and (B) j is best for i
rs = 0
if 'best_fwd' in edge and 'best_rev' in edge:
rs = edge['best_fwd'] * edge['best_rev']
edge['reciprocal_score'] = rs
edge['cpr'] = edge['cpr_fwd'] * edge['cpr_rev']
def _best_match_syn(self, sx, sys, scope_map):
"""
The best match is determined by the highest magnitude weight
"""
SUBSTRING_WEIGHT = 0.2
WBEST = None
sbest = None
sxv = self._standardize_label(sx.val)
sxp = self._id_to_ontology(sx.class_id)
for sy in sys:
syv = self._standardize_label(sy.val)
syp = self._id_to_ontology(sy.class_id)
W = None
if sxv == syv:
confidence = sx.confidence * sy.confidence
if sx.is_abbreviation() or sy.is_abbreviation:
confidence *= self._get_config_val(sxp, 'abbreviation_confidence', 0.5)
confidence *= self._get_config_val(syp, 'abbreviation_confidence', 0.5)
W = scope_map[sx.scope()][sy.scope()] + logit(confidence/2)
elif sxv in syv:
W = np.array((-SUBSTRING_WEIGHT, SUBSTRING_WEIGHT, 0, 0))
elif syv in sxv:
W = np.array((SUBSTRING_WEIGHT, -SUBSTRING_WEIGHT, 0, 0))
if W is not None:
# The best match is determined by the highest magnitude weight
if WBEST is None or max(abs(W)) > max(abs(WBEST)):
WBEST = W
sbest = sy
return WBEST, sbest
    def weighted_axioms(self, x, y, xg):
        """
        return a tuple (sub,sup,equiv,other) indicating estimated prior probabilities for an interpretation of a mapping
        between x and y.
        See kboom paper
        """
        # TODO: allow additional weighting
        # weights are log odds w=log(p/(1-p))
        # (Sub,Sup,Eq,Other)
        scope_pairs = [
            ('label', 'label', 0.0, 0.0, 3.0,-0.8),
            ('label', 'exact', 0.0, 0.0, 2.5,-0.5),
            ('label', 'broad', -1.0, 1.0, 0.0, 0.0),
            ('label', 'narrow', 1.0,-1.0, 0.0, 0.0),
            ('label', 'related', 0.0, 0.0, 0.0, 0.0),
            ('exact', 'exact', 0.0, 0.0, 2.5,-0.5),
            ('exact', 'broad', -1.0, 1.0, 0.0, 0.0),
            ('exact', 'narrow', 1.0,-1.0, 0.0, 0.0),
            ('exact', 'related', 0.0, 0.0, 0.0, 0.0),
            ('related', 'broad', -0.5, 0.5, 0.0, 0.0),
            ('related', 'narrow', 0.5,-0.5, 0.0, 0.0),
            ('related', 'related', 0.0, 0.0, 0.0, 0.0),
            ('broad', 'broad', 0.0, 0.0, 0.0, 1.0),
            ('broad', 'narrow', -0.5, 0.5, 0.0, 0.2),
            ('narrow', 'narrow', 0.0, 0.0, 0.0, 0.0)
        ]
        # populate symmetric lookup matrix
        # (the reverse direction swaps the Sub and Sup components)
        scope_map = defaultdict(dict)
        for (l,r,w1,w2,w3,w4) in scope_pairs:
            l = l.upper()
            r = r.upper()
            scope_map[l][r] = np.array((w1,w2,w3,w4))
            scope_map[r][l] = np.array((w2,w1,w3,w4))
        # TODO: get prior based on ontology pair
        # cumulative sum of weights
        WS = None
        pfx1 = self._id_to_ontology(x)
        pfx2 = self._id_to_ontology(y)
        # configured per-ontology-pair weights; an exact pair match overrides any
        # single-prefix default already found
        for mw in self.config.get('match_weights', []):
            mpfx1 = mw.get('prefix1','')
            mpfx2 = mw.get('prefix2','')
            X = np.array(mw['weights'])
            if mpfx1 == pfx1 and mpfx2 == pfx2:
                WS = X
            elif mpfx2 == pfx1 and mpfx1 == pfx2:
                WS = self._flipweights(X)
            elif mpfx1 == pfx1 and mpfx2 == '' and WS is None:
                WS = X
            elif mpfx2 == pfx1 and mpfx1 == '' and WS is None:
                WS = self._flipweights(X)
        if WS is None:
            WS = np.array((0.0, 0.0, 0.0, 0.0))
        # defaults
        WS += np.array(self.config.get('default_weights', [0.0, 0.0, 1.5, -0.1]))
        logger.info('WS defaults={}'.format(WS))
        # configured weights for this specific pair of classes
        for xw in self.config.get('xref_weights', []):
            left = xw.get('left','')
            right = xw.get('right','')
            X = np.array(xw['weights'])
            if x == left and y == right:
                WS += X
                logger.info('MATCH: {} for {}-{}'.format(X, x, y))
            elif y == left and x == right:
                WS += self._flipweights(X)
                logger.info('IMATCH: {}'.format(X))
        smap = self.smap
        # TODO: symmetrical
        # NOTE(review): WT accumulates every best-match weight but only WBESTMAX is
        # actually added to WS below; n only gates whether WBESTMAX is applied
        WT = np.array((0.0, 0.0, 0.0, 0.0))
        WBESTMAX = np.array((0.0, 0.0, 0.0, 0.0))
        n = 0
        for sx in smap[x]:
            WBEST, _ = self._best_match_syn(sx, smap[y], scope_map)
            if WBEST is not None:
                WT += WBEST
                n += 1
                if max(abs(WBEST)) > max(abs(WBESTMAX)):
                    WBESTMAX = WBEST
        for sy in smap[y]:
            WBEST, _ = self._best_match_syn(sy, smap[x], scope_map)
            if WBEST is not None:
                WT += WBEST
                n += 1
        # average best match
        if n > 0:
            logger.info('Adding BESTMAX={}'.format(WBESTMAX))
            WS += WBESTMAX
        # TODO: xref, many to many
        WS += self._graph_weights(x, y, xg)
        # TODO: include additional defined weights, eg ORDO
        logger.info('Adding WS, gw={}'.format(WS))
        # jaccard similarity
        (ss1,ss2) = xg[x][y][self.SIMSCORES]
        WS[3] += ((1-ss1) + (1-ss2)) / 2
        # reciprocal best hits are higher confidence of equiv
        rs = xg[x][y]['reciprocal_score']
        if rs == 4:
            WS[2] += 0.5
        if rs == 0:
            WS[2] -= 0.2
        #P = np.expit(WS)
        # NOTE(review): numpy has no expit (it lives in scipy.special); the inverse
        # logit is computed explicitly instead
        P = 1/(1+np.exp(-WS))
        logger.info('Final WS={}, init P={}'.format(WS, P))
        # probs should sum to 1.0
        P = P / np.sum(P)
        return P
    def _graph_weights(self, x, y, xg):
        """
        Weight contribution from the structure of the xref graph around (x, y):
        penalizes/rewards based on other mappings to the same ontologies and on
        ancestor relationships, plus a cardinality-based prior (11/1m/m1/mm).
        """
        ont = self.merged_ontology
        xancs = ont.ancestors(x)
        yancs = ont.ancestors(y)
        pfx = self._id_to_ontology(x)
        pfy = self._id_to_ontology(y)
        # other nodes in x's ontology mapped to y, and vice versa
        xns = [n for n in xg.neighbors(y) if n != x and pfx == self._id_to_ontology(n)]
        yns = [n for n in xg.neighbors(x) if n != y and pfy == self._id_to_ontology(n)]
        pweight = 1.0
        # NOTE(review): W has integer dtype, so += pweight stores truncated ints
        W = np.array((0,0,0,0))
        card = '11'
        if len(xns) > 0:
            card = 'm1'
            for x2 in xns:
                if x2 in xancs:
                    W[0] += pweight
                if x in ont.ancestors(x2):
                    W[1] += pweight
        if len(yns) > 0:
            if card == '11':
                card = '1m'
            else:
                card = 'mm'
            for y2 in yns:
                if y2 in yancs:
                    W[1] += pweight
                if y in ont.ancestors(y2):
                    W[0] += pweight
        logger.debug('CARD: {}/{} <-> {}/{} = {} // X={} Y={} // W={}'.format(x,pfx, y,pfy, card, xns, yns, W))
        invcard = card
        if card == '1m':
            invcard = 'm1'
        elif card == 'm1':
            invcard = '1m'
        # cardinality weights: prefix-specific configuration beats the generic
        # configured default, which beats the built-in fallback below
        CW = None
        DEFAULT_CW = None
        for cw in self.config.get('cardinality_weights', []):
            if 'prefix1' not in cw and 'prefix2' not in cw:
                if card == cw['cardinality']:
                    DEFAULT_CW = np.array(cw['weights'])
                if invcard == cw['cardinality']:
                    DEFAULT_CW = self._flipweights(np.array(cw['weights']))
            if 'prefix1' in cw and 'prefix2' in cw:
                if pfx == cw['prefix1'] and pfy == cw['prefix2'] and card == cw['cardinality']:
                    CW = np.array(cw['weights'])
                if pfx == cw['prefix2'] and pfy == cw['prefix1'] and invcard == cw['cardinality']:
                    CW = self._flipweights(np.array(cw['weights']))
        if CW is None:
            if DEFAULT_CW is not None:
                CW = DEFAULT_CW
            else:
                if card == '11':
                    CW = np.array((0.0, 0.0, 1.0, 0.0))
                elif card == '1m':
                    CW = np.array((0.6, 0.4, 0.0, 0.0))
                elif card == 'm1':
                    CW = np.array((0.4, 0.6, 0.0, 0.0))
                elif card == 'mm':
                    CW = np.array((0.2, 0.2, 0.0, 0.5))
        return W + CW
def _flipweights(self, W):
return np.array((W[1],W[0],W[2],W[3]))
def grouped_mappings(self,id):
"""
return all mappings for a node, grouped by ID prefix
"""
g = self.get_xref_graph()
m = {}
for n in g.neighbors(id):
[prefix, local] = n.split(':')
if prefix not in m:
m[prefix] = []
m[prefix].append(n)
return m
def unmapped_nodes(self, xg, rs_threshold=0):
unmapped_set = set()
for nid in self.merged_ontology.nodes():
if nid in xg:
for (j,edge) in xg[nid].items():
rs = edge.get('reciprocal_score',0)
if rs < rs_threshold:
unmapped_set.add(nid)
else:
unmapped_set.add(nid)
return unmapped_set
    def unmapped_dataframe(self, xg, **args):
        """Return a pandas DataFrame (id, label, mapped_equivs) of unmapped nodes; kwargs pass to `unmapped_nodes`."""
        unodes = self.unmapped_nodes(xg, **args)
        ont = self.merged_ontology
        eg = ont.equiv_graph()
        items = []
        for n in unodes:
            mapped_equivs = ''
            if n in eg:
                # equivalents of n that themselves did get mapped
                equivs = set(eg.neighbors(n))
                mapped_equivs = list(equivs - unodes)
            items.append(dict(id=n,label=ont.label(n),mapped_equivs=mapped_equivs))
        df = pd.DataFrame(items, columns=['id','label', 'mapped_equivs'])
        df = df.sort_values(["id"])
        return df
# scores a pairwise combination of synonyms. This will be a mix of
# * individual confidence in the synonyms themselves
# * confidence of equivalence based on scopes
# TODO: unify this with probabilistic calculation
def _combine_syns(self, s1,s2):
cpred = self._combine_preds(s1.pred, s2.pred)
s = self._pred_score(cpred)
s *= s1.confidence * s2.confidence
if s1.is_abbreviation() or s2.is_abbreviation():
s *= self._get_config_val(self._id_to_ontology(s1.class_id), 'abbreviation_confidence', 0.5)
s *= self._get_config_val(self._id_to_ontology(s1.class_id), 'abbreviation_confidence', 0.5)
logger.debug("COMBINED: {} + {} = {}/{}".format(s1,s2,cpred,s))
return round(s)
def _rollup(self, p):
if p == 'label':
return LABEL_OR_EXACT
if p == 'hasExactSynonym':
return LABEL_OR_EXACT
return p
def _combine_preds(self, p1, p2):
if p1 == p2:
return p1
if self._rollup(p1) == self._rollup(p2):
return self._rollup(p1)
return p1 + p2
## TODO: allow this to be weighted by ontology
def _pred_score(self,p):
if p == 'label':
return 100
if p == LABEL_OR_EXACT:
return 90
if p == 'hasExactSynonym':
return 90
return 50
def _in_clique(self, x, cliques):
for s in cliques:
if x in s:
return s
return set()
def as_dataframe(self, xg):
cliques = self.cliques(xg)
ont = self.merged_ontology
items = []
for (x,y,d) in xg.edges(data=True):
# xg is a non-directional Graph object.
# to get a deterministic ordering we use the idpair key
(x,y) = d['idpair']
(s1,s2)=d['syns']
(ss1,ss2)=d['simscores']
clique = self._in_clique(x, cliques)
#ancs = nx.ancestors(g,x)
left_label = ont.label(x)
right_label = ont.label(y)
if ont.is_obsolete(x) and not left_label.startwith('obsolete'):
left_label = "obsolete " + left_label
if ont.is_obsolete(y) and not right_label.startwith('obsolete'):
right_label = "obsolete " + right_label
P = self.weighted_axioms(x,y,xg)
item = {'left':x, 'left_label':left_label,
'right':y, 'right_label':right_label,
'score':d['score'],
'left_match_type': s1.pred,
'right_match_type': s2.pred,
'left_match_val': s1.val,
'right_match_val': s2.val,
'left_simscore':ss1,
'right_simscore':ss2,
'reciprocal_score':d.get('reciprocal_score',0),
'conditional_pr_equiv': d.get('cpr'),
'pr_subClassOf': P[0],
'pr_superClassOf': P[1],
'pr_equivalentTo': P[2],
'pr_other': P[3],
'left_novel': d.get('left_novel'),
'right_novel': d.get('right_novel'),
'left_consistent': d.get('left_consistent'),
'right_consistent': d.get('right_consistent'),
'equiv_clique_size': len(clique)}
items.append(item)
ix = ['left', 'left_label', 'right', 'right_label',
'left_match_type', 'right_match_type',
'left_match_val', 'right_match_val',
'score', 'left_simscore', 'right_simscore', 'reciprocal_score',
'conditional_pr_equiv',
'pr_subClassOf', 'pr_superClassOf', 'pr_equivalentTo', 'pr_other',
'left_novel',
'right_novel',
'left_consistent',
'right_consistent',
'equiv_clique_size']
df = pd.DataFrame(items, columns=ix)
df = df.sort_values(["left","score","right"])
return df
def cliques(self, xg):
"""
Return all equivalence set cliques, assuming each edge in the xref graph is treated as equivalent,
and all edges in ontology are subClassOf
Arguments
---------
xg : Graph
an xref graph
Returns
-------
list of sets
"""
g = nx.DiGraph()
for (x,y) in self.merged_ontology.get_graph().edges():
g.add_edge(x,y)
for (x,y) in xg.edges():
g.add_edge(x,y)
g.add_edge(y,x)
return list(strongly_connected_components(g))
### MARSHMALLOW SCHEMAS
class ScopeWeightMapSchema(Schema):
    """
    Maps scope predicates (label, hasExactSynonym etc) to weights (0<=1.0).
    Typically labels and exact matches have higher weight, although this
    may vary with ontology
    """
    # NOTE(review): marshmallow 3 expects descriptions under metadata={'description': ...};
    # the `description` kwarg here presumes marshmallow 2 — confirm the pinned version
    label = fields.Float(default=1.0, description="weight of label matches")
    hasExactSynonym = fields.Float(default=0.9, description="weight of exact matches")
    hasRelatedSynonym = fields.Float(default=0.0, description="weight of related matches")
    # broad/narrow matches carry negative weight (they suggest non-equivalence)
    hasBroadSynonym = fields.Float(default=-0.2, description="weight of broad matches")
    hasNarrowSynonym = fields.Float(default=-0.2, description="weight of narrow matches")
    other = fields.Float(default=-0.5, description="weight of other kinds of matches")
class OntologyConfigurationSchema(Schema):
    """
    configuration that is specific to an ontology
    """
    prefix = fields.String(description="prefix of IDs in ontology, e.g. UBERON")
    # per-ontology override of the global scope-weight map
    scope_weight_map = fields.Nested(ScopeWeightMapSchema(), description="local scope-weight map")
    normalized_form_confidence = fields.Float(description="confidence of a synonym value derived via normalization (e.g. canonical ordering of tokens)")
    abbreviation_confidence = fields.Float(default=0.5, description="confidence of an abbreviation")
class CardinalityWeights(Schema):
    """
    Weights for different cardinality combinations, optionally scoped to a
    pair of ontology prefixes.
    """
    prefix1 = fields.String(description="prefix of IDs in ontology, e.g. MA")
    prefix2 = fields.String(description="prefix of IDs in ontology, e.g. ZFA")
    cardinality = fields.String(description="One of 11, 1m, m1 or mm")
    weights = fields.List(fields.Float(), description="Sub/Sup/Eq/Other")
class MatchWeights(Schema):
    """
    Default weights for a pair of ontologies
    """
    prefix1 = fields.String(description="prefix of IDs in ontology, e.g. MA")
    prefix2 = fields.String(description="prefix of IDs in ontology, e.g. ZFA")
    weights = fields.List(fields.Float(), description="Sub/Sup/Eq/Other")
class XrefWeights(Schema):
    """
    Default weights for a pair of classes
    """
    left = fields.String(description="ID of first class")
    right = fields.String(description="ID of second class")
    weights = fields.List(fields.Float(), description="Sub/Sup/Eq/Other")
class LexicalMapConfigSchema(Schema):
    """
    global configuration
    """
    scope_weight_map = fields.Nested(ScopeWeightMapSchema(), description="global scope-weight map. May be overridden by ontologies")
    ontology_configurations = fields.List(fields.Nested(OntologyConfigurationSchema()), description="configurations that are specific to an ontology")
    normalized_form_confidence = fields.Float(default=0.8, description="confidence of a synonym value derived via normalization (e.g. canonical ordering of tokens)")
    abbreviation_confidence = fields.Float(default=0.5, description="confidence of an abbreviation")
    match_weights = fields.List(fields.Nested(MatchWeights()))
    cardinality_weights = fields.List(fields.Nested(CardinalityWeights()))
    xref_weights = fields.List(fields.Nested(XrefWeights()))
| [
"logging.getLogger",
"ontobio.ontol.Ontology",
"networkx.MultiDiGraph",
"ontobio.ontol.Synonym",
"re.compile",
"marshmallow.fields.Float",
"networkx.DiGraph",
"math.log2",
"re.match",
"networkx.Graph",
"numpy.exp",
"numpy.array",
"numpy.sum",
"collections.defaultdict",
"marshmallow.field... | [((1527, 1554), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1544, 1554), False, 'import logging\n'), ((1582, 1604), 'math.log2', 'math.log2', (['(p / (1 - p))'], {}), '(p / (1 - p))\n', (1591, 1604), False, 'import math\n'), ((32640, 32704), 'marshmallow.fields.Float', 'fields.Float', ([], {'default': '(1.0)', 'description': '"""weight of label matches"""'}), "(default=1.0, description='weight of label matches')\n", (32652, 32704), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((32727, 32791), 'marshmallow.fields.Float', 'fields.Float', ([], {'default': '(0.9)', 'description': '"""weight of exact matches"""'}), "(default=0.9, description='weight of exact matches')\n", (32739, 32791), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((32816, 32882), 'marshmallow.fields.Float', 'fields.Float', ([], {'default': '(0.0)', 'description': '"""weight of related matches"""'}), "(default=0.0, description='weight of related matches')\n", (32828, 32882), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((32905, 32970), 'marshmallow.fields.Float', 'fields.Float', ([], {'default': '(-0.2)', 'description': '"""weight of broad matches"""'}), "(default=-0.2, description='weight of broad matches')\n", (32917, 32970), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((32994, 33060), 'marshmallow.fields.Float', 'fields.Float', ([], {'default': '(-0.2)', 'description': '"""weight of narrow matches"""'}), "(default=-0.2, description='weight of narrow matches')\n", (33006, 33060), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((33073, 33147), 'marshmallow.fields.Float', 'fields.Float', ([], {'default': '(-0.5)', 'description': '"""weight of other kinds of matches"""'}), "(default=-0.5, description='weight of other kinds of matches')\n", (33085, 33147), False, 'from marshmallow import Schema, fields, pprint, 
post_load\n'), ((33271, 33338), 'marshmallow.fields.String', 'fields.String', ([], {'description': '"""prefix of IDs in ontology, e.g. UBERON"""'}), "(description='prefix of IDs in ontology, e.g. UBERON')\n", (33284, 33338), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((33471, 33600), 'marshmallow.fields.Float', 'fields.Float', ([], {'description': '"""confidence of a synonym value derived via normalization (e.g. canonical ordering of tokens)"""'}), "(description=\n 'confidence of a synonym value derived via normalization (e.g. canonical ordering of tokens)'\n )\n", (33483, 33600), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((33621, 33691), 'marshmallow.fields.Float', 'fields.Float', ([], {'default': '(0.5)', 'description': '"""confidence of an abbreviation"""'}), "(default=0.5, description='confidence of an abbreviation')\n", (33633, 33691), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((33814, 33877), 'marshmallow.fields.String', 'fields.String', ([], {'description': '"""prefix of IDs in ontology, e.g. MA"""'}), "(description='prefix of IDs in ontology, e.g. MA')\n", (33827, 33877), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((33892, 33956), 'marshmallow.fields.String', 'fields.String', ([], {'description': '"""prefix of IDs in ontology, e.g. ZFA"""'}), "(description='prefix of IDs in ontology, e.g. ZFA')\n", (33905, 33956), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((33975, 34027), 'marshmallow.fields.String', 'fields.String', ([], {'description': '"""One of 11, 1m, m1 or mm"""'}), "(description='One of 11, 1m, m1 or mm')\n", (33988, 34027), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((34206, 34269), 'marshmallow.fields.String', 'fields.String', ([], {'description': '"""prefix of IDs in ontology, e.g. MA"""'}), "(description='prefix of IDs in ontology, e.g. 
MA')\n", (34219, 34269), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((34284, 34348), 'marshmallow.fields.String', 'fields.String', ([], {'description': '"""prefix of IDs in ontology, e.g. ZFA"""'}), "(description='prefix of IDs in ontology, e.g. ZFA')\n", (34297, 34348), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((34524, 34570), 'marshmallow.fields.String', 'fields.String', ([], {'description': '"""ID of first class"""'}), "(description='ID of first class')\n", (34537, 34570), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((34583, 34630), 'marshmallow.fields.String', 'fields.String', ([], {'description': '"""ID of second class"""'}), "(description='ID of second class')\n", (34596, 34630), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((35106, 35248), 'marshmallow.fields.Float', 'fields.Float', ([], {'default': '(0.8)', 'description': '"""confidence of a synonym value derived via normalization (e.g. canonical ordering of tokens)"""'}), "(default=0.8, description=\n 'confidence of a synonym value derived via normalization (e.g. 
canonical ordering of tokens)'\n )\n", (35118, 35248), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((35269, 35339), 'marshmallow.fields.Float', 'fields.Float', ([], {'default': '(0.5)', 'description': '"""confidence of an abbreviation"""'}), "(default=0.5, description='confidence of an abbreviation')\n", (35281, 35339), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((2863, 2884), 're.compile', 're.compile', (['"""[\\\\W_]+"""'], {}), "('[\\\\W_]+')\n", (2873, 2884), False, 'import re\n'), ((2990, 3007), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3001, 3007), False, 'from collections import defaultdict\n'), ((3039, 3049), 'ontobio.ontol.Ontology', 'Ontology', ([], {}), '()\n', (3047, 3049), False, 'from ontobio.ontol import Synonym, Ontology\n'), ((5170, 5191), 're.match', 're.match', (['"""[A-Z]+"""', 'v'], {}), "('[A-Z]+', v)\n", (5178, 5191), False, 'import re\n'), ((6966, 7004), 're.sub', 're.sub', (['"""([a-z])([A-Z])"""', '"""\\\\1 \\\\2"""', 'v'], {}), "('([a-z])([A-Z])', '\\\\1 \\\\2', v)\n", (6972, 7004), False, 'import re\n'), ((9177, 9194), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {}), '()\n', (9192, 9194), True, 'import networkx as nx\n'), ((10727, 10737), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (10735, 10737), True, 'import networkx as nx\n'), ((14267, 14284), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (14278, 14284), False, 'from collections import defaultdict\n'), ((20529, 20546), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (20540, 20546), False, 'from collections import defaultdict\n'), ((22198, 22228), 'numpy.array', 'np.array', (['(0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0, 0.0))\n', (22206, 22228), True, 'import numpy as np\n'), ((22248, 22278), 'numpy.array', 'np.array', (['(0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0, 0.0))\n', (22256, 22278), True, 'import numpy as np\n'), ((24035, 24057), 
'numpy.array', 'np.array', (['(0, 0, 0, 0)'], {}), '((0, 0, 0, 0))\n', (24043, 24057), True, 'import numpy as np\n'), ((26200, 26234), 'numpy.array', 'np.array', (['(W[1], W[0], W[2], W[3])'], {}), '((W[1], W[0], W[2], W[3]))\n', (26208, 26234), True, 'import numpy as np\n'), ((27489, 27550), 'pandas.DataFrame', 'pd.DataFrame', (['items'], {'columns': "['id', 'label', 'mapped_equivs']"}), "(items, columns=['id', 'label', 'mapped_equivs'])\n", (27501, 27550), True, 'import pandas as pd\n'), ((31607, 31638), 'pandas.DataFrame', 'pd.DataFrame', (['items'], {'columns': 'ix'}), '(items, columns=ix)\n', (31619, 31638), True, 'import pandas as pd\n'), ((32071, 32083), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (32081, 32083), True, 'import networkx as nx\n'), ((34054, 34068), 'marshmallow.fields.Float', 'fields.Float', ([], {}), '()\n', (34066, 34068), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((34375, 34389), 'marshmallow.fields.Float', 'fields.Float', ([], {}), '()\n', (34387, 34389), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((34657, 34671), 'marshmallow.fields.Float', 'fields.Float', ([], {}), '()\n', (34669, 34671), False, 'from marshmallow import Schema, fields, pprint, post_load\n'), ((5511, 5536), 're.match', 're.match', (['""".*[a-zA-Z]"""', 'v'], {}), "('.*[a-zA-Z]', v)\n", (5519, 5536), False, 'import re\n'), ((20675, 20701), 'numpy.array', 'np.array', (['(w1, w2, w3, w4)'], {}), '((w1, w2, w3, w4))\n', (20683, 20701), True, 'import numpy as np\n'), ((20729, 20755), 'numpy.array', 'np.array', (['(w2, w1, w3, w4)'], {}), '((w2, w1, w3, w4))\n', (20737, 20755), True, 'import numpy as np\n'), ((21089, 21112), 'numpy.array', 'np.array', (["mw['weights']"], {}), "(mw['weights'])\n", (21097, 21112), True, 'import numpy as np\n'), ((21508, 21538), 'numpy.array', 'np.array', (['(0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0, 0.0))\n', (21516, 21538), True, 'import numpy as np\n'), ((21837, 21860), 
'numpy.array', 'np.array', (["xw['weights']"], {}), "(xw['weights'])\n", (21845, 21860), True, 'import numpy as np\n'), ((23580, 23589), 'numpy.sum', 'np.sum', (['P'], {}), '(P)\n', (23586, 23589), True, 'import numpy as np\n'), ((32284, 32316), 'networkx.algorithms.strongly_connected_components', 'strongly_connected_components', (['g'], {}), '(g)\n', (32313, 32316), False, 'from networkx.algorithms import strongly_connected_components\n'), ((6254, 6379), 'ontobio.ontol.Synonym', 'Synonym', (['syn.class_id'], {'val': 'syn.val', 'pred': 'syn.pred', 'lextype': 'syn.lextype', 'ontology': 'ont', 'confidence': '(syn.confidence * nweight)'}), '(syn.class_id, val=syn.val, pred=syn.pred, lextype=syn.lextype,\n ontology=ont, confidence=syn.confidence * nweight)\n', (6261, 6379), False, 'from ontobio.ontol import Synonym, Ontology\n'), ((23457, 23468), 'numpy.exp', 'np.exp', (['(-WS)'], {}), '(-WS)\n', (23463, 23468), True, 'import numpy as np\n'), ((3874, 3894), 're.sub', 're.sub', (['""".*/"""', '""""""', 'v'], {}), "('.*/', '', v)\n", (3880, 3894), False, 'import re\n'), ((3917, 3937), 're.sub', 're.sub', (['""".*#"""', '""""""', 'v'], {}), "('.*#', '', v)\n", (3923, 3937), False, 'import re\n'), ((3964, 3995), 'ontobio.ontol.Synonym', 'Synonym', (['n'], {'val': 'v', 'pred': '"""label"""'}), "(n, val=v, pred='label')\n", (3971, 3995), False, 'from ontobio.ontol import Synonym, Ontology\n'), ((18798, 18851), 'numpy.array', 'np.array', (['(-SUBSTRING_WEIGHT, SUBSTRING_WEIGHT, 0, 0)'], {}), '((-SUBSTRING_WEIGHT, SUBSTRING_WEIGHT, 0, 0))\n', (18806, 18851), True, 'import numpy as np\n'), ((25100, 25123), 'numpy.array', 'np.array', (["cw['weights']"], {}), "(cw['weights'])\n", (25108, 25123), True, 'import numpy as np\n'), ((25423, 25446), 'numpy.array', 'np.array', (["cw['weights']"], {}), "(cw['weights'])\n", (25431, 25446), True, 'import numpy as np\n'), ((25823, 25853), 'numpy.array', 'np.array', (['(0.0, 0.0, 1.0, 0.0)'], {}), '((0.0, 0.0, 1.0, 0.0))\n', (25831, 25853), 
True, 'import numpy as np\n'), ((18901, 18954), 'numpy.array', 'np.array', (['(SUBSTRING_WEIGHT, -SUBSTRING_WEIGHT, 0, 0)'], {}), '((SUBSTRING_WEIGHT, -SUBSTRING_WEIGHT, 0, 0))\n', (18909, 18954), True, 'import numpy as np\n'), ((25224, 25247), 'numpy.array', 'np.array', (["cw['weights']"], {}), "(cw['weights'])\n", (25232, 25247), True, 'import numpy as np\n'), ((25607, 25630), 'numpy.array', 'np.array', (["cw['weights']"], {}), "(cw['weights'])\n", (25615, 25630), True, 'import numpy as np\n'), ((25914, 25944), 'numpy.array', 'np.array', (['(0.6, 0.4, 0.0, 0.0)'], {}), '((0.6, 0.4, 0.0, 0.0))\n', (25922, 25944), True, 'import numpy as np\n'), ((26005, 26035), 'numpy.array', 'np.array', (['(0.4, 0.6, 0.0, 0.0)'], {}), '((0.4, 0.6, 0.0, 0.0))\n', (26013, 26035), True, 'import numpy as np\n'), ((26096, 26126), 'numpy.array', 'np.array', (['(0.2, 0.2, 0.0, 0.5)'], {}), '((0.2, 0.2, 0.0, 0.5))\n', (26104, 26126), True, 'import numpy as np\n')] |
from functools import lru_cache
import cv2
import numpy as np
from . import resources
import zipfile
from . import common
from util.richlog import get_logger
import config
# Classifier-output index -> character table: '-' at index 0, then digits 0-9 and A-Z.
idx2id = ['-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
          'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
# Config switch: prefer the SVM recognizer over the ONNX DNN (see do_tag_ocr).
prefer_svm = config.get('ocr/stage_prefer_svm', True)
logger = get_logger(__name__)  # rich logger used for debug images and text
@lru_cache(maxsize=1)
def _load_svm():
    """Load the bundled character-classifier SVM from the zipped model file.

    Cached so the archive is read and the model deserialized only once
    per process.
    """
    with resources.open_file('resources/imgreco/stage_ocr/svm_data.zip') as f:
        zf = zipfile.ZipFile(f, 'r')
        ydoc = zf.read('svm_data.dat').decode('utf-8')
        # FileStorage can parse the serialized model text directly from memory.
        fs = cv2.FileStorage(ydoc, cv2.FileStorage_READ | cv2.FileStorage_MEMORY)
        svm = cv2.ml.SVM_create()
        svm.read(fs.getFirstTopLevelNode())
        assert svm.isTrained()
        return svm
@lru_cache(maxsize=1)
def _load_onnx_model():
    """Load the bundled ONNX character-recognition network (cached per process)."""
    with resources.open_file('resources/imgreco/stage_ocr/chars.onnx') as f:
        data = f.read()
    net = cv2.dnn.readNetFromONNX(data)
    return net
def predict_cv(img):
    """OCR a binarized tag image with the ONNX network.

    Splits the image into per-character crops, batches them through the
    network, and maps each argmax class index back through idx2id.
    Returns '' when no characters are found.
    """
    net = _load_onnx_model()
    char_imgs = crop_char_img(img)
    if not char_imgs:
        return ''
    # resize_char yields 16x16 glyphs; add a trailing channel axis for blobFromImages.
    roi_list = [np.expand_dims(resize_char(x), 2) for x in char_imgs]
    blob = cv2.dnn.blobFromImages(roi_list)
    net.setInput(blob)
    scores = net.forward()
    predicts = scores.argmax(1)
    # softmax = [common.softmax(score) for score in scores]
    # probs = [softmax[i][predicts[i]] for i in range(len(predicts))]
    # print(probs)
    return ''.join([idx2id[p] for p in predicts])
def get_img_feature(img):
    """Flatten the 16x16 normalized glyph into the (256, 1) SVM feature vector."""
    glyph = resize_char(img)
    return glyph.reshape((256, 1))
def resize_char(img):
    """Fit a glyph crop into a 16x16 cell.

    The longer side is scaled to 16 px, the image is pasted inverted
    (white glyph on black) into the top-left of a zeroed 16x16 canvas.
    """
    height, width = img.shape[:2]
    ratio = 16 / max(height, width)
    new_h = int(height * ratio)
    new_w = int(width * ratio)
    scaled = cv2.resize(img, (new_w, new_h))
    canvas = np.zeros((16, 16)).astype(np.uint8)
    canvas[:new_h, :new_w] = ~scaled
    return canvas
def predict(gray_img):
    """Classify a single glyph crop with the SVM; returns the predicted character.

    The SVM response matrix holds the character's code point, hence the chr().
    """
    svm = _load_svm()
    res = svm.predict(np.float32([get_img_feature(gray_img)]))
    return chr(res[1][0][0])
def crop_char_img(img):
    """Split a binarized (white background, black glyphs) image into per-character crops.

    Scans columns left to right: a run of consecutive columns containing at
    least one black (<127) pixel is treated as one character.  Runs narrower
    than 3 px are discarded as noise.  Each accepted run is cropped to its
    vertical black extent and appended, left to right, to the returned list.

    Bug fix: the run-start column was tracked with truthiness tests
    (``if not last_x``), so a character starting at column 0 was never
    detected; the checks now compare against ``None`` explicitly.
    """
    h, w = img.shape[:2]
    last_x = None  # first column of the character run currently being scanned
    res = []
    for x in range(0, w):
        has_black = False
        for y in range(0, h):
            if img[y][x] < 127:
                has_black = True
                if last_x is None:
                    last_x = x
                break
        if not has_black and last_x is not None:
            if x - last_x >= 3:
                # Find the vertical extent of the glyph within the run.
                min_y = None
                max_y = None
                for y1 in range(0, h):
                    has_black = False
                    for x1 in range(last_x, x):
                        if img[y1][x1] < 127:
                            has_black = True
                            if min_y is None:
                                min_y = y1
                            break
                    if not has_black and min_y is not None and max_y is None:
                        max_y = y1
                        break
                res.append(img[min_y:max_y, last_x:x])
            last_x = None
    return res
def thresholding(image):
    """Otsu-binarize a grayscale image, normalizing to black glyphs on white.

    If the top-left pixel comes out dark, the polarity is flipped so the
    background is always white.
    """
    _, binary = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    if binary[0, 0] < 127:
        binary = ~binary
    return binary
def pil_to_cv_gray_img(pil_img):
    """Convert a PIL RGB image to an OpenCV grayscale uint8 array."""
    rgb = np.asarray(pil_img, dtype=np.uint8)
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    return gray
def invert_cv_gray_img_color(img):
    """Return the bitwise inversion of a grayscale image (white <-> black)."""
    return np.bitwise_not(img)
def cut_tag(screen, w, pt):
    """Cut and binarize the stage-code tag strip to the right of a matched icon.

    screen: 1080p-normalized grayscale screenshot
    w:      width of the matched icon template
    pt:     top-left corner of the icon match
    Returns the thresholded tag image, or None if the widened tag region
    would run off the right edge of the screenshot.
    """
    img_h, img_w = screen.shape[:2]
    tag_w = 130
    tag = thresholding(screen[pt[1] - 1:pt[1] + 40, pt[0] + w + 3:pt[0] + tag_w + w])
    # 130 px may not cover the whole tag, so probe the right edge and widen if needed.
    for i in range(3):
        for j in range(40):
            if tag[j][tag_w - 4 - i] < 127:
                tag_w = 160
                if pt[0] + w + tag_w >= img_w:
                    return None
                tag = thresholding(screen[pt[1] - 1:pt[1] + 40, pt[0] + w + 3:pt[0] + tag_w + w])
                break
    return tag
def remove_holes(img):
    """Remove small specks from a binarized (white-background) image in place."""
    # Drop tiny connected components.
    # findContours only handles white-on-black images, so invert first.
    contours, hierarchy = cv2.findContours(~img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for i in range(len(contours)):
        # Measure the component's area.
        area = cv2.contourArea(contours[i])
        if area < 8:
            # Paint sub-8px components white to remove them as noise.
            cv2.drawContours(img, [contours[i]], 0, 255, -1)
def recognize_stage_tags(pil_screen, template, ccoeff_threshold=0.75):
    """Template-match stage icons in a screenshot and OCR the tag next to each.

    Returns a list of {'pos': (x, y), 'tag_str': str} with positions in the
    original screenshot's coordinates.  Matches whose tag OCRs to fewer than
    3 characters are dropped (and highlighted in a debug image).
    """
    screen = pil_to_cv_gray_img(pil_screen)
    img_h, img_w = screen.shape[:2]
    ratio = 1080 / img_h
    if ratio != 1:
        # Normalize the screenshot height to 1080 px before matching.
        ratio = 1080 / img_h
        screen = cv2.resize(screen, (int(img_w * ratio), 1080))
    result = cv2.matchTemplate(screen, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(result >= ccoeff_threshold)
    h, w = template.shape[:2]
    img_h, img_w = screen.shape[:2]
    tag_set = set()
    tag_set2 = set()
    res = []
    dbg_screen = None
    for pt in zip(*loc[::-1]):
        # Deduplicate near-identical matches by bucketing into 100 px cells,
        # using both floor- and round-based buckets to catch boundary cases.
        pos_key = (pt[0] // 100, pt[1] // 100)
        pos_key2 = (int(pt[0] / 100 + 0.5), int(pt[1] / 100 + 0.5))
        if pos_key in tag_set or pos_key2 in tag_set2:
            continue
        tag_set.add(pos_key)
        tag_set2.add(pos_key2)
        tag_w = 130
        # Check that the tag region stays inside the screenshot.
        if pt[0] + w + tag_w < img_w:
            tag = cut_tag(screen, w, pt)
            if tag is None:
                continue
            remove_holes(tag)
            tag_str = do_tag_ocr(tag)
            if len(tag_str) < 3:
                # Too short to be a stage code: mark it in the debug image and skip.
                if dbg_screen is None:
                    dbg_screen = screen.copy()
                cv2.rectangle(dbg_screen, pt, (pt[0] + w + tag_w, pt[1] + h), 0, 3)
                continue
            pos = (int((pt[0] + (tag_w / 2)) / ratio), int((pt[1] + 20) / ratio))
            # logger.logtext('pos: %s' % str(pos))
            # res.append({'tag_img': tag, 'pos': (pt[0] + (tag_w / 2), pt[1] + 20), 'tag_str': tag_str})
            res.append({'pos': pos, 'tag_str': tag_str})
    if dbg_screen is not None:
        logger.logimage(common.convert_to_pil(dbg_screen))
    return res
def do_tag_ocr(img):
    """OCR a tag image with the configured engine, logging input and result."""
    logger.logimage(common.convert_to_pil(img))
    if prefer_svm:
        engine = 'svm'
        res = do_tag_ocr_svm(img)
    else:
        engine = 'dnn'
        res = do_tag_ocr_dnn(img)
    logger.logtext('%s, res: %s' % (engine, res))
    return res
def do_tag_ocr_svm(img):
    """OCR a tag image character-by-character with the SVM classifier."""
    return ''.join(predict(char_img) for char_img in crop_char_img(img))
def do_tag_ocr_dnn(img):
    """OCR a tag image with the ONNX DNN recognizer."""
    return predict_cv(img)
# Grayscale templates of the two stage-icon variants used for template matching.
stage_icon1 = pil_to_cv_gray_img(resources.load_image('stage_ocr/stage_icon1.png'))
stage_icon2 = pil_to_cv_gray_img(resources.load_image('stage_ocr/stage_icon2.png'))
def recognize_all_screen_stage_tags(pil_screen):
    """Recognize stage tags using both icon templates.

    Returns a dict mapping tag string -> (x, y) position; results from the
    second template overwrite duplicates from the first.
    """
    tags_map = {}
    for icon in (stage_icon1, stage_icon2):
        for tag in recognize_stage_tags(pil_screen, icon):
            tags_map[tag['tag_str']] = tag['pos']
    return tags_map
| [
"cv2.rectangle",
"zipfile.ZipFile",
"cv2.FileStorage",
"util.richlog.get_logger",
"numpy.where",
"cv2.threshold",
"numpy.asarray",
"cv2.dnn.blobFromImages",
"cv2.contourArea",
"cv2.dnn.readNetFromONNX",
"cv2.matchTemplate",
"cv2.drawContours",
"cv2.ml.SVM_create",
"cv2.cvtColor",
"cv2.re... | [((392, 432), 'config.get', 'config.get', (['"""ocr/stage_prefer_svm"""', '(True)'], {}), "('ocr/stage_prefer_svm', True)\n", (402, 432), False, 'import config\n'), ((442, 462), 'util.richlog.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (452, 462), False, 'from util.richlog import get_logger\n'), ((466, 486), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (475, 486), False, 'from functools import lru_cache\n'), ((888, 908), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (897, 908), False, 'from functools import lru_cache\n'), ((1305, 1337), 'cv2.dnn.blobFromImages', 'cv2.dnn.blobFromImages', (['roi_list'], {}), '(roi_list)\n', (1327, 1337), False, 'import cv2\n'), ((1872, 1895), 'cv2.resize', 'cv2.resize', (['img', '(w, h)'], {}), '(img, (w, h))\n', (1882, 1895), False, 'import cv2\n'), ((3330, 3365), 'numpy.asarray', 'np.asarray', (['pil_img'], {'dtype': 'np.uint8'}), '(pil_img, dtype=np.uint8)\n', (3340, 3365), True, 'import numpy as np\n'), ((3377, 3414), 'cv2.cvtColor', 'cv2.cvtColor', (['arr', 'cv2.COLOR_RGB2GRAY'], {}), '(arr, cv2.COLOR_RGB2GRAY)\n', (3389, 3414), False, 'import cv2\n'), ((4127, 4193), 'cv2.findContours', 'cv2.findContours', (['(~img)', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(~img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (4143, 4193), False, 'import cv2\n'), ((4707, 4764), 'cv2.matchTemplate', 'cv2.matchTemplate', (['screen', 'template', 'cv2.TM_CCOEFF_NORMED'], {}), '(screen, template, cv2.TM_CCOEFF_NORMED)\n', (4724, 4764), False, 'import cv2\n'), ((4775, 4811), 'numpy.where', 'np.where', (['(result >= ccoeff_threshold)'], {}), '(result >= ccoeff_threshold)\n', (4783, 4811), True, 'import numpy as np\n'), ((596, 619), 'zipfile.ZipFile', 'zipfile.ZipFile', (['f', '"""r"""'], {}), "(f, 'r')\n", (611, 619), False, 'import zipfile\n'), ((688, 756), 'cv2.FileStorage', 'cv2.FileStorage', (['ydoc', '(cv2.FileStorage_READ | 
cv2.FileStorage_MEMORY)'], {}), '(ydoc, cv2.FileStorage_READ | cv2.FileStorage_MEMORY)\n', (703, 756), False, 'import cv2\n'), ((771, 790), 'cv2.ml.SVM_create', 'cv2.ml.SVM_create', ([], {}), '()\n', (788, 790), False, 'import cv2\n'), ((1048, 1077), 'cv2.dnn.readNetFromONNX', 'cv2.dnn.readNetFromONNX', (['data'], {}), '(data)\n', (1071, 1077), False, 'import cv2\n'), ((3158, 3223), 'cv2.threshold', 'cv2.threshold', (['image', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (3171, 3223), False, 'import cv2\n'), ((4261, 4289), 'cv2.contourArea', 'cv2.contourArea', (['contours[i]'], {}), '(contours[i])\n', (4276, 4289), False, 'import cv2\n'), ((1826, 1844), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {}), '((16, 16))\n', (1834, 1844), True, 'import numpy as np\n'), ((4355, 4403), 'cv2.drawContours', 'cv2.drawContours', (['img', '[contours[i]]', '(0)', '(255)', '(-1)'], {}), '(img, [contours[i]], 0, 255, -1)\n', (4371, 4403), False, 'import cv2\n'), ((5617, 5684), 'cv2.rectangle', 'cv2.rectangle', (['dbg_screen', 'pt', '(pt[0] + w + tag_w, pt[1] + h)', '(0)', '(3)'], {}), '(dbg_screen, pt, (pt[0] + w + tag_w, pt[1] + h), 0, 3)\n', (5630, 5684), False, 'import cv2\n')] |
import slippy
import slippy.core as core
import numpy as np
import numpy.testing as npt
import itertools
def test_basic_multi_convolve_fftw():
    """A unit impulse convolved with each influence matrix must reproduce it."""
    slippy.CUDA = False  # force the CPU/FFTW backend
    comps = [a + b for a, b in itertools.product('xyz', 'xyz')]
    ims = np.array([core.elastic_influence_matrix_spatial(comp, (64, 64), [1e-6] * 2, 200e9, 0.3) for comp in comps])
    loads = np.zeros_like(ims[0])
    loads[31, 31] = 1  # unit impulse at the grid centre
    out = core.plan_multi_convolve(loads, ims, circular=True, fft_ims=False)(loads)
    for expt, got in zip(ims, out):
        npt.assert_allclose(got, expt, atol=1e-30)
def test_multi_convolve_vs_sequential_fftw():
    """Batched multi-convolution must agree with component-by-component convolution.

    Sweeps every periodicity combination, with and without a boolean domain,
    and also exercises the ignore_domain code path.
    """
    slippy.CUDA = False  # force the CPU/FFTW backend
    periodics = [(False, False), (True, False), (False, True), (True, True)]
    domains = (None, 0.5 > np.random.rand(16, 16))
    comps = ['xz', 'zz']
    loads = np.random.rand(16, 16)
    for p, d in itertools.product(periodics, domains):
        # non-periodic axes use a doubled influence-matrix grid
        im_shape = tuple((2 - p) * s for p, s in zip(p, loads.shape))
        ims = np.array([core.elastic_influence_matrix_spatial(comp, im_shape, [1e-6] * 2, 200e9, 0.3) for comp in comps])
        multi_func = core.plan_multi_convolve(loads, ims, d, p, fft_ims=False)
        if d is None:
            multi_result = multi_func(loads)
        else:
            multi_result = multi_func(loads[d])
        single_results = np.zeros_like(multi_result)
        single_funcs = []
        for i in range(2):
            single_func = core.plan_convolve(loads, ims[i], d, p, fft_im=False)
            if d is None:
                single_results[i] = single_func(loads)
            else:
                single_results[i] = single_func(loads[d])
            single_funcs.append(single_func)
        npt.assert_allclose(multi_result, single_results, atol=1e-30)
        if d is not None:
            # The same comparison must hold when the domain is ignored.
            multi_result = multi_func(loads[d], ignore_domain=True)
            single_results = np.zeros_like(multi_result)
            for i in range(2):
                single_results[i] = single_funcs[i](loads[d], ignore_domain=True)
            npt.assert_allclose(multi_result, single_results, atol=1e-30)
| [
"numpy.random.rand",
"slippy.core.plan_multi_convolve",
"numpy.testing.assert_allclose",
"itertools.product",
"slippy.core.elastic_influence_matrix_spatial",
"slippy.core.plan_convolve",
"numpy.zeros_like"
] | [((363, 384), 'numpy.zeros_like', 'np.zeros_like', (['ims[0]'], {}), '(ims[0])\n', (376, 384), True, 'import numpy as np\n'), ((815, 837), 'numpy.random.rand', 'np.random.rand', (['(16)', '(16)'], {}), '(16, 16)\n', (829, 837), True, 'import numpy as np\n'), ((854, 891), 'itertools.product', 'itertools.product', (['periodics', 'domains'], {}), '(periodics, domains)\n', (871, 891), False, 'import itertools\n'), ((417, 483), 'slippy.core.plan_multi_convolve', 'core.plan_multi_convolve', (['loads', 'ims'], {'circular': '(True)', 'fft_ims': '(False)'}), '(loads, ims, circular=True, fft_ims=False)\n', (441, 483), True, 'import slippy.core as core\n'), ((535, 577), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['got', 'expt'], {'atol': '(1e-30)'}), '(got, expt, atol=1e-30)\n', (554, 577), True, 'import numpy.testing as npt\n'), ((1106, 1163), 'slippy.core.plan_multi_convolve', 'core.plan_multi_convolve', (['loads', 'ims', 'd', 'p'], {'fft_ims': '(False)'}), '(loads, ims, d, p, fft_ims=False)\n', (1130, 1163), True, 'import slippy.core as core\n'), ((1318, 1345), 'numpy.zeros_like', 'np.zeros_like', (['multi_result'], {}), '(multi_result)\n', (1331, 1345), True, 'import numpy as np\n'), ((1690, 1751), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['multi_result', 'single_results'], {'atol': '(1e-30)'}), '(multi_result, single_results, atol=1e-30)\n', (1709, 1751), True, 'import numpy.testing as npt\n'), ((200, 231), 'itertools.product', 'itertools.product', (['"""xyz"""', '"""xyz"""'], {}), "('xyz', 'xyz')\n", (217, 231), False, 'import itertools\n'), ((253, 345), 'slippy.core.elastic_influence_matrix_spatial', 'core.elastic_influence_matrix_spatial', (['comp', '(64, 64)', '([1e-06] * 2)', '(200000000000.0)', '(0.3)'], {}), '(comp, (64, 64), [1e-06] * 2, \n 200000000000.0, 0.3)\n', (290, 345), True, 'import slippy.core as core\n'), ((754, 776), 'numpy.random.rand', 'np.random.rand', (['(16)', '(16)'], {}), '(16, 16)\n', (768, 776), True, 'import 
numpy as np\n'), ((1425, 1478), 'slippy.core.plan_convolve', 'core.plan_convolve', (['loads', 'ims[i]', 'd', 'p'], {'fft_im': '(False)'}), '(loads, ims[i], d, p, fft_im=False)\n', (1443, 1478), True, 'import slippy.core as core\n'), ((1876, 1903), 'numpy.zeros_like', 'np.zeros_like', (['multi_result'], {}), '(multi_result)\n', (1889, 1903), True, 'import numpy as np\n'), ((2030, 2091), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['multi_result', 'single_results'], {'atol': '(1e-30)'}), '(multi_result, single_results, atol=1e-30)\n', (2049, 2091), True, 'import numpy.testing as npt\n'), ((987, 1079), 'slippy.core.elastic_influence_matrix_spatial', 'core.elastic_influence_matrix_spatial', (['comp', 'im_shape', '([1e-06] * 2)', '(200000000000.0)', '(0.3)'], {}), '(comp, im_shape, [1e-06] * 2, \n 200000000000.0, 0.3)\n', (1024, 1079), True, 'import slippy.core as core\n')] |
import glob
import random
import os
import numpy as np
from PIL import Image
import tensorflow as tf
class ImageDataset(object):
    """JPEG dataset producing (image, masked image, masked patch, mask) tuples.

    Images under ``root`` are center-cropped to a square and resized to
    ``img_size``; a ``mask_size`` square is blanked to white, at a random
    location ('random' crop_mode, train only) or at the center.  All image
    outputs are float32 arrays scaled to [-1, 1]; the mask is a binary
    (img_size, img_size, 1) float32 array.

    Fixes vs. the previous revision:
    - restored the extraction-garbled final line of ``__len__``
    - the per-pixel ``putpixel`` whiteout loops are replaced by a single
      equivalent ``Image.paste`` box fill
    - an unknown ``crop_mode`` in train mode now raises ``ValueError``
      instead of failing later with an UnboundLocalError
    """

    def __init__(self, root, img_size=128, load_size=None, mask_size=64, mode='train', crop_mode='random'):
        self.img_size = img_size
        self.load_size = load_size  # kept for interface compatibility (currently unused)
        self.mask_size = mask_size
        self.mode = mode
        self.files = sorted(glob.glob('%s/*.jpg' % root))
        # Last 4000 files are the held-out split; everything before is train.
        self.files = self.files[:-4000] if mode == 'train' else self.files[-4000:]
        self.crop_mode = crop_mode

    def crop_and_resize(self, img):
        """Center-crop to a square and bicubic-resize to (img_size, img_size)."""
        x, y = img.size
        ms = min(img.size)
        x_start = (x - ms) // 2
        y_start = (y - ms) // 2
        img = img.crop((x_start, y_start, x_start + ms, y_start + ms))
        img = img.resize((self.img_size, self.img_size), Image.BICUBIC)
        return img

    def transform(self, img):
        """Convert an image to a float32 array scaled from [0, 255] to [-1, 1]."""
        return np.array(img, 'float32') / 127.5 - 1

    def _whiteout(self, img, x1, y1, x2, y2):
        """Return a copy of img with the (x1, y1, x2, y2) box filled white."""
        masked_img = img.copy()
        # One box fill instead of the previous per-pixel putpixel double loop.
        masked_img.paste((255, 255, 255), (x1, y1, x2, y2))
        return masked_img

    def apply_random_mask(self, img):
        """Mask a random mask_size square; returns (masked_img, masked_part, mask)."""
        y1, x1 = np.random.randint(0, self.img_size - self.mask_size, 2)
        y2, x2 = y1 + self.mask_size, x1 + self.mask_size
        mask = np.zeros((self.img_size, self.img_size, 1), 'float32')
        # NOTE(review): mask rows are indexed by x while the crop box treats x
        # as the horizontal axis — verify the intended mask orientation.
        mask[x1:x2, y1:y2, 0] = 1
        masked_part = img.crop((x1, y1, x2, y2)).copy()
        masked_img = self._whiteout(img, x1, y1, x2, y2)
        return masked_img, masked_part, mask

    def apply_center_mask(self, img):
        """Mask the central mask_size square; returns (masked_img, masked_part, mask)."""
        # Upper-left pixel coordinate of the centered box.
        i = (self.img_size - self.mask_size) // 2
        mask = np.zeros((self.img_size, self.img_size, 1), 'float32')
        mask[i:i + self.mask_size, i:i + self.mask_size, 0] = 1
        masked_part = img.crop((i, i, i + self.mask_size, i + self.mask_size))
        masked_img = self._whiteout(img, i, i, i + self.mask_size, i + self.mask_size)
        return masked_img, masked_part, mask

    def __getitem__(self, index):
        img = Image.open(self.files[index % len(self.files)])
        img = self.crop_and_resize(img)
        if self.mode == 'train':
            if self.crop_mode == 'random':
                masked_img, aux, mask = self.apply_random_mask(img)
            elif self.crop_mode == 'none':
                masked_img, aux, mask = self.apply_center_mask(img)
            else:
                raise ValueError('unknown crop_mode: %r' % (self.crop_mode,))
        else:
            # Test/eval data always masks the image center.
            masked_img, aux, mask = self.apply_center_mask(img)
        return self.transform(img), self.transform(masked_img), self.transform(aux), mask

    def __len__(self):
        return len(self.files)
"numpy.array",
"numpy.random.randint",
"glob.glob",
"numpy.zeros"
] | [((1066, 1121), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.img_size - self.mask_size)', '(2)'], {}), '(0, self.img_size - self.mask_size, 2)\n', (1083, 1121), True, 'import numpy as np\n'), ((1193, 1247), 'numpy.zeros', 'np.zeros', (['(self.img_size, self.img_size, 1)', '"""float32"""'], {}), "((self.img_size, self.img_size, 1), 'float32')\n", (1201, 1247), True, 'import numpy as np\n'), ((1726, 1780), 'numpy.zeros', 'np.zeros', (['(self.img_size, self.img_size, 1)', '"""float32"""'], {}), "((self.img_size, self.img_size, 1), 'float32')\n", (1734, 1780), True, 'import numpy as np\n'), ((394, 422), 'glob.glob', 'glob.glob', (["('%s/*.jpg' % root)"], {}), "('%s/*.jpg' % root)\n", (403, 422), False, 'import glob\n'), ((937, 961), 'numpy.array', 'np.array', (['img', '"""float32"""'], {}), "(img, 'float32')\n", (945, 961), True, 'import numpy as np\n')] |
"""
Code Author: <NAME> (<EMAIL>)
"""
import os
import argparse
import numpy as np
from autoencoder_models.GraphDiffVAE import GraphDiffVAE
from data.data_processing import get_gene_expression_data
from data.build_graphs import build_correlation_graph
def init_arg():
    """Build and parse the command-line options for the graph VAE run."""
    parser = argparse.ArgumentParser()
    options = [
        ("--gene_expression_filename", dict(default='data/Zebrafish/GE_mvg.csv')),
        ("--hidden_dimensions", dict(default=[512], nargs="*", type=int)),
        ("--latent_dimension", dict(default=50, type=int)),
        ("--epochs", dict(default=200, type=int)),
        ("--learning_rate", dict(default=0.0001, type=float)),
        ("--model_name", dict(default='graph_test')),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
if __name__ == '__main__':
    args = init_arg()
    if not os.path.exists('results/Graphs'):
        os.mkdir('results/Graphs')
    gene_expression_normalized = get_gene_expression_data(args.gene_expression_filename)
    # Build a 2-neighbour correlation graph over the expression matrix.
    adj_matrix, initial_node_features = build_correlation_graph(gene_expression_normalized, num_neighbors=2)
    np.save('results/Graphs/input_adj_matrix_' + args.model_name + '.npy', adj_matrix)
    GraphVAE_model=GraphDiffVAE(num_nodes=adj_matrix.shape[0], num_features=initial_node_features.shape[1],
                       adj_matrix=adj_matrix, latent_dim=args.latent_dimension,
                       hidden_layers_dim=args.hidden_dimensions,
                       epochs=args.epochs,
                       learning_rate=args.learning_rate)
    predictions, latent_res = GraphVAE_model.train_vae(initial_node_features, adj_matrix)
    # Persist the predicted adjacency and latent node features for analysis.
    np.save('results/Graphs/predicted_adj_matrix_' + args.model_name + '.npy', predictions)
    np.save('results/Graphs/node_features_' + args.model_name + '.npy', latent_res)
| [
"os.path.exists",
"autoencoder_models.GraphDiffVAE.GraphDiffVAE",
"argparse.ArgumentParser",
"data.build_graphs.build_correlation_graph",
"os.mkdir",
"data.data_processing.get_gene_expression_data",
"numpy.save"
] | [((285, 310), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (308, 310), False, 'import argparse\n'), ((943, 998), 'data.data_processing.get_gene_expression_data', 'get_gene_expression_data', (['args.gene_expression_filename'], {}), '(args.gene_expression_filename)\n', (967, 998), False, 'from data.data_processing import get_gene_expression_data\n'), ((1040, 1108), 'data.build_graphs.build_correlation_graph', 'build_correlation_graph', (['gene_expression_normalized'], {'num_neighbors': '(2)'}), '(gene_expression_normalized, num_neighbors=2)\n', (1063, 1108), False, 'from data.build_graphs import build_correlation_graph\n'), ((1113, 1199), 'numpy.save', 'np.save', (["('results/Graphs/input_adj_matrix_' + args.model_name + '.npy')", 'adj_matrix'], {}), "('results/Graphs/input_adj_matrix_' + args.model_name + '.npy',\n adj_matrix)\n", (1120, 1199), True, 'import numpy as np\n'), ((1216, 1472), 'autoencoder_models.GraphDiffVAE.GraphDiffVAE', 'GraphDiffVAE', ([], {'num_nodes': 'adj_matrix.shape[0]', 'num_features': 'initial_node_features.shape[1]', 'adj_matrix': 'adj_matrix', 'latent_dim': 'args.latent_dimension', 'hidden_layers_dim': 'args.hidden_dimensions', 'epochs': 'args.epochs', 'learning_rate': 'args.learning_rate'}), '(num_nodes=adj_matrix.shape[0], num_features=\n initial_node_features.shape[1], adj_matrix=adj_matrix, latent_dim=args.\n latent_dimension, hidden_layers_dim=args.hidden_dimensions, epochs=args\n .epochs, learning_rate=args.learning_rate)\n', (1228, 1472), False, 'from autoencoder_models.GraphDiffVAE import GraphDiffVAE\n'), ((1681, 1772), 'numpy.save', 'np.save', (["('results/Graphs/predicted_adj_matrix_' + args.model_name + '.npy')", 'predictions'], {}), "('results/Graphs/predicted_adj_matrix_' + args.model_name + '.npy',\n predictions)\n", (1688, 1772), True, 'import numpy as np\n'), ((1773, 1852), 'numpy.save', 'np.save', (["('results/Graphs/node_features_' + args.model_name + '.npy')", 'latent_res'], {}), 
"('results/Graphs/node_features_' + args.model_name + '.npy', latent_res)\n", (1780, 1852), True, 'import numpy as np\n'), ((840, 872), 'os.path.exists', 'os.path.exists', (['"""results/Graphs"""'], {}), "('results/Graphs')\n", (854, 872), False, 'import os\n'), ((882, 908), 'os.mkdir', 'os.mkdir', (['"""results/Graphs"""'], {}), "('results/Graphs')\n", (890, 908), False, 'import os\n')] |
"""
This is the Lithophane Module written by <NAME>.
Core of this module uses matlab-stl to write stl
files written by <NAME>.
"""
import matplotlib.image as img
import matplotlib.pyplot as plt
import os
import sys
#from PIL import Image
from skimage.transform import resize
import numpy as np
from mpl_toolkits import mplot3d
from matplotlib import pyplot
from stl import mesh
def rgb2gray(rgb):
    """Convert an RGB image of shape (H, W, 3) to grayscale.

    Uses the ITU-R BT.601 luma weights; the output keeps the input's value
    range (e.g. 0-1 for float images, 0-255 for uint8 input).

    >>> gray = rgb2gray(rgbimg)
    """
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    # Rec. 601 luma coefficients.
    gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
    return gray
def scaleim(im, width_mm=40):
    """Scale an image so one pixel spans 0.1 mm.

    For example:
    >>> im_scaled = scaleim(im, width_mm = 100)
    makes an image 1000 pixels wide, with the height scaled proportionally.
    The output always has 3 channels.
    """
    height_px = im.shape[0]
    width_px = im.shape[1]
    factor = width_mm * 10 / width_px
    target_shape = (int(height_px * factor), int(width_px * factor), 3)
    return resize(im, target_shape)
def jpg2stl(im='', width='', h=3.0, d=0.5, show=True):
    """Convert an image (filename or array) into a lithophane height-field grid.

    Parameters
    ----------
    im : str or ndarray
        Image filename to read, or an already-loaded image array.
    width : number or ''
        Target width in mm; defaults to the image's pixel width.
    h : float
        Maximum relief depth in mm (dark pixels become thickest).
    d : float
        Base offset thickness in mm.
    show : bool
        If True, display the grayscale image with matplotlib.

    Returns
    -------
    x, y, z : ndarray
        Meshgrid coordinates (0.1 mm per pixel) and heights; a one-pixel
        zero border is added around z to help close the model's back.
    """
    depth = h
    offset = d
    if type(im) == str:
        filename = im
        print(f"Reading (unknown)")
        im = img.imread(filename)
    else:
        # Bug fix: this previously assigned the misspelled name 'filenmae'.
        filename = 'image.xxx'
    if width == '':
        width = im.shape[1]
    im = scaleim(im, width_mm=width)
    # Normalize to 0-1 before grayscale conversion.
    im = im/np.max(im)
    # Convert to grayscale.
    if len(im.shape) == 3:
        gray = rgb2gray(im)
    else:
        gray = im
    if(show):
        plt.imshow(gray, cmap=plt.get_cmap('gray'))
    # Invert: dark pixels should become thick (tall z).
    ngray = 1 - np.double(gray)
    # Scale z to the desired max depth and add the base thickness.
    z_middle = ngray * depth + offset
    # Add a border of zeros to help close the back of the model.
    z = np.zeros([z_middle.shape[0]+2, z_middle.shape[1]+2])
    z[1:-1, 1:-1] = z_middle
    x1 = np.linspace(1, z.shape[1]/10, z.shape[1])
    y1 = np.linspace(1, z.shape[0]/10, z.shape[0])
    x, y = np.meshgrid(x1, y1)
    x = np.fliplr(x)
    return x, y, z
def makeCylinder(x, y, z):
    '''Roll a flat height-field point cloud into a cylinder.

    The x extent of the grid is mapped to the angle around the cylinder and
    z becomes a radial offset from the base radius; y is returned unchanged
    (the cylinder's axis direction).  Vectorized with numpy broadcasting
    instead of the previous per-point Python double loop; the per-element
    arithmetic is identical.
    '''
    radius = (np.max(x) - np.min(x)) / (2 * np.pi)
    print(f"Cylinder Radius {radius}mm")
    # Angle per column: matches t = (c / (ncols - 10)) * 2 * pi of the loop version.
    cols = np.arange(x.shape[1])
    theta = (cols / (x.shape[1] - 10)) * 2 * np.pi
    rad = radius + z
    newx = rad * np.cos(theta)
    newz = rad * np.sin(theta)
    return newx, y.copy(), newz
# Construct polygons from grid data
def makemesh(x, y, z):
    '''Convert a point-cloud grid into a triangulated numpy-stl mesh.

    The front surface is built from two triangles per grid cell; an extra
    strip of triangles then joins the last row back to the first —
    presumably to close a wrapped (e.g. cylindrical) grid; confirm for
    flat grids.
    '''
    count = 0
    points = []
    triangles = []
    for i in range(z.shape[0]-1):
        for j in range(z.shape[1]-1):
            # Triangle 1
            points.append([x[i][j], y[i][j], z[i][j]])
            points.append([x[i][j+1], y[i][j+1], z[i][j+1]])
            points.append([x[i+1][j], y[i+1][j], z[i+1][j]])
            triangles.append([count, count+1, count+2])
            # Triangle 2
            points.append([x[i][j+1], y[i][j+1], z[i][j+1]])
            points.append([x[i+1][j+1], y[i+1][j+1], z[i+1][j+1]])
            points.append([x[i+1][j], y[i+1][j], z[i+1][j]])
            triangles.append([count+3, count+4, count+5])
            count += 6
    # BACK: stitch the last row to the first row.
    for j in range(x.shape[1]-1):
        bot = x.shape[0]-1
        # Back Triangle 1
        points.append([x[bot][j], y[bot][j], z[bot][j]])
        points.append([x[0][j+1], y[0][j+1], z[0][j+1]])
        points.append([x[0][j], y[0][j], z[0][j]])
        triangles.append([count, count+1, count+2])
        # Triangle 2
        points.append([x[bot][j], y[bot][j], z[bot][j]])
        points.append([x[bot][j+1], y[bot][j+1], z[bot][j+1]])
        points.append([x[0][j+1], y[0][j+1], z[0][j+1]])
        triangles.append([count+3, count+4, count+5])
        count += 6
    # Create the numpy-stl mesh from the triangle index list.
    model = mesh.Mesh(np.zeros(len(triangles), dtype=mesh.Mesh.dtype))
    for i, f in enumerate(triangles):
        for j in range(3):
            model.vectors[i][j] = points[f[j]]
    return model
def showstl(x, y, z):
    '''Plot the height-field grid as a 3D surface with matplotlib.

    Renders z over the (x, y) grid with the coolwarm colormap; intended as
    a quick visual check of the lithophane before exporting.
    '''
    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt
    from matplotlib import cm
    from matplotlib.ticker import LinearLocator, FormatStrFormatter
    import numpy as np
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    # Plot the surface.
    surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm,
                           linewidth=0, antialiased=False)
    # plt.axis('equal')
if __name__ == "__main__":
    import sys
    # NOTE(review): this reads sys.argv[2]; for a plain `python lithophane.py FILE`
    # invocation the filename would be sys.argv[1] — confirm the intended CLI.
    jpg2stl(sys.argv[2])
| [
"numpy.double",
"numpy.fliplr",
"matplotlib.image.imread",
"numpy.max",
"numpy.zeros",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.min",
"numpy.sin",
"numpy.meshgrid",
"skimage.transform.resize",
"matplotlib.pyplot.get_cmap"
] | [((1016, 1036), 'skimage.transform.resize', 'resize', (['im', 'newshape'], {}), '(im, newshape)\n', (1022, 1036), False, 'from skimage.transform import resize\n'), ((2030, 2086), 'numpy.zeros', 'np.zeros', (['[z_middle.shape[0] + 2, z_middle.shape[1] + 2]'], {}), '([z_middle.shape[0] + 2, z_middle.shape[1] + 2])\n', (2038, 2086), True, 'import numpy as np\n'), ((2123, 2166), 'numpy.linspace', 'np.linspace', (['(1)', '(z.shape[1] / 10)', 'z.shape[1]'], {}), '(1, z.shape[1] / 10, z.shape[1])\n', (2134, 2166), True, 'import numpy as np\n'), ((2174, 2217), 'numpy.linspace', 'np.linspace', (['(1)', '(z.shape[0] / 10)', 'z.shape[0]'], {}), '(1, z.shape[0] / 10, z.shape[0])\n', (2185, 2217), True, 'import numpy as np\n'), ((2228, 2247), 'numpy.meshgrid', 'np.meshgrid', (['x1', 'y1'], {}), '(x1, y1)\n', (2239, 2247), True, 'import numpy as np\n'), ((2257, 2269), 'numpy.fliplr', 'np.fliplr', (['x'], {}), '(x)\n', (2266, 2269), True, 'import numpy as np\n'), ((4931, 4943), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4941, 4943), True, 'import matplotlib.pyplot as plt\n'), ((1347, 1367), 'matplotlib.image.imread', 'img.imread', (['filename'], {}), '(filename)\n', (1357, 1367), True, 'import matplotlib.image as img\n'), ((1546, 1556), 'numpy.max', 'np.max', (['im'], {}), '(im)\n', (1552, 1556), True, 'import numpy as np\n'), ((1855, 1870), 'numpy.double', 'np.double', (['gray'], {}), '(gray)\n', (1864, 1870), True, 'import numpy as np\n'), ((2420, 2429), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (2426, 2429), True, 'import numpy as np\n'), ((2430, 2439), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (2436, 2439), True, 'import numpy as np\n'), ((1735, 1755), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (1747, 1755), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2683), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (2680, 2683), True, 'import numpy as np\n'), ((2713, 2722), 'numpy.sin', 'np.sin', (['t'], {}), 
'(t)\n', (2719, 2722), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from einsum import einsum
class TestEinsum(unittest.TestCase):
    """Check the local einsum implementation against numpy.einsum."""

    def test_einsum(self):
        # Fixed seed so the comparison is deterministic.
        np.random.seed(0)
        A = np.random.random((6, 8, 6))
        B = np.random.random((8, 10, 2))
        # numpy spelling: repeated index i on A takes its diagonal; j, k, l are kept.
        E1 = np.einsum("iji,jkl -> jkl", A, B)
        # Local spelling of the same contraction using named axis labels.
        E2 = einsum("i1,i2,i1; i2,i3,i4 -> i2,i3,i4", A, B)
        self.assertTrue(np.allclose(E1, E2))
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
| [
"numpy.allclose",
"numpy.random.random",
"numpy.einsum",
"numpy.random.seed",
"unittest.main",
"einsum.einsum"
] | [((420, 435), 'unittest.main', 'unittest.main', ([], {}), '()\n', (433, 435), False, 'import unittest\n'), ((135, 152), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (149, 152), True, 'import numpy as np\n'), ((165, 192), 'numpy.random.random', 'np.random.random', (['(6, 8, 6)'], {}), '((6, 8, 6))\n', (181, 192), True, 'import numpy as np\n'), ((205, 233), 'numpy.random.random', 'np.random.random', (['(8, 10, 2)'], {}), '((8, 10, 2))\n', (221, 233), True, 'import numpy as np\n'), ((248, 281), 'numpy.einsum', 'np.einsum', (['"""iji,jkl -> jkl"""', 'A', 'B'], {}), "('iji,jkl -> jkl', A, B)\n", (257, 281), True, 'import numpy as np\n'), ((295, 341), 'einsum.einsum', 'einsum', (['"""i1,i2,i1; i2,i3,i4 -> i2,i3,i4"""', 'A', 'B'], {}), "('i1,i2,i1; i2,i3,i4 -> i2,i3,i4', A, B)\n", (301, 341), False, 'from einsum import einsum\n'), ((366, 385), 'numpy.allclose', 'np.allclose', (['E1', 'E2'], {}), '(E1, E2)\n', (377, 385), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 <NAME> <<EMAIL>>
# NOTICE: authors should document their contributions in concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
"""
import numpy as np
import warnings
from .constraints import poly_constraints
from ..root_bunch import root_constraints, RBAlgorithms
# ln(2): raising the Chebyshev degree by one scales the leading coefficient
# by 2, so gains are tracked additively in log space with this constant.
log2 = np.log(2)
# Shared root-bunch classifier; non-strict with a lax line tolerance so
# nearly-mirrored root pairs are still grouped rather than rejected.
RBalgo = RBAlgorithms(
    strict=False,
    lax_line_tol=5e-1,
)
def coeff_canonicalization_gain(c):
    """Return the gain that canonicalizes ``c``: its highest-order coefficient.

    Dividing a Chebyshev coefficient array by this value makes the leading
    coefficient exactly one.
    """
    # (removed a leftover commented-out debug print)
    return c[-1]
def roots(c):
    """Roots of the Chebyshev series with coefficient array ``c``."""
    chebroots = np.polynomial.chebyshev.chebroots
    return chebroots(c)
def roots_lnG(c):
    """Return ``(roots, lnG)``: the series roots and the log of the
    leading (canonicalization) coefficient."""
    return np.polynomial.chebyshev.chebroots(c), np.log(c[-1])
def roots_rB(c, constraint):
    """Find the roots of Chebyshev series ``c`` as a constrained root bunch.

    The coefficient array is first validated against the coefficient
    symmetry the ``constraint`` (a ``poly_constraints`` member) promises,
    then the roots are grouped by :data:`RBalgo` under the matching
    ``root_constraints`` pairing rule.
    """
    # validate the coefficient symmetries implied by the constraint
    if poly_constraints.even_real <= constraint:
        assert np.all(c[::2].imag == 0)
    if poly_constraints.odd_real <= constraint:
        assert np.all(c[1::2].imag == 0)
    if poly_constraints.odd_imag <= constraint:
        assert np.all(c[1::2].real == 0)
    # NOTE(review): no final else — an unrecognized constraint would leave
    # ``rB`` unbound and raise UnboundLocalError; confirm callers only pass
    # the four handled constraints.
    if poly_constraints.no_constraint == constraint:
        rvec = roots(c)
        rB = RBalgo.expect(rvec, constraint=root_constraints.no_constraint)
    elif poly_constraints.eRoR == constraint:
        rvec = roots(c)
        rB = RBalgo.expect(rvec, constraint=root_constraints.mirror_real)
    elif poly_constraints.eRoI == constraint:
        rvec = roots(c)
        rB = RBalgo.expect(
            rvec, constraint=root_constraints.mirror_imag, allow_unknown=True
        )
        if len(rB.u) > 0:
            warnings.warn(
                "Unmirrored root in mirror_imag polynomial root finder (need to upgrade this algorithm)"
            )
        # HACK
        # clear any unknown roots so downstream code sees a clean bunch
        rB.u = np.array([])
    elif poly_constraints.eRoZ == constraint:
        rvec = roots(c)
        rB = RBalgo.expect(rvec, constraint=root_constraints.mirror_quad)
    return rB
def fromroots_lnG(roots):
    """Build a canonicalized Chebyshev series from ``roots``.

    Returns ``(c, lnG)`` where ``c`` has a unit leading coefficient and
    ``lnG`` is the log-gain absorbed by canonicalization, including the
    2**(n-1) scaling inherent to a product of Chebyshev factors.
    """
    series = np.polynomial.chebyshev.chebfromroots(roots)
    gain = coeff_canonicalization_gain(series)
    series = series / gain
    return series, log2 * (len(roots) - 1) + np.log(gain)
def val_lnG(X, c, lnG=0):
    """Evaluate the Chebyshev series ``c`` at points ``X``, tracking gain.

    Returns ``(values, lnG')``; the subtracted ``(len(c) - 2) * log2``
    compensates for the unit-leading-coefficient scaling assumed by
    ``fromroots_lnG``.
    """
    values = np.polynomial.chebyshev.chebval(X, c)
    return values, lnG - (len(c) - 2) * log2
def vander_lnG(X, N, lnG=0):
    """Chebyshev pseudo-Vandermonde matrix of degree ``N`` at points ``X``.

    Returns ``(V, lnG')``; the subtracted ``(N - 1) * log2`` matches the
    unit-leading-coefficient scaling used elsewhere in this module.
    """
    vander = np.polynomial.chebyshev.chebvander(X, N)
    return vander, lnG - (N - 1) * log2
def companion(c):
    """Companion (colleague) matrix of the Chebyshev series ``c``."""
    cheb = np.polynomial.chebyshev
    return cheb.chebcompanion(c)
| [
"numpy.polynomial.chebyshev.chebfromroots",
"numpy.polynomial.chebyshev.chebval",
"numpy.log",
"numpy.polynomial.chebyshev.chebcompanion",
"numpy.array",
"numpy.polynomial.chebyshev.chebroots",
"numpy.polynomial.chebyshev.chebvander",
"warnings.warn",
"numpy.all"
] | [((498, 507), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (504, 507), True, 'import numpy as np\n'), ((685, 721), 'numpy.polynomial.chebyshev.chebroots', 'np.polynomial.chebyshev.chebroots', (['c'], {}), '(c)\n', (718, 721), True, 'import numpy as np\n'), ((752, 765), 'numpy.log', 'np.log', (['c[-1]'], {}), '(c[-1])\n', (758, 765), True, 'import numpy as np\n'), ((2084, 2128), 'numpy.polynomial.chebyshev.chebfromroots', 'np.polynomial.chebyshev.chebfromroots', (['roots'], {}), '(roots)\n', (2121, 2128), True, 'import numpy as np\n'), ((2192, 2202), 'numpy.log', 'np.log', (['cG'], {}), '(cG)\n', (2198, 2202), True, 'import numpy as np\n'), ((2715, 2755), 'numpy.polynomial.chebyshev.chebcompanion', 'np.polynomial.chebyshev.chebcompanion', (['c'], {}), '(c)\n', (2752, 2755), True, 'import numpy as np\n'), ((777, 813), 'numpy.polynomial.chebyshev.chebroots', 'np.polynomial.chebyshev.chebroots', (['c'], {}), '(c)\n', (810, 813), True, 'import numpy as np\n'), ((914, 938), 'numpy.all', 'np.all', (['(c[::2].imag == 0)'], {}), '(c[::2].imag == 0)\n', (920, 938), True, 'import numpy as np\n'), ((1002, 1027), 'numpy.all', 'np.all', (['(c[1::2].imag == 0)'], {}), '(c[1::2].imag == 0)\n', (1008, 1027), True, 'import numpy as np\n'), ((1091, 1116), 'numpy.all', 'np.all', (['(c[1::2].real == 0)'], {}), '(c[1::2].real == 0)\n', (1097, 1116), True, 'import numpy as np\n'), ((2399, 2436), 'numpy.polynomial.chebyshev.chebval', 'np.polynomial.chebyshev.chebval', (['X', 'c'], {}), '(X, c)\n', (2430, 2436), True, 'import numpy as np\n'), ((2628, 2668), 'numpy.polynomial.chebyshev.chebvander', 'np.polynomial.chebyshev.chebvander', (['X', 'N'], {}), '(X, N)\n', (2662, 2668), True, 'import numpy as np\n'), ((1639, 1752), 'warnings.warn', 'warnings.warn', (['"""Unmirrored root in mirror_imag polynomial root finder (need to upgrade this algorithm)"""'], {}), "(\n 'Unmirrored root in mirror_imag polynomial root finder (need to upgrade this algorithm)'\n )\n", (1652, 1752), False, 
'import warnings\n'), ((1843, 1855), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1851, 1855), True, 'import numpy as np\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import operator
import cv2
import numpy as np
import pyclipper
import torch
from mmcv.ops import contour_expand, pixel_group
from numpy.fft import ifft
from numpy.linalg import norm
from shapely.geometry import Polygon
from skimage.morphology import skeletonize
from mmocr.core import points2boundary
from mmocr.core.evaluation.utils import boundary_iou
def filter_instance(area, confidence, min_area, min_confidence):
    """Return True when a text instance is too small or too low-confidence
    and should therefore be discarded."""
    too_small = area < min_area
    too_weak = confidence < min_confidence
    return bool(too_small or too_weak)
def decode(
        decoding_type='pan',  # 'pan' or 'pse'
        **kwargs):
    """Dispatch to the post-processing decoder named by ``decoding_type``.

    Raises:
        NotImplementedError: For an unrecognized ``decoding_type``.
    """
    decoders = {
        'pan': pan_decode,
        'pse': pse_decode,
        'db': db_decode,
        'textsnake': textsnake_decode,
        'fcenet': fcenet_decode,
        'drrg': drrg_decode,
    }
    if decoding_type not in decoders:
        raise NotImplementedError
    return decoders[decoding_type](**kwargs)
def pan_decode(preds,
               text_repr_type='poly',
               min_text_confidence=0.5,
               min_kernel_confidence=0.5,
               min_text_avg_confidence=0.85,
               min_text_area=16):
    """Convert scores to quadrangles via post processing in PANet. This is
    partially adapted from https://github.com/WenmuZhou/PAN.pytorch.

    Args:
        preds (tensor): The head output tensor of size 6xHxW.
        text_repr_type (str): The boundary encoding type 'poly' or 'quad'.
        min_text_confidence (float): The minimal text confidence.
        min_kernel_confidence (float): The minimal kernel confidence.
        min_text_avg_confidence (float): The minimal text average confidence.
        min_text_area (int): The minimal text instance region area.

    Returns:
        boundaries: (list[list[float]]): The instance boundary and its
            instance confidence list.
    """
    # channels 0/1 are text/kernel logits -> probabilities
    preds[:2, :, :] = torch.sigmoid(preds[:2, :, :])
    preds = preds.detach().cpu().numpy()
    text_score = preds[0].astype(np.float32)
    text = preds[0] > min_text_confidence
    # kernel pixels must also be text pixels
    kernel = (preds[1] > min_kernel_confidence) * text
    embeddings = preds[2:].transpose((1, 2, 0))  # (h, w, 4)
    # label connected kernel regions; each becomes a candidate instance seed
    region_num, labels = cv2.connectedComponents(
        kernel.astype(np.uint8), connectivity=4)
    contours, _ = cv2.findContours((kernel * 255).astype(np.uint8),
                                  cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    kernel_contours = np.zeros(text.shape, dtype='uint8')
    cv2.drawContours(kernel_contours, contours, -1, 255)
    # assign text pixels to kernels by embedding similarity (mmcv CUDA/C op)
    text_points = pixel_group(text_score, text, embeddings, labels,
                              kernel_contours, region_num,
                              min_text_avg_confidence)
    boundaries = []
    for text_inx, text_point in enumerate(text_points):
        # text_point layout: [confidence, label, x0, y0, x1, y1, ...]
        text_confidence = text_point[0]
        text_point = text_point[2:]
        text_point = np.array(text_point, dtype=int).reshape(-1, 2)
        area = text_point.shape[0]
        if filter_instance(area, text_confidence, min_text_area,
                           min_text_avg_confidence):
            continue
        vertices_confidence = points2boundary(text_point, text_repr_type,
                                              text_confidence)
        if vertices_confidence is not None:
            boundaries.append(vertices_confidence)
    return boundaries
def pse_decode(preds,
               text_repr_type='poly',
               min_kernel_confidence=0.5,
               min_text_avg_confidence=0.85,
               min_kernel_area=0,
               min_text_area=16):
    """Decoding predictions of PSENet to instances. This is partially adapted
    from https://github.com/whai362/PSENet.

    Args:
        preds (tensor): The head output tensor of size nxHxW.
        text_repr_type (str): The boundary encoding type 'poly' or 'quad'.
        min_kernel_confidence (float): The minimal kernel confidence.
        min_text_avg_confidence (float): The minimal text average confidence.
        min_kernel_area (int): The minimal text kernel area.
        min_text_area (int): The minimal text instance region area.

    Returns:
        boundaries: (list[list[float]]): The instance boundary and its
            instance confidence list.
    """
    preds = torch.sigmoid(preds)  # text confidence
    score = preds[0, :, :]
    masks = preds > min_kernel_confidence
    text_mask = masks[0, :, :]
    # every shrunken kernel map is restricted to the full text region
    kernel_masks = masks[0:, :, :] * text_mask
    score = score.data.cpu().numpy().astype(np.float32)  # to numpy
    kernel_masks = kernel_masks.data.cpu().numpy().astype(np.uint8)  # to numpy
    # seed instances from the most-shrunken kernel map
    region_num, labels = cv2.connectedComponents(
        kernel_masks[-1], connectivity=4)
    # labels = pse(kernel_masks, min_kernel_area)
    # progressively expand the seed labels through the larger kernels
    labels = contour_expand(kernel_masks, labels, min_kernel_area, region_num)
    labels = np.array(labels)
    label_num = np.max(labels)
    boundaries = []
    for i in range(1, label_num + 1):
        # (row, col) -> (x, y) point list for instance i
        points = np.array(np.where(labels == i)).transpose((1, 0))[:, ::-1]
        area = points.shape[0]
        score_instance = np.mean(score[labels == i])
        if filter_instance(area, score_instance, min_text_area,
                           min_text_avg_confidence):
            continue
        vertices_confidence = points2boundary(points, text_repr_type,
                                              score_instance)
        if vertices_confidence is not None:
            boundaries.append(vertices_confidence)
    return boundaries
def box_score_fast(bitmap, _box):
    """Mean of ``bitmap`` inside polygon ``_box`` (DBNet box scoring).

    Only the polygon's bounding box is cropped and masked, so the cost is
    proportional to the box size rather than the full map.
    """
    h, w = bitmap.shape[:2]
    box = _box.copy()
    # bounding box of the polygon, clamped to the bitmap
    xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int32), 0, w - 1)
    xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int32), 0, w - 1)
    ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int32), 0, h - 1)
    ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int32), 0, h - 1)
    mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
    # shift polygon into the cropped coordinate frame
    box[:, 0] = box[:, 0] - xmin
    box[:, 1] = box[:, 1] - ymin
    cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
    return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]
def unclip(box, unclip_ratio=1.5):
    """Dilate ``box`` outward, inverting DBNet's shrink step.

    The offset distance is ``area * unclip_ratio / perimeter`` of the input
    polygon; rounded joins are used for the expansion.
    """
    poly = Polygon(box)
    distance = poly.area * unclip_ratio / poly.length
    offset = pyclipper.PyclipperOffset()
    offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
    expanded = np.array(offset.Execute(distance))
    return expanded
def db_decode(preds,
              text_repr_type='poly',
              mask_thr=0.3,
              min_text_score=0.3,
              min_text_width=5,
              unclip_ratio=1.5,
              max_candidates=3000):
    """Decoding predictions of DbNet to instances. This is partially adapted
    from https://github.com/MhLiao/DB.

    Args:
        preds (Tensor): The head output tensor of size nxHxW.
        text_repr_type (str): The boundary encoding type 'poly' or 'quad'.
        mask_thr (float): The mask threshold value for binarization.
        min_text_score (float): The threshold value for converting binary map
            to shrink text regions.
        min_text_width (int): The minimum width of boundary polygon/box
            predicted.
        unclip_ratio (float): The unclip ratio for text regions dilation.
        max_candidates (int): The maximum candidate number.

    Returns:
        boundaries: (list[list[float]]): The predicted text boundaries.
    """
    prob_map = preds[0, :, :]
    text_mask = prob_map > mask_thr
    score_map = prob_map.data.cpu().numpy().astype(np.float32)
    text_mask = text_mask.data.cpu().numpy().astype(np.uint8)  # to numpy
    contours, _ = cv2.findContours((text_mask * 255).astype(np.uint8),
                                  cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    boundaries = []
    for i, poly in enumerate(contours):
        if i > max_candidates:
            break
        # simplify contour; tolerance is 1% of its perimeter
        epsilon = 0.01 * cv2.arcLength(poly, True)
        approx = cv2.approxPolyDP(poly, epsilon, True)
        points = approx.reshape((-1, 2))
        if points.shape[0] < 4:
            continue
        score = box_score_fast(score_map, points)
        if score < min_text_score:
            continue
        # dilate the shrunken region back to full text size
        poly = unclip(points, unclip_ratio=unclip_ratio)
        if len(poly) == 0 or isinstance(poly[0], list):
            continue
        poly = poly.reshape(-1, 2)
        if text_repr_type == 'quad':
            poly = points2boundary(poly, text_repr_type, score, min_text_width)
        elif text_repr_type == 'poly':
            poly = poly.flatten().tolist()
            if score is not None:
                poly = poly + [score]
            if len(poly) < 8:
                # fewer than 4 points cannot form a polygon boundary
                poly = None
        else:
            raise ValueError(f'Invalid text repr type {text_repr_type}')
        if poly is not None:
            boundaries.append(poly)
    return boundaries
def fill_hole(input_mask):
    """Fill enclosed holes in a binary mask.

    Flood-fills the background from the border of a padded copy; any pixel
    the flood cannot reach is an enclosed hole and ends up True in the
    result, OR-ed with the original mask.
    """
    h, w = input_mask.shape
    canvas = np.zeros((h + 2, w + 2), np.uint8)
    canvas[1:h + 1, 1:w + 1] = input_mask.copy()
    mask = np.zeros((h + 4, w + 4), np.uint8)
    cv2.floodFill(canvas, mask, (0, 0), 1)
    # fix: `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
    # documented equivalent dtype.
    canvas = canvas[1:h + 1, 1:w + 1].astype(bool)
    return ~canvas | input_mask
def centralize(points_yx,
               normal_sin,
               normal_cos,
               radius,
               contour_mask,
               step_ratio=0.03):
    """Recenter skeleton points between the two text boundaries.

    Each point is marched along +/- its normal direction in steps of
    ``step_ratio * radius`` until the next step would leave
    ``contour_mask``; the midpoint of the two stopping positions is the
    centered point.

    Args:
        points_yx (ndarray): N x 2 array of (y, x) skeleton points.
        normal_sin (ndarray): N x 1 sine of each point's normal direction.
        normal_cos (ndarray): N x 1 cosine of each point's normal direction.
        radius (ndarray): N x 1 local text radius at each point.
        contour_mask (ndarray): Binary mask of the text region.
        step_ratio (float): Step size as a fraction of the local radius.

    Returns:
        centers (ndarray): N x 2 int32 array of centered (y, x) points.
    """
    h, w = contour_mask.shape
    top_yx = bot_yx = points_yx
    # fix: `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
    # documented equivalent dtype (two occurrences below).
    step_flags = np.ones((len(points_yx), 1), dtype=bool)
    step = step_ratio * radius * np.hstack([normal_sin, normal_cos])
    # march "up" along the normal until every point would exit the mask
    while np.any(step_flags):
        next_yx = np.array(top_yx + step, dtype=np.int32)
        next_y, next_x = next_yx[:, 0], next_yx[:, 1]
        step_flags = (next_y >= 0) & (next_y < h) & (next_x > 0) & (
            next_x < w) & contour_mask[np.clip(next_y, 0, h - 1),
                                       np.clip(next_x, 0, w - 1)]
        top_yx = top_yx + step_flags.reshape((-1, 1)) * step
    step_flags = np.ones((len(points_yx), 1), dtype=bool)
    # march "down" along the opposite direction
    while np.any(step_flags):
        next_yx = np.array(bot_yx - step, dtype=np.int32)
        next_y, next_x = next_yx[:, 0], next_yx[:, 1]
        step_flags = (next_y >= 0) & (next_y < h) & (next_x > 0) & (
            next_x < w) & contour_mask[np.clip(next_y, 0, h - 1),
                                       np.clip(next_x, 0, w - 1)]
        bot_yx = bot_yx - step_flags.reshape((-1, 1)) * step
    centers = np.array((top_yx + bot_yx) * 0.5, dtype=np.int32)
    return centers
def merge_disks(disks, disk_overlap_thr):
    """Greedily merge overlapping disks, highest score first.

    Two disks overlap when their center distance is at most
    ``disk_overlap_thr`` times the sum of their radii; each overlapping
    group is replaced by its component-wise mean.

    Args:
        disks (ndarray): N x 4 array of (x, y, radius, score) rows.
        disk_overlap_thr (float): The radius-overlap merge threshold.

    Returns:
        merged_disks (ndarray): The merged disk rows.
    """
    centers = disks[:, 0:2]
    radii = disks[:, 2]
    order = disks[:, 3].argsort()[::-1]
    merged = []
    while order.size > 0:
        if order.size == 1:
            merged.append(disks[order])
            break
        best = order[0]
        rest = order[1:]
        dist = norm(centers[best] - centers[rest], axis=1)
        thr = (radii[best] + radii[rest]) * disk_overlap_thr
        overlap = np.where(dist <= thr)[0] + 1
        if overlap.size > 0:
            group = np.hstack([best, order[overlap]])
            merged.append(np.mean(disks[group], axis=0))
        else:
            merged.append(disks[best])
        order = order[np.where(dist > thr)[0] + 1]
    return np.vstack(merged)
def textsnake_decode(preds,
                     text_repr_type='poly',
                     min_text_region_confidence=0.6,
                     min_center_region_confidence=0.2,
                     min_center_area=30,
                     disk_overlap_thr=0.03,
                     radius_shrink_ratio=1.03):
    """Decoding predictions of TextSnake to instances. This was partially
    adapted from https://github.com/princewang1994/TextSnake.pytorch.

    Args:
        preds (tensor): The head output tensor of size 6xHxW.
        text_repr_type (str): The boundary encoding type 'poly' or 'quad'.
        min_text_region_confidence (float): The confidence threshold of text
            region in TextSnake.
        min_center_region_confidence (float): The confidence threshold of text
            center region in TextSnake.
        min_center_area (int): The minimal text center region area.
        disk_overlap_thr (float): The radius overlap threshold for merging
            disks.
        radius_shrink_ratio (float): The shrink ratio of ordered disks radii.

    Returns:
        boundaries (list[list[float]]): The instance boundary and its
            instance confidence list.
    """
    assert text_repr_type == 'poly'
    preds[:2, :, :] = torch.sigmoid(preds[:2, :, :])
    preds = preds.detach().cpu().numpy()
    pred_text_score = preds[0]
    pred_text_mask = pred_text_score > min_text_region_confidence
    pred_center_score = preds[1] * pred_text_score
    pred_center_mask = pred_center_score > min_center_region_confidence
    pred_sin = preds[2]
    pred_cos = preds[3]
    pred_radius = preds[4]
    mask_sz = pred_text_mask.shape
    # renormalize (sin, cos) to the unit circle
    scale = np.sqrt(1.0 / (pred_sin**2 + pred_cos**2 + 1e-8))
    pred_sin = pred_sin * scale
    pred_cos = pred_cos * scale
    pred_center_mask = fill_hole(pred_center_mask).astype(np.uint8)
    center_contours, _ = cv2.findContours(pred_center_mask, cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)
    boundaries = []
    for contour in center_contours:
        if cv2.contourArea(contour) < min_center_area:
            continue
        instance_center_mask = np.zeros(mask_sz, dtype=np.uint8)
        cv2.drawContours(instance_center_mask, [contour], -1, 1, -1)
        # reduce the center region to a one-pixel-wide skeleton
        skeleton = skeletonize(instance_center_mask)
        skeleton_yx = np.argwhere(skeleton > 0)
        y, x = skeleton_yx[:, 0], skeleton_yx[:, 1]
        cos = pred_cos[y, x].reshape((-1, 1))
        sin = pred_sin[y, x].reshape((-1, 1))
        radius = pred_radius[y, x].reshape((-1, 1))
        # push skeleton points to the true center line of the region
        center_line_yx = centralize(skeleton_yx, cos, -sin, radius,
                                    instance_center_mask)
        y, x = center_line_yx[:, 0], center_line_yx[:, 1]
        radius = (pred_radius[y, x] * radius_shrink_ratio).reshape((-1, 1))
        score = pred_center_score[y, x].reshape((-1, 1))
        instance_disks = np.hstack([np.fliplr(center_line_yx), radius, score])
        instance_disks = merge_disks(instance_disks, disk_overlap_thr)
        # paint the merged disks to recover the full instance region
        instance_mask = np.zeros(mask_sz, dtype=np.uint8)
        for x, y, radius, score in instance_disks:
            if radius > 1:
                cv2.circle(instance_mask, (int(x), int(y)), int(radius), 1, -1)
        contours, _ = cv2.findContours(instance_mask, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
        # instance confidence = mean text score over the painted region
        score = np.sum(instance_mask * pred_text_score) / (
            np.sum(instance_mask) + 1e-8)
        if (len(contours) > 0 and cv2.contourArea(contours[0]) > 0
                and contours[0].size > 8):
            boundary = contours[0].flatten().tolist()
            boundaries.append(boundary + [score])
    return boundaries
def fcenet_decode(preds,
                  fourier_degree,
                  num_reconstr_points,
                  scale,
                  alpha=1.0,
                  beta=2.0,
                  text_repr_type='poly',
                  score_thr=0.3,
                  nms_thr=0.1):
    """Decoding predictions of FCENet to instances.

    Args:
        preds (list(Tensor)): The head output tensors.
        fourier_degree (int): The maximum Fourier transform degree k.
        num_reconstr_points (int): The points number of the polygon
            reconstructed from predicted Fourier coefficients.
        scale (int): The down-sample scale of the prediction.
        alpha (float) : The parameter to calculate final scores. Score_{final}
            = (Score_{text region} ^ alpha)
            * (Score_{text center region}^ beta)
        beta (float) : The parameter to calculate final score.
        text_repr_type (str): Boundary encoding type 'poly' or 'quad'.
        score_thr (float) : The threshold used to filter out the final
            candidates.
        nms_thr (float) : The threshold of nms.

    Returns:
        boundaries (list[list[float]]): The instance boundary and confidence
            list.
    """
    assert isinstance(preds, list)
    assert len(preds) == 2
    assert text_repr_type in ['poly', 'quad']
    cls_pred = preds[0][0]
    tr_pred = cls_pred[0:2].softmax(dim=0).data.cpu().numpy()
    tcl_pred = cls_pred[2:].softmax(dim=0).data.cpu().numpy()
    reg_pred = preds[1][0].permute(1, 2, 0).data.cpu().numpy()
    x_pred = reg_pred[:, :, :2 * fourier_degree + 1]
    y_pred = reg_pred[:, :, 2 * fourier_degree + 1:]
    # final score combines text-region and text-center-line confidences
    score_pred = (tr_pred[1]**alpha) * (tcl_pred[1]**beta)
    tr_pred_mask = (score_pred) > score_thr
    tr_mask = fill_hole(tr_pred_mask)
    tr_contours, _ = cv2.findContours(
        tr_mask.astype(np.uint8), cv2.RETR_TREE,
        cv2.CHAIN_APPROX_SIMPLE)  # opencv4
    mask = np.zeros_like(tr_mask)
    boundaries = []
    for cont in tr_contours:
        deal_map = mask.copy().astype(np.int8)
        cv2.drawContours(deal_map, [cont], -1, 1, -1)
        score_map = score_pred * deal_map
        score_mask = score_map > 0
        xy_text = np.argwhere(score_mask)
        dxy = xy_text[:, 1] + xy_text[:, 0] * 1j
        x, y = x_pred[score_mask], y_pred[score_mask]
        c = x + y * 1j
        # shift the DC Fourier term by each pixel's own position
        c[:, fourier_degree] = c[:, fourier_degree] + dxy
        c *= scale
        polygons = fourier2poly(c, num_reconstr_points)
        score = score_map[score_mask].reshape(-1, 1)
        polygons = poly_nms(np.hstack((polygons, score)).tolist(), nms_thr)
        boundaries = boundaries + polygons
    boundaries = poly_nms(boundaries, nms_thr)
    if text_repr_type == 'quad':
        new_boundaries = []
        for boundary in boundaries:
            poly = np.array(boundary[:-1]).reshape(-1, 2).astype(np.float32)
            score = boundary[-1]
            points = cv2.boxPoints(cv2.minAreaRect(poly))
            # fix: `np.int0` was removed in NumPy 1.24; `np.intp` is its
            # documented equivalent.
            points = np.intp(points)
            new_boundaries.append(points.reshape(-1).tolist() + [score])
        # fix: the quad results were previously built but discarded,
        # making 'quad' behave identically to 'poly'.
        boundaries = new_boundaries
    return boundaries
def poly_nms(polygons, threshold):
    """Non-maximum suppression over scored polygons.

    Each polygon is ``[x0, y0, ..., xn, yn, score]``. Polygons are taken in
    descending score order; any remaining candidate whose boundary IoU with
    a kept polygon exceeds ``threshold`` is discarded.
    """
    assert isinstance(polygons, list)
    # ascending sort by score: the best candidate sits at the end
    polygons = np.array(sorted(polygons, key=lambda x: x[-1]))
    keep_poly = []
    index = [i for i in range(polygons.shape[0])]
    while len(index) > 0:
        keep_poly.append(polygons[index[-1]].tolist())
        A = polygons[index[-1]][:-1]
        index = np.delete(index, -1)
        iou_list = np.zeros((len(index), ))
        for i in range(len(index)):
            B = polygons[index[i]][:-1]
            iou_list[i] = boundary_iou(A, B, 1)
        remove_index = np.where(iou_list > threshold)
        index = np.delete(index, remove_index)
    return keep_poly
def fourier2poly(fourier_coeff, num_reconstr_points=50):
    """ Inverse Fourier transform
        Args:
            fourier_coeff (ndarray): Fourier coefficients shaped (n, 2k+1),
                with n and k being candidates number and Fourier degree
                respectively.
            num_reconstr_points (int): Number of reconstructed polygon points.
        Returns:
            Polygons (ndarray): The reconstructed polygons shaped (n, n')
    """
    n_candidates = len(fourier_coeff)
    degree = (len(fourier_coeff[0]) - 1) // 2
    # Scatter the coefficients into FFT bin order: non-negative frequencies
    # at the front of the spectrum, negative frequencies at the back.
    spectrum = np.zeros((n_candidates, num_reconstr_points), dtype='complex')
    spectrum[:, 0:degree + 1] = fourier_coeff[:, degree:]
    spectrum[:, -degree:] = fourier_coeff[:, :degree]
    samples = ifft(spectrum) * num_reconstr_points
    polygon = np.zeros((n_candidates, num_reconstr_points, 2))
    polygon[:, :, 0] = samples.real
    polygon[:, :, 1] = samples.imag
    return polygon.astype('int32').reshape((n_candidates, -1))
class Node:
    """Graph vertex used for grouping text components.

    Holds an immutable index plus a set of linked neighbours; links are
    always symmetric (adding a link registers both endpoints).
    """

    def __init__(self, ind):
        self.__index = ind
        self.__neighbors = set()

    @property
    def ind(self):
        """Index of the text component this node represents."""
        return self.__index

    @property
    def links(self):
        """A defensive copy of the neighbour set."""
        return set(self.__neighbors)

    def add_link(self, link_node):
        """Create a symmetric edge between this node and ``link_node``."""
        self.__neighbors.add(link_node)
        link_node.__neighbors.add(self)
def graph_propagation(edges, scores, text_comps, edge_len_thr=50.):
    """Propagate edge score information and construct graph. This code was
    partially adapted from https://github.com/GXYM/DRRG licensed under the MIT
    license.

    Args:
        edges (ndarray): The edge array of shape N * 2, each row is a node
            index pair that makes up an edge in graph.
        scores (ndarray): The edge score array.
        text_comps (ndarray): The text components.
        edge_len_thr (float): The edge length threshold.

    Returns:
        vertices (list[Node]): The Nodes in graph.
        score_dict (dict): The edge score dict.
    """
    assert edges.ndim == 2
    assert edges.shape[1] == 2
    assert edges.shape[0] == scores.shape[0]
    assert text_comps.ndim == 2
    assert isinstance(edge_len_thr, float)
    edges = np.sort(edges, axis=1)
    score_dict = {}
    for i, edge in enumerate(edges):
        if text_comps is not None:
            # zero out edges connecting component boxes that are too far apart
            box1 = text_comps[edge[0], :8].reshape(4, 2)
            box2 = text_comps[edge[1], :8].reshape(4, 2)
            center1 = np.mean(box1, axis=0)
            center2 = np.mean(box2, axis=0)
            distance = norm(center1 - center2)
            if distance > edge_len_thr:
                scores[i] = 0
        # a duplicated edge averages its scores
        if (edge[0], edge[1]) in score_dict:
            score_dict[edge[0], edge[1]] = 0.5 * (
                score_dict[edge[0], edge[1]] + scores[i])
        else:
            score_dict[edge[0], edge[1]] = scores[i]
    nodes = np.sort(np.unique(edges.flatten()))
    # fix: `np.int` was removed in NumPy 1.24; the builtin `int` is the
    # documented equivalent dtype.
    mapping = -1 * np.ones((np.max(nodes) + 1), dtype=int)
    mapping[nodes] = np.arange(nodes.shape[0])
    order_inds = mapping[edges]
    vertices = [Node(node) for node in nodes]
    for ind in order_inds:
        vertices[ind[0]].add_link(vertices[ind[1]])
    return vertices, score_dict
def connected_components(nodes, score_dict, link_thr):
    """Conventional connected components searching. This code was partially
    adapted from https://github.com/GXYM/DRRG licensed under the MIT license.

    Args:
        nodes (list[Node]): The list of Node objects.
        score_dict (dict): The edge score dict.
        link_thr (float): The link threshold.

    Returns:
        clusters (List[list[Node]]): The clustered Node objects.
    """
    assert isinstance(nodes, list)
    assert all([isinstance(node, Node) for node in nodes])
    assert isinstance(score_dict, dict)
    assert isinstance(link_thr, float)
    clusters = []
    nodes = set(nodes)
    # BFS flood fill: pop an unvisited node and absorb every neighbour
    # reachable through edges scoring at least link_thr
    while nodes:
        node = nodes.pop()
        cluster = {node}
        node_queue = [node]
        while node_queue:
            node = node_queue.pop(0)
            neighbors = set([
                neighbor for neighbor in node.links if
                score_dict[tuple(sorted([node.ind, neighbor.ind]))] >= link_thr
            ])
            # visit each node at most once across all clusters
            neighbors.difference_update(cluster)
            nodes.difference_update(neighbors)
            cluster.update(neighbors)
            node_queue.extend(neighbors)
        clusters.append(list(cluster))
    return clusters
def clusters2labels(clusters, num_nodes):
    """Convert clusters of Node to text component labels. This code was
    partially adapted from https://github.com/GXYM/DRRG licensed under the MIT
    license.

    Args:
        clusters (List[list[Node]]): The clusters of Node objects.
        num_nodes (int): The total node number of graphs in an image.

    Returns:
        node_labels (ndarray): The node label array.
    """
    assert isinstance(clusters, list)
    assert all([isinstance(cluster, list) for cluster in clusters])
    assert all(
        [isinstance(node, Node) for cluster in clusters for node in cluster])
    assert isinstance(num_nodes, int)
    node_labels = np.zeros(num_nodes)
    for label, members in enumerate(clusters):
        for member in members:
            node_labels[member.ind] = label
    return node_labels
def remove_single(text_comps, comp_pred_labels):
    """Remove isolated text components. This code was partially adapted from
    https://github.com/GXYM/DRRG licensed under the MIT license.

    Args:
        text_comps (ndarray): The text components.
        comp_pred_labels (ndarray): The clustering labels of text components.

    Returns:
        filtered_text_comps (ndarray): The text components with isolated ones
            removed.
        comp_pred_labels (ndarray): The clustering labels with labels of
            isolated text components removed.
    """
    assert text_comps.ndim == 2
    assert text_comps.shape[0] == comp_pred_labels.shape[0]
    # flag every component that is the sole member of its cluster
    is_single = np.zeros_like(comp_pred_labels)
    for label in np.unique(comp_pred_labels):
        members = comp_pred_labels == label
        if np.sum(members) == 1:
            is_single[np.where(members)[0][0]] = 1
    keep = [i for i in range(len(comp_pred_labels)) if not is_single[i]]
    return text_comps[keep, :], comp_pred_labels[keep]
def norm2(point1, point2):
    """Euclidean distance between two 2-D points."""
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return (dx * dx + dy * dy) ** 0.5
def min_connect_path(points):
    """Find the shortest path to traverse all points. This code was partially
    adapted from https://github.com/GXYM/DRRG licensed under the MIT license.

    Args:
        points(List[list[int]]): The point sequence [[x0, y0], [x1, y1], ...].

    Returns:
        shortest_path(List[list[int]]): The shortest index path.
    """
    assert isinstance(points, list)
    assert all([isinstance(point, list) for point in points])
    assert all([isinstance(coord, int) for point in points for coord in point])
    points_queue = points.copy()
    shortest_path = []
    # current_edge holds the two open endpoints of the path being grown
    current_edge = [[], []]
    # candidate extensions keyed by distance: dict0 grows the head,
    # dict1 grows the tail (NOTE: equal distances collide on the key)
    edge_dict0 = {}
    edge_dict1 = {}
    current_edge[0] = points_queue[0]
    current_edge[1] = points_queue[0]
    points_queue.remove(points_queue[0])
    while points_queue:
        for point in points_queue:
            length0 = norm2(point, current_edge[0])
            edge_dict0[length0] = [point, current_edge[0]]
            length1 = norm2(current_edge[1], point)
            edge_dict1[length1] = [current_edge[1], point]
        key0 = min(edge_dict0.keys())
        key1 = min(edge_dict1.keys())
        # greedily attach the nearest remaining point to whichever end is
        # closer to it
        if key0 <= key1:
            start = edge_dict0[key0][0]
            end = edge_dict0[key0][1]
            shortest_path.insert(0, [points.index(start), points.index(end)])
            points_queue.remove(start)
            current_edge[0] = start
        else:
            start = edge_dict1[key1][0]
            end = edge_dict1[key1][1]
            shortest_path.append([points.index(start), points.index(end)])
            points_queue.remove(end)
            current_edge[1] = end
        edge_dict0 = {}
        edge_dict1 = {}
    # flatten the index pairs, then deduplicate while preserving order
    shortest_path = functools.reduce(operator.concat, shortest_path)
    shortest_path = sorted(set(shortest_path), key=shortest_path.index)
    return shortest_path
def in_contour(cont, point):
    """Return True if ``point`` (x, y) lies strictly inside contour ``cont``."""
    x, y = point
    # pointPolygonTest returns +1 inside, 0 on the edge, -1 outside
    is_inner = cv2.pointPolygonTest(cont, (int(x), int(y)), False) > 0.5
    return is_inner
def fix_corner(top_line, bot_line, start_box, end_box):
    """Add corner points to predicted side lines. This code was partially
    adapted from https://github.com/GXYM/DRRG licensed under the MIT license.

    Args:
        top_line (List[list[int]]): The predicted top sidelines of text
            instance.
        bot_line (List[list[int]]): The predicted bottom sidelines of text
            instance.
        start_box (ndarray): The first text component box.
        end_box (ndarray): The last text component box.

    Returns:
        top_line (List[list[int]]): The top sidelines with corner point added.
        bot_line (List[list[int]]): The bottom sidelines with corner point
            added.
    """
    assert isinstance(top_line, list)
    assert all(isinstance(point, list) for point in top_line)
    assert isinstance(bot_line, list)
    assert all(isinstance(point, list) for point in bot_line)
    assert start_box.shape == end_box.shape == (4, 2)
    # closed polygon formed by the two sidelines
    contour = np.array(top_line + bot_line[::-1])
    # midpoints of the left/right edges of the first and last boxes
    start_left_mid = (start_box[0] + start_box[3]) / 2
    start_right_mid = (start_box[1] + start_box[2]) / 2
    end_left_mid = (end_box[0] + end_box[3]) / 2
    end_right_mid = (end_box[1] + end_box[2]) / 2
    # prepend the start box's corners if its edge sticks out of the contour
    if not in_contour(contour, start_left_mid):
        top_line.insert(0, start_box[0].tolist())
        bot_line.insert(0, start_box[3].tolist())
    elif not in_contour(contour, start_right_mid):
        top_line.insert(0, start_box[1].tolist())
        bot_line.insert(0, start_box[2].tolist())
    # append the end box's corners symmetrically
    if not in_contour(contour, end_left_mid):
        top_line.append(end_box[0].tolist())
        bot_line.append(end_box[3].tolist())
    elif not in_contour(contour, end_right_mid):
        top_line.append(end_box[1].tolist())
        bot_line.append(end_box[2].tolist())
    return top_line, bot_line
def comps2boundaries(text_comps, comp_pred_labels):
    """Construct text instance boundaries from clustered text components. This
    code was partially adapted from https://github.com/GXYM/DRRG licensed under
    the MIT license.

    Args:
        text_comps (ndarray): The text components.
        comp_pred_labels (ndarray): The clustering labels of text components.

    Returns:
        boundaries (List[list[float]]): The predicted boundaries of text
            instances.
    """
    assert text_comps.ndim == 2
    assert len(text_comps) == len(comp_pred_labels)
    boundaries = []
    if len(text_comps) < 1:
        return boundaries
    for cluster_ind in range(0, int(np.max(comp_pred_labels)) + 1):
        cluster_comp_inds = np.where(comp_pred_labels == cluster_ind)
        text_comp_boxes = text_comps[cluster_comp_inds, :8].reshape(
            (-1, 4, 2)).astype(np.int32)
        score = np.mean(text_comps[cluster_comp_inds, -1])
        if text_comp_boxes.shape[0] < 1:
            continue
        elif text_comp_boxes.shape[0] > 1:
            # order component boxes along the text line via a greedy path
            centers = np.mean(
                text_comp_boxes, axis=1).astype(np.int32).tolist()
            shortest_path = min_connect_path(centers)
            text_comp_boxes = text_comp_boxes[shortest_path]
            # top/bottom sidelines from the means of each box's top and
            # bottom edge midpoints
            top_line = np.mean(
                text_comp_boxes[:, 0:2, :], axis=1).astype(np.int32).tolist()
            bot_line = np.mean(
                text_comp_boxes[:, 2:4, :], axis=1).astype(np.int32).tolist()
            top_line, bot_line = fix_corner(top_line, bot_line,
                                            text_comp_boxes[0],
                                            text_comp_boxes[-1])
            boundary_points = top_line + bot_line[::-1]
        else:
            top_line = text_comp_boxes[0, 0:2, :].astype(np.int32).tolist()
            # NOTE(review): slice 2:4:-1 is empty, so a single-box instance
            # gets no bottom line — looks like it should be reversed corners
            # (e.g. 3:1:-1); verify against upstream DRRG before changing.
            bot_line = text_comp_boxes[0, 2:4:-1, :].astype(np.int32).tolist()
            boundary_points = top_line + bot_line
        boundary = [p for coord in boundary_points for p in coord] + [score]
        boundaries.append(boundary)
    return boundaries
def drrg_decode(edges, scores, text_comps, link_thr):
    """Merge text components and construct boundaries of text instances.

    Args:
        edges (ndarray): The edge array of shape N * 2, each row is a node
            index pair that makes up an edge in graph.
        scores (ndarray): The edge score array.
        text_comps (ndarray): The text components.
        link_thr (float): The edge score threshold.

    Returns:
        boundaries (List[list[float]]): The predicted boundaries of text
            instances.
    """
    assert len(edges) == len(scores)
    assert text_comps.ndim == 2
    assert text_comps.shape[1] == 9
    assert isinstance(link_thr, float)
    # build the component graph, cluster it, drop isolated components,
    # then trace each cluster into a polygon boundary
    vertices, score_dict = graph_propagation(edges, scores, text_comps)
    clusters = connected_components(vertices, score_dict, link_thr)
    pred_labels = clusters2labels(clusters, text_comps.shape[0])
    text_comps, pred_labels = remove_single(text_comps, pred_labels)
    boundaries = comps2boundaries(text_comps, pred_labels)
    return boundaries
| [
"numpy.clip",
"numpy.sqrt",
"numpy.hstack",
"numpy.array",
"shapely.geometry.Polygon",
"cv2.approxPolyDP",
"numpy.linalg.norm",
"numpy.arange",
"numpy.mean",
"mmocr.core.evaluation.utils.boundary_iou",
"numpy.where",
"numpy.delete",
"numpy.sort",
"cv2.arcLength",
"mmcv.ops.pixel_group",
... | [((2032, 2062), 'torch.sigmoid', 'torch.sigmoid', (['preds[:2, :, :]'], {}), '(preds[:2, :, :])\n', (2045, 2062), False, 'import torch\n'), ((2571, 2606), 'numpy.zeros', 'np.zeros', (['text.shape'], {'dtype': '"""uint8"""'}), "(text.shape, dtype='uint8')\n", (2579, 2606), True, 'import numpy as np\n'), ((2611, 2663), 'cv2.drawContours', 'cv2.drawContours', (['kernel_contours', 'contours', '(-1)', '(255)'], {}), '(kernel_contours, contours, -1, 255)\n', (2627, 2663), False, 'import cv2\n'), ((2682, 2789), 'mmcv.ops.pixel_group', 'pixel_group', (['text_score', 'text', 'embeddings', 'labels', 'kernel_contours', 'region_num', 'min_text_avg_confidence'], {}), '(text_score, text, embeddings, labels, kernel_contours,\n region_num, min_text_avg_confidence)\n', (2693, 2789), False, 'from mmcv.ops import contour_expand, pixel_group\n'), ((4469, 4489), 'torch.sigmoid', 'torch.sigmoid', (['preds'], {}), '(preds)\n', (4482, 4489), False, 'import torch\n'), ((4833, 4890), 'cv2.connectedComponents', 'cv2.connectedComponents', (['kernel_masks[-1]'], {'connectivity': '(4)'}), '(kernel_masks[-1], connectivity=4)\n', (4856, 4890), False, 'import cv2\n'), ((4964, 5029), 'mmcv.ops.contour_expand', 'contour_expand', (['kernel_masks', 'labels', 'min_kernel_area', 'region_num'], {}), '(kernel_masks, labels, min_kernel_area, region_num)\n', (4978, 5029), False, 'from mmcv.ops import contour_expand, pixel_group\n'), ((5043, 5059), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (5051, 5059), True, 'import numpy as np\n'), ((5076, 5090), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (5082, 5090), True, 'import numpy as np\n'), ((6086, 6146), 'numpy.zeros', 'np.zeros', (['(ymax - ymin + 1, xmax - xmin + 1)'], {'dtype': 'np.uint8'}), '((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)\n', (6094, 6146), True, 'import numpy as np\n'), ((6394, 6406), 'shapely.geometry.Polygon', 'Polygon', (['box'], {}), '(box)\n', (6401, 6406), False, 'from shapely.geometry import 
Polygon\n'), ((6474, 6501), 'pyclipper.PyclipperOffset', 'pyclipper.PyclipperOffset', ([], {}), '()\n', (6499, 6501), False, 'import pyclipper\n'), ((9147, 9181), 'numpy.zeros', 'np.zeros', (['(h + 2, w + 2)', 'np.uint8'], {}), '((h + 2, w + 2), np.uint8)\n', (9155, 9181), True, 'import numpy as np\n'), ((9243, 9277), 'numpy.zeros', 'np.zeros', (['(h + 4, w + 4)', 'np.uint8'], {}), '((h + 4, w + 4), np.uint8)\n', (9251, 9277), True, 'import numpy as np\n'), ((9283, 9321), 'cv2.floodFill', 'cv2.floodFill', (['canvas', 'mask', '(0, 0)', '(1)'], {}), '(canvas, mask, (0, 0), 1)\n', (9296, 9321), False, 'import cv2\n'), ((9779, 9797), 'numpy.any', 'np.any', (['step_flags'], {}), '(step_flags)\n', (9785, 9797), True, 'import numpy as np\n'), ((10244, 10262), 'numpy.any', 'np.any', (['step_flags'], {}), '(step_flags)\n', (10250, 10262), True, 'import numpy as np\n'), ((10652, 10701), 'numpy.array', 'np.array', (['((top_yx + bot_yx) * 0.5)'], {'dtype': 'np.int32'}), '((top_yx + bot_yx) * 0.5, dtype=np.int32)\n', (10660, 10701), True, 'import numpy as np\n'), ((11537, 11560), 'numpy.vstack', 'np.vstack', (['merged_disks'], {}), '(merged_disks)\n', (11546, 11560), True, 'import numpy as np\n'), ((12850, 12880), 'torch.sigmoid', 'torch.sigmoid', (['preds[:2, :, :]'], {}), '(preds[:2, :, :])\n', (12863, 12880), False, 'import torch\n'), ((13266, 13320), 'numpy.sqrt', 'np.sqrt', (['(1.0 / (pred_sin ** 2 + pred_cos ** 2 + 1e-08))'], {}), '(1.0 / (pred_sin ** 2 + pred_cos ** 2 + 1e-08))\n', (13273, 13320), True, 'import numpy as np\n'), ((13474, 13548), 'cv2.findContours', 'cv2.findContours', (['pred_center_mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(pred_center_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (13490, 13548), False, 'import cv2\n'), ((17275, 17297), 'numpy.zeros_like', 'np.zeros_like', (['tr_mask'], {}), '(tr_mask)\n', (17288, 17297), True, 'import numpy as np\n'), ((21223, 21245), 'numpy.sort', 'np.sort', (['edges'], {'axis': '(1)'}), '(edges, 
axis=1)\n', (21230, 21245), True, 'import numpy as np\n'), ((22010, 22035), 'numpy.arange', 'np.arange', (['nodes.shape[0]'], {}), '(nodes.shape[0])\n', (22019, 22035), True, 'import numpy as np\n'), ((24160, 24179), 'numpy.zeros', 'np.zeros', (['num_nodes'], {}), '(num_nodes)\n', (24168, 24179), True, 'import numpy as np\n'), ((25018, 25049), 'numpy.zeros_like', 'np.zeros_like', (['comp_pred_labels'], {}), '(comp_pred_labels)\n', (25031, 25049), True, 'import numpy as np\n'), ((25068, 25095), 'numpy.unique', 'np.unique', (['comp_pred_labels'], {}), '(comp_pred_labels)\n', (25077, 25095), True, 'import numpy as np\n'), ((27331, 27379), 'functools.reduce', 'functools.reduce', (['operator.concat', 'shortest_path'], {}), '(operator.concat, shortest_path)\n', (27347, 27379), False, 'import functools\n'), ((28610, 28645), 'numpy.array', 'np.array', (['(top_line + bot_line[::-1])'], {}), '(top_line + bot_line[::-1])\n', (28618, 28645), True, 'import numpy as np\n'), ((3272, 3332), 'mmocr.core.points2boundary', 'points2boundary', (['text_point', 'text_repr_type', 'text_confidence'], {}), '(text_point, text_repr_type, text_confidence)\n', (3287, 3332), False, 'from mmocr.core import points2boundary\n'), ((5281, 5308), 'numpy.mean', 'np.mean', (['score[labels == i]'], {}), '(score[labels == i])\n', (5288, 5308), True, 'import numpy as np\n'), ((5478, 5533), 'mmocr.core.points2boundary', 'points2boundary', (['points', 'text_repr_type', 'score_instance'], {}), '(points, text_repr_type, score_instance)\n', (5493, 5533), False, 'from mmocr.core import points2boundary\n'), ((6290, 6342), 'cv2.mean', 'cv2.mean', (['bitmap[ymin:ymax + 1, xmin:xmax + 1]', 'mask'], {}), '(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)\n', (6298, 6342), False, 'import cv2\n'), ((8165, 8202), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['poly', 'epsilon', '(True)'], {}), '(poly, epsilon, True)\n', (8181, 8202), False, 'import cv2\n'), ((9733, 9768), 'numpy.hstack', 'np.hstack', (['[normal_sin, 
normal_cos]'], {}), '([normal_sin, normal_cos])\n', (9742, 9768), True, 'import numpy as np\n'), ((9817, 9856), 'numpy.array', 'np.array', (['(top_yx + step)'], {'dtype': 'np.int32'}), '(top_yx + step, dtype=np.int32)\n', (9825, 9856), True, 'import numpy as np\n'), ((10282, 10321), 'numpy.array', 'np.array', (['(bot_yx - step)'], {'dtype': 'np.int32'}), '(bot_yx - step, dtype=np.int32)\n', (10290, 10321), True, 'import numpy as np\n'), ((11047, 11082), 'numpy.linalg.norm', 'norm', (['(xy[i] - xy[order[1:]])'], {'axis': '(1)'}), '(xy[i] - xy[order[1:]], axis=1)\n', (11051, 11082), False, 'from numpy.linalg import norm\n'), ((13755, 13788), 'numpy.zeros', 'np.zeros', (['mask_sz'], {'dtype': 'np.uint8'}), '(mask_sz, dtype=np.uint8)\n', (13763, 13788), True, 'import numpy as np\n'), ((13797, 13857), 'cv2.drawContours', 'cv2.drawContours', (['instance_center_mask', '[contour]', '(-1)', '(1)', '(-1)'], {}), '(instance_center_mask, [contour], -1, 1, -1)\n', (13813, 13857), False, 'import cv2\n'), ((13877, 13910), 'skimage.morphology.skeletonize', 'skeletonize', (['instance_center_mask'], {}), '(instance_center_mask)\n', (13888, 13910), False, 'from skimage.morphology import skeletonize\n'), ((13933, 13958), 'numpy.argwhere', 'np.argwhere', (['(skeleton > 0)'], {}), '(skeleton > 0)\n', (13944, 13958), True, 'import numpy as np\n'), ((14648, 14681), 'numpy.zeros', 'np.zeros', (['mask_sz'], {'dtype': 'np.uint8'}), '(mask_sz, dtype=np.uint8)\n', (14656, 14681), True, 'import numpy as np\n'), ((14862, 14933), 'cv2.findContours', 'cv2.findContours', (['instance_mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(instance_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (14878, 14933), False, 'import cv2\n'), ((17402, 17447), 'cv2.drawContours', 'cv2.drawContours', (['deal_map', '[cont]', '(-1)', '(1)', '(-1)'], {}), '(deal_map, [cont], -1, 1, -1)\n', (17418, 17447), False, 'import cv2\n'), ((17544, 17567), 'numpy.argwhere', 'np.argwhere', (['score_mask'], {}), 
'(score_mask)\n', (17555, 17567), True, 'import numpy as np\n'), ((18793, 18813), 'numpy.delete', 'np.delete', (['index', '(-1)'], {}), '(index, -1)\n', (18802, 18813), True, 'import numpy as np\n'), ((19007, 19037), 'numpy.where', 'np.where', (['(iou_list > threshold)'], {}), '(iou_list > threshold)\n', (19015, 19037), True, 'import numpy as np\n'), ((19054, 19084), 'numpy.delete', 'np.delete', (['index', 'remove_index'], {}), '(index, remove_index)\n', (19063, 19084), True, 'import numpy as np\n'), ((19792, 19799), 'numpy.fft.ifft', 'ifft', (['a'], {}), '(a)\n', (19796, 19799), False, 'from numpy.fft import ifft\n'), ((30206, 30247), 'numpy.where', 'np.where', (['(comp_pred_labels == cluster_ind)'], {}), '(comp_pred_labels == cluster_ind)\n', (30214, 30247), True, 'import numpy as np\n'), ((30374, 30416), 'numpy.mean', 'np.mean', (['text_comps[cluster_comp_inds, -1]'], {}), '(text_comps[cluster_comp_inds, -1])\n', (30381, 30416), True, 'import numpy as np\n'), ((8122, 8147), 'cv2.arcLength', 'cv2.arcLength', (['poly', '(True)'], {}), '(poly, True)\n', (8135, 8147), False, 'import cv2\n'), ((8629, 8689), 'mmocr.core.points2boundary', 'points2boundary', (['poly', 'text_repr_type', 'score', 'min_text_width'], {}), '(poly, text_repr_type, score, min_text_width)\n', (8644, 8689), False, 'from mmocr.core import points2boundary\n'), ((11288, 11321), 'numpy.hstack', 'np.hstack', (['[i, order[merge_inds]]'], {}), '([i, order[merge_inds]])\n', (11297, 11321), True, 'import numpy as np\n'), ((13659, 13683), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (13674, 13683), False, 'import cv2\n'), ((14990, 15029), 'numpy.sum', 'np.sum', (['(instance_mask * pred_text_score)'], {}), '(instance_mask * pred_text_score)\n', (14996, 15029), True, 'import numpy as np\n'), ((18337, 18352), 'numpy.int0', 'np.int0', (['points'], {}), '(points)\n', (18344, 18352), True, 'import numpy as np\n'), ((18962, 18983), 'mmocr.core.evaluation.utils.boundary_iou', 
'boundary_iou', (['A', 'B', '(1)'], {}), '(A, B, 1)\n', (18974, 18983), False, 'from mmocr.core.evaluation.utils import boundary_iou\n'), ((21474, 21495), 'numpy.mean', 'np.mean', (['box1'], {'axis': '(0)'}), '(box1, axis=0)\n', (21481, 21495), True, 'import numpy as np\n'), ((21518, 21539), 'numpy.mean', 'np.mean', (['box2'], {'axis': '(0)'}), '(box2, axis=0)\n', (21525, 21539), True, 'import numpy as np\n'), ((21563, 21586), 'numpy.linalg.norm', 'norm', (['(center1 - center2)'], {}), '(center1 - center2)\n', (21567, 21586), False, 'from numpy.linalg import norm\n'), ((25194, 25220), 'numpy.sum', 'np.sum', (['current_label_flag'], {}), '(current_label_flag)\n', (25200, 25220), True, 'import numpy as np\n'), ((3020, 3051), 'numpy.array', 'np.array', (['text_point'], {'dtype': 'int'}), '(text_point, dtype=int)\n', (3028, 3051), True, 'import numpy as np\n'), ((11202, 11222), 'numpy.where', 'np.where', (['(d <= d_thr)'], {}), '(d <= d_thr)\n', (11210, 11222), True, 'import numpy as np\n'), ((11354, 11389), 'numpy.mean', 'np.mean', (['disks[merge_order]'], {'axis': '(0)'}), '(disks[merge_order], axis=0)\n', (11361, 11389), True, 'import numpy as np\n'), ((11463, 11482), 'numpy.where', 'np.where', (['(d > d_thr)'], {}), '(d > d_thr)\n', (11471, 11482), True, 'import numpy as np\n'), ((14509, 14534), 'numpy.fliplr', 'np.fliplr', (['center_line_yx'], {}), '(center_line_yx)\n', (14518, 14534), True, 'import numpy as np\n'), ((15046, 15067), 'numpy.sum', 'np.sum', (['instance_mask'], {}), '(instance_mask)\n', (15052, 15067), True, 'import numpy as np\n'), ((15110, 15138), 'cv2.contourArea', 'cv2.contourArea', (['contours[0]'], {}), '(contours[0])\n', (15125, 15138), False, 'import cv2\n'), ((18293, 18314), 'cv2.minAreaRect', 'cv2.minAreaRect', (['poly'], {}), '(poly)\n', (18308, 18314), False, 'import cv2\n'), ((21955, 21968), 'numpy.max', 'np.max', (['nodes'], {}), '(nodes)\n', (21961, 21968), True, 'import numpy as np\n'), ((30146, 30170), 'numpy.max', 'np.max', 
(['comp_pred_labels'], {}), '(comp_pred_labels)\n', (30152, 30170), True, 'import numpy as np\n'), ((10019, 10044), 'numpy.clip', 'np.clip', (['next_y', '(0)', '(h - 1)'], {}), '(next_y, 0, h - 1)\n', (10026, 10044), True, 'import numpy as np\n'), ((10085, 10110), 'numpy.clip', 'np.clip', (['next_x', '(0)', '(w - 1)'], {}), '(next_x, 0, w - 1)\n', (10092, 10110), True, 'import numpy as np\n'), ((10484, 10509), 'numpy.clip', 'np.clip', (['next_y', '(0)', '(h - 1)'], {}), '(next_y, 0, h - 1)\n', (10491, 10509), True, 'import numpy as np\n'), ((10550, 10575), 'numpy.clip', 'np.clip', (['next_x', '(0)', '(w - 1)'], {}), '(next_x, 0, w - 1)\n', (10557, 10575), True, 'import numpy as np\n'), ((17910, 17938), 'numpy.hstack', 'np.hstack', (['(polygons, score)'], {}), '((polygons, score))\n', (17919, 17938), True, 'import numpy as np\n'), ((5175, 5196), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (5183, 5196), True, 'import numpy as np\n'), ((25252, 25280), 'numpy.where', 'np.where', (['current_label_flag'], {}), '(current_label_flag)\n', (25260, 25280), True, 'import numpy as np\n'), ((18167, 18190), 'numpy.array', 'np.array', (['boundary[:-1]'], {}), '(boundary[:-1])\n', (18175, 18190), True, 'import numpy as np\n'), ((30546, 30578), 'numpy.mean', 'np.mean', (['text_comp_boxes'], {'axis': '(1)'}), '(text_comp_boxes, axis=1)\n', (30553, 30578), True, 'import numpy as np\n'), ((30760, 30803), 'numpy.mean', 'np.mean', (['text_comp_boxes[:, 0:2, :]'], {'axis': '(1)'}), '(text_comp_boxes[:, 0:2, :], axis=1)\n', (30767, 30803), True, 'import numpy as np\n'), ((30870, 30913), 'numpy.mean', 'np.mean', (['text_comp_boxes[:, 2:4, :]'], {'axis': '(1)'}), '(text_comp_boxes[:, 2:4, :], axis=1)\n', (30877, 30913), True, 'import numpy as np\n')] |
import pyfiglet
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='white')
from collections import defaultdict
import numpy as np
import click
from tabulate import tabulate
import logging
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
from nba_matchup import get_league, simulate_h2h, CURRENT_WEEK, hill_climb, visualize_matchup, get_free_agents, simulated_annealing
league = get_league()
def winning_prob(cats, points, scores, num_samples):
unique, nums = np.unique(points, return_counts=True)
counts = defaultdict(int)
counts.update(dict(zip(unique, nums)))
return sum([counts[p] for p in range(5, 10)]) / num_samples
def ev(cats, points, scores, num_samples):
return points.mean()
@click.command()
@click.option('--team1', type=str, default=None)
@click.option('--team2', type=str, default=None)
@click.option('--num_days', type=int, default=30)
@click.option('--num_samples', type=int, default=50000)
@click.option('--week', type=int, default=CURRENT_WEEK)
@click.option('--num_fa', type=int, default=0)
@click.option('--num_iters', type=int, default=100)
@click.option('--ignore_player', type=str, multiple=True)
@click.option('--half_life', type=float, default=14)
@click.option('--metric', type=str, default='winning_probability')
@click.option('--ignore_injured', is_flag=True)
def main(team1, team2, num_days, num_samples, week, num_fa, num_iters,
ignore_player, half_life, metric, ignore_injured):
league = get_league()
decay_rate = np.log(2) / half_life
# if week == 19: week = 18
# if week >= 19:
# week -= 1
if team1 is None:
team1 = league.current_team
else:
team1 = league.team_by_owner(team1)
if team2 is None:
team2 = league.get_matchup(team1, week=week)
else:
team2 = league.team_by_owner(team2)
pyfiglet.print_figlet("%s vs. %s" % (team1.manager_name,
team2.manager_name), font='banner',
width=160)
if week:
pyfiglet.print_figlet("Week %u" % week, font='big')
if metric == 'ev':
metric_fn = ev
else:
metric_fn = winning_prob
def roster_score(roster):
cats, points, scores, _ = simulate_h2h(roster,
team2.roster(week=week),
num_days=num_days, num_samples=num_samples,
week=week, decay_rate=decay_rate)
return metric_fn(cats, points, scores, num_samples)
def reverse_roster_score(roster):
cats, points, scores, _ = simulate_h2h(roster,
team1.roster(week=week),
num_days=num_days, num_samples=num_samples,
week=week, decay_rate=decay_rate)
return metric_fn(cats, points, scores, num_samples)
print("%s's roster:" % team1.manager_name, roster_score(team1.roster(week=week)))
print(tabulate([
[position, player.name] for player, position in
team1.roster(week=week).positions.items() if position not in {"BN", "IL"}
]))
print("%s's roster:" % team2.manager_name, reverse_roster_score(team2.roster(week=week)))
print(tabulate([
[position, player.name] for player, position in
team2.roster(week=week).positions.items() if position not in {"BN", "IL"}
]))
print("Optimizing %s's lineup" % team1.manager_name)
print("===========================================")
roster = team1.roster(week=week)
old_roster = roster
print("Adding free agents:")
for agent in get_free_agents(num_fa):
print(agent.name)
roster = roster.add(agent, "BN")
team1.set_roster(roster)
print("Ignoring players:", ", ".join(ignore_player))
scores = []
for roster, score in simulated_annealing(roster, roster_score, ignore_players={team1.roster(week=week).player_by_name(n) for n in ignore_player},
num_steps=num_iters,
ignore_injured=ignore_injured):
scores.append(score)
# print(tabulate([
# [position, player.name] for player, position in
# roster.positions.items() if position not in {"BN", "IL"}
# ]))
print("%s's optimized roster:" % team1.manager_name, score)
print(tabulate([
[position, player.name] for player, position in
roster.positions.items() if position not in {"BN", "IL"}
]))
def team_generator():
for r in [old_roster, roster]:
team1.set_roster(r)
yield team1
projections = visualize_matchup(team_generator(), team2,
num_days=num_days, num_samples=100000,
week=week, decay_rate=decay_rate,
show_plots=False)
with pd.option_context('display.max_rows', None, 'display.max_columns',
None, 'display.expand_frame_repr', False):
for i, team in enumerate([team1, team2]):
print("===========================================")
print("%s's projections:" % team.manager_name)
print(projections[1][i].round(2))
plt.figure()
plt.plot(scores)
plt.show()
if __name__ == "__main__":
main()
| [
"logging.basicConfig",
"logging.getLogger",
"seaborn.set",
"numpy.unique",
"matplotlib.pyplot.show",
"click.option",
"pandas.option_context",
"matplotlib.pyplot.plot",
"numpy.log",
"nba_matchup.get_league",
"pyfiglet.print_figlet",
"collections.defaultdict",
"matplotlib.pyplot.figure",
"cl... | [((90, 112), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""'}), "(style='white')\n", (97, 112), True, 'import seaborn as sns\n'), ((226, 247), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (245, 247), False, 'import logging\n'), ((432, 444), 'nba_matchup.get_league', 'get_league', ([], {}), '()\n', (442, 444), False, 'from nba_matchup import get_league, simulate_h2h, CURRENT_WEEK, hill_climb, visualize_matchup, get_free_agents, simulated_annealing\n'), ((764, 779), 'click.command', 'click.command', ([], {}), '()\n', (777, 779), False, 'import click\n'), ((781, 828), 'click.option', 'click.option', (['"""--team1"""'], {'type': 'str', 'default': 'None'}), "('--team1', type=str, default=None)\n", (793, 828), False, 'import click\n'), ((830, 877), 'click.option', 'click.option', (['"""--team2"""'], {'type': 'str', 'default': 'None'}), "('--team2', type=str, default=None)\n", (842, 877), False, 'import click\n'), ((879, 927), 'click.option', 'click.option', (['"""--num_days"""'], {'type': 'int', 'default': '(30)'}), "('--num_days', type=int, default=30)\n", (891, 927), False, 'import click\n'), ((929, 983), 'click.option', 'click.option', (['"""--num_samples"""'], {'type': 'int', 'default': '(50000)'}), "('--num_samples', type=int, default=50000)\n", (941, 983), False, 'import click\n'), ((985, 1039), 'click.option', 'click.option', (['"""--week"""'], {'type': 'int', 'default': 'CURRENT_WEEK'}), "('--week', type=int, default=CURRENT_WEEK)\n", (997, 1039), False, 'import click\n'), ((1041, 1086), 'click.option', 'click.option', (['"""--num_fa"""'], {'type': 'int', 'default': '(0)'}), "('--num_fa', type=int, default=0)\n", (1053, 1086), False, 'import click\n'), ((1088, 1138), 'click.option', 'click.option', (['"""--num_iters"""'], {'type': 'int', 'default': '(100)'}), "('--num_iters', type=int, default=100)\n", (1100, 1138), False, 'import click\n'), ((1140, 1196), 'click.option', 'click.option', (['"""--ignore_player"""'], {'type': 
'str', 'multiple': '(True)'}), "('--ignore_player', type=str, multiple=True)\n", (1152, 1196), False, 'import click\n'), ((1198, 1249), 'click.option', 'click.option', (['"""--half_life"""'], {'type': 'float', 'default': '(14)'}), "('--half_life', type=float, default=14)\n", (1210, 1249), False, 'import click\n'), ((1251, 1316), 'click.option', 'click.option', (['"""--metric"""'], {'type': 'str', 'default': '"""winning_probability"""'}), "('--metric', type=str, default='winning_probability')\n", (1263, 1316), False, 'import click\n'), ((1318, 1364), 'click.option', 'click.option', (['"""--ignore_injured"""'], {'is_flag': '(True)'}), "('--ignore_injured', is_flag=True)\n", (1330, 1364), False, 'import click\n'), ((518, 555), 'numpy.unique', 'np.unique', (['points'], {'return_counts': '(True)'}), '(points, return_counts=True)\n', (527, 555), True, 'import numpy as np\n'), ((569, 585), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (580, 585), False, 'from collections import defaultdict\n'), ((1509, 1521), 'nba_matchup.get_league', 'get_league', ([], {}), '()\n', (1519, 1521), False, 'from nba_matchup import get_league, simulate_h2h, CURRENT_WEEK, hill_climb, visualize_matchup, get_free_agents, simulated_annealing\n'), ((1879, 1987), 'pyfiglet.print_figlet', 'pyfiglet.print_figlet', (["('%s vs. %s' % (team1.manager_name, team2.manager_name))"], {'font': '"""banner"""', 'width': '(160)'}), "('%s vs. 
%s' % (team1.manager_name, team2.manager_name\n ), font='banner', width=160)\n", (1900, 1987), False, 'import pyfiglet\n'), ((3626, 3649), 'nba_matchup.get_free_agents', 'get_free_agents', (['num_fa'], {}), '(num_fa)\n', (3641, 3649), False, 'from nba_matchup import get_league, simulate_h2h, CURRENT_WEEK, hill_climb, visualize_matchup, get_free_agents, simulated_annealing\n'), ((5237, 5249), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5247, 5249), True, 'import matplotlib.pyplot as plt\n'), ((5254, 5270), 'matplotlib.pyplot.plot', 'plt.plot', (['scores'], {}), '(scores)\n', (5262, 5270), True, 'import matplotlib.pyplot as plt\n'), ((5275, 5285), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5283, 5285), True, 'import matplotlib.pyplot as plt\n'), ((248, 267), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (265, 267), False, 'import logging\n'), ((1539, 1548), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1545, 1548), True, 'import numpy as np\n'), ((2074, 2125), 'pyfiglet.print_figlet', 'pyfiglet.print_figlet', (["('Week %u' % week)"], {'font': '"""big"""'}), "('Week %u' % week, font='big')\n", (2095, 2125), False, 'import pyfiglet\n'), ((4876, 4988), 'pandas.option_context', 'pd.option_context', (['"""display.max_rows"""', 'None', '"""display.max_columns"""', 'None', '"""display.expand_frame_repr"""', '(False)'], {}), "('display.max_rows', None, 'display.max_columns', None,\n 'display.expand_frame_repr', False)\n", (4893, 4988), True, 'import pandas as pd\n')] |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import time
import numpy as np
from xt.model.tf_compat import tf
from xt.model.model_zeus import XTModelZeus
from xt.model.ppo.default_config import \
LR, BATCH_SIZE, CRITIC_LOSS_COEF, ENTROPY_LOSS, LOSS_CLIPPING, MAX_GRAD_NORM, NUM_SGD_ITER, SUMMARY, VF_CLIP
from zeus.common.util.common import import_config
from zeus.common.util.register import Registers
from zeus import set_backend
from zeus.trainer_api import Trainer
from zeus.common.class_factory import ClassFactory, ClassType
from zeus.trainer.modules.conf.loss import LossConfig
from zeus.trainer.modules.conf.optim import OptimConfig
from zeus.modules.module import Module
from zeus.modules.operators.ops import Relu, Linear, Conv2d, View, softmax, Lambda
from zeus.modules.connections import Sequential
set_backend(backend='tensorflow', device_category='GPU')
@Registers.model
class PpoMlpZeus(XTModelZeus):
"""Docstring for ActorNetwork."""
def __init__(self, model_info):
model_config = model_info.get('model_config', None)
import_config(globals(), model_config)
self.state_dim = model_info['state_dim']
self.action_dim = model_info['action_dim']
self.action_type = model_config.get('action_type')
self.num_sgd_iter = model_config.get('NUM_SGD_ITER', NUM_SGD_ITER)
super().__init__(model_info)
def create_model(self, model_info):
zeus_model = PpoMlpNet(state_dim=self.state_dim, action_dim=self.action_dim)
LossConfig.type = 'ppo_loss'
OptimConfig.type = 'Adam'
OptimConfig.params.update({'lr': LR})
loss_input = dict()
loss_input['inputs'] = [{"name": "input_state", "type": "float32", "shape": self.state_dim}]
loss_input['labels'] = [{"name": "old_v", "type": "float32", "shape": 1}]
loss_input['labels'].append({"name": "target_v", "type": "float32", "shape": 1})
loss_input['labels'].append({"name": "old_p", "type": "float32", "shape": self.action_dim})
loss_input['labels'].append({"name": "target_p", "type": "int32", "shape": 1})
loss_input['labels'].append({"name": "adv", "type": "float32", "shape": 1})
model = Trainer(model=zeus_model, lazy_build=False, loss_input=loss_input)
return model
def train(self, state, label):
nbatch_train = BATCH_SIZE
nbatch = state[0].shape[0]
inds = np.arange(nbatch)
loss_val = []
start_time = time.time()
for _ in range(self.num_sgd_iter):
# Randomize the indexes
np.random.shuffle(inds)
# 0 to batch_size with batch_train_size step
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
inputs = [state[0][mbinds]]
action = np.expand_dims(label[0][mbinds], -1)
labels = [label[3][mbinds], label[4][mbinds], label[1][mbinds],
action, label[2][mbinds]]
loss = self.model.train(inputs, labels)
loss_val.append(np.mean(loss))
return np.mean(loss_val)
def predict(self, state):
"""Predict state."""
prob, logit, value = self.model.predict(state)
action = np.random.choice(self.action_dim, p=np.nan_to_num(prob[0]))
action = np.array([action])
return [action, logit, value]
class PpoMlpNet(Module):
"""Create DQN net with FineGrainedSpace."""
def __init__(self, **descript):
"""Create layers."""
super().__init__()
state_dim = descript.get("state_dim")
action_dim = descript.get("action_dim")
self.fc1 = Sequential(Linear(64, 64), Linear(64, action_dim))
self.fc2 = Sequential(Linear(64, 64), Linear(64, 1))
def __call__(self, inputs):
"""Override compile function, conect models into a seq."""
logit = self.fc1(inputs)
value = self.fc2(inputs)
prob = softmax(logit)
return prob, logit, value
@ClassFactory.register(ClassType.LOSS, 'ppo_loss')
def ppo_loss_zeus(logits, labels):
out_p, out_logits, out_v = logits
old_v, target_v, old_logits, action, adv = labels
loss = CRITIC_LOSS_COEF * value_loss(target_v, out_v, old_v)
loss += actor_loss_with_entropy(adv, old_logits, action, out_logits)
return loss
def value_loss(target_v, out_v, old_v):
"""Compute value loss for PPO."""
vpredclipped = old_v + tf.clip_by_value(out_v - old_v, -VF_CLIP, VF_CLIP)
vf_losses1 = tf.square(out_v - target_v)
vf_losses2 = tf.square(vpredclipped - target_v)
vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
return vf_loss
def actor_loss_with_entropy(adv, old_logits, behavior_action, out_logits):
"""Calculate actor loss with entropy."""
old_log_p = neglog_prob(behavior_action, old_logits)
action_log_prob = neglog_prob(behavior_action, out_logits)
ratio = tf.exp(action_log_prob - old_log_p)
surr_loss_1 = ratio * adv
surr_loss_2 = tf.clip_by_value(ratio, 1.0 - LOSS_CLIPPING, 1.0 + LOSS_CLIPPING) * adv
surr_loss = tf.reduce_mean(tf.minimum(surr_loss_1, surr_loss_2))
ent = entropy(out_logits)
ent = tf.reduce_mean(ent)
return -surr_loss - ENTROPY_LOSS * ent
def neglog_prob(x, logits):
size = logits.shape[-1]
x = tf.one_hot(x, size)
neglogp = tf.nn.softmax_cross_entropy_with_logits_v2(labels=x, logits=logits)
return -tf.expand_dims(neglogp, axis=-1)
def entropy(logits):
rescaled_logits = logits - tf.reduce_max(logits, axis=-1, keepdims=True)
exp_logits = tf.exp(rescaled_logits)
z = tf.reduce_sum(exp_logits, axis=-1, keepdims=True)
p = exp_logits / z
return tf.reduce_sum(p * (tf.log(z) - rescaled_logits), axis=-1, keepdims=True)
| [
"zeus.modules.operators.ops.Linear",
"numpy.array",
"xt.model.tf_compat.tf.reduce_max",
"xt.model.tf_compat.tf.one_hot",
"numpy.arange",
"xt.model.tf_compat.tf.reduce_sum",
"numpy.mean",
"xt.model.tf_compat.tf.exp",
"zeus.set_backend",
"xt.model.tf_compat.tf.maximum",
"xt.model.tf_compat.tf.redu... | [((1913, 1969), 'zeus.set_backend', 'set_backend', ([], {'backend': '"""tensorflow"""', 'device_category': '"""GPU"""'}), "(backend='tensorflow', device_category='GPU')\n", (1924, 1969), False, 'from zeus import set_backend\n'), ((5173, 5222), 'zeus.common.class_factory.ClassFactory.register', 'ClassFactory.register', (['ClassType.LOSS', '"""ppo_loss"""'], {}), "(ClassType.LOSS, 'ppo_loss')\n", (5194, 5222), False, 'from zeus.common.class_factory import ClassFactory, ClassType\n'), ((5680, 5707), 'xt.model.tf_compat.tf.square', 'tf.square', (['(out_v - target_v)'], {}), '(out_v - target_v)\n', (5689, 5707), False, 'from xt.model.tf_compat import tf\n'), ((5725, 5759), 'xt.model.tf_compat.tf.square', 'tf.square', (['(vpredclipped - target_v)'], {}), '(vpredclipped - target_v)\n', (5734, 5759), False, 'from xt.model.tf_compat import tf\n'), ((6103, 6138), 'xt.model.tf_compat.tf.exp', 'tf.exp', (['(action_log_prob - old_log_p)'], {}), '(action_log_prob - old_log_p)\n', (6109, 6138), False, 'from xt.model.tf_compat import tf\n'), ((6370, 6389), 'xt.model.tf_compat.tf.reduce_mean', 'tf.reduce_mean', (['ent'], {}), '(ent)\n', (6384, 6389), False, 'from xt.model.tf_compat import tf\n'), ((6500, 6519), 'xt.model.tf_compat.tf.one_hot', 'tf.one_hot', (['x', 'size'], {}), '(x, size)\n', (6510, 6519), False, 'from xt.model.tf_compat import tf\n'), ((6534, 6601), 'xt.model.tf_compat.tf.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'labels': 'x', 'logits': 'logits'}), '(labels=x, logits=logits)\n', (6576, 6601), False, 'from xt.model.tf_compat import tf\n'), ((6764, 6787), 'xt.model.tf_compat.tf.exp', 'tf.exp', (['rescaled_logits'], {}), '(rescaled_logits)\n', (6770, 6787), False, 'from xt.model.tf_compat import tf\n'), ((6796, 6845), 'xt.model.tf_compat.tf.reduce_sum', 'tf.reduce_sum', (['exp_logits'], {'axis': '(-1)', 'keepdims': '(True)'}), '(exp_logits, axis=-1, keepdims=True)\n', (6809, 6845), 
False, 'from xt.model.tf_compat import tf\n'), ((2681, 2718), 'zeus.trainer.modules.conf.optim.OptimConfig.params.update', 'OptimConfig.params.update', (["{'lr': LR}"], {}), "({'lr': LR})\n", (2706, 2718), False, 'from zeus.trainer.modules.conf.optim import OptimConfig\n'), ((3308, 3374), 'zeus.trainer_api.Trainer', 'Trainer', ([], {'model': 'zeus_model', 'lazy_build': '(False)', 'loss_input': 'loss_input'}), '(model=zeus_model, lazy_build=False, loss_input=loss_input)\n', (3315, 3374), False, 'from zeus.trainer_api import Trainer\n'), ((3517, 3534), 'numpy.arange', 'np.arange', (['nbatch'], {}), '(nbatch)\n', (3526, 3534), True, 'import numpy as np\n'), ((3578, 3589), 'time.time', 'time.time', ([], {}), '()\n', (3587, 3589), False, 'import time\n'), ((4262, 4279), 'numpy.mean', 'np.mean', (['loss_val'], {}), '(loss_val)\n', (4269, 4279), True, 'import numpy as np\n'), ((4489, 4507), 'numpy.array', 'np.array', (['[action]'], {}), '([action])\n', (4497, 4507), True, 'import numpy as np\n'), ((5121, 5135), 'zeus.modules.operators.ops.softmax', 'softmax', (['logit'], {}), '(logit)\n', (5128, 5135), False, 'from zeus.modules.operators.ops import Relu, Linear, Conv2d, View, softmax, Lambda\n'), ((5612, 5662), 'xt.model.tf_compat.tf.clip_by_value', 'tf.clip_by_value', (['(out_v - old_v)', '(-VF_CLIP)', 'VF_CLIP'], {}), '(out_v - old_v, -VF_CLIP, VF_CLIP)\n', (5628, 5662), False, 'from xt.model.tf_compat import tf\n'), ((6188, 6253), 'xt.model.tf_compat.tf.clip_by_value', 'tf.clip_by_value', (['ratio', '(1.0 - LOSS_CLIPPING)', '(1.0 + LOSS_CLIPPING)'], {}), '(ratio, 1.0 - LOSS_CLIPPING, 1.0 + LOSS_CLIPPING)\n', (6204, 6253), False, 'from xt.model.tf_compat import tf\n'), ((6291, 6327), 'xt.model.tf_compat.tf.minimum', 'tf.minimum', (['surr_loss_1', 'surr_loss_2'], {}), '(surr_loss_1, surr_loss_2)\n', (6301, 6327), False, 'from xt.model.tf_compat import tf\n'), ((6614, 6646), 'xt.model.tf_compat.tf.expand_dims', 'tf.expand_dims', (['neglogp'], {'axis': '(-1)'}), '(neglogp, 
axis=-1)\n', (6628, 6646), False, 'from xt.model.tf_compat import tf\n'), ((6701, 6746), 'xt.model.tf_compat.tf.reduce_max', 'tf.reduce_max', (['logits'], {'axis': '(-1)', 'keepdims': '(True)'}), '(logits, axis=-1, keepdims=True)\n', (6714, 6746), False, 'from xt.model.tf_compat import tf\n'), ((3681, 3704), 'numpy.random.shuffle', 'np.random.shuffle', (['inds'], {}), '(inds)\n', (3698, 3704), True, 'import numpy as np\n'), ((4839, 4853), 'zeus.modules.operators.ops.Linear', 'Linear', (['(64)', '(64)'], {}), '(64, 64)\n', (4845, 4853), False, 'from zeus.modules.operators.ops import Relu, Linear, Conv2d, View, softmax, Lambda\n'), ((4855, 4877), 'zeus.modules.operators.ops.Linear', 'Linear', (['(64)', 'action_dim'], {}), '(64, action_dim)\n', (4861, 4877), False, 'from zeus.modules.operators.ops import Relu, Linear, Conv2d, View, softmax, Lambda\n'), ((4909, 4923), 'zeus.modules.operators.ops.Linear', 'Linear', (['(64)', '(64)'], {}), '(64, 64)\n', (4915, 4923), False, 'from zeus.modules.operators.ops import Relu, Linear, Conv2d, View, softmax, Lambda\n'), ((4925, 4938), 'zeus.modules.operators.ops.Linear', 'Linear', (['(64)', '(1)'], {}), '(64, 1)\n', (4931, 4938), False, 'from zeus.modules.operators.ops import Relu, Linear, Conv2d, View, softmax, Lambda\n'), ((5794, 5828), 'xt.model.tf_compat.tf.maximum', 'tf.maximum', (['vf_losses1', 'vf_losses2'], {}), '(vf_losses1, vf_losses2)\n', (5804, 5828), False, 'from xt.model.tf_compat import tf\n'), ((3973, 4009), 'numpy.expand_dims', 'np.expand_dims', (['label[0][mbinds]', '(-1)'], {}), '(label[0][mbinds], -1)\n', (3987, 4009), True, 'import numpy as np\n'), ((4448, 4470), 'numpy.nan_to_num', 'np.nan_to_num', (['prob[0]'], {}), '(prob[0])\n', (4461, 4470), True, 'import numpy as np\n'), ((6899, 6908), 'xt.model.tf_compat.tf.log', 'tf.log', (['z'], {}), '(z)\n', (6905, 6908), False, 'from xt.model.tf_compat import tf\n'), ((4231, 4244), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (4238, 4244), True, 'import 
numpy as np\n')] |
"""
Read SAS7BDAT files
Based on code written by <NAME>:
https://bitbucket.org/jaredhobbs/sas7bdat
See also:
https://github.com/BioStatMatt/sas7bdat
Partial documentation of the file format:
https://cran.r-project.org/web/packages/sas7bdat/vignettes/sas7bdat.pdf
Reference for binary data compression:
http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
"""
import pandas as pd
from pandas import compat
from pandas.io.common import get_filepath_or_buffer, BaseIterator
import numpy as np
import struct
from .saslib import (_rle_decompress, _rdc_decompress,
process_byte_array_with_data)
_magic = (b"\x00\x00\x00\x00\x00\x00\x00\x00" +
b"\x00\x00\x00\x00\xc2\xea\x81\x60" +
b"\xb3\x14\x11\xcf\xbd\x92\x08\x00" +
b"\x09\xc7\x31\x8c\x18\x1f\x10\x11")
_align_1_checker_value = b'3'
_align_1_offset = 32
_align_1_length = 1
_align_1_value = 4
_u64_byte_checker_value = b'3'
_align_2_offset = 35
_align_2_length = 1
_align_2_value = 4
_endianness_offset = 37
_endianness_length = 1
_platform_offset = 39
_platform_length = 1
_encoding_offset = 70
_encoding_length = 1
_dataset_offset = 92
_dataset_length = 64
_file_type_offset = 156
_file_type_length = 8
_date_created_offset = 164
_date_created_length = 8
_date_modified_offset = 172
_date_modified_length = 8
_header_size_offset = 196
_header_size_length = 4
_page_size_offset = 200
_page_size_length = 4
_page_count_offset = 204
_page_count_length = 4
_sas_release_offset = 216
_sas_release_length = 8
_sas_server_type_offset = 224
_sas_server_type_length = 16
_os_version_number_offset = 240
_os_version_number_length = 16
_os_maker_offset = 256
_os_maker_length = 16
_os_name_offset = 272
_os_name_length = 16
_page_bit_offset_x86 = 16
_page_bit_offset_x64 = 32
_subheader_pointer_length_x86 = 12
_subheader_pointer_length_x64 = 24
_page_type_offset = 0
_page_type_length = 2
_block_count_offset = 2
_block_count_length = 2
_subheader_count_offset = 4
_subheader_count_length = 2
_page_meta_type = 0
_page_data_type = 256
_page_amd_type = 1024
_page_metc_type = 16384
_page_comp_type = -28672
_page_mix_types = [512, 640]
_subheader_pointers_offset = 8
_truncated_subheader_id = 1
_compressed_subheader_id = 4
_compressed_subheader_type = 1
_text_block_size_length = 2
_row_length_offset_multiplier = 5
_row_count_offset_multiplier = 6
_col_count_p1_multiplier = 9
_col_count_p2_multiplier = 10
_row_count_on_mix_page_offset_multiplier = 15
_column_name_pointer_length = 8
_column_name_text_subheader_offset = 0
_column_name_text_subheader_length = 2
_column_name_offset_offset = 2
_column_name_offset_length = 2
_column_name_length_offset = 4
_column_name_length_length = 2
_column_data_offset_offset = 8
_column_data_length_offset = 8
_column_data_length_length = 4
_column_type_offset = 14
_column_type_length = 1
_column_format_text_subheader_index_offset = 22
_column_format_text_subheader_index_length = 2
_column_format_offset_offset = 24
_column_format_offset_length = 2
_column_format_length_offset = 26
_column_format_length_length = 2
_column_label_text_subheader_index_offset = 28
_column_label_text_subheader_index_length = 2
_column_label_offset_offset = 30
_column_label_offset_length = 2
_column_label_length_offset = 32
_column_label_length_length = 2
_rle_compression = 'SASYZCRL'
_rdc_compression = 'SASYZCR2'
_compression_literals = [_rle_compression, _rdc_compression]
# Incomplete list of encodings
_encoding_names = {29: "latin1", 20: "utf-8", 33: "cyrillic", 60: "wlatin2",
61: "wcyrillic", 62: "wlatin1", 90: "ebcdic870"}
# Should be enum
class _index:
    """Symbolic constants naming the known subheader kinds (0..8)."""
    (rowSizeIndex, columnSizeIndex, subheaderCountsIndex, columnTextIndex,
     columnNameIndex, columnAttributesIndex, formatAndLabelIndex,
     columnListIndex, dataSubheaderIndex) = range(9)
# Map raw subheader signature bytes to a subheader kind.  Each kind has
# several spellings: a 4-byte (32-bit file) and an 8-byte (64-bit file)
# form, in both little- and big-endian byte order.
_subheader_signature_to_index = {
    b"\xF7\xF7\xF7\xF7": _index.rowSizeIndex,
    b"\x00\x00\x00\x00\xF7\xF7\xF7\xF7": _index.rowSizeIndex,
    b"\xF7\xF7\xF7\xF7\x00\x00\x00\x00": _index.rowSizeIndex,
    b"\xF7\xF7\xF7\xF7\xFF\xFF\xFB\xFE": _index.rowSizeIndex,
    b"\xF6\xF6\xF6\xF6": _index.columnSizeIndex,
    b"\x00\x00\x00\x00\xF6\xF6\xF6\xF6": _index.columnSizeIndex,
    b"\xF6\xF6\xF6\xF6\x00\x00\x00\x00": _index.columnSizeIndex,
    b"\xF6\xF6\xF6\xF6\xFF\xFF\xFB\xFE": _index.columnSizeIndex,
    b"\x00\xFC\xFF\xFF": _index.subheaderCountsIndex,
    b"\xFF\xFF\xFC\x00": _index.subheaderCountsIndex,
    b"\x00\xFC\xFF\xFF\xFF\xFF\xFF\xFF": _index.subheaderCountsIndex,
    b"\xFF\xFF\xFF\xFF\xFF\xFF\xFC\x00": _index.subheaderCountsIndex,
    b"\xFD\xFF\xFF\xFF": _index.columnTextIndex,
    b"\xFF\xFF\xFF\xFD": _index.columnTextIndex,
    b"\xFD\xFF\xFF\xFF\xFF\xFF\xFF\xFF": _index.columnTextIndex,
    b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD": _index.columnTextIndex,
    b"\xFF\xFF\xFF\xFF": _index.columnNameIndex,
    b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF": _index.columnNameIndex,
    b"\xFC\xFF\xFF\xFF": _index.columnAttributesIndex,
    b"\xFF\xFF\xFF\xFC": _index.columnAttributesIndex,
    b"\xFC\xFF\xFF\xFF\xFF\xFF\xFF\xFF": _index.columnAttributesIndex,
    b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC": _index.columnAttributesIndex,
    b"\xFE\xFB\xFF\xFF": _index.formatAndLabelIndex,
    b"\xFF\xFF\xFB\xFE": _index.formatAndLabelIndex,
    b"\xFE\xFB\xFF\xFF\xFF\xFF\xFF\xFF": _index.formatAndLabelIndex,
    b"\xFF\xFF\xFF\xFF\xFF\xFF\xFB\xFE": _index.formatAndLabelIndex,
    b"\xFE\xFF\xFF\xFF": _index.columnListIndex,
    b"\xFF\xFF\xFF\xFE": _index.columnListIndex,
    b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF": _index.columnListIndex,
    b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE": _index.columnListIndex}
class _subheader_pointer(object):
    """Plain record for one subheader pointer; attributes (offset, length,
    compression, ptype) are assigned ad hoc by the reader."""
class _column(object):
    """Plain record for one column's metadata; attributes (col_id, name,
    label, format, ctype, length) are assigned ad hoc by the reader."""
# SAS7BDAT represents a SAS data file in SAS7BDAT format.
class SAS7BDATReader(BaseIterator):
    """
    Read SAS files in SAS7BDAT format.

    Parameters
    ----------
    path_or_buf : path name or buffer
        Name of SAS file or file-like object pointing to SAS file
        contents.
    index : column identifier, defaults to None
        Column to use as index.
    convert_dates : boolean, defaults to True
        Attempt to convert dates to Pandas datetime values. Note all
        SAS date formats are supported.
    blank_missing : boolean, defaults to True
        Convert empty strings to missing values (SAS uses blanks to
        indicate missing character variables).
    chunksize : int, defaults to None
        Return SAS7BDATReader object for iterations, returns chunks
        with given number of lines.
    encoding : string, defaults to None
        String encoding. If None, text variables are left as raw bytes.
    """

    def __init__(self, path_or_buf, index=None, convert_dates=True,
                 blank_missing=True, chunksize=None, encoding=None):
        self.index = index
        self.convert_dates = convert_dates
        self.blank_missing = blank_missing
        self.chunksize = chunksize
        self.encoding = encoding
        self.compression = ""
        self.column_names_strings = []
        self.column_names = []
        self.column_types = []
        self.column_formats = []
        self.columns = []

        self._current_page_data_subheader_pointers = []
        self._cached_page = None
        self._column_data_lengths = []
        self._column_data_offsets = []
        # NOTE: a duplicate assignment of _current_row_in_file_index was
        # removed here.
        self._current_row_in_file_index = 0
        self._current_row_on_page_index = 0

        self._path_or_buf, _, _ = get_filepath_or_buffer(path_or_buf)
        if isinstance(self._path_or_buf, compat.string_types):
            self._path_or_buf = open(self._path_or_buf, 'rb')

        self._get_properties()
        self._parse_metadata()

    def _get_properties(self):
        """Parse the fixed-layout file header into instance attributes."""

        # Check magic number
        self._path_or_buf.seek(0)
        self._cached_page = self._path_or_buf.read(288)
        if self._cached_page[0:len(_magic)] != _magic:
            raise ValueError("magic number mismatch (not a SAS file?)")

        # Get alignment information
        align1, align2 = 0, 0
        buf = self._read_bytes(_align_1_offset, _align_1_length)
        if buf == _u64_byte_checker_value:
            align2 = _align_2_value
            self.U64 = True
            self._int_length = 8
            self._page_bit_offset = _page_bit_offset_x64
            self._subheader_pointer_length = _subheader_pointer_length_x64
        else:
            self.U64 = False
            self._page_bit_offset = _page_bit_offset_x86
            self._subheader_pointer_length = _subheader_pointer_length_x86
            self._int_length = 4
        buf = self._read_bytes(_align_2_offset, _align_2_length)
        if buf == _align_1_checker_value:
            align1 = _align_2_value
        total_align = align1 + align2

        # Get endianness information
        buf = self._read_bytes(_endianness_offset, _endianness_length)
        if buf == b'\x01':
            self.byte_order = "<"
        else:
            self.byte_order = ">"

        # Get encoding information
        buf = self._read_bytes(_encoding_offset, _encoding_length)[0]
        if buf in _encoding_names:
            self.file_encoding = _encoding_names[buf]
        else:
            self.file_encoding = "unknown (code=%s)" % str(buf)

        # Get platform information
        buf = self._read_bytes(_platform_offset, _platform_length)
        if buf == b'1':
            self.platform = "unix"
        elif buf == b'2':
            self.platform = "windows"
        else:
            self.platform = "unknown"

        buf = self._read_bytes(_dataset_offset, _dataset_length)
        self.name = buf.rstrip(b'\x00 ').decode()

        buf = self._read_bytes(_file_type_offset, _file_type_length)
        self.file_type = buf.rstrip(b'\x00 ').decode()

        # Timestamp is epoch 01/01/1960
        epoch = pd.datetime(1960, 1, 1)
        x = self._read_float(_date_created_offset + align1,
                             _date_created_length)
        self.date_created = epoch + pd.to_timedelta(x, unit='s')
        x = self._read_float(_date_modified_offset + align1,
                             _date_modified_length)
        self.date_modified = epoch + pd.to_timedelta(x, unit='s')

        self.header_length = self._read_int(_header_size_offset + align1,
                                            _header_size_length)

        # Read the rest of the header into cached_page.
        buf = self._path_or_buf.read(self.header_length - 288)
        self._cached_page += buf
        if len(self._cached_page) != self.header_length:
            raise ValueError("The SAS7BDAT file appears to be truncated.")

        self._page_length = self._read_int(_page_size_offset + align1,
                                           _page_size_length)
        self._page_count = self._read_int(_page_count_offset + align1,
                                          _page_count_length)

        buf = self._read_bytes(_sas_release_offset + total_align,
                               _sas_release_length)
        self.sas_release = buf.rstrip(b'\x00 ').decode()

        buf = self._read_bytes(_sas_server_type_offset + total_align,
                               _sas_server_type_length)
        self.server_type = buf.rstrip(b'\x00 ').decode()

        buf = self._read_bytes(_os_version_number_offset + total_align,
                               _os_version_number_length)
        self.os_version = buf.rstrip(b'\x00 ').decode()

        buf = self._read_bytes(
            _os_name_offset, _os_name_length).rstrip(b'\x00 ')
        if len(buf) > 0:
            self.os_name = buf.rstrip(b'\x00 ').decode()
        else:
            # BUG FIX: this previously called self._path_or_buf.read(offset,
            # length) -- file.read() takes a single size argument, so this
            # raised TypeError; read from the cached header instead.
            buf = self._read_bytes(_os_maker_offset, _os_maker_length)
            self.os_name = buf.rstrip(b'\x00 ').decode()

    def _read_float(self, offset, width):
        """Read a single float of the given width (4 or 8) from the page."""
        if width not in (4, 8):
            raise ValueError("invalid float width")
        buf = self._read_bytes(offset, width)
        fd = "f" if width == 4 else "d"
        return struct.unpack(self.byte_order + fd, buf)[0]

    def _read_int(self, offset, width):
        """Read a single signed integer of the given width (1, 2, 4 or 8)."""
        if width not in (1, 2, 4, 8):
            raise ValueError("invalid int width")
        buf = self._read_bytes(offset, width)
        it = {1: "b", 2: "h", 4: "l", 8: "q"}[width]
        iv = struct.unpack(self.byte_order + it, buf)[0]
        return iv

    def _read_bytes(self, offset, length):
        """Return ``length`` raw bytes at ``offset``, preferring the cached
        page; fall back to seeking in the underlying file."""
        if self._cached_page is None:
            self._path_or_buf.seek(offset)
            buf = self._path_or_buf.read(length)
            if len(buf) < length:
                msg = "Unable to read {:d} bytes from file position {:d}."
                raise ValueError(msg.format(length, offset))
            return buf
        else:
            if offset + length > len(self._cached_page):
                raise ValueError("The cached page is too small.")
            return self._cached_page[offset:offset + length]

    def _parse_metadata(self):
        """Consume pages until all metadata subheaders have been read."""
        done = False
        while not done:
            self._cached_page = self._path_or_buf.read(self._page_length)
            if len(self._cached_page) <= 0:
                break
            if len(self._cached_page) != self._page_length:
                raise ValueError(
                    "Failed to read a meta data page from the SAS file.")
            done = self._process_page_meta()

    def _process_page_meta(self):
        """Process one page during metadata parsing; return True when the
        data section has been reached."""
        self._read_page_header()
        pt = [_page_meta_type, _page_amd_type] + _page_mix_types
        if self._current_page_type in pt:
            self._process_page_metadata()
        # BUG FIX: the second clause previously tested "is not None", which
        # is always True for a list initialized to []; test emptiness so we
        # only stop once actual data pointers (or a data/mix page) appear.
        return ((self._current_page_type in [_page_data_type] +
                 _page_mix_types) or
                (len(self._current_page_data_subheader_pointers) > 0))

    def _read_page_header(self):
        """Decode the page type, block count and subheader count."""
        bit_offset = self._page_bit_offset
        tx = _page_type_offset + bit_offset
        self._current_page_type = self._read_int(tx, _page_type_length)
        tx = _block_count_offset + bit_offset
        self._current_page_block_count = self._read_int(tx,
                                                        _block_count_length)
        tx = _subheader_count_offset + bit_offset
        self._current_page_subheaders_count = (
            self._read_int(tx, _subheader_count_length))

    def _process_page_metadata(self):
        """Dispatch every non-truncated subheader on the current page."""
        bit_offset = self._page_bit_offset

        for i in range(self._current_page_subheaders_count):
            pointer = self._process_subheader_pointers(
                _subheader_pointers_offset + bit_offset, i)
            if pointer.length == 0:
                continue
            if pointer.compression == _truncated_subheader_id:
                continue
            subheader_signature = self._read_subheader_signature(
                pointer.offset)
            subheader_index = (
                self._get_subheader_index(subheader_signature,
                                          pointer.compression, pointer.ptype))
            self._process_subheader(subheader_index, pointer)

    def _get_subheader_index(self, signature, compression, ptype):
        """Map a signature to a subheader kind; unsigned subheaders on a
        compressed file are treated as data subheaders."""
        index = _subheader_signature_to_index.get(signature)
        if index is None:
            f1 = ((compression == _compressed_subheader_id) or
                  (compression == 0))
            f2 = (ptype == _compressed_subheader_type)
            if (self.compression != "") and f1 and f2:
                index = _index.dataSubheaderIndex
            else:
                raise ValueError("Unknown subheader signature")
        return index

    def _process_subheader_pointers(self, offset, subheader_pointer_index):
        """Decode the i-th subheader pointer record on the current page."""
        subheader_pointer_length = self._subheader_pointer_length
        total_offset = (offset +
                        subheader_pointer_length * subheader_pointer_index)

        subheader_offset = self._read_int(total_offset, self._int_length)
        total_offset += self._int_length

        subheader_length = self._read_int(total_offset, self._int_length)
        total_offset += self._int_length

        subheader_compression = self._read_int(total_offset, 1)
        total_offset += 1

        subheader_type = self._read_int(total_offset, 1)

        x = _subheader_pointer()
        x.offset = subheader_offset
        x.length = subheader_length
        x.compression = subheader_compression
        x.ptype = subheader_type

        return x

    def _read_subheader_signature(self, offset):
        """Return the raw signature bytes at the start of a subheader."""
        subheader_signature = self._read_bytes(offset, self._int_length)
        return subheader_signature

    def _process_subheader(self, subheader_index, pointer):
        """Route a subheader to the matching _process_* handler."""
        offset = pointer.offset
        length = pointer.length

        if subheader_index == _index.rowSizeIndex:
            processor = self._process_rowsize_subheader
        elif subheader_index == _index.columnSizeIndex:
            processor = self._process_columnsize_subheader
        elif subheader_index == _index.columnTextIndex:
            processor = self._process_columntext_subheader
        elif subheader_index == _index.columnNameIndex:
            processor = self._process_columnname_subheader
        elif subheader_index == _index.columnAttributesIndex:
            processor = self._process_columnattributes_subheader
        elif subheader_index == _index.formatAndLabelIndex:
            processor = self._process_format_subheader
        elif subheader_index == _index.columnListIndex:
            processor = self._process_columnlist_subheader
        elif subheader_index == _index.subheaderCountsIndex:
            processor = self._process_subheader_counts
        elif subheader_index == _index.dataSubheaderIndex:
            # Data subheaders are only recorded here; they are decoded
            # row-by-row in _readline.
            self._current_page_data_subheader_pointers.append(pointer)
            return
        else:
            raise ValueError("unknown subheader index")

        processor(offset, length)

    def _process_rowsize_subheader(self, offset, length):
        """Extract row length/count and column-count parts from the
        row-size subheader."""
        int_len = self._int_length
        lcs_offset = offset
        lcp_offset = offset
        if self.U64:
            lcs_offset += 682
            lcp_offset += 706
        else:
            lcs_offset += 354
            lcp_offset += 378

        self.row_length = self._read_int(
            offset + _row_length_offset_multiplier * int_len, int_len)
        self.row_count = self._read_int(
            offset + _row_count_offset_multiplier * int_len, int_len)
        self.col_count_p1 = self._read_int(
            offset + _col_count_p1_multiplier * int_len, int_len)
        self.col_count_p2 = self._read_int(
            offset + _col_count_p2_multiplier * int_len, int_len)
        mx = _row_count_on_mix_page_offset_multiplier * int_len
        self._mix_page_row_count = self._read_int(offset + mx, int_len)
        self._lcs = self._read_int(lcs_offset, 2)
        self._lcp = self._read_int(lcp_offset, 2)

    def _process_columnsize_subheader(self, offset, length):
        """Read the total column count and sanity-check it."""
        int_len = self._int_length
        offset += int_len
        self.column_count = self._read_int(offset, int_len)
        if (self.col_count_p1 + self.col_count_p2 !=
                self.column_count):
            # BUG FIX: print was previously handed a %-template plus loose
            # arguments, which printed the raw template; format explicitly.
            print("Warning: column count mismatch (%d + %d != %d)\n" %
                  (self.col_count_p1, self.col_count_p2, self.column_count))

    # Unknown purpose
    def _process_subheader_counts(self, offset, length):
        pass

    def _process_columntext_subheader(self, offset, length):
        """Collect a column-name text block; the first block also carries
        the compression signature and creator-proc name."""
        offset += self._int_length
        text_block_size = self._read_int(offset, _text_block_size_length)

        buf = self._read_bytes(offset, text_block_size)
        self.column_names_strings.append(
            buf[0:text_block_size].rstrip(b"\x00 ").decode())

        if len(self.column_names_strings) == 1:
            column_name = self.column_names_strings[0]
            compression_literal = ""
            for cl in _compression_literals:
                if cl in column_name:
                    compression_literal = cl
            self.compression = compression_literal
            offset -= self._int_length

            offset1 = offset + 16
            if self.U64:
                offset1 += 4

            buf = self._read_bytes(offset1, self._lcp)
            compression_literal = buf.rstrip(b"\x00")
            # BUG FIX: these comparisons previously tested bytes against
            # str ("" and _rle_compression), which is always False in
            # Python 3; compare bytes with bytes.
            if compression_literal == b"":
                self._lcs = 0
                offset1 = offset + 32
                if self.U64:
                    offset1 += 4
                buf = self._read_bytes(offset1, self._lcp)
                self.creator_proc = buf[0:self._lcp].decode()
            elif compression_literal == _rle_compression.encode():
                offset1 = offset + 40
                if self.U64:
                    offset1 += 4
                buf = self._read_bytes(offset1, self._lcp)
                self.creator_proc = buf[0:self._lcp].decode()
            elif self._lcs > 0:
                self._lcp = 0
                offset1 = offset + 16
                if self.U64:
                    offset1 += 4
                buf = self._read_bytes(offset1, self._lcs)
                self.creator_proc = buf[0:self._lcp].decode()

    def _process_columnname_subheader(self, offset, length):
        """Resolve each column name from the stored text blocks."""
        int_len = self._int_length
        offset += int_len
        column_name_pointers_count = (length - 2 * int_len - 12) // 8
        for i in range(column_name_pointers_count):
            text_subheader = offset + _column_name_pointer_length * \
                (i + 1) + _column_name_text_subheader_offset
            col_name_offset = offset + _column_name_pointer_length * \
                (i + 1) + _column_name_offset_offset
            col_name_length = offset + _column_name_pointer_length * \
                (i + 1) + _column_name_length_offset

            idx = self._read_int(
                text_subheader, _column_name_text_subheader_length)
            col_offset = self._read_int(
                col_name_offset, _column_name_offset_length)
            col_len = self._read_int(
                col_name_length, _column_name_length_length)

            name_str = self.column_names_strings[idx]
            self.column_names.append(name_str[col_offset:col_offset + col_len])

    def _process_columnattributes_subheader(self, offset, length):
        """Record per-column data offsets, lengths and types
        (b'd' = numeric double, b's' = string)."""
        int_len = self._int_length
        column_attributes_vectors_count = (
            length - 2 * int_len - 12) // (int_len + 8)
        self.column_types = np.empty(
            column_attributes_vectors_count, dtype=np.dtype('S1'))
        for i in range(column_attributes_vectors_count):
            col_data_offset = (offset + int_len +
                               _column_data_offset_offset + i * (int_len + 8))
            col_data_len = (offset + 2 * int_len +
                            _column_data_length_offset + i * (int_len + 8))
            col_types = (offset + 2 * int_len +
                         _column_type_offset + i * (int_len + 8))

            self._column_data_offsets.append(
                self._read_int(col_data_offset, int_len))

            x = self._read_int(col_data_len, _column_data_length_length)
            self._column_data_lengths.append(x)

            x = self._read_int(col_types, _column_type_length)
            if x == 1:
                self.column_types[i] = b'd'
            else:
                self.column_types[i] = b's'

    def _process_columnlist_subheader(self, offset, length):
        # unknown purpose
        pass

    def _process_format_subheader(self, offset, length):
        """Resolve the format and label strings for the next column and
        append the completed _column record."""
        int_len = self._int_length
        text_subheader_format = offset + \
            _column_format_text_subheader_index_offset + 3 * int_len
        col_format_offset = offset + _column_format_offset_offset + 3 * int_len
        col_format_len = offset + _column_format_length_offset + 3 * int_len
        text_subheader_label = offset + \
            _column_label_text_subheader_index_offset + 3 * int_len
        col_label_offset = offset + _column_label_offset_offset + 3 * int_len
        col_label_len = offset + _column_label_length_offset + 3 * int_len

        x = self._read_int(text_subheader_format,
                           _column_format_text_subheader_index_length)
        # Clamp to the available text blocks.
        format_idx = min(x, len(self.column_names_strings) - 1)

        format_start = self._read_int(
            col_format_offset, _column_format_offset_length)
        format_len = self._read_int(
            col_format_len, _column_format_length_length)

        label_idx = self._read_int(
            text_subheader_label, _column_label_text_subheader_index_length)
        label_idx = min(label_idx, len(self.column_names_strings) - 1)

        label_start = self._read_int(
            col_label_offset, _column_label_offset_length)
        label_len = self._read_int(col_label_len, _column_label_length_length)

        label_names = self.column_names_strings[label_idx]
        column_label = label_names[label_start: label_start + label_len]
        format_names = self.column_names_strings[format_idx]
        column_format = format_names[format_start: format_start + format_len]
        current_column_number = len(self.columns)

        col = _column()
        col.col_id = current_column_number
        col.name = self.column_names[current_column_number]
        col.label = column_label
        col.format = column_format
        col.ctype = self.column_types[current_column_number]
        col.length = self._column_data_lengths[current_column_number]

        self.column_formats.append(column_format)
        self.columns.append(col)

    def read(self, nrows=None):
        """Read up to ``nrows`` rows (default: chunksize, else all rows)
        and return them as a DataFrame, or None at end of file."""

        if (nrows is None) and (self.chunksize is not None):
            nrows = self.chunksize
        elif nrows is None:
            nrows = self.row_count

        if self._current_row_in_file_index >= self.row_count:
            return None

        nd = (self.column_types == b'd').sum()
        ns = (self.column_types == b's').sum()

        self._string_chunk = np.empty((ns, nrows), dtype=np.object)
        self._byte_chunk = np.empty((nd, 8 * nrows), dtype=np.uint8)

        self._current_row_in_chunk_index = 0
        for i in range(nrows):
            done = self._readline()
            if done:
                break

        rslt = self._chunk_to_dataframe()
        if self.index is not None:
            rslt = rslt.set_index(self.index)

        return rslt

    def _readline(self):
        """Decode one row into the current chunk buffers; return True when
        the end of the file is reached."""

        bit_offset = self._page_bit_offset
        subheader_pointer_length = self._subheader_pointer_length

        # If there is no page, go to the end of the header and read a page.
        if self._cached_page is None:
            self._path_or_buf.seek(self.header_length)
            done = self._read_next_page()
            if done:
                return True

        # Loop until a data row is read
        while True:
            if self._current_page_type == _page_meta_type:
                flag = (self._current_row_on_page_index >=
                        len(self._current_page_data_subheader_pointers))
                if flag:
                    done = self._read_next_page()
                    if done:
                        return True
                    self._current_row_on_page_index = 0
                    continue
                current_subheader_pointer = (
                    self._current_page_data_subheader_pointers[
                        self._current_row_on_page_index])
                process_byte_array_with_data(self,
                                             current_subheader_pointer.offset,
                                             current_subheader_pointer.length,
                                             self._byte_chunk,
                                             self._string_chunk)
                return False
            elif self._current_page_type in _page_mix_types:
                # Rows on a mix page start after the subheader pointers,
                # 8-byte aligned.
                align_correction = (bit_offset + _subheader_pointers_offset +
                                    self._current_page_subheaders_count *
                                    subheader_pointer_length)
                align_correction = align_correction % 8
                offset = bit_offset + align_correction
                offset += _subheader_pointers_offset
                offset += (self._current_page_subheaders_count *
                           subheader_pointer_length)
                offset += self._current_row_on_page_index * self.row_length
                process_byte_array_with_data(self, offset, self.row_length,
                                             self._byte_chunk,
                                             self._string_chunk)
                mn = min(self.row_count, self._mix_page_row_count)
                if self._current_row_on_page_index == mn:
                    done = self._read_next_page()
                    if done:
                        return True
                    self._current_row_on_page_index = 0
                return False
            elif self._current_page_type == _page_data_type:
                process_byte_array_with_data(self,
                                             bit_offset +
                                             _subheader_pointers_offset +
                                             self._current_row_on_page_index *
                                             self.row_length,
                                             self.row_length, self._byte_chunk,
                                             self._string_chunk)
                flag = (self._current_row_on_page_index ==
                        self._current_page_block_count)
                if flag:
                    done = self._read_next_page()
                    if done:
                        return True
                    self._current_row_on_page_index = 0
                return False
            else:
                raise ValueError("unknown page type: %s",
                                 self._current_page_type)

    def _read_next_page(self):
        """Read and pre-process the next page; return True at EOF."""
        self._current_page_data_subheader_pointers = []
        self._cached_page = self._path_or_buf.read(self._page_length)
        if len(self._cached_page) <= 0:
            return True
        elif len(self._cached_page) != self._page_length:
            msg = ("failed to read complete page from file "
                   "(read {:d} of {:d} bytes)")
            raise ValueError(msg.format(len(self._cached_page),
                                        self._page_length))
        self._read_page_header()
        if self._current_page_type == _page_meta_type:
            self._process_page_metadata()
        # BUG FIX: _page_mix_types was previously wrapped in another list
        # ("+ [_page_mix_types]"), so mix pages never matched and were
        # skipped via the recursive call below.
        pt = [_page_meta_type, _page_data_type] + _page_mix_types
        if self._current_page_type not in pt:
            return self._read_next_page()
        return False

    def _decompress(self, row_length, page):
        """Decompress one row image using the file's compression scheme."""
        page = np.frombuffer(page, dtype=np.uint8)
        if self.compression == _rle_compression:
            return _rle_decompress(row_length, page)
        elif self.compression == _rdc_compression:
            return _rdc_decompress(row_length, page)
        else:
            raise ValueError("unknown SAS compression method: %s" %
                             self.compression)

    def _chunk_to_dataframe(self):
        """Convert the filled chunk buffers into a DataFrame."""

        n = self._current_row_in_chunk_index
        m = self._current_row_in_file_index
        ix = range(m - n, m)
        rslt = pd.DataFrame(index=ix)

        js, jb = 0, 0
        for j in range(self.column_count):

            name = self.column_names[j]

            if self.column_types[j] == b'd':
                rslt[name] = self._byte_chunk[jb, :].view(
                    dtype=self.byte_order + 'd')
                rslt[name] = np.asarray(rslt[name], dtype=np.float64)
                if self.convert_dates and (self.column_formats[j] == "MMDDYY"):
                    epoch = pd.datetime(1960, 1, 1)
                    rslt[name] = epoch + pd.to_timedelta(rslt[name], unit='d')
                jb += 1
            elif self.column_types[j] == b's':
                rslt[name] = self._string_chunk[js, :]
                rslt[name] = rslt[name].apply(lambda x: x.rstrip(b'\x00 '))
                if self.encoding is not None:
                    rslt[name] = rslt[name].apply(
                        lambda x: x.decode(encoding=self.encoding))
                if self.blank_missing:
                    ii = rslt[name].str.len() == 0
                    rslt.loc[ii, name] = np.nan
                js += 1
            else:
                raise ValueError("unknown column type %s" %
                                 self.column_types[j])

        return rslt
| [
"pandas.to_timedelta",
"numpy.asarray",
"struct.unpack",
"numpy.empty",
"pandas.datetime",
"pandas.DataFrame",
"numpy.frombuffer",
"numpy.dtype",
"pandas.io.common.get_filepath_or_buffer"
] | [((7607, 7642), 'pandas.io.common.get_filepath_or_buffer', 'get_filepath_or_buffer', (['path_or_buf'], {}), '(path_or_buf)\n', (7629, 7642), False, 'from pandas.io.common import get_filepath_or_buffer, BaseIterator\n'), ((9970, 9993), 'pandas.datetime', 'pd.datetime', (['(1960)', '(1)', '(1)'], {}), '(1960, 1, 1)\n', (9981, 9993), True, 'import pandas as pd\n'), ((26134, 26172), 'numpy.empty', 'np.empty', (['(ns, nrows)'], {'dtype': 'np.object'}), '((ns, nrows), dtype=np.object)\n', (26142, 26172), True, 'import numpy as np\n'), ((26200, 26241), 'numpy.empty', 'np.empty', (['(nd, 8 * nrows)'], {'dtype': 'np.uint8'}), '((nd, 8 * nrows), dtype=np.uint8)\n', (26208, 26241), True, 'import numpy as np\n'), ((31000, 31035), 'numpy.frombuffer', 'np.frombuffer', (['page'], {'dtype': 'np.uint8'}), '(page, dtype=np.uint8)\n', (31013, 31035), True, 'import numpy as np\n'), ((31541, 31563), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'ix'}), '(index=ix)\n', (31553, 31563), True, 'import pandas as pd\n'), ((10141, 10169), 'pandas.to_timedelta', 'pd.to_timedelta', (['x'], {'unit': '"""s"""'}), "(x, unit='s')\n", (10156, 10169), True, 'import pandas as pd\n'), ((10320, 10348), 'pandas.to_timedelta', 'pd.to_timedelta', (['x'], {'unit': '"""s"""'}), "(x, unit='s')\n", (10335, 10348), True, 'import pandas as pd\n'), ((12197, 12237), 'struct.unpack', 'struct.unpack', (['(self.byte_order + fd)', 'buf'], {}), '(self.byte_order + fd, buf)\n', (12210, 12237), False, 'import struct\n'), ((12552, 12592), 'struct.unpack', 'struct.unpack', (['(self.byte_order + it)', 'buf'], {}), '(self.byte_order + it, buf)\n', (12565, 12592), False, 'import struct\n'), ((22663, 22677), 'numpy.dtype', 'np.dtype', (['"""S1"""'], {}), "('S1')\n", (22671, 22677), True, 'import numpy as np\n'), ((31854, 31894), 'numpy.asarray', 'np.asarray', (['rslt[name]'], {'dtype': 'np.float64'}), '(rslt[name], dtype=np.float64)\n', (31864, 31894), True, 'import numpy as np\n'), ((32003, 32026), 'pandas.datetime', 
'pd.datetime', (['(1960)', '(1)', '(1)'], {}), '(1960, 1, 1)\n', (32014, 32026), True, 'import pandas as pd\n'), ((32068, 32105), 'pandas.to_timedelta', 'pd.to_timedelta', (['rslt[name]'], {'unit': '"""d"""'}), "(rslt[name], unit='d')\n", (32083, 32105), True, 'import pandas as pd\n')] |
import numpy as np
import matplotlib.mlab as mlab
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
from .filehandle import open_audio
from .constants import DEFAULTS
def create_buffer(sound: list) -> np.array:
    """Interpret raw PCM bytes as an array of signed 16-bit samples."""
    return np.frombuffer(sound, np.int16)
def calculate_frames(buffer: np.array, desired_length: float = 0.3,
                     sample_rate: int = 48_000, overlap: float = 0.15):
    """Describe how ``buffer`` would be segmented with the given settings.

    Returns ``(ms, segments, overlap, desired_length)``: the buffer
    duration in milliseconds, the number of full segments that fit, and
    the overlap / segment sizes converted to sample counts.
    """
    n_samples = len(buffer)
    duration_ms = int(1000 * (n_samples / sample_rate))
    overlap_samples = overlap * sample_rate
    segment_samples = desired_length * sample_rate
    n_segments = int((n_samples - 2 * overlap_samples) / segment_samples)
    return duration_ms, n_segments, int(overlap_samples), int(segment_samples)
def divide(buffer: np.array, desired_length: float = 0.3,
           sample_rate: int = 48_000, overlap: float = 0.15) -> np.array:
    """
    Generate divided chunks.

    buffer - buffer to divide [array]
    desired_length - lengths of divided chunks [time in seconds]
    sample_rate - sampling of buffer [n]
    overlap - how much overlap on each frame [time in seconds]

    Yields ``(chunk, start_index)`` pairs, where each chunk carries
    ``overlap`` extra samples on both sides.

    Raises
    ------
    ValueError
        If ``overlap`` exceeds ``desired_length``.  (As this is a
        generator, the check fires on first iteration.)
    """
    if overlap > desired_length:
        # BUG FIX: raise a specific exception type instead of a bare
        # Exception (still caught by callers handling Exception).
        raise ValueError("overlap > desired_length")
    ms, fragments, \
        overlap, desired_length = calculate_frames(buffer,
                                                   desired_length=desired_length,
                                                   sample_rate=sample_rate,
                                                   overlap=overlap)
    i = overlap
    while len(buffer) - i > 0:
        yield (buffer[i-overlap:i+desired_length+overlap], i-overlap)
        i += desired_length
def detect_peaks(buffer, size=DEFAULTS["DEFAULT_PEAK_SIZE"]):
    """Boolean mask marking local maxima of ``buffer``, with cells that
    belong to the (eroded) zero background excluded."""
    peak_mask = buffer == maximum_filter(buffer, size=size)
    zero_mask = binary_erosion(buffer == 0, structure=np.ones((1, 1)),
                               border_value=1)
    return peak_mask ^ zero_mask
def return_position(buffer_mask):
    """Yield (row, col) coordinates of every truthy cell in the mask."""
    for row_idx, row in enumerate(buffer_mask):
        for col_idx, cell in enumerate(row):
            if cell:
                yield (row_idx, col_idx)
def same_point(p1, p2):
    """True when both points have identical (y, x) coordinates."""
    return (p1[0], p1[1]) == (p2[0], p2[1])
def get_point_distance_arr(p1, p2):
    """Euclidean distance between two points given as (y, x) pairs."""
    dy = p2[0] - p1[0]
    dx = p2[1] - p1[1]
    return np.sqrt(dx ** 2 + dy ** 2)
def generate_point_mesh(position_list,
                        max_forward_distance=DEFAULTS["MAX_FORWARD_DISTANCE"]):
    """Yield (freq1, freq2, delta_time) triples for every ordered pair of
    peaks within ``max_forward_distance`` of each other, scanning in
    time order.  Raw material for fingerprint hashes.
    """
    # y is first in the array (frequency, time)
    # i have to sort the whole list, by the x coordinate
    array = np.array(list(position_list))
    # a[a[:,1].argsort()]
    # https://stackoverflow.com/questions/2828059/sorting-arrays-in-numpy-by-column
    array = array[array[:,1].argsort()]
    # by enumerating by the x coordinate I eliminate the need for complex, and costly
    # "is behind" algorithm
    for i, point1 in enumerate(array):
        # array[i:] includes point1 itself; same_point below skips it.
        for point2 in array[i:]:
            if same_point(point1, point2):
                continue
            # first fast check if its definitely too far from us
            # (conservative: either axis exceeding the radius implies the
            # true distance does too, so no valid pair is dropped here)
            if point2[0] - point1[0] > max_forward_distance or \
               point2[1] - point1[1] > max_forward_distance:
                continue
            # now, the real check
            if get_point_distance_arr(point1, point2) > max_forward_distance:
                continue
            # point2 - point1 is a valid point, now time to create raw hash data
            # non-negative because the array is sorted by time (column 1)
            assert point2[1]-point1[1] >= 0
            # freq1, freq2, delta_time
            yield (point1[0], point2[0], point2[1]-point1[1])
def find_clips(arr):
    """Trim leading/trailing silence: return (start, end) bounds of the
    region where values first and last exceed the threshold of 10.

    For an empty input returns (0, 0); the original raised NameError
    because the loop variables were never bound.  If no value exceeds
    the threshold, the historical behavior (bounds land on the last
    scanned index) is preserved for compatibility.
    """
    if len(arr) == 0:
        # BUG FIX: guard empty input instead of crashing with NameError.
        return (0, 0)
    for start, value in enumerate(arr):
        if value > 10:
            break
    for end, value in enumerate(arr[::-1]):
        if value > 10:
            break
    return (start, len(arr) - end)
"numpy.frombuffer",
"numpy.sqrt",
"numpy.ones",
"scipy.ndimage.filters.maximum_filter"
] | [((311, 347), 'numpy.frombuffer', 'np.frombuffer', (['sound'], {'dtype': 'np.int16'}), '(sound, dtype=np.int16)\n', (324, 347), True, 'import numpy as np\n'), ((2457, 2509), 'numpy.sqrt', 'np.sqrt', (['((p2[1] - p1[1]) ** 2 + (p2[0] - p1[0]) ** 2)'], {}), '((p2[1] - p1[1]) ** 2 + (p2[0] - p1[0]) ** 2)\n', (2464, 2509), True, 'import numpy as np\n'), ((1879, 1912), 'scipy.ndimage.filters.maximum_filter', 'maximum_filter', (['buffer'], {'size': 'size'}), '(buffer, size=size)\n', (1893, 1912), False, 'from scipy.ndimage.filters import maximum_filter\n'), ((2013, 2028), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (2020, 2028), True, 'import numpy as np\n')] |
import numpy as np
import cairo
import random
import math
# Fixed seeds so the generated dataset is reproducible.
random.seed(0)
np.random.seed(0)

# Output directories for rendered images and their text descriptions.
image_path = 'shapes/images'
text_path = 'shapes/texts'

# Canvas dimensions in pixels.
WIDTH = 64
HEIGHT = 64

# Named RGB colors (components in [0, 1], cairo convention).
colors = {
    'red': (1.0, 0.0, 0.0),
    'green': (0.0, 1.0, 0.0),
    'blue': (0.0, 0.0, 1.0),
    'yellow': (1.0, 1.0, 0.0),
    'orange': (1.0, 0.66, 0.0),
    'purple': (0.5, 0.0, 0.5),
    #'black': (0.0, 0.0, 0.0),
    #'white': (1.0, 1.0, 1.0)
}
# Deterministic (name, rgb) ordering for reproducible sampling.
colors_list = list(colors.items())
colors_list.sort()

#shapes = ['square', 'circle', 'triangle']
shapes = ['square', 'circle']

# Shape dimensions in pixels.
SQUARE_SIZE = 25
CIRCLE_RADIUS = 15
TRIANGLE_SIZE = 30

"""
formatting for text descriptions
0 - shape color
1 - background color
2 - shape
"""
desc_fmts = [
"{0} {2} on a {1} background",
"a {0} {2} on a {1} background",
"a {2} colored {0} on a {1} background",
"{2} that is {0} on a {1} background",
"{2} that is colored {0} on a {1} background",
"there is a {0} {2} on a {1} background",
#"there is a {2} colored {0} on a {1} background",
"there is a {2} colored {0} on a background colored {1}",
"the shape is a {2} colored {0} on a {1} background",
"this {0} {2} is on a background that is {1}",
#"this {0} {2} is on a background colored {1}",
#"this {0} {2} is on a {1} background",
#"this {2} is {0} and the background is {1}",
#"this {2} is {0} on a {1} background",
#"this {2} is {0} and the background is colored {1}",
#"this {2} is colored {0} on a {1} background",
#"this {2} is colored {0} and the background is {1}",
#"this {2} is colored {0} and the background is colored {1}",
#"this {2} is colored {0} and there is a {1} background",
"a {1} background with a {0} {2}",
"a {1} background with a {2} that is {0}",
"a {1} background with a {2} colored {0}",
"a {1} background with a {2} that is colored {0}",
"there is a {1} background with a {0} {2}",
"there is a {1} background with a {2} that is {0}",
#"there is a {1} background with a {2} colored {0}",
#"there is a {1} background with a {2} that is colored {0}",
"{1} background with a {0} {2}",
"{1} background with a {2} that is {0}",
"the background is {1} and the shape is a {0} {2}",
#"the background is {1} and the shape is a {2} colored {0}",
#"the background is colored {1} and the shape is a {0} {2}",
#"the background is colored {1} and the shape is a {2} colored {0}",
#"this background is {1} and the shape is a {0} {2}",
#"this background is {1} and the shape is a {2} colored {0}",
"this background is colored {1} and the shape is a {0} {2}",
"this background is colored {1} and the shape is a {2} colored {0}"
]
def main():
    """Render 4056 random shape images plus matching text descriptions."""
    canvas = cairo.ImageSurface(cairo.FORMAT_ARGB32, WIDTH, HEIGHT)
    ctx = cairo.Context(canvas)
    for index in range(4056):
        fg_name, fg_rgb, bg_name, bg_rgb = select_colors()
        # Flood-fill the canvas with the background color first.
        ctx.set_source_rgb(*bg_rgb)
        ctx.paint()
        # Then render a random shape in the foreground color on top.
        ctx.set_source_rgb(*fg_rgb)
        shape_name = draw_shape(ctx)
        stem = '{:05d}'.format(index)
        canvas.write_to_png('{}/{}.png'.format(image_path, stem))
        # Finally write the text descriptions for this image.
        gen_descriptions(stem, fg_name, bg_name, shape_name)
# Returns color, rgb, color, rgb
def select_colors():
    """Pick two distinct palette entries: (shape name, shape rgb,
    background name, background rgb)."""
    (fg_name, fg_rgb), (bg_name, bg_rgb) = random.sample(colors_list, 2)
    return fg_name, fg_rgb, bg_name, bg_rgb
# Returns string of shape
def draw_shape(ctx):
    """Draw one randomly chosen shape, jittered around the canvas center.

    Returns the name of the shape that was drawn.
    """
    # Gaussian jitter so shapes are not always perfectly centered.
    # (Call order matters for the seeded RNG: x first, then y.)
    jitter_x = np.random.normal(scale=5.0)
    jitter_y = np.random.normal(scale=5.0)
    center_x = WIDTH / 2 + jitter_x
    center_y = HEIGHT / 2 + jitter_y
    shape = random.choice(shapes)
    if shape == 'square':
        half = SQUARE_SIZE / 2
        ctx.rectangle(center_x - half, center_y - half, SQUARE_SIZE, SQUARE_SIZE)
        ctx.fill()
    elif shape == 'circle':
        ctx.arc(center_x, center_y, CIRCLE_RADIUS, 0, 2 * math.pi)
        ctx.fill()
    elif shape == 'triangle':
        # Equilateral triangle: height = side * sqrt(3) / 2.
        tri_height = TRIANGLE_SIZE / 2 * math.sqrt(3)
        ctx.move_to(center_x, center_y - tri_height / 2)
        ctx.line_to(center_x + TRIANGLE_SIZE / 2, center_y + tri_height / 2)
        ctx.line_to(center_x - TRIANGLE_SIZE / 2, center_y + tri_height / 2)
        ctx.fill()
    return shape
# Generates 10 text descriptions and writes them to a file
def gen_descriptions(file_name, shape_col, bg_color, shape):
    """Write 10 randomly sampled description templates for one image to
    '<text_path>/<file_name>.txt', filled in with the shape color,
    background color, and shape name."""
    out_path = '{}/{}.txt'.format(text_path, file_name)
    chosen = random.sample(desc_fmts, 10)
    with open(out_path, 'w+') as handle:
        for template in chosen:
            handle.write('{}\n'.format(template.format(shape_col, bg_color, shape)))
# Generate the full shape dataset when executed as a script.
if __name__ == '__main__':
    main()
| [
"numpy.random.normal",
"random.sample",
"cairo.ImageSurface",
"random.choice",
"cairo.Context",
"math.sqrt",
"random.seed",
"numpy.random.seed"
] | [((59, 73), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (70, 73), False, 'import random\n'), ((74, 91), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (88, 91), True, 'import numpy as np\n'), ((2753, 2807), 'cairo.ImageSurface', 'cairo.ImageSurface', (['cairo.FORMAT_ARGB32', 'WIDTH', 'HEIGHT'], {}), '(cairo.FORMAT_ARGB32, WIDTH, HEIGHT)\n', (2771, 2807), False, 'import cairo\n'), ((2818, 2840), 'cairo.Context', 'cairo.Context', (['surface'], {}), '(surface)\n', (2831, 2840), False, 'import cairo\n'), ((3468, 3497), 'random.sample', 'random.sample', (['colors_list', '(2)'], {}), '(colors_list, 2)\n', (3481, 3497), False, 'import random\n'), ((3651, 3678), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(5.0)'}), '(scale=5.0)\n', (3667, 3678), True, 'import numpy as np\n'), ((3694, 3721), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(5.0)'}), '(scale=5.0)\n', (3710, 3721), True, 'import numpy as np\n'), ((3846, 3867), 'random.choice', 'random.choice', (['shapes'], {}), '(shapes)\n', (3859, 3867), False, 'import random\n'), ((4650, 4678), 'random.sample', 'random.sample', (['desc_fmts', '(10)'], {}), '(desc_fmts, 10)\n', (4663, 4678), False, 'import random\n'), ((4224, 4236), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (4233, 4236), False, 'import math\n')] |
import astropy.units as u
import numpy as np
from astropy.time import Time
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from astropy.stats import mad_std
from .regression import regression_coeffs, regression_model
__all__ = ['PCA_light_curve']
def PCA_light_curve(pr, transit_parameters, buffer_time=5*u.min,
                    outlier_mad_std_factor=3.0, plots=False,
                    validation_duration_fraction=1/6,
                    flux_threshold=0.89, validation_time=-0.65,
                    plot_validation=False):
    """
    Build a systematics-corrected transit light curve via PCA regression.

    For every photometric aperture: comparison-star fluxes plus telescope
    state vectors (centroids, airmass, pressure, humidity, PSF width, sky
    background) are reduced with PCA and regressed against the target flux
    using only out-of-transit points. The number of principal components is
    chosen by cross-validation against a held-out out-of-transit chunk. The
    aperture whose detrended out-of-transit scatter (MAD) is smallest wins.

    Parameters
    ----------
    pr : `~toolkit.PhotometryResults`
        Photometry results container (times, fluxes, errors, centroids,
        weather/PSF vectors, aperture radii).
    transit_parameters : `~batman.TransitParams`
        Transit ephemeris (t0, period, duration) used to locate the
        expected transit window.
    buffer_time : `~astropy.units.Quantity`
        Extra padding added to the transit duration when masking
        in-transit points.
    outlier_mad_std_factor : float
        Outlier rejection factor; currently only referenced by the
        commented-out per-star rejection code below.
    plots : bool
        If True, show diagnostic plots for component selection and the
        final light curve.
    validation_duration_fraction : float
        Length of the held-out validation chunk, as a fraction of the
        transit duration.
    flux_threshold : float
        Points with flux below ``flux_threshold * max(flux)`` are
        discarded as inlier-mask outliers.
    validation_time : float
        Center of the validation chunk, in units of transit durations
        relative to mid-transit (negative = before transit).
    plot_validation : bool
        If True, plot the in-transit/out-of-transit/validation masks.

    Returns
    -------
    best_lc : `~numpy.ndarray`
        Normalized light curve (median-divided, centered on unity) from
        the best aperture.
    """
    # Predicted mid-transit time of the epoch nearest the observations,
    # from the ephemeris: t0 + N * period.
    expected_mid_transit_jd = ((np.max(np.abs(pr.times - transit_parameters.t0) //
                                       transit_parameters.per)) * # need to add +1 here for 20170502, 20170912, don't know why TMP
                               transit_parameters.per + transit_parameters.t0)
    mid_transit_time = Time(expected_mid_transit_jd, format='jd')
    transit_duration = transit_parameters.duration + buffer_time
    # Track the out-of-transit MAD per aperture; best (smallest) one wins.
    final_lc_mad = np.ones(len(pr.aperture_radii))
    final_lc = None
    figures = []
    for aperture_index in range(len(pr.aperture_radii)):
        target_fluxes = pr.fluxes[:, 0, aperture_index]
        target_errors = pr.errors[:, 0, aperture_index]
        # NOTE(review): the next assignment is immediately overwritten by the
        # flux-threshold cut below.
        inliers = np.ones_like(pr.fluxes[:, 0, aperture_index]).astype(bool)
        inliers = target_fluxes >= flux_threshold*target_fluxes.max()
        # inliers &= np.arange(len(inliers)) < len(inliers) - 50
        # # inliers = np.ones_like(pr.fluxes[:, 0, aperture_index]).astype(bool)
        #
        # for i in range(pr.fluxes.shape[1]):
        #     flux_i = pr.fluxes[:, i, aperture_index]
        #
        #     linear_flux_trend = np.polyval(np.polyfit(pr.times - pr.times.mean(),
        #                                               flux_i, 2),
        #                                    pr.times - pr.times.mean())
        #     new_inliers = (np.abs(flux_i - linear_flux_trend) < outlier_mad_std_factor *
        #                    mad_std(flux_i))
        #     inliers &= new_inliers
        #     plt.figure()
        #     plt.title('outliers')
        #     plt.plot(pr.times, flux_i - linear_flux_trend)
        #     plt.plot(pr.times[np.logical_not(inliers)],
        #              (flux_i - linear_flux_trend)[np.logical_not(inliers)],
        #              'ro')
        #     plt.show()
        # Out-of-transit = anything beyond half a (padded) duration from
        # mid-transit.
        out_of_transit = ((Time(pr.times, format='jd') > mid_transit_time + transit_duration/2) |
                          (Time(pr.times, format='jd') < mid_transit_time - transit_duration/2))
        validation_duration = validation_duration_fraction * transit_duration
        # Validation chunk: a short window centered at
        # mid-transit + validation_time * duration.
        validation_mask = ((Time(pr.times, format='jd') < mid_transit_time +
                            validation_time * transit_duration + validation_duration / 2) &
                           (Time(pr.times, format='jd') > mid_transit_time +
                            validation_time * transit_duration - validation_duration / 2))
        oot = out_of_transit & inliers
        oot_no_validation = (out_of_transit & inliers & np.logical_not(validation_mask))
        if plot_validation:
            plt.figure()
            plt.plot(pr.times[~oot], target_fluxes[~oot], '.', label='in-t')
            plt.plot(pr.times[oot], target_fluxes[oot], '.', label='oot')
            plt.plot(pr.times[validation_mask], target_fluxes[validation_mask], '.',
                     label='validation')
            plt.axvline(mid_transit_time.jd, ls='--', color='r', label='midtrans')
            plt.legend()
            plt.title(np.count_nonzero(validation_mask))
            plt.xlabel('JD')
            plt.ylabel('Flux')
            plt.show()
        # Constant column so the regression includes an intercept term.
        ones = np.ones((len(pr.times), 1))
        # Design matrix: comparison-star fluxes + telescope state vectors.
        regressors = np.hstack([pr.fluxes[:, 1:, aperture_index],
                                 pr.xcentroids[:, 0, np.newaxis],
                                 pr.ycentroids[:, 0, np.newaxis],
                                 pr.airmass[:, np.newaxis],
                                 #pr.airmass[:, np.newaxis]**2,
                                 pr.airpressure[:, np.newaxis],
                                 pr.humidity[:, np.newaxis],
                                 pr.psf_stddev[:, np.newaxis],
                                 pr.background_median[:, np.newaxis],
                                 #pr.altitude[:, np.newaxis],
                                 #pr.altitude[:, np.newaxis]**2,
                                 ])
        # Candidate numbers of principal components to cross-validate over.
        n_components = np.arange(2, regressors.shape[1])
        def train_pca_linreg_model(out_of_transit_mask, oot_no_validation_mask, n_comp):
            # Fit PCA + weighted linear regression on one mask, then score
            # the detrended scatter; done twice: with and without the
            # validation chunk held out.
            # OOT chunk first:
            pca = PCA(n_components=n_comp)
            reduced_regressors = pca.fit_transform(regressors[out_of_transit_mask],
                                                   target_fluxes[out_of_transit_mask])
            prepended_regressors_oot = np.hstack([ones[out_of_transit_mask],
                                                  reduced_regressors])
            c_oot = regression_coeffs(prepended_regressors_oot,
                                      target_fluxes[out_of_transit_mask],
                                      target_errors[out_of_transit_mask])
            lc_training = (target_fluxes[out_of_transit_mask] -
                           regression_model(c_oot, prepended_regressors_oot))
            median_oot = np.median(target_fluxes[out_of_transit_mask])
            std_lc_training = np.std((lc_training + median_oot) / median_oot)
            # Now on validation chunk:
            reduced_regressors_no_validation = pca.fit_transform(regressors[oot_no_validation_mask],
                                                                 target_fluxes[oot_no_validation_mask])
            prepended_regressors_no_validation = np.hstack([ones[oot_no_validation_mask],
                                                            reduced_regressors_no_validation])
            c_no_validation = regression_coeffs(prepended_regressors_no_validation,
                                                target_fluxes[oot_no_validation_mask],
                                                target_errors[oot_no_validation_mask])
            # Evaluate the held-out model over the full OOT set.
            lc_validation = (target_fluxes[out_of_transit_mask] -
                             regression_model(c_no_validation, prepended_regressors_oot))
            std_lc_validation = np.std((lc_validation + median_oot) / median_oot)
            #return lc_training, lc_validation
            return lc_training, lc_validation, std_lc_training, std_lc_validation
        stds_validation = np.zeros_like(n_components, dtype=float)
        stds_training = np.zeros_like(n_components, dtype=float)
        for i, n_comp in enumerate(n_components):
            results = train_pca_linreg_model(oot, oot_no_validation, n_comp)
            lc_training, lc_validation, std_lc_training, std_lc_validation = results
            stds_validation[i] = std_lc_validation
            stds_training[i] = std_lc_training
            # plt.title(n_comp)
            # plt.plot(times[oot], lc_training, 'b.')
            # plt.plot(times[oot], lc_validation, 'r.')
            # # plt.plot(times[inliers], target_fluxes[inliers], '.')
            # # plt.plot(times[not_masked], model)
            # plt.show()
        # Pick the component count with the lowest validation scatter.
        best_n_components = n_components[np.argmin(stds_validation)]
        if plots:
            fig = plt.figure()
            plt.plot(n_components, stds_validation, label='validation')
            plt.plot(n_components, stds_training, label='training')
            plt.xlabel('Components')
            plt.ylabel('std')
            plt.axvline(best_n_components, color='r', ls='--')
            plt.title("Aperture: {0} (index: {1})"
                      .format(pr.aperture_radii[aperture_index],
                              aperture_index))
            plt.legend()
            figures.append(fig)
            # plt.show()
        # Now apply PCA to generate light curve with best number of components
        pca = PCA(n_components=best_n_components)
        reduced_regressors = pca.fit_transform(regressors[oot], target_fluxes[oot])
        all_regressors = pca.transform(regressors)
        prepended_all_regressors = np.hstack([ones, all_regressors])
        prepended_regressors_oot = np.hstack([ones[oot], reduced_regressors])
        c_oot = regression_coeffs(prepended_regressors_oot,
                                  target_fluxes[oot],
                                  target_errors[oot])
        # Detrend the full series with the OOT-fit model and normalize.
        best_lc = ((target_fluxes - regression_model(c_oot, prepended_all_regressors)) /
                   np.median(target_fluxes)) + 1
        final_lc_mad[aperture_index] = mad_std(best_lc[out_of_transit])
        # Keep this light curve if it is the best (lowest MAD) seen so far.
        if final_lc_mad[aperture_index] == np.min(final_lc_mad):
            final_lc = best_lc.copy()
            print('best aperture: {0}'.format(pr.aperture_radii[aperture_index]))
    if plots:
        # Close all validation plots except the best aperture's
        for i, fig in enumerate(figures):
            if i != np.argmin(final_lc_mad):
                plt.close(fig)
        plt.figure()
        plt.plot(pr.aperture_radii, final_lc_mad)
        plt.axvline(pr.aperture_radii[np.argmin(final_lc_mad)], ls='--', color='r')
        plt.xlabel('Aperture radii')
        plt.ylabel('mad(out-of-transit light curve)')
        plt.figure()
        plt.plot(pr.times, final_lc, 'k.')
        plt.xlabel('Time [JD]')
        plt.ylabel('Flux')
        plt.show()
    return final_lc
| [
"numpy.hstack",
"matplotlib.pyplot.ylabel",
"numpy.logical_not",
"numpy.count_nonzero",
"numpy.arange",
"astropy.stats.mad_std",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.min",
"numpy.argmin",
"numpy.abs",
"numpy.st... | [((1209, 1251), 'astropy.time.Time', 'Time', (['expected_mid_transit_jd'], {'format': '"""jd"""'}), "(expected_mid_transit_jd, format='jd')\n", (1213, 1251), False, 'from astropy.time import Time\n'), ((4065, 4343), 'numpy.hstack', 'np.hstack', (['[pr.fluxes[:, 1:, aperture_index], pr.xcentroids[:, 0, np.newaxis], pr.\n ycentroids[:, 0, np.newaxis], pr.airmass[:, np.newaxis], pr.airpressure\n [:, np.newaxis], pr.humidity[:, np.newaxis], pr.psf_stddev[:, np.\n newaxis], pr.background_median[:, np.newaxis]]'], {}), '([pr.fluxes[:, 1:, aperture_index], pr.xcentroids[:, 0, np.newaxis\n ], pr.ycentroids[:, 0, np.newaxis], pr.airmass[:, np.newaxis], pr.\n airpressure[:, np.newaxis], pr.humidity[:, np.newaxis], pr.psf_stddev[:,\n np.newaxis], pr.background_median[:, np.newaxis]])\n', (4074, 4343), True, 'import numpy as np\n'), ((4800, 4833), 'numpy.arange', 'np.arange', (['(2)', 'regressors.shape[1]'], {}), '(2, regressors.shape[1])\n', (4809, 4833), True, 'import numpy as np\n'), ((6911, 6951), 'numpy.zeros_like', 'np.zeros_like', (['n_components'], {'dtype': 'float'}), '(n_components, dtype=float)\n', (6924, 6951), True, 'import numpy as np\n'), ((6976, 7016), 'numpy.zeros_like', 'np.zeros_like', (['n_components'], {'dtype': 'float'}), '(n_components, dtype=float)\n', (6989, 7016), True, 'import numpy as np\n'), ((8344, 8379), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'best_n_components'}), '(n_components=best_n_components)\n', (8347, 8379), False, 'from sklearn.decomposition import PCA\n'), ((8551, 8584), 'numpy.hstack', 'np.hstack', (['[ones, all_regressors]'], {}), '([ones, all_regressors])\n', (8560, 8584), True, 'import numpy as np\n'), ((8621, 8663), 'numpy.hstack', 'np.hstack', (['[ones[oot], reduced_regressors]'], {}), '([ones[oot], reduced_regressors])\n', (8630, 8663), True, 'import numpy as np\n'), ((9011, 9043), 'astropy.stats.mad_std', 'mad_std', (['best_lc[out_of_transit]'], {}), '(best_lc[out_of_transit])\n', (9018, 9043), 
False, 'from astropy.stats import mad_std\n'), ((9427, 9439), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9437, 9439), True, 'from matplotlib import pyplot as plt\n'), ((9448, 9489), 'matplotlib.pyplot.plot', 'plt.plot', (['pr.aperture_radii', 'final_lc_mad'], {}), '(pr.aperture_radii, final_lc_mad)\n', (9456, 9489), True, 'from matplotlib import pyplot as plt\n'), ((9582, 9610), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Aperture radii"""'], {}), "('Aperture radii')\n", (9592, 9610), True, 'from matplotlib import pyplot as plt\n'), ((9619, 9664), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mad(out-of-transit light curve)"""'], {}), "('mad(out-of-transit light curve)')\n", (9629, 9664), True, 'from matplotlib import pyplot as plt\n'), ((9674, 9686), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9684, 9686), True, 'from matplotlib import pyplot as plt\n'), ((9695, 9729), 'matplotlib.pyplot.plot', 'plt.plot', (['pr.times', 'final_lc', '"""k."""'], {}), "(pr.times, final_lc, 'k.')\n", (9703, 9729), True, 'from matplotlib import pyplot as plt\n'), ((9738, 9761), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [JD]"""'], {}), "('Time [JD]')\n", (9748, 9761), True, 'from matplotlib import pyplot as plt\n'), ((9770, 9788), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Flux"""'], {}), "('Flux')\n", (9780, 9788), True, 'from matplotlib import pyplot as plt\n'), ((9797, 9807), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9805, 9807), True, 'from matplotlib import pyplot as plt\n'), ((3388, 3419), 'numpy.logical_not', 'np.logical_not', (['validation_mask'], {}), '(validation_mask)\n', (3402, 3419), True, 'import numpy as np\n'), ((3462, 3474), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3472, 3474), True, 'from matplotlib import pyplot as plt\n'), ((3487, 3551), 'matplotlib.pyplot.plot', 'plt.plot', (['pr.times[~oot]', 'target_fluxes[~oot]', '"""."""'], {'label': '"""in-t"""'}), "(pr.times[~oot], 
target_fluxes[~oot], '.', label='in-t')\n", (3495, 3551), True, 'from matplotlib import pyplot as plt\n'), ((3564, 3625), 'matplotlib.pyplot.plot', 'plt.plot', (['pr.times[oot]', 'target_fluxes[oot]', '"""."""'], {'label': '"""oot"""'}), "(pr.times[oot], target_fluxes[oot], '.', label='oot')\n", (3572, 3625), True, 'from matplotlib import pyplot as plt\n'), ((3638, 3734), 'matplotlib.pyplot.plot', 'plt.plot', (['pr.times[validation_mask]', 'target_fluxes[validation_mask]', '"""."""'], {'label': '"""validation"""'}), "(pr.times[validation_mask], target_fluxes[validation_mask], '.',\n label='validation')\n", (3646, 3734), True, 'from matplotlib import pyplot as plt\n'), ((3764, 3834), 'matplotlib.pyplot.axvline', 'plt.axvline', (['mid_transit_time.jd'], {'ls': '"""--"""', 'color': '"""r"""', 'label': '"""midtrans"""'}), "(mid_transit_time.jd, ls='--', color='r', label='midtrans')\n", (3775, 3834), True, 'from matplotlib import pyplot as plt\n'), ((3847, 3859), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3857, 3859), True, 'from matplotlib import pyplot as plt\n'), ((3929, 3945), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""JD"""'], {}), "('JD')\n", (3939, 3945), True, 'from matplotlib import pyplot as plt\n'), ((3958, 3976), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Flux"""'], {}), "('Flux')\n", (3968, 3976), True, 'from matplotlib import pyplot as plt\n'), ((3989, 3999), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3997, 3999), True, 'from matplotlib import pyplot as plt\n'), ((4974, 4998), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_comp'}), '(n_components=n_comp)\n', (4977, 4998), False, 'from sklearn.decomposition import PCA\n'), ((5210, 5268), 'numpy.hstack', 'np.hstack', (['[ones[out_of_transit_mask], reduced_regressors]'], {}), '([ones[out_of_transit_mask], reduced_regressors])\n', (5219, 5268), True, 'import numpy as np\n'), ((5700, 5745), 'numpy.median', 'np.median', 
(['target_fluxes[out_of_transit_mask]'], {}), '(target_fluxes[out_of_transit_mask])\n', (5709, 5745), True, 'import numpy as np\n'), ((5776, 5823), 'numpy.std', 'np.std', (['((lc_training + median_oot) / median_oot)'], {}), '((lc_training + median_oot) / median_oot)\n', (5782, 5823), True, 'import numpy as np\n'), ((6119, 6194), 'numpy.hstack', 'np.hstack', (['[ones[oot_no_validation_mask], reduced_regressors_no_validation]'], {}), '([ones[oot_no_validation_mask], reduced_regressors_no_validation])\n', (6128, 6194), True, 'import numpy as np\n'), ((6703, 6752), 'numpy.std', 'np.std', (['((lc_validation + median_oot) / median_oot)'], {}), '((lc_validation + median_oot) / median_oot)\n', (6709, 6752), True, 'import numpy as np\n'), ((7660, 7686), 'numpy.argmin', 'np.argmin', (['stds_validation'], {}), '(stds_validation)\n', (7669, 7686), True, 'import numpy as np\n'), ((7724, 7736), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7734, 7736), True, 'from matplotlib import pyplot as plt\n'), ((7749, 7808), 'matplotlib.pyplot.plot', 'plt.plot', (['n_components', 'stds_validation'], {'label': '"""validation"""'}), "(n_components, stds_validation, label='validation')\n", (7757, 7808), True, 'from matplotlib import pyplot as plt\n'), ((7821, 7876), 'matplotlib.pyplot.plot', 'plt.plot', (['n_components', 'stds_training'], {'label': '"""training"""'}), "(n_components, stds_training, label='training')\n", (7829, 7876), True, 'from matplotlib import pyplot as plt\n'), ((7889, 7913), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Components"""'], {}), "('Components')\n", (7899, 7913), True, 'from matplotlib import pyplot as plt\n'), ((7926, 7943), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""std"""'], {}), "('std')\n", (7936, 7943), True, 'from matplotlib import pyplot as plt\n'), ((7956, 8006), 'matplotlib.pyplot.axvline', 'plt.axvline', (['best_n_components'], {'color': '"""r"""', 'ls': '"""--"""'}), "(best_n_components, color='r', ls='--')\n", (7967, 8006), 
True, 'from matplotlib import pyplot as plt\n'), ((8182, 8194), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8192, 8194), True, 'from matplotlib import pyplot as plt\n'), ((9088, 9108), 'numpy.min', 'np.min', (['final_lc_mad'], {}), '(final_lc_mad)\n', (9094, 9108), True, 'import numpy as np\n'), ((1597, 1642), 'numpy.ones_like', 'np.ones_like', (['pr.fluxes[:, 0, aperture_index]'], {}), '(pr.fluxes[:, 0, aperture_index])\n', (1609, 1642), True, 'import numpy as np\n'), ((2707, 2734), 'astropy.time.Time', 'Time', (['pr.times'], {'format': '"""jd"""'}), "(pr.times, format='jd')\n", (2711, 2734), False, 'from astropy.time import Time\n'), ((2805, 2832), 'astropy.time.Time', 'Time', (['pr.times'], {'format': '"""jd"""'}), "(pr.times, format='jd')\n", (2809, 2832), False, 'from astropy.time import Time\n'), ((2983, 3010), 'astropy.time.Time', 'Time', (['pr.times'], {'format': '"""jd"""'}), "(pr.times, format='jd')\n", (2987, 3010), False, 'from astropy.time import Time\n'), ((3152, 3179), 'astropy.time.Time', 'Time', (['pr.times'], {'format': '"""jd"""'}), "(pr.times, format='jd')\n", (3156, 3179), False, 'from astropy.time import Time\n'), ((3882, 3915), 'numpy.count_nonzero', 'np.count_nonzero', (['validation_mask'], {}), '(validation_mask)\n', (3898, 3915), True, 'import numpy as np\n'), ((8941, 8965), 'numpy.median', 'np.median', (['target_fluxes'], {}), '(target_fluxes)\n', (8950, 8965), True, 'import numpy as np\n'), ((9362, 9385), 'numpy.argmin', 'np.argmin', (['final_lc_mad'], {}), '(final_lc_mad)\n', (9371, 9385), True, 'import numpy as np\n'), ((9403, 9417), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9412, 9417), True, 'from matplotlib import pyplot as plt\n'), ((9528, 9551), 'numpy.argmin', 'np.argmin', (['final_lc_mad'], {}), '(final_lc_mad)\n', (9537, 9551), True, 'import numpy as np\n'), ((932, 972), 'numpy.abs', 'np.abs', (['(pr.times - transit_parameters.t0)'], {}), '(pr.times - transit_parameters.t0)\n', (938, 
972), True, 'import numpy as np\n')] |
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import fnmatch
from nnabla.utils.data_iterator import data_iterator
from nnabla.utils.data_source import DataSource
from nnabla.utils.image_utils import imread, imresize
# Accepted raster file extensions for dataset images.
image_extentions = [".png"]
# Maps the Cityscapes file-name suffix (the last "_"-separated token) to its
# slot in the [image, instance, label] triple collected per scene.
file_type_id = {"leftImg8bit": 0, "instanceIds": 1, "labelIds": 2}
##################################
# preprocessing
##################################
def get_cityscape_datalist(args, data_type="train", save_file=False):
    """Collect [image, instance, label] path triples for a Cityscapes split.

    If a cached list file ("./data_list_<data_type>.txt") already exists it
    is read back instead of re-walking the dataset directory tree.

    :param args: object with a ``data_dir`` attribute pointing at the
        Cityscapes root directory.
    :param data_type: dataset split to collect ("train", "val", ...); only
        directories whose path contains this string are scanned.
    :param save_file: when True, write the collected list to the cache file.
    :return: list of [image_path, instance_path, label_path] triples.
    :raises ValueError: if ``args.data_dir`` does not exist, or (while
        saving) a scene is missing one of its three files.
    """
    list_path = os.path.abspath("./data_list_{}.txt".format(data_type))
    if os.path.exists(list_path):
        # Fast path: reuse the previously generated cache file.
        with open(list_path, "r") as f:
            lines = f.readlines()
        return [line.strip().split(",") for line in lines]
    root_dir_path = os.path.abspath(args.data_dir)
    if not os.path.exists(root_dir_path):
        raise ValueError(
            "path for data_dir doesn't exist. ({})".format(args.data_dir))
    # str.endswith accepts a tuple of suffixes. The previous
    # ``filename.endswith(*image_extentions)`` unpacking only worked by
    # accident while the list held a single entry; with a second extension
    # it would be interpreted as the ``start`` index and raise TypeError.
    valid_extensions = tuple(image_extentions)
    collections = {}
    for dirpath, dirnames, filenames in os.walk(root_dir_path):
        # really naive...
        if not fnmatch.fnmatch(dirpath, "*{}*".format(data_type)):
            continue
        images = [filename for filename in filenames
                  if filename.endswith(valid_extensions)]
        if len(images) > 0:
            for image in images:
                # e.g. "aachen_000000_000019_leftImg8bit.png" ->
                # key "aachen_000000_000019", file_type "leftImg8bit".
                key = "_".join(image.split("_")[:3])
                file_type = image.split("_")[-1].split(".")[0]
                if file_type not in file_type_id:
                    continue
                image_path = os.path.join(dirpath, image)
                if key not in collections:
                    collections[key] = [None, None, None]
                collections[key][file_type_id[file_type]] = image_path
    outs = collections.values()
    if save_file:
        write_outs = []
        for path_list in outs:
            if None in path_list:
                raise ValueError(
                    "unexpected error is happened during setting up dataset.")
            write_outs.append(",".join(path_list))
        with open(list_path, "w") as f:
            f.write("\n".join(write_outs))
    return list(outs)
##################################################
# data loader / iterator
##################################################
def load_function(image_path, inst_path, label_path, image_shape):
    """Read one (image, instance map, label map) triple from disk, resize it
    to ``image_shape`` if needed, and normalize the image to [-1, 1]."""
    # naive image read implementation
    image = imread(image_path, channel_first=True)
    inst_map = imread(inst_path, as_uint16=True)
    label_map = imread(label_path)
    if image.shape[1:] != image_shape:
        # imresize expects (width, height) rather than (height, width).
        target_size = (image_shape[1], image_shape[0])
        image = imresize(image, target_size, channel_first=True)
        inst_map = imresize(inst_map, target_size)
        label_map = imresize(label_map, target_size)
    # Map pixel values from [0, 255] to [-1, 1].
    return (image - 127.5) / 127.5, inst_map, label_map
class CityScapesIterator(DataSource):
    """Data source yielding (image, instance_id, label_id) samples from a
    list of Cityscapes path triples, with optional random horizontal flip."""
    def __init__(self, data_list, image_shape=(1024, 2048), shuffle=True, rng=None, flip=True):
        """
        :param data_list: list of [image, inst, label] path triples
        :param image_shape: (height, width) the samples are resized to
        :param shuffle: reshuffle sample order on every reset/epoch
        :param rng: random state forwarded to DataSource
        :param flip: randomly mirror samples left-right with p=0.5
        """
        super(CityScapesIterator, self).__init__(shuffle=shuffle, rng=rng)
        self._data_list = data_list  # [[image, inst, label], ...]
        self._image_shape = image_shape
        self._size = len(self._data_list)
        self._variables = ("image", "instance_id", "label_id")
        self.flip = flip
        self.reset()
    def reset(self):
        """Start a new epoch: reshuffle (or re-sequence) the sample order."""
        # self._rng is provided by the DataSource base class.
        self._idxs = self._rng.permutation(
            self._size) if self.shuffle else np.arange(self._size)
        super(CityScapesIterator, self).reset()
    def __iter__(self):
        self.reset()
        return self
    def _get_data(self, position):
        """Load and return the sample at the (shuffled) position."""
        i = self._idxs[position]
        image_path, inst_path, label_path = self._data_list[i]
        image, inst_map, label_map = load_function(
            image_path, inst_path, label_path, self._image_shape)
        if self.flip:
            # Mirror all three arrays along the width axis with p = 0.5.
            if np.random.rand() > 0.5:
                image = image[..., ::-1]
                inst_map = inst_map[..., ::-1]
                label_map = label_map[..., ::-1]
        return image, inst_map, label_map
def create_data_iterator(batch_size, data_list, image_shape, shuffle=True, rng=None,
                         with_memory_cache=False, with_parallel=False, with_file_cache=False, flip=True):
    """Wrap a CityScapesIterator in nnabla's batched data_iterator.

    :param batch_size: number of samples per batch
    :param data_list: list of [image, inst, label] path triples
    :param image_shape: (height, width) the samples are resized to
    :param shuffle: reshuffle sample order each epoch
    :param rng: random state for shuffling/flipping
    :param with_memory_cache / with_parallel / with_file_cache: forwarded
        positionally to nnabla's data_iterator
    :param flip: enable random horizontal flip augmentation
    """
    return data_iterator(CityScapesIterator(data_list, image_shape, shuffle=shuffle, rng=rng, flip=flip),
                         batch_size,
                         with_memory_cache,
                         with_parallel,
                         with_file_cache)
| [
"os.path.exists",
"nnabla.utils.image_utils.imresize",
"numpy.random.rand",
"numpy.arange",
"os.path.join",
"os.path.abspath",
"nnabla.utils.image_utils.imread",
"os.walk"
] | [((1142, 1167), 'os.path.exists', 'os.path.exists', (['list_path'], {}), '(list_path)\n', (1156, 1167), False, 'import os\n'), ((1324, 1354), 'os.path.abspath', 'os.path.abspath', (['args.data_dir'], {}), '(args.data_dir)\n', (1339, 1354), False, 'import os\n'), ((1560, 1582), 'os.walk', 'os.walk', (['root_dir_path'], {}), '(root_dir_path)\n', (1567, 1582), False, 'import os\n'), ((2956, 2994), 'nnabla.utils.image_utils.imread', 'imread', (['image_path'], {'channel_first': '(True)'}), '(image_path, channel_first=True)\n', (2962, 2994), False, 'from nnabla.utils.image_utils import imread, imresize\n'), ((3011, 3044), 'nnabla.utils.image_utils.imread', 'imread', (['inst_path'], {'as_uint16': '(True)'}), '(inst_path, as_uint16=True)\n', (3017, 3044), False, 'from nnabla.utils.image_utils import imread, imresize\n'), ((3062, 3080), 'nnabla.utils.image_utils.imread', 'imread', (['label_path'], {}), '(label_path)\n', (3068, 3080), False, 'from nnabla.utils.image_utils import imread, imresize\n'), ((1366, 1395), 'os.path.exists', 'os.path.exists', (['root_dir_path'], {}), '(root_dir_path)\n', (1380, 1395), False, 'import os\n'), ((3244, 3293), 'nnabla.utils.image_utils.imresize', 'imresize', (['image', 'resize_shape'], {'channel_first': '(True)'}), '(image, resize_shape, channel_first=True)\n', (3252, 3293), False, 'from nnabla.utils.image_utils import imread, imresize\n'), ((3313, 3345), 'nnabla.utils.image_utils.imresize', 'imresize', (['inst_map', 'resize_shape'], {}), '(inst_map, resize_shape)\n', (3321, 3345), False, 'from nnabla.utils.image_utils import imread, imresize\n'), ((3366, 3399), 'nnabla.utils.image_utils.imresize', 'imresize', (['label_map', 'resize_shape'], {}), '(label_map, resize_shape)\n', (3374, 3399), False, 'from nnabla.utils.image_utils import imread, imresize\n'), ((4088, 4109), 'numpy.arange', 'np.arange', (['self._size'], {}), '(self._size)\n', (4097, 4109), True, 'import numpy as np\n'), ((2094, 2122), 'os.path.join', 'os.path.join', 
(['dirpath', 'image'], {}), '(dirpath, image)\n', (2106, 2122), False, 'import os\n'), ((4515, 4531), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4529, 4531), True, 'import numpy as np\n')] |
import configparser
from datetime import datetime
from math import cos
from skimage import filters
from skimage import measure
from math import radians
from scipy.interpolate import splprep, splev
import numpy as np
import pandas as pd
import scipy.ndimage as img
""" Tools to manipulate and analyze data """
def canopy_cover(data, radius):
    """Count leaf-bearing cells in a square window around every grid cell.

    For each cell of every 2D slice along the third axis, counts how many
    cells in the surrounding (2*radius+1) x (2*radius+1) window hold a
    positive (leaf) height value.  Boundaries use scipy's default
    'reflect' mode.

    Unlike the previous implementation, the caller's array is NOT modified:
    the old code thresholded a *view* of ``data`` in place, silently
    overwriting the height values with a binary mask.

    :param data: 3D numpy array of height data (rows x cols x dates)
    :param radius: radius of the region (in number of 0.1 m squares)
    :return: integer array, same shape as ``data``, of per-cell leaf counts
    """
    counts = np.zeros_like(data, int)
    kernel = np.ones((radius * 2 + 1, radius * 2 + 1))
    for x in range(data.shape[2]):
        # Binary presence mask built on a copy so `data` stays intact.
        presence = (data[:, :, x] > 0).astype(int)
        # scipy.ndimage.convolve replaces the deprecated
        # scipy.ndimage.filters namespace used elsewhere in this module.
        counts[:, :, x] = img.convolve(presence, kernel)
    return counts
def create_path_splines(points, data_shape, converter):
    """Create a manageable path spline from the thousands of GPS datapoints.

    :param points: DataFrame whose first two columns are latitude and
        longitude (iterated via ``itertuples``)
    :param data_shape: shape of the data grid; retained for interface
        compatibility (it is not needed by the fit itself)
    :param converter: object exposing ``lat_to_y()`` / ``lng_to_x()`` that
        maps GPS coordinates to data-grid coordinates
    :return: the ``tck`` knot/coefficient/degree tuple from
        `scipy.interpolate.splprep`
    """
    # Convert each GPS fix into integer data-grid coordinates.
    path_pts = []
    for p in points.itertuples():
        data_y = int(converter.lat_to_y(p[1]))
        data_x = int(converter.lng_to_x(p[2]))
        path_pts.append([data_x, data_y])
    # Remove duplicate points, keeping the original traversal order.
    _, ind = np.unique(path_pts, axis=0, return_index=True)
    path_pts = [path_pts[i] for i in sorted(ind)]
    # Fit an exact (s=0) parametric spline through the remaining points.
    # noinspection PyTupleAssignmentBalance
    tck, u = splprep(np.transpose(path_pts), u=None, s=0.0)
    return tck
def create_stress_map(height, canopy, rad, threshold):
    """Create map showing frequently stressed areas of plot.

    Layer ``i`` of the result is the mean of the per-date stress masks for
    all dates up to and including date ``i``, so areas that come up
    stressed repeatedly stand out.

    :param height: 3D array (rows x cols x dates) of height data
    :param canopy: 3D array (rows x cols x dates) of canopy-cover data
    :param rad: radius used to average and desample the data
    :param threshold: percentile threshold passed to the mask builder
    :return: 3D map of stress frequencies, one layer per date
    """
    # Create the suggestion mask for each snapshot.
    stress_dates = create_suggestion_mask_v2(height, canopy, rad, threshold)
    # Running mean over the date axis.  A single cumulative sum makes this
    # O(n) in the number of dates instead of re-summing every prefix
    # (O(n^2)); the masks hold small exact values, so results are identical.
    stress_map = np.zeros_like(stress_dates)
    running_sum = np.cumsum(stress_dates, axis=2)
    for i in range(stress_dates.shape[2]):
        stress_map[:, :, i] = running_sum[:, :, i] / (i + 1)
    return stress_map
def create_suggestion_mask_v1(d, rad, threshold):
"""Keep this here for a little while, then delete it. Suggestion mask v1
Uses statistical methods to determine outliers below the general
population of the data, and uses image processing techniques to discount
the edges of the plot from skewing the results.
:param d: The input data to create the mask
:param rad: The radius to average and desample the data
:param threshold: The percentile above which points will be filtered
:return: The mask from which suggested points are chosen
"""
# Create a new copy of the data to work on
data = np.copy(d)
# filter out data less than zero
data[data < 0] = 0
# Calculates each point as sum of nearby values within radius r
c = np.zeros_like(data, float)
kernel = np.ones((rad * 2 + 1, rad * 2 + 1))
for x in range(data.shape[2]):
conv = img.filters.convolve(data[:, :, x], kernel)
c[:, :, x] = conv
# Downsample array into pixels with same size as convolve
c = c[::rad * 2 + 1, ::rad * 2 + 1, :]
fullmask = np.zeros_like(d)
for i in range(c.shape[2]):
# Extract the ith layer of data
mask = c[:, :, i]
# Use image processing morphology to smooth out data
mask = img.grey_closing(mask, structure=np.ones((3, 3)))
# Use Sobel edge detection to decrease weight of edges
gx = img.sobel(mask, axis=0)
gy = img.sobel(mask, axis=1)
grad = np.hypot(gx, gy)
grad = (np.divide(grad, np.amax(grad))) * 100
mask = (np.divide(mask, np.amax(mask))) * 100
mask -= grad
# Calculate the threshold percentile, ignoring zeros
mask[mask <= 0] = np.nan
percent = np.nanpercentile(mask, threshold)
mask = np.nan_to_num(mask)
# Filter out data and create mask
mask[mask > percent] = 0
mask[mask > 0] = 1
# Perform binary opening to remove small regions
mask = img.binary_opening(mask)
# Rescale mask to fit data size
scale = np.divide(fullmask[:, :, 0].shape, mask.shape)
fullmask[:, :, i] = img.zoom(mask, scale, order=0)
return fullmask
def create_suggestion_mask_v2(height, canopy, rad=4, threshold=(20, 40)):
# Copy the data
height_data = np.copy(height)
# Silence isolated points (low canopy)
height_data[canopy < 5] = 0
# Downscale dataset to 0.5m squares, taking the max within each
height_data = downscale_max(height_data, rad)
# Place points into stress levels
stress_data = np.zeros_like(height_data)
for x in range(stress_data.shape[2]):
stress_layer = stress_data[:, :, x]
height_layer = height_data[:, :, x]
high_med_stress = np.percentile(height_layer[np.nonzero(height_layer)],
threshold[0])
med_low_stress = np.percentile(height_layer[np.nonzero(height_layer)],
threshold[1])
stress_layer[height_layer >= med_low_stress] = 0.01 # Low
height_layer[stress_layer > 0] = 0 # silence low points
stress_layer[height_layer >= high_med_stress] = 0.5 # Medium
height_layer[stress_layer > 0] = 0 # silence med points
stress_layer[0 < height_layer] = 0.99 # High
stress_data[:, :, x] = stress_layer
stress_data = rescale_like(stress_data, height)
return stress_data
def define_regions(data, rad):
"""Identify regions of high stress areas and """
region_map = np.copy(data)
region_map = region_map[::rad * 2 + 1, ::rad * 2 + 1]
val = filters.threshold_otsu(region_map)
mask = region_map > val
mask = img.binary_opening(mask, iterations=2)
scale = np.divide(data.shape, mask.shape)
mask = img.zoom(mask, scale, order=0)
labels = measure.label(mask, background=0)
regions = img.find_objects(labels)
small_regions = []
for i in range(len(regions)):
if np.nonzero(labels == i + 1)[0].size <= 500:
labels[regions[i]] = 0
small_regions.append(i)
for i in small_regions[::-1]:
del regions[i]
return StressMapWrapper(labels, regions)
def downscale_avg(data, radius):
# Calculates each point as sum of nearby values within radius r
diam = 2 * radius + 1
kernel = np.ones((diam, diam))
fullmap = np.zeros_like(data, float)
for x in range(data.shape[2]):
conv = img.filters.convolve(data[:, :, x], kernel)
fullmap[:, :, x] = conv
# Downsample array into pixels with same size as convolve
fullmap = fullmap[::diam, ::diam, :]
return fullmap
def downscale_max(data, radius):
# Turn radius into diameter centered at original point
diam = 2 * radius + 1
fullmap = np.zeros_like(data[::diam, ::diam, :], float)
for x in range(data.shape[2]):
layer = np.zeros_like(data[::diam, ::diam, 0])
for r in range((int(data.shape[0] / diam)) - 1):
for c in range((int(data.shape[1] / diam)) - 1):
selection = data[(r*diam):(r*diam + diam),
(c*diam):(c*diam + diam), x]
max_val = np.amax(selection)
layer[r, c] = max_val
fullmap[:, :, x] = layer
return fullmap
def evaluate_path_spline(spline, num_points):
u_vals = np.linspace(0, 1, num_points)
pts = splev(u_vals, spline)
pts = pd.DataFrame(np.transpose(pts))
return pts
def filter_outliers(data, rmin = 0.2, rmax = 1.0):
"""Sets outliers outside user defined range to zero.
Calculates the average and standard deviation of the dataset. Filters out
all data below rmin * std and avg + rmax * std.
:param data: 2D numpy array of data to filter
:param rmin: data within (r * standard deviation) of zero is set to zero
:param rmax: data above (average + r * standard deviation) is set to zero
:return: filtered data
"""
for x in range(data.shape[2]):
d = np.nan_to_num(data[:, :, x])
# Silence negative values
d[d < 0] = 0
# Calculate average and std
avg = np.average(d[np.nonzero(d)])
std = np.std(d[np.nonzero(d)])
d[np.absolute(d) < avg - (rmin * std)] = 0 # filter points below avg
d[np.absolute(d) > avg + (rmax * std)] = 0 # filter points above avg
data[:, :, x] = d
return data
def rescale_like(data, like):
# Rescale mask to fit data size
# scale = np.divide(like[:, :, 0].shape, data[:, :, 0].shape)
scale = np.true_divide(like[:, :, 0].shape, data[:, :, 0].shape)
fullmap = np.zeros_like(like, float)
for x in range(data.shape[2]):
fullmap[:, :, x] = img.zoom(data[:, :, x], scale, order=0)
return fullmap
class DataSet4D:
"""This class contains all of the data for a particular mode.
This class is responsible for handling the datasets for each different
mode or filter. It has behaviors to change the date, perform statistics,
and manipulate the data to some extent."""
def __init__(self, data, dates):
# Expand dimensions if necessary
if len(data.shape) == 2:
data = np.expand_dims(data, axis=2)
self.data = np.nan_to_num(data)
self.dates = sorted(dates)
self.filter = filter
date_length = self.dates.__len__()
data_length = self.data.shape[2]
# Duplicate last data element to match date length
while data_length < date_length:
self.data = np.concatenate((self.data,
np.expand_dims(data[:, :, -1], axis=2)),
axis=2)
data_length += 1
self._n_samples = self.data.shape[2]
self._active_sample = 0
self._active_data = self.data[:, :, self._active_sample]
self.max_val = 0
self.min_val = 0
self.average = 0
self.std_dev = 0
self.pct_coverage = 0
self.refresh_statistics()
def get_data(self):
"""Get the 2D array of the map at the current date."""
data = self._active_data
return data
def get_date(self):
"""Return the current date as a datetime object."""
return self.dates[self._active_sample]
def get_dates(self):
"""Return the backing array of datetime objects."""
return self.dates
def get_date_ind(self):
"""Return the index for the current date."""
return self._active_sample
def set_dates(self, dataset):
"""Copy the date object from another dataset."""
self._n_samples = dataset.data.shape[2]
self.dates = dataset.get_dates()
def derivative(self):
"""Take the derivative of the dataset over time."""
# If there is only one data set, return unchanged
if self.data.shape[2] == 1:
return self.data
derivatives = np.empty([self.data.shape[0], self.data.shape[1],
self._n_samples - 1])
diff = self.data[:, :, 1::] - self.data[:, :, 0:-1]
for i in range(len(self.dates) - 1):
date_interval = self.dates[i + 1] - self.dates[i]
derivatives[:, :, i] = np.divide(diff[:, :, i], date_interval.days)
return derivatives
def next_data(self):
"""Advance the active data to the next date, if possible."""
if self._active_sample >= self._n_samples - 1:
self._active_sample = 0
else:
self._active_sample += 1
self._active_data = self.data[:, :, self._active_sample]
self.refresh_statistics()
def prev_data(self):
"""Reqind the active data to the previous date, if possible."""
if self._active_sample == 0:
self._active_sample = self._n_samples - 1
else:
self._active_sample -= 1
self._active_data = self.data[:, :, self._active_sample]
self.refresh_statistics()
def refresh_statistics(self):
"""Recalculate statistics for this dataset."""
self.max_val = np.max(self._active_data)
self.min_val = np.min(self._active_data)
self.average = np.average(self._active_data[np.nonzero(
self._active_data)])
self.std_dev = np.std(self._active_data[np.nonzero(self._active_data)])
self.pct_coverage = np.count_nonzero(self._active_data)\
/ (self._active_data.shape[0] * self._active_data.shape[1]) * 100
def reset_date(self):
"""Reset the data to the first date in the dataset."""
self._active_sample = 0
self._active_data = self.data[:, :, 0]
class LatLngConverter:
"""This utility class converts x and y positions to coordinates."""
def __init__(self, config):
self.lng0 = float(config['GPS']['origin_lng'])
self.lat0 = float(config['GPS']['origin_lat'])
self.origin = (self.lat0, self.lng0)
self.diam = float(config['GPS']['size'])
def data_to_latlng(self, points):
latlng = pd.DataFrame.copy(points)
latlng.columns = ["field.latitude", "field.longitude"]
for pt in points.itertuples():
latlng["field.latitude"][pt[0]] = self.y_to_lat(pt[2])
latlng["field.longitude"][pt[0]] = self.x_to_lng(pt[1])
return latlng
def lat_to_y(self, lat):
# Inverse of above function
d_lat = self.lat0 - lat
m = d_lat * 111111
y = m / self.diam
return y
def lng_to_x(self, lng):
# Inverse of above function
d_lng = self.lng0 - lng
m = d_lng * (111111 * cos(radians(self.lat0)))
x = m / self.diam
return x
def x_to_lng(self, x):
# Convert data point to distance in meters
m = x * self.diam
# Convert data point to longitude with shortcut:
# 1 deg lng = 111111 * cos(lat) * m
d_lng = m / (111111 * cos(radians(self.lat0)))
# Determine new longitude from base longitude
return self.lng0 + d_lng
def y_to_lat(self, y):
# Convert data point to distance in meters
m = y * self.diam
# Convert data point to latitude with shortcut:
# 1 deg lng = 111111
d_lng = m / 111111
# Determine new longitude from base longitude
return self.lat0 + d_lng
class StressMapWrapper:
def __init__(self, stress_map, regions):
self.map = stress_map
self.regions = regions
if __name__ == '__main__':
'''Simple script to convert the lat/lng of a point relative to a given
anchor'''
config = configparser.ConfigParser()
config['GPS'] = {'origin_lat': '31.52036604680005',
'origin_lng': '-83.54861912284196',
'size': '0.2'}
convert = LatLngConverter(config)
x = -296
y = -601
print("lat: " + str(convert.y_to_lat(y)))
print("lng: " + str(convert.x_to_lng(x)))
| [
"configparser.ConfigParser",
"numpy.nanpercentile",
"skimage.filters.threshold_otsu",
"numpy.count_nonzero",
"scipy.ndimage.binary_opening",
"scipy.ndimage.zoom",
"numpy.divide",
"scipy.ndimage.find_objects",
"numpy.max",
"numpy.linspace",
"scipy.interpolate.splev",
"numpy.empty",
"numpy.min... | [((725, 749), 'numpy.zeros_like', 'np.zeros_like', (['data', 'int'], {}), '(data, int)\n', (738, 749), True, 'import numpy as np\n'), ((763, 804), 'numpy.ones', 'np.ones', (['(radius * 2 + 1, radius * 2 + 1)'], {}), '((radius * 2 + 1, radius * 2 + 1))\n', (770, 804), True, 'import numpy as np\n'), ((1513, 1535), 'numpy.transpose', 'np.transpose', (['path_pts'], {}), '(path_pts)\n', (1525, 1535), True, 'import numpy as np\n'), ((1646, 1692), 'numpy.unique', 'np.unique', (['path_pts'], {'axis': '(0)', 'return_index': '(True)'}), '(path_pts, axis=0, return_index=True)\n', (1655, 1692), True, 'import numpy as np\n'), ((3025, 3052), 'numpy.zeros_like', 'np.zeros_like', (['stress_dates'], {}), '(stress_dates)\n', (3038, 3052), True, 'import numpy as np\n'), ((3898, 3908), 'numpy.copy', 'np.copy', (['d'], {}), '(d)\n', (3905, 3908), True, 'import numpy as np\n'), ((4047, 4073), 'numpy.zeros_like', 'np.zeros_like', (['data', 'float'], {}), '(data, float)\n', (4060, 4073), True, 'import numpy as np\n'), ((4087, 4122), 'numpy.ones', 'np.ones', (['(rad * 2 + 1, rad * 2 + 1)'], {}), '((rad * 2 + 1, rad * 2 + 1))\n', (4094, 4122), True, 'import numpy as np\n'), ((4365, 4381), 'numpy.zeros_like', 'np.zeros_like', (['d'], {}), '(d)\n', (4378, 4381), True, 'import numpy as np\n'), ((5588, 5603), 'numpy.copy', 'np.copy', (['height'], {}), '(height)\n', (5595, 5603), True, 'import numpy as np\n'), ((5856, 5882), 'numpy.zeros_like', 'np.zeros_like', (['height_data'], {}), '(height_data)\n', (5869, 5882), True, 'import numpy as np\n'), ((6830, 6843), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (6837, 6843), True, 'import numpy as np\n'), ((6913, 6947), 'skimage.filters.threshold_otsu', 'filters.threshold_otsu', (['region_map'], {}), '(region_map)\n', (6935, 6947), False, 'from skimage import filters\n'), ((6988, 7026), 'scipy.ndimage.binary_opening', 'img.binary_opening', (['mask'], {'iterations': '(2)'}), '(mask, iterations=2)\n', (7006, 7026), True, 'import 
scipy.ndimage as img\n'), ((7040, 7073), 'numpy.divide', 'np.divide', (['data.shape', 'mask.shape'], {}), '(data.shape, mask.shape)\n', (7049, 7073), True, 'import numpy as np\n'), ((7085, 7115), 'scipy.ndimage.zoom', 'img.zoom', (['mask', 'scale'], {'order': '(0)'}), '(mask, scale, order=0)\n', (7093, 7115), True, 'import scipy.ndimage as img\n'), ((7130, 7163), 'skimage.measure.label', 'measure.label', (['mask'], {'background': '(0)'}), '(mask, background=0)\n', (7143, 7163), False, 'from skimage import measure\n'), ((7178, 7202), 'scipy.ndimage.find_objects', 'img.find_objects', (['labels'], {}), '(labels)\n', (7194, 7202), True, 'import scipy.ndimage as img\n'), ((7635, 7656), 'numpy.ones', 'np.ones', (['(diam, diam)'], {}), '((diam, diam))\n', (7642, 7656), True, 'import numpy as np\n'), ((7671, 7697), 'numpy.zeros_like', 'np.zeros_like', (['data', 'float'], {}), '(data, float)\n', (7684, 7697), True, 'import numpy as np\n'), ((8084, 8129), 'numpy.zeros_like', 'np.zeros_like', (['data[::diam, ::diam, :]', 'float'], {}), '(data[::diam, ::diam, :], float)\n', (8097, 8129), True, 'import numpy as np\n'), ((8659, 8688), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'num_points'], {}), '(0, 1, num_points)\n', (8670, 8688), True, 'import numpy as np\n'), ((8699, 8720), 'scipy.interpolate.splev', 'splev', (['u_vals', 'spline'], {}), '(u_vals, spline)\n', (8704, 8720), False, 'from scipy.interpolate import splprep, splev\n'), ((9859, 9915), 'numpy.true_divide', 'np.true_divide', (['like[:, :, 0].shape', 'data[:, :, 0].shape'], {}), '(like[:, :, 0].shape, data[:, :, 0].shape)\n', (9873, 9915), True, 'import numpy as np\n'), ((9931, 9957), 'numpy.zeros_like', 'np.zeros_like', (['like', 'float'], {}), '(like, float)\n', (9944, 9957), True, 'import numpy as np\n'), ((15981, 16008), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (16006, 16008), False, 'import configparser\n'), ((902, 933), 'scipy.ndimage.filters.convolve', 
'img.filters.convolve', (['d', 'kernel'], {}), '(d, kernel)\n', (922, 933), True, 'import scipy.ndimage as img\n'), ((1876, 1898), 'numpy.transpose', 'np.transpose', (['path_pts'], {}), '(path_pts)\n', (1888, 1898), True, 'import numpy as np\n'), ((3126, 3169), 'numpy.sum', 'np.sum', (['stress_dates[:, :, 0:i + 1]'], {'axis': '(2)'}), '(stress_dates[:, :, 0:i + 1], axis=2)\n', (3132, 3169), True, 'import numpy as np\n'), ((3200, 3237), 'numpy.divide', 'np.divide', (['stress_map[:, :, i]', '(i + 1)'], {}), '(stress_map[:, :, i], i + 1)\n', (3209, 3237), True, 'import numpy as np\n'), ((4173, 4216), 'scipy.ndimage.filters.convolve', 'img.filters.convolve', (['data[:, :, x]', 'kernel'], {}), '(data[:, :, x], kernel)\n', (4193, 4216), True, 'import scipy.ndimage as img\n'), ((4684, 4707), 'scipy.ndimage.sobel', 'img.sobel', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (4693, 4707), True, 'import scipy.ndimage as img\n'), ((4721, 4744), 'scipy.ndimage.sobel', 'img.sobel', (['mask'], {'axis': '(1)'}), '(mask, axis=1)\n', (4730, 4744), True, 'import scipy.ndimage as img\n'), ((4760, 4776), 'numpy.hypot', 'np.hypot', (['gx', 'gy'], {}), '(gx, gy)\n', (4768, 4776), True, 'import numpy as np\n'), ((5019, 5052), 'numpy.nanpercentile', 'np.nanpercentile', (['mask', 'threshold'], {}), '(mask, threshold)\n', (5035, 5052), True, 'import numpy as np\n'), ((5068, 5087), 'numpy.nan_to_num', 'np.nan_to_num', (['mask'], {}), '(mask)\n', (5081, 5087), True, 'import numpy as np\n'), ((5264, 5288), 'scipy.ndimage.binary_opening', 'img.binary_opening', (['mask'], {}), '(mask)\n', (5282, 5288), True, 'import scipy.ndimage as img\n'), ((5346, 5392), 'numpy.divide', 'np.divide', (['fullmask[:, :, 0].shape', 'mask.shape'], {}), '(fullmask[:, :, 0].shape, mask.shape)\n', (5355, 5392), True, 'import numpy as np\n'), ((5421, 5451), 'scipy.ndimage.zoom', 'img.zoom', (['mask', 'scale'], {'order': '(0)'}), '(mask, scale, order=0)\n', (5429, 5451), True, 'import scipy.ndimage as img\n'), ((7748, 
7791), 'scipy.ndimage.filters.convolve', 'img.filters.convolve', (['data[:, :, x]', 'kernel'], {}), '(data[:, :, x], kernel)\n', (7768, 7791), True, 'import scipy.ndimage as img\n'), ((8182, 8220), 'numpy.zeros_like', 'np.zeros_like', (['data[::diam, ::diam, 0]'], {}), '(data[::diam, ::diam, 0])\n', (8195, 8220), True, 'import numpy as np\n'), ((8745, 8762), 'numpy.transpose', 'np.transpose', (['pts'], {}), '(pts)\n', (8757, 8762), True, 'import numpy as np\n'), ((9309, 9337), 'numpy.nan_to_num', 'np.nan_to_num', (['data[:, :, x]'], {}), '(data[:, :, x])\n', (9322, 9337), True, 'import numpy as np\n'), ((10020, 10059), 'scipy.ndimage.zoom', 'img.zoom', (['data[:, :, x]', 'scale'], {'order': '(0)'}), '(data[:, :, x], scale, order=0)\n', (10028, 10059), True, 'import scipy.ndimage as img\n'), ((10547, 10566), 'numpy.nan_to_num', 'np.nan_to_num', (['data'], {}), '(data)\n', (10560, 10566), True, 'import numpy as np\n'), ((12257, 12328), 'numpy.empty', 'np.empty', (['[self.data.shape[0], self.data.shape[1], self._n_samples - 1]'], {}), '([self.data.shape[0], self.data.shape[1], self._n_samples - 1])\n', (12265, 12328), True, 'import numpy as np\n'), ((13428, 13453), 'numpy.max', 'np.max', (['self._active_data'], {}), '(self._active_data)\n', (13434, 13453), True, 'import numpy as np\n'), ((13477, 13502), 'numpy.min', 'np.min', (['self._active_data'], {}), '(self._active_data)\n', (13483, 13502), True, 'import numpy as np\n'), ((14406, 14431), 'pandas.DataFrame.copy', 'pd.DataFrame.copy', (['points'], {}), '(points)\n', (14423, 14431), True, 'import pandas as pd\n'), ((10497, 10525), 'numpy.expand_dims', 'np.expand_dims', (['data'], {'axis': '(2)'}), '(data, axis=2)\n', (10511, 10525), True, 'import numpy as np\n'), ((12564, 12608), 'numpy.divide', 'np.divide', (['diff[:, :, i]', 'date_interval.days'], {}), '(diff[:, :, i], date_interval.days)\n', (12573, 12608), True, 'import numpy as np\n'), ((4590, 4605), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', 
(4597, 4605), True, 'import numpy as np\n'), ((4809, 4822), 'numpy.amax', 'np.amax', (['grad'], {}), '(grad)\n', (4816, 4822), True, 'import numpy as np\n'), ((4863, 4876), 'numpy.amax', 'np.amax', (['mask'], {}), '(mask)\n', (4870, 4876), True, 'import numpy as np\n'), ((6067, 6091), 'numpy.nonzero', 'np.nonzero', (['height_layer'], {}), '(height_layer)\n', (6077, 6091), True, 'import numpy as np\n'), ((6200, 6224), 'numpy.nonzero', 'np.nonzero', (['height_layer'], {}), '(height_layer)\n', (6210, 6224), True, 'import numpy as np\n'), ((8486, 8504), 'numpy.amax', 'np.amax', (['selection'], {}), '(selection)\n', (8493, 8504), True, 'import numpy as np\n'), ((9458, 9471), 'numpy.nonzero', 'np.nonzero', (['d'], {}), '(d)\n', (9468, 9471), True, 'import numpy as np\n'), ((9497, 9510), 'numpy.nonzero', 'np.nonzero', (['d'], {}), '(d)\n', (9507, 9510), True, 'import numpy as np\n'), ((9524, 9538), 'numpy.absolute', 'np.absolute', (['d'], {}), '(d)\n', (9535, 9538), True, 'import numpy as np\n'), ((9602, 9616), 'numpy.absolute', 'np.absolute', (['d'], {}), '(d)\n', (9613, 9616), True, 'import numpy as np\n'), ((13556, 13585), 'numpy.nonzero', 'np.nonzero', (['self._active_data'], {}), '(self._active_data)\n', (13566, 13585), True, 'import numpy as np\n'), ((13671, 13700), 'numpy.nonzero', 'np.nonzero', (['self._active_data'], {}), '(self._active_data)\n', (13681, 13700), True, 'import numpy as np\n'), ((13731, 13766), 'numpy.count_nonzero', 'np.count_nonzero', (['self._active_data'], {}), '(self._active_data)\n', (13747, 13766), True, 'import numpy as np\n'), ((7272, 7299), 'numpy.nonzero', 'np.nonzero', (['(labels == i + 1)'], {}), '(labels == i + 1)\n', (7282, 7299), True, 'import numpy as np\n'), ((10908, 10946), 'numpy.expand_dims', 'np.expand_dims', (['data[:, :, -1]'], {'axis': '(2)'}), '(data[:, :, -1], axis=2)\n', (10922, 10946), True, 'import numpy as np\n'), ((14995, 15013), 'math.radians', 'radians', (['self.lat0'], {}), '(self.lat0)\n', (15002, 15013), False, 
'from math import radians\n'), ((15301, 15319), 'math.radians', 'radians', (['self.lat0'], {}), '(self.lat0)\n', (15308, 15319), False, 'from math import radians\n')] |
from abc import ABC, abstractmethod
import os
import getpass
import yaml
import copy
import numpy as np
from .utils import (get_time, get_datetime, create_unique_folder,
benchmark_matrix_inverse, benchmark_sha_hashing)
from .procedures import Procedure
from .functions import TestFunction, MLFunction
class Experiment(ABC):
    """
    Base class for performing experiments on Procedures with TestFunctions

    This class allows to test sampling procedures implemented as a derived
    class from the procedures.Procedure class by letting it work on a
    TestFunction derived class instance. It automatically takes care of logging
    (through a Logger instance) and sanity checks.

    Args:
        procedure: An instance of a Procedure derived class that needs to be
            tested in this experiment.
        path: Path to which the experiment should write its logs.
        verbose: Boolean or integer indicating if intermediate output to
            stdout should be provided, indicating how many samples were taken
            and in which procedure call the experiment is. If a boolean, each
            procedure call will get its current sample count outputted. If an
            integer, output will only be given if the number of procedure calls
            is a multiple of said integer.

    Raises:
        Exception: Experiments should be provided an instance of a class
            derived from the procedures.Procedure class.
    """

    def __init__(self, procedure, path, verbose=True):
        if not isinstance(procedure, Procedure):
            raise Exception("""Experiments should be provided an instance of a
                class derived from the procedures.Procedure class.""")
        self.path = path
        self.procedure = procedure
        self.logger = None
        # int() maps booleans to 0/1, so both verbosity conventions are
        # supported by the same modulo check in _perform_experiment.
        self.verbose = int(verbose)

    def _perform_experiment(self, function, log_data=True):
        """
        Run the experiment.

        Calling this method will run the experiment on the provided function.
        It will continue to run as long as the procedure being tested in this
        experiment is not finished (checked through its is_finished method)
        and the stopping criterion implemented in _stop_experiment (configured
        through the .run() method) is not met.

        Args:
            function: Function to run the experiment with. This should be an
                instance of a class with the functions.TestFunction class as
                base class.
            log_data: Boolean indicating if the sampled data should be logged
                as well. It is set to True by default.

        Raises:
            Exception: Provided function should have functions.TestFunction as
                base class.
            Exception: Test function can not be used for the configured
                procedure (procedure.check_testfunction rejected it).
        """
        # Test if function is a TestFunction instance
        if not isinstance(function, TestFunction):
            raise Exception("""Provided function should have
                functions.TestFunction as base class.""")
        # Test if the procedure can run on the provided test function
        if not self.procedure.check_testfunction(function):
            raise Exception(
                """Test function '{}' can not be used for '{}'. Ignoring and
                continuing.""".format(function.name,
                                      type(self.procedure).__name__))
        # Start experiment
        print("Run experiment for '{}' on function '{}'...".format(
            type(self.procedure).__name__, function.name))
        self._event_start_experiment()
        # Setup logger
        self.logger = Logger(self.path, function.name.lower())
        self.logger.log_experiment(self, function)
        self.logger.log_benchmarks()
        # Make function available both to the Experiment and the Procedure
        self.function = function
        self.procedure.reset()
        self.procedure.function = self.function
        # Perform sampling as long as procedure is not finished
        is_finished = False
        n_sampled = 0
        n_functioncalls = 0
        n_derivativecalls = 0
        t_experiment_start = get_time()
        while not is_finished:
            self.logger.procedure_calls += 1
            # Perform a procedure iteration and keep track of time elapsed
            t_start = get_time()
            x, y = self.procedure(self.function)
            dt = get_time() - t_start
            # Reshape output arrays to match expectation: a 1-d x is a
            # single point if its length equals the function dimensionality,
            # otherwise a column of 1-d samples.
            if len(x.shape) == 1:
                if x.shape[0] == self.function.get_dimensionality():
                    x = x.reshape((1, -1))
                else:
                    x = x.reshape((-1, 1))
            if len(y.shape) == 1:
                y = y.reshape((len(x), 1))
            self._event_new_samples(x, y)
            # Log procedure call
            n = len(x)
            n_sampled += n
            if self.verbose != 0:
                if self.logger.procedure_calls % self.verbose == 0:
                    print("{} samples taken in {} procedure calls".format(
                        n_sampled, self.logger.procedure_calls))
            self.logger.log_procedure_calls(dt, n_sampled, n)
            # Log sampled data
            if log_data:
                self.logger.log_samples(x, y)
            # Log function calls and reset the counter
            n_functioncalls += self.function.count_calls("normal")[1]
            n_derivativecalls += self.function.count_calls("derivative")[1]
            self.logger.log_function_calls(self.function)
            self.function.reset()
            # Check if the experiment has to stop and update the while
            # condition to control this.
            is_finished = (self.procedure.is_finished()
                           or self._stop_experiment(x, y))
        if self.verbose:
            print(
                "Experiment finished with {} procedure calls and {} samples.".
                format(self.logger.procedure_calls, n_sampled))
        self._event_end_experiment()
        # Log result metrics
        t_experiment_end = get_time()
        metrics = {
            'time': (t_experiment_end - t_experiment_start),
            'n_functioncalls': n_functioncalls,
            'n_derivativecalls': n_derivativecalls
        }
        metrics = {**metrics, **self.make_metrics()}
        self.logger.log_results(metrics)
        # Delete the logger to close all handles
        del self.logger

    def _stop_experiment(self, x, y):
        """
        Uses the stopping criterion defined in the .run() method to determine
        if the experiment should be stopped.

        Args:
            x: Sampled data in the form of a numpy.ndarray of shape
                (nDatapoints, nVariables).
            y: Function values for the samples datapoints of shape
                (nDatapoints, ?)

        Returns:
            Boolean indicating if the experiment should be stopped (i.e. the
            stopping criterion is reached).
        """
        self.n_sampled += len(x)
        # A finish_line of None means "no hard stop": the experiment then
        # only ends when the procedure reports it is finished.
        if self.finish_line is not None and self.n_sampled >= self.finish_line:
            return True
        return False

    @abstractmethod
    def make_metrics(self):
        """
        Creates metrics to report in experiment.yaml

        This is an abstract method and should be implemented in
        Experiment-specific classes derived from this one.

        Returns:
            Dictionary containing the metrics by name.
        """
        return {}

    @abstractmethod
    def _event_start_experiment(self):
        """
        Event that is run when a new experiment is started.

        This is an abstract method and should be implemented in
        Experiment-specific classes derived from this one.
        """
        pass

    @abstractmethod
    def _event_end_experiment(self):
        """
        Event that is run when an experiment is ended, but before the metrics
        are stored to the experiment.yaml file.

        This is an abstract method and should be implemented in
        Experiment-specific classes derived from this one.
        """
        pass

    @abstractmethod
    def _event_new_samples(self, x, y):
        """
        Event that is run when new samples are obtained from the specified
        procedure.

        This is an abstract method and should be implemented in
        Experiment-specific classes derived from this one.

        Args:
            x: Sampled data in the form of a numpy.ndarray of shape
                (nDatapoints, nVariables).
            y: Function values for the samples datapoints of shape
                (nDatapoints, ?)
        """
        pass

    def run(self, function, finish_line=1000, log_data=True):
        """
        Run the experiment on the provided test function.

        The experiment is stopped if the total number of sampled points reaches
        or exceeds the number defined in the `finish_line` argument.

        Args:
            function: Function to run the experiment with. This should be an
                instance of a class with the functions.TestFunction class as
                base class.
            finish_line: If the total sampled data set reaches or exceeds this
                size, the experiment is stopped. This is a hard stop, not a
                stopping condition that has to be met: if the procedure being
                tested indicates it is finished, the experiment will be
                stopped, regardless of the size of the sampled data set. The
                finish_line is set to 1,000 by default. If set to None, the
                experiment will continue to run until the procedure indicates
                it is finished.
            log_data: Boolean indicating if the sampled data should be logged
                as well. It is set to True by default.
        """
        self.finish_line = finish_line
        self.n_sampled = 0
        self._perform_experiment(function, log_data)
class OptimisationExperiment(Experiment):
    """
    Experiment class for optimisation experiments

    Implements automatic logging of best obtained result to the experiment.yaml
    file.
    """

    def __init__(self, *args, **kwargs):
        super(OptimisationExperiment, self).__init__(*args, **kwargs)
        # Defaults disable multi-minima detection: an infinite distance
        # threshold means no candidate can ever be "far enough" from the
        # best minimum, so only the single best point is tracked.
        self.threshold_x = np.inf
        self.tollerance_y = 0

    def detect_multiple_minima(self, threshold_x=np.inf, tollerance_y=0):
        """
        Allow the detection of multiple minima by setting thresholds for the
        identification of a minimum as a unique minimum.

        By calling this function with anything other than the default
        parameters, multiple minima can be detected by the
        OptimisationExperiment. The `threshold_x` parameter defines the
        minimum required euclidean distance between the different found
        candidate minima, so a higher threshold makes it harder to find
        secondary minima (default is `numpy.inf`). `tollerance_y` sets how
        much the function value of the test function may vary with respect to
        the absolute minimum found so far in order to be considered a
        candidate minimum (default is `0`).

        If a point is both more than `threshold_x` away from the best found
        minimum (and other, already found secondary minima) and its function
        value is within `tollerance_y` of the best found minimum, it is
        considered a secondary minimum and will be reported as such in the
        `experiment.yaml` file.

        Args:
            threshold_x: Float indicating the minimum distance between the
                different minima to be found. Distance is measured using the
                euclidean distance norm.
            tollerance_y: Allowed difference between secondary minima and
                the best found minimum.
        """
        self.threshold_x = threshold_x
        self.tollerance_y = tollerance_y

    def _find_minima(self, x, y, previous_x, previous_y):
        """
        Select the unique (candidate) minima from the new samples combined
        with the previously found minima.

        Args:
            x: Newly sampled points, numpy.ndarray of shape
                (nDatapoints, nVariables).
            y: Function values for the new points, shape (nDatapoints, 1).
            previous_x: Previously found minima (or None on first call).
            previous_y: Function values of the previously found minima (or
                None on first call).

        Returns:
            Tuple (x_minima, y_minima) containing the selected minima sorted
            by function value.
        """
        # Combine the new samples with the minima found so far
        if previous_x is None:
            x_candi, y_candi = x, y
        else:
            x_candi = np.vstack((x, np.array(previous_x)))
            y_candi = np.vstack((y, np.array(previous_y)))
        # The reference minimum must be taken over *all* candidates: using
        # only the new batch would accept points far above the best value
        # found in earlier iterations.
        minimum = np.amin(y_candi)
        # Select based on y_threshold: keep candidates within tollerance_y
        # of the best value found so far
        indices = np.argwhere(
            y_candi.flatten() <= minimum + self.tollerance_y).flatten()
        x_candi, y_candi = x_candi[indices], y_candi[indices]
        # Sort candidates based on y value (ascending)
        order = np.argsort(y_candi, axis=0).flatten()
        x_candi, y_candi = x_candi[order], y_candi[order]
        # Select based on x_threshold: greedily accept candidates that are
        # at least threshold_x away from every minimum accepted so far
        keep = []
        for i in range(len(x_candi)):
            if len(keep) == 0:
                keep.append(i)
            elif self._is_minimum_new(x_candi[i], x_candi[np.array(keep)]):
                keep.append(i)
        keep = np.array(keep).flatten()
        # Return selection
        return x_candi[keep], y_candi[keep]

    def _is_minimum_new(self, x, x_prime):
        """
        Check if point x is far enough from all points in x_prime to count
        as a new, unique minimum.

        Args:
            x: Candidate point, numpy.ndarray of shape (nVariables,).
            x_prime: Already accepted minima, shape (nMinima, nVariables).

        Returns:
            False if x lies within threshold_x (euclidean distance) of any
            point in x_prime, True otherwise.
        """
        for z in x_prime:
            if np.linalg.norm(x - z) < self.threshold_x:
                return False
        return True

    def _event_start_experiment(self):
        """
        Event that is run when a new experiment is started.
        """
        self.best_x = None
        self.best_y = None

    def _event_end_experiment(self):
        """
        Event that is run when a experiment ends.
        """
        pass

    def _event_new_samples(self, x, y):
        """
        Event that is run when new samples are obtained from the specified
        procedure.

        This implementation checks all sampled points and their function values
        and stores the (x,y) pair that has the lowest function value.

        Args:
            x: Sampled data in the form of a numpy.ndarray of shape
                (nDatapoints, nVariables).
            y: Function values for the samples datapoints of shape
                (nDatapoints, ?)
        """
        self.best_x, self.best_y = self._find_minima(x, y, self.best_x,
                                                     self.best_y)

    def make_metrics(self):
        """
        Creates metrics to report in results.yaml. Specifically: it reports the
        best found point (i.e. the point with the lowest function value).

        Returns:
            Dictionary containing the metrics by name. Empty if no samples
            were taken.
        """
        # Guard against an experiment that never produced samples
        if self.best_x is None:
            return {}
        if len(self.best_x) == 1:
            return {
                'best_point': self.best_x[0].tolist(),
                'best_value': self.best_y[0].tolist()
            }
        return {
            'best_point': self.best_x.tolist(),
            'best_value': self.best_y.tolist()
        }
class OptimizationExperiment(OptimisationExperiment):
    """
    Alias of OptimisationExperiment with American-English spelling.

    Exists solely so both spelling conventions can be used; it adds no
    behaviour of its own.
    """
    pass
class PosteriorSamplingExperiment(Experiment):
    """
    Experiment class for posterior sampling experiments.

    All event hooks are no-ops: posterior sampling requires no
    per-iteration bookkeeping and reports no metrics of its own.
    """
    def _event_start_experiment(self):
        """Hook run when a new experiment starts; nothing to initialise."""
        pass
    def _event_end_experiment(self):
        """Hook run when an experiment ends; nothing to tear down."""
        pass
    def _event_new_samples(self, x, y):
        """Hook run for every batch of samples; nothing is tracked.

        Args:
            x: Sampled data as a numpy.ndarray of shape
                (nDatapoints, nVariables).
            y: Function values for the sampled points, of shape
                (nDatapoints, ?).
        """
        pass
    def make_metrics(self):
        """No extra metrics are reported for posterior sampling.

        Returns:
            An empty dictionary.
        """
        return {}
class Logger:
    """
    Class that takes care of all logging of experiments.
    An instance of this class is automatically made and handled within the
    Experiment class.
    Args:
        path: Path to which logging results should be written. Within this
            folder each test function will get its own subfolder.
        prefered_subfolder: Name of the folder to be created in the logging
            path. The folder is created with the utils.create_unique_folder
            function, so naming conflicts will be automatically resolved.
    """
    def __init__(self, path, prefered_subfolder):
        # basepath is kept separately so machine-wide output (benchmarks.yaml)
        # can be shared between runs living in sibling subfolders.
        self.basepath = path
        self.path = create_unique_folder(path, prefered_subfolder)
        # Identifier of the current procedure call, written into every csv
        # row. NOTE(review): never incremented inside this class -- presumably
        # advanced by the Experiment driver; confirm against the caller.
        self.procedure_calls = 0
        # The samples.csv header can only be written once the number of
        # variables is known, i.e. at the first log_samples() call.
        self.create_samples_header = True
        self._create_handles()
    def __del__(self):
        """
        Closes all the opened handles at deletion of the instance.
        """
        # hasattr-guarded in case __init__ failed before all handles existed
        handles = ["samples", "functioncalls", "procedurecalls"]
        for handle in handles:
            if hasattr(self, 'handle_' + handle):
                getattr(self, 'handle_' + handle).close()
    def _create_handles(self):
        """
        Creates the file handles needed for logging. Created csv files also get
        their headers added if already possible (samples.csv gets its header
        lazily in log_samples, since the column count is not yet known here).
        """
        self.handle_samples = open(self.path + os.sep + "samples.csv", "w")
        self.handle_functioncalls = open(
            self.path + os.sep + "functioncalls.csv", "w")
        self.handle_functioncalls.write(
            'procedure_call_id,n_queried,dt,asked_for_derivative\n')
        self.handle_procedurecalls = open(
            self.path + os.sep + "procedurecalls.csv", "w")
        self.handle_procedurecalls.write(
            'procedure_call_id,dt,total_dataset_size,new_data_generated\n')
    def log_samples(self, x, y):
        """
        Log samples and their obtained function values from the test function.
        The data and their target values are written to the samples.csv file
        created at initialisation of the Logger object. As this is the first
        moment we know how many parameters the problem has, this function will
        create a header in this file as well if it is called for the first
        time.
        Args:
            x: numpy.ndarray of shape (nDatapoints, nVariables) containing the
                data to be logged.
            y: numpy.ndarray of shape (nDatapoints, nTargetVariables)
                containing the sampled function values of the test function.
        """
        # Create header (only once, now that the column count is known)
        if self.create_samples_header:
            header = ['procedure_call_id']
            header += ['x' + str(i) for i in range(len(x[0]))]
            header += ['y' + str(i) for i in range(len(y[0]))]
            self.handle_samples.write(",".join(header) + "\n")
            self.create_samples_header = False
        # Create and write one csv line per data point
        n_datapoints = len(x)
        points = x.astype(str).tolist()
        labels = y.astype(str).tolist()
        for i in range(n_datapoints):
            line = [str(self.procedure_calls)]
            line += points[i]
            line += labels[i]
            self.handle_samples.write(','.join(line) + "\n")
        self.handle_samples.flush()
    def log_procedure_calls(self, dt, size_total, size_generated):
        """
        Log a procedure call to the procedurecalls.csv file.
        Args:
            dt: Time in ms spend on the procedure call.
            size_total: Number of data points sampled in total for all
                procedure calls so far. This should include the data points
                sampled in the iteration that is currently sampled.
            size_generated: Number of data points sampled in this specific
                procedure call.
        """
        line = [
            int(self.procedure_calls), dt,
            int(size_total),
            int(size_generated)
        ]
        line = list(map(str, line))
        self.handle_procedurecalls.write(','.join(line) + "\n")
        self.handle_procedurecalls.flush()
    def log_function_calls(self, function):
        """
        Log the number of calls to the test function and whether or not it is
        queried for a derivative.
        Function calls will be logged in the functioncalls.csv file.
        Args:
            function: Test function that was used in an experiment iteration.
                This test function should be a class with
                functions.TestFunction as its base class.
        """
        # Each counter entry is assumed to be
        # (n_queried, dt, asked_for_derivative) -- matching the csv header.
        for entry in function.counter:
            line = [
                int(self.procedure_calls),
                int(entry[0]),
                float(entry[1]),
                bool(entry[2])
            ]
            line = list(map(str, line))
            self.handle_functioncalls.write(','.join(line) + "\n")
        self.handle_functioncalls.flush()
    def log_benchmarks(self):
        """
        Benchmark the machine with some simple benchmark algorithms (as
        implemented in the utils module).
        Results are stored in the base log path in the benchmarks.yaml file. If
        this file already exists, no benchmarks are run.
        """
        # Benchmarks describe the machine, not the run, so they are written
        # once to the shared basepath rather than to the per-run folder.
        if os.path.exists(self.basepath + os.sep + "benchmarks.yaml"):
            return
        with open(self.basepath + os.sep + "benchmarks.yaml", "w") as handle:
            info = {}
            # Collect benchmark timings
            info['benchmarks'] = {
                'matrix_inversion': benchmark_matrix_inverse(),
                'sha_hashing': benchmark_sha_hashing(),
            }
            yaml.dump(info, handle, default_flow_style=False)
    def log_experiment(self, experiment, function):
        """
        Log the setup and the function set up to a .yaml-file in order to
        optimize reproducability.
        This method should be called *before* the first experiment iteration.
        Args:
            experiment: Experiment to be run, containing the procedure to be
                tested (which needs to be provided at initialisation).
            function: Test function that was used in an experiment iteration.
                This test function should be a class with
                functions.TestFunction as its base class.
        """
        with open(self.path + os.sep + "experiment.yaml", "w") as handle:
            info = {}
            # Get meta data of experiment
            info['meta'] = {
                'datetime': str(get_datetime()),
                'timestamp': str(get_time()),
                'user': getpass.getuser(),
            }
            # Get properties of function; the (unpicklable) ML model itself
            # is dropped, numpy arrays are converted for clean yaml output
            func_props = copy.copy(vars(function))
            if isinstance(function, MLFunction):
                del(func_props['model'])
            for prop in func_props:
                if prop == 'name':
                    continue
                if isinstance(func_props[prop], np.ndarray):
                    func_props[prop] = func_props[prop].tolist()
            info['function'] = {
                'name': function.name,
                'testfunction': type(function).__name__,
                'properties': func_props
            }
            # The call counter is runtime state, not configuration
            del (info['function']['properties']['counter'])
            # Get properties of experiment
            info['procedure'] = {
                'name': type(experiment.procedure).__name__,
                'properties': {}
            }
            info['experiment'] = {
                'type': experiment.__class__.__name__,
            }
            # Only parameters listed in store_parameters are persisted
            for prop in experiment.procedure.store_parameters:
                info['procedure']['properties'][prop] = getattr(
                    experiment.procedure, prop)
                if isinstance(info['procedure']['properties'][prop],
                              np.ndarray):
                    info['procedure']['properties'][prop] = info['procedure'][
                        'properties'][prop].tolist()
            # Convert information to yaml and write to file
            yaml.dump(info, handle, default_flow_style=False)
    def log_results(self, metrics):
        """
        Log the results of the experiment in the experiment.yaml file
        This method should be called *after* log_experiment has created the
        experiment.yaml file, as the file is read back and extended here.
        Args:
            metrics: Dictionary containing the result metrics to store. Keys
                represent the name with which the values should be stored.
        """
        # Parse experiment yaml file and add results
        with open(self.path + os.sep + "experiment.yaml", 'r') as stream:
            experiment = yaml.safe_load(stream)
        experiment['results'] = metrics
        # Write new content to file
        with open(self.path + os.sep + "experiment.yaml", 'w') as handle:
            yaml.dump(experiment, handle, default_flow_style=False)
| [
"os.path.exists",
"numpy.amin",
"yaml.dump",
"numpy.argsort",
"yaml.safe_load",
"numpy.array",
"numpy.linalg.norm",
"getpass.getuser"
] | [((12664, 12674), 'numpy.amin', 'np.amin', (['y'], {}), '(y)\n', (12671, 12674), True, 'import numpy as np\n'), ((21787, 21845), 'os.path.exists', 'os.path.exists', (["(self.basepath + os.sep + 'benchmarks.yaml')"], {}), "(self.basepath + os.sep + 'benchmarks.yaml')\n", (21801, 21845), False, 'import os\n'), ((22189, 22238), 'yaml.dump', 'yaml.dump', (['info', 'handle'], {'default_flow_style': '(False)'}), '(info, handle, default_flow_style=False)\n', (22198, 22238), False, 'import yaml\n'), ((24612, 24661), 'yaml.dump', 'yaml.dump', (['info', 'handle'], {'default_flow_style': '(False)'}), '(info, handle, default_flow_style=False)\n', (24621, 24661), False, 'import yaml\n'), ((25191, 25213), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (25205, 25213), False, 'import yaml\n'), ((25376, 25431), 'yaml.dump', 'yaml.dump', (['experiment', 'handle'], {'default_flow_style': '(False)'}), '(experiment, handle, default_flow_style=False)\n', (25385, 25431), False, 'import yaml\n'), ((12939, 12966), 'numpy.argsort', 'np.argsort', (['y_candi'], {'axis': '(0)'}), '(y_candi, axis=0)\n', (12949, 12966), True, 'import numpy as np\n'), ((13361, 13378), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (13369, 13378), True, 'import numpy as np\n'), ((13551, 13572), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - z)'], {}), '(x - z)\n', (13565, 13572), True, 'import numpy as np\n'), ((23146, 23163), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (23161, 23163), False, 'import getpass\n'), ((12564, 12584), 'numpy.array', 'np.array', (['previous_x'], {}), '(previous_x)\n', (12572, 12584), True, 'import numpy as np\n'), ((12623, 12643), 'numpy.array', 'np.array', (['previous_y'], {}), '(previous_y)\n', (12631, 12643), True, 'import numpy as np\n'), ((13288, 13305), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (13296, 13305), True, 'import numpy as np\n')] |
import sys,os
import numpy as np
import tqdm
from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog,QVBoxLayout,QMessageBox,QCheckBox
from PyQt5 import QtGui
from PyQt5 import QtCore, QtWidgets
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from PyQt5 import QtCore, QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from photonpy.smlm.dataset import Dataset
from photonpy.smlm.ui import main_ui, linklocs_ui
from photonpy.smlm.ui.progressbar import ProgressBar
from photonpy.smlm.ui.qtplot import PlotDialog
import threading
import json
import functools
import pyqtgraph as pg
import photonpy.smlm.process_movie as process_movie
import photonpy.smlm.extract_rois as extract_rois
from photonpy.smlm.util import imshow_hstack
from photonpy.smlm.ui.drift_correct_dlg import DriftCorrectionDialog
import matplotlib as mpl
#mpl.use('svg')
# Project-wide Matplotlib defaults: a larger base font and, for SVG export,
# keeping text as text elements instead of converting glyphs to paths.
new_rc_params = {
#    "font.family": 'Times',
    "font.size": 15,
    "font.serif": [],
    "svg.fonttype": 'none'} #to store text as text, not as path
mpl.rcParams.update(new_rc_params)
class MplCanvas(FigureCanvasQTAgg):
    """Qt widget embedding a Matplotlib figure with a single axes.

    The axes is exposed as ``self.axes`` for callers to draw on.
    """
    def __init__(self, parent=None, width=5, height=4, dpi=100):
        """Create a ``width`` x ``height`` inch figure at ``dpi`` and add one subplot.

        Args:
            parent: Unused; kept for Qt-style constructor compatibility.
            width: Figure width in inches.
            height: Figure height in inches.
            dpi: Figure resolution in dots per inch.
        """
        fig = Figure(figsize=(width, height), dpi=dpi)
        self.axes = fig.add_subplot(111)
        # Zero-argument super() is the modern Python 3 form of the original
        # super(MplCanvas, self) call.
        super().__init__(fig)
#import photonpy.simflux.locs_to_pattern as simflux_pattern
#ript(Run In Plotting Thread) decorator
def ript(function):
    """Decorator ("Run In Plotting Thread") forwarding calls to the plot thread.

    When the wrapped function is invoked on the plotting thread itself, it
    runs directly. From any other thread a ``functools.partial`` of the call
    is pushed onto the global ``send_queue`` and the caller blocks on
    ``return_queue`` until the plotting thread has executed it and posted
    the result.

    Args:
        function: Callable that must execute on the plotting thread.

    Returns:
        A wrapper with the same call signature returning ``function``'s
        result.
    """
    @functools.wraps(function)  # preserve name/docstring of the wrapped callable
    def ript_this(*args, **kwargs):
        global send_queue, return_queue, plot_thread
        # threading.currentThread() is a deprecated alias (removed in
        # Python 3.12); current_thread() is the supported spelling.
        if threading.current_thread() == plot_thread:
            # Called from the plotting thread -> execute directly
            return function(*args, **kwargs)
        # Called from a different thread -> send the call to the queue
        send_queue.put(functools.partial(function, *args, **kwargs))
        return_parameters = return_queue.get(True)  # blocking (wait for return value)
        return return_parameters
    return ript_this
def showMessage(txt):
    """Pop up a modal information box displaying *txt* and wait for the user."""
    box = QMessageBox()
    box.setIcon(QMessageBox.Information)
    box.setText(txt)
    box.exec_()
def createDatasetViewer(ds:Dataset):
    """Open a pyqtgraph window showing *ds* rendered as Gaussian spots.

    Args:
        ds: Dataset to visualise; its 'locs_path' entry is used in the
            window title.

    Returns:
        The QDialog hosting the viewer. Keep a reference to it, otherwise
        the window is garbage collected and closes.
    """
    # Interpret image data as row-major instead of col-major
    pg.setConfigOptions(imageAxisOrder='row-major')
    rendered = ds.renderGaussianSpots(10, 0.5)
    # Build a dialog containing a single ImageView widget
    dialog = QtGui.QDialog()
    dialog.resize(800, 800)
    box = QVBoxLayout(dialog)
    viewer = pg.ImageView()
    box.addWidget(viewer)
    dialog.show()
    name = ds['locs_path']
    dialog.setWindowTitle(f'Viewing {name}')
    viewer.setImage(rendered)
    # Custom black -> brown -> white color map
    color_stops = [
        (0, 0, 0),
        (45, 5, 61),
        (84, 42, 55),
        (150, 87, 60),
        (208, 171, 141),
        (255, 255, 255)
    ]
    lut = pg.ColorMap(pos=np.linspace(0.0, 1.0, 6), color=color_stops)
    viewer.setColorMap(lut)
    return dialog
class LinkLocsDialog(QDialog):
    """Dialog for estimating fluorophore on-times by linking localizations
    across frames."""
    def __init__(self, parent):
        """Build the Designer UI and connect the browse/estimate buttons."""
        super().__init__(parent)
        self.ui = linklocs_ui.Ui_Dialog()
        self.ui.setupUi(self)
        self.ui.btnBrowse.clicked.connect(self._onBrowse)
        self.ui.btnEstimate.clicked.connect(self.estimate)
    def setLocsFile(self, fn):
        """Pre-fill the localizations-file text box with *fn*."""
        self.ui.txtLocsFile.setText(fn)
    def _onBrowse(self):
        """Let the user pick an HDF5 localizations file."""
        dlg_options = QFileDialog.Options()
        chosen, _ = QFileDialog.getOpenFileName(
            self, "", "", "All Files (*);;HDF5 Files (*.hdf5)",
            options=dlg_options)
        if chosen:
            self.ui.txtLocsFile.setText(chosen)
    def estimate(self):
        """Run the on-time estimation and show the resulting plot dialog."""
        from utils.link_locs import estimate_on_time
        max_dist = self.ui.maxDistance.value()
        max_frameskip = self.ui.frameskip.value()
        fig, bins, framecounts = estimate_on_time(
            self.ui.txtLocsFile.text(), max_dist, max_frameskip)
        import photonpy.smlm.ui.qtplot as qtplot
        plot_dlg = qtplot.PlotDialog(fig, self)
        plot_dlg.setModal(True)
        plot_dlg.show()
def getWidgetValues(widgets):
    """Snapshot the current values of the supported Qt widgets.

    Only QDoubleSpinBox/QSpinBox (numeric value), QLineEdit (text) and
    QCheckBox (checked state) are recognised; other widget types are
    silently skipped.

    Args:
        widgets: Iterable of Qt widgets.

    Returns:
        dict mapping each widget's objectName() to its current value.
    """
    snapshot = {}
    for widget in widgets:
        kind = type(widget)
        # Exact type matches (not isinstance), mirroring the original
        # behaviour for the Designer-generated widgets.
        if kind in (QtWidgets.QDoubleSpinBox, QtWidgets.QSpinBox):
            snapshot[widget.objectName()] = widget.value()
        elif kind == QLineEdit:
            snapshot[widget.objectName()] = widget.text()
        elif kind == QCheckBox:
            snapshot[widget.objectName()] = widget.isChecked()
    return snapshot
def setWidgetValues(widgets, values):
    """Restore widget values previously captured by getWidgetValues.

    Widgets whose objectName() is absent from *values* (and unsupported
    widget types) are left untouched.

    Args:
        widgets: Iterable of Qt widgets.
        values: dict mapping objectName() -> stored value.
    """
    for widget in widgets:
        key = widget.objectName()
        if key not in values:
            continue
        stored = values[key]
        kind = type(widget)
        if kind in (QtWidgets.QDoubleSpinBox, QtWidgets.QSpinBox):
            widget.setValue(stored)
        elif kind == QLineEdit:
            widget.setText(stored)
        elif kind == QCheckBox:
            widget.setChecked(stored)
class Window(QDialog):
    """
    Main dialog of the photonpy localization microscopy analysis toolbox.

    Wires the Designer-generated UI (main_ui) to the processing code:
    2D Gaussian localization of TIFF movies, drift correction (RCC and
    minimum-entropy), ROI extraction and dataset viewing. Long-running work
    runs on worker threads that report back via the Qt signals below, so
    results are handled on the Qt main thread.
    """
    # Signals emitted from worker threads; connected to main-thread slots.
    localizeDone = QtCore.pyqtSignal()
    localizeFailed = QtCore.pyqtSignal([str])
    roiExtractionDone = QtCore.pyqtSignal()
    # NOTE(review): class-level mutable attribute, shared between Window
    # instances until an instance rebinds it (onLoadLocs/onLocalizeDone do).
    # Harmless while only one window exists.
    datasets = []
    def __init__(self):
        """Build the UI, connect every button/signal and restore UI state."""
        super().__init__()
        self.title = 'Photonpy localization microscopy analysis toolbox'
        # Keep references to viewer dialogs; without them the windows are
        # garbage collected and close immediately.
        self.viewers = []
        self.ui = main_ui.Ui_Dialog()
        ui=self.ui
        ui.setupUi(self)
        ui.btnBrowseTiff.clicked.connect(self.onBrowseTiff)
        ui.btnLocalize.clicked.connect(self.localize)
        ui.btnLinkLocs.clicked.connect(self.linklocs)
        ui.btnBrowseCameraDarkFrames.clicked.connect(self.onBrowseCameraDarkFrames)
        ui.btnBrowseCameraLightFrames.clicked.connect(self.onBrowseCameraLightFrames)
        ui.btnBrowseROIs.clicked.connect(self.onBrowseROIFile)
        ui.btnRCC.clicked.connect(self.onDriftCorrectRCC)
        ui.btnMinEntropyDrift.clicked.connect(self.onDriftCorrectMinEntropy)
        ui.btnExtractROIs.clicked.connect(self.onExtractROIs)
        ui.checkBoxPerPixelCamCalib.toggled.connect(self.onPerPixelCamCalibChanged)
        # Apply the initial enabled/disabled state of the calibration fields
        self.onPerPixelCamCalibChanged()
        ui.btnViewSelected.clicked.connect(self.onViewSelected)
        ui.btnLoad.clicked.connect(self.onLoadLocs)
        self.localizeFailed.connect(self.onLocalizeFailed)
        self.localizeDone.connect(self.onLocalizeDone)
        self.roiExtractionDone.connect(self.onROIExtractionDone)
        # UI state (widget values) persists across sessions in this file
        self.cfgFile = os.path.dirname(__file__) + '/ui-cfg.json'
        # Widgets saved/restored through get/setWidgetValues
        self.cfgWidgets = {
            ui.roisize,
            ui.gain,
            ui.offset,
            ui.detectionThreshold,
            ui.pixelsize,
            ui.spotDetectionPSFSigma,
            ui.spinSigmaFitFramesPerBin,
            ui.tiffPath,
            ui.txtCameraDarkFrames,
            ui.txtCameraLightFrames,
            ui.startFrame,
            ui.maxLinkDistance,
            ui.maxLinkFrameskip,
            ui.txtROIFile,
            ui.roiExtractMinSpotFrames,
            ui.roiExtractSpotFrames,
            ui.roiExtractAppend,
            ui.maxLinkDistanceIntensity,
            ui.checkBoxPerPixelCamCalib,
            ui.spinSpotDetectorUseMeanImage,
            ui.spinNumFrames,
            ui.chiSquareThreshold,
            ui.spinSumFrames,
            ui.rccFramesPerBin,
            ui.minEntFramesPerBin,
            ui.minEntMaxSpots
        }
        self.load()
    @property
    def selectedDataset(self):
        """Dataset belonging to the row currently selected in the list view."""
        idx = self.ui.listDatasets.currentIndex().row()
        return self.datasets[idx]
    def onViewSelected(self):
        """Open a viewer window for the currently selected dataset."""
        ds = self.selectedDataset
        self.viewers.append(createDatasetViewer(ds))
    def onDriftCorrectRCC(self):
        """Drift-correct a copy of the selected dataset with RCC and append
        the result (saved as *_undrifted_rcc.hdf5) to the dataset list."""
        fpb = self.ui.rccFramesPerBin.value()
        ds = self.selectedDataset.copy()
        drift = ds.estimateDriftRCC(framesPerBin=fpb, maxdrift=5)
        ds.applyDrift(drift)
        path = os.path.splitext( ds['imagefile'])[0]+"_undrifted_rcc.hdf5"
        ds.save(path)
        ds['locs_path'] = path
        self.datasets.append(ds)
        self.updateList()
    def onDriftCorrectMinEntropy(self):
        """Drift-correct a copy of the selected dataset with minimum-entropy
        estimation (initialised with RCC) and append the result
        (*_undrifted_dme.hdf5) to the dataset list."""
        fpb = self.ui.minEntFramesPerBin.value()
        maxspots = self.ui.minEntMaxSpots.value()
        ds = self.selectedDataset.copy()
        path_noext = os.path.splitext( ds['locs_path'])[0]
        rcc_fpb = self.ui.rccFramesPerBin.value()
        coarseFPB = self.ui.minEntCoarseFPB.value()
        # A value of 0 means "no coarse pass"
        if coarseFPB==0:
            coarseFPB=None
        coarseSigmaM = self.ui.minEntCoarseSigmaMultiplier.value()
        sigma = ds.data.crlb.pos.mean(0) * coarseSigmaM
        # prec (estimated precision) is currently unused
        drift, prec = ds.estimateDriftMinEntropy(framesPerBin=fpb,
            pixelsize = self.ui.pixelsize.value(),
            maxdrift = 5, maxspots = maxspots,
            initializeWithRCC = ds.numFrames//rcc_fpb,
            coarseFramesPerBin = coarseFPB,
            coarseSigma = sigma,
            outputfn = path_noext+"_drift_dme")
        ds.applyDrift(drift)
        path = path_noext+"_undrifted_dme.hdf5"
        ds.save(path)
        ds['locs_path'] = path
        self.datasets.append(ds)
        self.updateList()
    def onPerPixelCamCalibChanged(self):
        """Toggle between scalar gain/offset fields and the per-pixel
        calibration-movie fields depending on the checkbox state."""
        v = self.ui.checkBoxPerPixelCamCalib.checkState()
        self.ui.offset.setEnabled(not v)
        self.ui.gain.setEnabled(not v)
        self.ui.txtCameraDarkFrames.setEnabled(v)
        self.ui.txtCameraLightFrames.setEnabled(v)
    def load(self):
        """Restore widget values from ui-cfg.json, if the file exists."""
        path = os.path.abspath(self.cfgFile)
        print(f"Loading UI state from {path}")
        if os.path.exists(self.cfgFile):
            with open(self.cfgFile,'r') as f:
                d = json.load(f)
                setWidgetValues(self.cfgWidgets,d)
    def save(self):
        """Persist the current widget values to ui-cfg.json."""
        d = getWidgetValues(self.cfgWidgets)
        with open(self.cfgFile,'w') as f:
            json.dump(d,f,indent=4)
    def closeEvent(self,event):
        """Qt close hook: save the UI state before the window closes."""
        self.save()
    def linklocs(self):
        """Open the localization-linking dialog pre-filled with the current
        localizations file."""
        dlg = LinkLocsDialog(self)
        dlg.setLocsFile(self.ui.smlmLocsFile.text())
        dlg.show()
    def updatePaths(self):
        # NOTE(review): the value read here is never used -- dead code.
        tiff_path = self.ui.tiffPath.text()
    def onBrowseCameraDarkFrames(self):
        """Let the user pick the dark-frame calibration movie."""
        options = QFileDialog.Options()
        fileName, _ = QFileDialog.getOpenFileName(self,"Browse movie containing dark calibration:", "","All Files (*);;TIFF File (*.tif)", options=options)
        if fileName:
            self.ui.txtCameraDarkFrames.setText(fileName)
    def onBrowseCameraLightFrames(self):
        """Let the user pick the light-frame calibration movie."""
        options = QFileDialog.Options()
        fileName, _ = QFileDialog.getOpenFileName(self,"Browse movie containing light frames for calibration:", "","All Files (*);;TIFF File (*.tif)", options=options)
        if fileName:
            self.ui.txtCameraLightFrames.setText(fileName)
    def onBrowseROIFile(self):
        """Let the user pick the ROI output file."""
        options = QFileDialog.Options()
        fileName, _ = QFileDialog.getOpenFileName(self,"Browse ROI file", "","All Files (*);;TIFF File (*.tif)", options=options)
        if fileName:
            self.ui.txtROIFile.setText(fileName)
    def onBrowseTiff(self):
        """Let the user pick the TIFF movie to be localized."""
        options = QFileDialog.Options()
        fileName, _ = QFileDialog.getOpenFileName(self,"Browse TIFF", "","All Files (*);;TIFF File (*.tif)", options=options)
        if fileName:
            self.ui.tiffPath.setText(fileName)
            self.updatePaths()
    def onLoadLocs(self):
        """Load an existing localizations file as the current dataset.

        Replaces the dataset list with the loaded file; load errors are
        reported in a message box.
        """
        options = QFileDialog.Options()
        # NOTE(review): the caption says "Browse ROI file" but a
        # localizations file is being loaded -- looks like a copy/paste slip.
        filename, _ = QFileDialog.getOpenFileName(self,"Browse ROI file", "","Picasso compatible HDF5 (*.hdf5);;Thunderstorm CSV (*.csv)", options=options)
        if filename:
            try:
                ds = Dataset.load(filename)
                self.result = ds
                self.datasets = [ds]
                self.updateList()
            except ValueError as e:
                showMessage(f'Error: {str(e)}')
    def onExtractROIs(self):
        """Extract ROIs around linked localizations on a worker thread.

        Collects linking parameters from the UI, starts extract_rois in a
        background thread and shows a progress dialog; roiExtractionDone is
        emitted on success.
        """
        locs_fn = self.ui.smlmLocsFile.text()
        tiff_path = self.ui.tiffPath.text()
        rois_path = self.ui.txtROIFile.text()
        pbar = ProgressBar("Extracting ROIs and estimating spot background and intensity")
        # Progress callback invoked from the worker thread; signals marshal
        # the updates onto the UI thread. Returning False aborts.
        def progress_update(msg,done):
            if msg is not None:
                pbar.setMsg.emit(msg)
            if done is not None:
                pbar.update.emit(done)
            return not pbar.abortPressed
        cfg = self.getConfig()
        cfg = {**cfg,
            'maxlinkdistXY': self.ui.maxLinkDistance.value(),
            'maxlinkdistI': self.ui.maxLinkDistanceIntensity.value(),
            'maxlinkframeskip': self.ui.maxLinkFrameskip.value()
        }
        maxroiframes = self.ui.roiExtractSpotFrames.value()
        minroiframes = self.ui.roiExtractMinSpotFrames.value()
        appendFrames = self.ui.roiExtractAppend.value()
        def process_thread():
            self.rois,self.roiframes = extract_rois.extract_rois(rois_path, tiff_path, cfg, minroiframes,
                                      maxroiframes, appendFrames, locs_fn, progress_update)
            if not pbar.abortPressed:
                self.roiExtractionDone.emit()
        t = threading.Thread(target=process_thread)
        t.start()
        pbar.show()
    def onViewROIs(self):
        """Show the first 20 extracted ROIs as image strips (debug helper)."""
        rois_path = self.ui.txtROIFile.text()
        roidata = extract_rois.ROIData.load(rois_path)
        plt.figure()
        for k in range(20):
            imshow_hstack(roidata.frames[k])
    def updateList(self):
        """Rebuild the dataset list view from self.datasets."""
        model = QtGui.QStandardItemModel()
        self.ui.listDatasets.setModel(model)
        for d in self.datasets:
            item = QtGui.QStandardItem(f"{d['locs_path']} - {d.info()}")
            model.appendRow(item)
    def getConfig(self):
        """Collect the localization configuration from the UI.

        Returns:
            Configuration dict for the localization pipeline, or None when
            per-pixel calibration is enabled but a calibration movie is
            missing (an error message box is shown in that case).
        """
        offset = self.ui.offset.value()
        gain = self.ui.gain.value()
        if self.ui.checkBoxPerPixelCamCalib.isChecked():
            # Per-pixel mode: gain/offset are calibration-movie paths
            offset = self.ui.txtCameraDarkFrames.text()
            gain = self.ui.txtCameraLightFrames.text()
            if len(offset) == 0:
                showMessage('Need to provide movie with dark frames')
                return
            if len(gain) == 0:
                showMessage('Need to provide movie with light frames')
                return
        cfg = {
            'roisize': self.ui.roisize.value(),
            'threshold': self.ui.detectionThreshold.value(),
            'sigmaframesperbin': self.ui.spinSigmaFitFramesPerBin.value(),
            'gain': gain,
            'maxframes': self.ui.spinNumFrames.value(),
            'offset': offset,
            'startframe': self.ui.startFrame.value(),
            'pixelsize': self.ui.pixelsize.value(),
            'spotdetectsigma': self.ui.spotDetectionPSFSigma.value(),
            'sumframes': self.ui.spinSumFrames.value()
        }
        # A chi-square threshold of 0 means "no filtering"
        chisq = self.ui.chiSquareThreshold.value()
        if chisq > 0 :
            cfg['maxchisq'] = chisq
        return cfg
    def localize(self):
        """Run spot detection and 2D Gaussian localization on a worker thread.

        Emits localizeDone on success and localizeFailed (with the error
        message) when the localizer raises a ValueError.
        """
        tiff_path = self.ui.tiffPath.text()
        if not os.path.exists(tiff_path):
            return
        cfg = self.getConfig()
        if cfg is None:
            # getConfig already reported the problem to the user
            return
        locs_fn = os.path.splitext(tiff_path)[0]+".hdf5"
        self.ui.labelLocsInfo.setText('')
        pbar = ProgressBar("Running spot detection and 2D Gaussian localization...")
        # Progress callback invoked from the worker thread; returning False
        # aborts the run.
        def progress_update(msg,done):
            if msg is not None:
                pbar.setMsg.emit(msg)
            if done is not None:
                pbar.update.emit(done)
            return not pbar.abortPressed
        def localize_thread():
            print (f"Localize thread: {threading.get_ident()}")
            try:
                self.localizer = process_movie.Localizer2D()
                self.localizer.process(tiff_path, cfg, locs_fn, progress_update)
                self.tiff_path = tiff_path
                if not pbar.abortPressed:
                    self.localizeDone.emit()
            except ValueError as e:
                self.localizeFailed.emit(str(e))
        # Debug switch: flip to False to run synchronously on the UI thread
        if True:
            t = threading.Thread(target=localize_thread)
            t.start()
        else: #debug -- skip the threading
            self.localizer = process_movie.Localizer2D()
            self.localizer.process(tiff_path, cfg, locs_fn, progress_update)
            self.localizeDone.emit()
        pbar.show()
    @QtCore.pyqtSlot(str)
    def onLocalizeFailed(self, msg):
        """Slot: report a localization failure to the user."""
        showMessage(f'Error: {msg}')
    @QtCore.pyqtSlot()
    def onLocalizeDone(self):
        """Slot: show result plots and register the localized dataset."""
        print("localize done")
        self.localizer.plotChiSquare()
        self.localizer.plotSigmaTimeSeries()
        self.localizer.plotIntensityHistogram()
        self.result = self.localizer.result
        #img = self.result.renderGaussianSpots(20, 1)
        #plt.figure()
        #plt.imshow(img)
        self.viewers.append (createDatasetViewer(self.result))
        # When per-spot sigmas were fitted, update the UI defaults with the
        # median fitted PSF width and show their distribution
        if 'sigma' in self.result.dtypeEstim.fields:
            sx = self.result.data.estim.sigma[:,0]
            sy = self.result.data.estim.sigma[:,1]
            self.ui.psfSigmaX.setValue(np.median(sx))
            self.ui.psfSigmaY.setValue(np.median(sy))
            fig = plt.figure(figsize=(8,5))
            plt.hist([sx,sy],label=['Sigma X','Sigma Y'],range=(1,3),bins=100)
            plt.legend()
            plt.xlabel('PSF Sigma [pixels]')
            plt.show()
            #PlotDialog(fig).show()
        self.datasets = [ self.result ]
        self.updateList()
        #self.ui.labelLocsInfo.setText(self.datasets[0].info())
    @QtCore.pyqtSlot()
    def onROIExtractionDone(self):
        """Slot: notify that ROI extraction has finished."""
        print("roi extraction done")
def run_ui():
    """Launch the main window and block inside the Qt event loop.

    Reuses an already running QApplication (e.g. inside IPython/Spyder) and
    only creates a new one when none exists yet.
    """
    application = QApplication.instance()
    if application is None:
        application = QApplication(sys.argv)
    main_window = Window()
    main_window.show()
    main_window.activateWindow()
    application.exec_()
    # Drop references explicitly after the loop exits:
    main_window = None
    #del tqdm # prevent exception at exit about not being able to join thread
    del application # prevent IPython+Qt issue https://github.com/spyder-ide/spyder/issues/2970
if __name__ == '__main__':
print('Opening UI')
run_ui() | [
"photonpy.smlm.ui.qtplot.PlotDialog",
"photonpy.smlm.process_movie.Localizer2D",
"photonpy.smlm.extract_rois.extract_rois",
"matplotlib.pyplot.hist",
"PyQt5.QtWidgets.QMessageBox",
"photonpy.smlm.extract_rois.ROIData.load",
"PyQt5.QtWidgets.QApplication",
"photonpy.smlm.ui.linklocs_ui.Ui_Dialog",
"P... | [((233, 257), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (247, 257), False, 'import matplotlib\n'), ((1197, 1231), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['new_rc_params'], {}), '(new_rc_params)\n', (1216, 1231), True, 'import matplotlib as mpl\n'), ((2162, 2175), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (2173, 2175), False, 'from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog, QVBoxLayout, QMessageBox, QCheckBox\n'), ((2358, 2405), 'pyqtgraph.setConfigOptions', 'pg.setConfigOptions', ([], {'imageAxisOrder': '"""row-major"""'}), "(imageAxisOrder='row-major')\n", (2377, 2405), True, 'import pyqtgraph as pg\n'), ((2515, 2530), 'PyQt5.QtGui.QDialog', 'QtGui.QDialog', ([], {}), '()\n', (2528, 2530), False, 'from PyQt5 import QtGui\n'), ((2568, 2584), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', (['win'], {}), '(win)\n', (2579, 2584), False, 'from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog, QVBoxLayout, QMessageBox, QCheckBox\n'), ((2595, 2609), 'pyqtgraph.ImageView', 'pg.ImageView', ([], {}), '()\n', (2607, 2609), True, 'import pyqtgraph as pg\n'), ((5497, 5516), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', ([], {}), '()\n', (5514, 5516), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((5538, 5562), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['[str]'], {}), '([str])\n', (5555, 5562), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((5587, 5606), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', ([], {}), '()\n', (5604, 5606), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((17530, 17550), 'PyQt5.QtCore.pyqtSlot', 'QtCore.pyqtSlot', (['str'], {}), '(str)\n', (17545, 17550), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((17631, 17648), 'PyQt5.QtCore.pyqtSlot', 'QtCore.pyqtSlot', ([], {}), '()\n', (17646, 17648), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((18768, 18785), 'PyQt5.QtCore.pyqtSlot', 'QtCore.pyqtSlot', ([], {}), 
'()\n', (18783, 18785), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((18891, 18914), 'PyQt5.QtWidgets.QApplication.instance', 'QApplication.instance', ([], {}), '()\n', (18912, 18914), False, 'from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog, QVBoxLayout, QMessageBox, QCheckBox\n'), ((1349, 1389), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(width, height)', 'dpi': 'dpi'}), '(figsize=(width, height), dpi=dpi)\n', (1355, 1389), False, 'from matplotlib.figure import Figure\n'), ((3662, 3685), 'photonpy.smlm.ui.linklocs_ui.Ui_Dialog', 'linklocs_ui.Ui_Dialog', ([], {}), '()\n', (3683, 3685), False, 'from photonpy.smlm.ui import main_ui, linklocs_ui\n'), ((3953, 3974), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (3972, 3974), False, 'from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog, QVBoxLayout, QMessageBox, QCheckBox\n'), ((4049, 4149), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '""""""', '""""""', '"""All Files (*);;HDF5 Files (*.hdf5)"""'], {'options': 'options'}), "(self, '', '',\n 'All Files (*);;HDF5 Files (*.hdf5)', options=options)\n", (4076, 4149), False, 'from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog, QVBoxLayout, QMessageBox, QCheckBox\n'), ((4558, 4586), 'photonpy.smlm.ui.qtplot.PlotDialog', 'qtplot.PlotDialog', (['fig', 'self'], {}), '(fig, self)\n', (4575, 4586), True, 'import photonpy.smlm.ui.qtplot as qtplot\n'), ((5809, 5828), 'photonpy.smlm.ui.main_ui.Ui_Dialog', 'main_ui.Ui_Dialog', ([], {}), '()\n', (5826, 5828), False, 'from photonpy.smlm.ui import main_ui, linklocs_ui\n'), ((10401, 10430), 'os.path.abspath', 'os.path.abspath', (['self.cfgFile'], {}), '(self.cfgFile)\n', (10416, 10430), False, 'import sys, os\n'), ((10489, 10517), 'os.path.exists', 'os.path.exists', (['self.cfgFile'], {}), '(self.cfgFile)\n', (10503, 10517), False, 'import sys, os\n'), ((11149, 11170), 
'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (11168, 11170), False, 'from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog, QVBoxLayout, QMessageBox, QCheckBox\n'), ((11193, 11336), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '"""Browse movie containing dark calibration:"""', '""""""', '"""All Files (*);;TIFF File (*.tif)"""'], {'options': 'options'}), "(self,\n 'Browse movie containing dark calibration:', '',\n 'All Files (*);;TIFF File (*.tif)', options=options)\n", (11220, 11336), False, 'from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog, QVBoxLayout, QMessageBox, QCheckBox\n'), ((11466, 11487), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (11485, 11487), False, 'from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog, QVBoxLayout, QMessageBox, QCheckBox\n'), ((11510, 11665), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '"""Browse movie containing light frames for calibration:"""', '""""""', '"""All Files (*);;TIFF File (*.tif)"""'], {'options': 'options'}), "(self,\n 'Browse movie containing light frames for calibration:', '',\n 'All Files (*);;TIFF File (*.tif)', options=options)\n", (11537, 11665), False, 'from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog, QVBoxLayout, QMessageBox, QCheckBox\n'), ((11786, 11807), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (11805, 11807), False, 'from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog, QVBoxLayout, QMessageBox, QCheckBox\n'), ((11830, 11943), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '"""Browse ROI file"""', '""""""', '"""All Files (*);;TIFF File (*.tif)"""'], {'options': 'options'}), "(self, 'Browse ROI file', '',\n 'All Files (*);;TIFF File (*.tif)', options=options)\n", 
(11857, 11943), False, 'from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog, QVBoxLayout, QMessageBox, QCheckBox\n'), ((12055, 12076), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (12074, 12076), False, 'from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog, QVBoxLayout, QMessageBox, QCheckBox\n'), ((12099, 12208), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '"""Browse TIFF"""', '""""""', '"""All Files (*);;TIFF File (*.tif)"""'], {'options': 'options'}), "(self, 'Browse TIFF', '',\n 'All Files (*);;TIFF File (*.tif)', options=options)\n", (12126, 12208), False, 'from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog, QVBoxLayout, QMessageBox, QCheckBox\n'), ((12359, 12380), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (12378, 12380), False, 'from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog, QVBoxLayout, QMessageBox, QCheckBox\n'), ((12403, 12547), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '"""Browse ROI file"""', '""""""', '"""Picasso compatible HDF5 (*.hdf5);;Thunderstorm CSV (*.csv)"""'], {'options': 'options'}), "(self, 'Browse ROI file', '',\n 'Picasso compatible HDF5 (*.hdf5);;Thunderstorm CSV (*.csv)', options=\n options)\n", (12430, 12547), False, 'from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog, QVBoxLayout, QMessageBox, QCheckBox\n'), ((13001, 13076), 'photonpy.smlm.ui.progressbar.ProgressBar', 'ProgressBar', (['"""Extracting ROIs and estimating spot background and intensity"""'], {}), "('Extracting ROIs and estimating spot background and intensity')\n", (13012, 13076), False, 'from photonpy.smlm.ui.progressbar import ProgressBar\n'), ((14125, 14164), 'threading.Thread', 'threading.Thread', ([], {'target': 'process_thread'}), '(target=process_thread)\n', (14141, 14164), False, 'import 
threading\n'), ((14302, 14338), 'photonpy.smlm.extract_rois.ROIData.load', 'extract_rois.ROIData.load', (['rois_path'], {}), '(rois_path)\n', (14327, 14338), True, 'import photonpy.smlm.extract_rois as extract_rois\n'), ((14356, 14368), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14366, 14368), True, 'import matplotlib.pyplot as plt\n'), ((14485, 14511), 'PyQt5.QtGui.QStandardItemModel', 'QtGui.QStandardItemModel', ([], {}), '()\n', (14509, 14511), False, 'from PyQt5 import QtGui\n'), ((16379, 16448), 'photonpy.smlm.ui.progressbar.ProgressBar', 'ProgressBar', (['"""Running spot detection and 2D Gaussian localization..."""'], {}), "('Running spot detection and 2D Gaussian localization...')\n", (16390, 16448), False, 'from photonpy.smlm.ui.progressbar import ProgressBar\n'), ((18949, 18971), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (18961, 18971), False, 'from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog, QVBoxLayout, QMessageBox, QCheckBox\n'), ((1701, 1726), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (1724, 1726), False, 'import threading\n'), ((3457, 3481), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(6)'], {}), '(0.0, 1.0, 6)\n', (3468, 3481), True, 'import numpy as np\n'), ((6990, 7015), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7005, 7015), False, 'import sys, os\n'), ((8961, 8994), 'os.path.splitext', 'os.path.splitext', (["ds['locs_path']"], {}), "(ds['locs_path'])\n", (8977, 8994), False, 'import sys, os\n'), ((10777, 10802), 'json.dump', 'json.dump', (['d', 'f'], {'indent': '(4)'}), '(d, f, indent=4)\n', (10786, 10802), False, 'import json\n'), ((13843, 13967), 'photonpy.smlm.extract_rois.extract_rois', 'extract_rois.extract_rois', (['rois_path', 'tiff_path', 'cfg', 'minroiframes', 'maxroiframes', 'appendFrames', 'locs_fn', 'progress_update'], {}), '(rois_path, tiff_path, cfg, minroiframes,\n 
maxroiframes, appendFrames, locs_fn, progress_update)\n', (13868, 13967), True, 'import photonpy.smlm.extract_rois as extract_rois\n'), ((14409, 14441), 'photonpy.smlm.util.imshow_hstack', 'imshow_hstack', (['roidata.frames[k]'], {}), '(roidata.frames[k])\n', (14422, 14441), False, 'from photonpy.smlm.util import imshow_hstack\n'), ((16117, 16142), 'os.path.exists', 'os.path.exists', (['tiff_path'], {}), '(tiff_path)\n', (16131, 16142), False, 'import sys, os\n'), ((17210, 17250), 'threading.Thread', 'threading.Thread', ([], {'target': 'localize_thread'}), '(target=localize_thread)\n', (17226, 17250), False, 'import threading\n'), ((17345, 17372), 'photonpy.smlm.process_movie.Localizer2D', 'process_movie.Localizer2D', ([], {}), '()\n', (17370, 17372), True, 'import photonpy.smlm.process_movie as process_movie\n'), ((18371, 18397), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (18381, 18397), True, 'import matplotlib.pyplot as plt\n'), ((18409, 18481), 'matplotlib.pyplot.hist', 'plt.hist', (['[sx, sy]'], {'label': "['Sigma X', 'Sigma Y']", 'range': '(1, 3)', 'bins': '(100)'}), "([sx, sy], label=['Sigma X', 'Sigma Y'], range=(1, 3), bins=100)\n", (18417, 18481), True, 'import matplotlib.pyplot as plt\n'), ((18488, 18500), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18498, 18500), True, 'import matplotlib.pyplot as plt\n'), ((18513, 18545), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""PSF Sigma [pixels]"""'], {}), "('PSF Sigma [pixels]')\n", (18523, 18545), True, 'import matplotlib.pyplot as plt\n'), ((18558, 18568), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18566, 18568), True, 'import matplotlib.pyplot as plt\n'), ((1936, 1980), 'functools.partial', 'functools.partial', (['function', '*args'], {}), '(function, *args, **kwargs)\n', (1953, 1980), False, 'import functools\n'), ((8562, 8595), 'os.path.splitext', 'os.path.splitext', (["ds['imagefile']"], {}), "(ds['imagefile'])\n", (8578, 
8595), False, 'import sys, os\n'), ((10585, 10597), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10594, 10597), False, 'import json\n'), ((12596, 12618), 'photonpy.smlm.dataset.Dataset.load', 'Dataset.load', (['filename'], {}), '(filename)\n', (12608, 12618), False, 'from photonpy.smlm.dataset import Dataset\n'), ((16273, 16300), 'os.path.splitext', 'os.path.splitext', (['tiff_path'], {}), '(tiff_path)\n', (16289, 16300), False, 'import sys, os\n'), ((16826, 16853), 'photonpy.smlm.process_movie.Localizer2D', 'process_movie.Localizer2D', ([], {}), '()\n', (16851, 16853), True, 'import photonpy.smlm.process_movie as process_movie\n'), ((18271, 18284), 'numpy.median', 'np.median', (['sx'], {}), '(sx)\n', (18280, 18284), True, 'import numpy as np\n'), ((18325, 18338), 'numpy.median', 'np.median', (['sy'], {}), '(sy)\n', (18334, 18338), True, 'import numpy as np\n'), ((16751, 16772), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (16770, 16772), False, 'import threading\n')] |
import abc
import itertools
import numpy as np
from keras.preprocessing.image import apply_affine_transform
class AffineTransformation(object):
    """Callable geometric transform.

    Applies, in order: an optional horizontal flip, an optional pixel
    translation and an optional rotation by multiples of 90 degrees.
    """

    def __init__(self, flip, tx, ty, k_90_rotate):
        # Parameters are stored only; the work happens lazily in __call__.
        self.flip = flip
        self.tx = tx
        self.ty = ty
        self.k_90_rotate = k_90_rotate

    def __call__(self, x):
        """Transform image ``x`` and return the result (input is not mutated)."""
        out = np.fliplr(x) if self.flip else x
        if self.tx or self.ty:
            # Translation is delegated to Keras; reflect padding fills the border.
            out = apply_affine_transform(out, tx=self.tx, ty=self.ty,
                                         channel_axis=2, fill_mode='reflect')
        if self.k_90_rotate:
            out = np.rot90(out, self.k_90_rotate)
        return out
class AbstractTransformer(abc.ABC):
    """Base class holding an indexable collection of transformations.

    Subclasses populate ``self._transformation_list`` inside
    ``_create_transformation_list``; samples are then transformed by index.
    """

    def __init__(self):
        self._transformation_list = None
        self._create_transformation_list()

    @property
    def n_transforms(self):
        """Number of available transformations."""
        return len(self._transformation_list)

    @abc.abstractmethod
    def _create_transformation_list(self):
        return

    # Neat trick: the transform objects live in a plain list, so selecting a
    # (possibly composite) transformation is just an index lookup.
    def transform_batch(self, x_batch, t_inds):
        """Apply the ``t_inds[i]``-th transformation to ``x_batch[i]`` for every i."""
        assert len(x_batch) == len(t_inds)
        result = x_batch.copy()
        for sample_idx, transform_idx in enumerate(t_inds):
            transform = self._transformation_list[transform_idx]
            result[sample_idx] = transform(result[sample_idx])
        return result
class Transformer(AbstractTransformer):
    """Full transformation set: flips x translations x 90-degree rotations."""

    def __init__(self, translation_x=8, translation_y=8):
        self.max_tx = translation_x
        self.max_ty = translation_y
        super().__init__()

    def _create_transformation_list(self):
        # Cartesian product over flip, x-shift, y-shift and rotation count
        # yields 2 * 3 * 3 * 4 = 72 affine transformations.
        flips = (False, True)
        x_shifts = (0, -self.max_tx, self.max_tx)
        y_shifts = (0, -self.max_ty, self.max_ty)
        rotations = range(4)
        self._transformation_list = [
            AffineTransformation(is_flip, tx, ty, k_rotate)
            for is_flip, tx, ty, k_rotate in itertools.product(
                flips, x_shifts, y_shifts, rotations)
        ]
class SimpleTransformer(AbstractTransformer):
    """Reduced transformation set: flips x 90-degree rotations only (8 total)."""

    def _create_transformation_list(self):
        self._transformation_list = [
            AffineTransformation(is_flip, 0, 0, k_rotate)
            for is_flip, k_rotate in itertools.product((False, True), range(4))
        ]
| [
"numpy.fliplr",
"keras.preprocessing.image.apply_affine_transform",
"numpy.rot90"
] | [((392, 408), 'numpy.fliplr', 'np.fliplr', (['res_x'], {}), '(res_x)\n', (401, 408), True, 'import numpy as np\n'), ((470, 564), 'keras.preprocessing.image.apply_affine_transform', 'apply_affine_transform', (['res_x'], {'tx': 'self.tx', 'ty': 'self.ty', 'channel_axis': '(2)', 'fill_mode': '"""reflect"""'}), "(res_x, tx=self.tx, ty=self.ty, channel_axis=2,\n fill_mode='reflect')\n", (492, 564), False, 'from keras.preprocessing.image import apply_affine_transform\n'), ((615, 648), 'numpy.rot90', 'np.rot90', (['res_x', 'self.k_90_rotate'], {}), '(res_x, self.k_90_rotate)\n', (623, 648), True, 'import numpy as np\n')] |
# main.py
#
# Render a RandomMap as an RGB image and keep the matplotlib window alive.
import numpy as np
import matplotlib.pyplot as plt
# from a_star import AStar
from random_map import RandomMap

fig, ax = plt.subplots()
ax.cla()

# Renamed from `map` to avoid shadowing the builtin.
world_map = RandomMap()

# One RGB pixel per grid cell.
map_data = np.zeros((world_map.size, world_map.size, 3), int)

# map.Setup()
for grid in world_map.grids:
    i = grid.x
    j = grid.y
    if grid.value == -2:
        map_data[i, j] = (128, 128, 128)  # Wall is gray
    elif grid.value == -1:
        map_data[i, j] = (0, 0, 0)        # Obstacles are black
    else:
        # 255 is the maximum valid 8-bit channel value; 256 is out of the
        # range imshow accepts for integer RGB data.
        map_data[i, j] = (255, 255, 255)  # Rest of map is white

ax.set_xlim([-1, world_map.size])
ax.set_ylim([-1, world_map.size])
ax.imshow(map_data)

# a_star = AStar(world_map)
# a_star.RunAndSaveImage(ax, plt)
# a_star.Init()
while True:
    # Redraw and yield to the GUI event loop so the window stays responsive.
    ax.imshow(map_data)
    plt.pause(0.1)
"random_map.RandomMap",
"numpy.zeros",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.subplots"
] | [((133, 147), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (145, 147), True, 'import matplotlib.pyplot as plt\n'), ((164, 175), 'random_map.RandomMap', 'RandomMap', ([], {}), '()\n', (173, 175), False, 'from random_map import RandomMap\n'), ((186, 224), 'numpy.zeros', 'np.zeros', (['(map.size, map.size, 3)', 'int'], {}), '((map.size, map.size, 3), int)\n', (194, 224), True, 'import numpy as np\n'), ((1157, 1171), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (1166, 1171), True, 'import matplotlib.pyplot as plt\n')] |
import cv2
import numpy as np
import matplotlib.pyplot as plt
class Lane():
    """Lane-detection helper.

    Defines a lane region of interest (ROI) relative to a focal point, warps
    the ROI to a bird's-eye view, and fits second-order polynomials to the
    left/right lane-line pixels found via a sliding-window search.
    """

    # Shared interactive figure used by the live histogram plot.
    fig = plt.figure()
    plt.ion()

    def __init__(self, focal_point=None, roi_height=None, source_pts=None):
        # initialises common variables in the class
        # focal_point : location of the focal point of the lane. Can be the
        #               vanishing point of the image
        # roi_height : height where the lane region of interest is at most
        #              considered
        # source_pts : bottom start points of the lane roi
        if focal_point is None:
            self.focal_point = [0, 0]
        else:
            self.focal_point = focal_point
        if roi_height is None:
            self.roi_height = 0.
        else:
            self.roi_height = roi_height
        if source_pts is None:
            self.source_pts = [[0, 0], [0, 0]]
        else:
            self.source_pts = source_pts
        self.roi_pts = np.float32([[0, 0], [0, 0], [0, 0], [0, 0]])
        self.left_fit = None
        self.right_fit = None

    def lane_roi(self, img_shape, roi_height=None, focal_point=None, source_pts=None):
        # defines a lanes region of interest
        # img_shape : shape of the input image
        # roi_height : the pixel height of the highest point of interest
        # focal_point : location of the focal point. If None, will use the stored one
        # source_pts : location of the two bottom corner points
        # return : coordinates of the region of interest of a lane
        if focal_point is None:
            focal_point = self.focal_point
        if roi_height is None:
            roi_height = self.roi_height
        h = img_shape[0]  # image height
        # top of the roi is a factor of the height from the bottom of the roi
        # to the focal point.
        # ratio = (1 - fph/h) -> focal point position compared to the height
        # inv_fp = (1 - fph/h)*h -> inverse focal position
        # h_top = (ratio * (1 - roi_height)) * inv_fp
        # h_top is the y position of the height with respect to the focal
        fph = self.focal_point[1]  # height of focal point
        fp_ratio = (1 - fph / h)
        h_top = h * fp_ratio**2 * (1 - roi_height)
        if source_pts is None:
            # create the source points as the two bottom corners of the image
            source_pts = self.source_pts
        # Intersect the line (source point -> focal point) with the y = h_top row
        # on both sides to find the top corners of the trapezoid.
        m_left = (focal_point[1] - source_pts[0][1]) / (focal_point[0] - source_pts[0][0])
        b_left = focal_point[1] - (m_left * focal_point[0])
        x_left = (h_top - b_left) // m_left
        m_right = (focal_point[1] - source_pts[1][1]) / (focal_point[0] - source_pts[1][0])
        b_right = focal_point[1] - (m_right * focal_point[0])
        x_right = (h_top - b_right) // m_right
        self.roi_pts = np.float32([source_pts[0], [x_left, h_top], [x_right, h_top], source_pts[1]])
        return self.roi_pts

    def draw_lane_roi(self, img, roi_pts=None, focal_point=None, color=(255, 255, 255)):
        # draws the region of interest onto the supplied image
        # img : source image
        # roi_pts : coordinate points of the region of interest
        # focal_point : location of the focal point
        # return : a copy of the supplied image with the roi drawn on
        if focal_point is None:
            focal_point = self.focal_point
        if roi_pts is None:
            roi_pts = self.roi_pts
        image = img.copy()
        pts = np.int32(roi_pts)
        pts = pts.reshape((-1, 1, 2))
        cv2.circle(image, (focal_point[0], focal_point[1]), 5, color, 2)
        cv2.polylines(image, [pts], True, color, 2)
        return image

    def warp_image(self, img, roi_pts=None, location_pts=None, padding=(0, 0)):
        # img : image to be transformed into the new perspective
        # roi_pts : location points from the original image to be transformed.
        #           Points must be in a clock wise order.
        # location_pts : the final location points in the image where the
        #           old_pts will be located. If None supplied, the new points
        #           will be the four corners off the supplied image in a
        #           clockwise order, starting at point (0,0).
        # padding : adds padding onto the roi points so the warped image is
        #           larger than the roi. Supplied as (width, height) padding
        # returns : the warped perspective image with the supplied points
        if roi_pts is None:
            roi_pts = self.roi_pts
        h, w = img.shape[:2]
        if location_pts is None:
            location_pts = np.float32([[padding[0], h-padding[1]],    # bot-left
                                        [padding[0], padding[1]],      # top-left
                                        [w-padding[0], padding[1]],    # top-right
                                        [w-padding[0], h-padding[1]]]) # bot-right
        # calculate the perspective transform matrix between the old and new points
        M = cv2.getPerspectiveTransform(roi_pts, location_pts)
        # Warp the image to the new perspective
        return cv2.warpPerspective(img, M, (w, h))

    def mask_roi(self, img, roi_pts=None, outside_mask=True):
        # create a masked image showing only the area of the roi_pts
        # img : source image to be masked
        # roi_pts : region for masking
        # outside_mask : True if masking area outside roi, False if masking roi
        # return : masked image
        if roi_pts is None:
            roi_pts = self.roi_pts
        pts = np.int32(roi_pts)
        pts = [pts.reshape((-1, 1, 2))]
        # create a blank image and paint the roi polygon white on it
        mask = np.zeros_like(img)
        ignore_mask_color = (255, 255, 255)  # *channel_count
        m = cv2.fillPoly(mask, pts, ignore_mask_color)
        # return the applied mask (inverted when the roi itself is masked out)
        if outside_mask == False:
            m = cv2.bitwise_not(m)
            return cv2.bitwise_and(img, m)
        else:
            return cv2.bitwise_and(img, mask)

    def combine_images(self, img_one, img_two, img_one_weight=0.8, img_two_weight=1.):
        # combines two images into one for display purposes
        # img_one : image one
        # img_two : image two
        # img_one_weight : transparency weight of image one
        # img_two_weight : transparency weight of image two
        # return : combined image
        return cv2.addWeighted(img_one, img_one_weight, img_two, img_two_weight, 0)

    def gauss(self, x, mu, sigma, A):
        # creates a gaussian distribution from the data
        # x : input data
        # mu : mean data point
        # sigma : variance from the mean
        # A : amplitude of the peak
        # return : Gaussian distribution
        return A * np.exp(-(x - mu) ** 2 / 2 / sigma ** 2)

    def bimodal(self, x, mu1, sigma1, A1, mu2, sigma2, A2):
        # sum of two gaussians, e.g. for fitting a two-peaked lane histogram
        return self.gauss(x, mu1, sigma1, A1) + self.gauss(x, mu2, sigma2, A2)

    def plot_histogram(self, data):
        # plot a real time histogram of the supplied data
        # data : data to plot
        plt.clf()
        plt.plot(data)
        plt.pause(0.00001)

    def histogram(self, data):
        # calculates the histogram of data
        # data : data to be transformed into a histogram
        # returns : a vector of the histogram data (column sums)
        return np.sum(data, axis=0)

    def histogram_peaks(self, data, plot_hist=False):
        # find the strongest histogram column on each half of the image
        # (starting x positions for the left and right lane lines)
        hist = self.histogram(data)
        if plot_hist == True:
            self.plot_histogram(hist)
        # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
        midpoint = int(hist.shape[0] // 2)
        leftx_base = np.argmax(hist[:midpoint])
        rightx_base = np.argmax(hist[midpoint:]) + midpoint
        return leftx_base, rightx_base

    def plot_best_fit(self, img, nonzerox, nonzeroy, left_lane_inds, right_lane_inds, margin=100):
        # Generate x and y values for plotting
        ploty = np.linspace(0, img.shape[0] - 1, img.shape[0])
        left_fitx = self.left_fit[0] * ploty ** 2 + self.left_fit[1] * ploty + self.left_fit[2]
        right_fitx = self.right_fit[0] * ploty ** 2 + self.right_fit[1] * ploty + self.right_fit[2]
        # Create an image to draw on and an image to show the selection window
        out_img = np.dstack((img, img, img)) * 255
        window_img = np.zeros_like(out_img)
        # Color in left and right line pixels
        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
        # Generate a polygon to illustrate the search window area
        # And recast the x and y points into usable format for cv2.fillPoly()
        left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, ploty]))])
        left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + margin, ploty])))])
        left_line_pts = np.hstack((left_line_window1, left_line_window2))
        right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin, ploty]))])
        right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + margin, ploty])))])
        right_line_pts = np.hstack((right_line_window1, right_line_window2))
        # Draw the lane onto the warped blank image
        cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
        cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
        result = self.combine_images(out_img, window_img, img_one_weight=1, img_two_weight=0.3)
        cv2.imshow('result', result)  # visualise the output of the function

    def find_lane_lines(self, img, line_windows=10, plot_line=False, draw_square=False):
        # sliding-window search: walk up the image re-centering a window on the
        # mean x of detected pixels, then fit a 2nd-order polynomial per side
        out_img = img.copy()
        # Set height of windows (np.int was removed in NumPy 1.24)
        window_height = int(img.shape[0] / line_windows)
        # Identify the x and y positions of all nonzero pixels in the image
        nonzero = img.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        # Current positions to be updated for each window
        leftx, rightx = self.histogram_peaks(img)
        leftx_current = leftx
        rightx_current = rightx
        # Set the width of the windows +/- margin
        margin = 100
        # Set minimum number of pixels found to recenter window
        minpix = 50
        # Create empty lists to receive left and right lane pixel indices
        left_lane_inds = []
        right_lane_inds = []
        # Step through the windows one by one
        for window in range(line_windows):
            # Identify window boundaries in x and y (and right and left)
            win_y_low = img.shape[0] - (window + 1) * window_height
            win_y_high = img.shape[0] - window * window_height
            win_xleft_low = leftx_current - margin
            win_xleft_high = leftx_current + margin
            win_xright_low = rightx_current - margin
            win_xright_high = rightx_current + margin
            if draw_square == True:
                # Draw the windows on the visualization image
                cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (255, 255, 255), 2)
                cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (255, 255, 255), 2)
            # Identify the nonzero pixels in x and y within the window
            good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
            good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
            # Append these indices to the lists
            left_lane_inds.append(good_left_inds)
            right_lane_inds.append(good_right_inds)
            # If you found > minpix pixels, recenter next window on their mean position
            if len(good_left_inds) > minpix:
                leftx_current = int(np.mean(nonzerox[good_left_inds]))
            if len(good_right_inds) > minpix:
                rightx_current = int(np.mean(nonzerox[good_right_inds]))
        # Concatenate the arrays of indices
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
        # Extract left and right line pixel positions
        leftx = nonzerox[left_lane_inds]
        lefty = nonzeroy[left_lane_inds]
        rightx = nonzerox[right_lane_inds]
        righty = nonzeroy[right_lane_inds]
        # Fit a second order polynomial to each
        self.left_fit = np.polyfit(lefty, leftx, 2)
        self.right_fit = np.polyfit(righty, rightx, 2)
        if plot_line == True:
            # plot the line of best fit onto the image
            self.plot_best_fit(out_img, nonzerox, nonzeroy, left_lane_inds, right_lane_inds)
        return self.left_fit, self.right_fit

    def lane_lines(self, img, plot_line=False):
        # Does the program know where the lane lines are?
        if self.left_fit is None or self.right_fit is None:
            # Don't know where the lane lines are, so go and find them
            self.find_lane_lines(img)
        else:
            # Refine the previous fit: only keep pixels within +/- margin of it
            nonzero = img.nonzero()
            nonzeroy = np.array(nonzero[0])
            nonzerox = np.array(nonzero[1])
            margin = 100
            left_lane_inds = (
                    (nonzerox > (self.left_fit[0] * (nonzeroy ** 2) + self.left_fit[1] * nonzeroy + self.left_fit[2] - margin)) & (
                    nonzerox < (self.left_fit[0] * (nonzeroy ** 2) + self.left_fit[1] * nonzeroy + self.left_fit[2] + margin)))
            right_lane_inds = (
                    (nonzerox > (self.right_fit[0] * (nonzeroy ** 2) + self.right_fit[1] * nonzeroy + self.right_fit[2] - margin)) & (
                    nonzerox < (self.right_fit[0] * (nonzeroy ** 2) + self.right_fit[1] * nonzeroy + self.right_fit[2] + margin)))
            # Again, extract left and right line pixel positions
            leftx = nonzerox[left_lane_inds]
            lefty = nonzeroy[left_lane_inds]
            rightx = nonzerox[right_lane_inds]
            righty = nonzeroy[right_lane_inds]
            # Fit a second order polynomial to each
            self.left_fit = np.polyfit(lefty, leftx, 2)
            self.right_fit = np.polyfit(righty, rightx, 2)
            if plot_line == True:
                # plot the line of best fit onto the image
                self.plot_best_fit(img, nonzerox, nonzeroy, left_lane_inds, right_lane_inds)
        return self.left_fit, self.right_fit

    def set_roi_points(self, roi_pts):
        # set the region of interest for the class
        # roi_pts : region of interest points
        self.roi_pts = roi_pts

    def get_roi_points(self):
        # gets the current the region of interest points for the class
        # return : roi_pts
        return self.roi_pts

    def set_focal_point(self, focal_point):
        # set the focal_point for the class
        # focal_point : the new focal point
        self.focal_point = focal_point

    def get_focal_point(self):
        # gets the current focal point for the class
        # return : focal_point
        return self.focal_point

    def set_roi_height(self, height):
        # set the roi_height for the class
        # height : the new roi_height
        self.roi_height = height

    def get_roi_height(self):
        # gets the current roi_height for the class
        # return : roi_height
        return self.roi_height
"cv2.rectangle",
"numpy.hstack",
"numpy.polyfit",
"numpy.int32",
"cv2.imshow",
"numpy.array",
"cv2.warpPerspective",
"numpy.mean",
"matplotlib.pyplot.plot",
"numpy.exp",
"cv2.addWeighted",
"numpy.linspace",
"numpy.vstack",
"numpy.concatenate",
"cv2.fillPoly",
"cv2.getPerspectiveTransfo... | [((89, 101), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (99, 101), True, 'import matplotlib.pyplot as plt\n'), ((106, 115), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (113, 115), True, 'import matplotlib.pyplot as plt\n'), ((946, 990), 'numpy.float32', 'np.float32', (['[[0, 0], [0, 0], [0, 0], [0, 0]]'], {}), '([[0, 0], [0, 0], [0, 0], [0, 0]])\n', (956, 990), True, 'import numpy as np\n'), ((2813, 2890), 'numpy.float32', 'np.float32', (['[source_pts[0], [x_left, h_top], [x_right, h_top], source_pts[1]]'], {}), '([source_pts[0], [x_left, h_top], [x_right, h_top], source_pts[1]])\n', (2823, 2890), True, 'import numpy as np\n'), ((3464, 3481), 'numpy.int32', 'np.int32', (['roi_pts'], {}), '(roi_pts)\n', (3472, 3481), True, 'import numpy as np\n'), ((3528, 3592), 'cv2.circle', 'cv2.circle', (['image', '(focal_point[0], focal_point[1])', '(5)', 'color', '(2)'], {}), '(image, (focal_point[0], focal_point[1]), 5, color, 2)\n', (3538, 3592), False, 'import cv2\n'), ((3601, 3644), 'cv2.polylines', 'cv2.polylines', (['image', '[pts]', '(True)', 'color', '(2)'], {}), '(image, [pts], True, color, 2)\n', (3614, 3644), False, 'import cv2\n'), ((5000, 5050), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['roi_pts', 'location_pts'], {}), '(roi_pts, location_pts)\n', (5027, 5050), False, 'import cv2\n'), ((5114, 5149), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', '(w, h)'], {}), '(img, M, (w, h))\n', (5133, 5149), False, 'import cv2\n'), ((5554, 5571), 'numpy.int32', 'np.int32', (['roi_pts'], {}), '(roi_pts)\n', (5562, 5571), True, 'import numpy as np\n'), ((5873, 5891), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (5886, 5891), True, 'import numpy as np\n'), ((6007, 6049), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'pts', 'ignore_mask_color'], {}), '(mask, pts, ignore_mask_color)\n', (6019, 6049), False, 'import cv2\n'), ((6634, 6702), 'cv2.addWeighted', 'cv2.addWeighted', 
(['img_one', 'img_one_weight', 'img_two', 'img_two_weight', '(0)'], {}), '(img_one, img_one_weight, img_two, img_two_weight, 0)\n', (6649, 6702), False, 'import cv2\n'), ((7268, 7277), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7275, 7277), True, 'import matplotlib.pyplot as plt\n'), ((7286, 7300), 'matplotlib.pyplot.plot', 'plt.plot', (['data'], {}), '(data)\n', (7294, 7300), True, 'import matplotlib.pyplot as plt\n'), ((7309, 7325), 'matplotlib.pyplot.pause', 'plt.pause', (['(1e-05)'], {}), '(1e-05)\n', (7318, 7325), True, 'import matplotlib.pyplot as plt\n'), ((7526, 7546), 'numpy.sum', 'np.sum', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (7532, 7546), True, 'import numpy as np\n'), ((7727, 7753), 'numpy.int', 'np.int', (['(hist.shape[0] // 2)'], {}), '(hist.shape[0] // 2)\n', (7733, 7753), True, 'import numpy as np\n'), ((7775, 7801), 'numpy.argmax', 'np.argmax', (['hist[:midpoint]'], {}), '(hist[:midpoint])\n', (7784, 7801), True, 'import numpy as np\n'), ((8066, 8112), 'numpy.linspace', 'np.linspace', (['(0)', '(img.shape[0] - 1)', 'img.shape[0]'], {}), '(0, img.shape[0] - 1, img.shape[0])\n', (8077, 8112), True, 'import numpy as np\n'), ((8461, 8483), 'numpy.zeros_like', 'np.zeros_like', (['out_img'], {}), '(out_img)\n', (8474, 8483), True, 'import numpy as np\n'), ((9062, 9111), 'numpy.hstack', 'np.hstack', (['(left_line_window1, left_line_window2)'], {}), '((left_line_window1, left_line_window2))\n', (9071, 9111), True, 'import numpy as np\n'), ((9338, 9389), 'numpy.hstack', 'np.hstack', (['(right_line_window1, right_line_window2)'], {}), '((right_line_window1, right_line_window2))\n', (9347, 9389), True, 'import numpy as np\n'), ((9693, 9721), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'result'], {}), "('result', result)\n", (9703, 9721), False, 'import cv2\n'), ((9937, 9972), 'numpy.int', 'np.int', (['(img.shape[0] / line_windows)'], {}), '(img.shape[0] / line_windows)\n', (9943, 9972), True, 'import numpy as np\n'), ((10100, 10120), 
'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (10108, 10120), True, 'import numpy as np\n'), ((10140, 10160), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (10148, 10160), True, 'import numpy as np\n'), ((12389, 12419), 'numpy.concatenate', 'np.concatenate', (['left_lane_inds'], {}), '(left_lane_inds)\n', (12403, 12419), True, 'import numpy as np\n'), ((12446, 12477), 'numpy.concatenate', 'np.concatenate', (['right_lane_inds'], {}), '(right_lane_inds)\n', (12460, 12477), True, 'import numpy as np\n'), ((12774, 12801), 'numpy.polyfit', 'np.polyfit', (['lefty', 'leftx', '(2)'], {}), '(lefty, leftx, 2)\n', (12784, 12801), True, 'import numpy as np\n'), ((12827, 12856), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (12837, 12856), True, 'import numpy as np\n'), ((4615, 4751), 'numpy.float32', 'np.float32', (['[[padding[0], h - padding[1]], [padding[0], padding[1]], [w - padding[0],\n padding[1]], [w - padding[0], h - padding[1]]]'], {}), '([[padding[0], h - padding[1]], [padding[0], padding[1]], [w -\n padding[0], padding[1]], [w - padding[0], h - padding[1]]])\n', (4625, 4751), True, 'import numpy as np\n'), ((6135, 6153), 'cv2.bitwise_not', 'cv2.bitwise_not', (['m'], {}), '(m)\n', (6150, 6153), False, 'import cv2\n'), ((6173, 6196), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'm'], {}), '(img, m)\n', (6188, 6196), False, 'import cv2\n'), ((6230, 6256), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'mask'], {}), '(img, mask)\n', (6245, 6256), False, 'import cv2\n'), ((6955, 6994), 'numpy.exp', 'np.exp', (['(-(x - mu) ** 2 / 2 / sigma ** 2)'], {}), '(-(x - mu) ** 2 / 2 / sigma ** 2)\n', (6961, 6994), True, 'import numpy as np\n'), ((7824, 7850), 'numpy.argmax', 'np.argmax', (['hist[midpoint:]'], {}), '(hist[midpoint:])\n', (7833, 7850), True, 'import numpy as np\n'), ((8407, 8433), 'numpy.dstack', 'np.dstack', (['(img, img, img)'], {}), '((img, img, img))\n', (8416, 8433), True, 
'import numpy as np\n'), ((9476, 9500), 'numpy.int_', 'np.int_', (['[left_line_pts]'], {}), '([left_line_pts])\n', (9483, 9500), True, 'import numpy as np\n'), ((9548, 9573), 'numpy.int_', 'np.int_', (['[right_line_pts]'], {}), '([right_line_pts])\n', (9555, 9573), True, 'import numpy as np\n'), ((13461, 13481), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (13469, 13481), True, 'import numpy as np\n'), ((13505, 13525), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (13513, 13525), True, 'import numpy as np\n'), ((14438, 14465), 'numpy.polyfit', 'np.polyfit', (['lefty', 'leftx', '(2)'], {}), '(lefty, leftx, 2)\n', (14448, 14465), True, 'import numpy as np\n'), ((14495, 14524), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (14505, 14524), True, 'import numpy as np\n'), ((11237, 11341), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_xleft_low, win_y_low)', '(win_xleft_high, win_y_high)', '(255, 255, 255)', '(2)'], {}), '(out_img, (win_xleft_low, win_y_low), (win_xleft_high,\n win_y_high), (255, 255, 255), 2)\n', (11250, 11341), False, 'import cv2\n'), ((11354, 11460), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_xright_low, win_y_low)', '(win_xright_high, win_y_high)', '(255, 255, 255)', '(2)'], {}), '(out_img, (win_xright_low, win_y_low), (win_xright_high,\n win_y_high), (255, 255, 255), 2)\n', (11367, 11460), False, 'import cv2\n'), ((8892, 8930), 'numpy.vstack', 'np.vstack', (['[left_fitx - margin, ploty]'], {}), '([left_fitx - margin, ploty])\n', (8901, 8930), True, 'import numpy as np\n'), ((9164, 9203), 'numpy.vstack', 'np.vstack', (['[right_fitx - margin, ploty]'], {}), '([right_fitx - margin, ploty])\n', (9173, 9203), True, 'import numpy as np\n'), ((12162, 12195), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (12169, 12195), True, 'import numpy as np\n'), ((12283, 12317), 'numpy.mean', 'np.mean', 
(['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (12290, 12317), True, 'import numpy as np\n'), ((8995, 9033), 'numpy.vstack', 'np.vstack', (['[left_fitx + margin, ploty]'], {}), '([left_fitx + margin, ploty])\n', (9004, 9033), True, 'import numpy as np\n'), ((9269, 9308), 'numpy.vstack', 'np.vstack', (['[right_fitx + margin, ploty]'], {}), '([right_fitx + margin, ploty])\n', (9278, 9308), True, 'import numpy as np\n')] |
import pytest
from pathlib import Path
import shutil
from spikeinterface import set_global_tmp_folder
from spikeinterface.core.testing_tools import generate_recording
from spikeinterface.toolkit.preprocessing import clip, blank_staturation
import numpy as np
# Resolve where cached test artifacts go: prefer the shared folder injected
# by the test harness onto the ``pytest`` module, otherwise fall back to a
# local "cache_folder" directory.
if hasattr(pytest, "global_test_folder"):
    cache_folder = pytest.global_test_folder / "toolkit"
else:
    cache_folder = Path("cache_folder") / "toolkit"
# Route spikeinterface's temporary outputs (e.g. ``save()``) into that folder.
set_global_tmp_folder(cache_folder)
def test_clip():
    """Check that :func:`clip` clamps recording traces into the requested range.

    Exercises both the two-sided form (``a_min`` and ``a_max``) and the
    one-sided form (``a_min`` only).
    """
    rec = generate_recording()

    rec0 = clip(rec, a_min=-2, a_max=3.)
    rec0.save(verbose=False)

    rec1 = clip(rec, a_min=-1.5)
    rec1.save(verbose=False)

    traces0 = rec0.get_traces(segment_index=0, channel_ids=[1])
    assert traces0.shape[1] == 1
    # NOTE: a chained comparison like ``-2 <= arr <= 3`` truth-tests an
    # intermediate numpy array and only worked here by accident (single
    # element); combine the bounds element-wise instead.
    assert np.all((-2 <= traces0) & (traces0 <= 3))

    traces1 = rec1.get_traces(segment_index=0, channel_ids=[0, 1])
    assert traces1.shape[1] == 2
    assert np.all(-1.5 <= traces1)
def test_blank_staturation():
    """Check that :func:`blank_staturation` bounds traces both with an
    absolute threshold and with a quantile-derived threshold.
    """
    recording = generate_recording()

    blanked_abs = blank_staturation(recording, abs_threshold=3.)
    blanked_abs.save(verbose=False)

    blanked_quantile = blank_staturation(recording, quantile_threshold=0.01, direction='both',
                                         chunk_size=10000)
    blanked_quantile.save(verbose=False)

    traces_abs = blanked_abs.get_traces(segment_index=0, channel_ids=[1])
    assert traces_abs.shape[1] == 1
    assert np.all(traces_abs < 3.)

    traces_quantile = blanked_quantile.get_traces(segment_index=0, channel_ids=[0])
    assert traces_quantile.shape[1] == 1
    # use a smaller value to be sure
    lower_bound = blanked_quantile._recording_segments[0].a_min
    assert np.all(traces_quantile >= lower_bound)
# Allow running this test module directly as a script, outside of pytest.
if __name__ == '__main__':
    test_clip()
    test_blank_staturation()
| [
"spikeinterface.core.testing_tools.generate_recording",
"pathlib.Path",
"spikeinterface.set_global_tmp_folder",
"spikeinterface.toolkit.preprocessing.clip",
"spikeinterface.toolkit.preprocessing.blank_staturation",
"numpy.all"
] | [((422, 457), 'spikeinterface.set_global_tmp_folder', 'set_global_tmp_folder', (['cache_folder'], {}), '(cache_folder)\n', (443, 457), False, 'from spikeinterface import set_global_tmp_folder\n'), ((487, 507), 'spikeinterface.core.testing_tools.generate_recording', 'generate_recording', ([], {}), '()\n', (505, 507), False, 'from spikeinterface.core.testing_tools import generate_recording\n'), ((520, 550), 'spikeinterface.toolkit.preprocessing.clip', 'clip', (['rec'], {'a_min': '(-2)', 'a_max': '(3.0)'}), '(rec, a_min=-2, a_max=3.0)\n', (524, 550), False, 'from spikeinterface.toolkit.preprocessing import clip, blank_staturation\n'), ((591, 612), 'spikeinterface.toolkit.preprocessing.clip', 'clip', (['rec'], {'a_min': '(-1.5)'}), '(rec, a_min=-1.5)\n', (595, 612), False, 'from spikeinterface.toolkit.preprocessing import clip, blank_staturation\n'), ((752, 781), 'numpy.all', 'np.all', (['(-2 <= traces0[0] <= 3)'], {}), '(-2 <= traces0[0] <= 3)\n', (758, 781), True, 'import numpy as np\n'), ((895, 921), 'numpy.all', 'np.all', (['(-1.5 <= traces1[1])'], {}), '(-1.5 <= traces1[1])\n', (901, 921), True, 'import numpy as np\n'), ((964, 984), 'spikeinterface.core.testing_tools.generate_recording', 'generate_recording', ([], {}), '()\n', (982, 984), False, 'from spikeinterface.core.testing_tools import generate_recording\n'), ((997, 1038), 'spikeinterface.toolkit.preprocessing.blank_staturation', 'blank_staturation', (['rec'], {'abs_threshold': '(3.0)'}), '(rec, abs_threshold=3.0)\n', (1014, 1038), False, 'from spikeinterface.toolkit.preprocessing import clip, blank_staturation\n'), ((1079, 1166), 'spikeinterface.toolkit.preprocessing.blank_staturation', 'blank_staturation', (['rec'], {'quantile_threshold': '(0.01)', 'direction': '"""both"""', 'chunk_size': '(10000)'}), "(rec, quantile_threshold=0.01, direction='both',\n chunk_size=10000)\n", (1096, 1166), False, 'from spikeinterface.toolkit.preprocessing import clip, blank_staturation\n'), ((1330, 1351), 'numpy.all', 
'np.all', (['(traces0 < 3.0)'], {}), '(traces0 < 3.0)\n', (1336, 1351), True, 'import numpy as np\n'), ((1543, 1567), 'numpy.all', 'np.all', (['(traces1 >= a_min)'], {}), '(traces1 >= a_min)\n', (1549, 1567), True, 'import numpy as np\n'), ((388, 408), 'pathlib.Path', 'Path', (['"""cache_folder"""'], {}), "('cache_folder')\n", (392, 408), False, 'from pathlib import Path\n')] |
"""
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from astropy.utils.misc import NumpyRNGContext
import pytest
from .external_delta_sigma import external_delta_sigma
from ..delta_sigma import delta_sigma, delta_sigma_from_precomputed_pairs
from ..surface_density import surface_density_in_annulus, surface_density_in_cylinder
from ..surface_density_helpers import log_interpolation_with_inner_zero_masking as log_interp
from ..mass_in_cylinders import total_mass_enclosed_per_cylinder
from ....empirical_models import PrebuiltSubhaloModelFactory
from ....sim_manager import CachedHaloCatalog
from ....mock_observables import return_xyz_formatted_array
# Explicit public API of this test module.
__all__ = ('test_delta_sigma_consistency', )

# Seed shared by every test so the randomly generated mock data is
# reproducible across runs.
fixed_seed = 43
@pytest.mark.slow
def test_delta_sigma1():
    """Compare the internal ``delta_sigma`` implementation against the
    external reference implementation on a mock galaxy/particle sample,
    requiring agreement to within 10 per cent.
    """
    model = PrebuiltSubhaloModelFactory('behroozi10')
    try:
        halocat = CachedHaloCatalog()
    # A bare ``except:`` here also trapped SystemExit/KeyboardInterrupt;
    # narrow it to Exception while keeping the skip-on-failure behavior.
    except Exception:
        return  # Skip test if the environment does not have the default halo catalog
    model.populate_mock(halocat, seed=fixed_seed)

    px = model.mock.ptcl_table['x']
    py = model.mock.ptcl_table['y']
    pz = model.mock.ptcl_table['z']
    Nptcls_to_keep = int(1e5)
    randomizer = np.random.random(len(model.mock.ptcl_table))
    sorted_randoms = np.sort(randomizer)
    ptcl_mask = np.where(sorted_randoms < sorted_randoms[Nptcls_to_keep])[0]
    particles = return_xyz_formatted_array(px, py, pz, mask=ptcl_mask)

    x = model.mock.galaxy_table['x']
    y = model.mock.galaxy_table['y']
    z = model.mock.galaxy_table['z']
    # Select a narrow stellar mass bin around 10**10.5 Msun
    mstar105_mask = (model.mock.galaxy_table['stellar_mass'] > 10**10.25)
    mstar105_mask *= (model.mock.galaxy_table['stellar_mass'] < 10**10.75)
    galaxies = return_xyz_formatted_array(x, y, z, mask=mstar105_mask)

    period = halocat.Lbox[0]
    projection_period = period
    rp_bins = np.logspace(np.log10(0.25), np.log10(15), 10)
    try:
        rp_mids_external, dsigma_external = external_delta_sigma(galaxies[:, :2], particles[:, :2],
                rp_bins, period, projection_period, cosmology=halocat.cosmology)
    # Narrowed from a bare ``except:`` (see above).
    except Exception:
        return  # skip test if testing environment has scipy version incompatibilities

    downsampling_factor = halocat.num_ptcl_per_dim**3/float(particles.shape[0])
    rp_mids, dsigma = delta_sigma(galaxies, particles, halocat.particle_mass,
            downsampling_factor, rp_bins, halocat.Lbox)

    # Interpolate the internal result onto the external bin midpoints in
    # log-space before comparing.
    dsigma_interpol = np.exp(np.interp(np.log(rp_mids_external),
        np.log(rp_mids), np.log(dsigma)))
    assert np.allclose(dsigma_interpol, dsigma_external, rtol=0.1)
def test_delta_sigma_consistency():
    """Freeze the internal calculation of ``delta_sigma`` by verifying it
    equals the difference between the interpolated in-cylinder surface
    density and the in-annulus surface density.
    """
    num_centers, num_ptcl = 100, 500
    with NumpyRNGContext(fixed_seed):
        sample_centers = np.random.random((num_centers, 3))
        sample_particles = np.random.random((num_ptcl, 3))
    masses = np.ones(num_ptcl)
    downsampling = 1
    radial_bins = np.linspace(0.1, 0.3, 5)
    box_size = 1.

    bin_mids, ds = delta_sigma(sample_centers, sample_particles, masses,
            downsampling, radial_bins, box_size)

    sigma_annulus = surface_density_in_annulus(sample_centers, sample_particles, masses,
            downsampling, radial_bins, box_size)
    sigma_cylinder = surface_density_in_cylinder(sample_centers, sample_particles, masses,
            downsampling, radial_bins, box_size)
    # Interpolate the cylinder profile onto the bin midpoints before
    # differencing, exactly as delta_sigma does internally.
    sigma_cylinder_interp = log_interp(sigma_cylinder, radial_bins, bin_mids)

    implied_delta_sigma = sigma_cylinder_interp - sigma_annulus
    assert np.allclose(implied_delta_sigma, ds, rtol=0.001)
def test_delta_sigma_raises_exceptions1():
    """``delta_sigma`` must reject a ``particle_masses`` array whose length
    does not match the number of particles.
    """
    num_centers, num_ptcl = 100, 500
    with NumpyRNGContext(fixed_seed):
        centers = np.random.random((num_centers, 3))
        particles = np.random.random((num_ptcl, 3))
    # Deliberately one element short to trigger the validation error
    particle_masses = np.ones(num_ptcl - 1)
    downsampling_factor = 1
    rp_bins = np.linspace(0.1, 0.3, 5)
    Lbox = 1.

    with pytest.raises(AssertionError) as err:
        delta_sigma(centers, particles, particle_masses,
                downsampling_factor, rp_bins, Lbox)
    substr = "Must have same number of ``particle_masses`` as particles"
    assert substr in err.value.args[0]
def test_delta_sigma_raises_exceptions2():
    """``delta_sigma`` must reject a downsampling factor smaller than one."""
    num_centers, num_ptcl = 100, 500
    with NumpyRNGContext(fixed_seed):
        centers = np.random.random((num_centers, 3))
        particles = np.random.random((num_ptcl, 3))
    particle_masses = np.ones(num_ptcl)
    downsampling_factor = 0.5
    rp_bins = np.linspace(0.1, 0.3, 5)
    Lbox = 1.

    with pytest.raises(AssertionError) as err:
        rp_mids, ds = delta_sigma(centers, particles, particle_masses,
                downsampling_factor, rp_bins, Lbox)
    # The template previously hard-coded ``0.5`` and called ``.format`` with
    # no placeholder; interpolate the actual factor instead (same resulting
    # string for the value used here).
    substr = "downsampling_factor = {0} < 1, which is impossible".format(downsampling_factor)
    assert substr in err.value.args[0]
def test_delta_sigma_from_precomputed_pairs():
    """The precomputed-pairs code path must reproduce the direct
    ``delta_sigma`` calculation exactly.
    """
    num_centers, num_ptcl = 1000, 5000
    with NumpyRNGContext(fixed_seed):
        galaxies = np.random.random((num_centers, 3))
        particles = np.random.random((num_ptcl, 3))
    particle_masses = np.ones(num_ptcl)
    downsampling_factor = 1
    rp_bins = np.linspace(0.1, 0.3, 5)
    period = 1.

    # Direct calculation
    rp_mids, ds_direct = delta_sigma(galaxies, particles, particle_masses,
            downsampling_factor, rp_bins, period)

    # Same quantity computed from the precomputed per-cylinder masses
    mass_encl = total_mass_enclosed_per_cylinder(galaxies, particles, particle_masses,
            downsampling_factor, rp_bins, period)
    rp_mids2, ds_precomputed = delta_sigma_from_precomputed_pairs(galaxies, mass_encl, rp_bins, period)

    assert np.allclose(ds_direct, ds_precomputed)
| [
"numpy.allclose",
"numpy.log10",
"numpy.ones",
"astropy.utils.misc.NumpyRNGContext",
"numpy.where",
"numpy.random.random",
"numpy.sort",
"numpy.log",
"numpy.linspace",
"pytest.raises"
] | [((1312, 1331), 'numpy.sort', 'np.sort', (['randomizer'], {}), '(randomizer)\n', (1319, 1331), True, 'import numpy as np\n'), ((2555, 2610), 'numpy.allclose', 'np.allclose', (['dsigma_interpol', 'dsigma_external'], {'rtol': '(0.1)'}), '(dsigma_interpol, dsigma_external, rtol=0.1)\n', (2566, 2610), True, 'import numpy as np\n'), ((3093, 3110), 'numpy.ones', 'np.ones', (['num_ptcl'], {}), '(num_ptcl)\n', (3100, 3110), True, 'import numpy as np\n'), ((3154, 3178), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.3)', '(5)'], {}), '(0.1, 0.3, 5)\n', (3165, 3178), True, 'import numpy as np\n'), ((3741, 3789), 'numpy.allclose', 'np.allclose', (['implied_delta_sigma', 'ds'], {'rtol': '(0.001)'}), '(implied_delta_sigma, ds, rtol=0.001)\n', (3752, 3789), True, 'import numpy as np\n'), ((4038, 4059), 'numpy.ones', 'np.ones', (['(num_ptcl - 1)'], {}), '(num_ptcl - 1)\n', (4045, 4059), True, 'import numpy as np\n'), ((4101, 4125), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.3)', '(5)'], {}), '(0.1, 0.3, 5)\n', (4112, 4125), True, 'import numpy as np\n'), ((4667, 4684), 'numpy.ones', 'np.ones', (['num_ptcl'], {}), '(num_ptcl)\n', (4674, 4684), True, 'import numpy as np\n'), ((4730, 4754), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.3)', '(5)'], {}), '(0.1, 0.3, 5)\n', (4741, 4754), True, 'import numpy as np\n'), ((5324, 5341), 'numpy.ones', 'np.ones', (['num_ptcl'], {}), '(num_ptcl)\n', (5331, 5341), True, 'import numpy as np\n'), ((5385, 5409), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.3)', '(5)'], {}), '(0.1, 0.3, 5)\n', (5396, 5409), True, 'import numpy as np\n'), ((5781, 5802), 'numpy.allclose', 'np.allclose', (['ds1', 'ds2'], {}), '(ds1, ds2)\n', (5792, 5802), True, 'import numpy as np\n'), ((1348, 1405), 'numpy.where', 'np.where', (['(sorted_randoms < sorted_randoms[Nptcls_to_keep])'], {}), '(sorted_randoms < sorted_randoms[Nptcls_to_keep])\n', (1356, 1405), True, 'import numpy as np\n'), ((1899, 1913), 'numpy.log10', 'np.log10', (['(0.25)'], {}), 
'(0.25)\n', (1907, 1913), True, 'import numpy as np\n'), ((1915, 1927), 'numpy.log10', 'np.log10', (['(15)'], {}), '(15)\n', (1923, 1927), True, 'import numpy as np\n'), ((2936, 2963), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (2951, 2963), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((2983, 3017), 'numpy.random.random', 'np.random.random', (['(num_centers, 3)'], {}), '((num_centers, 3))\n', (2999, 3017), True, 'import numpy as np\n'), ((3038, 3069), 'numpy.random.random', 'np.random.random', (['(num_ptcl, 3)'], {}), '((num_ptcl, 3))\n', (3054, 3069), True, 'import numpy as np\n'), ((3881, 3908), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (3896, 3908), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((3928, 3962), 'numpy.random.random', 'np.random.random', (['(num_centers, 3)'], {}), '((num_centers, 3))\n', (3944, 3962), True, 'import numpy as np\n'), ((3983, 4014), 'numpy.random.random', 'np.random.random', (['(num_ptcl, 3)'], {}), '((num_ptcl, 3))\n', (3999, 4014), True, 'import numpy as np\n'), ((4150, 4179), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (4163, 4179), False, 'import pytest\n'), ((4510, 4537), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (4525, 4537), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((4557, 4591), 'numpy.random.random', 'np.random.random', (['(num_centers, 3)'], {}), '((num_centers, 3))\n', (4573, 4591), True, 'import numpy as np\n'), ((4612, 4643), 'numpy.random.random', 'np.random.random', (['(num_ptcl, 3)'], {}), '((num_ptcl, 3))\n', (4628, 4643), True, 'import numpy as np\n'), ((4779, 4808), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (4792, 4808), False, 'import pytest\n'), ((5166, 5193), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', 
(['fixed_seed'], {}), '(fixed_seed)\n', (5181, 5193), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((5214, 5248), 'numpy.random.random', 'np.random.random', (['(num_centers, 3)'], {}), '((num_centers, 3))\n', (5230, 5248), True, 'import numpy as np\n'), ((5269, 5300), 'numpy.random.random', 'np.random.random', (['(num_ptcl, 3)'], {}), '((num_ptcl, 3))\n', (5285, 5300), True, 'import numpy as np\n'), ((2471, 2495), 'numpy.log', 'np.log', (['rp_mids_external'], {}), '(rp_mids_external)\n', (2477, 2495), True, 'import numpy as np\n'), ((2509, 2524), 'numpy.log', 'np.log', (['rp_mids'], {}), '(rp_mids)\n', (2515, 2524), True, 'import numpy as np\n'), ((2526, 2540), 'numpy.log', 'np.log', (['dsigma'], {}), '(dsigma)\n', (2532, 2540), True, 'import numpy as np\n')] |
import numpy as np
def negate_bool_features(Xin: np.ndarray, negate: np.ndarray) -> np.ndarray:
    """Flip the selected boolean feature columns of ``Xin``.

    Parameters
    ----------
    Xin : array-like of 0/1 (or bool), shape (n_samples, n_features)
        The boolean feature matrix.
    negate : array-like of 0/1 (or bool), shape (n_features,)
        Mask selecting which feature columns to negate (1 = flip the column,
        0 = leave it unchanged).

    Returns
    -------
    np.ndarray of bool
        A new array equal to ``Xin`` with the selected columns logically
        negated; ``Xin`` is not modified.
    """
    # NOTE: annotations previously used ``np.array`` (a function, not a type);
    # ``np.ndarray`` is the correct type.
    X = np.array(Xin)
    neg = np.array(negate)
    # Where neg == 1 this computes 1 - x (logical NOT); where neg == 0 it
    # leaves x unchanged. Cast to bool at the end.
    return (X * (1 - neg * 2) + neg).astype(bool)
| [
"numpy.array"
] | [((96, 109), 'numpy.array', 'np.array', (['Xin'], {}), '(Xin)\n', (104, 109), True, 'import numpy as np\n'), ((120, 136), 'numpy.array', 'np.array', (['negate'], {}), '(negate)\n', (128, 136), True, 'import numpy as np\n')] |
'''Implements algorithms for clustering scans by
spectral similarity.
'''
import operator
import functools
import bisect
import json
from collections import deque
import numpy as np
from scipy.special import comb
from ms_deisotope.task.log_utils import LogUtilsMixin
from ms_deisotope.data_source.dispatch import (
SubsequenceMappingProxy as _SubsequenceMappingProxy,
DynamicallyLoadingProxyResolver as _DynamicallyLoadingResolver)
from .similarity_methods import peak_set_similarity
# Reusable accessors that pull either the centroided peak list or the
# deconvoluted peak list off of a scan-like object.
peak_set_getter = operator.attrgetter("peak_set")
deconvoluted_peak_set_getter = operator.attrgetter("deconvoluted_peak_set")
def _ppm_error(x, y):
return (x - y) / y
@functools.total_ordering
class SpectrumCluster(object):
    '''A collection of similar spectra.

    A :class:`SpectrumCluster` is compare-able by :attr:`neutral_mass`
    and acts as a :class:`collections.Sequence` over its members stored
    in :attr:`scans`.

    Attributes
    ----------
    scans: :class:`list`
        A list of spectra which are similar to each other.
    neutral_mass: :class:`float`
        The neutral mass of the representative spectrum's precursor.
    annotations: :class:`dict`
        Arbitrary key-value metadata attached to this cluster.
    '''

    def __init__(self, scans=None, neutral_mass=None, average_similarity=None, annotations=None):
        if scans is None:
            scans = []
        if annotations is None:
            annotations = dict()
        self.scans = scans
        self.neutral_mass = None
        if neutral_mass:
            self.neutral_mass = neutral_mass
        elif self.scans:
            # Use the first member's precursor mass as the representative mass
            self.neutral_mass = self.scans[0].precursor_information.neutral_mass
        else:
            self.neutral_mass = 0.0
        if average_similarity is None and len(self) == 1:
            # A singleton cluster is trivially self-similar
            average_similarity = 1.0
        self._average_similarity = average_similarity
        self.annotations = annotations

    def __lt__(self, other):
        return self.neutral_mass < other.neutral_mass

    def __gt__(self, other):
        return self.neutral_mass > other.neutral_mass

    def __eq__(self, other):
        # Masses are considered equal within 1 PPM relative error
        return (abs(self.neutral_mass - other.neutral_mass) / other.neutral_mass) < 1e-6

    def __repr__(self):
        return "SpectrumCluster(%f, %d)" % (self.neutral_mass, len(self))

    def __iter__(self):
        return iter(self.scans)

    def __len__(self):
        return len(self.scans)

    def __getitem__(self, i):
        return self.scans[i]

    def _invalidate(self):
        # Discard the cached similarity; it will be recomputed lazily
        self._average_similarity = None

    def append(self, item, incremental_similarity=False):
        '''Add a new spectrum to the cluster.

        Parameters
        ----------
        item: :class:`~.ScanBase`
            The spectrum to add.
        incremental_similarity: :class:`bool`
            If true, update the cached average similarity in place instead
            of invalidating it for later full recomputation.
        '''
        if not self.neutral_mass:
            self.neutral_mass = item.precursor_information.neutral_mass
        if incremental_similarity:
            self._incremental_similarity(item)
        else:
            self._invalidate()
        self.scans.append(item)

    def _calculate_similarity_with(self, scan, from_ix=0, to_ix=None, *args, **kwargs):
        # Compare ``scan`` against the members in self[from_ix:to_ix]
        if from_ix is None:
            from_ix = 0
        if to_ix is None:
            to_ix = len(self)
        acc = []
        for member in self[from_ix:to_ix]:
            acc.append(peak_set_similarity(scan, member, *args, **kwargs))
        return acc

    def _incremental_similarity(self, scan, *args, **kwargs):
        # Fold the similarities of ``scan`` vs. the current members into the
        # running average without recomputing all pairwise scores.
        new_sims = self._calculate_similarity_with(scan, *args, **kwargs)
        aggregate_size = comb(len(self), 2)
        n = (aggregate_size + len(new_sims))
        if n == 0:
            n = 1
        self._average_similarity = (aggregate_size * self.average_similarity() + sum(new_sims)
                                    ) / n

    def _full_similarity(self, *args, **kwargs):
        # Recompute the average over all pairwise member similarities
        similarity_method = kwargs.pop(
            "similarity_method", peak_set_similarity)
        ratings = []
        n = len(self)
        for i in range(n):
            scan_i = self[i]
            for j in range(i + 1, n):
                scan_j = self[j]
                ratings.append(similarity_method(
                    scan_i, scan_j, *args, **kwargs))
        self._average_similarity = sum(ratings) / len(ratings)

    def average_similarity(self, *args, **kwargs):
        '''Calculate the within-cluster similarity among all cluster members
        and returns the average.

        All arguments are forwarded to :func:`~.peak_set_similarity`.

        If the cluster is a singleton or smaller, by definition its similarity
        is ``1.0``.

        Parameters
        ----------
        similarity_method: Callable, optional
            The peak set similarity method to use. Defaults to :func:`~.peak_set_similarity`
            All other arguments are forwarded to this function.

        Returns
        -------
        :class:`float`
        '''
        n = len(self)
        if n <= 1:
            return 1.0
        if self._average_similarity is not None:
            return self._average_similarity
        self._full_similarity(*args, **kwargs)
        return self._average_similarity

    def similarity_matrix(self, *args, **kwargs):
        '''Compute a symmetric similarity matrix comparing each
        spectrum within the cluster to each other spectrum.

        Parameters
        ----------
        similarity_method: Callable, optional
            The peak set similarity method to use. Defaults to :func:`~.peak_set_similarity`
            All other arguments are forwarded to this function.

        Returns
        -------
        :class:`np.ndarray`: m
            The value at position ``m[i,j]`` is the simiarlity of ``self[i]`` with ``self[j]``
        '''
        similarity_method = kwargs.pop("similarity_method", peak_set_similarity)
        n = len(self)
        mat = np.identity(n)
        for i in range(n):
            scan_i = self[i]
            for j in range(i + 1, n):
                scan_j = self[j]
                mat[i, j] = mat[j, i] = (similarity_method(scan_i, scan_j, *args, **kwargs))
        return mat

    def to_dict(self):
        '''Convert the cluster to a JSON-safe :class:`dict`

        Returns
        -------
        :class:`dict`
        '''
        d = {}
        d['neutral_mass'] = self.neutral_mass
        d['size'] = len(self)
        d['average_similarity'] = self.average_similarity()
        scans = []
        for scan in self:
            scan_source = scan.source
            if scan_source is None:
                # Previously ``source_name`` was left unbound in this case,
                # producing a NameError below.
                source_name = ":detatched:"
            else:
                source_name = scan_source.source_file
                # ``basestring`` (Python 2 only) was previously used here and
                # is a NameError on Python 3; check against ``str`` instead.
                if not isinstance(source_name, str):
                    # A file-like object: prefer its ``name`` attribute, else
                    # mark the scan as detached.  (The original dangling
                    # ``else`` inverted this logic and clobbered string names.)
                    source_name = getattr(source_name, 'name', ":detatched:")
            scans.append({
                "id": scan.id,
                "source": source_name,
                "neutral_mass": scan.precursor_information.neutral_mass,
            })
        d['scans'] = scans
        return d
class SpectrumClusterCollection(object):
    '''A sorted :class:`~.Sequence` of :class:`SpectrumCluster` instances
    that supports searching by precursor mass.
    '''

    def __init__(self, clusters=None):
        if clusters is None:
            clusters = []
        self.clusters = list(clusters)

    def add(self, cluster):
        '''Add a new :class:`SpectrumCluster` to the collection,
        preserving sorted order.

        Parameters
        ----------
        cluster: :class:`SpectrumCluster`
            The cluster to add
        '''
        bisect.insort(self.clusters, cluster)

    def __getitem__(self, i):
        return self.clusters[i]

    def __setitem__(self, i, value):
        self.clusters[i] = value

    def __len__(self):
        return len(self.clusters)

    def __iter__(self):
        return iter(self.clusters)

    def __repr__(self):
        template = "{self.__class__.__name__}({size})"
        size = len(self)
        return template.format(self=self, size=size)

    def _binary_search(self, mass, error_tolerance=1e-5):
        # Returns (best_index, lo_index, hi_index): the index of the cluster
        # whose mass is closest to ``mass`` and the slice bounds of all
        # clusters within ``error_tolerance``.  Returns (0, 0, 0) when no
        # bracketing interval is found (including the empty collection).
        array = self.clusters
        n = len(array)
        lo = 0
        hi = n
        while hi != lo:
            mid = (hi + lo) // 2
            y = array[mid].neutral_mass
            err = (y - mass) / mass
            if hi - lo == 1:
                # Interval narrowed to one candidate: scan outward in both
                # directions for the closest match and the tolerance window.
                best_index = mid
                best_error = abs(err)
                i = mid - 1
                while i >= 0:
                    x = array[i]
                    err = abs((x.neutral_mass - mass) / mass)
                    if err < best_error:
                        best_error = err
                        best_index = i
                    elif err > error_tolerance:
                        break
                    i -= 1
                lo_index = i + 1
                i = mid + 1
                while i < n:
                    x = array[i]
                    err = abs((x.neutral_mass - mass) / mass)
                    if err < best_error:
                        best_error = err
                        best_index = i
                    elif err > error_tolerance:
                        break
                    i += 1
                hi_index = i
                return best_index, lo_index, hi_index
            elif err > 0:
                hi = mid
            else:
                lo = mid
        return 0, 0, 0

    def find(self, mass, error_tolerance=1e-5):
        '''Finds the cluster whose precursor mass is closest to
        ``mass`` within ``error_tolerance`` ppm error.

        Parameters
        ----------
        mass: :class:`float`
            The mass to search for.
        error_tolerance: :class:`float`
            The PPM error tolerance to use. Defaults to 1e-5.

        Returns
        -------
        :class:`SpectrumCluster` or :const:`None`
        '''
        if not self.clusters:
            # Previously this fell through to ``self[0]`` and raised
            # IndexError when the collection was empty.
            return None
        target_ix, _, _ = self._binary_search(mass, error_tolerance)
        target = self[target_ix]
        if abs(target.neutral_mass - mass) / mass > error_tolerance:
            return None
        return target

    def find_all(self, mass, error_tolerance=1e-5):
        '''Finds all clusters whose precursor mass is within ``error_tolerance``
        ppm of ``mass``.

        Parameters
        ----------
        mass: :class:`float`
            The mass to search for.
        error_tolerance: :class:`float`
            The PPM error tolerance to use. Defaults to 1e-5.

        Returns
        -------
        :class:`list`
        '''
        _, lo_ix, hi_ix = self._binary_search(mass, error_tolerance)
        result = [
            target for target in self[lo_ix:hi_ix]
            if abs(target.neutral_mass - mass) / mass <= error_tolerance
        ]
        return result
class _PeakGetterStrategyBase(object):
    '''Base class for strategies that extract a peak list from a scan.

    Calling an instance delegates to :meth:`peaks`, which subclasses
    must implement.
    '''

    def __call__(self, scan):
        # Convenience: make the strategy object directly callable on a scan.
        return self.peaks(scan)
class _FittedPeakAccessorStrategy(_PeakGetterStrategyBase):
    '''Access the centroided (fitted) peak data of a scan.'''

    def peaks(self, scan):
        # The centroided peak list produced by peak picking.
        return scan.peak_set

    def tic(self, scan):
        # Total ion current computed from the centroided peaks.
        return scan.tic.centroided()
class _DeconvolutedPeakAccessorStrategy(_PeakGetterStrategyBase):
    '''Access the deconvoluted (charge-state reduced) peak data of a scan.'''

    def peaks(self, scan):
        # The deconvoluted peak list.
        return scan.deconvoluted_peak_set

    def tic(self, scan):
        # Total ion current computed from the deconvoluted peaks.
        return scan.tic.deconvoluted()
class _DynamicPeakAccessorStrategy(_PeakGetterStrategyBase):
    '''Prefer a scan's deconvoluted peaks when present; otherwise fall back
    to its centroided peaks, picking them lazily if necessary.
    '''

    def _ensure_peak_set(self, scan):
        # Lazily centroid the scan if it has not been peak-picked yet.
        if scan.peak_set is None:
            scan.pick_peaks()
        return scan.peak_set

    def peaks(self, scan):
        '''Return the best available peak list for *scan*.'''
        deconvoluted = scan.deconvoluted_peak_set
        if deconvoluted is not None:
            return deconvoluted
        return self._ensure_peak_set(scan)

    def tic(self, scan):
        '''Return the total ion current matching the peak list in use.'''
        if scan.deconvoluted_peak_set is None:
            self._ensure_peak_set(scan)
            return scan.tic.centroided()
        return scan.tic.deconvoluted()
class ScanClusterBuilder(LogUtilsMixin):
    """Clusters spectra based upon peak pattern similarity

    Attributes
    ----------
    clusters : :class:`SpectrumClusterCollection`
        The clusters built so far
    minimum_similarity : float
        The minimum similarity score needed to consider two spectra
        similar enough to form a cluster
    peak_getter : :class:`Callable`
        A function to call on each spectrum to retrieve the peak list
        to cluster over
    precursor_error_tolerance : float
        The maximum precursor mass error (in PPM) to permit between
        two spectra to consider comparing them
    track_incremental_similarity : bool
        Whether to incrementally update a cluster's similarity when it
        grows.
    """

    @classmethod
    def _guess_peak_getter(cls, getter):
        # Translate a user-supplied peak getter specification (None, a
        # callable, or a one-letter code) into a strategy object.
        if getter is None:
            return _DynamicPeakAccessorStrategy()
        if callable(getter):
            return getter
        # ``basestring`` (Python 2 only) was previously used here, which is a
        # NameError on Python 3; check against ``str`` instead.
        if isinstance(getter, str):
            if getter == "d":
                return _DeconvolutedPeakAccessorStrategy()
            if getter in ('p', 'c'):
                return _FittedPeakAccessorStrategy()
            else:
                raise ValueError("Cannot infer peak getter strategy from %r" % (getter, ))
        raise ValueError(
            "Cannot infer peak set getter strategy from %r" % (getter, ))

    def __init__(self, clusters=None, precursor_error_tolerance=1e-5, minimum_similarity=0.1,
                 peak_getter=None, track_incremental_similarity=True):
        peak_getter = self._guess_peak_getter(peak_getter)
        if clusters is None:
            clusters = []
        self.clusters = SpectrumClusterCollection(clusters)
        self.precursor_error_tolerance = precursor_error_tolerance
        self.minimum_similarity = minimum_similarity
        self.peak_getter = peak_getter
        self.track_incremental_similarity = track_incremental_similarity

    def _binsearch_simple(self, x):
        # Locate the index of the cluster whose mass brackets ``x``.
        n = len(self)
        lo = 0
        hi = n
        while hi != lo:
            mid = (hi + lo) // 2
            y = self[mid].neutral_mass
            err = y - x
            # Do refinement in `find_best_cluster_for_scan`
            if hi - lo == 1:
                return mid
            elif err > 0:
                hi = mid
            else:
                lo = mid
        return 0

    def peak_set_similarity(self, scan_i, scan_j):
        '''Calculate the similarity between the peaks of
        ``scan_i`` and ``scan_j``, where the peaks are
        those points given by :attr:`peak_getter`

        Parameters
        ----------
        scan_i: :class`~.ScanBase`
        scan_j: :class`~.ScanBase`

        Returns
        -------
        :class:`float`
        '''
        peak_set_a = self.peak_getter(scan_i)
        peak_set_b = self.peak_getter(scan_j)
        return peak_set_similarity(
            peak_set_a, peak_set_b)

    def find_best_cluster_for_scan(self, scan):
        '''Locate the best cluster to add ``scan`` to according to
        precursor mass and peak set similarity.

        Parameters
        ----------
        scan: :class:`~.ScanBase`

        Returns
        -------
        :class:`SpectrumCluster`
        '''
        best_cluster = None
        best_similarity = 0.0
        n = len(self.clusters)
        if n == 0:
            return best_cluster

        center_i = self._binsearch_simple(scan.precursor_information.neutral_mass)
        i = center_i

        # Scan outward from the mass-matched position in both directions,
        # stopping once the precursor mass error exceeds the tolerance.
        while i >= 0:
            cluster = self.clusters[i]
            if abs(_ppm_error(scan.precursor_information.neutral_mass,
                              cluster.neutral_mass)) > self.precursor_error_tolerance:
                break
            similarity = self.peak_set_similarity(scan, cluster[0])
            i -= 1
            if similarity > best_similarity and similarity > self.minimum_similarity:
                best_similarity = similarity
                best_cluster = cluster

        i = center_i + 1
        while i < n:
            cluster = self.clusters[i]
            if abs(_ppm_error(scan.precursor_information.neutral_mass,
                              cluster.neutral_mass)) > self.precursor_error_tolerance:
                break
            similarity = self.peak_set_similarity(scan, cluster[0])
            i += 1
            if similarity > best_similarity and similarity > self.minimum_similarity:
                best_similarity = similarity
                best_cluster = cluster
        return best_cluster

    def add_scan(self, scan):
        '''Add ``scan`` to the cluster collection, adding it to the best
        matching cluster, or starting a new cluster around it if no good
        match can be found.

        Parameters
        ----------
        scan: :class:`~.ScanBase`
        '''
        best_cluster = self.find_best_cluster_for_scan(scan)
        if best_cluster:
            best_cluster.append(scan, incremental_similarity=self.track_incremental_similarity)
        else:
            self.clusters.add(SpectrumCluster([scan]))

    def __iter__(self):
        return iter(self.clusters)

    def __len__(self):
        return len(self.clusters)

    def __getitem__(self, i):
        return self.clusters[i]

    def _get_tic(self, scan):
        # Prefer the strategy's dedicated TIC accessor; fall back to summing
        # peak intensities when the getter is a plain callable.
        try:
            return self.peak_getter.tic(scan)
        except AttributeError:
            return sum(p.intensity for p in self.peak_getter(scan))

    @classmethod
    def cluster_scans(cls, scans, precursor_error_tolerance=1e-5, minimum_similarity=0.1,
                      peak_getter=None, sort=True, track_incremental_similarity=False):
        '''Cluster scans by precursor mass and peak set similarity.

        Parameters
        ----------
        scans: :class:`Iterable`
            An iterable of :class:`Scan`-like objects
        precursor_error_tolerance: :class:`float`
            The PPM mass accuracy threshold for precursor mass differences to
            tolerate when deciding whether to compare spectra. Defaults to 1e-5.
        minimum_similarity: :class:`float`
            The minimum peak set similarity required to consider adding a spectrum
            to a cluster. Defaults to 0.1
        peak_getter: :class:`Callable`
            A callable object used to get peaks from elements of ``scans``.
        sort: :class:`bool`
            Whether or not to sort spectra by their total ion current before clustering.
        '''
        self = cls([], precursor_error_tolerance, minimum_similarity,
                   peak_getter, track_incremental_similarity)
        if sort:
            scans = self._sort_by_tic(scans)
        if len(scans) > 100:
            self.log("Clustering %d Scans" % (len(scans), ))
        n = len(scans)
        report_interval = max(min(n // 10, 1000), 50)
        for i, scan in enumerate(scans):
            if i % report_interval == 0 and i:
                self.log("... Handled %d Scans (%0.2f%%)" % (i, i * 100.0 / n))
            self.add_scan(scan)
        return self.clusters

    def _sort_by_tic(self, scans):
        # Order scans by decreasing total ion current so the most intense
        # spectra seed the clusters.
        should_log = len(scans) > 100
        if should_log:
            self.log("Sorting Scans By TIC")
        augmented = []
        n = len(scans)
        for i, scan in enumerate(scans):
            if i % 1000 == 0 and i > 0:
                self.log("... Loaded TIC for %d Scans (%0.2f%%)" % (i, i * 100.0 / n))
            augmented.append((self._get_tic(scan), scan))
        augmented.sort(key=lambda x: x[0], reverse=True)
        scans = [a[1] for a in augmented]
        return scans

    @classmethod
    def iterative_clustering(cls, scans, precursor_error_tolerance=1e-5, similarity_thresholds=None,
                             peak_getter=None):
        '''Cluster scans by precursor mass and peak set similarity, iteratively refining
        clusters with increasing similarity threshold requirements.

        Parameters
        ----------
        scans: :class:`Iterable`
            An iterable of :class:`Scan`-like objects
        precursor_error_tolerance: :class:`float`
            The PPM mass accuracy threshold for precursor mass differences to
            tolerate when deciding whether to compare spectra. Defaults to 1e-5.
        similarity_thresholds: :class:`Sequence` of :class:`float`
            A series of similarity thresholds to apply as spectra are added to clusters
            and as clusters are iteratively refined.
        peak_getter: :class:`Callable`
            A callable object used to get peaks from elements of ``scans``.
        '''
        peak_getter = cls._guess_peak_getter(peak_getter)
        if similarity_thresholds is None:
            similarity_thresholds = [0.1, .4, 0.7]
        singletons = []
        to_bisect = [scans]
        logger = LogUtilsMixin()
        # Only track incremental similarity on the final (strictest) pass
        track_similarity = [False] * (len(similarity_thresholds) - 1)
        track_similarity.append(True)
        for similarity_threshold, track in zip(similarity_thresholds, track_similarity):
            logger.log("Clustering with Threshold %0.2f" % (similarity_threshold, ))
            next_to_bisect = []
            if len(to_bisect) > 1:
                logger.log("Refining %d Clusters" % (len(to_bisect)))
            elif to_bisect:
                logger.log("Clustering %d Scans" % (len(to_bisect[0])))
            else:
                logger.log("Nothing to cluster...")
                break
            n = len(to_bisect)
            report_interval = max(min(n // 10, 1000), 1)
            for i, group in enumerate(to_bisect):
                if i % report_interval == 0:
                    logger.log("... Handling Batch %d (%d Scans)" % (i, len(group)))
                clusters = cls.cluster_scans(
                    group, precursor_error_tolerance,
                    minimum_similarity=similarity_threshold,
                    peak_getter=peak_getter, sort=True, track_incremental_similarity=track)
                for cluster in clusters:
                    if len(cluster) == 1:
                        singletons.append(cluster)
                    else:
                        next_to_bisect.append(cluster)
            logger.log("%d Singletons and %d Groups" % (len(singletons), len(next_to_bisect)))
            to_bisect = next_to_bisect
        return SpectrumClusterCollection(sorted(list(singletons) + list(to_bisect)))
# Module-level convenience aliases for the classmethod entry points.
cluster_scans = ScanClusterBuilder.cluster_scans
iterative_clustering = ScanClusterBuilder.iterative_clustering
class ScanClusterWriterBase(object):
    '''A base class for writing :class:`ScanCluster` objects to an
    I/O stream like a file.

    Attributes
    ----------
    stream: :class:`io.IOBase`
        The stream to write the clusters to
    metadata: :class:`dict`
        A set of key-value pairs that describe this
        collection.
    '''

    def __init__(self, stream, metadata=None):
        self.stream = stream
        self.metadata = metadata or {}
        # Becomes True once the metadata header has been flushed; no further
        # metadata may be added after that point.
        self._wrote_metadata = False

    def _write(self, data):
        self.stream.write(data)

    def save(self, cluster):
        '''Write ``cluster`` to the output stream, recording its
        members and calculating it's average similarity.

        Parameters
        ----------
        cluster: :class:`SpectrumCluster`
            The spectrum cluster to write out
        '''
        if not self._wrote_metadata:
            self.write_metadata()
            self._wrote_metadata = True
        self._save(cluster)

    def _save(self, cluster):
        raise NotImplementedError()

    def save_all(self, clusters):
        '''Write each :class:`SpectrumCluster` in ``clusters`` out,
        calling :meth:`save` on each one.

        Parameters
        ----------
        clusters: :class:`collections.Iterable` of :class:`SpectrumCluster`
            The spectrum clusters to write out
        '''
        raise NotImplementedError()

    def add_metadata(self, key, value):
        '''Add metadata to the writer. That metadata will be flushed
        out upon starting to write clusters out.

        Parameters
        ----------
        key: :class:`str`
            The metadata element's name
        value: :class:`str`, :class:`float`
            The metadata element's value
        '''
        # Previously this read ``self.wrote_metadata``, an attribute that does
        # not exist, so every call raised AttributeError.
        if self._wrote_metadata:
            raise TypeError(
                "Cannot add additional metadata, the metadata has already been written")
        self.metadata[key] = value

    def write_metadata(self):
        '''Write the accumulated metadata out in a format-appropriate
        manner at the top of the file.
        '''
        if self._wrote_metadata:
            raise TypeError("Already wrote metadata!")
class ScanClusterWriter(ScanClusterWriterBase):
    '''Writes :class:`ScanCluster` objects to a hierarchical text stream

    Parameters
    ----------
    stream: :class:`io.IOBase`
        The stream to write the clusters to
    '''

    def __init__(self, stream, metadata=None):
        super(ScanClusterWriter, self).__init__(stream, metadata)

    def write_metadata(self):
        # The header is a run of "#key = value" lines followed by one blank
        # separator line.
        for key, value in self.metadata.items():
            self._write("#%s = %s\n" % (key, value))
        self._write("\n")

    def _save(self, cluster):
        '''Write ``cluster`` as a tab delimited tree, recording its
        members and calculating it's average similarity.

        Parameters
        ----------
        cluster: :class:`SpectrumCluster`
            The spectrum cluster to write out
        '''
        self._write("%f\t%d\t%f\n" % (cluster.neutral_mass, len(cluster), cluster.average_similarity()))
        for member in cluster:
            member_source = member.source
            if member_source is not None:
                source_name = member_source.source_file
                # BUG FIX: ``basestring`` is a Python 2 only builtin and
                # raises NameError under Python 3; ``str`` is the correct
                # check here.
                if not isinstance(source_name, str):
                    # Fall back to a file-like object's ``name`` attribute.
                    if hasattr(source_name, 'name'):
                        source_name = source_name.name
            else:
                source_name = ":detatched:"
            self._write("\t%s\t%s\n" % (source_name, member.id))
        self._write('\n')

    def save_all(self, clusters):
        '''Write each :class:`SpectrumCluster` in ``clusters`` out,
        calling :meth:`save` on each one.

        Parameters
        ----------
        clusters: :class:`collections.Iterable` of :class:`SpectrumCluster`
            The spectrum clusters to write out
        '''
        for cluster in clusters:
            self.save(cluster)
class JSONScanClusterWriter(ScanClusterWriterBase):
    '''A :class:`ScanClusterWriterBase` implementation that emits
    JSON lines: one JSON document per line.
    '''

    def _dump_line(self, obj):
        # Serialize ``obj`` as a single JSON document terminated by a newline.
        json.dump(obj, self.stream)
        self._write("\n")

    def write_metadata(self):
        self._dump_line(self.metadata)

    def _save(self, cluster):
        self._dump_line(cluster.to_dict())

    def save_all(self, clusters):
        for member in clusters:
            self.save(member)
class ScanClusterReaderBase(object):
    '''Base class for reading spectrum clusters from disk.

    Attributes
    ----------
    stream: :class:`io.IOBase`
        The stream to read the clusters from
    resolver_map: :class:`dict`
        A mapping from scan source name to a :class:`Callable`
        which will return a :class:`~.ScanBase` object representing
        that spectrum.
    metadata: :class:`dict`
        A set of key-value pairs that describe this
        collection.
    clusters: :class:`list`
        The read clusters.
    '''

    def __init__(self, stream, resolver_map):
        self.stream = stream
        self.resolver_map = resolver_map
        self.clusters = []
        self.metadata = {}
        # Lazily created by __next__ on the first iteration step.
        self._generator = None

    def _resolve(self, source, scan_id):
        # Prefer the rich loader interface; fall back to treating the entry
        # as a plain callable taking the scan id.
        loader = self.resolver_map[source]
        try:
            return loader.get_scan_by_id(scan_id)
        except AttributeError:
            return loader(scan_id)

    def _parse(self):
        '''Parse the cluster collection from :attr:`stream`
        '''
        self._load_metadata()
        return self._load_clusters()

    def _load_metadata(self):
        '''Read the metadata header from :attr:`stream`.
        '''
        raise NotImplementedError()

    def _load_clusters(self):
        '''Read the data describing :class:`SpectrumCluster` objects from
        :attr:`stream`.
        '''
        raise NotImplementedError()

    def __iter__(self):
        return self

    def __next__(self):
        '''Advance the iterator, retrieving the next :class:`SpectrumCluster`

        Returns
        -------
        :class:`SpectrumCluster`
        '''
        if self._generator is None:
            self._generator = self._parse()
        return next(self._generator)

    def next(self):
        '''Advance the iterator, retrieving the next :class:`SpectrumCluster`

        Returns
        -------
        :class:`SpectrumCluster`
        '''
        return self.__next__()
class ScanClusterReader(ScanClusterReaderBase):
    '''Reads :class:`SpectrumCluster` objects from hierarchical text files written by
    :class:`ScanClusterWriter`.
    '''

    def __init__(self, stream, resolver_map):
        super(ScanClusterReader, self).__init__(stream, resolver_map)
        # Pushback buffer so the line read past the metadata header can be
        # handed back for the cluster parser to consume.
        self._line_buffer = deque()

    def _next_line(self):
        # Drain pushed-back lines before reading from the stream.
        if self._line_buffer:
            return self._line_buffer.popleft()
        return self.stream.readline()

    def _return_line(self, line):
        self._line_buffer.append(line)

    def _stream_lines(self):
        # Yield lines until EOF (readline returns an empty string).
        line = self._next_line()
        while line:
            yield line
            line = self._next_line()

    def _load_metadata(self):
        line = self._next_line()
        while line.startswith("#"):
            key, value = line.strip().split(" = ", 1)
            # BUG FIX: strip the leading '#' marker so keys round-trip with
            # ScanClusterWriter.write_metadata, which emits "#key = value".
            key = key.lstrip("#")
            try:
                value = float(value)
            except ValueError:
                value = str(value)
            self.metadata[key] = value
            # BUG FIX: advance to the next line; previously the loop body
            # never read another line and spun forever whenever the file
            # contained any metadata header lines.
            line = self._next_line()
        self._return_line(line)

    def _load_clusters(self):
        current_cluster = []
        mass = None
        similarity = None
        for line in self._stream_lines():
            line = line.rstrip()
            if not line or not line.startswith('\t'):
                # A blank line or a new cluster header terminates the
                # cluster currently being accumulated.
                if current_cluster:
                    yield SpectrumCluster(current_cluster, mass, similarity)
                    current_cluster = []
                    mass = None
                    similarity = None
                if line:
                    # Header row: "<mass>\t<size>\t<average similarity>";
                    # the size column is implied by the member count.
                    tokens = line.split('\t')
                    mass = float(tokens[0])
                    similarity = float(tokens[2])
            elif line.startswith("\t"):
                # Member row: "\t<source>\t<scan id>"
                tokens = line.split("\t")
                source = tokens[1]
                scan_id = tokens[2]
                scan = self._resolve(source, scan_id)
                current_cluster.append(scan)
        if current_cluster:
            yield SpectrumCluster(current_cluster, mass, similarity)
class JSONScanClusterReader(ScanClusterReader):
    '''Reads :class:`SpectrumCluster` objects from JSON-lines files written by
    :class:`JSONScanClusterWriter`.
    '''

    def _load_metadata(self):
        # BUG FIX: the parsed header used to be returned but never stored,
        # leaving :attr:`metadata` empty even though the writer serialized it.
        self.metadata = json.loads(self.stream.readline())
        return self.metadata

    def _load_clusters(self):
        # Each remaining line is one JSON document describing a cluster.
        line = self.stream.readline()
        while line:
            data = json.loads(line)
            mass = data['neutral_mass']
            similarity = data['average_similarity']
            scans = []
            for scan_bundle in data['scans']:
                scans.append(self._resolve(scan_bundle['source'], scan_bundle['id']))
            yield SpectrumCluster(scans, mass, similarity)
            line = self.stream.readline()
| [
"operator.attrgetter",
"numpy.identity",
"json.loads",
"collections.deque",
"ms_deisotope.task.log_utils.LogUtilsMixin",
"bisect.insort",
"json.dump"
] | [((516, 547), 'operator.attrgetter', 'operator.attrgetter', (['"""peak_set"""'], {}), "('peak_set')\n", (535, 547), False, 'import operator\n'), ((579, 623), 'operator.attrgetter', 'operator.attrgetter', (['"""deconvoluted_peak_set"""'], {}), "('deconvoluted_peak_set')\n", (598, 623), False, 'import operator\n'), ((5751, 5765), 'numpy.identity', 'np.identity', (['n'], {}), '(n)\n', (5762, 5765), True, 'import numpy as np\n'), ((7507, 7544), 'bisect.insort', 'bisect.insort', (['self.clusters', 'cluster'], {}), '(self.clusters, cluster)\n', (7520, 7544), False, 'import bisect\n'), ((20596, 20611), 'ms_deisotope.task.log_utils.LogUtilsMixin', 'LogUtilsMixin', ([], {}), '()\n', (20609, 20611), False, 'from ms_deisotope.task.log_utils import LogUtilsMixin\n'), ((26444, 26481), 'json.dump', 'json.dump', (['self.metadata', 'self.stream'], {}), '(self.metadata, self.stream)\n', (26453, 26481), False, 'import json\n'), ((29039, 29046), 'collections.deque', 'deque', ([], {}), '()\n', (29044, 29046), False, 'from collections import deque\n'), ((31136, 31152), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (31146, 31152), False, 'import json\n')] |
import pyaudio
import _thread
import os
from scipy.signal import butter, lfilter
import numpy as np
# Audio stream configuration.
FORMAT = pyaudio.paFloat32  # 32-bit float samples
CHANNELS = 1  # mono
RATE = 8000  # sample rate in Hz
CHUNK = 1024  # frames per read from the input stream
RECORD_SECONDS = 30  # NOTE(review): defined but never used below
p = pyaudio.PyAudio()
# Menu thread
class menu(object):
    """Interactive console menu, intended to run on its own thread.

    Exposes three flags polled by the main audio loop:
    audioON  -- pass audio through to the output stream
    filterON -- apply the band-stop filter before writing
    finished -- set when the user quits; ends both loops
    """

    def __init__(self):
        self.audioON = False
        self.filterON = False
        self.finished = False

    def selectMenu(self):
        """Redraw the menu and process single-letter commands until quit."""
        while not self.finished:
            # Issue both clear commands; whichever matches the OS takes effect.
            os.system("clear")
            os.system("cls")
            print(f'AUDIO OUTPUT: {self.audioON}')
            print(f'FILTERING: {self.filterON}')
            print("\nEnter a command:\n<A> Toggle audio output\n<F> Toggle filtering\n<Q> Exit\n")
            command = input('command: ').lower()
            if command == 'a':
                self.audioON = not self.audioON
            elif command == 'f':
                self.filterON = not self.filterON
            elif command == 'q':
                self.finished = True
# Start an output stream on specified output_device_id
def start_out_stream(outDevice, rate=8000, channels=1):
    """Open and return a PyAudio output stream on the given device.

    Parameters
    ----------
    outDevice : int
        PyAudio device index to play audio on.
    rate : int, optional
        Sample rate in Hz. Defaults to 8000, matching the RATE constant
        above (previously hard-coded).
    channels : int, optional
        Number of output channels. Defaults to mono (previously hard-coded).
    """
    return p.open(format=FORMAT, channels=channels, rate=rate,
                  output_device_index=outDevice, output=True)
# Start an output stream on specified input_device_id
def start_input_stream(inDevice, rate=8000, channels=1):
    """Open and return a PyAudio input stream on the given device.

    Parameters
    ----------
    inDevice : int
        PyAudio device index to capture audio from.
    rate : int, optional
        Sample rate in Hz. Defaults to 8000, matching the RATE constant
        above (previously hard-coded).
    channels : int, optional
        Number of input channels. Defaults to mono (previously hard-coded).
    """
    return p.open(format=FORMAT, channels=channels, rate=rate,
                  input_device_index=inDevice, input=True)
# Make a list of the connected audio devices
def list_devices():
    """Print every device PyAudio reports on host API 0, first the
    input-capable devices, then the output-capable ones."""
    host_info = p.get_host_api_info_by_index(0)
    device_count = host_info.get('deviceCount')
    print('The following audio devices were found:')
    print('INPUT')
    for idx in range(0, device_count):
        info = p.get_device_info_by_host_api_device_index(0, idx)
        if info.get('maxInputChannels') > 0:
            print("ID: ", idx, " : ", info.get('name'))
    print('OUTPUT')
    for idx in range(0, device_count):
        info = p.get_device_info_by_host_api_device_index(0, idx)
        if info.get('maxOutputChannels') > 0:
            print("ID: ", idx, " : ", info.get('name'))
# Butterworth bandstop filter
def butter_bandstop(lowcut, highcut, fs, order=5):
    """Design a Butterworth band-stop filter.

    Parameters
    ----------
    lowcut, highcut : float
        Stop-band edge frequencies in Hz.
    fs : float
        Sampling rate in Hz.
    order : int, optional
        Butterworth design order (default 5).

    Returns
    -------
    tuple
        ``(b, a)`` transfer-function numerator and denominator coefficients.
    """
    nyquist = 0.5 * fs
    band = [lowcut / nyquist, highcut / nyquist]
    return butter(order, band, btype='bandstop')
def butter_bandstop_filter(data, lowcut, highcut, fs, order=5):
    """Band-stop filter ``data``.

    Designs the filter with :func:`butter_bandstop` and applies it with
    :func:`scipy.signal.lfilter`, returning the filtered samples.
    """
    numer, denom = butter_bandstop(lowcut, highcut, fs, order=order)
    return lfilter(numer, denom, data)
# Show the available devices and let the user pick input/output indices.
list_devices()
input_ID = int(input("Select an input device ID:\n"))
output_ID = int(input("Select an output device ID:\n"))
# Start menu thread
# NOTE(review): rebinding 'menu' to an instance shadows the menu class above.
menu = menu()
_thread.start_new_thread(menu.selectMenu,())
# Initialize input stream
in_stream = start_input_stream(input_ID)
# Initialize output stream
out_stream = start_out_stream(output_ID)
# Main audio loop: read one chunk at a time and, depending on the menu flags,
# pass it through to the output, optionally band-stop filtered.
while(True):
    # Read a chunk of data from input
    data = in_stream.read(CHUNK)
    # If output stream is enabled, write on output
    if menu.audioON:
        # If filter is enabled, filter the signal before writing
        if menu.filterON:
            # Decode input signal
            decoded = np.frombuffer(data, 'float32')
            # Process input signal
            # NOTE(review): the None assignment is a dead store, immediately
            # overwritten by the filter call on the next line.
            filtered_signal = None
            filtered_signal = butter_bandstop_filter(decoded, 500, 2000, RATE)
            # Encode the signal again and write on output stream
            out = np.array(filtered_signal, dtype='<f4').tobytes()
            out_stream.write(out)
        else:
            # Write signal without processing
            out_stream.write(data)
    # The menu thread flips this flag when the user enters 'q'.
    if menu.finished:
        break
print("END")
# Close streams
out_stream.stop_stream()
out_stream.close()
in_stream.stop_stream()
in_stream.close()
p.terminate()
| [
"numpy.frombuffer",
"scipy.signal.butter",
"numpy.array",
"scipy.signal.lfilter",
"os.system",
"pyaudio.PyAudio",
"_thread.start_new_thread"
] | [((203, 220), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (218, 220), False, 'import pyaudio\n'), ((2847, 2892), '_thread.start_new_thread', '_thread.start_new_thread', (['menu.selectMenu', '()'], {}), '(menu.selectMenu, ())\n', (2871, 2892), False, 'import _thread\n'), ((2438, 2482), 'scipy.signal.butter', 'butter', (['order', '[low, high]'], {'btype': '"""bandstop"""'}), "(order, [low, high], btype='bandstop')\n", (2444, 2482), False, 'from scipy.signal import butter, lfilter\n'), ((2638, 2657), 'scipy.signal.lfilter', 'lfilter', (['b', 'a', 'data'], {}), '(b, a, data)\n', (2645, 2657), False, 'from scipy.signal import butter, lfilter\n'), ((444, 462), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (453, 462), False, 'import os\n'), ((476, 492), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (485, 492), False, 'import os\n'), ((3353, 3383), 'numpy.frombuffer', 'np.frombuffer', (['data', '"""float32"""'], {}), "(data, 'float32')\n", (3366, 3383), True, 'import numpy as np\n'), ((3637, 3675), 'numpy.array', 'np.array', (['filtered_signal'], {'dtype': '"""<f4"""'}), "(filtered_signal, dtype='<f4')\n", (3645, 3675), True, 'import numpy as np\n')] |
import numpy
from theano import *
import theano.tensor as T
class HiddenLayer(object):
    """
    + The hidden layer transforms the data space.
    + The points are projected onto hyperplanes. A non-linear function of their
      distance to the planes forms the coordinates of the points in the new referential.
                        x -> tanh(W*x + b)
    """

    def __init__(self, rng, input, n_in, n_out, W=None, b=None, activation=T.tanh):
        """ Defines the layer and initializes its parameters

        Parameters
        ----------
        rng : numpy.random.RandomState
            Random generator used to initialize W when it is not supplied.
        input : theano tensor
            Symbolic input of shape (n_examples, n_in) -- presumed; confirm
            against the caller.
        n_in, n_out : int
            Input and output dimensionality of the layer.
        W, b : theano shared variables, optional
            Pre-built parameters; freshly initialized when None.
        activation : callable
            Elementwise non-linearity applied to the affine projection.
        """
        self.input = input

        if W is None:
            # Uniform initialization in +/- sqrt(6 / (n_in + n_out)).
            # BUG FIX: use 6.0 so the division cannot truncate to zero under
            # Python 2 integer division.
            W_values = numpy.asarray(
                rng.uniform(
                    low=-numpy.sqrt(6.0 / (n_in + n_out)),
                    high=numpy.sqrt(6.0 / (n_in + n_out)),
                    size=(n_in, n_out)),
                dtype=theano.config.floatX)
            # Sigmoid units work best with a 4x larger initial range.
            if activation == T.nnet.sigmoid:
                W_values *= 4
            W = theano.shared(
                value=W_values,
                name='W',
                borrow=True)

        if b is None:
            b = theano.shared(
                value=numpy.zeros(
                    (n_out,),
                    dtype=theano.config.floatX),
                name='b',
                borrow=True)

        # BUG FIX: previously self.W / self.b were only assigned inside the
        # "is None" branches, so passing an existing W or b raised
        # AttributeError when building self.params below.
        self.W = W
        self.b = b
        self.params = [self.W, self.b]
        self.output = activation(T.dot(self.input, self.W) + self.b)
| [
"numpy.zeros",
"numpy.sqrt",
"theano.tensor.dot"
] | [((1374, 1399), 'theano.tensor.dot', 'T.dot', (['self.input', 'self.W'], {}), '(self.input, self.W)\n', (1379, 1399), True, 'import theano.tensor as T\n'), ((1146, 1195), 'numpy.zeros', 'numpy.zeros', (['(n_out,)'], {'dtype': 'theano.config.floatX'}), '((n_out,), dtype=theano.config.floatX)\n', (1157, 1195), False, 'import numpy\n'), ((726, 756), 'numpy.sqrt', 'numpy.sqrt', (['(6 / (n_in + n_out))'], {}), '(6 / (n_in + n_out))\n', (736, 756), False, 'import numpy\n'), ((667, 697), 'numpy.sqrt', 'numpy.sqrt', (['(6 / (n_in + n_out))'], {}), '(6 / (n_in + n_out))\n', (677, 697), False, 'import numpy\n')] |
#!/usr/bin/env python
# coding: utf-8
# this code is a modification of:
# notes:
# todo : I canceled the randomize weights for the last layer + freezed the weights for all of the layers (some weights were trained anyway).
#todo : mayb -- save to fule during evaluate function the outputs
# **Outline of Steps**
# + Initialization
# + Download COCO detection data from http://cocodataset.org/#download
# + http://images.cocodataset.org/zips/train2014.zip <= train images
# + http://images.cocodataset.org/zips/val2014.zip <= validation images
# + http://images.cocodataset.org/annotations/annotations_trainval2014.zip <= train and validation annotations
# + Run this script to convert annotations in COCO format to VOC format
# + https://gist.github.com/chicham/6ed3842d0d2014987186#file-coco2pascal-py
# + Download pre-trained weights from https://pjreddie.com/darknet/yolo/
# + https://pjreddie.com/media/files/yolo.weights
# + Specify the directory of train annotations (train_annot_folder) and train images (train_image_folder)
# + Specify the directory of validation annotations (valid_annot_folder) and validation images (valid_image_folder)
# + Specity the path of pre-trained weights by setting variable *wt_path*
# + Construct equivalent network in Keras
# + Network arch from https://github.com/pjreddie/darknet/blob/master/cfg/yolo-voc.cfg
# + Load the pretrained weights
# + Perform training
# + Perform detection on an image with newly trained weights
# + Perform detection on an video with newly trained weights
# # Initialization
# In[51]:
#from IPython import get_ipython
from keras.models import Sequential, Model
from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, \
UpSampling2D, TimeDistributed, LSTM
from keras.layers.advanced_activations import LeakyReLU
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras.optimizers import SGD, Adam, RMSprop
from keras.layers.merge import concatenate
import matplotlib.pyplot as plt
import keras.backend as K
import tensorflow as tf
import imgaug as ia
from tqdm import tqdm
from imgaug import augmenters as iaa
import numpy as np
import pickle
import os, cv2
from preprocessing import parse_annotation, BatchGenerator, LSTMBatchGenerator
from utils import WeightReader, decode_netout, draw_boxes
# Force deterministic CUDA device ordering and hide all GPUs (CPU-only run).
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# get_ipython().run_line_magic('matplotlib', 'inline')
# In[52]:
# Dataset sizes for this experiment (supervised / unsupervised / evaluation).
SUP_NUM_IMAGES = 3
UNSUP_NUM_IMAGES = 3
EVAL_NUM_IMAGES = 3
# The 80 COCO object categories, in the model's output order.
LABELS = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
          'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
          'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
          'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
          'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
          'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
          'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
          'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
          'hair drier', 'toothbrush']
# Network geometry: input resolution, output grid, boxes per cell, classes.
IMAGE_H, IMAGE_W = 416, 416
GRID_H, GRID_W = 13, 13
BOX = 5
CLASS = len(LABELS)
CLASS_WEIGHTS = np.ones(CLASS, dtype='float32')
# Detection thresholds used by decode_netout.
OBJ_THRESHOLD = 0.3  # 0.5
NMS_THRESHOLD = 0.3  # 0.45
# Anchor-box priors consumed by decode_netout (flat list of value pairs).
ANCHORS = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828]
# Loss-term weighting factors.
NO_OBJECT_SCALE = 1.0
OBJECT_SCALE = 5.0
COORD_SCALE = 1.0
CLASS_SCALE = 1.0
BATCH_SIZE = 16
WARM_UP_BATCHES = 0
TRUE_BOX_BUFFER = 50
MAX_BOX_PER_IMAGE = 10
# In[53]:
# Pretrained weights and dataset locations.
wt_path = 'yolov2.weights'
train_image_folder = './data/images/train2014/'
train_annot_folder = './data/train_converted/'
valid_image_folder = './data/images/val2014/'
valid_annot_folder = './data/val_converted/'
# # Construct the network
# the function to implement the orgnization layer (thanks to github.com/allanzelener/YAD2K)
def space_to_depth_x2(x):
    """Rearrange 2x2 spatial blocks of ``x`` into the channel dimension
    (the YOLOv2 "reorg" layer applied to the skip connection)."""
    return tf.space_to_depth(x, block_size=2)
import frontend
""" creates a new dir names coco_x with the results, weights, and all the relevant files"""
# TB_COUNT = len([d for d in os.listdir(os.path.expanduser('./results_lstm/')) if 'coco_' in d]) + 1
# PATH = os.path.expanduser('./results_lstm/') + 'coco_' + '_' + str(TB_COUNT)
# os.makedirs(PATH)
PATH = './lstm/'
# NOTE(review): the os.makedirs call above is commented out, so this print
# claims the directory was created without actually creating it.
print("=================== Directory " , PATH , " Created ")
# PATH = "./results/coco__25"
class ToharGenerator2(BatchGenerator):
    # Wraps BatchGenerator so each batch's images are replaced by the YOLO
    # model's output for those images.
    def __getitem__(self, item):
        # t= [x_batch,b_batch],y_batch
        # [input,goutndtruth],desired network output]
        t = super().__getitem__(item)
        x_batch = t[0][0] #the input
        GT = t[0][1]
        y_batch = t[1]
        # NOTE(review): `predict` below is defined with signature
        # (model, image, i, img_name, path="") -- calling it with only
        # (model, x_batch) will raise TypeError if this generator is used.
        new_x_batch = predict(model,x_batch) #instead of input img vector we want the YOLO's output vector
        # NOTE(review): this in-place assignment is redundant -- the method
        # returns a freshly built pair on the next line, not `t`.
        t[0][0]= new_x_batch
        return [new_x_batch, GT], y_batch
# ---- Build the YOLOv2 detection graph (Darknet-19 style backbone + head) ----
# true_boxes is a second input carrying ground-truth boxes; it is only used
# by the Lambda "hack" at the bottom so Keras registers it in the graph.
input_image = Input(shape=(IMAGE_H, IMAGE_W, 3))
true_boxes = Input(shape=(1, 1, 1, TRUE_BOX_BUFFER, 4))
# Layer 1 -- the only backbone layer created without trainable=False.
x = Conv2D(32, (3, 3), strides=(1, 1), padding='same', name='conv_1', use_bias=False)(input_image)
x = BatchNormalization(name='norm_1')(x)
x = LeakyReLU(alpha=0.1)(x)
encoded = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 2 -- this and all following backbone layers are frozen (trainable=False).
x = Conv2D(64, (3, 3), strides=(1, 1), padding='same', name='conv_2', use_bias=False, trainable=False)(encoded)
x = BatchNormalization(name='norm_2', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 3
x = Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='conv_3', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_3', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 4
x = Conv2D(64, (1, 1), strides=(1, 1), padding='same', name='conv_4', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_4', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 5
x = Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='conv_5', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_5', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 6
x = Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='conv_6', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_6', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 7
x = Conv2D(128, (1, 1), strides=(1, 1), padding='same', name='conv_7', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_7', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 8
x = Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='conv_8', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_8', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 9
x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv_9', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_9', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 10
x = Conv2D(256, (1, 1), strides=(1, 1), padding='same', name='conv_10', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_10', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 11
x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv_11', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_11', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 12
x = Conv2D(256, (1, 1), strides=(1, 1), padding='same', name='conv_12', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_12', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 13 -- its activation feeds the skip connection merged in at layer 21.
x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv_13', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_13', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
skip_connection = x
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 14
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_14', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_14', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 15
x = Conv2D(512, (1, 1), strides=(1, 1), padding='same', name='conv_15', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_15', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 16
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_16', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_16', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 17
x = Conv2D(512, (1, 1), strides=(1, 1), padding='same', name='conv_17', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_17', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 18
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_18', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_18', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 19
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_19', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_19', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 20
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_20', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_20', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 21 -- compress the layer-13 features, reorganize 2x2 blocks into
# channels, then concatenate with the main path.
skip_connection = Conv2D(64, (1, 1), strides=(1, 1), padding='same', name='conv_21', use_bias=False, trainable=False)(
    skip_connection)
skip_connection = BatchNormalization(name='norm_21', trainable=False)(skip_connection)
skip_connection = LeakyReLU(alpha=0.1)(skip_connection)
skip_connection = Lambda(space_to_depth_x2)(skip_connection)
x = concatenate([skip_connection, x])
# Layer 22
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_22', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_22', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 23 -- detection head: BOX predictions per cell, each with 4 box
# coordinates, 1 objectness score and CLASS class scores.
x = Conv2D(BOX * (4 + 1 + CLASS), (1, 1), strides=(1, 1), padding='same', name='conv_23')(x)
output = Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS))(x)
# small hack to allow true_boxes to be registered when Keras build the model
# for more information: https://github.com/fchollet/keras/issues/2790
output = Lambda(lambda args: args[0])([output, true_boxes])
model = Model([input_image, true_boxes], output)
# model.summary()
print("output=====")
print(output.shape)
'''build lstm model: '''
lstm_input = Input(shape=(GRID_H, GRID_W, BOX, 4 + 1 + CLASS))
# Flattened size of one YOLO output tensor; used as the LSTM width below.
input_dim = GRID_H * GRID_W * BOX * (4 + 1 + CLASS)
# input_dim=(GRID_H,GRID_W, BOX, 4 + 1 + CLASS, 1, 1, 1, TRUE_BOX_BUFFER, 4)
print(input_dim)
timesteps = EVAL_NUM_IMAGES
# lstm.add(units= Dense(input_shape=(GRID_H, GRID_W, BOX, 4 + 1 + CLASS)))
# l=Lambda(lambda x: K.batch_flatten(x))(lstm_input)
# l=LSTM(input_dim, batch_input_shape= (None, timesteps, input_dim), activation='sigmoid',recurrent_activation='hard_sigmoid',return_sequences=True)(l)
# # l = (Dense(output_dim=input_dim, activation="relu"))(lstm)
# #
# # # l = LSTM(input_dim)(l)
# # # # hidden_layer = Dense(output_dim=input_shape, activation="relu")(x)
# # # # outputs = Dense(output_dim=input_shape, activation="softmax")(hidden_layer)
# #
# loutput = Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS))(l)
# #
# # # small hack to allow true_boxes to be registered when Keras build the model
# # # for more information: https://github.com/fchollet/keras/issues/2790
# out = Lambda(lambda args: args[0])([loutput, true_boxes])
#
#
#
# lstm = Model([lstm_input, true_boxes], out)
# lstm.summary()
input_dim = GRID_H * GRID_W * BOX * (4 + 1 + CLASS)
#take 5 frames every time
# Run the full YOLO model on each of the 5 frames, flatten each per-frame
# output, and feed the sequence through an LSTM plus a dense projection.
frames = Input(shape=(5, IMAGE_H, IMAGE_W, 3))
x = TimeDistributed(model)(frames)
x = TimeDistributed(Flatten())(x)
#now- timestamsp=5
x = LSTM(input_dim, name='lstm')(x)
out = Dense(input_dim, name='out')(x)
lstm = Model(inputs=frames, outputs=out)
# NOTE(review): exit() terminates the script here -- the weight loading,
# training, and evaluation code below never executes.
exit()
# # Load pretrained weights
# **Load the weights originally provided by YOLO**
print("**Load the weights originally provided by YOLO**")
weight_reader = WeightReader(wt_path)
weight_reader.reset()  # don't worry! it doesn't delete the weights.
nb_conv = 23
# Copy the pretrained Darknet weights layer by layer into the Keras model.
for i in range(1, nb_conv + 1):
    conv_layer = model.get_layer('conv_' + str(i))
    if i < nb_conv:
        # Every conv except the head (conv_23) has a batch-norm companion;
        # the weight file stores beta, gamma, mean, variance in this order.
        norm_layer = model.get_layer('norm_' + str(i))
        size = np.prod(norm_layer.get_weights()[0].shape)
        beta = weight_reader.read_bytes(size)
        gamma = weight_reader.read_bytes(size)
        mean = weight_reader.read_bytes(size)
        var = weight_reader.read_bytes(size)
        # NOTE(review): set_weights returns None, so `weights` is always None.
        weights = norm_layer.set_weights([gamma, beta, mean, var])
    if len(conv_layer.get_weights()) > 1:
        # Conv layer with a bias term (only conv_23 uses bias here).
        bias = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[1].shape))
        kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
        # Darknet stores kernels in reversed dimension order; transpose to
        # the (h, w, in, out) layout the Keras layer expects.
        kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
        kernel = kernel.transpose([2, 3, 1, 0])
        conv_layer.set_weights([kernel, bias])
    else:
        kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
        kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
        kernel = kernel.transpose([2, 3, 1, 0])
        conv_layer.set_weights([kernel])
# model_t = model #model that trained but not pre-trained
# model_un = model #model without training at all
# **Randomize weights of the last layer**
# In[ ]:
# print("========randomize last layer")
# layer = model.layers[-4] # the last convolutional layer
# weights = layer.get_weights()
#
# new_kernel = np.random.normal(size=weights[0].shape)/(GRID_H*GRID_W)
# new_bias = np.random.normal(size=weights[1].shape)/(GRID_H*GRID_W)
#
# layer.set_weights([new_kernel, new_bias])
# # Perform training
# **Loss function**
# $$\begin{multline}
# \lambda_\textbf{coord}
# \sum_{i = 0}^{S^2}
# \sum_{j = 0}^{B}
# L_{ij}^{\text{obj}}
# \left[
# \left(
# x_i - \hat{x}_i
# \right)^2 +
# \left(
# y_i - \hat{y}_i
# \right)^2
# \right]
# \\
# + \lambda_\textbf{coord}
# \sum_{i = 0}^{S^2}
# \sum_{j = 0}^{B}
# L_{ij}^{\text{obj}}
# \left[
# \left(
# \sqrt{w_i} - \sqrt{\hat{w}_i}
# \right)^2 +
# \left(
# \sqrt{h_i} - \sqrt{\hat{h}_i}
# \right)^2
# \right]
# \\
# + \sum_{i = 0}^{S^2}
# \sum_{j = 0}^{B}
# L_{ij}^{\text{obj}}
# \left(
# C_i - \hat{C}_i
# \right)^2
# \\
# + \lambda_\textrm{noobj}
# \sum_{i = 0}^{S^2}
# \sum_{j = 0}^{B}
# L_{ij}^{\text{noobj}}
# \left(
# C_i - \hat{C}_i
# \right)^2
# \\
# + \sum_{i = 0}^{S^2}
# L_i^{\text{obj}}
# \sum_{c \in \textrm{classes}}
# \left(
# p_i(c) - \hat{p}_i(c)
# \right)^2
# \end{multline}$$
# In[ ]:
import backend
def predict(model, image, i, img_name, path=""):
    """Run YOLO on one image, save a box visualization, and return the boxes.

    Parameters
    ----------
    model : keras.Model
        The YOLO model taking [image, true_boxes] as inputs.
    image : numpy.ndarray
        Input image as loaded by OpenCV (BGR channel order).
    i : int
        Index of the image in the evaluation run; directory creation is only
        attempted for the first images.
    img_name : str
        File name for the saved visualization.
    path : str, optional
        Directory to write the visualization into.

    Returns
    -------
    list
        The decoded bounding boxes.
    """
    # The model's second input (ground-truth boxes) is unused at inference
    # time, so feed a zero placeholder of the expected shape.
    dummy_array = np.zeros((1, 1, 1, 1, TRUE_BOX_BUFFER, 4))
    fig = plt.figure(figsize=(10, 10))
    # Preprocess: resize to network input, scale to [0, 1], BGR -> RGB,
    # add the batch dimension.
    input_image = cv2.resize(image, (416, 416))
    input_image = input_image / 255.
    input_image = input_image[:, :, ::-1]
    input_image = np.expand_dims(input_image, 0)
    netout = model.predict([input_image, dummy_array])
    boxes = decode_netout(netout[0],
                          obj_threshold=OBJ_THRESHOLD,
                          nms_threshold=NMS_THRESHOLD,
                          anchors=ANCHORS,
                          nb_class=CLASS)
    image = draw_boxes(image, boxes, labels=LABELS)
    plt.imshow(image[:, :, ::-1])
    path = str(path)
    if i <= 100:
        # Only attempt creation for the first images; presumably the
        # directory exists afterwards. exist_ok avoids the race between the
        # exists() check and makedirs().
        if not os.path.exists(path):
            os.makedirs(path, exist_ok=True)
            print("Directory ", path, " Created ")
    # BUG FIX: join the path instead of path + "/" + img_name, which wrote
    # to the filesystem root ("/<img_name>") whenever path was empty.
    plt.savefig(os.path.join(path, img_name))
    # BUG FIX: close the figure -- previously every call leaked a new
    # matplotlib figure, growing memory across an evaluation run.
    plt.close(fig)
    return boxes
from utils import decode_netout, compute_overlap, compute_ap
from os.path import normpath, basename
def evaluate(model, generator,
             iou_threshold=0.3,
             score_threshold=0.3,
             max_detections=100,
             save_path=None):
    """ Evaluate a given dataset using a given model.
    code originally from https://github.com/fizyr/keras-retinanet
    # Arguments
        generator       : The generator that represents the dataset to evaluate.
        model           : The model to evaluate.
        iou_threshold   : The threshold used to consider when a detection is positive or negative.
        score_threshold : The score confidence threshold to use for detections.
                          (currently unused by this implementation)
        max_detections  : The maximum number of detections to use per image.
                          (currently unused by this implementation)
        save_path       : The path to save images with visualized detections to;
                          also receives the pickled mAP dict.
    # Returns
        A dict mapping class labels to mAP scores.
    """
    import pickle  # local import kept local; hoisted above the loop body

    # gather all detections and annotations, one inner list per image
    all_detections = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
    all_annotations = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
    for i in range(generator.size()):
        raw_image = generator.load_image(i)
        path = generator.images[i]['filename']
        img_name = basename(normpath(path))
        raw_height, raw_width, raw_channels = raw_image.shape
        # make the boxes and the labels
        pred_boxes = predict(model, raw_image, i, img_name, path=save_path)
        score = np.array([box.score for box in pred_boxes])
        pred_labels = np.array([box.label for box in pred_boxes])
        if len(pred_boxes) > 0:
            # scale the relative box coordinates back to pixel space
            pred_boxes = np.array([[box.xmin * raw_width, box.ymin * raw_height, box.xmax * raw_width,
                                    box.ymax * raw_height, box.score] for box in pred_boxes])
        else:
            pred_boxes = np.array([[]])
        # sort the boxes and the labels according to scores (descending)
        score_sort = np.argsort(-score)
        pred_labels = pred_labels[score_sort]
        pred_boxes = pred_boxes[score_sort]
        # copy detections to all_detections
        for label in range(generator.num_classes()):
            all_detections[i][label] = pred_boxes[pred_labels == label, :]
        annotations = generator.load_annotation(i)
        # copy ground truth to all_annotations
        for label in range(generator.num_classes()):
            all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()
    # compute mAP by comparing all detections and all annotations
    average_precisions = {}
    for label in range(generator.num_classes()):
        false_positives = np.zeros((0,))
        true_positives = np.zeros((0,))
        scores = np.zeros((0,))
        num_annotations = 0.0
        for i in range(generator.size()):
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            num_annotations += annotations.shape[0]
            detected_annotations = []
            for d in detections:
                scores = np.append(scores, d[4])
                if annotations.shape[0] == 0:
                    # no ground truth in this image: the detection is a false positive
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue
                overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]
                # true positive only if IOU clears the threshold and the ground
                # truth box has not already been matched to another detection
                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
        # no annotations -> AP for this class is 0 (is this correct?)
        if num_annotations == 0:
            average_precisions[label] = 0
            continue
        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]
        # compute cumulative false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)
        # compute recall and precision (eps guards division by zero)
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
        # compute average precision
        average_precision = compute_ap(recall, precision)
        average_precisions[label] = average_precision
    # Persist the per-class APs; the context manager guarantees the file is
    # closed even if pickling fails (the original leaked the handle on error).
    with open(save_path + "/mAP.pkl", "wb") as f:
        pickle.dump(average_precisions, f)
    return average_precisions
def custom_loss(y_true, y_pred):
    """YOLOv2 multi-part training loss (coordinates + confidence + class).

    Implements the loss written out in the LaTeX comment block above: a
    weighted sum of (1) x/y and w/h localization error on the responsible
    predictors, (2) objectness-confidence error with separate scaling for
    object and no-object cells, and (3) per-cell classification error.

    Args:
        y_true: ground-truth tensor, indexed as [..., 0:2]=xy, [..., 2:4]=wh,
                [..., 4]=objectness, [..., 5:]=one-hot class.
        y_pred: raw network output with the same layout.

    Returns:
        Scalar loss tensor (per-component values are echoed via tf.Print).

    NOTE(review): uses TF1-era APIs (tf.to_float, tf.Print, tf.assign_add)
    and stateful tf.Variable side effects inside the loss — confirm the
    TensorFlow version this is meant to run under.
    """
    mask_shape = tf.shape(y_true)[:4]
    # (1, GRID_H, GRID_W, 1, 1) grids of column/row indices; concatenated they
    # form the per-cell offset added to the sigmoid-ed x/y predictions.
    cell_x = tf.to_float(tf.reshape(tf.tile(tf.range(GRID_W), [GRID_H]), (1, GRID_H, GRID_W, 1, 1)))
    cell_y = tf.transpose(cell_x, (0, 2, 1, 3, 4))
    cell_grid = tf.tile(tf.concat([cell_x, cell_y], -1), [BATCH_SIZE, 1, 1, 5, 1])
    coord_mask = tf.zeros(mask_shape)
    conf_mask = tf.zeros(mask_shape)
    class_mask = tf.zeros(mask_shape)
    # Stateful counters: batches seen (drives warm-up) and running recall (debug).
    seen = tf.Variable(0.)
    total_recall = tf.Variable(0.)
    """
    Adjust prediction
    """
    ### adjust x and y
    pred_box_xy = tf.sigmoid(y_pred[..., :2]) + cell_grid
    ### adjust w and h
    pred_box_wh = tf.exp(y_pred[..., 2:4]) * np.reshape(ANCHORS, [1, 1, 1, BOX, 2])
    ### adjust confidence
    pred_box_conf = tf.sigmoid(y_pred[..., 4])
    ### adjust class probabilities
    pred_box_class = y_pred[..., 5:]
    """
    Adjust ground truth
    """
    ### adjust x and y
    true_box_xy = y_true[..., 0:2]  # relative position to the containing cell
    ### adjust w and h
    true_box_wh = y_true[..., 2:4]  # number of cells across, horizontally and vertically
    ### adjust confidence
    # IOU between each predicted box and its ground-truth box; this IOU is the
    # regression target for the confidence output.
    true_wh_half = true_box_wh / 2.
    true_mins = true_box_xy - true_wh_half
    true_maxes = true_box_xy + true_wh_half
    pred_wh_half = pred_box_wh / 2.
    pred_mins = pred_box_xy - pred_wh_half
    pred_maxes = pred_box_xy + pred_wh_half
    intersect_mins = tf.maximum(pred_mins, true_mins)
    intersect_maxes = tf.minimum(pred_maxes, true_maxes)
    intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
    true_areas = true_box_wh[..., 0] * true_box_wh[..., 1]
    pred_areas = pred_box_wh[..., 0] * pred_box_wh[..., 1]
    union_areas = pred_areas + true_areas - intersect_areas
    iou_scores = tf.truediv(intersect_areas, union_areas)
    true_box_conf = iou_scores * y_true[..., 4]
    ### adjust class probabilities
    true_box_class = tf.argmax(y_true[..., 5:], -1)
    """
    Determine the masks
    """
    ### coordinate mask: simply the position of the ground truth boxes (the predictors)
    coord_mask = tf.expand_dims(y_true[..., 4], axis=-1) * COORD_SCALE
    ### confidence mask: penalize predictors + penalize boxes with low IOU
    # penalize the confidence of the boxes, which have IOU with some ground truth box < 0.6
    true_xy = true_boxes[..., 0:2]
    true_wh = true_boxes[..., 2:4]
    true_wh_half = true_wh / 2.
    true_mins = true_xy - true_wh_half
    true_maxes = true_xy + true_wh_half
    pred_xy = tf.expand_dims(pred_box_xy, 4)
    pred_wh = tf.expand_dims(pred_box_wh, 4)
    pred_wh_half = pred_wh / 2.
    pred_mins = pred_xy - pred_wh_half
    pred_maxes = pred_xy + pred_wh_half
    intersect_mins = tf.maximum(pred_mins, true_mins)
    intersect_maxes = tf.minimum(pred_maxes, true_maxes)
    intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
    true_areas = true_wh[..., 0] * true_wh[..., 1]
    pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
    union_areas = pred_areas + true_areas - intersect_areas
    iou_scores = tf.truediv(intersect_areas, union_areas)
    # best IOU of each predicted box against any box in the true-box buffer
    best_ious = tf.reduce_max(iou_scores, axis=4)
    conf_mask = conf_mask + tf.to_float(best_ious < 0.6) * (1 - y_true[..., 4]) * NO_OBJECT_SCALE
    # penalize the confidence of the boxes, which are responsible for corresponding ground truth box
    conf_mask = conf_mask + y_true[..., 4] * OBJECT_SCALE
    ### class mask: simply the position of the ground truth boxes (the predictors)
    class_mask = y_true[..., 4] * tf.gather(CLASS_WEIGHTS, true_box_class) * CLASS_SCALE
    """
    Warm-up training
    """
    # For the first WARM_UP_BATCHES batches, push every cell towards the anchor
    # shapes centred on its own cell to stabilise the w/h predictions.
    no_boxes_mask = tf.to_float(coord_mask < COORD_SCALE / 2.)
    seen = tf.assign_add(seen, 1.)
    true_box_xy, true_box_wh, coord_mask = tf.cond(tf.less(seen, WARM_UP_BATCHES),
                                                   lambda: [true_box_xy + (0.5 + cell_grid) * no_boxes_mask,
                                                            true_box_wh + tf.ones_like(true_box_wh) * np.reshape(
                                                                ANCHORS, [1, 1, 1, BOX, 2]) * no_boxes_mask,
                                                            tf.ones_like(coord_mask)],
                                                   lambda: [true_box_xy,
                                                            true_box_wh,
                                                            coord_mask])
    """
    Finalize the loss
    """
    # Normalisers: the number of cells contributing to each loss component.
    nb_coord_box = tf.reduce_sum(tf.to_float(coord_mask > 0.0))
    nb_conf_box = tf.reduce_sum(tf.to_float(conf_mask > 0.0))
    nb_class_box = tf.reduce_sum(tf.to_float(class_mask > 0.0))
    # 1e-6 guards against division by zero when a mask is empty.
    loss_xy = tf.reduce_sum(tf.square(true_box_xy - pred_box_xy) * coord_mask) / (nb_coord_box + 1e-6) / 2.
    loss_wh = tf.reduce_sum(tf.square(true_box_wh - pred_box_wh) * coord_mask) / (nb_coord_box + 1e-6) / 2.
    loss_conf = tf.reduce_sum(tf.square(true_box_conf - pred_box_conf) * conf_mask) / (nb_conf_box + 1e-6) / 2.
    loss_class = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=true_box_class, logits=pred_box_class)
    loss_class = tf.reduce_sum(loss_class * class_mask) / (nb_class_box + 1e-6)
    loss = loss_xy + loss_wh + loss_conf + loss_class
    nb_true_box = tf.reduce_sum(y_true[..., 4])
    nb_pred_box = tf.reduce_sum(tf.to_float(true_box_conf > 0.5) * tf.to_float(pred_box_conf > 0.3))
    """
    Debugging code
    """
    current_recall = nb_pred_box / (nb_true_box + 1e-6)
    total_recall = tf.assign_add(total_recall, current_recall)
    loss = tf.Print(loss, [tf.zeros((1))], message='Dummy Line \t', summarize=1000)
    loss = tf.Print(loss, [loss_xy], message='Loss XY \t', summarize=1000)
    loss = tf.Print(loss, [loss_wh], message='Loss WH \t', summarize=1000)
    loss = tf.Print(loss, [loss_conf], message='Loss Conf \t', summarize=1000)
    loss = tf.Print(loss, [loss_class], message='Loss Class \t', summarize=1000)
    loss = tf.Print(loss, [loss], message='Total Loss \t', summarize=1000)
    loss = tf.Print(loss, [current_recall], message='Current Recall \t', summarize=1000)
    loss = tf.Print(loss, [total_recall / seen], message='Average Recall \t', summarize=1000)
    return loss
# **Parse the annotations to construct train generator and validation generator**
# Configuration shared by every BatchGenerator below; mirrors the global
# network hyper-parameters so the generators emit matching tensors.
generator_config = {
    'IMAGE_H': IMAGE_H,        # network input height
    'IMAGE_W': IMAGE_W,        # network input width
    'GRID_H': GRID_H,          # output grid height
    'GRID_W': GRID_W,          # output grid width
    'BOX': BOX,                # anchor boxes per grid cell
    'LABELS': LABELS,          # class-name list
    'CLASS': len(LABELS),      # number of classes
    'ANCHORS': ANCHORS,        # anchor box priors
    'BATCH_SIZE': BATCH_SIZE,
    'TRUE_BOX_BUFFER': 50,     # size of the true-box input buffer per image
}
def normalize(image):
    """Map 8-bit pixel intensities onto the unit interval [0, 1]."""
    pixel_max = 255.
    return image / pixel_max
# train_imgs, seen_train_labels = parse_annotation(train_annot_folder, train_image_folder, labels=LABELS)
# ## write parsed annotations to pickle for fast retrieval next time
# with open('train_imgs', 'wb') as fp:
# pickle.dump(train_imgs, fp)
# ## read saved pickle of parsed annotations
# with open('train_imgs', 'rb') as fp:
# train_imgs = pickle.load(fp)
#
# from random import shuffle
# shuffle(train_imgs)
#
# with open('train_imgs_shuffled', 'wb') as fp:
# pickle.dump(train_imgs, fp)
# Load the (pre-shuffled) parsed training annotations from cache.
# NOTE(review): relies on `pickle` being imported earlier in the file —
# it is only visibly imported inside evaluate(); confirm.
with open('train_imgs_shuffled', 'rb') as fp:
    train_imgs = pickle.load(fp)
# valid_imgs, seen_valid_labels = parse_annotation(valid_annot_folder, valid_image_folder, labels=LABELS)
# ## write parsed annotations to pickle for fast retrieval next time
# with open('valid_imgs', 'wb') as fp:
#     pickle.dump(valid_imgs, fp)
## read saved pickle of parsed annotations
with open('valid_imgs', 'rb') as fp:
    valid_imgs = pickle.load(fp)
# Keep only the supervised subset of the training images.
sup_train_imgs = train_imgs[:SUP_NUM_IMAGES]
# split the training set (supervised data) into train and validation 80%, 20% respectively:
train = sup_train_imgs[:int(SUP_NUM_IMAGES*0.8)]
val = sup_train_imgs[-int(SUP_NUM_IMAGES*0.2):]  # takes the last 20% images from the training
train_batch = BatchGenerator(train, generator_config, norm=normalize)
eval_imgs = valid_imgs[:EVAL_NUM_IMAGES]  # we use the valid_imgs as our evaluation set (testing), while we use 20% of the training for validation
valid_batch = BatchGenerator(val, generator_config, norm=normalize, jitter=False)
"""we evaluate the model on the validation set"""
# jitter/shuffle disabled so evaluation is deterministic and ordered.
tohar_eval_batch = BatchGenerator(eval_imgs, generator_config, norm=normalize, jitter=False,
                                shuffle=False)
# **Setup a few callbacks and start the training**
# Stop training once val_loss has failed to improve by >= 0.001 for 3 epochs.
early_stop = EarlyStopping(monitor='val_loss',
                           min_delta=0.001,
                           patience=3,
                           mode='min',
                           verbose=1)
# Keep only the best (lowest val_loss) weights of the LSTM-augmented model.
checkpoint = ModelCheckpoint(PATH+'/LSTM_weights_coco.h5',
                            monitor='val_loss',
                            verbose=1,
                            save_best_only=True,
                            mode='min',
                            period=1)
# Separate checkpoint file for the original (non-LSTM) model's weights.
org_checkpoint = ModelCheckpoint(PATH+'/original_weights_coco.h5',
                                monitor='val_loss',
                                verbose=1,
                                save_best_only=True,
                                mode='min',
                                period=1)
# In[ ]:
# Pick a fresh TensorBoard run suffix by counting existing 'coco_' run dirs.
tb_counter = len([log for log in os.listdir(os.path.expanduser('./lstm/')) if 'coco_' in log]) + 1
tensorboard = TensorBoard(log_dir=os.path.expanduser('./lstm/') + 'coco_' + '_' + str(tb_counter),
                          histogram_freq=0,
                          write_graph=True,
                          write_images=False)
# Adam with a halved default learning rate; alternative optimizers kept for reference.
optimizer = Adam(lr=0.5e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
# optimizer = SGD(lr=1e-4, decay=0.0005, momentum=0.9)
# optimizer = RMSprop(lr=1e-4, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(loss=custom_loss, optimizer=optimizer)
# model_t.compile(loss=custom_loss, optimizer=optimizer)
# NOTE(review): redundant re-import — TensorBoard is already used above, so it
# must have been imported earlier in the file; harmless but removable.
from keras.callbacks import TensorBoard
"""evaluating on original YOLO (no training at all)"""
# Start from the published pre-trained YOLO weights.
model.load_weights("yolo.h5")
# YOLO = evaluate(model, tohar_eval_batch, save_path=PATH+"/YOLO")
# print("YOLO:\n", YOLO)
# print(np.average(list(YOLO.values())))
'''creating a modified batch to the lstm:'''
# [x_batch, GT], y_batch
# [x_batch, GT], \
lstm_batch = LSTMBatchGenerator(eval_imgs, generator_config, model, norm=None, jitter=False, shuffle=False)
print(len(lstm_batch))
# NOTE(review): this exit() aborts the script right here — everything below is
# currently dead code; remove it to actually run the LSTM experiment.
exit()
"""X_train2 should be YOLO's output vectors
y_train2 should be the ground truth in the exact same format of YOLO's output
"""
# autoencoder.fit_generator(generator=train_batch_lstm, #(input, input)
# steps_per_epoch=len(train_batch_lstm),
# epochs=100,
# verbose=1,
# # validation_data=tohar_valid_batch,
# # validation_steps=len(tohar_valid_batch),
# callbacks=[early_stop, ae_checkpoint, tensorboard],
# max_queue_size=3)
# print("===================== Done training AE")
# print("===================== Save weights to AE_weights_coco.h5")
# autoencoder.save_weights(PATH+"/AE_weights_coco.h5") # save the autoencoder's weights in this file
# print("===================== Load weights from AE_weights_coco.h5")
# model.load_weights(PATH+'/AE_weights_coco.h5',
# by_name=True) # copy the AE's weights to the "YOLO model" weights, only to layers with the same name as the AE
## end ae
##uncomment for training:
# Perform detection on image
# print("===================== load YOLO model's weights to weights_coco.h5")
# evaluate:
# train_batch_lstm = ToharGenerator2(train, generator_config, norm=normalize)
""" Add lstm on top of the trained YOLO model. the lstm should have many to many sturcture. each latm cell predict 1 output . help:"""
# https://stackoverflow.com/questions/49535488/lstm-on-top-of-a-pre-trained-cnn
# https://github.com/keras-team/keras/issues/5527
''' Freeze previous layers '''
for layer in model.layers:
    # Freeze all pre-trained YOLO layers so only the new LSTM head would train.
    layer.trainable = False
from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Dense, Input
from keras.layers.pooling import GlobalAveragePooling2D
from keras.layers.recurrent import LSTM
from keras.layers.wrappers import TimeDistributed
from keras.optimizers import Nadam
# Number of evaluation batches, intended as the sequence length (time steps).
frames = len(tohar_eval_batch)
print(frames)
# Flattened size of one YOLO output tensor: grid cells x boxes x (4+1+classes).
units = GRID_H * GRID_W * BOX * (4 + 1 + CLASS)
print("==========",units)
length=5  # todo: batch size
#todo: input dim is problematic.
# input_images = Input(shape=( None, frames ,IMAGE_H, IMAGE_W, 3))
#https://riptutorial.com/keras/example/29812/vgg-16-cnn-and-lstm-for-video-classification
# frames, rows, columns, channels = 10, IMAGE_H, IMAGE_W, 3
# video = Input(shape=(frames,
# rows,
# columns,
# channels))
#
# # cnn_base = VGG16(input_shape=(rows, columns, channels),
# # weights="imagenet",
# # include_top=False)
# # cnn_out = GlobalAveragePooling2D()(cnn_base.output)
# # cnn = Model(input=cnn_base.input, output=cnn_out)
#
# model.trainable = False
#
# encoded_frames = TimeDistributed(model)(video)
# encoded_sequence = LSTM(256)(encoded_frames)
# hidden_layer = Dense(output_dim=1024, activation="relu")(encoded_sequence)
# outputs = Dense(output_dim=units, activation="softmax")(hidden_layer)
# lstm = Model([video], outputs)
#
# # x = Reshape((len(train_batch)*10 ,IMAGE_H, IMAGE_W, 3))(input_images)
# x = TimeDistributed(model)(x)
# x = TimeDistributed(Flatten())(x)
# x = LSTM(units, name='lstm')(x) # This has the effect of each LSTM unit returning a sequence of 1 output, one for each time step in the input data
# # x = Dense( n_output,name='lstm_out')(x)
# # x = Conv2D(BOX * (4 + 1 + CLASS), (1, 1), strides=(1, 1), padding='same', name='lstm_conv')(x)
# out = Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS))(x)
print("======== lstm:")
lstm.summary()
lstm.compile(loss=custom_loss, optimizer=optimizer)
exit()
lstm.fit_generator(generator=train_batch, # train_batch #(input, ground_truth)
steps_per_epoch=len(train_batch),
epochs=3,
verbose=1,
validation_data=valid_batch,
validation_steps=len(valid_batch),
callbacks=[early_stop, checkpoint, tensorboard],
max_queue_size=3)
"""evaluating on LSTM YOLO """
LSTM = evaluate(model, tohar_eval_batch, save_path=PATH+"/LSTM")
print("LSTM:\n",LSTM)
print(np.average(list(LSTM.values())))
# """evaluating on original YOLO (no training at all)"""
# model.load_weights("yolo.h5")
# YOLO = evaluate(model, tohar_eval_batch, save_path=PATH+"/YOLO")
# print("YOLO:\n", YOLO)
# print(np.average(list(YOLO.values())))
#
#
# """evaluating on original YOLO (no training at all) """
# model_t.load_weights(PATH+"/T_weights_coco.h5")
# NO_AE = evaluate(model_t, tohar_eval_batch, save_path=PATH+"/NO_AE")
# print("NO_AE:\n", NO_AE)
# print(np.average(list(NO_AE.values())))
params={"SUP_NUM_IMAGES:": SUP_NUM_IMAGES,
"UNSUP_NUM_IMAGES:":UNSUP_NUM_IMAGES,
"EVAL_NUM_IMAGES:":EVAL_NUM_IMAGES}
f = open(PATH + "/mAP.txt", "w")
f.write("LSTM:\n")
f.write(str(LSTM)+"\n")
f.write("NO_AE:\n")
# f.write(str(NO_AE)+"\n")
f.write("YOLO:\n")
# f.write(str(YOLO)+"\n")
f.write("AVG:"+"\n")
f.write(str(np.average(list(LSTM.values())))+"\n")
# f.write(str(np.average(list(NO_AE.values())))+"\n")
# f.write(str(np.average(list(YOLO.values())))+"\n")
f.write("LOG:"+"\n")
f.write(str(params) )
f.close()
# image = cv2.imread('images/giraffe.jpg')
# dummy_array = np.zeros((1,1,1,1,TRUE_BOX_BUFFER,4))
# plt.figure(figsize=(10,10))
#
# input_image = cv2.resize(image, (416, 416))
# input_image = input_image / 255.
# input_image = input_image[:,:,::-1]
# input_image = np.expand_dims(input_image, 0)
#
# netout = model.predict([input_image, dummy_array])
#
# boxes = decode_netout(netout[0],
# obj_threshold=OBJ_THRESHOLD,
# nms_threshold=NMS_THRESHOLD,
# anchors=ANCHORS,
# nb_class=CLASS)
#
# image = draw_boxes(image, boxes, labels=LABELS)
#
# plt.imshow(image[:,:,::-1]); #plt.show()
# i=0
# plt.savefig("./predictions/figure"+str(i))
print('\a')  # ring the terminal bell twice to signal that the run finished
print('\a')
exit()
# # Perform detection on video
# In[ ]:
model.load_weights("weights_coco.h5")
dummy_array = np.zeros((1, 1, 1, 1, TRUE_BOX_BUFFER, 4))
# In[ ]:
video_inp = '../basic-yolo-keras/images/phnom_penh.mp4'
video_out = '../basic-yolo-keras/images/phnom_penh_bbox.mp4'
video_reader = cv2.VideoCapture(video_inp)
nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))
video_writer = cv2.VideoWriter(video_out,
cv2.VideoWriter_fourcc(*'XVID'),
50.0,
(frame_w, frame_h))
for i in tqdm(range(nb_frames)):
ret, image = video_reader.read()
input_image = cv2.resize(image, (416, 416))
input_image = input_image / 255.
input_image = input_image[:, :, ::-1]
input_image = np.expand_dims(input_image, 0)
netout = model.predict([input_image, dummy_array])
boxes = decode_netout(netout[0],
obj_threshold=0.3,
nms_threshold=NMS_THRESHOLD,
anchors=ANCHORS,
nb_class=CLASS)
image = draw_boxes(image, boxes, labels=LABELS)
video_writer.write(np.uint8(image))
video_reader.release()
video_writer.release()
| [
"numpy.uint8",
"tensorflow.truediv",
"keras.layers.Conv2D",
"tensorflow.shape",
"utils.decode_netout",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"preprocessing.LSTMBatchGenerator",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"numpy.array",
"numpy.argsort",
"numpy.cumsum",
... | [((3768, 3799), 'numpy.ones', 'np.ones', (['CLASS'], {'dtype': '"""float32"""'}), "(CLASS, dtype='float32')\n", (3775, 3799), True, 'import numpy as np\n'), ((5441, 5475), 'keras.layers.Input', 'Input', ([], {'shape': '(IMAGE_H, IMAGE_W, 3)'}), '(shape=(IMAGE_H, IMAGE_W, 3))\n', (5446, 5475), False, 'from keras.layers import Dense, Input\n'), ((5489, 5531), 'keras.layers.Input', 'Input', ([], {'shape': '(1, 1, 1, TRUE_BOX_BUFFER, 4)'}), '(shape=(1, 1, 1, TRUE_BOX_BUFFER, 4))\n', (5494, 5531), False, 'from keras.layers import Dense, Input\n'), ((10209, 10242), 'keras.layers.merge.concatenate', 'concatenate', (['[skip_connection, x]'], {}), '([skip_connection, x])\n', (10220, 10242), False, 'from keras.layers.merge import concatenate\n'), ((10831, 10871), 'keras.models.Model', 'Model', (['[input_image, true_boxes]', 'output'], {}), '([input_image, true_boxes], output)\n', (10836, 10871), False, 'from keras.models import Model\n'), ((10970, 11019), 'keras.layers.Input', 'Input', ([], {'shape': '(GRID_H, GRID_W, BOX, 4 + 1 + CLASS)'}), '(shape=(GRID_H, GRID_W, BOX, 4 + 1 + CLASS))\n', (10975, 11019), False, 'from keras.layers import Dense, Input\n'), ((12175, 12212), 'keras.layers.Input', 'Input', ([], {'shape': '(5, IMAGE_H, IMAGE_W, 3)'}), '(shape=(5, IMAGE_H, IMAGE_W, 3))\n', (12180, 12212), False, 'from keras.layers import Dense, Input\n'), ((12382, 12415), 'keras.models.Model', 'Model', ([], {'inputs': 'frames', 'outputs': 'out'}), '(inputs=frames, outputs=out)\n', (12387, 12415), False, 'from keras.models import Model\n'), ((12578, 12599), 'utils.WeightReader', 'WeightReader', (['wt_path'], {}), '(wt_path)\n', (12590, 12599), False, 'from utils import WeightReader, decode_netout, draw_boxes\n'), ((30284, 30339), 'preprocessing.BatchGenerator', 'BatchGenerator', (['train', 'generator_config'], {'norm': 'normalize'}), '(train, generator_config, norm=normalize)\n', (30298, 30339), False, 'from preprocessing import parse_annotation, BatchGenerator, 
LSTMBatchGenerator\n'), ((30502, 30569), 'preprocessing.BatchGenerator', 'BatchGenerator', (['val', 'generator_config'], {'norm': 'normalize', 'jitter': '(False)'}), '(val, generator_config, norm=normalize, jitter=False)\n', (30516, 30569), False, 'from preprocessing import parse_annotation, BatchGenerator, LSTMBatchGenerator\n'), ((30642, 30734), 'preprocessing.BatchGenerator', 'BatchGenerator', (['eval_imgs', 'generator_config'], {'norm': 'normalize', 'jitter': '(False)', 'shuffle': '(False)'}), '(eval_imgs, generator_config, norm=normalize, jitter=False,\n shuffle=False)\n', (30656, 30734), False, 'from preprocessing import parse_annotation, BatchGenerator, LSTMBatchGenerator\n'), ((30837, 30926), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0.001)', 'patience': '(3)', 'mode': '"""min"""', 'verbose': '(1)'}), "(monitor='val_loss', min_delta=0.001, patience=3, mode='min',\n verbose=1)\n", (30850, 30926), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\n'), ((31045, 31171), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(PATH + '/LSTM_weights_coco.h5')"], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""', 'period': '(1)'}), "(PATH + '/LSTM_weights_coco.h5', monitor='val_loss', verbose\n =1, save_best_only=True, mode='min', period=1)\n", (31060, 31171), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\n'), ((31327, 31456), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(PATH + '/original_weights_coco.h5')"], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""', 'period': '(1)'}), "(PATH + '/original_weights_coco.h5', monitor='val_loss',\n verbose=1, save_best_only=True, mode='min', period=1)\n", (31342, 31456), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\n'), ((31969, 32035), 'keras.optimizers.Adam', 
'Adam', ([], {'lr': '(5e-05)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-08)', 'decay': '(0.0)'}), '(lr=5e-05, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n', (31973, 32035), False, 'from keras.optimizers import SGD, Adam, RMSprop\n'), ((32632, 32731), 'preprocessing.LSTMBatchGenerator', 'LSTMBatchGenerator', (['eval_imgs', 'generator_config', 'model'], {'norm': 'None', 'jitter': '(False)', 'shuffle': '(False)'}), '(eval_imgs, generator_config, model, norm=None, jitter=\n False, shuffle=False)\n', (32650, 32731), False, 'from preprocessing import parse_annotation, BatchGenerator, LSTMBatchGenerator\n'), ((38837, 38879), 'numpy.zeros', 'np.zeros', (['(1, 1, 1, 1, TRUE_BOX_BUFFER, 4)'], {}), '((1, 1, 1, 1, TRUE_BOX_BUFFER, 4))\n', (38845, 38879), True, 'import numpy as np\n'), ((39025, 39052), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_inp'], {}), '(video_inp)\n', (39041, 39052), False, 'import os, cv2\n'), ((4501, 4535), 'tensorflow.space_to_depth', 'tf.space_to_depth', (['x'], {'block_size': '(2)'}), '(x, block_size=2)\n', (4518, 4535), True, 'import tensorflow as tf\n'), ((5547, 5633), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_1"""', 'use_bias': '(False)'}), "(32, (3, 3), strides=(1, 1), padding='same', name='conv_1', use_bias=\n False)\n", (5553, 5633), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((5646, 5679), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_1"""'}), "(name='norm_1')\n", (5664, 5679), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((5687, 5707), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (5696, 5707), False, 'from 
keras.layers.advanced_activations import LeakyReLU\n'), ((5721, 5751), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5733, 5751), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((5771, 5874), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_2"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(64, (3, 3), strides=(1, 1), padding='same', name='conv_2', use_bias=\n False, trainable=False)\n", (5777, 5874), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((5883, 5933), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_2"""', 'trainable': '(False)'}), "(name='norm_2', trainable=False)\n", (5901, 5933), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((5941, 5961), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (5950, 5961), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((5969, 5999), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5981, 5999), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((6018, 6122), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_3"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(128, (3, 3), strides=(1, 1), padding='same', name='conv_3', use_bias\n =False, trainable=False)\n", (6024, 6122), False, 'from 
keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((6125, 6175), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_3"""', 'trainable': '(False)'}), "(name='norm_3', trainable=False)\n", (6143, 6175), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((6183, 6203), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (6192, 6203), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((6222, 6325), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_4"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(64, (1, 1), strides=(1, 1), padding='same', name='conv_4', use_bias=\n False, trainable=False)\n", (6228, 6325), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((6328, 6378), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_4"""', 'trainable': '(False)'}), "(name='norm_4', trainable=False)\n", (6346, 6378), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((6386, 6406), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (6395, 6406), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((6425, 6529), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_5"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(128, (3, 3), strides=(1, 1), padding='same', name='conv_5', use_bias\n =False, 
trainable=False)\n", (6431, 6529), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((6532, 6582), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_5"""', 'trainable': '(False)'}), "(name='norm_5', trainable=False)\n", (6550, 6582), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((6590, 6610), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (6599, 6610), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((6618, 6648), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (6630, 6648), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((6667, 6771), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_6"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(256, (3, 3), strides=(1, 1), padding='same', name='conv_6', use_bias\n =False, trainable=False)\n", (6673, 6771), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((6774, 6824), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_6"""', 'trainable': '(False)'}), "(name='norm_6', trainable=False)\n", (6792, 6824), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((6832, 6852), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (6841, 
6852), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((6871, 6975), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_7"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(128, (1, 1), strides=(1, 1), padding='same', name='conv_7', use_bias\n =False, trainable=False)\n", (6877, 6975), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((6978, 7028), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_7"""', 'trainable': '(False)'}), "(name='norm_7', trainable=False)\n", (6996, 7028), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((7036, 7056), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (7045, 7056), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((7075, 7179), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_8"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(256, (3, 3), strides=(1, 1), padding='same', name='conv_8', use_bias\n =False, trainable=False)\n", (7081, 7179), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((7182, 7232), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_8"""', 'trainable': '(False)'}), "(name='norm_8', trainable=False)\n", (7200, 7232), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((7240, 7260), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', 
([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (7249, 7260), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((7268, 7298), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (7280, 7298), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((7317, 7421), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_9"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(512, (3, 3), strides=(1, 1), padding='same', name='conv_9', use_bias\n =False, trainable=False)\n", (7323, 7421), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((7424, 7474), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_9"""', 'trainable': '(False)'}), "(name='norm_9', trainable=False)\n", (7442, 7474), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((7482, 7502), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (7491, 7502), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((7522, 7626), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_10"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(256, (1, 1), strides=(1, 1), padding='same', name='conv_10',\n use_bias=False, trainable=False)\n", (7528, 7626), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((7630, 7681), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], 
{'name': '"""norm_10"""', 'trainable': '(False)'}), "(name='norm_10', trainable=False)\n", (7648, 7681), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((7689, 7709), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (7698, 7709), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((7729, 7833), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_11"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(512, (3, 3), strides=(1, 1), padding='same', name='conv_11',\n use_bias=False, trainable=False)\n", (7735, 7833), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((7837, 7888), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_11"""', 'trainable': '(False)'}), "(name='norm_11', trainable=False)\n", (7855, 7888), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((7896, 7916), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (7905, 7916), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((7936, 8040), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_12"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(256, (1, 1), strides=(1, 1), padding='same', name='conv_12',\n use_bias=False, trainable=False)\n", (7942, 8040), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((8044, 8095), 
'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_12"""', 'trainable': '(False)'}), "(name='norm_12', trainable=False)\n", (8062, 8095), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((8103, 8123), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (8112, 8123), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((8143, 8247), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_13"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(512, (3, 3), strides=(1, 1), padding='same', name='conv_13',\n use_bias=False, trainable=False)\n", (8149, 8247), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((8251, 8302), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_13"""', 'trainable': '(False)'}), "(name='norm_13', trainable=False)\n", (8269, 8302), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((8310, 8330), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (8319, 8330), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((8360, 8390), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (8372, 8390), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((8410, 8515), 'keras.layers.Conv2D', 'Conv2D', (['(1024)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': 
'"""conv_14"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(1024, (3, 3), strides=(1, 1), padding='same', name='conv_14',\n use_bias=False, trainable=False)\n", (8416, 8515), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((8519, 8570), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_14"""', 'trainable': '(False)'}), "(name='norm_14', trainable=False)\n", (8537, 8570), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((8578, 8598), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (8587, 8598), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((8618, 8722), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_15"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(512, (1, 1), strides=(1, 1), padding='same', name='conv_15',\n use_bias=False, trainable=False)\n", (8624, 8722), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((8726, 8777), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_15"""', 'trainable': '(False)'}), "(name='norm_15', trainable=False)\n", (8744, 8777), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((8785, 8805), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (8794, 8805), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((8825, 8930), 'keras.layers.Conv2D', 'Conv2D', (['(1024)', '(3, 3)'], 
{'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_16"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(1024, (3, 3), strides=(1, 1), padding='same', name='conv_16',\n use_bias=False, trainable=False)\n", (8831, 8930), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((8934, 8985), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_16"""', 'trainable': '(False)'}), "(name='norm_16', trainable=False)\n", (8952, 8985), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((8993, 9013), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (9002, 9013), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((9033, 9137), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_17"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(512, (1, 1), strides=(1, 1), padding='same', name='conv_17',\n use_bias=False, trainable=False)\n", (9039, 9137), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((9141, 9192), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_17"""', 'trainable': '(False)'}), "(name='norm_17', trainable=False)\n", (9159, 9192), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((9200, 9220), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (9209, 9220), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((9240, 9345), 
'keras.layers.Conv2D', 'Conv2D', (['(1024)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_18"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(1024, (3, 3), strides=(1, 1), padding='same', name='conv_18',\n use_bias=False, trainable=False)\n", (9246, 9345), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((9349, 9400), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_18"""', 'trainable': '(False)'}), "(name='norm_18', trainable=False)\n", (9367, 9400), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((9408, 9428), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (9417, 9428), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((9448, 9553), 'keras.layers.Conv2D', 'Conv2D', (['(1024)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_19"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(1024, (3, 3), strides=(1, 1), padding='same', name='conv_19',\n use_bias=False, trainable=False)\n", (9454, 9553), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((9557, 9608), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_19"""', 'trainable': '(False)'}), "(name='norm_19', trainable=False)\n", (9575, 9608), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((9616, 9636), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (9625, 9636), False, 'from 
keras.layers.advanced_activations import LeakyReLU\n'), ((9656, 9761), 'keras.layers.Conv2D', 'Conv2D', (['(1024)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_20"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(1024, (3, 3), strides=(1, 1), padding='same', name='conv_20',\n use_bias=False, trainable=False)\n", (9662, 9761), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((9765, 9816), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_20"""', 'trainable': '(False)'}), "(name='norm_20', trainable=False)\n", (9783, 9816), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((9824, 9844), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (9833, 9844), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((9878, 9982), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_21"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(64, (1, 1), strides=(1, 1), padding='same', name='conv_21', use_bias\n =False, trainable=False)\n", (9884, 9982), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((10018, 10069), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_21"""', 'trainable': '(False)'}), "(name='norm_21', trainable=False)\n", (10036, 10069), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((10105, 10125), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], 
{'alpha': '(0.1)'}), '(alpha=0.1)\n', (10114, 10125), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((10161, 10186), 'keras.layers.Lambda', 'Lambda', (['space_to_depth_x2'], {}), '(space_to_depth_x2)\n', (10167, 10186), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((10259, 10364), 'keras.layers.Conv2D', 'Conv2D', (['(1024)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_22"""', 'use_bias': '(False)', 'trainable': '(False)'}), "(1024, (3, 3), strides=(1, 1), padding='same', name='conv_22',\n use_bias=False, trainable=False)\n", (10265, 10364), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((10368, 10419), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_22"""', 'trainable': '(False)'}), "(name='norm_22', trainable=False)\n", (10386, 10419), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((10427, 10447), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (10436, 10447), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((10467, 10557), 'keras.layers.Conv2D', 'Conv2D', (['(BOX * (4 + 1 + CLASS))', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_23"""'}), "(BOX * (4 + 1 + CLASS), (1, 1), strides=(1, 1), padding='same', name=\n 'conv_23')\n", (10473, 10557), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((10565, 10610), 'keras.layers.Reshape', 'Reshape', (['(GRID_H, GRID_W, BOX, 4 + 1 + CLASS)'], {}), '((GRID_H, 
GRID_W, BOX, 4 + 1 + CLASS))\n', (10572, 10610), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((10771, 10799), 'keras.layers.Lambda', 'Lambda', (['(lambda args: args[0])'], {}), '(lambda args: args[0])\n', (10777, 10799), False, 'from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((12217, 12239), 'keras.layers.wrappers.TimeDistributed', 'TimeDistributed', (['model'], {}), '(model)\n', (12232, 12239), False, 'from keras.layers.wrappers import TimeDistributed\n'), ((12305, 12333), 'keras.layers.recurrent.LSTM', 'LSTM', (['input_dim'], {'name': '"""lstm"""'}), "(input_dim, name='lstm')\n", (12309, 12333), False, 'from keras.layers.recurrent import LSTM\n'), ((12343, 12371), 'keras.layers.Dense', 'Dense', (['input_dim'], {'name': '"""out"""'}), "(input_dim, name='out')\n", (12348, 12371), False, 'from keras.layers import Dense, Input\n'), ((16052, 16094), 'numpy.zeros', 'np.zeros', (['(1, 1, 1, 1, TRUE_BOX_BUFFER, 4)'], {}), '((1, 1, 1, 1, TRUE_BOX_BUFFER, 4))\n', (16060, 16094), True, 'import numpy as np\n'), ((16140, 16168), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (16150, 16168), True, 'import matplotlib.pyplot as plt\n'), ((16188, 16217), 'cv2.resize', 'cv2.resize', (['image', '(416, 416)'], {}), '(image, (416, 416))\n', (16198, 16217), False, 'import os, cv2\n'), ((16315, 16345), 'numpy.expand_dims', 'np.expand_dims', (['input_image', '(0)'], {}), '(input_image, 0)\n', (16329, 16345), True, 'import numpy as np\n'), ((16415, 16535), 'utils.decode_netout', 'decode_netout', (['netout[0]'], {'obj_threshold': 'OBJ_THRESHOLD', 'nms_threshold': 'NMS_THRESHOLD', 'anchors': 'ANCHORS', 'nb_class': 'CLASS'}), '(netout[0], obj_threshold=OBJ_THRESHOLD, nms_threshold=\n NMS_THRESHOLD, 
anchors=ANCHORS, nb_class=CLASS)\n', (16428, 16535), False, 'from utils import decode_netout, compute_overlap, compute_ap\n'), ((16647, 16686), 'utils.draw_boxes', 'draw_boxes', (['image', 'boxes'], {'labels': 'LABELS'}), '(image, boxes, labels=LABELS)\n', (16657, 16686), False, 'from utils import WeightReader, decode_netout, draw_boxes\n'), ((16692, 16721), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image[:, :, ::-1]'], {}), '(image[:, :, ::-1])\n', (16702, 16721), True, 'import matplotlib.pyplot as plt\n'), ((22214, 22248), 'pickle.dump', 'pickle.dump', (['average_precisions', 'f'], {}), '(average_precisions, f)\n', (22225, 22248), False, 'import pickle\n'), ((22482, 22519), 'tensorflow.transpose', 'tf.transpose', (['cell_x', '(0, 2, 1, 3, 4)'], {}), '(cell_x, (0, 2, 1, 3, 4))\n', (22494, 22519), True, 'import tensorflow as tf\n'), ((22622, 22642), 'tensorflow.zeros', 'tf.zeros', (['mask_shape'], {}), '(mask_shape)\n', (22630, 22642), True, 'import tensorflow as tf\n'), ((22659, 22679), 'tensorflow.zeros', 'tf.zeros', (['mask_shape'], {}), '(mask_shape)\n', (22667, 22679), True, 'import tensorflow as tf\n'), ((22697, 22717), 'tensorflow.zeros', 'tf.zeros', (['mask_shape'], {}), '(mask_shape)\n', (22705, 22717), True, 'import tensorflow as tf\n'), ((22730, 22746), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {}), '(0.0)\n', (22741, 22746), True, 'import tensorflow as tf\n'), ((22765, 22781), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {}), '(0.0)\n', (22776, 22781), True, 'import tensorflow as tf\n'), ((23056, 23082), 'tensorflow.sigmoid', 'tf.sigmoid', (['y_pred[..., 4]'], {}), '(y_pred[..., 4])\n', (23066, 23082), True, 'import tensorflow as tf\n'), ((23710, 23742), 'tensorflow.maximum', 'tf.maximum', (['pred_mins', 'true_mins'], {}), '(pred_mins, true_mins)\n', (23720, 23742), True, 'import tensorflow as tf\n'), ((23765, 23799), 'tensorflow.minimum', 'tf.minimum', (['pred_maxes', 'true_maxes'], {}), '(pred_maxes, true_maxes)\n', (23775, 23799), 
True, 'import tensorflow as tf\n'), ((23819, 23868), 'tensorflow.maximum', 'tf.maximum', (['(intersect_maxes - intersect_mins)', '(0.0)'], {}), '(intersect_maxes - intersect_mins, 0.0)\n', (23829, 23868), True, 'import tensorflow as tf\n'), ((24131, 24171), 'tensorflow.truediv', 'tf.truediv', (['intersect_areas', 'union_areas'], {}), '(intersect_areas, union_areas)\n', (24141, 24171), True, 'import tensorflow as tf\n'), ((24278, 24308), 'tensorflow.argmax', 'tf.argmax', (['y_true[..., 5:]', '(-1)'], {}), '(y_true[..., 5:], -1)\n', (24287, 24308), True, 'import tensorflow as tf\n'), ((24874, 24904), 'tensorflow.expand_dims', 'tf.expand_dims', (['pred_box_xy', '(4)'], {}), '(pred_box_xy, 4)\n', (24888, 24904), True, 'import tensorflow as tf\n'), ((24919, 24949), 'tensorflow.expand_dims', 'tf.expand_dims', (['pred_box_wh', '(4)'], {}), '(pred_box_wh, 4)\n', (24933, 24949), True, 'import tensorflow as tf\n'), ((25084, 25116), 'tensorflow.maximum', 'tf.maximum', (['pred_mins', 'true_mins'], {}), '(pred_mins, true_mins)\n', (25094, 25116), True, 'import tensorflow as tf\n'), ((25139, 25173), 'tensorflow.minimum', 'tf.minimum', (['pred_maxes', 'true_maxes'], {}), '(pred_maxes, true_maxes)\n', (25149, 25173), True, 'import tensorflow as tf\n'), ((25193, 25242), 'tensorflow.maximum', 'tf.maximum', (['(intersect_maxes - intersect_mins)', '(0.0)'], {}), '(intersect_maxes - intersect_mins, 0.0)\n', (25203, 25242), True, 'import tensorflow as tf\n'), ((25489, 25529), 'tensorflow.truediv', 'tf.truediv', (['intersect_areas', 'union_areas'], {}), '(intersect_areas, union_areas)\n', (25499, 25529), True, 'import tensorflow as tf\n'), ((25547, 25580), 'tensorflow.reduce_max', 'tf.reduce_max', (['iou_scores'], {'axis': '(4)'}), '(iou_scores, axis=4)\n', (25560, 25580), True, 'import tensorflow as tf\n'), ((26069, 26112), 'tensorflow.to_float', 'tf.to_float', (['(coord_mask < COORD_SCALE / 2.0)'], {}), '(coord_mask < COORD_SCALE / 2.0)\n', (26080, 26112), True, 'import tensorflow as 
tf\n'), ((26123, 26147), 'tensorflow.assign_add', 'tf.assign_add', (['seen', '(1.0)'], {}), '(seen, 1.0)\n', (26136, 26147), True, 'import tensorflow as tf\n'), ((27444, 27540), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'true_box_class', 'logits': 'pred_box_class'}), '(labels=true_box_class,\n logits=pred_box_class)\n', (27490, 27540), True, 'import tensorflow as tf\n'), ((27691, 27720), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['y_true[..., 4]'], {}), '(y_true[..., 4])\n', (27704, 27720), True, 'import tensorflow as tf\n'), ((27933, 27976), 'tensorflow.assign_add', 'tf.assign_add', (['total_recall', 'current_recall'], {}), '(total_recall, current_recall)\n', (27946, 27976), True, 'import tensorflow as tf\n'), ((28073, 28136), 'tensorflow.Print', 'tf.Print', (['loss', '[loss_xy]'], {'message': '"""Loss XY \t"""', 'summarize': '(1000)'}), "(loss, [loss_xy], message='Loss XY \\t', summarize=1000)\n", (28081, 28136), True, 'import tensorflow as tf\n'), ((28148, 28211), 'tensorflow.Print', 'tf.Print', (['loss', '[loss_wh]'], {'message': '"""Loss WH \t"""', 'summarize': '(1000)'}), "(loss, [loss_wh], message='Loss WH \\t', summarize=1000)\n", (28156, 28211), True, 'import tensorflow as tf\n'), ((28223, 28290), 'tensorflow.Print', 'tf.Print', (['loss', '[loss_conf]'], {'message': '"""Loss Conf \t"""', 'summarize': '(1000)'}), "(loss, [loss_conf], message='Loss Conf \\t', summarize=1000)\n", (28231, 28290), True, 'import tensorflow as tf\n'), ((28302, 28371), 'tensorflow.Print', 'tf.Print', (['loss', '[loss_class]'], {'message': '"""Loss Class \t"""', 'summarize': '(1000)'}), "(loss, [loss_class], message='Loss Class \\t', summarize=1000)\n", (28310, 28371), True, 'import tensorflow as tf\n'), ((28383, 28446), 'tensorflow.Print', 'tf.Print', (['loss', '[loss]'], {'message': '"""Total Loss \t"""', 'summarize': '(1000)'}), "(loss, [loss], message='Total Loss \\t', summarize=1000)\n", (28391, 
28446), True, 'import tensorflow as tf\n'), ((28458, 28535), 'tensorflow.Print', 'tf.Print', (['loss', '[current_recall]'], {'message': '"""Current Recall \t"""', 'summarize': '(1000)'}), "(loss, [current_recall], message='Current Recall \\t', summarize=1000)\n", (28466, 28535), True, 'import tensorflow as tf\n'), ((28547, 28633), 'tensorflow.Print', 'tf.Print', (['loss', '[total_recall / seen]'], {'message': '"""Average Recall \t"""', 'summarize': '(1000)'}), "(loss, [total_recall / seen], message='Average Recall \\t',\n summarize=1000)\n", (28555, 28633), True, 'import tensorflow as tf\n'), ((29611, 29626), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (29622, 29626), False, 'import pickle\n'), ((29972, 29987), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (29983, 29987), False, 'import pickle\n'), ((39305, 39336), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (39327, 39336), False, 'import os, cv2\n'), ((39516, 39545), 'cv2.resize', 'cv2.resize', (['image', '(416, 416)'], {}), '(image, (416, 416))\n', (39526, 39545), False, 'import os, cv2\n'), ((39643, 39673), 'numpy.expand_dims', 'np.expand_dims', (['input_image', '(0)'], {}), '(input_image, 0)\n', (39657, 39673), True, 'import numpy as np\n'), ((39743, 39852), 'utils.decode_netout', 'decode_netout', (['netout[0]'], {'obj_threshold': '(0.3)', 'nms_threshold': 'NMS_THRESHOLD', 'anchors': 'ANCHORS', 'nb_class': 'CLASS'}), '(netout[0], obj_threshold=0.3, nms_threshold=NMS_THRESHOLD,\n anchors=ANCHORS, nb_class=CLASS)\n', (39756, 39852), False, 'from utils import decode_netout, compute_overlap, compute_ap\n'), ((39965, 40004), 'utils.draw_boxes', 'draw_boxes', (['image', 'boxes'], {'labels': 'LABELS'}), '(image, boxes, labels=LABELS)\n', (39975, 40004), False, 'from utils import WeightReader, decode_netout, draw_boxes\n'), ((12268, 12277), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (12275, 12277), False, 'from keras.layers import Reshape, 
Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, UpSampling2D, TimeDistributed, LSTM\n'), ((17206, 17240), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + '/' + img_name)"], {}), "(path + '/' + img_name)\n", (17217, 17240), True, 'import matplotlib.pyplot as plt\n'), ((18820, 18863), 'numpy.array', 'np.array', (['[box.score for box in pred_boxes]'], {}), '([box.score for box in pred_boxes])\n', (18828, 18863), True, 'import numpy as np\n'), ((18886, 18929), 'numpy.array', 'np.array', (['[box.label for box in pred_boxes]'], {}), '([box.label for box in pred_boxes])\n', (18894, 18929), True, 'import numpy as np\n'), ((19300, 19318), 'numpy.argsort', 'np.argsort', (['(-score)'], {}), '(-score)\n', (19310, 19318), True, 'import numpy as np\n'), ((19995, 20009), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (20003, 20009), True, 'import numpy as np\n'), ((20035, 20049), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (20043, 20049), True, 'import numpy as np\n'), ((20067, 20081), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (20075, 20081), True, 'import numpy as np\n'), ((21524, 21543), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (21534, 21543), True, 'import numpy as np\n'), ((21724, 21750), 'numpy.cumsum', 'np.cumsum', (['false_positives'], {}), '(false_positives)\n', (21733, 21750), True, 'import numpy as np\n'), ((21776, 21801), 'numpy.cumsum', 'np.cumsum', (['true_positives'], {}), '(true_positives)\n', (21785, 21801), True, 'import numpy as np\n'), ((22065, 22094), 'utils.compute_ap', 'compute_ap', (['recall', 'precision'], {}), '(recall, precision)\n', (22075, 22094), False, 'from utils import decode_netout, compute_overlap, compute_ap\n'), ((22346, 22362), 'tensorflow.shape', 'tf.shape', (['y_true'], {}), '(y_true)\n', (22354, 22362), True, 'import tensorflow as tf\n'), ((22545, 22576), 'tensorflow.concat', 'tf.concat', (['[cell_x, cell_y]', '(-1)'], {}), '([cell_x, 
cell_y], -1)\n', (22554, 22576), True, 'import tensorflow as tf\n'), ((22861, 22888), 'tensorflow.sigmoid', 'tf.sigmoid', (['y_pred[..., :2]'], {}), '(y_pred[..., :2])\n', (22871, 22888), True, 'import tensorflow as tf\n'), ((22943, 22967), 'tensorflow.exp', 'tf.exp', (['y_pred[..., 2:4]'], {}), '(y_pred[..., 2:4])\n', (22949, 22967), True, 'import tensorflow as tf\n'), ((22970, 23008), 'numpy.reshape', 'np.reshape', (['ANCHORS', '[1, 1, 1, BOX, 2]'], {}), '(ANCHORS, [1, 1, 1, BOX, 2])\n', (22980, 23008), True, 'import numpy as np\n'), ((24455, 24494), 'tensorflow.expand_dims', 'tf.expand_dims', (['y_true[..., 4]'], {'axis': '(-1)'}), '(y_true[..., 4], axis=-1)\n', (24469, 24494), True, 'import tensorflow as tf\n'), ((26199, 26229), 'tensorflow.less', 'tf.less', (['seen', 'WARM_UP_BATCHES'], {}), '(seen, WARM_UP_BATCHES)\n', (26206, 26229), True, 'import tensorflow as tf\n'), ((26941, 26970), 'tensorflow.to_float', 'tf.to_float', (['(coord_mask > 0.0)'], {}), '(coord_mask > 0.0)\n', (26952, 26970), True, 'import tensorflow as tf\n'), ((27004, 27032), 'tensorflow.to_float', 'tf.to_float', (['(conf_mask > 0.0)'], {}), '(conf_mask > 0.0)\n', (27015, 27032), True, 'import tensorflow as tf\n'), ((27067, 27096), 'tensorflow.to_float', 'tf.to_float', (['(class_mask > 0.0)'], {}), '(class_mask > 0.0)\n', (27078, 27096), True, 'import tensorflow as tf\n'), ((27554, 27592), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(loss_class * class_mask)'], {}), '(loss_class * class_mask)\n', (27567, 27592), True, 'import tensorflow as tf\n'), ((40029, 40044), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (40037, 40044), True, 'import numpy as np\n'), ((16856, 16876), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (16870, 16876), False, 'import os, cv2\n'), ((16890, 16907), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (16901, 16907), False, 'import os, cv2\n'), ((18607, 18621), 'os.path.normpath', 'normpath', (['path'], {}), '(path)\n', (18615, 
18621), False, 'from os.path import normpath, basename\n'), ((18988, 19127), 'numpy.array', 'np.array', (['[[box.xmin * raw_width, box.ymin * raw_height, box.xmax * raw_width, box.\n ymax * raw_height, box.score] for box in pred_boxes]'], {}), '([[box.xmin * raw_width, box.ymin * raw_height, box.xmax *\n raw_width, box.ymax * raw_height, box.score] for box in pred_boxes])\n', (18996, 19127), True, 'import numpy as np\n'), ((19199, 19213), 'numpy.array', 'np.array', (['[[]]'], {}), '([[]])\n', (19207, 19213), True, 'import numpy as np\n'), ((25956, 25996), 'tensorflow.gather', 'tf.gather', (['CLASS_WEIGHTS', 'true_box_class'], {}), '(CLASS_WEIGHTS, true_box_class)\n', (25965, 25996), True, 'import tensorflow as tf\n'), ((27753, 27785), 'tensorflow.to_float', 'tf.to_float', (['(true_box_conf > 0.5)'], {}), '(true_box_conf > 0.5)\n', (27764, 27785), True, 'import tensorflow as tf\n'), ((27788, 27820), 'tensorflow.to_float', 'tf.to_float', (['(pred_box_conf > 0.3)'], {}), '(pred_box_conf > 0.3)\n', (27799, 27820), True, 'import tensorflow as tf\n'), ((28005, 28016), 'tensorflow.zeros', 'tf.zeros', (['(1)'], {}), '(1)\n', (28013, 28016), True, 'import tensorflow as tf\n'), ((36960, 36973), 'keras.layers.recurrent.LSTM.values', 'LSTM.values', ([], {}), '()\n', (36971, 36973), False, 'from keras.layers.recurrent import LSTM\n'), ((20406, 20429), 'numpy.append', 'np.append', (['scores', 'd[4]'], {}), '(scores, d[4])\n', (20415, 20429), True, 'import numpy as np\n'), ((20762, 20789), 'numpy.argmax', 'np.argmax', (['overlaps'], {'axis': '(1)'}), '(overlaps, axis=1)\n', (20771, 20789), True, 'import numpy as np\n'), ((22412, 22428), 'tensorflow.range', 'tf.range', (['GRID_W'], {}), '(GRID_W)\n', (22420, 22428), True, 'import tensorflow as tf\n'), ((25609, 25637), 'tensorflow.to_float', 'tf.to_float', (['(best_ious < 0.6)'], {}), '(best_ious < 0.6)\n', (25620, 25637), True, 'import tensorflow as tf\n'), ((26623, 26647), 'tensorflow.ones_like', 'tf.ones_like', (['coord_mask'], 
{}), '(coord_mask)\n', (26635, 26647), True, 'import tensorflow as tf\n'), ((20515, 20544), 'numpy.append', 'np.append', (['false_positives', '(1)'], {}), '(false_positives, 1)\n', (20524, 20544), True, 'import numpy as np\n'), ((20582, 20610), 'numpy.append', 'np.append', (['true_positives', '(0)'], {}), '(true_positives, 0)\n', (20591, 20610), True, 'import numpy as np\n'), ((20684, 20709), 'numpy.expand_dims', 'np.expand_dims', (['d'], {'axis': '(0)'}), '(d, axis=0)\n', (20698, 20709), True, 'import numpy as np\n'), ((20993, 21022), 'numpy.append', 'np.append', (['false_positives', '(0)'], {}), '(false_positives, 0)\n', (21002, 21022), True, 'import numpy as np\n'), ((21060, 21088), 'numpy.append', 'np.append', (['true_positives', '(1)'], {}), '(true_positives, 1)\n', (21069, 21088), True, 'import numpy as np\n'), ((21218, 21247), 'numpy.append', 'np.append', (['false_positives', '(1)'], {}), '(false_positives, 1)\n', (21227, 21247), True, 'import numpy as np\n'), ((21285, 21313), 'numpy.append', 'np.append', (['true_positives', '(0)'], {}), '(true_positives, 0)\n', (21294, 21313), True, 'import numpy as np\n'), ((21974, 21994), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (21982, 21994), True, 'import numpy as np\n'), ((27127, 27163), 'tensorflow.square', 'tf.square', (['(true_box_xy - pred_box_xy)'], {}), '(true_box_xy - pred_box_xy)\n', (27136, 27163), True, 'import tensorflow as tf\n'), ((27235, 27271), 'tensorflow.square', 'tf.square', (['(true_box_wh - pred_box_wh)'], {}), '(true_box_wh - pred_box_wh)\n', (27244, 27271), True, 'import tensorflow as tf\n'), ((27345, 27385), 'tensorflow.square', 'tf.square', (['(true_box_conf - pred_box_conf)'], {}), '(true_box_conf - pred_box_conf)\n', (27354, 27385), True, 'import tensorflow as tf\n'), ((31668, 31697), 'os.path.expanduser', 'os.path.expanduser', (['"""./lstm/"""'], {}), "('./lstm/')\n", (31686, 31697), False, 'import os, cv2\n'), ((31757, 31786), 'os.path.expanduser', 
'os.path.expanduser', (['"""./lstm/"""'], {}), "('./lstm/')\n", (31775, 31786), False, 'import os, cv2\n'), ((37788, 37801), 'keras.layers.recurrent.LSTM.values', 'LSTM.values', ([], {}), '()\n', (37799, 37801), False, 'from keras.layers.recurrent import LSTM\n'), ((26414, 26439), 'tensorflow.ones_like', 'tf.ones_like', (['true_box_wh'], {}), '(true_box_wh)\n', (26426, 26439), True, 'import tensorflow as tf\n'), ((26442, 26480), 'numpy.reshape', 'np.reshape', (['ANCHORS', '[1, 1, 1, BOX, 2]'], {}), '(ANCHORS, [1, 1, 1, BOX, 2])\n', (26452, 26480), True, 'import numpy as np\n')] |
# Copyright 2020 Kaggle Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import math
from os import path
from random import choice, randint, sample
import numpy as np
from .helpers import board_agent, Board, ShipAction, ShipyardAction
from kaggle_environments import utils
def get_col_row(size, pos):
    """Convert a flat board index into its (column, row) pair on a size x size grid."""
    row, col = divmod(pos, size)
    return col, row
def get_to_pos(size, pos, direction):
    """Return the flat index one step from ``pos`` in ``direction`` on a toroidal board.

    The board is size x size with row-major flat indices; moves wrap around the
    edges. Unknown directions return None (same as the original if/elif chain).
    The per-edge special cases of the original are replaced by uniform modular
    arithmetic, which is equivalent and removes the duplicated wraparound logic.
    """
    col, row = pos % size, pos // size
    if direction == "NORTH":
        return ((row - 1) % size) * size + col
    if direction == "SOUTH":
        return ((row + 1) % size) * size + col
    if direction == "EAST":
        return row * size + (col + 1) % size
    if direction == "WEST":
        return row * size + (col - 1) % size
    return None
@board_agent
def random_agent(board):
    """Baseline agent that issues randomized actions for every ship and shipyard.

    Ships (visited in random order): may keep mining, convert to a shipyard
    (halite-budget permitting), or take a uniformly random move/noop.
    Shipyards (visited in random order): always spawn when the player has no
    ships and can afford it; otherwise spawn with 20% probability.
    """
    me = board.current_player
    # Track halite spent on conversions/spawns within this single turn.
    remaining_halite = me.halite
    ships = me.ships
    # randomize ship order
    ships = sample(ships, len(ships))
    for ship in ships:
        if ship.cell.halite > ship.halite and randint(0, 1) == 0:
            # 50% chance to mine
            continue
        if ship.cell.shipyard is None and remaining_halite > board.configuration.convert_cost:
            # 5% chance to convert at any time
            if randint(0, 19) == 0:
                remaining_halite -= board.configuration.convert_cost
                ship.next_action = ShipAction.CONVERT
                continue
            # 50% chance to convert if there are no shipyards
            if randint(0, 1) == 0 and len(me.shipyards) == 0:
                remaining_halite -= board.configuration.convert_cost
                ship.next_action = ShipAction.CONVERT
                continue
        # None represents the chance to do nothing
        ship.next_action = choice(ShipAction.moves())
    shipyards = me.shipyards
    # randomize shipyard order
    shipyards = sample(shipyards, len(shipyards))
    # Count ships after the actions above resolve, via a simulated next step.
    ship_count = len(board.next().current_player.ships)
    for shipyard in shipyards:
        # If there are no ships, always spawn if possible
        if ship_count == 0 and remaining_halite > board.configuration.spawn_cost:
            remaining_halite -= board.configuration.spawn_cost
            shipyard.next_action = ShipyardAction.SPAWN
        # Otherwise: 20% chance to spawn, when affordable.
        elif randint(0, 4) == 0 and remaining_halite > board.configuration.spawn_cost:
            remaining_halite -= board.configuration.spawn_cost
            shipyard.next_action = ShipyardAction.SPAWN
# Registry of built-in agents selectable by name.
agents = {"random": random_agent}
def populate_board(state, env):
    """Initialize the board at step 0: generate a symmetric halite map and give each player one ship.

    Halite is procedurally generated in one quartile (seeded clusters spread
    radially plus random noise), then mirrored into all four quadrants so every
    player starts on an equivalent map. Mutates ``state`` in place and returns it.
    """
    obs = state[0].observation
    config = env.configuration
    size = env.configuration.size
    uid_counter = 0
    # This is a consistent way to generate unique strings to form ship and shipyard ids
    def create_uid():
        nonlocal uid_counter
        uid_counter += 1
        return f"{obs.step}-{uid_counter}"
    # Set step for initialization to 0.
    obs.step = 0
    # Distribute Halite evenly into quartiles.
    half = math.ceil(size / 2)
    grid = [[0] * half for _ in range(half)]
    # Randomly place a few halite "seeds".
    for i in range(half):
        # random distribution across entire quartile
        grid[randint(0, half - 1)][randint(0, half - 1)] = i ** 2
        # as well as a particular distribution weighted toward the center of the map
        grid[randint(half // 2, half - 1)][randint(half // 2, half - 1)] = i ** 2
    # Spread the seeds radially.
    radius_grid = copy.deepcopy(grid)
    for r in range(half):
        for c in range(half):
            value = grid[r][c]
            if value == 0:
                continue
            # keep initial seed values, but constrain radius of clusters
            radius = min(round((value / half) ** 0.5), 1)
            for r2 in range(r - radius + 1, r + radius):
                for c2 in range(c - radius + 1, c + radius):
                    if 0 <= r2 < half and 0 <= c2 < half:
                        # Contribution decays with euclidean distance from the seed.
                        distance = (abs(r2 - r) ** 2 + abs(c2 - c) ** 2) ** 0.5
                        radius_grid[r2][c2] += int(value / max(1, distance) ** distance)
    # add some random sprouts of halite
    radius_grid = np.asarray(radius_grid)
    add_grid = np.random.gumbel(0, 300.0, size=(half, half)).astype(int)
    sparse_radius_grid = np.random.binomial(1, 0.5, size=(half, half))
    add_grid = np.clip(add_grid, 0, a_max=None) * sparse_radius_grid
    radius_grid += add_grid
    # add another set of random locations to the center corner
    corner_grid = np.random.gumbel(0, 500.0, size=(half // 4, half // 4)).astype(int)
    corner_grid = np.clip(corner_grid, 0, a_max=None)
    radius_grid[half - (half // 4):, half - (half // 4):] += corner_grid
    # Normalize the available halite against the defined configuration starting halite.
    total = sum([sum(row) for row in radius_grid])
    obs.halite = [0] * (size ** 2)
    # Mirror the quartile into all four quadrants so the map is symmetric.
    for r, row in enumerate(radius_grid):
        for c, val in enumerate(row):
            val = int(val * config.startingHalite / total / 4)
            obs.halite[size * r + c] = val
            obs.halite[size * r + (size - c - 1)] = val
            obs.halite[size * (size - 1) - (size * r) + c] = val
            obs.halite[size * (size - 1) - (size * r) + (size - c - 1)] = val
    # Distribute the starting ships evenly.
    num_agents = len(state)
    starting_positions = [0] * num_agents
    if num_agents == 1:
        starting_positions[0] = size * (size // 2) + size // 2
    elif num_agents == 2:
        starting_positions[0] = size * (size // 2) + size // 4
        starting_positions[1] = size * (size // 2) + math.ceil(3 * size / 4) - 1
    elif num_agents == 4:
        starting_positions[0] = size * (size // 4) + size // 4
        starting_positions[1] = size * (size // 4) + 3 * size // 4
        starting_positions[2] = size * (3 * size // 4) + size // 4
        starting_positions[3] = size * (3 * size // 4) + 3 * size // 4
    # Initialize the players.
    obs.players = []
    for i in range(num_agents):
        # Each player starts with one ship carrying 0 halite: [halite, shipyards, ships].
        ships = {create_uid(): [starting_positions[i], 0]}
        obs.players.append([state[0].reward, {}, ships])
    return state
def interpreter(state, env):
    """Advance the game one step: apply agent actions, eliminate dead agents, update rewards.

    On the first call (``env.done``) it delegates to ``populate_board``.
    Mutates ``state`` in place and returns it.
    """
    obs = state[0].observation
    config = env.configuration
    # Initialize the board (place cell halite and starting ships).
    if env.done:
        return populate_board(state, env)
    actions = [agent.action for agent in state]
    board = Board(obs, config, actions)
    board = board.next()
    state[0].observation = obs = utils.structify(board.observation)
    # Remove players with invalid status or insufficient potential.
    for index, agent in enumerate(state):
        player_halite, shipyards, ships = obs.players[index]
        if agent.status == "ACTIVE" and len(ships) == 0 and (len(shipyards) == 0 or player_halite < config.spawnCost):
            # Agent can no longer gather any halite
            agent.status = "DONE"
            # Negative reward ranks eliminated agents by how early they died.
            agent.reward = board.step - board.configuration.episode_steps - 1
        if agent.status != "ACTIVE" and agent.status != "DONE":
            # Errored/invalid agents forfeit everything.
            obs.players[index] = [0, {}, {}]
    # Check if done (< 2 players and num_agents > 1)
    if len(state) > 1 and sum(1 for agent in state if agent.status == "ACTIVE") < 2:
        for agent in state:
            if agent.status == "ACTIVE":
                agent.status = "DONE"
    # Update Rewards.
    for index, agent in enumerate(state):
        if agent.status == "ACTIVE":
            # Active agents are scored by their current halite total.
            agent.reward = obs.players[index][0]
        elif agent.status != "DONE":
            agent.reward = 0
    return state
def renderer(state, env):
    """Render the board as an ASCII grid.

    Each cell occupies two stacked text rows: the top row shows a ship as
    ``<cargo>S<owner>`` (cargo capped at 99), the bottom row shows either a
    shipyard as ``SY<owner>`` or the cell's halite (capped at 9999).
    """
    size = env.configuration.size
    obs = state[0].observation
    # Per-cell record: [halite, shipyard_owner, ship_owner, ship_halite].
    cells = [[h, -1, -1, -1] for h in obs.halite]
    for owner, (_, shipyards, ships) in enumerate(obs.players):
        for yard_pos in shipyards.values():
            cells[yard_pos][1] = owner
        for ship_pos, ship_cargo in ships.values():
            cells[ship_pos][2] = owner
            cells[ship_pos][3] = ship_cargo
    divider = "+" + "+".join(["----"] * size) + "+\n"
    pieces = [divider]
    for row in range(size):
        ship_line = ""
        ground_line = ""
        for col in range(size):
            halite, shipyard, ship, cargo = cells[col + row * size]
            ship_text = f"{min(int(cargo), 99)}S{ship}" if ship > -1 else ""
            ship_line += "|" + ship_text.ljust(4)
            if shipyard > -1:
                ground_line += "|" + f"SY{shipyard}".ljust(4)
            else:
                ground_line += "|" + str(min(int(halite), 9999)).rjust(4)
        pieces.append(ship_line + "|\n")
        pieces.append(ground_line + "|\n")
        pieces.append(divider)
    return "".join(pieces)
# Load the environment specification that ships next to this module; it defines
# the game's name, configuration defaults, and observation/action schemas.
dir_path = path.dirname(__file__)
json_path = path.abspath(path.join(dir_path, "halite.json"))
with open(json_path) as json_file:
    specification = json.load(json_file)
def html_renderer():
    """Return the JavaScript source used to render the game in a browser."""
    script_path = path.abspath(path.join(dir_path, "halite.js"))
    with open(script_path) as script_file:
        return script_file.read()
| [
"numpy.clip",
"math.ceil",
"numpy.asarray",
"os.path.join",
"kaggle_environments.utils.structify",
"os.path.dirname",
"copy.deepcopy",
"json.load",
"numpy.random.gumbel",
"random.randint",
"numpy.random.binomial"
] | [((9411, 9433), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (9423, 9433), False, 'from os import path\n'), ((3560, 3579), 'math.ceil', 'math.ceil', (['(size / 2)'], {}), '(size / 2)\n', (3569, 3579), False, 'import math\n'), ((4034, 4053), 'copy.deepcopy', 'copy.deepcopy', (['grid'], {}), '(grid)\n', (4047, 4053), False, 'import copy\n'), ((4729, 4752), 'numpy.asarray', 'np.asarray', (['radius_grid'], {}), '(radius_grid)\n', (4739, 4752), True, 'import numpy as np\n'), ((4851, 4896), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.5)'], {'size': '(half, half)'}), '(1, 0.5, size=(half, half))\n', (4869, 4896), True, 'import numpy as np\n'), ((5162, 5197), 'numpy.clip', 'np.clip', (['corner_grid', '(0)'], {'a_max': 'None'}), '(corner_grid, 0, a_max=None)\n', (5169, 5197), True, 'import numpy as np\n'), ((7082, 7116), 'kaggle_environments.utils.structify', 'utils.structify', (['board.observation'], {}), '(board.observation)\n', (7097, 7116), False, 'from kaggle_environments import utils\n'), ((9459, 9493), 'os.path.join', 'path.join', (['dir_path', '"""halite.json"""'], {}), "(dir_path, 'halite.json')\n", (9468, 9493), False, 'from os import path\n'), ((9550, 9570), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (9559, 9570), False, 'import json\n'), ((4912, 4944), 'numpy.clip', 'np.clip', (['add_grid', '(0)'], {'a_max': 'None'}), '(add_grid, 0, a_max=None)\n', (4919, 4944), True, 'import numpy as np\n'), ((9621, 9653), 'os.path.join', 'path.join', (['dir_path', '"""halite.js"""'], {}), "(dir_path, 'halite.js')\n", (9630, 9653), False, 'from os import path\n'), ((3783, 3803), 'random.randint', 'randint', (['(0)', '(half - 1)'], {}), '(0, half - 1)\n', (3790, 3803), False, 'from random import choice, randint, sample\n'), ((3943, 3971), 'random.randint', 'randint', (['(half // 2)', '(half - 1)'], {}), '(half // 2, half - 1)\n', (3950, 3971), False, 'from random import choice, randint, sample\n'), ((4768, 4813), 
'numpy.random.gumbel', 'np.random.gumbel', (['(0)', '(300.0)'], {'size': '(half, half)'}), '(0, 300.0, size=(half, half))\n', (4784, 4813), True, 'import numpy as np\n'), ((5076, 5131), 'numpy.random.gumbel', 'np.random.gumbel', (['(0)', '(500.0)'], {'size': '(half // 4, half // 4)'}), '(0, 500.0, size=(half // 4, half // 4))\n', (5092, 5131), True, 'import numpy as np\n'), ((1569, 1582), 'random.randint', 'randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1576, 1582), False, 'from random import choice, randint, sample\n'), ((1800, 1814), 'random.randint', 'randint', (['(0)', '(19)'], {}), '(0, 19)\n', (1807, 1814), False, 'from random import choice, randint, sample\n'), ((3761, 3781), 'random.randint', 'randint', (['(0)', '(half - 1)'], {}), '(0, half - 1)\n', (3768, 3781), False, 'from random import choice, randint, sample\n'), ((3913, 3941), 'random.randint', 'randint', (['(half // 2)', '(half - 1)'], {}), '(half // 2, half - 1)\n', (3920, 3941), False, 'from random import choice, randint, sample\n'), ((2046, 2059), 'random.randint', 'randint', (['(0)', '(1)'], {}), '(0, 1)\n', (2053, 2059), False, 'from random import choice, randint, sample\n'), ((2857, 2870), 'random.randint', 'randint', (['(0)', '(4)'], {}), '(0, 4)\n', (2864, 2870), False, 'from random import choice, randint, sample\n'), ((6175, 6198), 'math.ceil', 'math.ceil', (['(3 * size / 4)'], {}), '(3 * size / 4)\n', (6184, 6198), False, 'import math\n')] |
# coding: utf-8
import numpy as np
import random
import tensorflow as tf
import logging
import imageio
import read_data
# from data_generator import DataGenerator
from origin_mil_pick import MIL
# from evaluation.eval_reach import evaluate_vision_reach
# from evaluation.eval_push import evaluate_push
from tensorflow.python.platform import flags
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
FLAGS = flags.FLAGS
LOGGER = logging.getLogger(__name__)
## Dataset/method options
flags.DEFINE_string('experiment', 'pick_place', 'sim_vision_reach or sim_push')
flags.DEFINE_string('data_path', './pick_dataset_origin/human_robot_dataset/',
'path to the directory where demo files that containing robot states and actions are stored')
flags.DEFINE_string('demo_gif_dir', 'data', 'path to the videos of demonstrations')
flags.DEFINE_string('gif_prefix', 'object', 'prefix of the video directory for each task, e.g. object_0 for task 0')
flags.DEFINE_integer('im_width', 264,
'width of the images in the demo videos, 125 for sim_push, and 80 for sim_vision_reach')
flags.DEFINE_integer('im_height', 196,
'height of the images in the demo videos, 125 for sim_push, and 64 for sim_vision_reach')
flags.DEFINE_integer('num_channels', 3, 'number of channels of the images in the demo videos')
flags.DEFINE_integer('T', 3, 'time horizon of the demo videos, 50 for reach, 100 for push')
flags.DEFINE_bool('hsv', False, 'convert the image to HSV format')
flags.DEFINE_bool('use_noisy_demos', False, 'use noisy demonstrations or not (for domain shift)')
flags.DEFINE_string('noisy_demo_gif_dir', None, 'path to the videos of noisy demonstrations')
flags.DEFINE_string('noisy_demo_file', None,
'path to the directory where noisy demo files that containing robot states and actions are stored')
flags.DEFINE_bool('no_action', True, 'do not include actions in the demonstrations for inner update')
flags.DEFINE_bool('no_state', False, 'do not include states in the demonstrations during training')
flags.DEFINE_bool('no_final_eept', False, 'do not include final ee pos in the demonstrations for inner update')
flags.DEFINE_bool('zero_state', True,
'zero-out states (meta-learn state) in the demonstrations for inner update (used in the paper with video-only demos)')
flags.DEFINE_bool('two_arms', False, 'use two-arm structure when state is zeroed-out')
flags.DEFINE_integer('training_set_size', -1, 'size of the training set, 1500 for sim_reach, 693 for sim push, anzero_stated \
-1 for all data except those in validation set')
flags.DEFINE_integer('val_set_size', 150, 'size of the training set, 150 for sim_reach and 76 for sim push')
## Training options
flags.DEFINE_integer('metatrain_iterations', 50000, 'number of metatraining iterations.') # 30k for pushing, 50k for reaching and placing
flags.DEFINE_integer('meta_batch_size', 16, 'number of tasks sampled per meta-update') # 15 for reaching, 15 for pushing, 12 for placing
flags.DEFINE_integer('meta_test_batch_size', 1, 'number of tasks sampled per meta-update') # 15 for reaching, 15 for pushing, 12 for placing
flags.DEFINE_float('meta_lr', 1e-4, 'the base learning rate of the generator')
flags.DEFINE_integer('update_batch_size', 1,
'number of examples used for inner gradient update (K for K-shot learning).')
flags.DEFINE_float('train_update_lr', 1e-4,
'step size alpha for inner gradient update.') # 0.001 for reaching, 0.01 for pushing and placing
flags.DEFINE_integer('num_updates', 1, 'number of inner gradient updates during training.') # 5 for placing
flags.DEFINE_bool('clip', True, 'use gradient clipping for fast gradient')
flags.DEFINE_float('clip_max', 100.0, 'maximum clipping value for fast gradient')
flags.DEFINE_float('clip_min', -100.0, 'minimum clipping value for fast gradient')
# flags.DEFINE_float('clip_max', 20.0, 'maximum clipping value for fast gradient')
# flags.DEFINE_float('clip_min', -20.0, 'minimum clipping value for fast gradient')
flags.DEFINE_bool('fc_bt', True, 'use bias transformation for the first fc layer')
flags.DEFINE_bool('all_fc_bt', False, 'use bias transformation for all fc layers')
flags.DEFINE_bool('conv_bt', False, 'use bias transformation for the first conv layer, N/A for using pretraining')
flags.DEFINE_integer('bt_dim', 10, 'the dimension of bias transformation for FC layers')
flags.DEFINE_string('pretrain_weight_path', 'N/A', 'path to pretrained weights')
flags.DEFINE_bool('train_pretrain_conv1', False, 'whether to finetune the pretrained weights')
flags.DEFINE_bool('two_head', True, 'use two-head architecture')
flags.DEFINE_bool('learn_final_eept', False, 'learn an auxiliary loss for predicting final end-effector pose')
flags.DEFINE_bool('learn_final_eept_whole_traj', False, 'learn an auxiliary loss for predicting final end-effector pose \
by passing the whole trajectory of eepts (used for video-only models)')
flags.DEFINE_bool('stopgrad_final_eept', True,
'stop the gradient when concatenate the predicted final eept with the feature points')
flags.DEFINE_integer('final_eept_min', 6, 'first index of the final eept in the action array')
flags.DEFINE_integer('final_eept_max', 8, 'last index of the final eept in the action array')
flags.DEFINE_float('final_eept_loss_eps', 0.1, 'the coefficient of the auxiliary loss')
flags.DEFINE_float('act_loss_eps', 1.0, 'the coefficient of the action loss')
flags.DEFINE_float('loss_multiplier', 100.0,
'the constant multiplied with the loss value, 100 for reach and 50 for push')
flags.DEFINE_bool('use_l1_l2_loss', False, 'use a loss with combination of l1 and l2')
flags.DEFINE_float('l2_eps', 0.01, 'coeffcient of l2 loss')
flags.DEFINE_bool('shuffle_val', False, 'whether to choose the validation set via shuffling or not')
## Model options
flags.DEFINE_integer('random_seed', 0, 'random seed for training')
flags.DEFINE_bool('fp', True, 'use spatial soft-argmax or not')
flags.DEFINE_string('norm', 'layer_norm', 'batch_norm, layer_norm, or None')
flags.DEFINE_bool('dropout', False, 'use dropout for fc layers or not')
flags.DEFINE_float('keep_prob', 0.5, 'keep probability for dropout')
flags.DEFINE_integer('num_filters', 64,
'number of filters for conv nets -- 64 for placing, 16 for pushing, 40 for reaching.')
flags.DEFINE_integer('filter_size', 3, 'filter size for conv nets -- 3 for placing, 5 for pushing, 3 for reaching.')
flags.DEFINE_integer('num_conv_layers', 5, 'number of conv layers -- 5 for placing, 4 for pushing, 3 for reaching.')
flags.DEFINE_integer('num_strides', 3,
'number of conv layers with strided filters -- 3 for placing, 4 for pushing, 3 for reaching.')
flags.DEFINE_bool('conv', True, 'whether or not to use a convolutional network, only applicable in some cases')
flags.DEFINE_integer('num_fc_layers', 3, 'number of fully-connected layers')
flags.DEFINE_integer('layer_size', 200, 'hidden dimension of fully-connected layers')
flags.DEFINE_bool('temporal_conv_2_head', True,
'whether or not to use temporal convolutions for the two-head architecture in video-only setting.')
flags.DEFINE_bool('temporal_conv_2_head_ee', False, 'whether or not to use temporal convolutions for the two-head architecture in video-only setting \
for predicting the ee pose.')
flags.DEFINE_integer('temporal_filter_size', 10, 'filter size for temporal convolution')
flags.DEFINE_integer('temporal_num_filters', 32, 'number of filters for temporal convolution')
flags.DEFINE_integer('temporal_num_filters_ee', 32, 'number of filters for temporal convolution for ee pose prediction')
flags.DEFINE_integer('temporal_num_layers', 3, 'number of layers for temporal convolution for ee pose prediction')
flags.DEFINE_integer('temporal_num_layers_ee', 3, 'number of layers for temporal convolution for ee pose prediction')
flags.DEFINE_string('init', 'xavier', 'initializer for conv weights. Choose among random, xavier, and he')
flags.DEFINE_bool('max_pool', False, 'Whether or not to use max pooling rather than strided convolutions')
flags.DEFINE_bool('stop_grad', False, 'if True, do not use second derivatives in meta-optimization (for axis_angle)')
## Logging, saving, and testing options
flags.DEFINE_bool('log', True, 'if false, do not log summaries, for debugging code.')
flags.DEFINE_string('save_dir', './daml_pick_logs', 'directory for summaries and checkpoints.')
# flags.DEFINE_string('save_dir', './daml_human_pick_logs', 'directory for summaries and checkpoints.')
flags.DEFINE_bool('resume', True, 'resume training if there is a model available')
flags.DEFINE_bool('train', True, 'True to train, False to test.')
flags.DEFINE_integer('restore_iter', -1, 'iteration to load model (-1 for latest model)')
flags.DEFINE_integer('begin_restore_iter', 41000, 'iteration to load model (-1 for latest model)')
flags.DEFINE_integer('train_update_batch_size', -1, 'number of examples used for gradient update during training \
(use if you want to test with a different number).')
flags.DEFINE_integer('test_update_batch_size', 1, 'number of demos used during test time')
flags.DEFINE_float('gpu_memory_fraction', 0.9, 'fraction of memory used in gpu')
flags.DEFINE_bool('record_gifs', True, 'record gifs during evaluation')
flags.DEFINE_integer('output_data', 6, '')
flags.DEFINE_integer('color_num', 3, '')
flags.DEFINE_integer('object_num', 4, '')
flags.DEFINE_integer('train_task_num', 6, '')
flags.DEFINE_integer('task_num', 8, '')
flags.DEFINE_integer('demo_num', 5, '')
# flags.DEFINE_integer('index_num', 1, '')
flags.DEFINE_integer('index_range', 20, '')
flags.DEFINE_integer('index_train_range', 20, '')
flags.DEFINE_string('demo_type', 'robot', 'robot or human')
# flags.DEFINE_string('demo_type', 'human', 'robot or human')
flags.DEFINE_string('target_type', 'robot', '')
# flags.DEFINE_float('weight_xy', 0.999, '')
# flags.DEFINE_float('weight_z', 0.001, '')
# flags.DEFINE_float('weight_rxyz', 0.001, '')
flags.DEFINE_float('weight_xy', 1.0, '')
flags.DEFINE_float('weight_z', 0, '')
flags.DEFINE_float('weight_rxyz', 0, '')
flags.DEFINE_string('test_data_color', 'color_yellow', '')
def generate_data(if_train=True):
    """Sample a random batch of (demo, target) trajectory pairs from disk.

    For each batch element a random (color, object, task, demo, target) tuple
    is drawn. Training samples tasks below FLAGS.train_task_num; evaluation
    samples the held-out tasks and always uses the last color in the list.
    The conditioning demo is read per FLAGS.demo_type (robot/human); the
    target is always a robot demo.

    Returns:
        (obsas, obsbs, actionas, actionbs, stateas, statebs): images flattened
        to [batch, T, W*H*C], actions [batch, T, output_data], states zeroed.
    """
    if if_train:
        batch_size = FLAGS.meta_batch_size
    else:
        batch_size = FLAGS.meta_test_batch_size
    # (randint(0, 100) + 1) % k yields a (slightly non-uniform) index in [0, k).
    color_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.color_num
    print('color_list', color_list)
    object_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.object_num
    print('object_list', object_list)
    if if_train:
        task_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.train_task_num
    else:
        task_list = np.random.randint(FLAGS.train_task_num, FLAGS.task_num, size=batch_size)
    print('task_list', task_list)
    demo_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.demo_num
    print('demo_list', demo_list)
    target_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.demo_num
    print('target_list', target_list)
    obsas = []
    obsbs = []
    stateas = []
    statebs = []
    actionas = []
    actionbs = []
    color_num = ['color_blue', 'color_green', 'color_orange', 'color_yellow']
    # color_num = ['color_blue', 'color_green', 'color_orange']
    object_num = ['object_type_animal', 'object_type_car', 'object_type_dinosaur', 'object_type_tool']
    for element in range(0, batch_size):
        if if_train:
            demo_path = '%s/%s/%s/%s/task_%d/demo_%d' % (
                FLAGS.data_path, color_num[color_list[element]], object_num[object_list[element]], FLAGS.demo_type,
                task_list[element], demo_list[element])
            target_path = '%s/%s/%s/%s/task_%d/demo_%d' % (
                FLAGS.data_path, color_num[color_list[element]], object_num[object_list[element]], FLAGS.target_type,
                task_list[element], target_list[element])
        else:
            # Evaluation always uses the last (held-out) color.
            demo_path = '%s/%s/%s/%s/task_%d/demo_%d' % (
                FLAGS.data_path, color_num[-1], object_num[object_list[element]], FLAGS.demo_type,
                task_list[element], demo_list[element])
            target_path = '%s/%s/%s/%s/task_%d/demo_%d' % (
                FLAGS.data_path, color_num[-1], object_num[object_list[element]], FLAGS.target_type,
                task_list[element], target_list[element])
        # print('demo_path', demo_path)
        # print('target_path', target_path)
        # Same random frame index is used for both demo and target reads.
        index = np.random.randint(0, 20)
        if FLAGS.demo_type == 'robot':
            obsa, statea, actiona = read_data.Read_Robot_Data2(demo_path, FLAGS.T, index)
        elif FLAGS.demo_type == 'human':
            obsa, statea, actiona = read_data.Read_Human_Data2(demo_path, FLAGS.T, index)
        obsb, stateb, actionb = read_data.Read_Robot_Data2(target_path, FLAGS.T, index)
        obsas.append(obsa)
        obsbs.append(obsb)
        stateas.append(statea)
        statebs.append(stateb)
        actionas.append(actiona)
        actionbs.append(actionb)
    obsas = np.reshape(obsas, [batch_size, FLAGS.T, FLAGS.im_width * FLAGS.im_height * FLAGS.num_channels])
    obsbs = np.reshape(obsbs, [batch_size, FLAGS.T, FLAGS.im_width * FLAGS.im_height * FLAGS.num_channels])
    actionas = np.reshape(actionas, [batch_size, FLAGS.T, FLAGS.output_data])
    actionbs = np.reshape(actionbs, [batch_size, FLAGS.T, FLAGS.output_data])
    # States are intentionally zeroed (video-only conditioning, FLAGS.zero_state).
    stateas = np.zeros([batch_size, FLAGS.T, FLAGS.output_data])
    statebs = np.zeros([batch_size, FLAGS.T, FLAGS.output_data])
    return obsas, obsbs, actionas, actionbs, stateas, statebs
def generate_place_test_data(demo_path, target_path, batch_size=1, index=0):
    """Load one demo/target trajectory pair from disk and batch it for evaluation.

    Args:
        demo_path: directory of the conditioning demo (robot or human per FLAGS.demo_type).
        target_path: directory of the robot target demo to imitate.
        batch_size: batch dimension of the returned arrays (1 at test time).
        index: frame index forwarded to the data readers.

    Returns:
        (obsas, obsbs, actionas, actionbs, stateas, statebs): images flattened
        to [batch, T, W*H*C], actions [batch, T, output_data], states zeroed.
    """
    # color_num = ['color_blue', 'color_green', 'color_orange', 'color_yellow']
    # object_num = ['object_type_animal', 'object_type_car', 'object_type_dinosaur', 'object_type_tool']
    # print('demo_path', demo_path)
    # print('target_path', target_path)
    if FLAGS.demo_type == 'robot':
        obsa, statea, actiona = read_data.Read_Robot_Data2(demo_path, FLAGS.T, index)
    elif FLAGS.demo_type == 'human':
        obsa, statea, actiona = read_data.Read_Human_Data2(demo_path, FLAGS.T, index)
    obsb, stateb, actionb = read_data.Read_Robot_Data2(target_path, FLAGS.T, index)
    obsas = np.reshape(obsa, [batch_size, FLAGS.T, FLAGS.im_width * FLAGS.im_height * FLAGS.num_channels])
    obsbs = np.reshape(obsb, [batch_size, FLAGS.T, FLAGS.im_width * FLAGS.im_height * FLAGS.num_channels])
    actionas = np.reshape(actiona, [batch_size, FLAGS.T, FLAGS.output_data])
    actionbs = np.reshape(actionb, [batch_size, FLAGS.T, FLAGS.output_data])
    # print('actionas', actionas)
    # print('actionbs', actionbs)
    # States are intentionally zeroed (video-only conditioning, FLAGS.zero_state).
    stateas = np.zeros([batch_size, FLAGS.T, FLAGS.output_data])
    statebs = np.zeros([batch_size, FLAGS.T, FLAGS.output_data])
    return obsas, obsbs, actionas, actionbs, stateas, statebs
def generate_place_data(obsas, obsbs, actionas, actionbs, stateas, statebs, if_train=True):
    """Collapse the target-side tensors to single timesteps for the placing task.

    The demo-side tensors (obsas/actionas/stateas) pass through unchanged; the
    target side keeps only the first observation frame, the action at timestep
    1, and the last state, each reshaped to [batch, 1, dim].
    """
    batch_size = FLAGS.meta_batch_size if if_train else FLAGS.meta_test_batch_size
    first_obs = np.reshape(obsbs[:, 0, :], [batch_size, 1, -1])
    pick_action = np.reshape(actionbs[:, 1, :], [batch_size, 1, -1])
    final_state = np.reshape(statebs[:, -1, :], [batch_size, 1, -1])
    return obsas, first_obs, actionas, pick_action, stateas, final_state
def if_success(a, b, domain=0.08):
    """Return 1.0 (and print 'success') if the first two components of a and b
    each differ by at most ``domain``; otherwise return 0.0 (and print 'fail')."""
    diff = np.abs(np.squeeze(a)[:2] - np.squeeze(b)[:2])
    if diff[0] <= domain and diff[1] <= domain:
        print('success')
        return 1.0
    print('fail')
    return 0.0
def train(graph, model, saver, sess, save_dir, restore_itr=0):
    """
    Train the model.

    NOTE(review): despite the name, this function only evaluates. It sweeps all
    held-out combinations (color x object x task >= FLAGS.train_task_num x
    target demo x frame index), runs ``model.test_act_op`` for each, and
    returns the overall pick success rate; no training op is executed here.
    """
    PRINT_INTERVAL = 100
    PRINT_INTERVAL = 100  # duplicate assignment in original, kept as-is
    TEST_PRINT_INTERVAL = 100
    SUMMARY_INTERVAL = 100
    SAVE_INTERVAL = 1000
    TOTAL_ITERS = FLAGS.metatrain_iterations
    prelosses, postlosses = [], []
    save_dir = save_dir + '/model'
    train_writer = tf.summary.FileWriter(save_dir, graph)
    # test_path = FLAGS.data_path + FLAGS.test_data_color
    color_list = ['color_blue', 'color_green', 'color_orange', 'color_yellow']
    object_list = ['object_type_animal', 'object_type_car', 'object_type_dinosaur', 'object_type_tool']
    itr = 0
    success_num=0
    for color_id in range(0, FLAGS.color_num):
        for object_id in range(0, FLAGS.object_num):
            for task_id in range(FLAGS.train_task_num, FLAGS.task_num):
                for target_id in range(0, FLAGS.demo_num):
                    for index_id in range(0, FLAGS.index_range):
                        # for index_id in range(0, FLAGS.index_train_range):
                        # for index_id in range(FLAGS.index_train_range, FLAGS.index_range):
                        test_path = FLAGS.data_path + color_list[color_id]
                        # Conditioning demo id is random; the target demo id is swept.
                        demo_id = (np.random.randint(0, 100, size=1) + 1) % FLAGS.demo_num
                        if FLAGS.demo_type == 'robot':
                            demo_path = '%s/%s/robot/task_%d/demo_%d/' % (
                                test_path, object_list[object_id], task_id, demo_id)
                        else:
                            demo_path = '%s/%s/human/task_%d/demo_%d/' % (
                                test_path, object_list[object_id], task_id, demo_id)
                        target_path = '%s/%s/robot/task_%d/demo_%d/' % (
                            test_path, object_list[object_id], task_id, target_id)
                        obsas, obsbs, actionas, actionbs, stateas, statebs = generate_place_test_data(demo_path,
                                                                                                      target_path,
                                                                                                      index=index_id)
                        obsas, obsbs, actionas, actionbs, stateas, statebs = generate_place_data(obsas, obsbs, actionas,
                                                                                                actionbs, stateas,
                                                                                                statebs,
                                                                                                if_train=False)
                        # print('actionas', actionas)
                        feed_dict = {
                            model.obsa: obsas,
                            model.obsb: obsbs,
                            model.statea: stateas,
                            model.stateb: statebs,
                            model.actiona: actionas,
                            # model.actionb: actionbs
                        }
                        input_tensors = [model.test_act_op]
                        # input_tensors = [model.total_losses2[model.num_updates - 1], model.test_act_op]
                        with graph.as_default():
                            results = sess.run(input_tensors, feed_dict=feed_dict)
                        print(itr, 'demo actions', actionbs)
                        print (itr, 'predicted actions', results[-1])
                        itr += 1
                        # Success = predicted action within tolerance of the demo action.
                        success_num += if_success(actionbs, results[-1])
                        print(itr, 'current success rate:', success_num / itr)
                        # print(itr, 'current loss:', results[0], 'current success rate:', success_num / itr)
    print('sample num', itr, 'success_num', success_num, 'total success rate is:', success_num / itr)
    return success_num / itr
def main():
tf.set_random_seed(FLAGS.random_seed)
np.random.seed(FLAGS.random_seed)
random.seed(FLAGS.random_seed)
graph = tf.Graph()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
tf_config = tf.ConfigProto(gpu_options=gpu_options)
sess = tf.Session(graph=graph, config=tf_config)
sess = tf.Session(graph=graph)
network_config = {
'num_filters': [FLAGS.num_filters] * FLAGS.num_conv_layers,
'strides': [[1, 2, 2, 1]] * FLAGS.num_strides + [[1, 1, 1, 1]] * (FLAGS.num_conv_layers - FLAGS.num_strides),
'filter_size': FLAGS.filter_size,
'image_width': FLAGS.im_width,
'image_height': FLAGS.im_height,
'image_channels': FLAGS.num_channels,
'n_layers': FLAGS.num_fc_layers,
'layer_size': FLAGS.layer_size,
'initialization': FLAGS.init,
}
# data_generator = DataGenerator()
state_idx = range(FLAGS.output_data)
img_idx = range(len(state_idx), len(state_idx) + FLAGS.im_height * FLAGS.im_width * FLAGS.num_channels)
# need to compute x_idx and img_idx from data_generator
model = MIL(FLAGS.output_data, state_idx=state_idx, img_idx=img_idx, network_config=network_config)
# TODO: figure out how to save summaries and checkpoints
exp_string = FLAGS.experiment + '.' + FLAGS.init + '_init.' + str(FLAGS.num_conv_layers) + '_conv' + '.' + str(
FLAGS.num_strides) + '_strides' + '.' + str(FLAGS.num_filters) + '_filters' + \
'.' + str(FLAGS.num_fc_layers) + '_fc' + '.' + str(FLAGS.layer_size) + '_dim' + '.bt_dim_' + str(
FLAGS.bt_dim) + '.mbs_' + str(FLAGS.meta_batch_size) + \
'.ubs_' + str(FLAGS.update_batch_size) + '.numstep_' + str(FLAGS.num_updates) + '.updatelr_' + str(
FLAGS.train_update_lr)
if FLAGS.clip:
exp_string += '.clip_' + str(int(FLAGS.clip_max))
if FLAGS.conv_bt:
exp_string += '.conv_bt'
if FLAGS.all_fc_bt:
exp_string += '.all_fc_bt'
if FLAGS.fp:
exp_string += '.fp'
if FLAGS.learn_final_eept:
exp_string += '.learn_ee_pos'
if FLAGS.no_action:
exp_string += '.no_action'
if FLAGS.zero_state:
exp_string += '.zero_state'
if FLAGS.two_head:
exp_string += '.two_heads'
if FLAGS.two_arms:
exp_string += '.two_arms'
if FLAGS.temporal_conv_2_head:
exp_string += '.1d_conv_act_' + str(FLAGS.temporal_num_layers) + '_' + str(FLAGS.temporal_num_filters)
if FLAGS.temporal_conv_2_head_ee:
exp_string += '_ee_' + str(FLAGS.temporal_num_layers_ee) + '_' + str(FLAGS.temporal_num_filters_ee)
exp_string += '_' + str(FLAGS.temporal_filter_size) + 'x1_filters'
if FLAGS.training_set_size != -1:
exp_string += '.' + str(FLAGS.training_set_size) + '_trials'
save_dir = FLAGS.save_dir + '/' + exp_string
# put here for now
if FLAGS.train:
    # Training graph: no pre-built input tensors are wired in
    # (input_tensors=None); batches are supplied at run time.
    model.init_network(graph, input_tensors=None, restore_iter=FLAGS.restore_iter)
else:
    # Evaluation graph, built under the 'Testing' scope prefix.
    model.init_network(graph, prefix='Testing')
with graph.as_default():
    # Saver used to restore checkpoint weights (keeps up to 10 on disk).
    saver = tf.train.Saver(max_to_keep=10)
    # Initialize all variables before any restore.
    init_op = tf.global_variables_initializer()
    sess.run(init_op, feed_dict=None)
    # Start queue runners (used for loading videos on the fly)
    tf.train.start_queue_runners(sess=sess)

# Evaluate a sweep of 10 checkpoints at iterations
# begin_restore_iter, +1000, ..., +9000.  The last checkpoint is saved one
# step earlier (iteration - 1), hence the special case for the final index.
success_rates = []
for i in range(10):
    # latest_checkpoint is only used to recover the on-disk path prefix of
    # the checkpoint files; the target iteration is substituted below.
    latest = tf.train.latest_checkpoint(save_dir)
    prefix = latest[:latest.index('model')]
    step = FLAGS.begin_restore_iter + i * 1000
    if i == 9:
        step -= 1
    model_file = prefix + 'model_' + str(step)
    print(i, "Restoring model weights from " + model_file)
    with graph.as_default():
        saver.restore(sess, model_file)
    # Run evaluation for this checkpoint and record its success rate.
    success_rate = train(graph, model, saver, sess, save_dir, restore_itr=FLAGS.restore_iter)
    success_rates.append(success_rate)
print('success_rates are', success_rates)
if __name__ == "__main__":
main() | [
"logging.getLogger",
"read_data.Read_Human_Data2",
"tensorflow.set_random_seed",
"tensorflow.GPUOptions",
"tensorflow.Graph",
"numpy.reshape",
"tensorflow.Session",
"numpy.random.seed",
"tensorflow.ConfigProto",
"numpy.abs",
"tensorflow.python.platform.flags.DEFINE_integer",
"tensorflow.train.... | [((429, 456), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (446, 456), False, 'import logging\n'), ((484, 563), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""experiment"""', '"""pick_place"""', '"""sim_vision_reach or sim_push"""'], {}), "('experiment', 'pick_place', 'sim_vision_reach or sim_push')\n", (503, 563), False, 'from tensorflow.python.platform import flags\n'), ((564, 749), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""data_path"""', '"""./pick_dataset_origin/human_robot_dataset/"""', '"""path to the directory where demo files that containing robot states and actions are stored"""'], {}), "('data_path',\n './pick_dataset_origin/human_robot_dataset/',\n 'path to the directory where demo files that containing robot states and actions are stored'\n )\n", (583, 749), False, 'from tensorflow.python.platform import flags\n'), ((757, 844), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""demo_gif_dir"""', '"""data"""', '"""path to the videos of demonstrations"""'], {}), "('demo_gif_dir', 'data',\n 'path to the videos of demonstrations')\n", (776, 844), False, 'from tensorflow.python.platform import flags\n'), ((841, 961), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""gif_prefix"""', '"""object"""', '"""prefix of the video directory for each task, e.g. object_0 for task 0"""'], {}), "('gif_prefix', 'object',\n 'prefix of the video directory for each task, e.g. 
object_0 for task 0')\n", (860, 961), False, 'from tensorflow.python.platform import flags\n'), ((958, 1094), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""im_width"""', '(264)', '"""width of the images in the demo videos, 125 for sim_push, and 80 for sim_vision_reach"""'], {}), "('im_width', 264,\n 'width of the images in the demo videos, 125 for sim_push, and 80 for sim_vision_reach'\n )\n", (978, 1094), False, 'from tensorflow.python.platform import flags\n'), ((1107, 1244), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""im_height"""', '(196)', '"""height of the images in the demo videos, 125 for sim_push, and 64 for sim_vision_reach"""'], {}), "('im_height', 196,\n 'height of the images in the demo videos, 125 for sim_push, and 64 for sim_vision_reach'\n )\n", (1127, 1244), False, 'from tensorflow.python.platform import flags\n'), ((1257, 1355), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_channels"""', '(3)', '"""number of channels of the images in the demo videos"""'], {}), "('num_channels', 3,\n 'number of channels of the images in the demo videos')\n", (1277, 1355), False, 'from tensorflow.python.platform import flags\n'), ((1352, 1447), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""T"""', '(3)', '"""time horizon of the demo videos, 50 for reach, 100 for push"""'], {}), "('T', 3,\n 'time horizon of the demo videos, 50 for reach, 100 for push')\n", (1372, 1447), False, 'from tensorflow.python.platform import flags\n'), ((1444, 1510), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""hsv"""', '(False)', '"""convert the image to HSV format"""'], {}), "('hsv', False, 'convert the image to HSV format')\n", (1461, 1510), False, 'from tensorflow.python.platform import flags\n'), ((1511, 1612), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""use_noisy_demos"""', '(False)', 
'"""use noisy demonstrations or not (for domain shift)"""'], {}), "('use_noisy_demos', False,\n 'use noisy demonstrations or not (for domain shift)')\n", (1528, 1612), False, 'from tensorflow.python.platform import flags\n'), ((1609, 1706), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""noisy_demo_gif_dir"""', 'None', '"""path to the videos of noisy demonstrations"""'], {}), "('noisy_demo_gif_dir', None,\n 'path to the videos of noisy demonstrations')\n", (1628, 1706), False, 'from tensorflow.python.platform import flags\n'), ((1703, 1856), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""noisy_demo_file"""', 'None', '"""path to the directory where noisy demo files that containing robot states and actions are stored"""'], {}), "('noisy_demo_file', None,\n 'path to the directory where noisy demo files that containing robot states and actions are stored'\n )\n", (1722, 1856), False, 'from tensorflow.python.platform import flags\n'), ((1868, 1973), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""no_action"""', '(True)', '"""do not include actions in the demonstrations for inner update"""'], {}), "('no_action', True,\n 'do not include actions in the demonstrations for inner update')\n", (1885, 1973), False, 'from tensorflow.python.platform import flags\n'), ((1970, 2073), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""no_state"""', '(False)', '"""do not include states in the demonstrations during training"""'], {}), "('no_state', False,\n 'do not include states in the demonstrations during training')\n", (1987, 2073), False, 'from tensorflow.python.platform import flags\n'), ((2070, 2185), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""no_final_eept"""', '(False)', '"""do not include final ee pos in the demonstrations for inner update"""'], {}), "('no_final_eept', False,\n 'do not include final ee pos in the demonstrations for 
inner update')\n", (2087, 2185), False, 'from tensorflow.python.platform import flags\n'), ((2182, 2347), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""zero_state"""', '(True)', '"""zero-out states (meta-learn state) in the demonstrations for inner update (used in the paper with video-only demos)"""'], {}), "('zero_state', True,\n 'zero-out states (meta-learn state) in the demonstrations for inner update (used in the paper with video-only demos)'\n )\n", (2199, 2347), False, 'from tensorflow.python.platform import flags\n'), ((2357, 2447), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""two_arms"""', '(False)', '"""use two-arm structure when state is zeroed-out"""'], {}), "('two_arms', False,\n 'use two-arm structure when state is zeroed-out')\n", (2374, 2447), False, 'from tensorflow.python.platform import flags\n'), ((2444, 2674), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""training_set_size"""', '(-1)', '"""size of the training set, 1500 for sim_reach, 693 for sim push, anzero_stated -1 for all data except those in validation set"""'], {}), "('training_set_size', -1,\n 'size of the training set, 1500 for sim_reach, 693 for sim push, anzero_stated -1 for all data except those in validation set'\n )\n", (2464, 2674), False, 'from tensorflow.python.platform import flags\n'), ((2668, 2780), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""val_set_size"""', '(150)', '"""size of the training set, 150 for sim_reach and 76 for sim push"""'], {}), "('val_set_size', 150,\n 'size of the training set, 150 for sim_reach and 76 for sim push')\n", (2688, 2780), False, 'from tensorflow.python.platform import flags\n'), ((2798, 2891), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""metatrain_iterations"""', '(50000)', '"""number of metatraining iterations."""'], {}), "('metatrain_iterations', 50000,\n 'number of 
metatraining iterations.')\n", (2818, 2891), False, 'from tensorflow.python.platform import flags\n'), ((2938, 3028), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""meta_batch_size"""', '(16)', '"""number of tasks sampled per meta-update"""'], {}), "('meta_batch_size', 16,\n 'number of tasks sampled per meta-update')\n", (2958, 3028), False, 'from tensorflow.python.platform import flags\n'), ((3077, 3171), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""meta_test_batch_size"""', '(1)', '"""number of tasks sampled per meta-update"""'], {}), "('meta_test_batch_size', 1,\n 'number of tasks sampled per meta-update')\n", (3097, 3171), False, 'from tensorflow.python.platform import flags\n'), ((3220, 3305), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""meta_lr"""', '(0.0001)', '"""the base learning rate of the generator"""'], {}), "('meta_lr', 0.0001, 'the base learning rate of the generator'\n )\n", (3238, 3305), False, 'from tensorflow.python.platform import flags\n'), ((3299, 3430), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""update_batch_size"""', '(1)', '"""number of examples used for inner gradient update (K for K-shot learning)."""'], {}), "('update_batch_size', 1,\n 'number of examples used for inner gradient update (K for K-shot learning).'\n )\n", (3319, 3430), False, 'from tensorflow.python.platform import flags\n'), ((3443, 3538), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""train_update_lr"""', '(0.0001)', '"""step size alpha for inner gradient update."""'], {}), "('train_update_lr', 0.0001,\n 'step size alpha for inner gradient update.')\n", (3461, 3538), False, 'from tensorflow.python.platform import flags\n'), ((3604, 3699), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_updates"""', '(1)', '"""number of inner gradient updates during training."""'], {}), 
"('num_updates', 1,\n 'number of inner gradient updates during training.')\n", (3624, 3699), False, 'from tensorflow.python.platform import flags\n'), ((3713, 3787), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""clip"""', '(True)', '"""use gradient clipping for fast gradient"""'], {}), "('clip', True, 'use gradient clipping for fast gradient')\n", (3730, 3787), False, 'from tensorflow.python.platform import flags\n'), ((3788, 3873), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""clip_max"""', '(100.0)', '"""maximum clipping value for fast gradient"""'], {}), "('clip_max', 100.0,\n 'maximum clipping value for fast gradient')\n", (3806, 3873), False, 'from tensorflow.python.platform import flags\n'), ((3870, 3956), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""clip_min"""', '(-100.0)', '"""minimum clipping value for fast gradient"""'], {}), "('clip_min', -100.0,\n 'minimum clipping value for fast gradient')\n", (3888, 3956), False, 'from tensorflow.python.platform import flags\n'), ((4120, 4206), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""fc_bt"""', '(True)', '"""use bias transformation for the first fc layer"""'], {}), "('fc_bt', True,\n 'use bias transformation for the first fc layer')\n", (4137, 4206), False, 'from tensorflow.python.platform import flags\n'), ((4203, 4289), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""all_fc_bt"""', '(False)', '"""use bias transformation for all fc layers"""'], {}), "('all_fc_bt', False,\n 'use bias transformation for all fc layers')\n", (4220, 4289), False, 'from tensorflow.python.platform import flags\n'), ((4286, 4409), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""conv_bt"""', '(False)', '"""use bias transformation for the first conv layer, N/A for using pretraining"""'], {}), "('conv_bt', False,\n 'use bias transformation for the first conv layer, 
N/A for using pretraining'\n )\n", (4303, 4409), False, 'from tensorflow.python.platform import flags\n'), ((4401, 4493), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""bt_dim"""', '(10)', '"""the dimension of bias transformation for FC layers"""'], {}), "('bt_dim', 10,\n 'the dimension of bias transformation for FC layers')\n", (4421, 4493), False, 'from tensorflow.python.platform import flags\n'), ((4490, 4575), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""pretrain_weight_path"""', '"""N/A"""', '"""path to pretrained weights"""'], {}), "('pretrain_weight_path', 'N/A', 'path to pretrained weights'\n )\n", (4509, 4575), False, 'from tensorflow.python.platform import flags\n'), ((4571, 4669), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""train_pretrain_conv1"""', '(False)', '"""whether to finetune the pretrained weights"""'], {}), "('train_pretrain_conv1', False,\n 'whether to finetune the pretrained weights')\n", (4588, 4669), False, 'from tensorflow.python.platform import flags\n'), ((4666, 4730), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""two_head"""', '(True)', '"""use two-head architecture"""'], {}), "('two_head', True, 'use two-head architecture')\n", (4683, 4730), False, 'from tensorflow.python.platform import flags\n'), ((4731, 4845), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""learn_final_eept"""', '(False)', '"""learn an auxiliary loss for predicting final end-effector pose"""'], {}), "('learn_final_eept', False,\n 'learn an auxiliary loss for predicting final end-effector pose')\n", (4748, 4845), False, 'from tensorflow.python.platform import flags\n'), ((4842, 5099), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""learn_final_eept_whole_traj"""', '(False)', '"""learn an auxiliary loss for predicting final end-effector pose by passing the whole trajectory of eepts (used 
for video-only models)"""'], {}), "('learn_final_eept_whole_traj', False,\n 'learn an auxiliary loss for predicting final end-effector pose by passing the whole trajectory of eepts (used for video-only models)'\n )\n", (4859, 5099), False, 'from tensorflow.python.platform import flags\n'), ((5093, 5235), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""stopgrad_final_eept"""', '(True)', '"""stop the gradient when concatenate the predicted final eept with the feature points"""'], {}), "('stopgrad_final_eept', True,\n 'stop the gradient when concatenate the predicted final eept with the feature points'\n )\n", (5110, 5235), False, 'from tensorflow.python.platform import flags\n'), ((5245, 5343), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""final_eept_min"""', '(6)', '"""first index of the final eept in the action array"""'], {}), "('final_eept_min', 6,\n 'first index of the final eept in the action array')\n", (5265, 5343), False, 'from tensorflow.python.platform import flags\n'), ((5340, 5437), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""final_eept_max"""', '(8)', '"""last index of the final eept in the action array"""'], {}), "('final_eept_max', 8,\n 'last index of the final eept in the action array')\n", (5360, 5437), False, 'from tensorflow.python.platform import flags\n'), ((5434, 5525), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""final_eept_loss_eps"""', '(0.1)', '"""the coefficient of the auxiliary loss"""'], {}), "('final_eept_loss_eps', 0.1,\n 'the coefficient of the auxiliary loss')\n", (5452, 5525), False, 'from tensorflow.python.platform import flags\n'), ((5522, 5599), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""act_loss_eps"""', '(1.0)', '"""the coefficient of the action loss"""'], {}), "('act_loss_eps', 1.0, 'the coefficient of the action loss')\n", (5540, 5599), False, 'from 
tensorflow.python.platform import flags\n'), ((5600, 5731), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""loss_multiplier"""', '(100.0)', '"""the constant multiplied with the loss value, 100 for reach and 50 for push"""'], {}), "('loss_multiplier', 100.0,\n 'the constant multiplied with the loss value, 100 for reach and 50 for push'\n )\n", (5618, 5731), False, 'from tensorflow.python.platform import flags\n'), ((5742, 5832), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""use_l1_l2_loss"""', '(False)', '"""use a loss with combination of l1 and l2"""'], {}), "('use_l1_l2_loss', False,\n 'use a loss with combination of l1 and l2')\n", (5759, 5832), False, 'from tensorflow.python.platform import flags\n'), ((5829, 5888), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""l2_eps"""', '(0.01)', '"""coeffcient of l2 loss"""'], {}), "('l2_eps', 0.01, 'coeffcient of l2 loss')\n", (5847, 5888), False, 'from tensorflow.python.platform import flags\n'), ((5889, 5993), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""shuffle_val"""', '(False)', '"""whether to choose the validation set via shuffling or not"""'], {}), "('shuffle_val', False,\n 'whether to choose the validation set via shuffling or not')\n", (5906, 5993), False, 'from tensorflow.python.platform import flags\n'), ((6008, 6074), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""random_seed"""', '(0)', '"""random seed for training"""'], {}), "('random_seed', 0, 'random seed for training')\n", (6028, 6074), False, 'from tensorflow.python.platform import flags\n'), ((6075, 6138), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""fp"""', '(True)', '"""use spatial soft-argmax or not"""'], {}), "('fp', True, 'use spatial soft-argmax or not')\n", (6092, 6138), False, 'from tensorflow.python.platform import flags\n'), ((6139, 6215), 
'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""norm"""', '"""layer_norm"""', '"""batch_norm, layer_norm, or None"""'], {}), "('norm', 'layer_norm', 'batch_norm, layer_norm, or None')\n", (6158, 6215), False, 'from tensorflow.python.platform import flags\n'), ((6216, 6287), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""dropout"""', '(False)', '"""use dropout for fc layers or not"""'], {}), "('dropout', False, 'use dropout for fc layers or not')\n", (6233, 6287), False, 'from tensorflow.python.platform import flags\n'), ((6288, 6356), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""keep_prob"""', '(0.5)', '"""keep probability for dropout"""'], {}), "('keep_prob', 0.5, 'keep probability for dropout')\n", (6306, 6356), False, 'from tensorflow.python.platform import flags\n'), ((6357, 6492), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_filters"""', '(64)', '"""number of filters for conv nets -- 64 for placing, 16 for pushing, 40 for reaching."""'], {}), "('num_filters', 64,\n 'number of filters for conv nets -- 64 for placing, 16 for pushing, 40 for reaching.'\n )\n", (6377, 6492), False, 'from tensorflow.python.platform import flags\n'), ((6505, 6630), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""filter_size"""', '(3)', '"""filter size for conv nets -- 3 for placing, 5 for pushing, 3 for reaching."""'], {}), "('filter_size', 3,\n 'filter size for conv nets -- 3 for placing, 5 for pushing, 3 for reaching.'\n )\n", (6525, 6630), False, 'from tensorflow.python.platform import flags\n'), ((6622, 6742), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_conv_layers"""', '(5)', '"""number of conv layers -- 5 for placing, 4 for pushing, 3 for reaching."""'], {}), "('num_conv_layers', 5,\n 'number of conv layers -- 5 for placing, 4 for pushing, 3 for reaching.')\n", (6642, 6742), 
False, 'from tensorflow.python.platform import flags\n'), ((6739, 6881), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_strides"""', '(3)', '"""number of conv layers with strided filters -- 3 for placing, 4 for pushing, 3 for reaching."""'], {}), "('num_strides', 3,\n 'number of conv layers with strided filters -- 3 for placing, 4 for pushing, 3 for reaching.'\n )\n", (6759, 6881), False, 'from tensorflow.python.platform import flags\n'), ((6894, 7014), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""conv"""', '(True)', '"""whether or not to use a convolutional network, only applicable in some cases"""'], {}), "('conv', True,\n 'whether or not to use a convolutional network, only applicable in some cases'\n )\n", (6911, 7014), False, 'from tensorflow.python.platform import flags\n'), ((7006, 7082), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_fc_layers"""', '(3)', '"""number of fully-connected layers"""'], {}), "('num_fc_layers', 3, 'number of fully-connected layers')\n", (7026, 7082), False, 'from tensorflow.python.platform import flags\n'), ((7083, 7172), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""layer_size"""', '(200)', '"""hidden dimension of fully-connected layers"""'], {}), "('layer_size', 200,\n 'hidden dimension of fully-connected layers')\n", (7103, 7172), False, 'from tensorflow.python.platform import flags\n'), ((7169, 7325), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""temporal_conv_2_head"""', '(True)', '"""whether or not to use temporal convolutions for the two-head architecture in video-only setting."""'], {}), "('temporal_conv_2_head', True,\n 'whether or not to use temporal convolutions for the two-head architecture in video-only setting.'\n )\n", (7186, 7325), False, 'from tensorflow.python.platform import flags\n'), ((7335, 7538), 
'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""temporal_conv_2_head_ee"""', '(False)', '"""whether or not to use temporal convolutions for the two-head architecture in video-only setting for predicting the ee pose."""'], {}), "('temporal_conv_2_head_ee', False,\n 'whether or not to use temporal convolutions for the two-head architecture in video-only setting for predicting the ee pose.'\n )\n", (7352, 7538), False, 'from tensorflow.python.platform import flags\n'), ((7532, 7624), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""temporal_filter_size"""', '(10)', '"""filter size for temporal convolution"""'], {}), "('temporal_filter_size', 10,\n 'filter size for temporal convolution')\n", (7552, 7624), False, 'from tensorflow.python.platform import flags\n'), ((7621, 7719), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""temporal_num_filters"""', '(32)', '"""number of filters for temporal convolution"""'], {}), "('temporal_num_filters', 32,\n 'number of filters for temporal convolution')\n", (7641, 7719), False, 'from tensorflow.python.platform import flags\n'), ((7716, 7840), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""temporal_num_filters_ee"""', '(32)', '"""number of filters for temporal convolution for ee pose prediction"""'], {}), "('temporal_num_filters_ee', 32,\n 'number of filters for temporal convolution for ee pose prediction')\n", (7736, 7840), False, 'from tensorflow.python.platform import flags\n'), ((7837, 7955), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""temporal_num_layers"""', '(3)', '"""number of layers for temporal convolution for ee pose prediction"""'], {}), "('temporal_num_layers', 3,\n 'number of layers for temporal convolution for ee pose prediction')\n", (7857, 7955), False, 'from tensorflow.python.platform import flags\n'), ((7952, 8073), 
'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""temporal_num_layers_ee"""', '(3)', '"""number of layers for temporal convolution for ee pose prediction"""'], {}), "('temporal_num_layers_ee', 3,\n 'number of layers for temporal convolution for ee pose prediction')\n", (7972, 8073), False, 'from tensorflow.python.platform import flags\n'), ((8070, 8180), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""init"""', '"""xavier"""', '"""initializer for conv weights. Choose among random, xavier, and he"""'], {}), "('init', 'xavier',\n 'initializer for conv weights. Choose among random, xavier, and he')\n", (8089, 8180), False, 'from tensorflow.python.platform import flags\n'), ((8177, 8287), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""max_pool"""', '(False)', '"""Whether or not to use max pooling rather than strided convolutions"""'], {}), "('max_pool', False,\n 'Whether or not to use max pooling rather than strided convolutions')\n", (8194, 8287), False, 'from tensorflow.python.platform import flags\n'), ((8284, 8410), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""stop_grad"""', '(False)', '"""if True, do not use second derivatives in meta-optimization (for axis_angle)"""'], {}), "('stop_grad', False,\n 'if True, do not use second derivatives in meta-optimization (for axis_angle)'\n )\n", (8301, 8410), False, 'from tensorflow.python.platform import flags\n'), ((8443, 8532), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""log"""', '(True)', '"""if false, do not log summaries, for debugging code."""'], {}), "('log', True,\n 'if false, do not log summaries, for debugging code.')\n", (8460, 8532), False, 'from tensorflow.python.platform import flags\n'), ((8529, 8628), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""save_dir"""', '"""./daml_pick_logs"""', '"""directory for summaries and 
checkpoints."""'], {}), "('save_dir', './daml_pick_logs',\n 'directory for summaries and checkpoints.')\n", (8548, 8628), False, 'from tensorflow.python.platform import flags\n'), ((8729, 8815), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""resume"""', '(True)', '"""resume training if there is a model available"""'], {}), "('resume', True,\n 'resume training if there is a model available')\n", (8746, 8815), False, 'from tensorflow.python.platform import flags\n'), ((8812, 8877), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""train"""', '(True)', '"""True to train, False to test."""'], {}), "('train', True, 'True to train, False to test.')\n", (8829, 8877), False, 'from tensorflow.python.platform import flags\n'), ((8878, 8971), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""restore_iter"""', '(-1)', '"""iteration to load model (-1 for latest model)"""'], {}), "('restore_iter', -1,\n 'iteration to load model (-1 for latest model)')\n", (8898, 8971), False, 'from tensorflow.python.platform import flags\n'), ((8968, 9070), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""begin_restore_iter"""', '(41000)', '"""iteration to load model (-1 for latest model)"""'], {}), "('begin_restore_iter', 41000,\n 'iteration to load model (-1 for latest model)')\n", (8988, 9070), False, 'from tensorflow.python.platform import flags\n'), ((9067, 9261), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""train_update_batch_size"""', '(-1)', '"""number of examples used for gradient update during training (use if you want to test with a different number)."""'], {}), "('train_update_batch_size', -1,\n 'number of examples used for gradient update during training (use if you want to test with a different number).'\n )\n", (9087, 9261), False, 'from tensorflow.python.platform import flags\n'), ((9255, 9349), 
'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""test_update_batch_size"""', '(1)', '"""number of demos used during test time"""'], {}), "('test_update_batch_size', 1,\n 'number of demos used during test time')\n", (9275, 9349), False, 'from tensorflow.python.platform import flags\n'), ((9346, 9431), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""gpu_memory_fraction"""', '(0.9)', '"""fraction of memory used in gpu"""'], {}), "('gpu_memory_fraction', 0.9, 'fraction of memory used in gpu'\n )\n", (9364, 9431), False, 'from tensorflow.python.platform import flags\n'), ((9427, 9498), 'tensorflow.python.platform.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""record_gifs"""', '(True)', '"""record gifs during evaluation"""'], {}), "('record_gifs', True, 'record gifs during evaluation')\n", (9444, 9498), False, 'from tensorflow.python.platform import flags\n'), ((9499, 9541), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""output_data"""', '(6)', '""""""'], {}), "('output_data', 6, '')\n", (9519, 9541), False, 'from tensorflow.python.platform import flags\n'), ((9542, 9582), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""color_num"""', '(3)', '""""""'], {}), "('color_num', 3, '')\n", (9562, 9582), False, 'from tensorflow.python.platform import flags\n'), ((9583, 9624), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""object_num"""', '(4)', '""""""'], {}), "('object_num', 4, '')\n", (9603, 9624), False, 'from tensorflow.python.platform import flags\n'), ((9625, 9670), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""train_task_num"""', '(6)', '""""""'], {}), "('train_task_num', 6, '')\n", (9645, 9670), False, 'from tensorflow.python.platform import flags\n'), ((9671, 9710), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""task_num"""', '(8)', '""""""'], 
{}), "('task_num', 8, '')\n", (9691, 9710), False, 'from tensorflow.python.platform import flags\n'), ((9711, 9750), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""demo_num"""', '(5)', '""""""'], {}), "('demo_num', 5, '')\n", (9731, 9750), False, 'from tensorflow.python.platform import flags\n'), ((9794, 9837), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""index_range"""', '(20)', '""""""'], {}), "('index_range', 20, '')\n", (9814, 9837), False, 'from tensorflow.python.platform import flags\n'), ((9838, 9887), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""index_train_range"""', '(20)', '""""""'], {}), "('index_train_range', 20, '')\n", (9858, 9887), False, 'from tensorflow.python.platform import flags\n'), ((9888, 9947), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""demo_type"""', '"""robot"""', '"""robot or human"""'], {}), "('demo_type', 'robot', 'robot or human')\n", (9907, 9947), False, 'from tensorflow.python.platform import flags\n'), ((10010, 10057), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""target_type"""', '"""robot"""', '""""""'], {}), "('target_type', 'robot', '')\n", (10029, 10057), False, 'from tensorflow.python.platform import flags\n'), ((10194, 10234), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""weight_xy"""', '(1.0)', '""""""'], {}), "('weight_xy', 1.0, '')\n", (10212, 10234), False, 'from tensorflow.python.platform import flags\n'), ((10235, 10272), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""weight_z"""', '(0)', '""""""'], {}), "('weight_z', 0, '')\n", (10253, 10272), False, 'from tensorflow.python.platform import flags\n'), ((10273, 10313), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""weight_rxyz"""', '(0)', '""""""'], {}), "('weight_rxyz', 0, '')\n", (10291, 10313), False, 
'from tensorflow.python.platform import flags\n'), ((10314, 10372), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""test_data_color"""', '"""color_yellow"""', '""""""'], {}), "('test_data_color', 'color_yellow', '')\n", (10333, 10372), False, 'from tensorflow.python.platform import flags\n'), ((13253, 13352), 'numpy.reshape', 'np.reshape', (['obsas', '[batch_size, FLAGS.T, FLAGS.im_width * FLAGS.im_height * FLAGS.num_channels]'], {}), '(obsas, [batch_size, FLAGS.T, FLAGS.im_width * FLAGS.im_height *\n FLAGS.num_channels])\n', (13263, 13352), True, 'import numpy as np\n'), ((13362, 13461), 'numpy.reshape', 'np.reshape', (['obsbs', '[batch_size, FLAGS.T, FLAGS.im_width * FLAGS.im_height * FLAGS.num_channels]'], {}), '(obsbs, [batch_size, FLAGS.T, FLAGS.im_width * FLAGS.im_height *\n FLAGS.num_channels])\n', (13372, 13461), True, 'import numpy as np\n'), ((13477, 13539), 'numpy.reshape', 'np.reshape', (['actionas', '[batch_size, FLAGS.T, FLAGS.output_data]'], {}), '(actionas, [batch_size, FLAGS.T, FLAGS.output_data])\n', (13487, 13539), True, 'import numpy as np\n'), ((13555, 13617), 'numpy.reshape', 'np.reshape', (['actionbs', '[batch_size, FLAGS.T, FLAGS.output_data]'], {}), '(actionbs, [batch_size, FLAGS.T, FLAGS.output_data])\n', (13565, 13617), True, 'import numpy as np\n'), ((13633, 13683), 'numpy.zeros', 'np.zeros', (['[batch_size, FLAGS.T, FLAGS.output_data]'], {}), '([batch_size, FLAGS.T, FLAGS.output_data])\n', (13641, 13683), True, 'import numpy as np\n'), ((13698, 13748), 'numpy.zeros', 'np.zeros', (['[batch_size, FLAGS.T, FLAGS.output_data]'], {}), '([batch_size, FLAGS.T, FLAGS.output_data])\n', (13706, 13748), True, 'import numpy as np\n'), ((14428, 14483), 'read_data.Read_Robot_Data2', 'read_data.Read_Robot_Data2', (['target_path', 'FLAGS.T', 'index'], {}), '(target_path, FLAGS.T, index)\n', (14454, 14483), False, 'import read_data\n'), ((14498, 14596), 'numpy.reshape', 'np.reshape', (['obsa', '[batch_size, FLAGS.T, 
FLAGS.im_width * FLAGS.im_height * FLAGS.num_channels]'], {}), '(obsa, [batch_size, FLAGS.T, FLAGS.im_width * FLAGS.im_height *\n FLAGS.num_channels])\n', (14508, 14596), True, 'import numpy as np\n'), ((14606, 14704), 'numpy.reshape', 'np.reshape', (['obsb', '[batch_size, FLAGS.T, FLAGS.im_width * FLAGS.im_height * FLAGS.num_channels]'], {}), '(obsb, [batch_size, FLAGS.T, FLAGS.im_width * FLAGS.im_height *\n FLAGS.num_channels])\n', (14616, 14704), True, 'import numpy as np\n'), ((14719, 14780), 'numpy.reshape', 'np.reshape', (['actiona', '[batch_size, FLAGS.T, FLAGS.output_data]'], {}), '(actiona, [batch_size, FLAGS.T, FLAGS.output_data])\n', (14729, 14780), True, 'import numpy as np\n'), ((14796, 14857), 'numpy.reshape', 'np.reshape', (['actionb', '[batch_size, FLAGS.T, FLAGS.output_data]'], {}), '(actionb, [batch_size, FLAGS.T, FLAGS.output_data])\n', (14806, 14857), True, 'import numpy as np\n'), ((14942, 14992), 'numpy.zeros', 'np.zeros', (['[batch_size, FLAGS.T, FLAGS.output_data]'], {}), '([batch_size, FLAGS.T, FLAGS.output_data])\n', (14950, 14992), True, 'import numpy as np\n'), ((15007, 15057), 'numpy.zeros', 'np.zeros', (['[batch_size, FLAGS.T, FLAGS.output_data]'], {}), '([batch_size, FLAGS.T, FLAGS.output_data])\n', (15015, 15057), True, 'import numpy as np\n'), ((15565, 15612), 'numpy.reshape', 'np.reshape', (['obsbs[:, 0, :]', '[batch_size, 1, -1]'], {}), '(obsbs[:, 0, :], [batch_size, 1, -1])\n', (15575, 15612), True, 'import numpy as np\n'), ((15629, 15679), 'numpy.reshape', 'np.reshape', (['actionbs[:, 1, :]', '[batch_size, 1, -1]'], {}), '(actionbs[:, 1, :], [batch_size, 1, -1])\n', (15639, 15679), True, 'import numpy as np\n'), ((15695, 15745), 'numpy.reshape', 'np.reshape', (['statebs[:, -1, :]', '[batch_size, 1, -1]'], {}), '(statebs[:, -1, :], [batch_size, 1, -1])\n', (15705, 15745), True, 'import numpy as np\n'), ((16095, 16108), 'numpy.squeeze', 'np.squeeze', (['a'], {}), '(a)\n', (16105, 16108), True, 'import numpy as np\n'), ((16117, 
16130), 'numpy.squeeze', 'np.squeeze', (['b'], {}), '(b)\n', (16127, 16130), True, 'import numpy as np\n'), ((16163, 16176), 'numpy.abs', 'np.abs', (['(a - b)'], {}), '(a - b)\n', (16169, 16176), True, 'import numpy as np\n'), ((16682, 16720), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['save_dir', 'graph'], {}), '(save_dir, graph)\n', (16703, 16720), True, 'import tensorflow as tf\n'), ((20280, 20317), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['FLAGS.random_seed'], {}), '(FLAGS.random_seed)\n', (20298, 20317), True, 'import tensorflow as tf\n'), ((20322, 20355), 'numpy.random.seed', 'np.random.seed', (['FLAGS.random_seed'], {}), '(FLAGS.random_seed)\n', (20336, 20355), True, 'import numpy as np\n'), ((20360, 20390), 'random.seed', 'random.seed', (['FLAGS.random_seed'], {}), '(FLAGS.random_seed)\n', (20371, 20390), False, 'import random\n'), ((20404, 20414), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (20412, 20414), True, 'import tensorflow as tf\n'), ((20433, 20505), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': 'FLAGS.gpu_memory_fraction'}), '(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)\n', (20446, 20505), True, 'import tensorflow as tf\n'), ((20522, 20561), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (20536, 20561), True, 'import tensorflow as tf\n'), ((20573, 20614), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph', 'config': 'tf_config'}), '(graph=graph, config=tf_config)\n', (20583, 20614), True, 'import tensorflow as tf\n'), ((20626, 20649), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (20636, 20649), True, 'import tensorflow as tf\n'), ((21413, 21509), 'origin_mil_pick.MIL', 'MIL', (['FLAGS.output_data'], {'state_idx': 'state_idx', 'img_idx': 'img_idx', 'network_config': 'network_config'}), '(FLAGS.output_data, state_idx=state_idx, img_idx=img_idx, 
network_config\n =network_config)\n', (21416, 21509), False, 'from origin_mil_pick import MIL\n'), ((10912, 10984), 'numpy.random.randint', 'np.random.randint', (['FLAGS.train_task_num', 'FLAGS.task_num'], {'size': 'batch_size'}), '(FLAGS.train_task_num, FLAGS.task_num, size=batch_size)\n', (10929, 10984), True, 'import numpy as np\n'), ((12683, 12707), 'numpy.random.randint', 'np.random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (12700, 12707), True, 'import numpy as np\n'), ((13000, 13055), 'read_data.Read_Robot_Data2', 'read_data.Read_Robot_Data2', (['target_path', 'FLAGS.T', 'index'], {}), '(target_path, FLAGS.T, index)\n', (13026, 13055), False, 'import read_data\n'), ((14222, 14275), 'read_data.Read_Robot_Data2', 'read_data.Read_Robot_Data2', (['demo_path', 'FLAGS.T', 'index'], {}), '(demo_path, FLAGS.T, index)\n', (14248, 14275), False, 'import read_data\n'), ((24021, 24051), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(10)'}), '(max_to_keep=10)\n', (24035, 24051), True, 'import tensorflow as tf\n'), ((24102, 24135), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (24133, 24135), True, 'import tensorflow as tf\n'), ((24253, 24292), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (24281, 24292), True, 'import tensorflow as tf\n'), ((24366, 24402), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['save_dir'], {}), '(save_dir)\n', (24392, 24402), True, 'import tensorflow as tf\n'), ((10545, 10587), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {'size': 'batch_size'}), '(0, 100, size=batch_size)\n', (10562, 10587), True, 'import numpy as np\n'), ((10667, 10709), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {'size': 'batch_size'}), '(0, 100, size=batch_size)\n', (10684, 10709), True, 'import numpy as np\n'), ((11037, 11079), 'numpy.random.randint', 'np.random.randint', 
(['(0)', '(100)'], {'size': 'batch_size'}), '(0, 100, size=batch_size)\n', (11054, 11079), True, 'import numpy as np\n'), ((11156, 11198), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {'size': 'batch_size'}), '(0, 100, size=batch_size)\n', (11173, 11198), True, 'import numpy as np\n'), ((12783, 12836), 'read_data.Read_Robot_Data2', 'read_data.Read_Robot_Data2', (['demo_path', 'FLAGS.T', 'index'], {}), '(demo_path, FLAGS.T, index)\n', (12809, 12836), False, 'import read_data\n'), ((14345, 14398), 'read_data.Read_Human_Data2', 'read_data.Read_Human_Data2', (['demo_path', 'FLAGS.T', 'index'], {}), '(demo_path, FLAGS.T, index)\n', (14371, 14398), False, 'import read_data\n'), ((10811, 10853), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {'size': 'batch_size'}), '(0, 100, size=batch_size)\n', (10828, 10853), True, 'import numpy as np\n'), ((12914, 12967), 'read_data.Read_Human_Data2', 'read_data.Read_Human_Data2', (['demo_path', 'FLAGS.T', 'index'], {}), '(demo_path, FLAGS.T, index)\n', (12940, 12967), False, 'import read_data\n'), ((17563, 17596), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {'size': '(1)'}), '(0, 100, size=1)\n', (17580, 17596), True, 'import numpy as np\n')] |
import os
import itertools
import numpy as np
from modcma import Parameters
from dacbench.abstract_benchmark import AbstractBenchmark, objdict
from dacbench.envs import ModCMAEnv, CMAStepSizeEnv
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
DEFAULT_CFG_SPACE = CS.ConfigurationSpace()

# Binary on/off toggles for the individual CMA-ES modules
ACTIVE = CSH.CategoricalHyperparameter(name='0_active', choices=[True, False])
ELITIST = CSH.CategoricalHyperparameter(name='1_elitist', choices=[True, False])
ORTHOGONAL = CSH.CategoricalHyperparameter(name='2_orthogonal', choices=[True, False])
SEQUENTIAL = CSH.CategoricalHyperparameter(name='3_sequential', choices=[True, False])
THRESHOLD_CONVERGENCE = CSH.CategoricalHyperparameter(name='4_threshold_convergence', choices=[True, False])
# Multi-valued module options
STEP_SIZE_ADAPTION = CSH.CategoricalHyperparameter(name='5_step_size_adaption', choices=["csa", "tpa", "msr", "xnes", "m-xnes", "lp-xnes", "psr"])
MIRRORED = CSH.CategoricalHyperparameter(name='6_mirrored', choices=["None", "mirrored", "mirrored pairwise"])
BASE_SAMPLER = CSH.CategoricalHyperparameter(name='7_base_sampler', choices=["gaussian", "sobol", "halton"])
WEIGHTS_OPTION = CSH.CategoricalHyperparameter(name='8_weights_option', choices=["default", "equal", "1/2^lambda"])
LOCAL_RESTART = CSH.CategoricalHyperparameter(name='90_local_restart', choices=["None", "IPOP", "BIPOP"])
BOUND_CORRECTION = CSH.CategoricalHyperparameter(name='91_bound_correction', choices=["None", "saturate", "unif_resample", "COTN", "toroidal", "mirror"])

# Register every hyperparameter in declaration order
for _hyperparameter in (
    ACTIVE,
    ELITIST,
    ORTHOGONAL,
    SEQUENTIAL,
    THRESHOLD_CONVERGENCE,
    STEP_SIZE_ADAPTION,
    MIRRORED,
    BASE_SAMPLER,
    WEIGHTS_OPTION,
    LOCAL_RESTART,
    BOUND_CORRECTION,
):
    DEFAULT_CFG_SPACE.add_hyperparameter(_hyperparameter)

# Benchmark metadata shown to the user / stored with results
INFO = {
    "identifier": "ModCMA",
    "name": "Online Selection of CMA-ES Variants",
    "reward": "Negative best function value",
    "state_description": [
        "Generation Size",
        "Sigma",
        "Remaining Budget",
        "Function ID",
        "Instance ID",
    ],
}

MODCMA_DEFAULTS = objdict(
    {
        "config_space": DEFAULT_CFG_SPACE,
        "action_space_class": "MultiDiscrete",
        # One discrete dimension per CMA-ES module; its cardinality is the
        # number of options the module exposes (2 for plain booleans).
        "action_space_args": [
            [
                len(getattr(getattr(Parameters, module), "options", [False, True]))
                for module in Parameters.__modules__
            ]
        ],
        "observation_space_class": "Box",
        "observation_space_args": [-np.inf * np.ones(5), np.inf * np.ones(5)],
        "observation_space_type": np.float32,
        "reward_range": (-(10 ** 12), 0),
        "budget": 100,
        "cutoff": 1e6,
        "seed": 0,
        "instance_set_path": os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "../instance_sets/modea/modea_train.csv",
        ),
        "test_set_path": os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "../instance_sets/modea/modea_train.csv",
        ),
        "benchmark_info": INFO,
    }
)
class ModCMABenchmark(AbstractBenchmark):
    """Benchmark wrapper around the modular CMA-ES environments.

    Depending on ``step_size`` this builds either the full module-selection
    environment (``ModCMAEnv``) or the step-size-only control environment
    (``CMAStepSizeEnv``).
    """

    def __init__(self, config_path: str = None, step_size=False, config=None):
        super().__init__(config_path, config)
        # User-supplied configuration values override the defaults.
        self.config = objdict(MODCMA_DEFAULTS.copy(), **(self.config or dict()))
        self.step_size = step_size

    def get_environment(self):
        """Build a fresh, fully wrapped environment instance."""
        # Lazily load the training instances on first use
        if "instance_set" not in self.config:
            self.read_instance_set()

        # Read test set if path is specified
        if "test_set" not in self.config.keys() and "test_set_path" in self.config.keys():
            self.read_instance_set(test=True)

        if self.step_size:
            # Continuous control of sigma only: a single value in [0, 10]
            self.config.action_space_class = "Box"
            self.config.action_space_args = [np.array([0]), np.array([10])]
            env = CMAStepSizeEnv(self.config)
        else:
            env = ModCMAEnv(self.config)

        for wrapper in self.wrap_funcs:
            env = wrapper(env)
        return env

    def read_instance_set(self, test=False):
        """Parse a CSV instance set into ``self.config``.

        Each data row holds: id, dimension, function id, instance id, and
        the integer representation of the module configuration.
        """
        if test:
            path, keyword = self.config.test_set_path, "test_set"
        else:
            path, keyword = self.config.instance_set_path, "instance_set"

        self.config[keyword] = dict()
        with open(path, "r") as fh:
            next(fh, None)  # skip the header row
            for row in fh:
                inst_id, dim, fid, iid, *representation = row.strip().split(",")
                self.config[keyword][int(inst_id)] = [
                    int(dim),
                    int(fid),
                    int(iid),
                    [int(bit) for bit in representation],
                ]

    def get_benchmark(self, seed: int = 0):
        """Return a seeded ``ModCMAEnv`` built from the default configuration."""
        self.config = MODCMA_DEFAULTS.copy()
        self.config.seed = seed
        self.read_instance_set()
        self.read_instance_set(test=True)
        return ModCMAEnv(self.config)
| [
"itertools.islice",
"numpy.ones",
"dacbench.envs.CMAStepSizeEnv",
"numpy.array",
"dacbench.envs.ModCMAEnv",
"os.path.abspath",
"ConfigSpace.ConfigurationSpace",
"ConfigSpace.hyperparameters.CategoricalHyperparameter"
] | [((286, 309), 'ConfigSpace.ConfigurationSpace', 'CS.ConfigurationSpace', ([], {}), '()\n', (307, 309), True, 'import ConfigSpace as CS\n'), ((319, 388), 'ConfigSpace.hyperparameters.CategoricalHyperparameter', 'CSH.CategoricalHyperparameter', ([], {'name': '"""0_active"""', 'choices': '[True, False]'}), "(name='0_active', choices=[True, False])\n", (348, 388), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((399, 469), 'ConfigSpace.hyperparameters.CategoricalHyperparameter', 'CSH.CategoricalHyperparameter', ([], {'name': '"""1_elitist"""', 'choices': '[True, False]'}), "(name='1_elitist', choices=[True, False])\n", (428, 469), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((483, 556), 'ConfigSpace.hyperparameters.CategoricalHyperparameter', 'CSH.CategoricalHyperparameter', ([], {'name': '"""2_orthogonal"""', 'choices': '[True, False]'}), "(name='2_orthogonal', choices=[True, False])\n", (512, 556), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((570, 643), 'ConfigSpace.hyperparameters.CategoricalHyperparameter', 'CSH.CategoricalHyperparameter', ([], {'name': '"""3_sequential"""', 'choices': '[True, False]'}), "(name='3_sequential', choices=[True, False])\n", (599, 643), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((668, 756), 'ConfigSpace.hyperparameters.CategoricalHyperparameter', 'CSH.CategoricalHyperparameter', ([], {'name': '"""4_threshold_convergence"""', 'choices': '[True, False]'}), "(name='4_threshold_convergence', choices=[True,\n False])\n", (697, 756), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((774, 903), 'ConfigSpace.hyperparameters.CategoricalHyperparameter', 'CSH.CategoricalHyperparameter', ([], {'name': '"""5_step_size_adaption"""', 'choices': "['csa', 'tpa', 'msr', 'xnes', 'm-xnes', 'lp-xnes', 'psr']"}), "(name='5_step_size_adaption', choices=['csa',\n 'tpa', 'msr', 'xnes', 'm-xnes', 'lp-xnes', 'psr'])\n", (803, 903), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((911, 1014), 
'ConfigSpace.hyperparameters.CategoricalHyperparameter', 'CSH.CategoricalHyperparameter', ([], {'name': '"""6_mirrored"""', 'choices': "['None', 'mirrored', 'mirrored pairwise']"}), "(name='6_mirrored', choices=['None',\n 'mirrored', 'mirrored pairwise'])\n", (940, 1014), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((1026, 1123), 'ConfigSpace.hyperparameters.CategoricalHyperparameter', 'CSH.CategoricalHyperparameter', ([], {'name': '"""7_base_sampler"""', 'choices': "['gaussian', 'sobol', 'halton']"}), "(name='7_base_sampler', choices=['gaussian',\n 'sobol', 'halton'])\n", (1055, 1123), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((1137, 1239), 'ConfigSpace.hyperparameters.CategoricalHyperparameter', 'CSH.CategoricalHyperparameter', ([], {'name': '"""8_weights_option"""', 'choices': "['default', 'equal', '1/2^lambda']"}), "(name='8_weights_option', choices=['default',\n 'equal', '1/2^lambda'])\n", (1166, 1239), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((1252, 1345), 'ConfigSpace.hyperparameters.CategoricalHyperparameter', 'CSH.CategoricalHyperparameter', ([], {'name': '"""90_local_restart"""', 'choices': "['None', 'IPOP', 'BIPOP']"}), "(name='90_local_restart', choices=['None',\n 'IPOP', 'BIPOP'])\n", (1281, 1345), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((1361, 1499), 'ConfigSpace.hyperparameters.CategoricalHyperparameter', 'CSH.CategoricalHyperparameter', ([], {'name': '"""91_bound_correction"""', 'choices': "['None', 'saturate', 'unif_resample', 'COTN', 'toroidal', 'mirror']"}), "(name='91_bound_correction', choices=['None',\n 'saturate', 'unif_resample', 'COTN', 'toroidal', 'mirror'])\n", (1390, 1499), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((5205, 5227), 'dacbench.envs.ModCMAEnv', 'ModCMAEnv', (['self.config'], {}), '(self.config)\n', (5214, 5227), False, 'from dacbench.envs import ModCMAEnv, CMAStepSizeEnv\n'), ((4166, 4193), 'dacbench.envs.CMAStepSizeEnv', 'CMAStepSizeEnv', (['self.config'], {}), 
'(self.config)\n', (4180, 4193), False, 'from dacbench.envs import ModCMAEnv, CMAStepSizeEnv\n'), ((4226, 4248), 'dacbench.envs.ModCMAEnv', 'ModCMAEnv', (['self.config'], {}), '(self.config)\n', (4235, 4248), False, 'from dacbench.envs import ModCMAEnv, CMAStepSizeEnv\n'), ((4673, 4702), 'itertools.islice', 'itertools.islice', (['fh', '(1)', 'None'], {}), '(fh, 1, None)\n', (4689, 4702), False, 'import itertools\n'), ((2859, 2869), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (2866, 2869), True, 'import numpy as np\n'), ((2880, 2890), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (2887, 2890), True, 'import numpy as np\n'), ((3117, 3142), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (3132, 3142), False, 'import os\n'), ((3277, 3302), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (3292, 3302), False, 'import os\n'), ((4117, 4130), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (4125, 4130), True, 'import numpy as np\n'), ((4132, 4146), 'numpy.array', 'np.array', (['[10]'], {}), '([10])\n', (4140, 4146), True, 'import numpy as np\n')] |
from scipy import signal
import numpy as np
import time
import math
import TradingBotConfig as theConfig
class MarketData():
    """Rolling market-price history plus derived trading indicators.

    Keeps buffers of (time, price) samples and maintains a fast and a slow
    low-pass average, a MACD-like oscillator (fast minus slow, normalized),
    and a "risk line" (average over an older window of the price history).
    Refreshed values are pushed to the UI via ``self.theUIGraph``.

    Expects one new sample roughly every 10 seconds through
    ``MRKT_updateMarketData``.
    """

    # Maximum number of samples kept in the rolling buffers before old data is dropped
    MAX_HISTORIC_SAMPLES = 20000
    # Window lengths (in samples) used as input to the low-pass filters
    NB_POINTS_FOR_FAST_SMOOTH_FILTER = 600
    NB_POINTS_FOR_SLOW_SMOOTH_FILTER = 1200
    # The risk line averages a window that ends DELAY samples in the past
    # and spans NB_POINTS_FOR_RISK_LINE_COMPUTATION samples before that.
    NB_POINTS_DELAY_FOR_RISK_LINE_COMPUTATION = 220
    NB_POINTS_FOR_RISK_LINE_COMPUTATION = 1200
    RISK_LINE_START_INDEX = - (NB_POINTS_FOR_RISK_LINE_COMPUTATION + NB_POINTS_DELAY_FOR_RISK_LINE_COMPUTATION)
    RISK_LINE_END_INDEX = - (NB_POINTS_DELAY_FOR_RISK_LINE_COMPUTATION)
    # Placeholder; overwritten by RefreshSmoothFiltersCoefficients() depending on sensitivity
    maxMACDValuePricePercentageForNormalization = 60
    # Indicators are considered established once the slow filter window is full
    NB_POINTS_MIN_FOR_ESTABLISHMENT = NB_POINTS_FOR_SLOW_SMOOTH_FILTER

    def __init__(self, GDAXControler, UIGraph):
        """GDAXControler: market-data source handle (stored for later use).
        UIGraph: UI facade used to display the price and indicator curves.
        """
        self.theGDAXControler = GDAXControler
        self.theUIGraph = UIGraph
        # Init model data
        self.MRKT_ResetAllData(1)
        self.RefreshSmoothFiltersCoefficients()

    def MRKT_ResetAllData(self, UIGraphSubScheduling):
        """Clear all sample/indicator buffers and set the UI sub-sampling rate.

        UIGraphSubScheduling: only every N-th iteration updates the heavy
        indicator computations and the UI (1 = every iteration).
        """
        print("MRKT - Reset all data")
        self.totalNbIterations = 0
        self.dataRefTime = []
        self.dataRefCryptoPriceInEUR = []
        self.dataRefSmoothAverageFast = []
        self.dataRefSmoothAverageSlow = []
        self.dataRefRiskLine = []
        self.dataRefMACD = []
        self.UIGraphSubScheduling = UIGraphSubScheduling

    def RefreshSmoothFiltersCoefficients(self):
        """Recompute the Butterworth filter coefficients and the MACD
        normalization factor from the UI-selected sensitivity level (1-6).

        Higher sensitivity levels use higher cutoff frequencies (Wn), i.e.
        the smoothed averages react faster to price changes.
        """
        newSensitvityValue = self.theUIGraph.UIGR_getSensitivityLevelValue()
        print("MRKT - Applied coefficients : %s" % newSensitvityValue)
        if (newSensitvityValue == 6):
            N = 1
            WnFast=float(0.0333) # 1/30
            WnSlow=float(0.01) # 1/100
            self.maxMACDValuePricePercentageForNormalization = 0.006
        elif (newSensitvityValue == 5):
            N = 1
            WnFast=float(0.01666) # 1/60
            WnSlow=float(0.005882) # 1/170
            self.maxMACDValuePricePercentageForNormalization = 0.007
        elif (newSensitvityValue == 4):
            N = 1
            WnFast=float(0.010) # 1/80
            WnSlow=float(0.0040) # 1/230
            self.maxMACDValuePricePercentageForNormalization = 0.008
        elif (newSensitvityValue == 3):
            N = 1
            WnFast=float(0.008) # 1/110
            WnSlow=float(0.003) # 1/250
            self.maxMACDValuePricePercentageForNormalization = 0.01
        elif (newSensitvityValue == 2):
            N = 1
            WnFast=float(0.0040) # 1/
            WnSlow=float(0.0018) # 1/
            self.maxMACDValuePricePercentageForNormalization = 0.012
        elif (newSensitvityValue == 1):
            N = 2
            WnFast=float(0.01111) # 1/90
            WnSlow=float(0.0041667) # 1/240
            self.maxMACDValuePricePercentageForNormalization = 0.012
        else: # Should not happen
            N = 1
            WnFast=float(0.0125) # 1/80
            WnSlow=float(0.004347) # 1/230
            self.maxMACDValuePricePercentageForNormalization = 0.012
        # Calibrate the MACD normalization on an early price sample (index 1)
        # once at least two samples exist; otherwise fall back to a default.
        if (self.totalNbIterations > 1):
            self.maxMACDForNormalization = self.dataRefCryptoPriceInEUR[1] * self.maxMACDValuePricePercentageForNormalization
        else:
            self.maxMACDForNormalization = 10000 * self.maxMACDValuePricePercentageForNormalization
        print("MRKT - Coefficients updated. New self.maxMACDForNormalization is %s, WnFast = %s, WnSlow = %s" % (self.maxMACDForNormalization, WnFast, WnSlow))
        self.bFast, self.aFast = signal.butter(N, float(WnFast), 'low') # One gotcha is that Wn is a fraction of the Nyquist frequency (half the sampling frequency).
        self.bSlow, self.aSlow = signal.butter(N, float(WnSlow), 'low') # One gotcha is that Wn is a fraction of the Nyquist frequency (half the sampling frequency).

    def MRKT_AreIndicatorsEstablished(self):
        """Return True once enough samples exist for the slow filter to be valid."""
        #print("MRKT_AreIndicatorsEstablished - nb it %s minRequested %s" % (self.totalNbIterations,self.MRKT_GetMinNumberOfRequiredSamplesForEstablishment()))
        if (self.totalNbIterations > self.NB_POINTS_MIN_FOR_ESTABLISHMENT):
            return True
        else:
            return False

    def MRKT_GetLastRiskLineValue(self):
        """Return the most recent risk-line value."""
        return self.dataRefRiskLine[-1]

    def MRKT_GetLastMACDValue(self):
        """Return the most recent normalized MACD value."""
        return self.dataRefMACD[-1]

    # Used in SImulation mode in order to get the price at which we buy or sell
    def MRKT_GetLastRefPrice(self):
        """Return the most recent raw price sample (EUR)."""
        return self.dataRefCryptoPriceInEUR[-1]

    def MRKT_GetLastFastSmoothedPrice(self):
        """Return the most recent fast-filtered price value."""
        return self.dataRefSmoothAverageFast[-1]

    # Needs one sample every 10 sec
    def MRKT_updateMarketData(self, newSampleTime, newSamplePrice):
        """Ingest one (time, price) sample: shift buffers, recompute indicators,
        and push refreshed values to the UI on sub-sampled iterations.

        None samples are rejected with a log message and counted as no-ops.
        """
        if (newSampleTime is not None):
            if (newSamplePrice is not None):
                # Drop old samples (buffers shifts)
                self.dropOldData()
                # Add new sample
                self.updateMarketPriceAndTime(newSampleTime, newSamplePrice)
                # Update indicators
                self.updateFastSmoothAverage()
                self.updateSlowSmoothAverage()
                self.updatePriceMACD()
                self.updateRiskLine()
                # UI Data Update
                if (self.totalNbIterations % self.UIGraphSubScheduling == 0):
                    self.theUIGraph.UIGR_updateNextIterationData(self.dataRefTime[-1], self.dataRefCryptoPriceInEUR[-1], self.dataRefSmoothAverageFast[-1], self.dataRefSmoothAverageSlow[-1], self.dataRefRiskLine[-1], self.dataRefMACD[-1])
                if (self.totalNbIterations % 20 == 0):
                    # Update Smooth filters coefficients if needed. Check value changed in subscheduled part to save CPU
                    # Last condition is made for calibration of MACD normalization indicator with price data
                    if ((self.theUIGraph.UIGR_hasSensitivityLevelValueChanged() == True) or (self.totalNbIterations == 20)):
                        self.RefreshSmoothFiltersCoefficients()
                self.totalNbIterations = self.totalNbIterations + 1
            else:
                print("MRKT - None Sampleprice detected")
        else:
            print("MRKT - None Sampletime detected")

    def dropOldData(self):
        """Drop the oldest sample from each buffer once MAX_HISTORIC_SAMPLES
        is exceeded, keeping buffer lengths bounded.

        Indicator buffers are only popped on sub-scheduled iterations because
        they are also only appended on those iterations.
        """
        if (self.totalNbIterations > self.MAX_HISTORIC_SAMPLES):
            self.dataRefTime.pop(0)
            self.dataRefCryptoPriceInEUR.pop(0)
            if (self.totalNbIterations % self.UIGraphSubScheduling == 0):
                self.dataRefSmoothAverageFast.pop(0)
                self.dataRefSmoothAverageSlow.pop(0)
                self.dataRefRiskLine.pop(0)
                self.dataRefMACD.pop(0)

    def updateMarketPriceAndTime(self, newSampleTime, newSamplePrice):
        """Append the new sample to the raw buffers and refresh the UI price label."""
        self.dataRefCryptoPriceInEUR.append(newSamplePrice)
        self.dataRefTime.append(newSampleTime)
        # Update price on the UI
        if (self.totalNbIterations % self.UIGraphSubScheduling == 0):
            self.theUIGraph.UIGR_updatePriceLbl(round(self.dataRefCryptoPriceInEUR[-1], 2))

    def updateFastSmoothAverage(self):
        """Append the latest fast low-pass filtered price.

        Before the fast filter window is full, a slightly damped copy of the
        raw price (x0.999) is used as a placeholder.
        """
        if (self.totalNbIterations > self.NB_POINTS_FOR_FAST_SMOOTH_FILTER + 1):
            if (self.totalNbIterations % self.UIGraphSubScheduling == 0):
                #WnFast=1/55 # Filter 12 times slower
                #N=1 # Filter order
                #b, a = signal.butter(N, Wn, 'low') # One gotcha is that Wn is a fraction of the Nyquist frequency (half the sampling frequency).
                # So if the sampling rate is 1000Hz and you want a cutoff of 250Hz, you should use Wn=0.5.
                self.dataRefSmoothAverageFast.append((signal.lfilter(self.bFast, self.aFast, self.dataRefCryptoPriceInEUR[-self.NB_POINTS_FOR_FAST_SMOOTH_FILTER:]))[-1])
        else:
            self.dataRefSmoothAverageFast.append(self.dataRefCryptoPriceInEUR[-1]*0.999)

    def updateSlowSmoothAverage(self):
        """Append the latest slow low-pass filtered price.

        Before the slow filter window is full, a slightly damped copy of the
        raw price (x0.999) is used as a placeholder.
        """
        if (self.totalNbIterations > self.NB_POINTS_FOR_SLOW_SMOOTH_FILTER + 1):
            if (self.totalNbIterations % self.UIGraphSubScheduling == 0):
                self.dataRefSmoothAverageSlow.append((signal.lfilter(self.bSlow, self.aSlow, self.dataRefCryptoPriceInEUR[-self.NB_POINTS_FOR_SLOW_SMOOTH_FILTER:]))[-1])
        else:
            self.dataRefSmoothAverageSlow.append(self.dataRefCryptoPriceInEUR[-1]*0.999)

    def updateRiskLine(self):
        """Append the risk line: the mean price over a delayed window of the
        history (see RISK_LINE_START_INDEX / RISK_LINE_END_INDEX).

        0 is appended until enough samples exist for the computation.
        """
        if (self.totalNbIterations > self.NB_POINTS_FOR_RISK_LINE_COMPUTATION + 1):
            if (self.totalNbIterations % self.UIGraphSubScheduling == 0):
                average = (np.sum(self.dataRefCryptoPriceInEUR[self.RISK_LINE_START_INDEX:self.RISK_LINE_END_INDEX])) / self.NB_POINTS_FOR_RISK_LINE_COMPUTATION
                self.dataRefRiskLine.append(average)
            else:
                pass # Keep last value
        else:
            self.dataRefMACD.append(0) if False else self.dataRefRiskLine.append(0)

    def updatePriceMACD(self):
        """Append the MACD-like oscillator: (fast - slow) average, expressed
        as a percentage of the normalization factor.

        0 is appended until the slow average is established.
        """
        # Derivate is computed over smooth price data so wait until this one is established
        if (self.totalNbIterations > self.NB_POINTS_FOR_SLOW_SMOOTH_FILTER + 2):
            if (self.totalNbIterations % self.UIGraphSubScheduling == 0):
                localMACD = (self.dataRefSmoothAverageFast[-1] - self.dataRefSmoothAverageSlow[-1])
                self.dataRefMACD.append(localMACD * 100 / (self.maxMACDForNormalization))
        else:
            self.dataRefMACD.append(0)
| [
"numpy.sum",
"scipy.signal.lfilter"
] | [((8983, 9077), 'numpy.sum', 'np.sum', (['self.dataRefCryptoPriceInEUR[self.RISK_LINE_START_INDEX:self.\n RISK_LINE_END_INDEX]'], {}), '(self.dataRefCryptoPriceInEUR[self.RISK_LINE_START_INDEX:self.\n RISK_LINE_END_INDEX])\n', (8989, 9077), True, 'import numpy as np\n'), ((8029, 8143), 'scipy.signal.lfilter', 'signal.lfilter', (['self.bFast', 'self.aFast', 'self.dataRefCryptoPriceInEUR[-self.NB_POINTS_FOR_FAST_SMOOTH_FILTER:]'], {}), '(self.bFast, self.aFast, self.dataRefCryptoPriceInEUR[-self.\n NB_POINTS_FOR_FAST_SMOOTH_FILTER:])\n', (8043, 8143), False, 'from scipy import signal\n'), ((8523, 8637), 'scipy.signal.lfilter', 'signal.lfilter', (['self.bSlow', 'self.aSlow', 'self.dataRefCryptoPriceInEUR[-self.NB_POINTS_FOR_SLOW_SMOOTH_FILTER:]'], {}), '(self.bSlow, self.aSlow, self.dataRefCryptoPriceInEUR[-self.\n NB_POINTS_FOR_SLOW_SMOOTH_FILTER:])\n', (8537, 8637), False, 'from scipy import signal\n')] |
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QWidget, QAction, QTabWidget,QVBoxLayout
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
import sys, os
class plotWindow():
    """Tabbed Qt window in which each matplotlib figure gets its own tab,
    complete with a navigation toolbar."""

    def __init__(self, title="Plot Window", parent=None):
        self.app = QApplication(sys.argv)
        self.MainWindow = QMainWindow()
        self.MainWindow.__init__()
        self.MainWindow.setWindowTitle(title)

        # Per-tab bookkeeping, kept in parallel lists
        self.canvases = []
        self.figure_handles = []
        self.toolbar_handles = []
        self.tab_handles = []
        self.current_window = -1

        self.tabs = QTabWidget()
        self.MainWindow.setCentralWidget(self.tabs)
        self.MainWindow.resize(1200, 980)
        self.MainWindow.show()

    def addPlot(self, title, figure, threeD=False):
        """Embed *figure* in a new tab labelled *title*.

        threeD: enable mouse rotation for a 3-D axes figure.
        """
        tab = QWidget()
        tab_layout = QVBoxLayout()
        tab.setLayout(tab_layout)

        figure.subplots_adjust(left=0.05, right=0.99, bottom=0.05, top=0.91, wspace=0.2, hspace=0.2)

        canvas = FigureCanvas(figure)
        toolbar = NavigationToolbar(canvas, tab)
        tab_layout.addWidget(canvas)
        tab_layout.addWidget(toolbar)

        self.tabs.addTab(tab, title)
        self.toolbar_handles.append(toolbar)
        self.canvases.append(canvas)
        self.figure_handles.append(figure)
        if threeD:
            figure.axes[0].mouse_init()  # enable interactive 3-D rotation
        self.tab_handles.append(tab)

    def show(self):
        """Enter the Qt event loop; returns its exit code."""
        return self.app.exec_()

    def saveFig(self, fig, filepath, format='svg', sizeInches=[]):
        """Save *fig* to *filepath* with the given *format* appended as extension.

        Creates the target directory if needed, temporarily resizes the figure
        to *sizeInches* (with a format-dependent default), and restores the
        original size afterwards.
        """
        if fig == None:
            return

        for axis in fig.get_axes():
            axis.autoscale()  # Reset to default zoom before saving

        restoreSize = fig.get_size_inches()
        if not sizeInches:
            # Larger default canvas for raster output
            sizeInches = [16, 11] if format == 'png' else [11, 8]
        fig.set_size_inches(sizeInches)

        directory = os.path.dirname(filepath)
        if not os.path.exists(directory):
            os.makedirs(directory)
        fig.savefig(os.path.join(filepath + '.' + format), bbox_inches='tight')
        fig.set_size_inches(restoreSize)
if __name__ == '__main__':
    # Small demo: one tab per trigonometric curve.
    import numpy as np

    pw = plotWindow()
    xs = np.arange(0, 10, 0.001)

    fig_sin = plt.figure()
    plt.plot(xs, np.sin(xs), '--')
    pw.addPlot("sin", fig_sin)

    fig_cos = plt.figure()
    plt.plot(xs, np.cos(xs), '--')
    pw.addPlot("cos", fig_cos)

    pw.show()
| [
"PyQt5.QtWidgets.QWidget",
"os.path.exists",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"PyQt5.QtWidgets.QMainWindow",
"os.makedirs",
"matplotlib.pyplot.plot",
"os.path.join",
"PyQt5.QtWidgets.QVBoxLayout",
"matplotlib.pyplot.figure",
"os.path.dirname",
"numpy.cos",
"PyQt5.QtWi... | [((2691, 2714), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(0.001)'], {}), '(0, 10, 0.001)\n', (2700, 2714), True, 'import numpy as np\n'), ((2724, 2736), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2734, 2736), True, 'import matplotlib.pyplot as plt\n'), ((2748, 2757), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (2754, 2757), True, 'import numpy as np\n'), ((2762, 2785), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'ysin', '"""--"""'], {}), "(x, ysin, '--')\n", (2770, 2785), True, 'import matplotlib.pyplot as plt\n'), ((2820, 2832), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2830, 2832), True, 'import matplotlib.pyplot as plt\n'), ((2844, 2853), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (2850, 2853), True, 'import numpy as np\n'), ((2858, 2881), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'ycos', '"""--"""'], {}), "(x, ycos, '--')\n", (2866, 2881), True, 'import matplotlib.pyplot as plt\n'), ((547, 569), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (559, 569), False, 'from PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QWidget, QAction, QTabWidget, QVBoxLayout\n'), ((596, 609), 'PyQt5.QtWidgets.QMainWindow', 'QMainWindow', ([], {}), '()\n', (607, 609), False, 'from PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QWidget, QAction, QTabWidget, QVBoxLayout\n'), ((868, 880), 'PyQt5.QtWidgets.QTabWidget', 'QTabWidget', ([], {}), '()\n', (878, 880), False, 'from PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QWidget, QAction, QTabWidget, QVBoxLayout\n'), ((1122, 1131), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (1129, 1131), False, 'from PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QWidget, QAction, QTabWidget, QVBoxLayout\n'), ((1149, 1162), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (1160, 1162), False, 'from PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, 
QWidget, QAction, QTabWidget, QVBoxLayout\n'), ((1320, 1340), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['figure'], {}), '(figure)\n', (1332, 1340), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((1364, 1402), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['new_canvas', 'new_tab'], {}), '(new_canvas, new_tab)\n', (1381, 1402), True, 'from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\n'), ((2383, 2408), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (2398, 2408), False, 'import sys, os\n'), ((2424, 2449), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (2438, 2449), False, 'import sys, os\n'), ((2463, 2485), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (2474, 2485), False, 'import sys, os\n'), ((2506, 2543), 'os.path.join', 'os.path.join', (["(filepath + '.' + format)"], {}), "(filepath + '.' + format)\n", (2518, 2543), False, 'import sys, os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 28 13:22:26 2018
@author: <NAME> <<EMAIL>>
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from cartomap import geogmap as gm
from glob import glob
from dateutil import parser
import h5py, os
import yaml, platform
from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones
from numpy import fromfile, float32, linspace, floor, ceil, add, multiply, copy
from numpy import meshgrid, rot90, flip, ndarray, squeeze, nan, divide, isin
from numpy.ma import masked_invalid
from datetime import datetime, timedelta
from scipy import ndimage
from typing import Union
from gpstec import gpstec
from scipy.interpolate import griddata
import concurrent.futures
def interpolateTEC(im: Union[list, ndarray] = None,
x0 = None, y0 = None,
xgrid = None, ygrid = None,
N: int = 512, res=1,
method: str = 'linear'):
assert im is not None, 'Invalid input argument. Has to be a list or np.ndarray with a length of at least 1'
if x0 is None or y0 is None:
x0, y0 = meshgrid(arange(im.shape[0]), arange(im.shape[1]))
x0 = x0.T
y0 = y0.T
mask = masked_invalid(im)
x0 = x0[~mask.mask]
y0 = y0[~mask.mask]
X = im[~mask.mask]
if xgrid is None or ygrid is None:
xgrid, ygrid = meshgrid(arange(0, im.shape[0], res),
arange(0, im.shape[1], res))
xgrid = xgrid.T
ygrid = ygrid.T
z = griddata((x0,y0), X.ravel(), (xgrid, ygrid), method=method, fill_value=nan)
return z
def _toLuma(x):
"""
RBG -> Luma conversion
After https://en.wikipedia.org/wiki/Luma_(video)
"""
rr = multiply(x[:,:,0], 0.2126)
gg = multiply(x[:,:,1], 0.7152)
bb = multiply(x[:,:,2], 0.0722)
yy = add(rr,gg,bb)
return yy
def returndTEC(fn,dtype='single',darg=1,time='dt'):
"""
Return a single slice with coordinates from the HDF image collection. Multi
type query:
dtype = single:
darg = i-th element of the array. Must be an integer
darg = timestamp. It will find the closes time stamp in the collection
and return the slice with coordinates. Input either datetime.datetime
or strng which is parsed via parser.parse()
time = return time format. If dt = posix, else datetime.datetime
Return:
time[dt,posix], xgrid, ygrid, image
"""
def _getIndex(t,t0):
i = abs(t-t0).argmin()
return i
f = h5py.File(fn, 'r')
xgrid = f['data/xgrid'].value
ygrid = f['data/ygrid'].value
t0 = f['data/time'].value
t = array([datetime.utcfromtimestamp(t) for t in t0])
im = f['data/im']
if dtype == 'single':
i = darg
im = f['data/im'][i]
elif dtype == 't':
if isinstance(darg,str):
darg = parser.parse(darg)
elif isinstance(darg, datetime):
pass
else:
raise("'darg' must be datetime or stging type")
i = _getIndex(t, darg)
im = f['data/im'][i]
elif dtype == 'treq':
if isinstance(darg, (list, ndarray)):
darg = [parser.parse(d) for d in darg]
elif isinstance(darg[0], datetime):
pass
else:
raise("'darg' must be datetime or stging type")
i1 = _getIndex(t, darg[0])
i2 = _getIndex(t, darg[1])
im = f['data/im'][i1:i2]
t = t[i1:i2]
if time == 'posix':
t = t0
return t, xgrid, ygrid, im
def returnNEXRAD(folder, downsample=1, dtype='single',darg='',im_mask=220, RGB=0):
import nexrad_quickplot as nq
if dtype == 'single':
nqr = nq.load(folder + darg, downsample=downsample)
nqr_lon = nqr.lon
nqr_lat = nqr.lat
nqr_im = nqr.values
if not RGB:
nqr_im= _toLuma(nqr_im)
Z = flip(rot90(ma.masked_where((nqr_im>=im_mask),nqr_im),2),1)
else:
Z = ma.masked_where((nqr_im>=im_mask),nqr_im)
X,Y = meshgrid(nqr_lon,nqr_lat)
return X,Y,Z
def getNeighbours(image,i,j,N=3):
"""
Return an array of <=9 neighbour pixel of an image with a center at (i,j)
"""
nbg = []
m = int(floor(N/2))
M = int(ceil(N/2))
for k in arange(i-m, i+M):
for l in arange(j-m, j+M):
try:
nbg.append(image[k,l])
except:
pass
return array(nbg)
def fillPixels(im, N=1):
"""
Fill in the dead pixels. If a dead pixel has a least 4 finite neighbour
pixel, than replace the center pixel with a mean valuse of the neighbours
"""
X = im.shape[0]-1
Y = im.shape[1]-1
imcopy = copy(im)
for n in range(N):
skip = int(floor((3+n)/2))
starti = 0
startj = 0
forwardi = int(floor(0.6*X))
backwardi = int(floor(0.4*X))
if n%2 == 0:
for i in arange(starti, forwardi, skip):
for j in arange(startj, Y, skip):
# Check if th epixel is dead, i.e. empty
if isnan(im[i,j]):
# Get its neighbours as a np array
nbg = getNeighbours(imcopy,i,j,N=(3+n))
# If there are at leas 4 neighbours, replace the value with a mean
if sum(isfinite(nbg)) >= 4:
ix = where(isfinite(nbg))[0]
avg = mean(nbg[ix])
im[i,j] = avg
for i in arange(X, backwardi, -skip):
for j in arange(Y, 0, -skip):
# Check if th epixel is dead, i.e. empty
if isnan(im[i,j]):
# Get its neighbours as a np array
nbg = getNeighbours(imcopy,i,j,N=(3+n))
# If there are at leas 4 neighbours, replace the value with a mean
if sum(isfinite(nbg)) >= 4:
ix = where(isfinite(nbg))[0]
avg = mean(nbg[ix])
im[i,j] = avg
else:
for j in arange(startj, Y, skip):
for i in arange(starti, forwardi, skip):
# Check if th epixel is dead, i.e. empty
if isnan(im[i,j]):
# Get its neighbours as a np array
nbg = getNeighbours(imcopy,i,j,N=(3+n))
# If there are at leas 4 neighbours, replace the value with a mean
if sum(isfinite(nbg)) >= 4:
ix = where(isfinite(nbg))[0]
avg = mean(nbg[ix])
im[i,j] = avg
for j in arange(Y, 0, -skip):
for i in arange(X, backwardi, -skip):
# Check if th epixel is dead, i.e. empty
if isnan(im[i,j]):
# Get its neighbours as a np array
nbg = getNeighbours(imcopy,i,j,N=(3+n))
# If there are at leas 4 neighbours, replace the value with a mean
if sum(isfinite(nbg)) >= 4:
ix = where(isfinite(nbg))[0]
avg = mean(nbg[ix])
im[i,j] = avg
return im
def getEUVMaskCoordinates(latlim=[-89.5,89.5],lonlim=[-180,180],nlat=180,nlon=360):
xgrid, ygrid = mgrid[lonlim[0]:lonlim[1]:nlon*1j, latlim[0]:latlim[1]:nlat*1j]
return xgrid,ygrid
def getEUVMask(time,nlat=180,nlon=360,
EUVDIR = '/home/smrak/Documents/eclipse/MapsSDOdisk300/'):
"""
I: time in posix
"""
xgrid, ygrid = getEUVMaskCoordinates(nlat=nlat, nlon=nlon)
npts = nlat*nlon
#Import EUV mask files
flist = sort(glob(EUVDIR+'*.bin'))
if isinstance(time, float) or isinstance(time, int):
Tframe_full = datetime.utcfromtimestamp(time)
else:
Tframe_full = time
if int(Tframe_full.strftime('%H')) >= 16 and int(Tframe_full.strftime('%H')) < 22:
# find right filename extension
TframeHM = Tframe_full.strftime('%H%M')
flist = sort(glob(EUVDIR+'*'+TframeHM+'.bin'))
# Get Mask
data = fromfile(flist[0],count=npts, dtype=float32).reshape((nlat,nlon))
return xgrid, ygrid, data
else:
return 0, 0, 0
def makeImage(im, pixel_iter):
if len(im.shape) == 2:
im = fillPixels(im, pixel_iter)
im = fillPixels(im)
im = ndimage.median_filter(im, 3)
elif len(im.shape) == 3:
ims = nan * copy(im)
for i in range(im.shape[0]):
im0 = fillPixels(im[i], pixel_iter)
im0 = fillPixels(im0)
ims[i] = ndimage.median_filter(im0, 3)
im = mean(ims, axis=0)
return im
def getTotality():
totality_path = h5py.File('/home/smrak/Documents/eclipse/totality.h5', 'r')
lat_n = totality_path['path/north_lat'].value
lon_n = totality_path['path/north_lon'].value
lat_s = totality_path['path/south_lat'].value
lon_s = totality_path['path/south_lon'].value
return lon_s, lat_s, lon_n, lat_n
def getTotalityCenter(fn='/home/smrak/Documents/eclipse/totality.h5'):
totality_path = h5py.File(fn, 'r')
lat_c = totality_path['path/center_lat'].value
lon_c = totality_path['path/center_lon'].value
return lon_c, lat_c
# Imageinput
if __name__ == '__main__':
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument('file', type=str, help='Input HDF5 file')
p.add_argument('--tlim', type=str, help='Processing time; start,end', default=None, nargs=2)
p.add_argument('--cfg', type=str)
p.add_argument('--skip', type=int, default=None)
p.add_argument('--odir', type=str, help='Output directory', default=None)
p.add_argument('-m', '--cfgmap', type=str, help='Yaml configuration file with the map settings',
default='map/example_map.yaml')
p.add_argument('--clim', type=float, nargs=2, default=None)
p.add_argument('--average', type=int, default=1)
p.add_argument('--projection', type=str, default=None)
p.add_argument('--cmap', type=str, default=None)
p.add_argument('--latlim', type=float, nargs=2, default=None)
p.add_argument('--lonlim', type=float, nargs=2, default=None)
p.add_argument('--tec', type=str, help='TEC file', default=None)
P = p.parse_args()
assert P.file.endswith('.h5')
gpsfn = P.file
try:
stream = yaml.load(open(P.cfg, 'r'), Loader=yaml.SafeLoader)
except:
stream = yaml.load(open(os.path.join(os.getcwd(), P.cfg), 'r'), Loader=yaml.SafeLoader)
fntec = P.tec if P.tec is not None else None
fillpixel_iter = stream.get('fillpixel_iter')
skip = P.skip if (P.skip is not None) else stream.get('skip')
projection = P.projection if (P.projection is not None) else stream.get('projection')
latlim = P.latlim if (P.latlim is not None) else stream.get('latlim')
lonlim = P.lonlim if (P.lonlim is not None) else stream.get('lonlim')
clim = P.clim if (P.clim is not None) else stream.get('clim')
cmap = P.cmap if (P.cmap is not None) else stream.get('cmap')
# Coordinates' lines
parallels = stream.get('parallels')
meridians = stream.get('meridians')
mag_parallels = stream.get('mag_parallels')
mag_meridians = stream.get('mag_meridians')
mlon_cs = stream.get('mlon_cs')
nightshade = stream.get('nightshade')
if (mag_parallels is not None) or (mag_meridians is not None):
apex = True
else:
apex = False
# Map settings
mapcfg = P.cfgmap
try:
streammap = yaml.load(open(mapcfg, 'r'), Loader=yaml.SafeLoader)
except:
streammap = yaml.load(open(os.path.join(os.getcwd(), mapcfg), 'r'), Loader=yaml.SafeLoader)
figure_size = streammap.get('figure_size')
background_color = streammap.get('background_color')
border_color = streammap.get('border_color')
grid_color = streammap.get('grid_color')
grid_linestyle = streammap.get('grid_linestyle')
grid_linewidth = streammap.get('grid_linewidth')
terrain = streammap.get('terrain')
states = streammap.get('states')
# Image params
image_type = streammap.get('image_type')
image_nlevels = streammap.get('image_nlevels')
# Overlays @ eclipse
totality = streammap.get('totality')
penumbra = streammap.get('penumbra')
laplacian = streammap.get('laplacian')
laplacian_levels = streammap.get('laplacian_levels')
penumbra_levels = streammap.get('penumbra_levels')
# Marker
marker = streammap.get('marker')
marker_color = streammap.get('marker_color')
marker_size = streammap.get('marker_size')
marker_width = streammap.get('marker_width')
#Averaging
average = P.average if (P.average is not None) else 1
# GPS Images
gpsdata = h5py.File(gpsfn, 'r')
time = gpsdata['data/time'][:]
xgrid = gpsdata['data/xgrid'][:]
ygrid = gpsdata['data/ygrid'][:]
im = gpsdata['data/im'][:][:][:]
gpsdata.close()
xg, yg = meshgrid(xgrid, ygrid)
try:
altkm = gpsdata.attrs['altkm']
except:
altkm = int(os.path.split(gpsfn)[1][-13:-10])
datetimetime = array([datetime.utcfromtimestamp(t) for t in time])
dirdatetime = datetimetime[0].strftime('%Y%m%d')
today = datetime.now().strftime('%Y%m%d')
if P.tlim is not None:
if today == parser.parse(P.tlim[0]).strftime('%Y%m%d'):
t0 = parser.parse(dirdatetime + 'T' + P.tlim[0])
else:
t0 = parser.parse(P.tlim[0])
if today == parser.parse(P.tlim[1]).strftime('%Y%m%d'):
t1 = parser.parse(dirdatetime + 'T' + P.tlim[1])
else:
t1 = parser.parse(P.tlim[0])
timelim = [t0, t1]
idt = (datetimetime >= timelim[0]) & (datetimetime <= timelim[1])
else:
idt = ones(datetimetime.size, dtype=bool)
dt = datetimetime[idt]
iterate1 = arange(where(idt==1)[0][0], where(idt==1)[0][-1]+1, skip)
iterate2 = arange(0, dt.size, skip)
if fntec is not None:
assert os.path.exists(fntec)
TEC = gpstec.readFromHDF(fntec)
idttec = (TEC['time'] >= timelim[0]) & (TEC['time'] <= timelim[1])
idx = (TEC['xgrid'] >= xgrid.min()) & (TEC['xgrid'] <= xgrid.max())
idy = (TEC['ygrid'] >= ygrid.min()) & (TEC['ygrid'] <= ygrid.max())
xgtec, ygtec = meshgrid(TEC['xgrid'][idx], TEC['ygrid'][idy])
idttec = isin(TEC['time'], dt)
T0t = TEC['tecim'][idttec]
T0x = T0t[:, idx, :]
T0 = T0x[:, :, idy]
tecdt = TEC['time'][idttec]
# Save
if platform.system() == 'Linux':
odir = P.odir if P.odir is not None else '/media/smrak/gnss/images/'
odir += dirdatetime + '_' + str(int(altkm)) + '_' + str(average) + '_' + str(clim[1]).replace(".", "")
if nightshade:
odir += '_ns'
if P.tec is not None:
odir += '_percent'
odir += '/'
elif platform.system() == 'Windows':
odir = P.odir if P.odir is not None else os.path.split(gpsfn)[0] + '\\images\\'
odir += dirdatetime + '_' + str(int(altkm)) + '_' + str(average) + '_' + str(clim[1]).replace(".", "")
if nightshade:
odir += '_ns'
if P.tec is not None:
odir += '_percent'
odir += '\\'
#RUN
with concurrent.futures.ThreadPoolExecutor(max_workers=50) as ex:
im = [ex.submit(makeImage, squeeze(im[i : i+average]), fillpixel_iter) for i in iterate1]
#
j = 0
for i in iterate2:
print ('Plotting figure {}/{}'.format(j+1,iterate2.shape[0]))
# Get a map
fig, ax = gm.plotCartoMap(figsize=figure_size, projection=projection, #title=dt[i],
terrain=terrain, states=states, border_color=border_color,
background_color=background_color,
lonlim=lonlim,latlim=latlim,
title="{}, alt = {} km".format(dt[i], altkm),
meridians=meridians, parallels=parallels,
grid_linewidth=grid_linewidth,grid_color=grid_color,
apex=apex, mlon_cs=mlon_cs, date=dt[i],
nightshade=nightshade, ns_alpha=0.05,
mlon_levels=mag_meridians, mlat_levels=mag_parallels,
mlon_labels=False, mlat_labels=False, mgrid_style='--',
mlon_colors='w', mlat_colors='w', terminator=1, terminator_altkm=350,
)
image = im[j].result()
j+=1
# dTEC/TEC ?
if fntec is not None:
assert os.path.exists(fntec)
idttec0 = abs(tecdt - dt[i]).argmin()
assert abs(tecdt[idttec0] - dt[i]) < timedelta(minutes=10)
tecim = T0[idttec0]
T00 = interpolateTEC(im=tecim, x0=xgtec, y0=ygtec,
xgrid=xg, ygrid=yg,
method='linear')
image = divide(image, T00) * 100
label = 'dTEC [%]'
else:
label = 'dTEC [TECu]'
# Plot image
try:
if image_type == 'contourf':
levels = linspace(clim[0], clim[1], 40)
image[image<=clim[0]] = levels[0]
image[image>=clim[1]] = levels[-1]
imax = plt.contourf(xgrid,ygrid,image.T, levels=levels,cmap=cmap, transform=ccrs.PlateCarree())
imax.cmap.set_under('b')
imax.cmap.set_over('r')
else:
imax = plt.pcolormesh(xgrid,ygrid,image.T,cmap=cmap, transform=ccrs.PlateCarree())
plt.clim(clim)
# cbar = plt.colorbar()
# cbar.set_label('$\Delta$TEC [TECu]')
posn = ax.get_position()
cax = fig.add_axes([posn.x0+posn.width+0.01, posn.y0, 0.02, posn.height])
fig.colorbar(imax, cax=cax, label=label,
ticks=[clim[0], clim[0]/2, 0, clim[1]/2, clim[1]])
if totality:
lon_c, lat_c = getTotalityCenter()
plt.plot(lon_c, lat_c-1, 'k', lw=1, transform=ccrs.PlateCarree())
if penumbra:
cmap1 = colors.LinearSegmentedColormap.from_list("", ['white', 'magenta'])
try:
xgm, ygm, data = getEUVMask(dt[i])
if laplacian:
data = abs(ndimage.filters.laplace(data))
if laplacian_levels is None:
laplacian_levels = [0.005,0.035,10]
levels = linspace(laplacian_levels[0],laplacian_levels[1],laplacian_levels[2])
plt.contour(xgm,ygm,data.T, levels, cmap=cmap1,transform=ccrs.PlateCarree())#, alpha=0.9, norm=colors.PowerNorm(gamma=0.7),
else:
if penumbra_levels is not None:
penumbra_levels = [0.2,1,40]
levels = linspace(penumbra_levels[0],penumbra_levels[1],penumbra_levels[2])
lw = 0.5
plt.contour(xgm,ygm,data.T, levels, colors='w', linewidths=lw, transform=ccrs.PlateCarree())
except:
pass
# Marker
# if position is not None:
# try:
# plt.plot(position[0],position[1], marker, c=marker_color, ms=marker_size, mew=marker_width, transform=ccrs.PlateCarree())
# except:
# print ('Couldnt plot the marker')
# ax.set_extent([maplonlim[0], maplonlim[1],
# maplatlim[0], maplatlim[1]],crs=ccrs.PlateCarree())
# ax.set_aspect('auto')
except Exception as e:
print (e)
if not os.path.exists(odir):
import subprocess
if platform.system() == 'Linux':
subprocess.call('mkdir -p {}'.format(odir), shell=True, timeout=2)
elif platform.system() == 'Windows':
subprocess.call('mkdir "{}"'.format(odir), shell=True, timeout=2)
tit = dt[i].strftime('%m%d_%H%M')
ofn = odir+str(tit)+'.png'
plt.savefig(ofn, dpi=150)
plt.close()
| [
"datetime.datetime.utcfromtimestamp",
"numpy.fromfile",
"numpy.isin",
"numpy.array",
"numpy.isfinite",
"datetime.timedelta",
"numpy.arange",
"numpy.divide",
"os.path.exists",
"numpy.multiply",
"numpy.mean",
"argparse.ArgumentParser",
"numpy.where",
"numpy.ma.masked_where",
"nexrad_quickp... | [((1304, 1322), 'numpy.ma.masked_invalid', 'masked_invalid', (['im'], {}), '(im)\n', (1318, 1322), False, 'from numpy.ma import masked_invalid\n'), ((1815, 1843), 'numpy.multiply', 'multiply', (['x[:, :, 0]', '(0.2126)'], {}), '(x[:, :, 0], 0.2126)\n', (1823, 1843), False, 'from numpy import fromfile, float32, linspace, floor, ceil, add, multiply, copy\n'), ((1851, 1879), 'numpy.multiply', 'multiply', (['x[:, :, 1]', '(0.7152)'], {}), '(x[:, :, 1], 0.7152)\n', (1859, 1879), False, 'from numpy import fromfile, float32, linspace, floor, ceil, add, multiply, copy\n'), ((1887, 1915), 'numpy.multiply', 'multiply', (['x[:, :, 2]', '(0.0722)'], {}), '(x[:, :, 2], 0.0722)\n', (1895, 1915), False, 'from numpy import fromfile, float32, linspace, floor, ceil, add, multiply, copy\n'), ((1923, 1938), 'numpy.add', 'add', (['rr', 'gg', 'bb'], {}), '(rr, gg, bb)\n', (1926, 1938), False, 'from numpy import fromfile, float32, linspace, floor, ceil, add, multiply, copy\n'), ((2638, 2656), 'h5py.File', 'h5py.File', (['fn', '"""r"""'], {}), "(fn, 'r')\n", (2647, 2656), False, 'import h5py, os\n'), ((4111, 4137), 'numpy.meshgrid', 'meshgrid', (['nqr_lon', 'nqr_lat'], {}), '(nqr_lon, nqr_lat)\n', (4119, 4137), False, 'from numpy import meshgrid, rot90, flip, ndarray, squeeze, nan, divide, isin\n'), ((4357, 4377), 'numpy.arange', 'arange', (['(i - m)', '(i + M)'], {}), '(i - m, i + M)\n', (4363, 4377), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((4518, 4528), 'numpy.array', 'array', (['nbg'], {}), '(nbg)\n', (4523, 4528), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((4782, 4790), 'numpy.copy', 'copy', (['im'], {}), '(im)\n', (4786, 4790), False, 'from numpy import fromfile, float32, linspace, floor, ceil, add, multiply, copy\n'), ((8986, 9045), 'h5py.File', 'h5py.File', (['"""/home/smrak/Documents/eclipse/totality.h5"""', '"""r"""'], {}), 
"('/home/smrak/Documents/eclipse/totality.h5', 'r')\n", (8995, 9045), False, 'import h5py, os\n'), ((9397, 9415), 'h5py.File', 'h5py.File', (['fn', '"""r"""'], {}), "(fn, 'r')\n", (9406, 9415), False, 'import h5py, os\n'), ((9633, 9649), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (9647, 9649), False, 'from argparse import ArgumentParser\n'), ((13064, 13085), 'h5py.File', 'h5py.File', (['gpsfn', '"""r"""'], {}), "(gpsfn, 'r')\n", (13073, 13085), False, 'import h5py, os\n'), ((13265, 13287), 'numpy.meshgrid', 'meshgrid', (['xgrid', 'ygrid'], {}), '(xgrid, ygrid)\n', (13273, 13287), False, 'from numpy import meshgrid, rot90, flip, ndarray, squeeze, nan, divide, isin\n'), ((14245, 14269), 'numpy.arange', 'arange', (['(0)', 'dt.size', 'skip'], {}), '(0, dt.size, skip)\n', (14251, 14269), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((3804, 3849), 'nexrad_quickplot.load', 'nq.load', (['(folder + darg)'], {'downsample': 'downsample'}), '(folder + darg, downsample=downsample)\n', (3811, 3849), True, 'import nexrad_quickplot as nq\n'), ((4059, 4101), 'numpy.ma.masked_where', 'ma.masked_where', (['(nqr_im >= im_mask)', 'nqr_im'], {}), '(nqr_im >= im_mask, nqr_im)\n', (4074, 4101), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((4309, 4321), 'numpy.floor', 'floor', (['(N / 2)'], {}), '(N / 2)\n', (4314, 4321), False, 'from numpy import fromfile, float32, linspace, floor, ceil, add, multiply, copy\n'), ((4333, 4344), 'numpy.ceil', 'ceil', (['(N / 2)'], {}), '(N / 2)\n', (4337, 4344), False, 'from numpy import fromfile, float32, linspace, floor, ceil, add, multiply, copy\n'), ((4392, 4412), 'numpy.arange', 'arange', (['(j - m)', '(j + M)'], {}), '(j - m, j + M)\n', (4398, 4412), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((7932, 7954), 'glob.glob', 'glob', (["(EUVDIR + '*.bin')"], {}), "(EUVDIR + 
'*.bin')\n", (7936, 7954), False, 'from glob import glob\n'), ((8033, 8064), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['time'], {}), '(time)\n', (8058, 8064), False, 'from datetime import datetime, timedelta\n'), ((8639, 8667), 'scipy.ndimage.median_filter', 'ndimage.median_filter', (['im', '(3)'], {}), '(im, 3)\n', (8660, 8667), False, 'from scipy import ndimage\n'), ((14085, 14120), 'numpy.ones', 'ones', (['datetimetime.size'], {'dtype': 'bool'}), '(datetimetime.size, dtype=bool)\n', (14089, 14120), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((14316, 14337), 'os.path.exists', 'os.path.exists', (['fntec'], {}), '(fntec)\n', (14330, 14337), False, 'import h5py, os\n'), ((14352, 14377), 'gpstec.gpstec.readFromHDF', 'gpstec.readFromHDF', (['fntec'], {}), '(fntec)\n', (14370, 14377), False, 'from gpstec import gpstec\n'), ((14628, 14674), 'numpy.meshgrid', 'meshgrid', (["TEC['xgrid'][idx]", "TEC['ygrid'][idy]"], {}), "(TEC['xgrid'][idx], TEC['ygrid'][idy])\n", (14636, 14674), False, 'from numpy import meshgrid, rot90, flip, ndarray, squeeze, nan, divide, isin\n'), ((14692, 14713), 'numpy.isin', 'isin', (["TEC['time']", 'dt'], {}), "(TEC['time'], dt)\n", (14696, 14713), False, 'from numpy import meshgrid, rot90, flip, ndarray, squeeze, nan, divide, isin\n'), ((14865, 14882), 'platform.system', 'platform.system', ([], {}), '()\n', (14880, 14882), False, 'import yaml, platform\n'), ((20507, 20532), 'matplotlib.pyplot.savefig', 'plt.savefig', (['ofn'], {'dpi': '(150)'}), '(ofn, dpi=150)\n', (20518, 20532), True, 'import matplotlib.pyplot as plt\n'), ((20541, 20552), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (20550, 20552), True, 'import matplotlib.pyplot as plt\n'), ((1223, 1242), 'numpy.arange', 'arange', (['im.shape[0]'], {}), '(im.shape[0])\n', (1229, 1242), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((1244, 1263), 
'numpy.arange', 'arange', (['im.shape[1]'], {}), '(im.shape[1])\n', (1250, 1263), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((1465, 1492), 'numpy.arange', 'arange', (['(0)', 'im.shape[0]', 'res'], {}), '(0, im.shape[0], res)\n', (1471, 1492), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((1527, 1554), 'numpy.arange', 'arange', (['(0)', 'im.shape[1]', 'res'], {}), '(0, im.shape[1], res)\n', (1533, 1554), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((2770, 2798), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['t'], {}), '(t)\n', (2795, 2798), False, 'from datetime import datetime, timedelta\n'), ((4833, 4851), 'numpy.floor', 'floor', (['((3 + n) / 2)'], {}), '((3 + n) / 2)\n', (4838, 4851), False, 'from numpy import fromfile, float32, linspace, floor, ceil, add, multiply, copy\n'), ((4910, 4924), 'numpy.floor', 'floor', (['(0.6 * X)'], {}), '(0.6 * X)\n', (4915, 4924), False, 'from numpy import fromfile, float32, linspace, floor, ceil, add, multiply, copy\n'), ((4948, 4962), 'numpy.floor', 'floor', (['(0.4 * X)'], {}), '(0.4 * X)\n', (4953, 4962), False, 'from numpy import fromfile, float32, linspace, floor, ceil, add, multiply, copy\n'), ((5004, 5034), 'numpy.arange', 'arange', (['starti', 'forwardi', 'skip'], {}), '(starti, forwardi, skip)\n', (5010, 5034), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((5620, 5647), 'numpy.arange', 'arange', (['X', 'backwardi', '(-skip)'], {}), '(X, backwardi, -skip)\n', (5626, 5647), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((6243, 6266), 'numpy.arange', 'arange', (['startj', 'Y', 'skip'], {}), '(startj, Y, skip)\n', (6249, 6266), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((6860, 6879), 
'numpy.arange', 'arange', (['Y', '(0)', '(-skip)'], {}), '(Y, 0, -skip)\n', (6866, 6879), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((8298, 8336), 'glob.glob', 'glob', (["(EUVDIR + '*' + TframeHM + '.bin')"], {}), "(EUVDIR + '*' + TframeHM + '.bin')\n", (8302, 8336), False, 'from glob import glob\n'), ((8909, 8926), 'numpy.mean', 'mean', (['ims'], {'axis': '(0)'}), '(ims, axis=0)\n', (8913, 8926), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((13429, 13457), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['t'], {}), '(t)\n', (13454, 13457), False, 'from datetime import datetime, timedelta\n'), ((13539, 13553), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13551, 13553), False, 'from datetime import datetime, timedelta\n'), ((13681, 13724), 'dateutil.parser.parse', 'parser.parse', (["(dirdatetime + 'T' + P.tlim[0])"], {}), "(dirdatetime + 'T' + P.tlim[0])\n", (13693, 13724), False, 'from dateutil import parser\n'), ((13756, 13779), 'dateutil.parser.parse', 'parser.parse', (['P.tlim[0]'], {}), '(P.tlim[0])\n', (13768, 13779), False, 'from dateutil import parser\n'), ((13861, 13904), 'dateutil.parser.parse', 'parser.parse', (["(dirdatetime + 'T' + P.tlim[1])"], {}), "(dirdatetime + 'T' + P.tlim[1])\n", (13873, 13904), False, 'from dateutil import parser\n'), ((13936, 13959), 'dateutil.parser.parse', 'parser.parse', (['P.tlim[0]'], {}), '(P.tlim[0])\n', (13948, 13959), False, 'from dateutil import parser\n'), ((15222, 15239), 'platform.system', 'platform.system', ([], {}), '()\n', (15237, 15239), False, 'import yaml, platform\n'), ((16932, 16953), 'os.path.exists', 'os.path.exists', (['fntec'], {}), '(fntec)\n', (16946, 16953), False, 'import h5py, os\n'), ((17955, 17969), 'matplotlib.pyplot.clim', 'plt.clim', (['clim'], {}), '(clim)\n', (17963, 17969), True, 'import matplotlib.pyplot as plt\n'), ((20110, 20130), 
'os.path.exists', 'os.path.exists', (['odir'], {}), '(odir)\n', (20124, 20130), False, 'import h5py, os\n'), ((2983, 3001), 'dateutil.parser.parse', 'parser.parse', (['darg'], {}), '(darg)\n', (2995, 3001), False, 'from dateutil import parser\n'), ((3989, 4031), 'numpy.ma.masked_where', 'ma.masked_where', (['(nqr_im >= im_mask)', 'nqr_im'], {}), '(nqr_im >= im_mask, nqr_im)\n', (4004, 4031), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((5061, 5084), 'numpy.arange', 'arange', (['startj', 'Y', 'skip'], {}), '(startj, Y, skip)\n', (5067, 5084), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((5674, 5693), 'numpy.arange', 'arange', (['Y', '(0)', '(-skip)'], {}), '(Y, 0, -skip)\n', (5680, 5693), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((6293, 6323), 'numpy.arange', 'arange', (['starti', 'forwardi', 'skip'], {}), '(starti, forwardi, skip)\n', (6299, 6323), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((6906, 6933), 'numpy.arange', 'arange', (['X', 'backwardi', '(-skip)'], {}), '(X, backwardi, -skip)\n', (6912, 6933), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((8366, 8411), 'numpy.fromfile', 'fromfile', (['flist[0]'], {'count': 'npts', 'dtype': 'float32'}), '(flist[0], count=npts, dtype=float32)\n', (8374, 8411), False, 'from numpy import fromfile, float32, linspace, floor, ceil, add, multiply, copy\n'), ((8717, 8725), 'numpy.copy', 'copy', (['im'], {}), '(im)\n', (8721, 8725), False, 'from numpy import fromfile, float32, linspace, floor, ceil, add, multiply, copy\n'), ((8866, 8895), 'scipy.ndimage.median_filter', 'ndimage.median_filter', (['im0', '(3)'], {}), '(im0, 3)\n', (8887, 8895), False, 'from scipy import ndimage\n'), ((14179, 14194), 'numpy.where', 'where', (['(idt == 1)'], {}), '(idt == 1)\n', 
(14184, 14194), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((15698, 15724), 'numpy.squeeze', 'squeeze', (['im[i:i + average]'], {}), '(im[i:i + average])\n', (15705, 15724), False, 'from numpy import meshgrid, rot90, flip, ndarray, squeeze, nan, divide, isin\n'), ((17053, 17074), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(10)'}), '(minutes=10)\n', (17062, 17074), False, 'from datetime import datetime, timedelta\n'), ((17295, 17313), 'numpy.divide', 'divide', (['image', 'T00'], {}), '(image, T00)\n', (17301, 17313), False, 'from numpy import meshgrid, rot90, flip, ndarray, squeeze, nan, divide, isin\n'), ((17500, 17530), 'numpy.linspace', 'linspace', (['clim[0]', 'clim[1]', '(40)'], {}), '(clim[0], clim[1], 40)\n', (17508, 17530), False, 'from numpy import fromfile, float32, linspace, floor, ceil, add, multiply, copy\n'), ((18516, 18582), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'colors.LinearSegmentedColormap.from_list', (['""""""', "['white', 'magenta']"], {}), "('', ['white', 'magenta'])\n", (18556, 18582), True, 'import matplotlib.colors as colors\n'), ((20177, 20194), 'platform.system', 'platform.system', ([], {}), '()\n', (20192, 20194), False, 'import yaml, platform\n'), ((5170, 5185), 'numpy.isnan', 'isnan', (['im[i, j]'], {}), '(im[i, j])\n', (5175, 5185), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((5779, 5794), 'numpy.isnan', 'isnan', (['im[i, j]'], {}), '(im[i, j])\n', (5784, 5794), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((6409, 6424), 'numpy.isnan', 'isnan', (['im[i, j]'], {}), '(im[i, j])\n', (6414, 6424), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((7019, 7034), 'numpy.isnan', 'isnan', (['im[i, j]'], {}), '(im[i, j])\n', (7024, 7034), False, 'from numpy import array, where, ma, isnan, arange, mean, 
isfinite, mgrid, sort, ones\n'), ((13620, 13643), 'dateutil.parser.parse', 'parser.parse', (['P.tlim[0]'], {}), '(P.tlim[0])\n', (13632, 13643), False, 'from dateutil import parser\n'), ((13800, 13823), 'dateutil.parser.parse', 'parser.parse', (['P.tlim[1]'], {}), '(P.tlim[1])\n', (13812, 13823), False, 'from dateutil import parser\n'), ((14200, 14215), 'numpy.where', 'where', (['(idt == 1)'], {}), '(idt == 1)\n', (14205, 14215), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((20308, 20325), 'platform.system', 'platform.system', ([], {}), '()\n', (20323, 20325), False, 'import yaml, platform\n'), ((3286, 3301), 'dateutil.parser.parse', 'parser.parse', (['d'], {}), '(d)\n', (3298, 3301), False, 'from dateutil import parser\n'), ((10773, 10784), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10782, 10784), False, 'import h5py, os\n'), ((11949, 11960), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11958, 11960), False, 'import h5py, os\n'), ((13368, 13388), 'os.path.split', 'os.path.split', (['gpsfn'], {}), '(gpsfn)\n', (13381, 13388), False, 'import h5py, os\n'), ((15303, 15323), 'os.path.split', 'os.path.split', (['gpsfn'], {}), '(gpsfn)\n', (15316, 15323), False, 'import h5py, os\n'), ((17724, 17742), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (17740, 17742), True, 'import cartopy.crs as ccrs\n'), ((17922, 17940), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (17938, 17940), True, 'import cartopy.crs as ccrs\n'), ((18447, 18465), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (18463, 18465), True, 'import cartopy.crs as ccrs\n'), ((18909, 18980), 'numpy.linspace', 'linspace', (['laplacian_levels[0]', 'laplacian_levels[1]', 'laplacian_levels[2]'], {}), '(laplacian_levels[0], laplacian_levels[1], laplacian_levels[2])\n', (18917, 18980), False, 'from numpy import fromfile, float32, linspace, floor, ceil, add, multiply, copy\n'), ((19300, 19368), 
'numpy.linspace', 'linspace', (['penumbra_levels[0]', 'penumbra_levels[1]', 'penumbra_levels[2]'], {}), '(penumbra_levels[0], penumbra_levels[1], penumbra_levels[2])\n', (19308, 19368), False, 'from numpy import fromfile, float32, linspace, floor, ceil, add, multiply, copy\n'), ((5543, 5556), 'numpy.mean', 'mean', (['nbg[ix]'], {}), '(nbg[ix])\n', (5547, 5556), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((6152, 6165), 'numpy.mean', 'mean', (['nbg[ix]'], {}), '(nbg[ix])\n', (6156, 6165), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((6782, 6795), 'numpy.mean', 'mean', (['nbg[ix]'], {}), '(nbg[ix])\n', (6786, 6795), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((7392, 7405), 'numpy.mean', 'mean', (['nbg[ix]'], {}), '(nbg[ix])\n', (7396, 7405), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((18728, 18757), 'scipy.ndimage.filters.laplace', 'ndimage.filters.laplace', (['data'], {}), '(data)\n', (18751, 18757), False, 'from scipy import ndimage\n'), ((5431, 5444), 'numpy.isfinite', 'isfinite', (['nbg'], {}), '(nbg)\n', (5439, 5444), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((6040, 6053), 'numpy.isfinite', 'isfinite', (['nbg'], {}), '(nbg)\n', (6048, 6053), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((6670, 6683), 'numpy.isfinite', 'isfinite', (['nbg'], {}), '(nbg)\n', (6678, 6683), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((7280, 7293), 'numpy.isfinite', 'isfinite', (['nbg'], {}), '(nbg)\n', (7288, 7293), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((19060, 19078), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (19076, 
19078), True, 'import cartopy.crs as ccrs\n'), ((19497, 19515), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (19513, 19515), True, 'import cartopy.crs as ccrs\n'), ((5491, 5504), 'numpy.isfinite', 'isfinite', (['nbg'], {}), '(nbg)\n', (5499, 5504), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((6100, 6113), 'numpy.isfinite', 'isfinite', (['nbg'], {}), '(nbg)\n', (6108, 6113), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((6730, 6743), 'numpy.isfinite', 'isfinite', (['nbg'], {}), '(nbg)\n', (6738, 6743), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n'), ((7340, 7353), 'numpy.isfinite', 'isfinite', (['nbg'], {}), '(nbg)\n', (7348, 7353), False, 'from numpy import array, where, ma, isnan, arange, mean, isfinite, mgrid, sort, ones\n')] |
#!/usr/bin/env python
"""Plot salinity vs. salinity difference for Saildrone and each SSS platform (Figure 3).

Compares the Saildrone SBE37 salinity against three satellite products
(RSS 70 km, RSS 40 km, JPL SMAP) and the HYCOM model, drawing one scatter
panel of (SBE37 salinity, SBE37 - product difference) per product.
"""
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
import datetime
import sys
import cftime
from decimal import Decimal

# Data directory for collocated Saildrone and satellite orbital data.
data_sat = 'C:/Users/intern-1/Documents/GitHub/paper_software/2020_ATOMIC_Salinity/data/sss_collocations_orbital_norepeat/'
data_sat2 = 'C:/Users/intern-1/Documents/GitHub/paper_software/2020_ATOMIC_Salinity/data/sss_collocations_8day_norepeat/'
# Data directory for HYCOM model output.
data_dir1 = 'C:/Users/intern-1/Documents/hycom_files/'

# combine='nested' is required alongside concat_dim on modern xarray.
hycom = xr.open_mfdataset(data_dir1 + '*nc4', combine='nested', concat_dim='time').isel(depth=0)
# Remove duplicate timesteps.
_, index = np.unique(hycom['time'], return_index=True)
hycom2 = hycom.isel(time=index)
# Change HYCOM longitudes to match the Saildrone convention (0-359 -> -180-179).
hycom_lon = hycom2.assign_coords(longitude=(((hycom2.lon + 180) % 360) - 180))
hycom2 = hycom_lon.swap_dims({'lon': 'longitude'})
# Remove NaNs from the HYCOM data by nearest-neighbour fill along each axis.
filled = hycom2.chunk({'time': -1}).interpolate_na(dim="time", method="nearest", fill_value='extrapolate')
filled2 = filled.interpolate_na(dim="lat", method="nearest", fill_value='extrapolate')
filled3 = filled2.interpolate_na(dim="lon", method="nearest", fill_value='extrapolate')

# Collocated Saildrone and satellite data (Saildrone 1026).
JPL = xr.open_dataset(glob(data_sat + '*jpl*.nc')[0], decode_times=False)
RSS = xr.open_dataset(glob(data_sat + '*rss*.nc')[0], decode_times=False)
legend_properties = {'weight': 'semibold', 'size': '12'}

# Convert times from nanoseconds to seconds.
# np.float was removed in NumPy 1.24; np.float64 is the equivalent dtype.
ns = 1e-9
test = (RSS.time.values * ns).astype(np.float64)
test2 = (JPL.time.values * ns).astype(np.float64)

# Swap dimensions so both datasets are indexed by time.
RSS = RSS.swap_dims({'ob': 'time'})
JPL = JPL.swap_dims({'ob': 'time'})
ss_times = [datetime.datetime(2020, 1, 17, 0, 0) + datetime.timedelta(seconds=s) for s in test]
jp_times = [datetime.datetime(2020, 1, 17, 0, 0) + datetime.timedelta(seconds=s) for s in test2]
# assign_coords returns a NEW dataset -- the result must be kept.
# (The original discarded it, leaving the time coordinate unchanged.)
RSS = RSS.assign_coords(time=ss_times)
JPL = JPL.assign_coords(time=jp_times)

# Interpolate HYCOM salinity onto the Saildrone track.
hysal = filled3.interp(lat=JPL.lat, longitude=JPL.lon, time=jp_times, method='nearest')
hysal2 = hysal.salinity

# Differences: Saildrone SBE37 minus each product.
mean_sbe_JPL = JPL.SAL_CTD_MEAN
mean_JPL = JPL.smap_SSS
diff_JPL = mean_sbe_JPL - mean_JPL

mean_RSS = RSS.smap_SSS
mean_sbe_RSS = RSS.SAL_CTD_MEAN
mean_RSS_40km = RSS.smap_SSS_40km
diff_RSS = mean_sbe_RSS - mean_RSS
diff_RSS_40 = mean_sbe_RSS - mean_RSS_40km

mean_HYCOM = hysal2
diff_HYCOM = mean_sbe_JPL - mean_HYCOM


def _plot_diff_panel(position, salinity, difference, label, letter, show_xlabel):
    """Draw one scatter panel of salinity vs. (SBE37 - product) difference.

    position    : subplot index in the 2x2 grid (1-4).
    salinity    : x values (SBE37 salinity, psu).
    difference  : y values (SBE37 minus product, psu).
    label       : legend label for the scatter series.
    letter      : panel letter annotation, e.g. 'a)'.
    show_xlabel : whether to label the x axis (bottom-row panels only).
    """
    ax = plt.subplot(2, 2, position)
    plt.scatter(salinity, difference, color='b', label=label)
    ax.set_ylabel("Difference", fontsize=15, fontweight='semibold')
    if show_xlabel:
        ax.set_xlabel("Salinity (psu)", fontsize=15, fontweight='semibold')
    plt.tick_params(axis='both', which='major', labelsize=15)
    ax.set_ylim(-2, 2)
    plt.legend(loc='upper left', prop=legend_properties)
    plt.grid(True, lw=0.5, ls=':')
    plt.xticks(fontweight='semibold')
    plt.yticks(fontweight='semibold')
    ax.text(33.7, 1, letter, color='k', style='normal', fontsize='15', fontweight='semibold')
    # Emphasize the zero-difference gridline (third y gridline in the -2..2 range).
    zero_line = ax.get_ygridlines()[2]
    zero_line.set_color('black')
    zero_line.set_linewidth(1.5)
    return ax


fig = plt.subplots(figsize=(18, 10))
_plot_diff_panel(1, mean_sbe_RSS.values, diff_RSS.values, 'SBE37 - RSS70', 'a)', show_xlabel=False)
_plot_diff_panel(3, mean_sbe_JPL.values, diff_JPL.values, 'SBE37 - JPL', 'b)', show_xlabel=True)
_plot_diff_panel(2, mean_sbe_RSS.values, diff_RSS_40.values, 'SBE37 - RSS40', 'c)', show_xlabel=False)
_plot_diff_panel(4, mean_sbe_JPL.values, diff_HYCOM.values, 'SBE37 - HYCOM', 'd)', show_xlabel=True)
plt.show()
| [
"datetime.datetime",
"xarray.open_mfdataset",
"matplotlib.pyplot.grid",
"numpy.unique",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.scatter",
"datetime.timedelta",
"xarray.open_dataset",
"matplotlib.pyp... | [((713, 737), 'glob.glob', 'glob', (["(data_dir1 + '*nc4')"], {}), "(data_dir1 + '*nc4')\n", (717, 737), False, 'from glob import glob\n'), ((874, 917), 'numpy.unique', 'np.unique', (["hycom['time']"], {'return_index': '(True)'}), "(hycom['time'], return_index=True)\n", (883, 917), True, 'import numpy as np\n'), ((1502, 1529), 'glob.glob', 'glob', (["(data_sat + '*jpl*.nc')"], {}), "(data_sat + '*jpl*.nc')\n", (1506, 1529), False, 'from glob import glob\n'), ((1535, 1562), 'glob.glob', 'glob', (["(data_sat + '*rss*.nc')"], {}), "(data_sat + '*rss*.nc')\n", (1539, 1562), False, 'from glob import glob\n'), ((1645, 1688), 'xarray.open_dataset', 'xr.open_dataset', (['JPL[0]'], {'decode_times': '(False)'}), '(JPL[0], decode_times=False)\n', (1660, 1688), True, 'import xarray as xr\n'), ((1693, 1736), 'xarray.open_dataset', 'xr.open_dataset', (['RSS[0]'], {'decode_times': '(False)'}), '(RSS[0], decode_times=False)\n', (1708, 1736), True, 'import xarray as xr\n'), ((2987, 3017), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(18, 10)'}), '(figsize=(18, 10))\n', (2999, 3017), True, 'import matplotlib.pyplot as plt\n'), ((3024, 3044), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (3035, 3044), True, 'import matplotlib.pyplot as plt\n'), ((3046, 3134), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mean_sbe_RSS.values', 'diff_RSS.values'], {'color': '"""b"""', 'label': '"""SBE37 - RSS70"""'}), "(mean_sbe_RSS.values, diff_RSS.values, color='b', label=\n 'SBE37 - RSS70')\n", (3057, 3134), True, 'import matplotlib.pyplot as plt\n'), ((3266, 3323), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""', 'labelsize': '(15)'}), "(axis='both', which='major', labelsize=15)\n", (3281, 3323), True, 'import matplotlib.pyplot as plt\n'), ((3345, 3397), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'prop': 'legend_properties'}), 
"(loc='upper left', prop=legend_properties)\n", (3355, 3397), True, 'import matplotlib.pyplot as plt\n'), ((3399, 3429), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'lw': '(0.5)', 'ls': '""":"""'}), "(True, lw=0.5, ls=':')\n", (3407, 3429), True, 'import matplotlib.pyplot as plt\n'), ((3431, 3464), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontweight': '"""semibold"""'}), "(fontweight='semibold')\n", (3441, 3464), True, 'import matplotlib.pyplot as plt\n'), ((3466, 3499), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontweight': '"""semibold"""'}), "(fontweight='semibold')\n", (3476, 3499), True, 'import matplotlib.pyplot as plt\n'), ((3780, 3800), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (3791, 3800), True, 'import matplotlib.pyplot as plt\n'), ((3802, 3888), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mean_sbe_JPL.values', 'diff_JPL.values'], {'color': '"""b"""', 'label': '"""SBE37 - JPL"""'}), "(mean_sbe_JPL.values, diff_JPL.values, color='b', label=\n 'SBE37 - JPL')\n", (3813, 3888), True, 'import matplotlib.pyplot as plt\n'), ((4021, 4078), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""', 'labelsize': '(15)'}), "(axis='both', which='major', labelsize=15)\n", (4036, 4078), True, 'import matplotlib.pyplot as plt\n'), ((4101, 4153), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'prop': 'legend_properties'}), "(loc='upper left', prop=legend_properties)\n", (4111, 4153), True, 'import matplotlib.pyplot as plt\n'), ((4182, 4212), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'lw': '(0.5)', 'ls': '""":"""'}), "(True, lw=0.5, ls=':')\n", (4190, 4212), True, 'import matplotlib.pyplot as plt\n'), ((4214, 4247), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontweight': '"""semibold"""'}), "(fontweight='semibold')\n", (4224, 4247), True, 'import matplotlib.pyplot as plt\n'), ((4249, 4282), 
'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontweight': '"""semibold"""'}), "(fontweight='semibold')\n", (4259, 4282), True, 'import matplotlib.pyplot as plt\n'), ((4531, 4551), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (4542, 4551), True, 'import matplotlib.pyplot as plt\n'), ((4690, 4781), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mean_sbe_RSS.values', 'diff_RSS_40.values'], {'color': '"""b"""', 'label': '"""SBE37 - RSS40"""'}), "(mean_sbe_RSS.values, diff_RSS_40.values, color='b', label=\n 'SBE37 - RSS40')\n", (4701, 4781), True, 'import matplotlib.pyplot as plt\n'), ((4805, 4862), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""', 'labelsize': '(15)'}), "(axis='both', which='major', labelsize=15)\n", (4820, 4862), True, 'import matplotlib.pyplot as plt\n'), ((4864, 4916), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'prop': 'legend_properties'}), "(loc='upper left', prop=legend_properties)\n", (4874, 4916), True, 'import matplotlib.pyplot as plt\n'), ((4939, 4969), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'lw': '(0.5)', 'ls': '""":"""'}), "(True, lw=0.5, ls=':')\n", (4947, 4969), True, 'import matplotlib.pyplot as plt\n'), ((4971, 5004), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontweight': '"""semibold"""'}), "(fontweight='semibold')\n", (4981, 5004), True, 'import matplotlib.pyplot as plt\n'), ((5006, 5039), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontweight': '"""semibold"""'}), "(fontweight='semibold')\n", (5016, 5039), True, 'import matplotlib.pyplot as plt\n'), ((5288, 5308), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (5299, 5308), True, 'import matplotlib.pyplot as plt\n'), ((5446, 5536), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mean_sbe_JPL.values', 'diff_HYCOM.values'], {'color': '"""b"""', 'label': '"""SBE37 - HYCOM"""'}), 
"(mean_sbe_JPL.values, diff_HYCOM.values, color='b', label=\n 'SBE37 - HYCOM')\n", (5457, 5536), True, 'import matplotlib.pyplot as plt\n'), ((5582, 5639), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""', 'labelsize': '(15)'}), "(axis='both', which='major', labelsize=15)\n", (5597, 5639), True, 'import matplotlib.pyplot as plt\n'), ((5641, 5693), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'prop': 'legend_properties'}), "(loc='upper left', prop=legend_properties)\n", (5651, 5693), True, 'import matplotlib.pyplot as plt\n'), ((5695, 5725), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'lw': '(0.5)', 'ls': '""":"""'}), "(True, lw=0.5, ls=':')\n", (5703, 5725), True, 'import matplotlib.pyplot as plt\n'), ((5727, 5760), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontweight': '"""semibold"""'}), "(fontweight='semibold')\n", (5737, 5760), True, 'import matplotlib.pyplot as plt\n'), ((5762, 5795), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontweight': '"""semibold"""'}), "(fontweight='semibold')\n", (5772, 5795), True, 'import matplotlib.pyplot as plt\n'), ((6037, 6047), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6045, 6047), True, 'import matplotlib.pyplot as plt\n'), ((743, 799), 'xarray.open_mfdataset', 'xr.open_mfdataset', (["(data_dir1 + '*nc4')"], {'concat_dim': '"""time"""'}), "(data_dir1 + '*nc4', concat_dim='time')\n", (760, 799), True, 'import xarray as xr\n'), ((2038, 2074), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(17)', '(0)', '(0)'], {}), '(2020, 1, 17, 0, 0)\n', (2055, 2074), False, 'import datetime\n'), ((2077, 2106), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 's'}), '(seconds=s)\n', (2095, 2106), False, 'import datetime\n'), ((2135, 2171), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(17)', '(0)', '(0)'], {}), '(2020, 1, 17, 0, 0)\n', (2152, 2171), False, 'import datetime\n'), 
((2174, 2203), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 's'}), '(seconds=s)\n', (2192, 2203), False, 'import datetime\n')] |
from .myqt import QT
import pyqtgraph as pg
import numpy as np
import pandas as pd
from .base import WidgetBase
class MyViewBox(pg.ViewBox):
    """pyqtgraph ViewBox that forwards double clicks and wheel motion as signals.

    Signals:
        doubleclicked : emitted on a mouse double click.
        gain_zoom     : emitted with a multiplicative zoom factor on wheel events.
    """
    doubleclicked = QT.pyqtSignal()
    gain_zoom = QT.pyqtSignal(float)

    def __init__(self, *args, **kwds):
        pg.ViewBox.__init__(self, *args, **kwds)

    def mouseClickEvent(self, ev):
        # Swallow single clicks so the default ViewBox interaction is bypassed.
        ev.accept()

    def mouseDoubleClickEvent(self, ev):
        self.doubleclicked.emit()
        ev.accept()

    def wheelEvent(self, ev, axis=None):
        # Ctrl+wheel zooms by a large factor, plain wheel by a small one.
        zoom_in = ev.delta() > 0
        if ev.modifiers() == QT.Qt.ControlModifier:
            factor = 10. if zoom_in else 0.1
        else:
            factor = 1.3 if zoom_in else 1. / 1.3
        self.gain_zoom.emit(factor)
        ev.accept()
class WaveformViewerBase(WidgetBase):
    """Common plotting logic for WaveformViewer (catalogue) and PeelerWaveformViewer.

    Displays the centroid (median/mad or mean/std) waveform of every visible
    cluster, either channel-by-channel side by side ('flatten' mode) or laid
    out on the probe geometry ('geometry' mode).
    """
    #base for both WaveformViewer (Catalogue) and PeelerWaveformViewer
    def __init__(self, controller=None, parent=None):
        WidgetBase.__init__(self, parent=parent, controller=controller)
        self.layout = QT.QVBoxLayout()
        self.setLayout(self.layout)
        #~ self.create_settings()
        self.create_toolbar()
        self.layout.addWidget(self.toolbar)
        self.graphicsview = pg.GraphicsView()
        self.layout.addWidget(self.graphicsview)
        self.initialize_plot()
        # Alpha (0-255) used for the translucent mad/std fill around centroids.
        self.alpha = 60
        self.refresh()
    def create_toolbar(self):
        """Build the toolbar: mode selector plus settings/scale/refresh buttons."""
        tb = self.toolbar = QT.QToolBar()
        #Mode flatten or geometry
        self.combo_mode = QT.QComboBox()
        tb.addWidget(self.combo_mode)
        #~ self.mode = 'flatten'
        #~ self.combo_mode.addItems([ 'flatten', 'geometry'])
        self.mode = 'geometry'
        self.combo_mode.addItems([ 'geometry', 'flatten'])
        self.combo_mode.currentIndexChanged.connect(self.on_combo_mode_changed)
        tb.addSeparator()
        but = QT.QPushButton('settings')
        but.clicked.connect(self.open_settings)
        tb.addWidget(but)
        but = QT.QPushButton('scale')
        but.clicked.connect(self.zoom_range)
        tb.addWidget(but)
        but = QT.QPushButton('refresh')
        but.clicked.connect(self.refresh)
        tb.addWidget(but)
    def on_combo_mode_changed(self):
        """Rebuild the plot layout and redraw when the user switches display mode."""
        self.mode = str(self.combo_mode.currentText())
        self.initialize_plot()
        self.refresh()
    def on_params_changed(self, params, changes):
        """React to settings changes; only 'flip_bottom_up' needs a full plot re-init."""
        for param, change, data in changes:
            if change != 'value': continue
            if param.name()=='flip_bottom_up':
                self.initialize_plot()
        self.refresh()
    def initialize_plot(self):
        """(Re)create the pyqtgraph items for the current mode.

        In 'flatten' mode a second plot (plot2, the mad/std panel) is added
        below the main one.  In 'geometry' mode this precomputes self.xvect,
        the x coordinate of every sample of every channel on the probe layout.
        Does nothing when no waveforms have been extracted yet.
        """
        #~ print('WaveformViewer.initialize_plot', self.controller.some_waveforms)
        if self.controller.get_waveforms_shape() is None:
            return
        self.viewBox1 = MyViewBox()
        self.viewBox1.disableAutoRange()
        grid = pg.GraphicsLayout(border=(100,100,100))
        self.graphicsview.setCentralItem(grid)
        self.plot1 = grid.addPlot(row=0, col=0, rowspan=2, viewBox=self.viewBox1)
        self.plot1.hideButtons()
        self.plot1.showAxis('left', True)
        self.curve_one_waveform = pg.PlotCurveItem([], [], pen=pg.mkPen(QT.QColor( 'white'), width=1), connect='finite')
        self.plot1.addItem(self.curve_one_waveform)
        if self.mode=='flatten':
            grid.nextRow()
            grid.nextRow()
            self.viewBox2 = MyViewBox()
            self.viewBox2.disableAutoRange()
            self.plot2 = grid.addPlot(row=2, col=0, rowspan=1, viewBox=self.viewBox2)
            self.plot2.hideButtons()
            self.plot2.showAxis('left', True)
            self.viewBox2.setXLink(self.viewBox1)
            self.factor_y = 1.
        elif self.mode=='geometry':
            self.plot2 = None
            chan_grp = self.controller.chan_grp
            channel_group = self.controller.dataio.channel_groups[chan_grp]
            #~ print(channel_group['geometry'])
            if channel_group['geometry'] is None:
                print('no geometry')
                self.xvect = None
            else:
                shape = self.controller.get_waveforms_shape()
                width = shape[0]
                self.xvect = np.zeros(shape[0]*shape[1], dtype='float32')
                self.arr_geometry = []
                for i, chan in enumerate(self.controller.channel_indexes):
                    x, y = channel_group['geometry'][chan]
                    self.arr_geometry.append([x, y])
                self.arr_geometry = np.array(self.arr_geometry, dtype='float64')
                if self.params['flip_bottom_up']:
                    self.arr_geometry[:, 1] *= -1.
                xpos = self.arr_geometry[:,0]
                ypos = self.arr_geometry[:,1]
                # delta_x/delta_y: smallest inter-channel spacing on the probe,
                # used to scale and offset waveforms around their channel position.
                if np.unique(xpos).size>1:
                    self.delta_x = np.min(np.diff(np.sort(np.unique(xpos))))
                else:
                    self.delta_x = np.unique(xpos)[0]
                if np.unique(ypos).size>1:
                    self.delta_y = np.min(np.diff(np.sort(np.unique(ypos))))
                else:
                    self.delta_y = np.unique(ypos)[0]
                self.factor_y = .3
                if self.delta_x>0.:
                    #~ espx = self.delta_x/2. *.95
                    espx = self.delta_x/2.5
                else:
                    espx = .5
                # Each channel occupies a horizontal span of +/-espx around its x position.
                for i, chan in enumerate(channel_group['channels']):
                    x, y = channel_group['geometry'][chan]
                    self.xvect[i*width:(i+1)*width] = np.linspace(x-espx, x+espx, num=width)
        self.wf_min, self.wf_max = self.controller.get_min_max_centroids()
        # None means "auto range on next refresh".
        self._x_range = None
        self._y1_range = None
        self._y2_range = None
        self.viewBox1.gain_zoom.connect(self.gain_zoom)
        self.viewBox1.doubleclicked.connect(self.open_settings)
        #~ self.viewBox.xsize_zoom.connect(self.xsize_zoom)
    def gain_zoom(self, factor_ratio):
        """Multiply the vertical waveform scale by factor_ratio and redraw."""
        self.factor_y *= factor_ratio
        self.refresh()
    def zoom_range(self):
        """Reset the stored view ranges so the next refresh auto-scales."""
        self._x_range = None
        self._y1_range = None
        self._y2_range = None
        self.refresh()
    def refresh(self):
        """Redraw everything for the current mode.

        When 'show_only_selected_cluster' is on and exactly one spike is
        selected, only that spike's cluster is shown.
        """
        if not hasattr(self, 'viewBox1'):
            self.initialize_plot()
        if not hasattr(self, 'viewBox1'):
            return
        n_selected = np.sum(self.controller.spike_selection)
        if self.params['show_only_selected_cluster'] and n_selected==1:
            cluster_visible = {k:False for k in self.controller.cluster_visible}
            ind, = np.nonzero(self.controller.spike_selection)
            ind = ind[0]
            k = self.controller.spikes[ind]['cluster_label']
            cluster_visible[k] = True
        else:
            cluster_visible = self.controller.cluster_visible
        if self.mode=='flatten':
            self.refresh_mode_flatten(cluster_visible)
        elif self.mode=='geometry':
            self.refresh_mode_geometry(cluster_visible)
        self._refresh_one_spike(n_selected)
    def refresh_mode_flatten(self, cluster_visible):
        """Redraw in 'flatten' mode: all channels side by side in 1D.

        plot1 shows the centroids (with optional mad/std fill), plot2 the
        mad/std values.  The current view ranges are preserved across redraws.
        """
        if self._x_range is not None:
            #~ self._x_range = self.plot1.getXRange()
            #~ self._y1_range = self.plot1.getYRange()
            #~ self._y2_range = self.plot2.getYRange()
            #this may change with pyqtgraph
            self._x_range = tuple(self.viewBox1.state['viewRange'][0])
            self._y1_range = tuple(self.viewBox1.state['viewRange'][1])
            self._y2_range = tuple(self.viewBox2.state['viewRange'][1])
        self.plot1.clear()
        self.plot2.clear()
        self.plot1.addItem(self.curve_one_waveform)
        if self.controller.spike_index ==[]:
            return
        nb_channel = self.controller.nb_channel
        #~ d = self.controller.info['waveform_extractor_params']
        #~ n_left, n_right = d['n_left'], d['n_right']
        n_left, n_right = self.controller.get_waveform_left_right()
        width = n_right - n_left
        #lines
        # Channel separators: shade every other channel span and draw a
        # vertical line at each channel's peak sample position.
        def addSpan(plot):
            white = pg.mkColor(255, 255, 255, 20)
            for i in range(nb_channel):
                if i%2==1:
                    region = pg.LinearRegionItem([width*i, width*(i+1)-1], movable = False, brush = white)
                    plot.addItem(region, ignoreBounds=True)
                    for l in region.lines:
                        l.setPen(white)
                vline = pg.InfiniteLine(pos = -n_left + width*i, angle=90, movable=False, pen = pg.mkPen('w'))
                plot.addItem(vline)
        if self.params['plot_limit_for_flatten']:
            addSpan(self.plot1)
            addSpan(self.plot2)
        if self.params['display_threshold']:
            thresh = self.controller.get_threshold()
            thresh_line = pg.InfiniteLine(pos=thresh, angle=0, movable=False, pen = pg.mkPen('w'))
            self.plot1.addItem(thresh_line)
        #waveforms
        if self.params['metrics']=='median/mad':
            key1, key2 = 'median', 'mad'
        elif self.params['metrics']=='mean/std':
            key1, key2 = 'mean', 'std'
        shape = self.controller.get_waveforms_shape()
        if shape is None:
            return
        xvect = np.arange(shape[0]*shape[1])
        #~ for i,k in enumerate(self.controller.centroids):
        for k in cluster_visible:
            #~ if not self.controller.cluster_visible[k]:
            if not cluster_visible[k]:
                continue
            #~ wf0 = self.controller.centroids[k][key1].T.flatten()
            #~ mad = self.controller.centroids[k][key2].T.flatten()
            wf0 = self.controller.get_waveform_centroid(k, key1)
            if wf0 is None: continue
            wf0 = wf0.T.flatten()
            mad = self.controller.get_waveform_centroid(k, key2)
            color = self.controller.qcolors.get(k, QT.QColor( 'white'))
            curve = pg.PlotCurveItem(xvect, wf0, pen=pg.mkPen(color, width=2))
            self.plot1.addItem(curve)
            if self.params['fillbetween'] and mad is not None:
                mad = mad.T.flatten()
                color2 = QT.QColor(color)
                color2.setAlpha(self.alpha)
                curve1 = pg.PlotCurveItem(xvect, wf0+mad, pen=color2)
                curve2 = pg.PlotCurveItem(xvect, wf0-mad, pen=color2)
                self.plot1.addItem(curve1)
                self.plot1.addItem(curve2)
                fill = pg.FillBetweenItem(curve1=curve1, curve2=curve2, brush=color2)
                self.plot1.addItem(fill)
            if mad is not None:
                # NOTE(review): mad is only flattened inside the 'fillbetween'
                # branch above; when that option is off this receives a 2D
                # array -- verify against pyqtgraph's expectations.
                curve = pg.PlotCurveItem(xvect, mad, pen=color)
                self.plot2.addItem(curve)
        if self.params['show_channel_num']:
            for i, (chan, name) in enumerate(self.controller.channel_indexes_and_names):
                itemtxt = pg.TextItem('{}: {}'.format(i, name), anchor=(.5,.5), color='#FFFF00')
                itemtxt.setFont(QT.QFont('', pointSize=12))
                self.plot1.addItem(itemtxt)
                itemtxt.setPos(width*i-n_left, 0)
        if self._x_range is None:
            self._x_range = xvect[0], xvect[-1]
            self._y1_range = self.wf_min*1.1, self.wf_max*1.1
            self._y2_range = 0., 5.
        self.plot1.setXRange(*self._x_range, padding = 0.0)
        self.plot1.setYRange(*self._y1_range, padding = 0.0)
        self.plot2.setYRange(*self._y2_range, padding = 0.0)
    def refresh_mode_geometry(self, cluster_visible):
        """Redraw in 'geometry' mode: waveforms placed on the probe layout.

        Each cluster's centroid is scaled by factor_y*delta_y and offset to
        its channel's (x, y) position; the first sample of each channel is
        set to NaN so channels are drawn as disconnected segments.
        """
        if self._x_range is not None:
            #~ self._x_range = self.plot1.getXRange()
            #~ self._y1_range = self.plot1.getYRange()
            #this may change with pyqtgraph
            self._x_range = tuple(self.viewBox1.state['viewRange'][0])
            self._y1_range = tuple(self.viewBox1.state['viewRange'][1])
        self.plot1.clear()
        if self.xvect is None:
            return
        shape = self.controller.get_waveforms_shape()
        if shape is None:
            return
        # if n_left/n_right have change need new xvect
        if self.xvect.shape[0] != shape[0] * shape[1]:
            self.initialize_plot()
        self.plot1.addItem(self.curve_one_waveform)
        if self.params['metrics']=='median/mad':
            key1, key2 = 'median', 'mad'
        elif self.params['metrics']=='mean/std':
            key1, key2 = 'mean', 'std'
        ypos = self.arr_geometry[:,1]
        for k in cluster_visible:
            if not cluster_visible[k]:
                continue
            wf = self.controller.get_waveform_centroid(k, key1)
            if wf is None: continue
            wf = wf*self.factor_y*self.delta_y + ypos[None, :]
            wf[0,:] = np.nan
            wf = wf.T.reshape(-1)
            color = self.controller.qcolors.get(k, QT.QColor( 'white'))
            curve = pg.PlotCurveItem(self.xvect, wf, pen=pg.mkPen(color, width=2), connect='finite')
            self.plot1.addItem(curve)
        if self.params['show_channel_num']:
            chan_grp = self.controller.chan_grp
            channel_group = self.controller.dataio.channel_groups[chan_grp]
            for i, (chan, name) in enumerate(self.controller.channel_indexes_and_names):
                x, y = self.arr_geometry[i, : ]
                itemtxt = pg.TextItem('{}: {}'.format(i, name), anchor=(.5,.5), color='#FFFF00')
                itemtxt.setFont(QT.QFont('', pointSize=12))
                self.plot1.addItem(itemtxt)
                itemtxt.setPos(x, y)
        if self._x_range is None:
            self._x_range = np.min(self.xvect), np.max(self.xvect)
            self._y1_range = np.min(ypos)-self.delta_y*2, np.max(ypos)+self.delta_y*2
        self.plot1.setXRange(*self._x_range, padding = 0.0)
        self.plot1.setYRange(*self._y1_range, padding = 0.0)
    def _refresh_one_spike(self, n_selected):
        """Overlay the waveform of the single selected spike, if enabled.

        Clears the overlay when zero or several spikes are selected or the
        'plot_selected_spike' option is off.
        """
        #TODO peak the selected peak if only one
        if n_selected!=1 or not self.params['plot_selected_spike']: 
            self.curve_one_waveform.setData([], [])
            return
        ind, = np.nonzero(self.controller.spike_selection)
        ind = ind[0]
        seg_num = self.controller.spike_segment[ind]
        peak_ind = self.controller.spike_index[ind]
        n_left, n_right = self.controller.get_waveform_left_right()
        wf = self.controller.dataio.get_signals_chunk(seg_num=seg_num, chan_grp=self.controller.chan_grp,
                i_start=peak_ind+n_left, i_stop=peak_ind+n_right,
                signal_type='processed')
        if wf.shape[0]==(n_right-n_left):
            #this avoid border bugs
            if self.mode=='flatten':
                wf = wf.T.flatten()
                xvect = np.arange(wf.size)
                self.curve_one_waveform.setData(xvect, wf)
            elif self.mode=='geometry':
                ypos = self.arr_geometry[:,1]
                wf = wf*self.factor_y*self.delta_y + ypos[None, :]
                wf[0,:] = np.nan
                wf = wf.T.reshape(-1)
                self.curve_one_waveform.setData(self.xvect, wf)
    def on_spike_selection_changed(self):
        """Full redraw whenever the spike selection changes elsewhere."""
        #~ n_selected = np.sum(self.controller.spike_selection)
        #~ self._refresh_one_spike(n_selected)
        self.refresh()
class WaveformViewer(WaveformViewerBase):
    """
    **Waveform viewer** is undoubtedly the view to inspect waveforms.
    Note that in some aspects **Waveform hist viewer** can be a better friend.
    All centroids (median or mean) of visible clusters are plotted here.
    2 main modes:
      * **geometry** waveforms are organized with the 2d geometry given by the PRB file.
      * **flatten** each chunk of each channel is put side by side in channel order
        so it can be plotted in 1d. The bottom view is the mad. On a good cluster the mad
        must be as close as possible to the value 1 because 1 is the normalized noise.
    The **geometry** mode is more intuitive and helps users with spatial
    information. But the **flatten** mode is really important because it gives information
    about the variance (mad or std) at each point and about peak alignment.
    The centroid is defined by median+mad but you can also check with mean+std.
    For a healthy cluster they should be more or less the same.
    Important for zooming:
      * **geometry**: zoom XY = right click, move = left click and mouse wheel = zoom waveforms
      * **flatten**: zoom XY = right click and move = left click
    Settings:
      * **plot_selected_spike**: superimpose the selected peak on the centroid
      * **show_only_selected_cluster**: auto-hide all clusters except the one of the selected spike
      * **plot_limit_for_flatten**: for flatten mode, plot lines delimiting channels.
        Plotting is important but it slows down the zoom.
      * **metrics**: choose median+mad or mean+std.
      * **show_channel_num**: display the channel index and name on each channel
      * **flip_bottom_up**: in geometry mode, flip the channel geometry bottom up.
      * **display_threshold**: draw a horizontal line at the detection threshold (flatten mode)
    """
    _params = [{'name': 'plot_selected_spike', 'type': 'bool', 'value': False },
                      {'name': 'show_only_selected_cluster', 'type': 'bool', 'value': False},
                      {'name': 'plot_limit_for_flatten', 'type': 'bool', 'value': True },
                      {'name': 'metrics', 'type': 'list', 'values': ['median/mad', 'mean/std'] },
                      {'name': 'fillbetween', 'type': 'bool', 'value': True },
                      {'name': 'show_channel_num', 'type': 'bool', 'value': False},
                      {'name': 'flip_bottom_up', 'type': 'bool', 'value': False},
                      {'name': 'display_threshold', 'type': 'bool', 'value' : True },
                      ]
class PeelerWaveformViewer(WaveformViewerBase):
    """
    **Waveform viewer** for the Peeler output.
    Same display as the catalogue Waveform viewer, but configured by default
    to superimpose the selected spike and to show only its cluster.
    """
    _params = [{'name': 'plot_selected_spike', 'type': 'bool', 'value': True },
                      {'name': 'show_only_selected_cluster', 'type': 'bool', 'value': True},
                      {'name': 'plot_limit_for_flatten', 'type': 'bool', 'value': True },
                      {'name': 'metrics', 'type': 'list', 'values': ['median/mad'] },
                      {'name': 'fillbetween', 'type': 'bool', 'value': True },
                      {'name': 'show_channel_num', 'type': 'bool', 'value': False},
                      {'name': 'flip_bottom_up', 'type': 'bool', 'value': False},
                      {'name': 'display_threshold', 'type': 'bool', 'value' : True },
                      ]
| [
"pyqtgraph.PlotCurveItem",
"numpy.unique",
"pyqtgraph.GraphicsLayout",
"numpy.max",
"numpy.sum",
"pyqtgraph.GraphicsView",
"pyqtgraph.ViewBox.__init__",
"pyqtgraph.FillBetweenItem",
"numpy.zeros",
"numpy.nonzero",
"numpy.min",
"numpy.array",
"pyqtgraph.mkPen",
"numpy.linspace",
"pyqtgrap... | [((265, 305), 'pyqtgraph.ViewBox.__init__', 'pg.ViewBox.__init__', (['self', '*args'], {}), '(self, *args, **kwds)\n', (284, 305), True, 'import pyqtgraph as pg\n'), ((1290, 1307), 'pyqtgraph.GraphicsView', 'pg.GraphicsView', ([], {}), '()\n', (1305, 1307), True, 'import pyqtgraph as pg\n'), ((2973, 3014), 'pyqtgraph.GraphicsLayout', 'pg.GraphicsLayout', ([], {'border': '(100, 100, 100)'}), '(border=(100, 100, 100))\n', (2990, 3014), True, 'import pyqtgraph as pg\n'), ((6670, 6709), 'numpy.sum', 'np.sum', (['self.controller.spike_selection'], {}), '(self.controller.spike_selection)\n', (6676, 6709), True, 'import numpy as np\n'), ((9685, 9715), 'numpy.arange', 'np.arange', (['(shape[0] * shape[1])'], {}), '(shape[0] * shape[1])\n', (9694, 9715), True, 'import numpy as np\n'), ((14800, 14843), 'numpy.nonzero', 'np.nonzero', (['self.controller.spike_selection'], {}), '(self.controller.spike_selection)\n', (14810, 14843), True, 'import numpy as np\n'), ((6891, 6934), 'numpy.nonzero', 'np.nonzero', (['self.controller.spike_selection'], {}), '(self.controller.spike_selection)\n', (6901, 6934), True, 'import numpy as np\n'), ((8443, 8472), 'pyqtgraph.mkColor', 'pg.mkColor', (['(255)', '(255)', '(255)', '(20)'], {}), '(255, 255, 255, 20)\n', (8453, 8472), True, 'import pyqtgraph as pg\n'), ((10729, 10775), 'pyqtgraph.PlotCurveItem', 'pg.PlotCurveItem', (['xvect', '(wf0 + mad)'], {'pen': 'color2'}), '(xvect, wf0 + mad, pen=color2)\n', (10745, 10775), True, 'import pyqtgraph as pg\n'), ((10799, 10845), 'pyqtgraph.PlotCurveItem', 'pg.PlotCurveItem', (['xvect', '(wf0 - mad)'], {'pen': 'color2'}), '(xvect, wf0 - mad, pen=color2)\n', (10815, 10845), True, 'import pyqtgraph as pg\n'), ((10970, 11032), 'pyqtgraph.FillBetweenItem', 'pg.FillBetweenItem', ([], {'curve1': 'curve1', 'curve2': 'curve2', 'brush': 'color2'}), '(curve1=curve1, curve2=curve2, brush=color2)\n', (10988, 11032), True, 'import pyqtgraph as pg\n'), ((11143, 11182), 'pyqtgraph.PlotCurveItem', 
'pg.PlotCurveItem', (['xvect', 'mad'], {'pen': 'color'}), '(xvect, mad, pen=color)\n', (11159, 11182), True, 'import pyqtgraph as pg\n'), ((14263, 14281), 'numpy.min', 'np.min', (['self.xvect'], {}), '(self.xvect)\n', (14269, 14281), True, 'import numpy as np\n'), ((14283, 14301), 'numpy.max', 'np.max', (['self.xvect'], {}), '(self.xvect)\n', (14289, 14301), True, 'import numpy as np\n'), ((15453, 15471), 'numpy.arange', 'np.arange', (['wf.size'], {}), '(wf.size)\n', (15462, 15471), True, 'import numpy as np\n'), ((4373, 4419), 'numpy.zeros', 'np.zeros', (['(shape[0] * shape[1])'], {'dtype': '"""float32"""'}), "(shape[0] * shape[1], dtype='float32')\n", (4381, 4419), True, 'import numpy as np\n'), ((4681, 4725), 'numpy.array', 'np.array', (['self.arr_geometry'], {'dtype': '"""float64"""'}), "(self.arr_geometry, dtype='float64')\n", (4689, 4725), True, 'import numpy as np\n'), ((8569, 8655), 'pyqtgraph.LinearRegionItem', 'pg.LinearRegionItem', (['[width * i, width * (i + 1) - 1]'], {'movable': '(False)', 'brush': 'white'}), '([width * i, width * (i + 1) - 1], movable=False, brush=\n white)\n', (8588, 8655), True, 'import pyqtgraph as pg\n'), ((9251, 9264), 'pyqtgraph.mkPen', 'pg.mkPen', (['"""w"""'], {}), "('w')\n", (9259, 9264), True, 'import pyqtgraph as pg\n'), ((10427, 10451), 'pyqtgraph.mkPen', 'pg.mkPen', (['color'], {'width': '(2)'}), '(color, width=2)\n', (10435, 10451), True, 'import pyqtgraph as pg\n'), ((13546, 13570), 'pyqtgraph.mkPen', 'pg.mkPen', (['color'], {'width': '(2)'}), '(color, width=2)\n', (13554, 13570), True, 'import pyqtgraph as pg\n'), ((14331, 14343), 'numpy.min', 'np.min', (['ypos'], {}), '(ypos)\n', (14337, 14343), True, 'import numpy as np\n'), ((14360, 14372), 'numpy.max', 'np.max', (['ypos'], {}), '(ypos)\n', (14366, 14372), True, 'import numpy as np\n'), ((5762, 5804), 'numpy.linspace', 'np.linspace', (['(x - espx)', '(x + espx)'], {'num': 'width'}), '(x - espx, x + espx, num=width)\n', (5773, 5804), True, 'import numpy as np\n'), 
((8886, 8899), 'pyqtgraph.mkPen', 'pg.mkPen', (['"""w"""'], {}), "('w')\n", (8894, 8899), True, 'import pyqtgraph as pg\n'), ((4989, 5004), 'numpy.unique', 'np.unique', (['xpos'], {}), '(xpos)\n', (4998, 5004), True, 'import numpy as np\n'), ((5147, 5162), 'numpy.unique', 'np.unique', (['xpos'], {}), '(xpos)\n', (5156, 5162), True, 'import numpy as np\n'), ((5185, 5200), 'numpy.unique', 'np.unique', (['ypos'], {}), '(ypos)\n', (5194, 5200), True, 'import numpy as np\n'), ((5343, 5358), 'numpy.unique', 'np.unique', (['ypos'], {}), '(ypos)\n', (5352, 5358), True, 'import numpy as np\n'), ((5071, 5086), 'numpy.unique', 'np.unique', (['xpos'], {}), '(xpos)\n', (5080, 5086), True, 'import numpy as np\n'), ((5267, 5282), 'numpy.unique', 'np.unique', (['ypos'], {}), '(ypos)\n', (5276, 5282), True, 'import numpy as np\n')] |
import math
import time
import torch
import torch.cuda.nvtx as nvtx
import numpy as np
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from tqdm import tqdm
from utils.initializers import args_initialize, env_initialize, log_initialize, model_initialize
from a2c.helper import callback, format_time, gen_data
from a2c.model import ActorCritic
from a2c.test import test
class data_prefetcher():
    """Pipelines batch loading on a dedicated CUDA stream so the next
    minibatch is copied while the current one is being consumed."""

    def __init__(self, loader):
        self.loader = iter(loader)
        self.stream = torch.cuda.Stream()
        self.preload()

    def preload(self):
        # Fetch the upcoming batch on the side stream; an exhausted loader
        # is signalled by setting every slot to None.
        with torch.cuda.stream(self.stream):
            try:
                (self.next_states, self.next_actions,
                 self.next_action_log_probs, self.next_returns,
                 self.next_advantages) = next(self.loader)
            except StopIteration:
                self.next_states = None
                self.next_actions = None
                self.next_action_log_probs = None
                self.next_returns = None
                self.next_advantages = None
                return

    def next(self):
        # Make the compute stream wait for the side-stream copies to finish.
        torch.cuda.current_stream().wait_stream(self.stream)
        batch = (self.next_states, self.next_actions,
                 self.next_action_log_probs, self.next_returns,
                 self.next_advantages)
        self.preload()
        return batch
def worker(gpu, ngpus_per_node, args):
    """Run the full PPO training loop for one (possibly distributed) worker.

    Sets up environments, model and optimizer, then alternates between
    collecting ``args.num_steps`` transitions from ``args.num_ales`` parallel
    emulators and optimizing the clipped PPO surrogate for ``args.ppo_epoch``
    epochs per iteration. Rank 0 additionally runs periodic evaluation,
    CSV/TensorBoard logging and the tqdm progress bar.
    """
    env_device, train_device = args_initialize(gpu, ngpus_per_node, args)
    train_csv_file, train_csv_writer, eval_csv_file, eval_csv_writer, summary_writer = log_initialize(args, train_device)
    train_env, test_env, observation = env_initialize(args, env_device)
    model = ActorCritic(args.num_stack, train_env.action_space, normalize=args.normalize, name=args.env_name)
    model, optimizer = model_initialize(args, model, train_device)

    # Rollout storage: one extra time slot holds the bootstrap state/value.
    shape = (args.num_steps + 1, args.num_ales, args.num_stack, *train_env.observation_space.shape[-2:])
    states = torch.zeros(shape, device=train_device, dtype=torch.float32)
    states[0, :, -1] = observation.to(device=train_device, dtype=torch.float32)

    shape = (args.num_steps + 1, args.num_ales)
    values = torch.zeros(shape, device=train_device, dtype=torch.float32)
    logits = torch.zeros((args.num_steps + 1, args.num_ales, train_env.action_space.n), device=train_device, dtype=torch.float32)
    returns = torch.zeros(shape, device=train_device, dtype=torch.float32)

    shape = (args.num_steps, args.num_ales)
    rewards = torch.zeros(shape, device=train_device, dtype=torch.float32)
    masks = torch.zeros(shape, device=train_device, dtype=torch.float32)
    actions = torch.zeros(shape, device=train_device, dtype=torch.long)

    # These variables are used to compute average rewards for all processes.
    episode_rewards = torch.zeros(args.num_ales, device=train_device, dtype=torch.float32)
    final_rewards = torch.zeros(args.num_ales, device=train_device, dtype=torch.float32)
    episode_lengths = torch.zeros(args.num_ales, device=train_device, dtype=torch.float32)
    final_lengths = torch.zeros(args.num_ales, device=train_device, dtype=torch.float32)

    if args.use_gae:
        gae = torch.zeros(args.num_ales, device=train_device, dtype=torch.float32)

    maybe_npy = lambda a: a.numpy() if args.use_openai else a

    num_frames_per_iter = args.num_ales * args.num_steps
    args.num_minibatches = num_frames_per_iter / args.batch_size
    total_steps = math.ceil(args.t_max / (args.world_size * num_frames_per_iter))
    # StepLR with gamma = 1 - 1/total_steps approximates a linear decay to ~0.
    decay = 1.0 / total_steps
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.ppo_epoch, gamma=1.0 - decay)

    iterator = range(total_steps)
    if args.rank == 0:
        iterator = tqdm(iterator)
    total_time = 0
    evaluation_offset = 0
    train_stream = torch.cuda.Stream()
    torch.cuda.synchronize()

    for update in iterator:
        T = args.world_size * update * num_frames_per_iter
        # Periodic evaluation + eval logging (rank 0 only).
        if (args.rank == 0) and (T >= evaluation_offset):
            evaluation_offset += args.evaluation_interval
            eval_lengths, eval_rewards = test(args, model, test_env)
            lmean, lmedian, lmin, lmax, lstd = gen_data(eval_lengths)
            rmean, rmedian, rmin, rmax, rstd = gen_data(eval_rewards)
            length_data = '(length) min/max/mean/median: {lmin:4.1f}/{lmax:4.1f}/{lmean:4.1f}/{lmedian:4.1f}'.format(lmin=lmin, lmax=lmax, lmean=lmean, lmedian=lmedian)
            reward_data = '(reward) min/max/mean/median: {rmin:4.1f}/{rmax:4.1f}/{rmean:4.1f}/{rmedian:4.1f}'.format(rmin=rmin, rmax=rmax, rmean=rmean, rmedian=rmedian)
            print('[training time: {}] {}'.format(format_time(total_time), ' --- '.join([length_data, reward_data])))

            if eval_csv_writer and eval_csv_file:
                eval_csv_writer.writerow([T, total_time, rmean, rmedian, rmin, rmax, rstd, lmean, lmedian, lmin, lmax, lstd])
                eval_csv_file.flush()

            if args.plot:
                summary_writer.add_scalar('eval/rewards_mean', rmean, T, walltime=total_time)
                summary_writer.add_scalar('eval/lengths_mean', lmean, T, walltime=total_time)

        start_time = time.time()

        # --- experience collection (no gradients needed) ---
        with torch.no_grad():
            for step in range(args.num_steps):
                nvtx.range_push('train:step')
                value, logit = model(states[step])

                # store values and logits
                values[step], logits[step] = value.squeeze(-1), logit.squeeze(-1)

                # convert actions to numpy and perform next step
                probs = torch.clamp(F.softmax(logit, dim=1), min = 0.00001, max = 0.99999)
                probs_action = probs.multinomial(1).to(env_device)
                observation, reward, done, info = train_env.step(maybe_npy(probs_action))

                if args.use_openai:
                    # convert back to pytorch tensors
                    observation = torch.from_numpy(observation)
                    reward = torch.from_numpy(reward)
                    done = torch.from_numpy(done.astype(np.uint8))
                else:
                    observation = observation.squeeze(-1).unsqueeze(1)

                # move back to training memory
                observation = observation.to(device=train_device)
                reward = reward.to(device=train_device, dtype=torch.float32)
                done = done.to(device=train_device, dtype=torch.bool)
                probs_action = probs_action.to(device=train_device, dtype=torch.long)

                not_done = 1.0 - done.float()

                # update rewards and actions (rewards are clipped to their sign)
                actions[step].copy_(probs_action.view(-1))
                masks[step].copy_(not_done)
                rewards[step].copy_(reward.sign())

                # update next observations: shift the frame stack and zero it on reset
                states[step + 1, :, :-1].copy_(states[step, :, 1:])
                states[step + 1] *= not_done.view(-1, *[1] * (observation.dim() - 1))
                states[step + 1, :, -1].copy_(observation.view(-1, *states.size()[-2:]))

                # update episodic reward counters
                episode_rewards += reward
                final_rewards[done] = episode_rewards[done]
                episode_rewards *= not_done

                episode_lengths += not_done
                final_lengths[done] = episode_lengths[done]
                episode_lengths *= not_done
                nvtx.range_pop()

        # Bootstrap value for the state following the rollout.
        returns[-1] = values[-1] = model(states[-1])[0].data.squeeze(-1)

        if args.use_gae:
            # Generalized Advantage Estimation, computed backwards in time.
            gae.zero_()
            for step in reversed(range(args.num_steps)):
                delta = rewards[step] + (args.gamma * values[step + 1] * masks[step]) - values[step]
                gae = delta + (args.gamma * args.tau * masks[step] * gae)
                returns[step] = gae + values[step]
        else:
            for step in reversed(range(args.num_steps)):
                returns[step] = rewards[step] + (args.gamma * returns[step + 1] * masks[step])

        log_probs = F.log_softmax(logits[:-1].view(-1, train_env.action_space.n), dim=1)
        action_log_probs = log_probs.gather(1, actions.view(-1).unsqueeze(-1))
        advantages = returns[:-1].view(-1).unsqueeze(-1) - values[:-1].view(-1).unsqueeze(-1)
        advantages = (advantages - advantages.mean()) / (advantages.std() + float(np.finfo(np.float32).eps))

        total_value_loss = 0.0
        total_policy_loss = 0.0
        total_dist_entropy = 0.0

        # --- PPO optimization over shuffled minibatches of the rollout ---
        nvtx.range_push('train:loader')
        states_view = states[:-1].view(-1, *states.size()[-3:])
        actions_view = actions.view(-1)
        returns_view = returns[:-1].view(-1)
        train_dataset = torch.utils.data.TensorDataset(states_view, actions_view, action_log_probs, returns_view, advantages)

        train_sampler = None
        if args.distributed:
            train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)

        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
                                                   num_workers=0, pin_memory=False, sampler=train_sampler)
        nvtx.range_pop()

        with torch.cuda.stream(train_stream):
            for epoch in range(args.ppo_epoch):
                nvtx.range_push('train:epoch_step')

                if args.distributed:
                    train_sampler.set_epoch(epoch)

                prefetcher = data_prefetcher(train_loader)
                local_states, local_actions, local_action_log_probs, local_returns, local_advantages = prefetcher.next()

                while local_states is not None:
                    batch_values, batch_logits = model(local_states)
                    batch_log_probs = F.log_softmax(batch_logits, dim=1)
                    batch_action_log_probs = batch_log_probs.gather(1, local_actions.unsqueeze(-1))
                    batch_probs = F.softmax(batch_logits, dim=1)
                    batch_dist_entropy = -(batch_log_probs * batch_probs).sum(-1).mean()

                    # Clipped PPO surrogate objective.
                    ratio = torch.exp(batch_action_log_probs - local_action_log_probs)
                    surrogate1 = ratio * local_advantages
                    surrogate2 = torch.clamp(ratio, 1.0 - args.clip_epsilon, 1.0 + args.clip_epsilon) * local_advantages
                    batch_policy_loss = -torch.min(surrogate1, surrogate2).mean()
                    batch_value_loss = F.mse_loss(local_returns.unsqueeze(-1), batch_values) / 2.0

                    loss = batch_value_loss * args.value_loss_coef + batch_policy_loss - batch_dist_entropy * args.entropy_coef
                    optimizer.zero_grad()
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                    optimizer.step()

                    total_value_loss += batch_value_loss.item()
                    total_policy_loss += batch_policy_loss.item()
                    total_dist_entropy += batch_dist_entropy.item()

                    local_states, local_actions, local_action_log_probs, local_returns, local_advantages = prefetcher.next()

                scheduler.step()
                nvtx.range_pop()

        torch.cuda.synchronize()

        # Roll the final frame stack into slot 0 for the next iteration.
        states[0].copy_(states[-1])

        if args.rank == 0:
            iter_time = time.time() - start_time
            total_time += iter_time
            value_loss = total_value_loss / (args.ppo_epoch * args.num_minibatches)
            policy_loss = total_policy_loss / (args.ppo_epoch * args.num_minibatches)
            dist_entropy = total_dist_entropy / (args.ppo_epoch * args.num_minibatches)

            if args.plot:
                # BUG FIX: these calls previously used an undefined name `writer`;
                # log_initialize() returns the TensorBoard handle as `summary_writer`
                # (see its use in the evaluation branch above).
                summary_writer.add_scalar('train/rewards_mean', final_rewards.mean().item(), T, walltime=total_time)
                summary_writer.add_scalar('train/lengths_mean', final_lengths.mean().item(), T, walltime=total_time)
                summary_writer.add_scalar('train/learning_rate', scheduler.get_lr()[0], T, walltime=total_time)
                summary_writer.add_scalar('train/value_loss', value_loss, T, walltime=total_time)
                summary_writer.add_scalar('train/policy_loss', policy_loss, T, walltime=total_time)
                summary_writer.add_scalar('train/entropy', dist_entropy, T, walltime=total_time)

            progress_data = callback(args, model, T, iter_time, final_rewards, final_lengths,
                                     value_loss, policy_loss, dist_entropy, train_csv_writer, train_csv_file)
            iterator.set_postfix_str(progress_data)

    if args.plot and (args.rank == 0):
        # BUG FIX: was `writer.close()` (undefined name).
        summary_writer.close()

    if args.use_openai:
        train_env.close()
    if args.use_openai_test_env:
        test_env.close()
| [
"torch.cuda.nvtx.range_push",
"utils.initializers.log_initialize",
"torch.from_numpy",
"torch.exp",
"torch.cuda.Stream",
"torch.cuda.synchronize",
"torch.utils.data.distributed.DistributedSampler",
"torch.min",
"a2c.model.ActorCritic",
"a2c.helper.callback",
"utils.initializers.model_initialize"... | [((1452, 1494), 'utils.initializers.args_initialize', 'args_initialize', (['gpu', 'ngpus_per_node', 'args'], {}), '(gpu, ngpus_per_node, args)\n', (1467, 1494), False, 'from utils.initializers import args_initialize, env_initialize, log_initialize, model_initialize\n'), ((1582, 1616), 'utils.initializers.log_initialize', 'log_initialize', (['args', 'train_device'], {}), '(args, train_device)\n', (1596, 1616), False, 'from utils.initializers import args_initialize, env_initialize, log_initialize, model_initialize\n'), ((1656, 1688), 'utils.initializers.env_initialize', 'env_initialize', (['args', 'env_device'], {}), '(args, env_device)\n', (1670, 1688), False, 'from utils.initializers import args_initialize, env_initialize, log_initialize, model_initialize\n'), ((1702, 1804), 'a2c.model.ActorCritic', 'ActorCritic', (['args.num_stack', 'train_env.action_space'], {'normalize': 'args.normalize', 'name': 'args.env_name'}), '(args.num_stack, train_env.action_space, normalize=args.\n normalize, name=args.env_name)\n', (1713, 1804), False, 'from a2c.model import ActorCritic\n'), ((1823, 1866), 'utils.initializers.model_initialize', 'model_initialize', (['args', 'model', 'train_device'], {}), '(args, model, train_device)\n', (1839, 1866), False, 'from utils.initializers import args_initialize, env_initialize, log_initialize, model_initialize\n'), ((1986, 2046), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'train_device', 'dtype': 'torch.float32'}), '(shape, device=train_device, dtype=torch.float32)\n', (1997, 2046), False, 'import torch\n'), ((2189, 2249), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'train_device', 'dtype': 'torch.float32'}), '(shape, device=train_device, dtype=torch.float32)\n', (2200, 2249), False, 'import torch\n'), ((2263, 2383), 'torch.zeros', 'torch.zeros', (['(args.num_steps + 1, args.num_ales, train_env.action_space.n)'], {'device': 'train_device', 'dtype': 'torch.float32'}), 
'((args.num_steps + 1, args.num_ales, train_env.action_space.n),\n device=train_device, dtype=torch.float32)\n', (2274, 2383), False, 'import torch\n'), ((2394, 2454), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'train_device', 'dtype': 'torch.float32'}), '(shape, device=train_device, dtype=torch.float32)\n', (2405, 2454), False, 'import torch\n'), ((2514, 2574), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'train_device', 'dtype': 'torch.float32'}), '(shape, device=train_device, dtype=torch.float32)\n', (2525, 2574), False, 'import torch\n'), ((2587, 2647), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'train_device', 'dtype': 'torch.float32'}), '(shape, device=train_device, dtype=torch.float32)\n', (2598, 2647), False, 'import torch\n'), ((2662, 2719), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'train_device', 'dtype': 'torch.long'}), '(shape, device=train_device, dtype=torch.long)\n', (2673, 2719), False, 'import torch\n'), ((2820, 2888), 'torch.zeros', 'torch.zeros', (['args.num_ales'], {'device': 'train_device', 'dtype': 'torch.float32'}), '(args.num_ales, device=train_device, dtype=torch.float32)\n', (2831, 2888), False, 'import torch\n'), ((2909, 2977), 'torch.zeros', 'torch.zeros', (['args.num_ales'], {'device': 'train_device', 'dtype': 'torch.float32'}), '(args.num_ales, device=train_device, dtype=torch.float32)\n', (2920, 2977), False, 'import torch\n'), ((3000, 3068), 'torch.zeros', 'torch.zeros', (['args.num_ales'], {'device': 'train_device', 'dtype': 'torch.float32'}), '(args.num_ales, device=train_device, dtype=torch.float32)\n', (3011, 3068), False, 'import torch\n'), ((3089, 3157), 'torch.zeros', 'torch.zeros', (['args.num_ales'], {'device': 'train_device', 'dtype': 'torch.float32'}), '(args.num_ales, device=train_device, dtype=torch.float32)\n', (3100, 3157), False, 'import torch\n'), ((3467, 3530), 'math.ceil', 'math.ceil', (['(args.t_max / (args.world_size * num_frames_per_iter))'], {}), '(args.t_max / 
(args.world_size * num_frames_per_iter))\n', (3476, 3530), False, 'import math\n'), ((3578, 3663), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': 'args.ppo_epoch', 'gamma': '(1.0 - decay)'}), '(optimizer, step_size=args.ppo_epoch, gamma=1.0 -\n decay)\n', (3603, 3663), True, 'import torch.optim as optim\n'), ((3825, 3844), 'torch.cuda.Stream', 'torch.cuda.Stream', ([], {}), '()\n', (3842, 3844), False, 'import torch\n'), ((3850, 3874), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (3872, 3874), False, 'import torch\n'), ((523, 542), 'torch.cuda.Stream', 'torch.cuda.Stream', ([], {}), '()\n', (540, 542), False, 'import torch\n'), ((3194, 3262), 'torch.zeros', 'torch.zeros', (['args.num_ales'], {'device': 'train_device', 'dtype': 'torch.float32'}), '(args.num_ales, device=train_device, dtype=torch.float32)\n', (3205, 3262), False, 'import torch\n'), ((3737, 3751), 'tqdm.tqdm', 'tqdm', (['iterator'], {}), '(iterator)\n', (3741, 3751), False, 'from tqdm import tqdm\n'), ((5198, 5209), 'time.time', 'time.time', ([], {}), '()\n', (5207, 5209), False, 'import time\n'), ((8561, 8592), 'torch.cuda.nvtx.range_push', 'nvtx.range_push', (['"""train:loader"""'], {}), "('train:loader')\n", (8576, 8592), True, 'import torch.cuda.nvtx as nvtx\n'), ((8766, 8871), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['states_view', 'actions_view', 'action_log_probs', 'returns_view', 'advantages'], {}), '(states_view, actions_view, action_log_probs,\n returns_view, advantages)\n', (8796, 8871), False, 'import torch\n'), ((9042, 9208), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(train_sampler is None)', 'num_workers': '(0)', 'pin_memory': '(False)', 'sampler': 'train_sampler'}), '(train_dataset, batch_size=args.batch_size,\n shuffle=train_sampler is None, num_workers=0, pin_memory=False, sampler\n 
=train_sampler)\n', (9069, 9208), False, 'import torch\n'), ((9261, 9277), 'torch.cuda.nvtx.range_pop', 'nvtx.range_pop', ([], {}), '()\n', (9275, 9277), True, 'import torch.cuda.nvtx as nvtx\n'), ((11324, 11348), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (11346, 11348), False, 'import torch\n'), ((603, 633), 'torch.cuda.stream', 'torch.cuda.stream', (['self.stream'], {}), '(self.stream)\n', (620, 633), False, 'import torch\n'), ((4121, 4148), 'a2c.test.test', 'test', (['args', 'model', 'test_env'], {}), '(args, model, test_env)\n', (4125, 4148), False, 'from a2c.test import test\n'), ((4197, 4219), 'a2c.helper.gen_data', 'gen_data', (['eval_lengths'], {}), '(eval_lengths)\n', (4205, 4219), False, 'from a2c.helper import callback, format_time, gen_data\n'), ((4267, 4289), 'a2c.helper.gen_data', 'gen_data', (['eval_rewards'], {}), '(eval_rewards)\n', (4275, 4289), False, 'from a2c.helper import callback, format_time, gen_data\n'), ((5224, 5239), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5237, 5239), False, 'import torch\n'), ((8955, 9017), 'torch.utils.data.distributed.DistributedSampler', 'torch.utils.data.distributed.DistributedSampler', (['train_dataset'], {}), '(train_dataset)\n', (9002, 9017), False, 'import torch\n'), ((9292, 9323), 'torch.cuda.stream', 'torch.cuda.stream', (['train_stream'], {}), '(train_stream)\n', (9309, 9323), False, 'import torch\n'), ((12407, 12549), 'a2c.helper.callback', 'callback', (['args', 'model', 'T', 'iter_time', 'final_rewards', 'final_lengths', 'value_loss', 'policy_loss', 'dist_entropy', 'train_csv_writer', 'train_csv_file'], {}), '(args, model, T, iter_time, final_rewards, final_lengths,\n value_loss, policy_loss, dist_entropy, train_csv_writer, train_csv_file)\n', (12415, 12549), False, 'from a2c.helper import callback, format_time, gen_data\n'), ((1031, 1058), 'torch.cuda.current_stream', 'torch.cuda.current_stream', ([], {}), '()\n', (1056, 1058), False, 'import torch\n'), ((5305, 
5334), 'torch.cuda.nvtx.range_push', 'nvtx.range_push', (['"""train:step"""'], {}), "('train:step')\n", (5320, 5334), True, 'import torch.cuda.nvtx as nvtx\n'), ((7437, 7453), 'torch.cuda.nvtx.range_pop', 'nvtx.range_pop', ([], {}), '()\n', (7451, 7453), True, 'import torch.cuda.nvtx as nvtx\n'), ((9389, 9424), 'torch.cuda.nvtx.range_push', 'nvtx.range_push', (['"""train:epoch_step"""'], {}), "('train:epoch_step')\n", (9404, 9424), True, 'import torch.cuda.nvtx as nvtx\n'), ((11298, 11314), 'torch.cuda.nvtx.range_pop', 'nvtx.range_pop', ([], {}), '()\n', (11312, 11314), True, 'import torch.cuda.nvtx as nvtx\n'), ((11438, 11449), 'time.time', 'time.time', ([], {}), '()\n', (11447, 11449), False, 'import time\n'), ((4678, 4701), 'a2c.helper.format_time', 'format_time', (['total_time'], {}), '(total_time)\n', (4689, 4701), False, 'from a2c.helper import callback, format_time, gen_data\n'), ((5613, 5636), 'torch.nn.functional.softmax', 'F.softmax', (['logit'], {'dim': '(1)'}), '(logit, dim=1)\n', (5622, 5636), True, 'import torch.nn.functional as F\n'), ((5950, 5979), 'torch.from_numpy', 'torch.from_numpy', (['observation'], {}), '(observation)\n', (5966, 5979), False, 'import torch\n'), ((6009, 6033), 'torch.from_numpy', 'torch.from_numpy', (['reward'], {}), '(reward)\n', (6025, 6033), False, 'import torch\n'), ((9851, 9885), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['batch_logits'], {'dim': '(1)'}), '(batch_logits, dim=1)\n', (9864, 9885), True, 'import torch.nn.functional as F\n'), ((10021, 10051), 'torch.nn.functional.softmax', 'F.softmax', (['batch_logits'], {'dim': '(1)'}), '(batch_logits, dim=1)\n', (10030, 10051), True, 'import torch.nn.functional as F\n'), ((10170, 10228), 'torch.exp', 'torch.exp', (['(batch_action_log_probs - local_action_log_probs)'], {}), '(batch_action_log_probs - local_action_log_probs)\n', (10179, 10228), False, 'import torch\n'), ((10320, 10388), 'torch.clamp', 'torch.clamp', (['ratio', '(1.0 - args.clip_epsilon)', '(1.0 + 
args.clip_epsilon)'], {}), '(ratio, 1.0 - args.clip_epsilon, 1.0 + args.clip_epsilon)\n', (10331, 10388), False, 'import torch\n'), ((8428, 8448), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (8436, 8448), True, 'import numpy as np\n'), ((10449, 10482), 'torch.min', 'torch.min', (['surrogate1', 'surrogate2'], {}), '(surrogate1, surrogate2)\n', (10458, 10482), False, 'import torch\n')] |
import numpy as np
import networkx as nx
import pickle as cp
import random
import ctypes
import os
import sys
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import time
sys.path.append( '%s/code' % os.path.dirname(os.path.realpath(__file__)) )
from learning_lib import LearningLib
n_valid = 100       # number of held-out validation graphs
w_scaling = 0.01    # scale factor applied to sampled edge weights
MAX_VAL = 1000000   # exclusive upper bound for random graph seeds
MIN_VAL = -1000000  # sentinel "worst possible" reward for best-so-far tracking
def gen_graph(opt):
    """Sample one random weighted graph according to the option dict.

    opt keys used: 'min_n'/'max_n' (node-count range, inclusive),
    'g_type' ('erdos_renyi', 'powerlaw' or 'barabasi_albert') and
    'density' (edge probability / attachment parameter). Every edge gets
    a 'weight' attribute: a random integer in [1, 10], perturbed by
    +/-0.5 and scaled by the module-level `w_scaling`.

    Raises ValueError for an unknown 'g_type'.
    """
    max_n = int(opt['max_n'])
    min_n = int(opt['min_n'])
    g_type = opt['g_type']
    max_w = 10
    min_w = 1
    graph_id = np.random.randint(MAX_VAL)
    cur_n = np.random.randint(max_n - min_n + 1) + min_n
    if g_type == 'erdos_renyi':
        p = float(opt['density'])
        e_g = nx.erdos_renyi_graph(n = cur_n, p = p, seed = graph_id)
        # Keep only the largest connected component, relabelled 0..k-1.
        # NOTE(review): connected_component_subgraphs was removed in
        # networkx >= 2.4 — confirm the pinned networkx version.
        lcc = max(nx.connected_component_subgraphs(e_g), key=len)
        g = nx.convert_node_labels_to_integers(lcc)
    elif g_type == 'powerlaw':
        # BUG FIX: `p` was previously undefined in this branch (NameError);
        # read the triangle probability from the density option, matching
        # the erdos_renyi branch.
        p = float(opt['density'])
        g = nx.powerlaw_cluster_graph(n = cur_n, m = 4, p = p, seed = graph_id)
    elif g_type == 'barabasi_albert':
        p = int(opt['density'])
        if p == 0:
            # density 0 means "pick the attachment parameter at random".
            max_p = 16
            min_p = 1
            p = np.random.randint(max_p - min_p + 1) + min_p
        g = nx.barabasi_albert_graph(n = cur_n, m = p, seed = graph_id)
    else:
        # Previously an unknown type fell through and crashed later with
        # an unrelated NameError on `g`; fail fast instead.
        raise ValueError('unknown g_type: %s' % g_type)
    for edge in nx.edges(g):
        pert = np.random.uniform(-0.5,0.5)
        weight = np.random.randint(max_w - min_w + 1) + min_w
        g[edge[0]][edge[1]]['weight'] = (weight + pert) * w_scaling
    return g
def gen_new_graphs(opt):
    """Discard the current training graphs and insert 1000 fresh samples."""
    api.ClearTrainGraphs()
    for _ in range(1000):
        api.InsertGraph(gen_graph(opt), is_test=False)
def PrepareValidData(opt):
    """Populate the validation pool with `n_valid` freshly sampled graphs."""
    for _ in range(n_valid):
        api.InsertGraph(gen_graph(opt), is_test=True)
if __name__ == '__main__':
    # Entry point: train a graph-RL agent through the native LearningLib backend.
    start_time = time.time()
    api = LearningLib(sys.argv)
    # Parse "-key value" command-line pairs into an option dict.
    opt = {}
    for i in range(1, len(sys.argv), 2):
        opt[sys.argv[i][1:]] = sys.argv[i + 1]
    seed = int(opt['seed'])
    np.random.seed(seed)
    print("***********************************************************")
    print("[INFO] TRAINING ON RANDOM GRAPHS")
    print("[INFO] Graph type: " + opt['g_type'])
    print("[INFO] Density parameter: " + opt['density'])
    print("[INFO] Number of nodes: [" + opt['min_n'] + " " + opt['max_n'] + "]")
    print("***********************************************************")
    sys.stdout.flush()
    # Build the validation set
    PrepareValidData(opt)
    # Generate the training set
    gen_new_graphs(opt)
    # Warm-up: play games with fully random exploration (eps = 1.0).
    for i in range(10):
        api.lib.PlayGame(100, ctypes.c_double(1.0))
    api.TakeSnapshot()
    # Epsilon-greedy schedule: linear decay from eps_start to eps_end over
    # the first eps_step iterations, constant afterwards.
    eps_start = 1.0
    eps_end = 0.05
    eps_step = 10000.0
    lr = float(opt['learning_rate'])
    print('[INFO]','iter', 'time', 'lr', 'eps', 'avg-width','avg-bound','avg-reward')
    sys.stdout.flush()
    # Best iteration stats seen so far, compared on the last field (avg reward).
    best_reward = (0,0,0,0,0,0,MIN_VAL)
    if int(opt["plot_training"]) == 1:
        fig = plt.figure()
        iter_list = []
        reward_list = []
    for iter in range(int(opt['max_iter'])):
        eps = eps_end + max(0., (eps_start - eps_end) * (eps_step - iter) / eps_step)
        if iter % 10 == 0:
            # Collect more experience with the current exploration rate.
            api.lib.PlayGame(10, ctypes.c_double(eps))
        if iter % 100 == 0:
            # Evaluate on the validation graphs, log a [DATA] row, save the model.
            sys.stdout.flush()
            width, bound, reward = 0.0, 0.0, 0.0
            for idx in range(n_valid):
                val, sol = api.GetResult(idx)
                width += sol[0]
                bound += sol[1]
                reward += val
            width, bound, reward = (width/n_valid, bound/n_valid, reward/n_valid)
            cur_time = round(time.time() - start_time,2)
            it_data = (iter, cur_time, lr, eps, width, bound, reward)
            print("[DATA]", " ".join(map(str,it_data)))
            if reward > best_reward[-1]:
                best_reward = it_data
            sys.stdout.flush()
            model_path = '%s/model_iter_%d.model' % (opt['save_dir'], iter)
            api.SaveModel(model_path)
            if int(opt["plot_training"]) == 1:
                # Refresh the reward-vs-iteration curve image on disk.
                iter_list.append(iter)
                reward_list.append(reward)
                plt.clf()
                plt.plot(iter_list, reward_list)
                out_file = '%s/log_training_curve_reward.png' % opt['save_dir']
                plt.savefig(out_file, dpi = 300)
        if iter % 1000 == 0:
            # Snapshot the model state (presumably a target-network sync —
            # confirm against LearningLib) and decay the learning rate by 5%.
            api.TakeSnapshot()
            lr = lr * 0.95
        if iter and iter % 5000 == 0:
            print("[LOG] Refreshing Training set")
            gen_new_graphs(opt)
        api.lib.Fit(ctypes.c_double(lr))
    print("[BEST-REWARD]", " ".join(map(str,best_reward)))
| [
"networkx.barabasi_albert_graph",
"networkx.connected_component_subgraphs",
"learning_lib.LearningLib",
"matplotlib.pyplot.plot",
"networkx.edges",
"numpy.random.seed",
"sys.stdout.flush",
"networkx.erdos_renyi_graph",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"time.time",
"matplotlib.pyp... | [((135, 149), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (142, 149), True, 'import matplotlib as mpl\n'), ((533, 559), 'numpy.random.randint', 'np.random.randint', (['MAX_VAL'], {}), '(MAX_VAL)\n', (550, 559), True, 'import numpy as np\n'), ((1267, 1278), 'networkx.edges', 'nx.edges', (['g'], {}), '(g)\n', (1275, 1278), True, 'import networkx as nx\n'), ((1789, 1800), 'time.time', 'time.time', ([], {}), '()\n', (1798, 1800), False, 'import time\n'), ((1812, 1833), 'learning_lib.LearningLib', 'LearningLib', (['sys.argv'], {}), '(sys.argv)\n', (1823, 1833), False, 'from learning_lib import LearningLib\n'), ((1971, 1991), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1985, 1991), True, 'import numpy as np\n'), ((2377, 2395), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2393, 2395), False, 'import sys\n'), ((2803, 2821), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2819, 2821), False, 'import sys\n'), ((573, 609), 'numpy.random.randint', 'np.random.randint', (['(max_n - min_n + 1)'], {}), '(max_n - min_n + 1)\n', (590, 609), True, 'import numpy as np\n'), ((698, 747), 'networkx.erdos_renyi_graph', 'nx.erdos_renyi_graph', ([], {'n': 'cur_n', 'p': 'p', 'seed': 'graph_id'}), '(n=cur_n, p=p, seed=graph_id)\n', (718, 747), True, 'import networkx as nx\n'), ((832, 871), 'networkx.convert_node_labels_to_integers', 'nx.convert_node_labels_to_integers', (['lcc'], {}), '(lcc)\n', (866, 871), True, 'import networkx as nx\n'), ((1295, 1323), 'numpy.random.uniform', 'np.random.uniform', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (1312, 1323), True, 'import numpy as np\n'), ((2918, 2930), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2928, 2930), True, 'import matplotlib.pyplot as plt\n'), ((240, 266), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (256, 266), False, 'import os\n'), ((772, 809), 'networkx.connected_component_subgraphs', 
'nx.connected_component_subgraphs', (['e_g'], {}), '(e_g)\n', (804, 809), True, 'import networkx as nx\n'), ((915, 974), 'networkx.powerlaw_cluster_graph', 'nx.powerlaw_cluster_graph', ([], {'n': 'cur_n', 'm': '(4)', 'p': 'p', 'seed': 'graph_id'}), '(n=cur_n, m=4, p=p, seed=graph_id)\n', (940, 974), True, 'import networkx as nx\n'), ((1340, 1376), 'numpy.random.randint', 'np.random.randint', (['(max_w - min_w + 1)'], {}), '(max_w - min_w + 1)\n', (1357, 1376), True, 'import numpy as np\n'), ((2566, 2586), 'ctypes.c_double', 'ctypes.c_double', (['(1.0)'], {}), '(1.0)\n', (2581, 2586), False, 'import ctypes\n'), ((3234, 3252), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3250, 3252), False, 'import sys\n'), ((3841, 3859), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3857, 3859), False, 'import sys\n'), ((4542, 4561), 'ctypes.c_double', 'ctypes.c_double', (['lr'], {}), '(lr)\n', (4557, 4561), False, 'import ctypes\n'), ((1190, 1243), 'networkx.barabasi_albert_graph', 'nx.barabasi_albert_graph', ([], {'n': 'cur_n', 'm': 'p', 'seed': 'graph_id'}), '(n=cur_n, m=p, seed=graph_id)\n', (1214, 1243), True, 'import networkx as nx\n'), ((3171, 3191), 'ctypes.c_double', 'ctypes.c_double', (['eps'], {}), '(eps)\n', (3186, 3191), False, 'import ctypes\n'), ((4122, 4131), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4129, 4131), True, 'import matplotlib.pyplot as plt\n'), ((4148, 4180), 'matplotlib.pyplot.plot', 'plt.plot', (['iter_list', 'reward_list'], {}), '(iter_list, reward_list)\n', (4156, 4180), True, 'import matplotlib.pyplot as plt\n'), ((4277, 4307), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_file'], {'dpi': '(300)'}), '(out_file, dpi=300)\n', (4288, 4307), True, 'import matplotlib.pyplot as plt\n'), ((3593, 3604), 'time.time', 'time.time', ([], {}), '()\n', (3602, 3604), False, 'import time\n'), ((1133, 1169), 'numpy.random.randint', 'np.random.randint', (['(max_p - min_p + 1)'], {}), '(max_p - min_p + 1)\n', (1150, 1169), 
True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed May 17 16:36:14 2017
@author: vrtjso
"""
import numpy as np
import pandas as pd
from datetime import datetime, date
from operator import le, eq
from Utils import sample_vals, FeatureCombination
import gc
from sklearn import model_selection, preprocessing
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
####Data Cleaning####
print('Data Cleaning...')
#Data importing
# Load train/test sets keyed by listing id and stack them into one frame,
# tagging the origin of each row via isTrain.
trainDf = pd.read_csv('train.csv').set_index('id')
testDf = pd.read_csv('test.csv').set_index('id')
# Manually curated address corrections, applied over allDf below.
fix = pd.read_excel('BAD_ADDRESS_FIX.xlsx').set_index('id')
testDf['isTrain'] = 0
trainDf['isTrain'] = 1
allDf = pd.concat([trainDf,testDf])
# filter_func returning all-True makes update() overwrite even non-NaN cells.
allDf.update(fix, filter_func = lambda x:np.array([True]*x.shape[0])) #update fix data
macro = pd.read_csv('macro.csv')
#Join division and macro
divisions = pd.read_csv('divisions.csv')
allDf = allDf.join(divisions.set_index('sub_area'), on='sub_area')
# macro = pd.read_csv('macro.csv')
# allDf = allDf.join(macro[['timestamp','macro_combined_index']].set_index('timestamp'), on='timestamp')
# macro = macro.loc[365:2343,:] #drop data before 2011 and after 2016.6
# macro_full = macro.loc[:,macro.count()==1979] # drop nan columns
# macro_missing = macro.loc[:2190,macro.count()==1826]
# allDf = allDf.join(macro_full.set_index('timestamp'), on='timestamp')
# FeatureCombination(macro_full.drop('timestamp',1),'',10)
# Drop variable with no use (actuallly they are useful :)
# allDf = allDf.drop(['16_29_male','cafe_count_5000_price_1500','market_count_1000',
# '0_6_male','young_male','build_count_before_1920','market_count_1500',
# 'trc_count_500','church_count_3000','cafe_count_2000_na_price',
# 'mosque_count_3000','leisure_count_2000','build_count_slag',
# "oil_chemistry_raion","railroad_terminal_raion","mosque_count_500",
# "nuclear_reactor_raion", "build_count_foam", "big_road1_1line",
# "trc_sqm_500", "cafe_count_500_price_high","mosque_count_1000", "mosque_count_1500"],1)
# Drop no use macro
# allDf = allDf.drop(["real_dispos_income_per_cap_growth","profitable_enterpr_share",
# "unprofitable_enterpr_share","share_own_revenues","overdue_wages_per_cap",
# "fin_res_per_cap","marriages_per_1000_cap","divorce_rate","construction_value",
# "invest_fixed_assets_phys","pop_migration","pop_total_inc","housing_fund_sqm",
# "lodging_sqm_per_cap","water_pipes_share","baths_share","sewerage_share","gas_share",
# "hot_water_share","electric_stove_share","heating_share","old_house_share",
# "infant_mortarity_per_1000_cap", "perinatal_mort_per_1000_cap", "incidence_population",
# "load_of_teachers_preschool_per_teacher","provision_doctors","power_clinics","hospital_beds_available_per_cap",
# "hospital_bed_occupancy_per_year","provision_retail_space_sqm","provision_retail_space_sqm",
# "theaters_viewers_per_1000_cap","museum_visitis_per_100_cap","population_reg_sports_share",
# "students_reg_sports_share","apartment_build",
#                     'gdp_annual_growth','old_education_build_share','provision_nurse','employment', # features from this line on had zero importance
# 'apartment_fund_sqm','invest_fixed_capital_per_cap'],1)
### Change price by rate ###
# Parse the transaction date; the (currently commented-out) code below
# derives quarterly price-index adjustments from it.
allDf['timestamp'] = pd.to_datetime(allDf['timestamp'])
# price_q_rate = [0,1.1,1,2.36,7.6,2.79,2.79,2.77,-1.68,1.04,.44,.41,-.98,1.26,.86,1.69,1.12,-.68,-1.85,-1.66,-1.69,-.097]
# price_rate = [1]
# for i in range(1,len(price_q_rate)):
# price_rate.append(price_rate[i-1] * (1 + price_q_rate[i] * 0.01))
# year_quarter = np.array((allDf.timestamp.dt.year - 2011) * 4 + allDf.timestamp.dt.quarter - 1)
# p = np.ones(allDf.shape[0])
# for i in range(0,allDf.shape[0]):
# p[i] = price_rate[year_quarter[i]]
# allDf['price_rate'] = p
# allDf['price_doc'] = allDf.price_doc / allDf.price_rate
# time = np.array([])
# for i in allDf.index:
# time = np.append(time, datetime.strptime(allDf['timestamp'][i], '%Y-%m-%d').timestamp())
# allDf['time'] = time
# allDf.drop('timestamp', 1, inplace=True)
# Synthetic building identifier: sub-area plus distance-to-metro — presumably
# listings sharing both belong to the same complex (verify this assumption).
allDf['apartment_name'] = allDf.sub_area + allDf['metro_km_avto'].astype(str)
# Ordinal-encode the ecology rating; 'no data' maps to 0.
eco_map = {'excellent':4, 'good':3, 'satisfactory':2, 'poor':1, 'no data':0}
allDf['ecology'] = allDf['ecology'].map(eco_map)
#encode subarea in order
# price_by_area = allDf['price_doc'].groupby(allDf.sub_area).mean().sort_values()
# area_dict = {}
# for i in range(0,price_by_area.shape[0]):
# area_dict[price_by_area.index[i]] = i
# allDf['sub_area'] = allDf['sub_area'].map(area_dict)
for c in allDf.columns:
if allDf[c].dtype == 'object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(allDf[c].values))
allDf[c] = lbl.transform(list(allDf[c].values))
# PCA on area feature
# area_feature = []
# for i in allDf.columns:
# if allDf[i].groupby(allDf.sub_area).var().mean()==0 and i != 'sub_area':
# area_feature.append(i)
# areaDf = allDf[area_feature]
# nonareaDf = allDf.drop(area_feature,1)
# areaDf = FeatureCombination(areaDf,'',10)
# allDf = pd.concat([nonareaDf,areaDf],1)
# allDf = FeatureCombination(allDf,'cafe_count',7)
#FeatureCombination(allDf,'sport_count',5)
#FeatureCombination(allDf,'market_count',3)
#FeatureCombination(allDf,'leisure_count',5)
#FeatureCombination(allDf,'church_count',5)
#FeatureCombination(allDf,'big_church_count',5)
#FeatureCombination(allDf,'trc_count',5)
#FeatureCombination(allDf,'office_sqm',5)
#FeatureCombination(allDf,'trc_sqm',3)
#FeatureCombination(allDf,'railroad_station',2)
#FeatureCombination(allDf,'metro',2)
#Transform price to log price
#allDf['log_price'] = np.log1p(allDf.price_doc)
#Drop all training samples with strange price.
#allDf = allDf[~((allDf.price_doc==1000000) & (allDf.product_type_Investment==1))]
#allDf = allDf[~((allDf.price_doc==2000000) & (allDf.product_type_Investment==1))]
#allDf.ix[allDf.price_doc==2000000,'w'] = 0.7
#Undersample strange price
# allDf = sample_vals(allDf, 1000000, 1/8, le)
# allDf = sample_vals(allDf, 2000000, 1/4, eq)
# allDf = sample_vals(allDf, 3000000, 1/2, eq)
#allDf = allDf.reset_index(drop=True)
#allDf.drop('price_doc',1,inplace=True)
###Dealing with Outlier###
allDf.loc[allDf.full_sq>2000,'full_sq'] = np.nan
allDf.loc[allDf.full_sq<3,'full_sq'] = np.nan
allDf.loc[allDf.life_sq>500,'life_sq'] = np.nan
allDf.loc[allDf.life_sq<3,'life_sq'] = np.nan
# allDf['lifesq_to_fullsq'] = 0 # 0 for normal, 1 for close,2 for outlier
allDf.loc[allDf.life_sq>0.8*allDf.full_sq,'life_sq'] = np.nan
# allDf.ix[allDf.life_sq>allDf.full_sq,['life_sq','lifesq_to_fullsq']] = np.nan, 2
allDf.loc[allDf.kitch_sq>=allDf.life_sq,'kitch_sq'] = np.nan
allDf.loc[allDf.kitch_sq>500,'kitch_sq'] = np.nan
allDf.loc[allDf.kitch_sq<2,'kitch_sq'] = np.nan
allDf.loc[allDf.state>30,'state'] = np.nan
allDf.loc[allDf.build_year<1800,'build_year'] = np.nan
allDf.loc[allDf.build_year==20052009,'build_year'] = 2005
allDf.loc[allDf.build_year==4965,'build_year'] = np.nan
allDf.loc[allDf.build_year>2021,'build_year'] = np.nan
allDf.loc[allDf.num_room>15,'num_room'] = np.nan
allDf.loc[allDf.num_room==0,'num_room'] = np.nan
allDf.loc[allDf.floor==0,'floor'] = np.nan
allDf.loc[allDf.max_floor==0,'max_floor'] = np.nan
allDf.loc[allDf.floor>allDf.max_floor,'max_floor'] = np.nan
#allDf.ix[allDf.full_sq>300,'full_sq'] = np.nan
#allDf.ix[allDf.life_sq>250,'life_sq'] = np.nan
# brings error down a lot by removing extreme price per sqm
bad_index = allDf[allDf.price_doc/allDf.full_sq > 600000].index
bad_index = bad_index.append(allDf[allDf.price_doc/allDf.full_sq < 10000].index)
allDf.drop(bad_index,0,inplace=True)
####Feature Engineering####
print('Feature Engineering...')
gc.collect()
##Time
# isWeekend = []
# month = []
# year = []
# weekday = []
# week_of_year = []
# year_month = []
# for i in allDf.index:
# dateS = date.fromtimestamp(allDf.time[i]) #timestamp
# isWeekend.append(1 if dateS.isoweekday() == 6 or dateS.isoweekday() == 7 else 0)
# month.append(dateS.month)
# year.append(dateS.year)
# year_month.append(dateS.year*100 + dateS.month)
# weekday.append(dateS.weekday())
# week_of_year.append(dateS.isocalendar()[1])
##allDf['is_weekend'] = pd.Series(isWeekend) #seems to be of no use
# allDf['month'] = np.array(month)
allDf['year'] = allDf.timestamp.dt.year #may be no use because test data is out of range
allDf['weekday'] = allDf.timestamp.dt.weekday
#allDf['week_of_year'] = np.array(week_of_year)
##allDf['year_month'] = np.array(year_month)
#w_map = {2011:0.8, 2012:0.8, 2013:0.9, 2014:1, 2015:1, 2016:0}
#allDf['w'] = [w_map[i] for i in year]
# Assign weight
allDf['w'] = 1
allDf.loc[allDf.price_doc==1000000,'w'] *= 0.5
allDf.loc[allDf.year==2015,'w'] *= 1.5
#May lead to overfitting
#Change timestamp to accumulated days.
#accum_day = np.array([])
#day0 = date(2011,8,20)
#for i in range(0,allDf.shape[0]):
# accum_day = np.append(accum_day, (date.fromtimestamp(allDf.time[allDf.index[i]]) - day0).days)
#allDf['accum_day'] = pd.Series(accum_day) #试试把时间去掉
# Sale count
# mon_to_sale = allDf.groupby('month')['month'].count().to_dict()
# allDf['sale_cnt_mon'] = allDf['month'].map(mon_to_sale)
# week_to_sale = allDf.groupby('week_of_year')['week_of_year'].count().to_dict()
# allDf['sale_cnt_week'] = allDf['week_of_year'].map(week_to_sale)
# allDf = allDf.drop('week_of_year',1)
# allDf = allDf.drop('month',1)
# weekday_to_sale = allDf.groupby('weekday')['weekday'].count().to_dict()
# allDf['sale_cnt_weekday'] = allDf['weekday'].map(weekday_to_sale)
# area_to_sale = allDf.groupby('sub_area')['sub_area'].count().to_dict()
# allDf['sale_cnt_area'] = allDf['sub_area'].map(area_to_sale)
# OKRUGS_to_sale = allDf.groupby('OKRUGS')['OKRUGS'].count().to_dict()
# allDf['sale_cnt_OKRUGS'] = allDf['OKRUGS'].map(OKRUGS_to_sale)
# allDf['year_month'] = (allDf.timestamp.dt.year - 2011) * 12 + allDf.timestamp.dt.month
# year_mon_to_sale = allDf.groupby('year_month')['year_month'].count().to_dict()
# allDf['sale_cnt_year_mon'] = allDf['year_month'].map(year_mon_to_sale)
# allDf.drop('year_month',1,inplace=True)
#Location
#center_OKRUGS_lon = allDf.groupby('OKRUGS')['lon'].mean().to_dict()
#center_OKRUGS_lat = allDf.groupby('OKRUGS')['lat'].mean().to_dict()
#allDf['dist_to_OKRUGS_center'] = np.sqrt((allDf['lon'] - allDf['OKRUGS'].map(center_OKRUGS_lon)) ** 2 +
# (allDf['lat'] - allDf['OKRUGS'].map(center_OKRUGS_lat)) ** 2)
#Floor
allDf['floor_by_max_floor'] = allDf.floor / allDf.max_floor
#allDf['floor_to_top'] = allDf.max_floor - allDf.floor
#Room
allDf['avg_room_size'] = (allDf.life_sq - allDf.kitch_sq) / allDf.num_room
allDf['life_sq_prop'] = allDf.life_sq / allDf.full_sq
allDf['kitch_sq_prop'] = allDf.kitch_sq / allDf.full_sq
#Calculate age of building
allDf['build_age'] = allDf.year - allDf.build_year
allDf = allDf.drop('build_year', 1)
#Population
allDf['popu_den'] = allDf.raion_popul / allDf.area_m
allDf['gender_rate'] = allDf.male_f / allDf.female_f
allDf['working_rate'] = allDf.work_all / allDf.full_all
#Education
allDf.loc[allDf.preschool_quota==0,'preschool_quota'] = np.nan
allDf['preschool_ratio'] = allDf.children_preschool / allDf.preschool_quota
allDf['school_ratio'] = allDf.children_school / allDf.school_quota
## Group statistics
# avg_yearbuilt_area = allDf.groupby('sub_area')['build_age'].mean().to_dict()
# allDf['avg_yearbuilt_area'] = allDf['sub_area'].map(avg_yearbuilt_area)
# avg_yearbuilt_OKRUGS = allDf.groupby('OKRUGS')['build_age'].mean().to_dict()
# allDf['avg_yearbuilt_OKRUGS'] = allDf['OKRUGS'].map(avg_yearbuilt_OKRUGS)
# Mathematical features
# polyf = ['full_sq','build_age','life_sq','floor','max_floor','num_room']
# for i in range(0,len(polyf)):
# for j in range(i,len(polyf)):
# allDf[polyf[i]+'*'+polyf[j]] = allDf[polyf[i]] * allDf[polyf[j]]
allDf['square_full_sq'] = (allDf.full_sq - allDf.full_sq.mean()) ** 2
allDf['square_build_age'] = (allDf.build_age - allDf.build_age.mean()) ** 2
allDf['nan_count'] = allDf[['full_sq','build_age','life_sq','floor','max_floor','num_room']].isnull().sum(axis=1)
allDf['full*maxfloor'] = allDf.max_floor * allDf.full_sq
allDf['full*floor'] = allDf.floor * allDf.full_sq
allDf['full/age'] = allDf.full_sq / (allDf.build_age + 0.5)
allDf['age*state'] = allDf.build_age * allDf.state
# new trial
allDf['main_road_diff'] = allDf['big_road2_km'] - allDf['big_road1_km']
allDf['rate_metro_km'] = allDf['metro_km_walk'] / allDf['ID_metro'].map(allDf.metro_km_walk.groupby(allDf.ID_metro).mean().to_dict())
allDf['rate_road1_km'] = allDf['big_road1_km'] / allDf['ID_big_road1'].map(allDf.big_road1_km.groupby(allDf.ID_big_road1).mean().to_dict())
# best on LB with weekday
allDf['rate_road2_km'] = allDf['big_road2_km'] / allDf['ID_big_road2'].map(allDf.big_road2_km.groupby(allDf.ID_big_road2).mean().to_dict())
allDf['rate_railroad_km'] = allDf['railroad_station_walk_km'] / allDf['ID_railroad_station_walk'].map(allDf.railroad_station_walk_km.groupby(allDf.ID_railroad_station_walk).mean().to_dict())
# increase CV from 2.35 to 2.33 but lower LB a little bit (with month)
# allDf['additional_edu_index'] = allDf.additional_education_km / allDf.additional_education_raion
# allDf['rate_edu_km'] = (allDf['additional_education_km']
# / allDf['sub_area'].map(allDf.additional_education_km.groupby(allDf.sub_area).mean().to_dict())) / (allDf.additional_education_raion+0.5)
# allDf['num_house_metro'] = allDf['ID_metro'].map(allDf['full_sq'].groupby(allDf.ID_metro).count().to_dict())
# allDf['num_house_road'] = allDf['ID_big_road1'].map(allDf['full_sq'].groupby(allDf.ID_big_road1).count().to_dict())
# do not improve both CV and LB
allDf.drop(['year','timestamp'], 1, inplace = True)
#Separate train and test again
trainDf = allDf[allDf.isTrain==1].drop(['isTrain'],1)
testDf = allDf[allDf.isTrain==0].drop(['isTrain','price_doc', 'w'],1)
outputFile = 'train_featured.csv'
trainDf.to_csv(outputFile,index=False)
outputFile = 'test_featured.csv'
testDf.to_csv(outputFile,index=False)
# Xgboost handles nan itself
'''
### Dealing with NA ###
#num_room, filled by linear regression of full_sq
if filename == 'train_encoded.csv': #na in num_room only appear in training set
LR = LinearRegression()
X = allDf.full_sq[~(np.isnan(allDf.num_room) | np.isnan(allDf.full_sq))].values.reshape(-1, 1)
y = np.array(allDf.num_room[~(np.isnan(allDf.num_room) | np.isnan(allDf.full_sq))])
LR.fit(X,y)
newX = allDf.full_sq[np.isnan(allDf.num_room)].values.reshape(-1, 1)
newX[np.isnan(newX)] = newX[~np.isnan(newX)].mean() #Special cases (na in full_sq) in test data
yfit = LR.predict(newX)
allDf.ix[np.isnan(allDf.num_room),'num_room'] = yfit
#max_floor, twice as the floor
allDf.ix[np.isnan(allDf.max_floor),'max_floor'] = allDf.ix[np.isnan(allDf.max_floor),'floor'] * 2
'''
| [
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"numpy.array",
"gc.collect",
"pandas.read_excel",
"pandas.concat",
"pandas.to_datetime"
] | [((691, 719), 'pandas.concat', 'pd.concat', (['[trainDf, testDf]'], {}), '([trainDf, testDf])\n', (700, 719), True, 'import pandas as pd\n'), ((816, 840), 'pandas.read_csv', 'pd.read_csv', (['"""macro.csv"""'], {}), "('macro.csv')\n", (827, 840), True, 'import pandas as pd\n'), ((882, 910), 'pandas.read_csv', 'pd.read_csv', (['"""divisions.csv"""'], {}), "('divisions.csv')\n", (893, 910), True, 'import pandas as pd\n'), ((3367, 3401), 'pandas.to_datetime', 'pd.to_datetime', (["allDf['timestamp']"], {}), "(allDf['timestamp'])\n", (3381, 3401), True, 'import pandas as pd\n'), ((7869, 7881), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7879, 7881), False, 'import gc\n'), ((483, 507), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {}), "('train.csv')\n", (494, 507), True, 'import pandas as pd\n'), ((534, 557), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (545, 557), True, 'import pandas as pd\n'), ((581, 618), 'pandas.read_excel', 'pd.read_excel', (['"""BAD_ADDRESS_FIX.xlsx"""'], {}), "('BAD_ADDRESS_FIX.xlsx')\n", (594, 618), True, 'import pandas as pd\n'), ((4722, 4750), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (4748, 4750), False, 'from sklearn import model_selection, preprocessing\n'), ((761, 790), 'numpy.array', 'np.array', (['([True] * x.shape[0])'], {}), '([True] * x.shape[0])\n', (769, 790), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import tensorflow as tf
import cv2
import numpy as np
import os
import time
import glob
import pandas as pd
class TLClassifierTester(object):
def __init__(self):
#TODO load classifier
self.input_height = 320
self.input_width = 432
self.config = tf.ConfigProto()
self.config.gpu_options.allow_growth = True
self.sess = tf.Session(config=self.config)
signature_key = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
img_key = 'img'
kp_key = 'kp'
pred_key = 'pred'
logits_key = 'logits'
export_path = os.path.dirname(os.path.abspath(__file__)) + '/model'
#export_path = '/home/student/CarND-Capstone/ros/src/tl_detector/light_classification/model'
meta_graph_def = tf.saved_model.loader.load(
self.sess,
[tf.saved_model.tag_constants.SERVING],
export_path)
signature = meta_graph_def.signature_def
img_tensor_name = signature[signature_key].inputs[img_key].name
kp_tensor_name = signature[signature_key].inputs[kp_key].name
pred_tensor_name = signature[signature_key].outputs[pred_key].name
logits_tensor_name = signature[signature_key].outputs[logits_key].name
self.img = self.sess.graph.get_tensor_by_name(img_tensor_name)
self.keep_prob = self.sess.graph.get_tensor_by_name(kp_tensor_name)
self.pred = self.sess.graph.get_tensor_by_name(pred_tensor_name)
self.logits = self.sess.graph.get_tensor_by_name(logits_tensor_name)
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
#TODO implement light color prediction
img = np.array(cv2.resize(image, (self.input_width, self.input_height)))
# Normalize
img = img/255.0
imshape = img.shape
img = np.reshape(img, (1, imshape[0], imshape[1], imshape[2]))
#print(img.max())
#print(img.min())
#print(img.shape)
#t1 = time.time()
pred = self.sess.run(self.pred, feed_dict = {self.img: img, self.keep_prob: 1.0})[0]
#logits = self.sess.run(self.logits, feed_dict = {self.img: img, self.keep_prob: 1.0})
#logits = sess.run(self.logits, feed_dict = {self.img: img, self.keep_prob: 1.0})
#print(logits)
#print(time.time()-t1)
#return 4
#if (pred == 3):
# pred = 4 # UNKNOWN
return pred
if __name__ == '__main__':
# Load an arbitrary number of test images
num_test_images = 1000
training_file = '../../../../trainingimages/'
red_img_paths = glob.glob(training_file + '*RED.png')
green_img_paths = glob.glob(training_file + '*GREEN.png')
yellow_img_paths = glob.glob(training_file + '*YELLOW.png')
unknown_img_paths = glob.glob(training_file + '*UNKNOWN.png')
img_paths = np.array(red_img_paths + green_img_paths + yellow_img_paths + unknown_img_paths)
labels = np.array([0] * len(red_img_paths) + [1] * len(yellow_img_paths) + [2] * len(green_img_paths) + [3] * len(unknown_img_paths))
indices = np.random.choice(np.arange(len(labels)), num_test_images, replace=False)
img_paths_sampled = img_paths[indices]
labels_sampled = labels[indices]
tlctester = TLClassifierTester()
predictions = []
for i, (img_path, label) in enumerate(zip(img_paths_sampled, labels_sampled)):
#print img_path, label
#print("progress: {}/{}".format(i, num_test_images))
img = cv2.imread(img_path)
prediction = tlctester.get_classification(img)
predictions.append(prediction)
if i%10 == 0:
print("progress: {}/{}".format(i, num_test_images))
predictions = np.array(predictions)
#print(predictions.dtype)
#print(labels_sampled.dtype)
#print(predictions == labels_sampled)
accuracy = np.sum(predictions == labels_sampled)/float(num_test_images)
print("Accuracy: {}".format(accuracy))
y_actu = pd.Series(labels_sampled, name='Actual')
y_pred = pd.Series(predictions, name='Predicted')
df_confusion = pd.crosstab(y_actu, y_pred)
print(df_confusion)
| [
"pandas.Series",
"numpy.reshape",
"tensorflow.Session",
"pandas.crosstab",
"tensorflow.saved_model.loader.load",
"numpy.array",
"numpy.sum",
"os.path.abspath",
"tensorflow.ConfigProto",
"cv2.resize",
"cv2.imread",
"glob.glob"
] | [((3001, 3038), 'glob.glob', 'glob.glob', (["(training_file + '*RED.png')"], {}), "(training_file + '*RED.png')\n", (3010, 3038), False, 'import glob\n'), ((3061, 3100), 'glob.glob', 'glob.glob', (["(training_file + '*GREEN.png')"], {}), "(training_file + '*GREEN.png')\n", (3070, 3100), False, 'import glob\n'), ((3124, 3164), 'glob.glob', 'glob.glob', (["(training_file + '*YELLOW.png')"], {}), "(training_file + '*YELLOW.png')\n", (3133, 3164), False, 'import glob\n'), ((3189, 3230), 'glob.glob', 'glob.glob', (["(training_file + '*UNKNOWN.png')"], {}), "(training_file + '*UNKNOWN.png')\n", (3198, 3230), False, 'import glob\n'), ((3248, 3333), 'numpy.array', 'np.array', (['(red_img_paths + green_img_paths + yellow_img_paths + unknown_img_paths)'], {}), '(red_img_paths + green_img_paths + yellow_img_paths + unknown_img_paths\n )\n', (3256, 3333), True, 'import numpy as np\n'), ((4124, 4145), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (4132, 4145), True, 'import numpy as np\n'), ((4388, 4428), 'pandas.Series', 'pd.Series', (['labels_sampled'], {'name': '"""Actual"""'}), "(labels_sampled, name='Actual')\n", (4397, 4428), True, 'import pandas as pd\n'), ((4442, 4482), 'pandas.Series', 'pd.Series', (['predictions'], {'name': '"""Predicted"""'}), "(predictions, name='Predicted')\n", (4451, 4482), True, 'import pandas as pd\n'), ((4502, 4529), 'pandas.crosstab', 'pd.crosstab', (['y_actu', 'y_pred'], {}), '(y_actu, y_pred)\n', (4513, 4529), True, 'import pandas as pd\n'), ((313, 329), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (327, 329), True, 'import tensorflow as tf\n'), ((411, 441), 'tensorflow.Session', 'tf.Session', ([], {'config': 'self.config'}), '(config=self.config)\n', (421, 441), True, 'import tensorflow as tf\n'), ((849, 944), 'tensorflow.saved_model.loader.load', 'tf.saved_model.loader.load', (['self.sess', '[tf.saved_model.tag_constants.SERVING]', 'export_path'], {}), '(self.sess, 
[tf.saved_model.tag_constants.SERVING\n ], export_path)\n', (875, 944), True, 'import tensorflow as tf\n'), ((2202, 2258), 'numpy.reshape', 'np.reshape', (['img', '(1, imshape[0], imshape[1], imshape[2])'], {}), '(img, (1, imshape[0], imshape[1], imshape[2]))\n', (2212, 2258), True, 'import numpy as np\n'), ((3896, 3916), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (3906, 3916), False, 'import cv2\n'), ((4266, 4303), 'numpy.sum', 'np.sum', (['(predictions == labels_sampled)'], {}), '(predictions == labels_sampled)\n', (4272, 4303), True, 'import numpy as np\n'), ((2048, 2104), 'cv2.resize', 'cv2.resize', (['image', '(self.input_width, self.input_height)'], {}), '(image, (self.input_width, self.input_height))\n', (2058, 2104), False, 'import cv2\n'), ((684, 709), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (699, 709), False, 'import os\n')] |
# coding=utf-8
# Copyright (C) 2020 NumS Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.special
from nums.core.array.application import ArrayApplication
def test_stats(app_inst: ArrayApplication):
np_x = np.arange(100)
ba_x = app_inst.array(np_x, block_shape=np_x.shape)
assert np.allclose(np.mean(np_x), app_inst.mean(ba_x).get())
assert np.allclose(np.std(np_x), app_inst.std(ba_x).get())
def test_uops(app_inst: ArrayApplication):
np_x = np.arange(100)
ba_x = app_inst.array(np_x, block_shape=np_x.shape)
assert np.allclose(np.abs(np_x), app_inst.abs(ba_x).get())
assert np.allclose(np.linalg.norm(np_x), app_inst.norm(ba_x).get())
def test_bops(app_inst: ArrayApplication):
# pylint: disable=no-member
pairs = [(1, 2), (2.0, 3.0), (2, 3.0), (2.0, 3)]
for a, b in pairs:
np_a, np_b = np.array(a), np.array(b)
ba_a, ba_b = app_inst.scalar(a), app_inst.scalar(b)
assert np.allclose(np_a + np_b, (ba_a + ba_b).get())
assert np.allclose(np_a - np_b, (ba_a - ba_b).get())
assert np.allclose(np_a * np_b, (ba_a * ba_b).get())
assert np.allclose(np_a / np_b, (ba_a / ba_b).get())
assert np.allclose(np_a ** np_b, (ba_a ** ba_b).get())
assert np.allclose(
scipy.special.xlogy(np_a, np_b), app_inst.xlogy(ba_a, ba_b).get()
)
def test_bools(app_inst: ArrayApplication):
np_one, np_two = np.array(1), np.array(2)
ba_one, ba_two = app_inst.scalar(1), app_inst.scalar(2)
assert (ba_one < ba_two) == (np_one < np_two)
assert (ba_one <= ba_two) == (np_one <= np_two)
assert (ba_one > ba_two) == (np_one > np_two)
assert (ba_one >= ba_two) == (np_one >= np_two)
assert (ba_one == ba_two) == (np_one == np_two)
assert (ba_one != ba_two) == (np_one != np_two)
def test_bool_reduction(app_inst: ArrayApplication):
np_arr = np.array([True, False, True, True, False, False], dtype=np.bool_)
ba = app_inst.array(np_arr, block_shape=(2,))
result_sum = app_inst.sum(ba, axis=0).get()
np_sum = np.sum(np_arr)
assert result_sum.dtype == np_sum.dtype
assert result_sum == np_sum
def test_trans(app_inst: ArrayApplication):
np_x = np.arange(40).reshape(10, 4)
ba_x = app_inst.array(np_x, block_shape=(5, 2))
assert np.array_equal(ba_x.T.get(), np_x.T)
def test_isnan(app_inst: ArrayApplication):
assert not app_inst.isnan(app_inst.array([1.0], block_shape=(1,)))
assert app_inst.isnan(app_inst.array([np.nan], block_shape=(1,)))
if __name__ == "__main__":
# pylint: disable=import-error
import conftest
app_inst = conftest.get_app("serial")
# test_stats(app_inst)
# test_uops(app_inst)
test_bops(app_inst)
# test_bools(app_inst)
# test_bool_reduction(app_inst)
# test_isnan(app_inst)
| [
"numpy.mean",
"numpy.abs",
"numpy.linalg.norm",
"numpy.array",
"numpy.sum",
"numpy.std",
"conftest.get_app",
"numpy.arange"
] | [((762, 776), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (771, 776), True, 'import numpy as np\n'), ((1017, 1031), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (1026, 1031), True, 'import numpy as np\n'), ((2433, 2498), 'numpy.array', 'np.array', (['[True, False, True, True, False, False]'], {'dtype': 'np.bool_'}), '([True, False, True, True, False, False], dtype=np.bool_)\n', (2441, 2498), True, 'import numpy as np\n'), ((2610, 2624), 'numpy.sum', 'np.sum', (['np_arr'], {}), '(np_arr)\n', (2616, 2624), True, 'import numpy as np\n'), ((3174, 3200), 'conftest.get_app', 'conftest.get_app', (['"""serial"""'], {}), "('serial')\n", (3190, 3200), False, 'import conftest\n'), ((856, 869), 'numpy.mean', 'np.mean', (['np_x'], {}), '(np_x)\n', (863, 869), True, 'import numpy as np\n'), ((921, 933), 'numpy.std', 'np.std', (['np_x'], {}), '(np_x)\n', (927, 933), True, 'import numpy as np\n'), ((1111, 1123), 'numpy.abs', 'np.abs', (['np_x'], {}), '(np_x)\n', (1117, 1123), True, 'import numpy as np\n'), ((1174, 1194), 'numpy.linalg.norm', 'np.linalg.norm', (['np_x'], {}), '(np_x)\n', (1188, 1194), True, 'import numpy as np\n'), ((1972, 1983), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (1980, 1983), True, 'import numpy as np\n'), ((1985, 1996), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (1993, 1996), True, 'import numpy as np\n'), ((1397, 1408), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (1405, 1408), True, 'import numpy as np\n'), ((1410, 1421), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (1418, 1421), True, 'import numpy as np\n'), ((2758, 2771), 'numpy.arange', 'np.arange', (['(40)'], {}), '(40)\n', (2767, 2771), True, 'import numpy as np\n')] |
import argparse
import random
import numpy as np
import torch
from nner import *
from transformers import *
# take args
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--source_language", default='en', type=str,
help="The target language")
parser.add_argument("--target_language", default='en', type=str,
help="The target language")
parser.add_argument("--bert_model", default='', type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--output_dir", default='save', type=str,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--ckpt", default=None, type=str,
help="Checkpoint for previously saved mdoel")
parser.add_argument("--exp_name", default=None, type=str,
help="Checkpoint and config save prefix")
parser.add_argument("--batchsize", default=32, type=int)
parser.add_argument("--num_exp", default=None, type=int,
help="Number of additional examples from source language")
parser.add_argument("--learning_rate", default=5e-5, type=float)
parser.add_argument("--max_epoch", default=5, type=int)
parser.add_argument("--seed", default=0, type=int)
parser.add_argument("--gpuid", default='0', type=str)
parser.add_argument("--max_seq_length", default=128, type=int)
parser.add_argument("--num_duplicate", default=20, type=int)
parser.add_argument("--warmup_proportion", default=0.4, type=float)
parser.add_argument("--gradient_accumulation_steps", default=1, type=int,
help="Number of updates steps to accumulate before performing a backward/update pass.")
args = parser.parse_args()
if __name__ == '__main__':
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
save_ckpt = args.exp_name + '.ckpt'
save_config = args.exp_name + '.cfg'
# parse source domains
print('F1 ================== EXP =====================')
source_language = args.source_language
target_language = args.target_language
print('F1 Target language: %s' % target_language)
print('batchsize: %d' % args.batchsize)
print('learning rate: %.7f' % args.learning_rate)
print('max epochs: %d' % args.max_epoch)
print('max_seq_length: %d' % args.max_seq_length)
print('num_depulicate: %d' % args.num_duplicate)
print('warmup proportion: %.5f' % args.warmup_proportion)
print('model ckpt will be saved at: %s' % save_ckpt)
print('model config will be saved at: %s' % save_config)
processor = ACEProcessor()
label_list = processor.get_labels()
num_labels = len(label_list)
device = torch.device('cuda:' + args.gpuid)
# build model
if args.bert_model == 'bert-base-multilingual-cased':
model = BertForNER.from_pretrained(args.bert_model,
cache_dir=args.output_dir,
num_labels = num_labels,
output_hidden_states=True) # if you want to get all layer hidden states
elif args.bert_model == 'xlm-roberta-base':
model = XLMRobertaForNER.from_pretrained('/data/lan/BiBERT/data/xlm-robert-base-pre-training/tlm/checkpoints/',
cache_dir=args.output_dir,
num_labels=num_labels,
output_hidden_states=True) # if you want to get all layer hidden states
elif args.bert_model == 'xlm-mlm-xnli15-1024':
model = XLMForNER.from_pretrained(args.bert_model,
cache_dir=args.output_dir,
num_labels=num_labels,
output_hidden_states=True) # if you want to get all layer hidden states
elif args.bert_model == 'xlm-mlm-tlm-xnli15-1024':
model = XLMForNER.from_pretrained(args.bert_model,
cache_dir=args.output_dir,
num_labels=num_labels,
output_hidden_states=True) # if you want to get all layer hidden states
elif args.bert_model == 'xlm-roberta-large':
model = XLMRobertaForNER.from_pretrained('/data/lan/BiBERT/saved_model/'+ args.bert_model + '/giga/',
cache_dir=args.output_dir,
num_labels=num_labels,
output_hidden_states=True) # if you want to get all layer hidden states
else:
config = BertConfig.from_json_file(args.bert_model+'/bert_config.json') # config file
config.num_labels = num_labels
config.output_hidden_states = True
#print('num_labels: ', num_labels)
#sys.exit()
model = BertForNER(config=config)
model.load_state_dict(torch.load(args.bert_model+'/pytorch_model.bin', map_location=device), strict=False) # pytorch ckpt file
model.set_label_map(label_list)
model.to(device)
model.set_device('cuda:' + args.gpuid)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
# preprocess the data to json file and use loader to convert it to training format
training_data_path = source_language + '/train.txt'
if 'source' in args.exp_name:
dev_data_path = source_language + '/dev.txt'
else:
dev_data_path = target_language + '/dev.txt'
test_data_path = target_language + '/test.txt'
train_examples = processor.get_examples(training_data_path)
num_train_optimization_steps = int(
len(train_examples) / args.batchsize / args.gradient_accumulation_steps) * args.max_epoch
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, correct_bias=False) # To reproduce BertAdam specific behavior set correct_bias=False
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(args.warmup_proportion * num_train_optimization_steps), num_training_steps=num_train_optimization_steps)
#scheduler = WarmupLinearSchedule(optimizer, warmup_steps=int(args.warmup_proportion * num_train_optimization_steps), t_total=num_train_optimization_steps)
if args.bert_model == 'bert-base-multilingual-cased':
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=False)
tokenizer.bos_token = '[CLS]'
tokenizer.eos_token = '[SEP]'
tokenizer.unk_token = '[UNK]'
tokenizer.sep_token = '[SEP]'
tokenizer.cls_token = '[CLS]'
tokenizer.mask_token = '[MASK]'
tokenizer.pad_token = '[PAD]'
elif args.bert_model == 'xlm-roberta-base':
tokenizer = XLMRobertaTokenizer.from_pretrained(args.bert_model, do_lower_case=False)
elif args.bert_model == 'xlm-roberta-large':
tokenizer = XLMRobertaTokenizer.from_pretrained(args.bert_model, do_lower_case=False)
elif args.bert_model == 'xlm-mlm-xnli15-1024':
tokenizer = XLMTokenizer.from_pretrained(args.bert_model, do_lower_case=False)
tokenizer.bos_token = '<s>'
tokenizer.eos_token = '</s>'
tokenizer.unk_token = '<unk>'
tokenizer.sep_token = '</s>'
tokenizer.cls_token = '</s>'
tokenizer.mask_token = '<special1>'
tokenizer.pad_token = '<pad>'
elif args.bert_model == 'xlm-mlm-tlm-xnli15-1024':
tokenizer = XLMTokenizer.from_pretrained(args.bert_model, do_lower_case=False)
tokenizer.bos_token = '<s>'
tokenizer.eos_token = '</s>'
tokenizer.unk_token = '<unk>'
tokenizer.sep_token = '</s>'
tokenizer.cls_token = '</s>'
tokenizer.mask_token = '<special1>'
tokenizer.pad_token = '<pad>'
else:
#if args.bert_model=='bibert-64k' or args.bert_model == 'csbert' or args.bert_model == 'bibert':
# lower_case_flag=True
#else:
lower_case_flag=True
print('lower_case_flag: ', lower_case_flag)
tokenizer = BertTokenizer.from_pretrained(args.bert_model+'/vocab.txt', do_lower_case=lower_case_flag) # bert vocab file
tokenizer.bos_token = '[CLS]'
tokenizer.eos_token = '[SEP]'
tokenizer.unk_token = '[UNK]'
tokenizer.sep_token = '[SEP]'
tokenizer.cls_token = '[CLS]'
tokenizer.mask_token = '[MASK]'
tokenizer.pad_token = '[PAD]'
# make data loader for train/dev/test
print('Loading training data...\n')
train_dataloader, _ = create_dataloader(training_data_path, set_type='train', batchsize=args.batchsize,
max_seq_length=args.max_seq_length, tokenizer=tokenizer,
num_duplicate=args.num_duplicate)
print('Loading development data...\n')
dev_dataloader, dev_size = create_dataloader(dev_data_path, set_type='dev',
batchsize=args.batchsize,
max_seq_length=args.max_seq_length, tokenizer=tokenizer,
num_duplicate=args.num_duplicate)
print('Loading testing data...\n')
test_dataloader, test_size = create_dataloader(test_data_path, set_type='test',
batchsize=args.batchsize,
max_seq_length=args.max_seq_length, tokenizer=tokenizer,
num_duplicate=args.num_duplicate)
# train
print('Training started...')
model = train(model, train_dataloader=train_dataloader, dev_dataloader=dev_dataloader,
dev_size=dev_size, optimizer=optimizer, scheduler=scheduler, max_epochs=args.max_epoch,
save_ckpt=save_ckpt, save_config=save_config, dev_ref=dev_data_path.replace('txt', 'json'))
# Load best checkpoint
print('Loading best check point...')
output_model_file = 'best_' + save_ckpt
model.load_state_dict(torch.load(output_model_file, map_location=device))
# test
print('Evaluating on dev set...\n')
f1, avg_loss = evaluate(model, dev_dataloader, dev_size, ref=dev_data_path.replace('txt', 'json'))
print('DEV F1: %.5f, avg loss: %.5f' % (f1, avg_loss))
print('Evaluating on test set...\n')
f1, avg_loss = evaluate(model, test_dataloader, test_size, ref=test_data_path.replace('txt', 'json'))
print('Test F1: %.5f, avg loss: %.5f' % (f1, avg_loss))
| [
"torch.manual_seed",
"argparse.ArgumentParser",
"torch.load",
"random.seed",
"numpy.random.seed",
"torch.device"
] | [((129, 154), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (152, 154), False, 'import argparse\n'), ((2004, 2026), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (2015, 2026), False, 'import random\n'), ((2031, 2056), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2045, 2056), True, 'import numpy as np\n'), ((2061, 2089), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2078, 2089), False, 'import torch\n'), ((2950, 2984), 'torch.device', 'torch.device', (["('cuda:' + args.gpuid)"], {}), "('cuda:' + args.gpuid)\n", (2962, 2984), False, 'import torch\n'), ((10676, 10726), 'torch.load', 'torch.load', (['output_model_file'], {'map_location': 'device'}), '(output_model_file, map_location=device)\n', (10686, 10726), False, 'import torch\n'), ((5233, 5304), 'torch.load', 'torch.load', (["(args.bert_model + '/pytorch_model.bin')"], {'map_location': 'device'}), "(args.bert_model + '/pytorch_model.bin', map_location=device)\n", (5243, 5304), False, 'import torch\n')] |
#!/usr/bin/env python
from setuptools import setup, find_packages
from distutils.extension import Extension
from Cython.Build import cythonize
from Cython.Distutils import build_ext
import numpy
import os
MODULE_NAME = "tierpsy"
AUTHOR = '<NAME>'
AUTHOR_EMAIL = '<EMAIL>'
URL = 'https://github.com/ver228/tierpsy-tracker'
DOWNLOAD_URL = 'https://github.com/ver228/tierpsy-tracker'
DESCRIPTION = "tierpsy: Tierpsy Tracker Multi-Worm Tracker."
exec(open(MODULE_NAME + '/version.py').read())
VERSION = __version__
def _get_ext_modules():
#build cython files
# python3 setup.py build_ext --inplace
path_parts = [MODULE_NAME, 'analysis', 'ske_create', 'segWormPython', 'cython_files']
cython_path = os.path.join(*path_parts)
cython_path_e = os.path.join(MODULE_NAME, 'analysis', 'stage_aligment')
def _add_path(f_list):
return [os.path.join(cython_path, x) for x in f_list]
def _get_mod_path(name):
return '.'.join(path_parts + [name])
ext_files = {
"circCurvature" : ["circCurvature.pyx", "c_circCurvature.c"],
"curvspace" : ["curvspace.pyx", "c_curvspace.c"]
}
include_dirs = [numpy.get_include()]
ext_modules = cythonize(os.path.join(cython_path, "*_cython.pyx"))
ext_modules += cythonize(os.path.join(cython_path_e, "*.pyx"))
ext_modules += [Extension(_get_mod_path(name),
sources=_add_path(files),
include_dirs=include_dirs)
for name, files in ext_files.items()]
return ext_modules
PKG_DATA = [
'extras/*',
'extras/param_files/*',
'features/tierpsy_features/extras/*',
'features/open_worm_analysis_toolbox/features/master_eigen_worms_N2.mat',
'features/open_worm_analysis_toolbox/features/feature_metadata/features_list.csv'
]
#install setup
setup(name = MODULE_NAME,
version = VERSION,
description = DESCRIPTION,
author = AUTHOR,
author_email = AUTHOR_EMAIL,
url = URL,
packages = find_packages(),
cmdclass = {'build_ext': build_ext},
ext_modules = _get_ext_modules(),
include_dirs = [numpy.get_include()],
package_data = {'tierpsy': PKG_DATA},
entry_points= {
'gui_scripts': [
'tierpsy_gui_simple = tierpsy.gui.HDF5VideoPlayer:tierpsy_gui_simple'
],
'console_scripts': [
'tierpsy_gui = tierpsy.gui.SelectApp:tierpsy_gui', #windows bug, if I put tierpsy_gui as a gui application I cannot run batch processing since the command line stdout is supressed.
'tierpsy_process = tierpsy.processing.processMultipleFilesFun:tierpsy_process',
'tierpsy_tests = tierpsy.tests.run_tests:tierpsy_tests'
]
}
)
| [
"setuptools.find_packages",
"os.path.join",
"numpy.get_include"
] | [((705, 730), 'os.path.join', 'os.path.join', (['*path_parts'], {}), '(*path_parts)\n', (717, 730), False, 'import os\n'), ((749, 804), 'os.path.join', 'os.path.join', (['MODULE_NAME', '"""analysis"""', '"""stage_aligment"""'], {}), "(MODULE_NAME, 'analysis', 'stage_aligment')\n", (761, 804), False, 'import os\n'), ((1116, 1135), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1133, 1135), False, 'import numpy\n'), ((1163, 1204), 'os.path.join', 'os.path.join', (['cython_path', '"""*_cython.pyx"""'], {}), "(cython_path, '*_cython.pyx')\n", (1175, 1204), False, 'import os\n'), ((1233, 1269), 'os.path.join', 'os.path.join', (['cython_path_e', '"""*.pyx"""'], {}), "(cython_path_e, '*.pyx')\n", (1245, 1269), False, 'import os\n'), ((1930, 1945), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1943, 1945), False, 'from setuptools import setup, find_packages\n'), ((842, 870), 'os.path.join', 'os.path.join', (['cython_path', 'x'], {}), '(cython_path, x)\n', (854, 870), False, 'import os\n'), ((2043, 2062), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (2060, 2062), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
from numpy.linalg import LinAlgError, inv, solve, norm
from numpy import dot, exp
from numpy.random import beta
from scipy.integrate import trapz
import scipy.stats as stats
import pandas as pd
from lifelines.plotting import plot_estimate, plot_regressions
from lifelines.utils import survival_table_from_events, inv_normal_cdf, \
epanechnikov_kernel, StatError, coalesce
from lifelines.progress_bar import progress_bar
class BaseFitter(object):

    def __repr__(self):
        """Summarize the fitter, including fit statistics when available.

        Before ``fit`` has been called there is no ``event_observed``
        attribute, so we fall back to a bare class-name representation.
        """
        classname = self.__class__.__name__
        try:
            n_observations = self.event_observed.shape[0]
            n_censored = (1 - self.event_observed).sum()
        except AttributeError:
            # not fitted yet -- no data to summarize
            return """<lifelines.%s>""" % classname
        return """<lifelines.%s: fitted with %d observations, %d censored>""" % (
            classname, n_observations, n_censored)
class NelsonAalenFitter(BaseFitter):

    """
    Class for fitting the Nelson-Aalen estimate for the cumulative hazard.

    NelsonAalenFitter( alpha=0.95, nelson_aalen_smoothing=True)

    alpha: The alpha value associated with the confidence intervals.
    nelson_aalen_smoothing: If the event times are naturally discrete (like discrete years, minutes, etc.)
      then it is advisable to turn this parameter to False. See [1], pg.84.
    """

    def __init__(self, alpha=0.95, nelson_aalen_smoothing=True):
        self.alpha = alpha
        self.nelson_aalen_smoothing = nelson_aalen_smoothing

        # Choose the increment/variance estimator pair once, at construction:
        # the "smooth" pair suits continuous event times, the "discrete" pair
        # naturally discrete event times (see class docstring).
        if self.nelson_aalen_smoothing:
            self._variance_f = self._variance_f_smooth
            self._additive_f = self._additive_f_smooth
        else:
            self._variance_f = self._variance_f_discrete
            self._additive_f = self._additive_f_discrete

    def fit(self, durations, event_observed=None, timeline=None, entry=None,
            label='NA-estimate', alpha=None, ci_labels=None):
        """
        Parameters:
          duration: an array, or pd.Series, of length n -- duration subject was observed for
          timeline: return the best estimate at the values in timelines (postively increasing)
          event_observed: an array, or pd.Series, of length n -- True if the the death was observed, False if the event
             was lost (right-censored). Defaults all True if event_observed==None
          entry: an array, or pd.Series, of length n -- relative time when a subject entered the study. This is
             useful for left-truncated observations, i.e the birth event was not observed.
             If None, defaults to all 0 (all birth events observed.)
          label: a string to name the column of the estimate.
          alpha: the alpha value in the confidence intervals. Overrides the initializing
             alpha for this call to fit only.
          ci_labels: add custom column names to the generated confidence intervals
                as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>

        Returns:
          self, with new properties like 'cumulative_hazard_'.
        """
        # preprocess_inputs / _additive_estimate are module-level helpers
        # (defined elsewhere in this file).
        v = preprocess_inputs(durations, event_observed, timeline, entry)
        self.durations, self.event_observed, self.timeline, self.entry, self.event_table = v
        cumulative_hazard_, cumulative_sq_ = _additive_estimate(self.event_table, self.timeline,
                                                                self._additive_f, self._variance_f, False)

        # estimates
        self.cumulative_hazard_ = pd.DataFrame(cumulative_hazard_, columns=[label])
        self.confidence_interval_ = self._bounds(cumulative_sq_[:, None], alpha if alpha else self.alpha, ci_labels)
        # kept for smoothed_hazard_confidence_intervals_ below
        self._cumulative_sq = cumulative_sq_

        # estimation functions
        self.predict = _predict(self, "cumulative_hazard_", label)
        self.subtract = _subtract(self, "cumulative_hazard_")
        self.divide = _divide(self, "cumulative_hazard_")

        # plotting
        self.plot = plot_estimate(self, "cumulative_hazard_")
        self.plot_cumulative_hazard = self.plot
        # NOTE(review): no 'hazard_' attribute is set by fit(); presumably
        # plot_estimate resolves it lazily -- confirm against plot_estimate.
        self.plot_hazard = plot_estimate(self, 'hazard_')

        return self

    def _bounds(self, cumulative_sq_, alpha, ci_labels):
        # Log-normal style confidence bands around the cumulative hazard.
        # NOTE(review): the default column labels below use self.alpha even
        # when an `alpha` override was passed in -- labels may not match the
        # interval actually computed; confirm intended.
        alpha2 = inv_normal_cdf(1 - (1 - alpha) / 2)
        df = pd.DataFrame(index=self.timeline)
        name = self.cumulative_hazard_.columns[0]
        if ci_labels is None:
            ci_labels = ["%s_upper_%.2f" % (name, self.alpha), "%s_lower_%.2f" % (name, self.alpha)]
        assert len(ci_labels) == 2, "ci_labels should be a length 2 array."
        self.ci_labels = ci_labels

        df[ci_labels[0]] = self.cumulative_hazard_.values * \
            np.exp(alpha2 * np.sqrt(cumulative_sq_) / self.cumulative_hazard_.values)
        df[ci_labels[1]] = self.cumulative_hazard_.values * \
            np.exp(-alpha2 * np.sqrt(cumulative_sq_) / self.cumulative_hazard_.values)
        return df

    def _variance_f_smooth(self, population, deaths):
        # Per time point: sum of 1/(N - i)^2 over the d deaths at that time.
        df = pd.DataFrame({'N': population, 'd': deaths})
        return df.apply(lambda N_d: np.sum((1. / (N_d[0] - i) ** 2 for i in range(int(N_d[1])))), axis=1)

    def _variance_f_discrete(self, population, deaths):
        # (N - d) * d / N^3 at each time point.
        return 1. * (population - deaths) * deaths / population ** 3

    def _additive_f_smooth(self, population, deaths):
        # Per time point: sum of 1/(N - i) over the d deaths at that time.
        df = pd.DataFrame({'N': population, 'd': deaths})
        return df.apply(lambda N_d: np.sum((1. / (N_d[0] - i) for i in range(int(N_d[1])))), axis=1)

    def _additive_f_discrete(self, population, deaths):
        # d / N, with division-by-zero infinities mapped to 0.
        return (1. * deaths / population).replace([np.inf], 0)

    def smoothed_hazard_(self, bandwidth):
        """
        Parameters:
          bandwidth: the bandwith used in the Epanechnikov kernel.

        Returns:
          a DataFrame of the smoothed hazard
        """
        timeline = self.timeline
        cumulative_hazard_name = self.cumulative_hazard_.columns[0]
        hazard_name = "smoothed-" + cumulative_hazard_name
        # increments of the cumulative hazard (first row backfilled)
        hazard_ = self.cumulative_hazard_.diff().fillna(self.cumulative_hazard_.iloc[0])
        # only kernel-smooth over time points where the hazard actually jumps
        C = (hazard_[cumulative_hazard_name] != 0.0).values
        return pd.DataFrame( 1./(2*bandwidth)*np.dot(epanechnikov_kernel(timeline[:, None], timeline[C][None, :], bandwidth), hazard_.values[C,:]),
                             columns=[hazard_name], index=timeline)

    def smoothed_hazard_confidence_intervals_(self, bandwidth, hazard_=None):
        """
        Parameter:
          bandwidth: the bandwith to use in the Epanechnikov kernel.
          hazard_: a computed (n,) numpy array of estimated hazard rates. If none, uses naf.smoothed_hazard_
        """
        if hazard_ is None:
            hazard_ = self.smoothed_hazard_(bandwidth).values[:, 0]

        timeline = self.timeline
        alpha2 = inv_normal_cdf(1 - (1 - self.alpha) / 2)
        # variance increments of the cumulative hazard estimate
        self._cumulative_sq.iloc[0] = 0
        var_hazard_ = self._cumulative_sq.diff().fillna(self._cumulative_sq.iloc[0])
        C = (var_hazard_.values != 0.0)  # only consider the points with jumps
        std_hazard_ = np.sqrt(1./(2*bandwidth**2)*np.dot(epanechnikov_kernel(timeline[:, None], timeline[C][None, :], bandwidth)**2, var_hazard_.values[C]))
        values = {
            self.ci_labels[0]: hazard_ * np.exp(alpha2 * std_hazard_ / hazard_),
            self.ci_labels[1]: hazard_ * np.exp(-alpha2 * std_hazard_ / hazard_)
        }
        return pd.DataFrame(values, index=timeline)
class KaplanMeierFitter(BaseFitter):

    """
    Class for fitting the Kaplan-Meier estimate for the survival function.

    KaplanMeierFitter( alpha=0.95)

    alpha: The alpha value associated with the confidence intervals.
    """

    def __init__(self, alpha=0.95):
        self.alpha = alpha

    def fit(self, durations, event_observed=None, timeline=None, entry=None, label='KM-estimate',
            alpha=None, left_censorship=False, ci_labels=None):
        """
        Parameters:
          duration: an array, or pd.Series, of length n -- duration subject was observed for
          timeline: return the best estimate at the values in timelines (postively increasing)
          event_observed: an array, or pd.Series, of length n -- True if the the death was observed, False if the event
             was lost (right-censored). Defaults all True if event_observed==None
          entry: an array, or pd.Series, of length n -- relative time when a subject entered the study. This is
             useful for left-truncated observations, i.e the birth event was not observed.
             If None, defaults to all 0 (all birth events observed.)
          label: a string to name the column of the estimate.
          alpha: the alpha value in the confidence intervals. Overrides the initializing
             alpha for this call to fit only.
          left_censorship: True if durations and event_observed refer to left censorship events. Default False
          ci_labels: add custom column names to the generated confidence intervals
                as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>

        Returns:
          self, with new properties like 'survival_function_'.
        """
        # if the user is interested in left-censorship, we return the cumulative_density_, no survival_function_,
        estimate_name = 'survival_function_' if not left_censorship else 'cumulative_density_'
        v = preprocess_inputs(durations, event_observed, timeline, entry)
        self.durations, self.event_observed, self.timeline, self.entry, self.event_table = v

        # _additive_estimate is a module-level helper (defined elsewhere in
        # this file); it accumulates the log-survival increments from
        # _additive_f and their variances from _additive_var.
        log_survival_function, cumulative_sq_ = _additive_estimate(self.event_table, self.timeline,
                                                                   self._additive_f, self._additive_var,
                                                                   left_censorship)

        if entry is not None:
            # a serious problem with KM is that when the sample size is small and there are too few early
            # truncation times, it may happen that is the number of patients at risk and the number of deaths is the same.
            # we adjust for this using the Breslow-Fleming-Harrington estimator
            n = self.event_table.shape[0]
            net_population = (self.event_table['entrance'] - self.event_table['removed']).cumsum()
            if net_population.iloc[:int(n / 2)].min() == 0:
                ix = net_population.iloc[:int(n / 2)].argmin()
                raise StatError("""There are too few early truncation times and too many events. S(t)==0 for all t>%.1f. Recommend BFH estimator.""" % ix)

        # estimation
        setattr(self, estimate_name, pd.DataFrame(np.exp(log_survival_function), columns=[label]))
        # name-mangled private alias (``_KaplanMeierFitter__estimate``) so
        # _bounds can reach the estimate regardless of which name was set.
        self.__estimate = getattr(self, estimate_name)
        self.confidence_interval_ = self._bounds(cumulative_sq_[:, None], alpha if alpha else self.alpha, ci_labels)
        self.median_ = median_survival_times(self.__estimate)

        # estimation methods
        self.predict = _predict(self, estimate_name, label)
        self.subtract = _subtract(self, estimate_name)
        self.divide = _divide(self, estimate_name)

        # plotting functions
        self.plot = plot_estimate(self, estimate_name)
        setattr(self, "plot_" + estimate_name, self.plot)
        return self

    def _bounds(self, cumulative_sq_, alpha, ci_labels):
        # Confidence bands via the log(-log S) transform.
        # NOTE(review): default labels use self.alpha even when an `alpha`
        # override was passed -- labels may not match the computed interval.
        # See http://courses.nus.edu.sg/course/stacar/internet/st3242/handouts/notes2.pdfg
        alpha2 = inv_normal_cdf((1. + alpha) / 2.)
        df = pd.DataFrame(index=self.timeline)
        name = self.__estimate.columns[0]
        v = np.log(self.__estimate.values)

        if ci_labels is None:
            ci_labels = ["%s_upper_%.2f" % (name, self.alpha), "%s_lower_%.2f" % (name, self.alpha)]
        assert len(ci_labels) == 2, "ci_labels should be a length 2 array."

        df[ci_labels[0]] = np.exp(-np.exp(np.log(-v) + alpha2 * np.sqrt(cumulative_sq_) / v))
        df[ci_labels[1]] = np.exp(-np.exp(np.log(-v) - alpha2 * np.sqrt(cumulative_sq_) / v))
        return df

    def _additive_f(self, population, deaths):
        # log-survival increment: log(N - d) - log(N).
        # NOTE(review): np.seterr mutates numpy's *global* error state and
        # never restores it -- side effect persists after this call.
        np.seterr(invalid='ignore')
        return (np.log(population - deaths) - np.log(population))

    def _additive_var(self, population, deaths):
        # Greenwood-style variance increment: d / (N * (N - d)), with
        # division-by-zero infinities mapped to 0.
        np.seterr(divide='ignore')
        return (1. * deaths / (population * (population - deaths))).replace([np.inf], 0)
class BreslowFlemingHarringtonFitter(BaseFitter):

    """
    Class for fitting the Breslow-Fleming-Harrington estimate for the survival function. This estimator
    is a biased estimator of the survival function but is more stable when the population is small and
    there are too few early truncation times: it may happen that the number of patients at risk and
    the number of deaths is the same.

    Mathematically, the NAF estimator is the negative logarithm of the BFH estimator.

    BreslowFlemingHarringtonFitter(alpha=0.95)

    alpha: The alpha value associated with the confidence intervals.
    """

    def __init__(self, alpha=0.95):
        self.alpha = alpha

    def fit(self, durations, event_observed=None, timeline=None, entry=None,
            label='BFH-estimate', alpha=None, ci_labels=None):
        """
        Parameters:
          duration: an array, or pd.Series, of length n -- duration subject was observed for
          timeline: return the best estimate at the values in timelines (postively increasing)
          event_observed: an array, or pd.Series, of length n -- True if the the death was observed, False if the event
             was lost (right-censored). Defaults all True if event_observed==None
          entry: an array, or pd.Series, of length n -- relative time when a subject entered the study. This is
             useful for left-truncated observations, i.e the birth event was not observed.
             If None, defaults to all 0 (all birth events observed.)
          label: a string to name the column of the estimate.
          alpha: the alpha value in the confidence intervals. Overrides the initializing
             alpha for this call to fit only.
          ci_labels: add custom column names to the generated confidence intervals
                as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>

        Returns:
          self, with new properties like 'survival_function_'.
        """
        # Bug fix: the `alpha` override documented above was previously
        # ignored -- the Nelson-Aalen fitter was always built with self.alpha.
        naf = NelsonAalenFitter(alpha if alpha else self.alpha)
        # The BFH survival estimate is exp(-H(t)) of the Nelson-Aalen
        # cumulative hazard, so delegate all estimation to NelsonAalenFitter.
        naf.fit(durations, event_observed=event_observed, timeline=timeline, label=label, entry=entry, ci_labels=ci_labels)
        self.durations, self.event_observed, self.timeline, self.entry, self.event_table = \
            naf.durations, naf.event_observed, naf.timeline, naf.entry, naf.event_table

        # estimation
        self.survival_function_ = np.exp(-naf.cumulative_hazard_)
        self.confidence_interval_ = np.exp(-naf.confidence_interval_)
        self.median_ = median_survival_times(self.survival_function_)

        # estimation methods
        self.predict = _predict(self, "survival_function_", label)
        self.subtract = _subtract(self, "survival_function_")
        self.divide = _divide(self, "survival_function_")

        # plotting functions
        self.plot = plot_estimate(self, "survival_function_")
        self.plot_survival_function = self.plot
        return self
class BayesianFitter(BaseFitter):

    """
    If you have small data, and KM feels too uncertain, you can use the BayesianFitter to
    generate sample survival functions. The algorithm is:

    S_i(T) = \Prod_{t=0}^T (1 - p_t)

    where p_t ~ Beta( 0.01 + d_t, 0.01 + n_t - d_t), d_t is the number of deaths and n_t is the size of the
    population at risk at time t. Each time point carries an independent Beta(0.01, 0.01)
    prior (larger prior parameters lead to a high bias).

    Parameters:
      samples: the number of sample survival functions to return.
    """

    def __init__(self, samples=300):
        self.beta = beta
        self.samples = samples

    def fit(self, durations, censorship=None, timeline=None, entry=None):
        """
        Parameters:
          durations: an array, or pd.Series, of length n -- duration subject was observed for
          timeline: return the best estimate at the values in timelines (postively increasing)
          censorship: an array, or pd.Series, of length n -- True if the the death was observed, False if the event
             was lost (right-censored). Defaults all True if censorship==None
          entry: an array, or pd.Series, of length n -- relative time when a subject entered the study. This is
             useful for left-truncated observations, i.e the birth event was not observed.
             If None, defaults to all 0 (all birth events observed.)

        Returns:
          self, with new properties like 'sample_survival_functions_'.
        """
        (self.durations, self.censorship, self.timeline,
         self.entry, self.event_table) = preprocess_inputs(durations, censorship, timeline, entry)
        self.sample_survival_functions_ = self.generate_sample_path(self.samples)
        return self

    def plot(self, **kwargs):
        # Overlaying many sampled paths reads best as thin, nearly
        # transparent lines in a single colour, with no legend.
        kwargs['c'] = coalesce(kwargs.pop('c', None), kwargs.pop('color', None), '#348ABD')
        kwargs['alpha'] = coalesce(kwargs.pop('alpha', None), 0.05)
        kwargs['legend'] = False
        return self.sample_survival_functions_.plot(**kwargs)

    def generate_sample_path(self, n=1):
        """Draw ``n`` survival curves from the posterior described above."""
        d_t = self.event_table['observed']
        # population at risk just before each event time
        n_t = self.event_table['entrance'].cumsum() - self.event_table['removed'].cumsum().shift(1).fillna(0)
        n_times = d_t.shape[0]
        # one Beta draw per (path, time point); 1 - p_t is the conditional
        # probability of surviving past t
        survival_fracs = 1. - beta(0.01 + d_t, 0.01 + n_t - d_t, size=(n, n_times))
        # cumulative product across time, computed in log-space
        paths = np.exp(np.log(survival_fracs).cumsum(1)).T
        return pd.DataFrame(paths, index=self.timeline)
class AalenAdditiveFitter(BaseFitter):

    """
    This class fits the regression model:

    hazard(t) = b_0(t) + b_t(t)*x_1 + ... + b_N(t)*x_N

    that is, the hazard rate is a linear function of the covariates.

    NOTE(review): this implementation targets an old pandas API
    (``.ix``, ``.iteritems``, ``Panel``/``to_panel``); it will not run on
    modern pandas -- confirm the pinned pandas version before changing.

    Parameters:
      fit_intercept: If False, do not attach an intercept (column of ones) to the covariate matrix. The
        intercept, b_0(t) acts as a baseline hazard.
      alpha: the level in the confidence intervals.
      penalizer: Attach a L2 penalizer to the regression. This improves stability of the estimates
        and controls high correlation between covariates. Recommended, even if a small value.
    """

    def __init__(self, fit_intercept=True, alpha=0.95, penalizer=0.5):
        self.fit_intercept = fit_intercept
        self.alpha = alpha
        self.penalizer = penalizer
        assert penalizer >= 0, "penalizer must be >= 0."

    def fit(self, dataframe, duration_col="T", event_col="E",
            timeline=None, id_col=None, show_progress=True):
        """
        Perform inference on the coefficients of the Aalen additive model.

        Parameters:
            dataframe: a pandas dataframe, with covariates and a duration_col and a event_col.

                static covariates:
                    one row per individual. duration_col refers to how long the individual was
                    observed for. event_col is a boolean: 1 if individual 'died', 0 else. id_col
                    should be left as None.

                time-varying covariates:
                    For time-varying covariates, an id_col is required to keep track of individuals'
                    changing covariates. individual should have a unique id. duration_col refers to how
                    long the individual has been observed to up to that point. event_col refers to if
                    the event (death) occured in that period. Censored individuals will not have a 1.
                    For example:

                        +----+---+---+------+------+
                        | id | T | E | var1 | var2 |
                        +----+---+---+------+------+
                        |  1 | 1 | 0 |    0 |    1 |
                        |  1 | 2 | 0 |    0 |    1 |
                        |  1 | 3 | 0 |    4 |    3 |
                        |  1 | 4 | 1 |    8 |    4 |
                        |  2 | 1 | 0 |    1 |    1 |
                        |  2 | 2 | 0 |    1 |    2 |
                        |  2 | 3 | 0 |    1 |    2 |
                        +----+---+---+------+------+

            duration_col: specify what the duration column is called in the dataframe
            event_col: specify what the event occurred column is called in the dataframe
            timeline: reformat the estimates index to a new timeline.
            id_col: (only for time-varying covariates) name of the id column in the dataframe
            progress_bar: include a fancy progress bar =)
            max_unique_durations: memory can be an issue if there are too many
              unique durations. If the max is surpassed, max_unique_durations bins
              will be used.

        Returns:
          self, with new methods like plot, smoothed_hazards_ and properties like cumulative_hazards_
        """
        # Dispatch on id_col: None means one row per individual (static
        # covariates); otherwise rows describe time-varying covariates.
        if id_col is None:
            self._fit_static(dataframe, duration_col, event_col, timeline, show_progress)
        else:
            self._fit_varying(dataframe, duration_col, event_col, id_col, timeline, show_progress)
        return self

    def _fit_static(self, dataframe, duration_col="T", event_col="E",
                    timeline=None, show_progress=True):
        """
        Perform inference on the coefficients of the Aalen additive model
        for static (non-time-varying) covariates.

        Parameters:
            dataframe: a pandas dataframe, with covariates and a duration_col and a event_col.
                one row per individual. duration_col refers to how long the individual was
                observed for. event_col is a boolean: 1 if individual 'died', 0 else.
            duration_col: specify what the duration column is called in the dataframe
            event_col: specify what the event occurred column is called in the dataframe
            timeline: reformat the estimates index to a new timeline.
            show_progress: include a fancy progress bar!

        Returns:
          self, with new methods like plot, smoothed_hazards_ and properties like cumulative_hazards_
        """
        from_tuples = pd.MultiIndex.from_tuples
        df = dataframe.copy()

        # set unique ids for individuals
        id_col = 'id'
        ids = np.arange(df.shape[0])
        df[id_col] = ids

        # if the regression should fit an intercept
        if self.fit_intercept:
            df['baseline'] = 1.

        # each individual should have an ID of time of leaving study
        C = pd.Series(df[event_col].values, dtype=bool, index=ids)
        T = pd.Series(df[duration_col].values, index=ids)

        df = df.set_index([duration_col, id_col])

        # process rows in increasing order of observed duration
        ix = T.argsort()
        T, C = T.iloc[ix], C.iloc[ix]

        del df[event_col]
        n, d = df.shape
        columns = df.columns

        # initialize dataframe to store estimates; one row per observed event
        non_censorsed_times = list(T[C].iteritems())
        n_deaths = len(non_censorsed_times)

        hazards_ = pd.DataFrame(np.zeros((n_deaths, d)), columns=columns,
                                index=from_tuples(non_censorsed_times)).swaplevel(1, 0)

        variance_ = pd.DataFrame(np.zeros((n_deaths, d)), columns=columns,
                                 index=from_tuples(non_censorsed_times)).swaplevel(1, 0)

        # initializes the penalizer matrix (ridge term added to X'X below)
        penalizer = self.penalizer * np.eye(d)

        # initialize loop variables.
        progress = progress_bar(n_deaths)
        to_remove = []
        t = T.iloc[0]
        i = 0

        # NOTE(review): `id` shadows the builtin throughout this loop.
        for id, time in T.iteritems():  # should be sorted.

            if t != time:
                assert t < time
                # remove the individuals from the previous loop.
                # (rows are zeroed rather than dropped, removing them from
                # the risk set while keeping the matrix shape fixed)
                df.iloc[to_remove] = 0.
                to_remove = []
                t = time

            to_remove.append(id)
            if C[id] == 0:
                continue

            relevant_individuals = (ids == id)
            assert relevant_individuals.sum() == 1.

            # perform linear regression step.
            X = df.values
            try:
                # ridge-regularized least-squares projector (X'X + P)^-1 X'
                V = dot(inv(dot(X.T, X) + penalizer), X.T)
            except LinAlgError:
                # NOTE(review): on failure V keeps its value from the
                # previous iteration (or is undefined on the first), so the
                # line below uses stale data or raises NameError -- likely
                # should `continue` or re-raise; confirm intended behavior.
                print("Linear regression error. Try increasing the penalizer term.")

            v = dot(V, 1.0 * relevant_individuals)

            hazards_.ix[time, id] = v.T
            variance_.ix[time, id] = V[:, relevant_individuals][:, 0] ** 2

            # update progress bar
            if show_progress:
                i += 1
                progress.update(i)

        # print a new line so the console displays well
        if show_progress:
            print()

        # not sure this is the correct thing to do.
        self.hazards_ = hazards_.groupby(level=0).sum()
        self.cumulative_hazards_ = self.hazards_.cumsum()
        self.variance_ = variance_.groupby(level=0).sum()

        if timeline is not None:
            self.hazards_ = self.hazards_.reindex(timeline, method='ffill')
            self.cumulative_hazards_ = self.cumulative_hazards_.reindex(timeline, method='ffill')
            self.variance_ = self.variance_.reindex(timeline, method='ffill')
            self.timeline = timeline
        else:
            self.timeline = self.hazards_.index.values.astype(float)

        self.data = dataframe
        self.durations = T
        self.event_observed = C
        self._compute_confidence_intervals()
        self.plot = plot_regressions(self)

        return

    def _fit_varying(self, dataframe, duration_col="T", event_col="E",
                     id_col=None, timeline=None, show_progress=True):
        # Time-varying-covariate analogue of _fit_static; data is reshaped
        # into a (deprecated) pandas Panel keyed by (time, individual).
        from_tuples = pd.MultiIndex.from_tuples
        df = dataframe.copy()

        # if the regression should fit an intercept
        if self.fit_intercept:
            df['baseline'] = 1.

        # each individual should have an ID of time of leaving study
        df = df.set_index([duration_col, id_col])

        # an individual is an 'event' if any of its periods has event_col set
        C_panel = df[[event_col]].to_panel().transpose(2, 1, 0)
        C = C_panel.minor_xs(event_col).sum().astype(bool)
        # last observed period per individual
        T = (C_panel.minor_xs(event_col).notnull()).cumsum().idxmax()

        del df[event_col]
        n, d = df.shape

        # so this is a problem line. bfill performs a recursion which is
        # really not scalable. Plus even for modest datasets, this eats a lot of memory.
        wp = df.to_panel().bfill().fillna(0)

        # initialize dataframe to store estimates
        non_censorsed_times = list(T[C].iteritems())
        columns = wp.items
        hazards_ = pd.DataFrame(np.zeros((len(non_censorsed_times), d)),
                                columns=columns, index=from_tuples(non_censorsed_times))

        variance_ = pd.DataFrame(np.zeros((len(non_censorsed_times), d)),
                                 columns=columns, index=from_tuples(non_censorsed_times))

        # initializes the penalizer matrix (ridge term added to X'X below)
        penalizer = self.penalizer * np.eye(d)

        ids = wp.minor_axis.values
        progress = progress_bar(len(non_censorsed_times))

        # this makes indexing times much faster
        wp = wp.swapaxes(0, 1, copy=False).swapaxes(1, 2, copy=False)

        for i, (id, time) in enumerate(non_censorsed_times):
            relevant_individuals = (ids == id)
            assert relevant_individuals.sum() == 1.

            X = wp[time].values

            # perform linear regression step.
            try:
                # ridge-regularized least-squares projector (X'X + P)^-1 X'
                V = dot(inv(dot(X.T, X) + penalizer), X.T)
            except LinAlgError:
                # NOTE(review): same stale-/undefined-V hazard as in
                # _fit_static -- V is reused below after a failed inversion.
                print("Linear regression error. Try increasing the penalizer term.")

            v = dot(V, 1.0 * relevant_individuals)

            hazards_.ix[id, time] = v.T
            variance_.ix[id, time] = V[:, relevant_individuals][:, 0] ** 2

            # update progress bar
            if show_progress:
                progress.update(i)

        # print a new line so the console displays well
        if show_progress:
            print()

        ordered_cols = df.columns  # to_panel() mixes up my columns

        # not sure this is the correct thing to do.
        self.hazards_ = hazards_.groupby(level=1).sum()[ordered_cols]
        self.cumulative_hazards_ = self.hazards_.cumsum()[ordered_cols]
        self.variance_ = variance_.groupby(level=1).sum()[ordered_cols]

        if timeline is not None:
            self.hazards_ = self.hazards_.reindex(timeline, method='ffill')
            self.cumulative_hazards_ = self.cumulative_hazards_.reindex(timeline, method='ffill')
            self.variance_ = self.variance_.reindex(timeline, method='ffill')
            self.timeline = timeline
        else:
            self.timeline = self.hazards_.index.values.astype(float)

        self.data = wp
        self.durations = T
        self.event_observed = C
        self._compute_confidence_intervals()
        self.plot = plot_regressions(self)

        return

    def smoothed_hazards_(self, bandwidth=1):
        """
        Using the epanechnikov kernel to smooth the hazard function, with sigma/bandwidth
        """
        return pd.DataFrame(np.dot(epanechnikov_kernel(self.timeline[:, None], self.timeline, bandwidth), self.hazards_.values),
                            columns=self.hazards_.columns, index=self.timeline)

    def _compute_confidence_intervals(self):
        # Normal-approximation bands around each cumulative hazard
        # coefficient, stacked as a two-level ('upper'/'lower', time) index.
        alpha2 = inv_normal_cdf(1 - (1 - self.alpha) / 2)
        n = self.timeline.shape[0]
        d = self.cumulative_hazards_.shape[1]
        index = [['upper'] * n + ['lower'] * n, np.concatenate([self.timeline, self.timeline])]
        self.confidence_intervals_ = pd.DataFrame(np.zeros((2 * n, d)),
                                                  index=index,
                                                  columns=self.cumulative_hazards_.columns
                                                  )
        self.confidence_intervals_.ix['upper'] = self.cumulative_hazards_.values + \
            alpha2 * np.sqrt(self.variance_.cumsum().values)

        self.confidence_intervals_.ix['lower'] = self.cumulative_hazards_.values - \
            alpha2 * np.sqrt(self.variance_.cumsum().values)
        return

    def predict_cumulative_hazard(self, X, id_col=None):
        """
        X: a (n,d) covariate matrix

        Returns the hazard rates for the individuals
        """
        if id_col is not None:
            # see https://github.com/CamDavidsonPilon/lifelines/issues/38
            raise NotImplementedError

        n, d = X.shape

        # NOTE(review): the try/except result below is immediately
        # overwritten by the following line -- the `.values` extraction is
        # dead code; confirm whether DataFrame inputs were meant to be
        # converted to arrays before the intercept column is appended.
        try:
            X_ = X.values.copy()
        except:
            X_ = X.copy()
        X_ = X.copy() if not self.fit_intercept else np.c_[X.copy(), np.ones((n, 1))]
        return pd.DataFrame(np.dot(self.cumulative_hazards_, X_.T), index=self.timeline)

    def predict_survival_function(self, X):
        """
        X: a (n,d) covariate matrix

        Returns the survival functions for the individuals
        """
        # S(t|x) = exp(-H(t|x))
        return np.exp(-self.predict_cumulative_hazard(X))

    def predict_median(self, X):
        """
        X: a (n,d) covariate matrix
        Returns the median lifetimes for the individuals
        """
        return median_survival_times(self.predict_survival_function(X))

    def predict_expectation(self, X):
        """
        Compute the expected lifetime, E[T], using covarites X.
        """
        # E[T] = integral of S(t) over the observed timeline (trapezoid rule)
        t = self.cumulative_hazards_.index
        return trapz(self.predict_survival_function(X).values.T, t)
class CoxPHFitter(BaseFitter):
"""
This class implements fitting Cox's proportional hazard model:
h(t|x) = h_0(t)*exp(x'*beta)
Parameters:
alpha: the level in the confidence intervals.
tie_method: specify how the fitter should deal with ties. Currently only
'Efron' is available.
"""
def __init__(self, alpha=0.95, tie_method='Efron'):
self.alpha = alpha
if tie_method != 'Efron':
raise NotImplementedError("Only Efron is available atm.")
self.tie_method = tie_method
    def _get_efron_values(self, X, beta, T, E, include_likelihood=False):
        """
        Calculates the first and second order vector differentials,
        with respect to beta. If 'include_likelihood' is True, then
        the log likelihood is also calculated. This is omitted by default
        to speed up the fit.

        Note that X, T, E are assumed to be sorted on T!

        Parameters:
            X: (n,d) numpy array of observations.
            beta: (1, d) numpy array of coefficients.
            T: (n) numpy array representing observed durations.
            E: (n) numpy array representing death events.

        Returns:
            hessian: (d, d) numpy array,
            gradient: (1, d) numpy array
            log_likelihood: double, if include_likelihood=True
        """
        n, d = X.shape
        hessian = np.zeros((d, d))
        gradient = np.zeros((1, d))
        log_lik = 0

        # Init risk and tie sums to zero
        x_tie_sum = np.zeros((1, d))
        risk_phi, tie_phi = 0, 0
        risk_phi_x, tie_phi_x = np.zeros((1, d)), np.zeros((1, d))
        risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))

        # Init number of ties
        tie_count = 0

        # Iterate backwards to utilize recursive relationship:
        # walking from the largest T down, the risk set only grows, so the
        # risk sums can be accumulated instead of recomputed per event time.
        for i, (ti, ei) in reversed(list(enumerate(zip(T, E)))):
            # Doing it like this to preserve shape
            xi = X[i:i+1]

            # Calculate phi values (phi = exp(x beta), the partial hazard)
            phi_i = exp(dot(xi, beta))
            phi_x_i = dot(phi_i, xi)
            phi_x_x_i = np.dot(xi.T, xi) * phi_i

            # Calculate sums of Risk set
            risk_phi += phi_i
            risk_phi_x += phi_x_i
            risk_phi_x_x += phi_x_x_i

            # Calculate sums of Ties, if this is an event
            if ei:
                x_tie_sum += xi
                tie_phi += phi_i
                tie_phi_x += phi_x_i
                tie_phi_x_x += phi_x_x_i

                # Keep track of count
                tie_count += 1

            if i > 0 and T[i-1] == ti:
                # There are more ties/members of the risk set
                continue
            elif tie_count == 0:
                # Only censored with current time, move on
                continue

            # There was atleast one event and no more ties remain. Time to sum.
            partial_gradient = np.zeros((1, d))

            for l in range(tie_count):
                # Efron's correction: the l-th tied event sees the risk sums
                # reduced by the fraction c = l / tie_count of the tie sums.
                # NOTE(review): under Python 2 (this file imports only
                # print_function from __future__, not division) this is
                # *integer* division, making c == 0 for l < tie_count --
                # confirm which Python version this module targets.
                c = l / tie_count

                denom = (risk_phi - c * tie_phi)
                z = (risk_phi_x - c * tie_phi_x)

                # Gradient
                partial_gradient += z / denom
                # Hessian
                a1 = (risk_phi_x_x - c * tie_phi_x_x) / denom
                a2 = dot(z.T, z) / (denom ** 2)

                hessian -= (a1 - a2)

                if include_likelihood:
                    log_lik -= np.log(denom)

            # Values outside tie sum
            gradient += x_tie_sum - partial_gradient
            if include_likelihood:
                log_lik += dot(x_tie_sum, beta).ravel()

            # reset tie values
            tie_count = 0
            x_tie_sum = np.zeros((1, d))
            tie_phi = 0
            tie_phi_x = np.zeros((1, d))
            tie_phi_x_x = np.zeros((d, d))

        if include_likelihood:
            # NOTE(review): log_lik stays the int 0 (no .ravel()) if E has
            # no events at all -- this would raise; verify callers guarantee
            # at least one observed event.
            return hessian, gradient, log_lik.ravel()[0]
        else:
            return hessian, gradient
def _newton_rhaphson(self, X, T, E, initial_beta=None, step_size=1.,
                     epsilon=10e-5, show_progress=True):
    """
    Newton-Raphson iteration for fitting the Cox proportional hazards model.
    Note that the data is assumed to be pre-sorted on T.

    Parameters:
      X: (n,d) numpy array of observations.
      T: (n) numpy array of observed durations.
      E: (n) numpy array of death-event indicators.
      initial_beta: (d,1) numpy array used as the starting point for the
          iteration. Defaults to the zero vector.
      step_size: 0 < float <= 1, scales each Newton step.
      epsilon: convergence halts once the norm of the step falls below
          this value (note 10e-5 == 1e-4).
      show_progress: print convergence diagnostics every 10 iterations.
    Returns:
      beta: (1,d) numpy array of fitted coefficients.
    """
    assert epsilon <= 1., "epsilon must be less than or equal to 1."
    n, d = X.shape

    # Coerce inputs to plain numpy arrays; events must be boolean.
    X = np.array(X)
    T = np.array(T)
    E = np.array(E).astype(bool)

    # Starting point for the iteration.
    if initial_beta is None:
        beta = np.zeros((d, 1))
    else:
        assert initial_beta.shape == (d, 1)
        beta = initial_beta

    # Efron's approximation is the only tie-handling method implemented.
    if self.tie_method == 'Efron':
        get_gradients = self._get_efron_values
    else:
        raise NotImplementedError("Only Efron is available atm.")

    i = 1
    while True:
        hessian, gradient = get_gradients(X, beta, T, E)
        # Solve H * delta = step_size * g rather than inverting H.
        delta = solve(-hessian, step_size * gradient.T)
        beta = delta + beta
        if pd.isnull(delta).sum() > 1:
            raise ValueError("delta contains nan value(s). Converge halted.")
        converged = norm(delta) < epsilon
        if i % 10 == 0 and show_progress:
            print("Iteration %d: delta = %.5f" % (i, norm(delta)))
        i += 1
        if converged:
            break

    self._hessian_ = hessian
    self._score_ = gradient
    if show_progress:
        print("Convergence completed after %d iterations." % (i))
    return beta
def fit(self, df, duration_col='T', event_col='E',
        show_progress=False, initial_beta=None):
    """
    Fit the Cox Proportional Hazard model to a dataset. Tied survival times
    are handled using Efron's tie-method.

    Parameters:
      df: a Pandas dataframe with necessary columns `duration_col` and
          `event_col`, plus other covariates. `duration_col` refers to
          the lifetimes of the subjects. `event_col` refers to whether
          the 'death' events was observed: 1 if observed, 0 else (censored).
      duration_col: the column in dataframe that contains the subjects'
          lifetimes.
      event_col: the column in dataframe that contains the subjects' death
          observation.
      show_progress: since the fitter is iterative, show convergence
          diagnostics.
      initial_beta: initialize the starting point of the iterative
          algorithm. Default is the zero vector.

    Returns:
      self, with additional properties: hazards_
    """
    # Work on a copy so the caller's dataframe is untouched.
    df = df.copy()
    # Sort on time: the Newton-Raphson routine assumes T is sorted.
    # NOTE(review): DataFrame.sort is the legacy pandas API (sort_values today).
    df.sort(duration_col, inplace=True)
    # Extract time and event columns, leaving only covariates in df.
    T = df[duration_col]
    E = df[event_col]
    del df[duration_col]
    del df[event_col]
    E = E.astype(bool)
    # Warn about near-constant covariates, which can harm convergence.
    self._check_values(df)
    hazards_ = self._newton_rhaphson(df, T, E, initial_beta=initial_beta,
                                     show_progress=show_progress)
    # Store coefficients as a (1,d) frame indexed by 'coef'.
    self.hazards_ = pd.DataFrame(hazards_.T, columns=df.columns,
                                 index=['coef'])
    self.confidence_intervals_ = self._compute_confidence_intervals()
    # Keep the training data around for baseline-hazard and prediction use.
    self.data = df
    self.durations = T
    self.event_observed = E
    self.baseline_hazard_ = self._compute_baseline_hazard()
    return self
def _check_values(self, X):
low_var = (X.var(0) < 10e-5)
if low_var.any():
cols = str(list(X.columns[low_var]))
print("Warning: column(s) %s have very low variance.\
This may harm convergence." % cols)
def _compute_confidence_intervals(self):
    """Normal-approximation confidence band for the fitted hazards.

    The level is set by self.alpha; returns a (2,d) DataFrame with rows
    'lower-bound' and 'upper-bound'.
    """
    z = inv_normal_cdf((1. + self.alpha) / 2.)
    se = self._compute_standard_errors()
    point = self.hazards_.values
    bounds = np.r_[point - z * se, point + z * se]
    return pd.DataFrame(bounds,
                 index=['lower-bound', 'upper-bound'],
                 columns=self.hazards_.columns)
def _compute_standard_errors(self):
    """Coefficient standard errors from the inverse observed information."""
    variances = inv(-self._hessian_).diagonal()
    se = np.sqrt(variances)
    return pd.DataFrame(se[None, :],
                 index=['se'], columns=self.hazards_.columns)
def _compute_z_values(self):
    """Wald z-statistics: coefficient divided by its standard error."""
    se = self._compute_standard_errors().ix['se']
    return self.hazards_.ix['coef'] / se
def _compute_p_values(self):
    """P-values from the chi-square(1) distribution of the squared z-values."""
    chi2_stats = self._compute_z_values() ** 2
    return 1 - stats.chi2.cdf(chi2_stats, 1)
def summary(self):
    """Print a per-covariate summary table: coef, exp(coef), se, z, p and CI bounds.

    Returns None; output goes to stdout.
    """
    coefs = self.hazards_.ix['coef'].values
    table = pd.DataFrame(index=self.hazards_.columns)
    table['coef'] = coefs
    table['exp(coef)'] = exp(coefs)
    table['se(coef)'] = self._compute_standard_errors().ix['se'].values
    table['z'] = self._compute_z_values()
    table['p'] = self._compute_p_values()
    ci = self.confidence_intervals_
    table['lower %.2f' % self.alpha] = ci.ix['lower-bound'].values
    table['upper %.2f' % self.alpha] = ci.ix['upper-bound'].values
    print(table.to_string())
    return
def predict_hazard(self, X):
    """
    X: a (n,d) covariate matrix
    Returns the hazard values for the individuals: the baseline hazard
    scaled by each individual's partial hazard exp(X . beta).
    """
    partial_hazards = exp(np.dot(X, self.hazards_.T))
    baseline = self.baseline_hazard_.values
    return pd.DataFrame(np.dot(baseline, partial_hazards.T),
                 index=self.baseline_hazard_.index)
def predict_survival_function(self, X):
    """
    X: a (n,d) covariate matrix
    Returns the survival functions for the individuals,
    S(t) = exp(-cumulative hazard).
    """
    cumulative_hazard = self.predict_hazard(X).cumsum(0)
    return exp(-cumulative_hazard)
def predict_median(self, X):
    """
    X: a (n,d) covariate matrix
    Returns the median lifetimes for the individuals.
    """
    survival_functions = self.predict_survival_function(X)
    return median_survival_times(survival_functions)
def predict_expectation(self, X):
    """
    Compute the expected lifetime, E[T], using covariates X, by
    integrating the predicted survival function over time.
    """
    # NOTE(review): this reads self.cumulative_hazards_, but fit() in this
    # class only sets hazards_ / baseline_hazard_ — verify the attribute name.
    t = self.cumulative_hazards_.index
    return trapz(self.predict_survival_function(X).values.T, t)
def _compute_baseline_hazard(self):
    """Estimate the baseline hazard at each observed event time."""
    # http://courses.nus.edu.sg/course/stacar/internet/st3242/handouts/notes3.pdf
    # Per-subject partial hazards exp(x_i . beta).
    ind_hazards = exp(np.dot(self.data, self.hazards_.T))
    event_table = survival_table_from_events(self.durations.values,
                                             self.event_observed.values,
                                             np.zeros_like(self.durations))
    n, d = event_table.shape
    baseline_hazard_ = pd.DataFrame(np.zeros((n, 1)),
                                    index=event_table.index,
                                    columns=['baseline hazard'])
    # Deaths observed at t divided by the summed partial hazards of
    # subjects with duration <= t.
    # NOTE(review): the denominator condition is `durations <= t`, not the
    # usual at-risk set `durations >= t` — verify this is intended.
    for t, s in event_table.iterrows():
        baseline_hazard_.ix[t] = (s['observed'] /
                                  ind_hazards[self.durations <= t].sum())
    return baseline_hazard_
#### Utils ####
def _subtract(self, estimate):
    """Return a closure that subtracts the named estimate of two fitters."""
    class_name = self.__class__.__name__
    doc_string = """
        Subtract the %s of two %s objects.
        Parameters:
          other: an %s fitted instance.
        """ % (estimate, class_name, class_name)

    def subtract(other):
        # Align the two estimates on the union of their indexes
        # (forward-filling) before differencing.
        self_estimate = getattr(self, estimate)
        other_estimate = getattr(other, estimate)
        return self_estimate.reindex(other_estimate.index, method='ffill') - \
            other_estimate.reindex(self_estimate.index, method='ffill')
    subtract.__doc__ = doc_string
    return subtract
def _divide(self, estimate):
    """Return a closure that divides the named estimate of two fitters."""
    class_name = self.__class__.__name__
    doc_string = """
        Divide the %s of two %s objects.
        Parameters:
          other: an %s fitted instance.
        """ % (estimate, class_name, class_name)

    def divide(other):
        # Align the two estimates on the union of their indexes
        # (forward-filling) before dividing.
        self_estimate = getattr(self, estimate)
        other_estimate = getattr(other, estimate)
        return self_estimate.reindex(other_estimate.index, method='ffill') / \
            other_estimate.reindex(self_estimate.index, method='ffill')
    divide.__doc__ = doc_string
    return divide
def _predict(self, estimate, label):
    """Return a closure that looks up the named estimate at given times."""
    doc_string = """
        Predict the %s at certain times
        Parameters:
          time: an array of times to predict the value of %s at
        """ % (estimate, estimate)

    def predict(time):
        # For each requested time, take the last estimate value at or
        # before it (step-function lookup).
        return [getattr(self, estimate).ix[:t].iloc[-1][label] for t in time]
    predict.__doc__ = doc_string
    return predict
def preprocess_inputs(durations, event_observed, timeline, entry):
    """Coerce raw survival inputs into flat numpy arrays plus an event table.

    Returns (durations, event_observed, timeline, entry, event_table).
    Defaults: all events observed, entry at t=0, timeline = observed times.
    """
    n = len(durations)
    durations = np.asarray(durations).reshape((n,))
    # All subjects observed unless stated otherwise.
    event_observed = (np.ones(n, dtype=int) if event_observed is None
                      else np.asarray(event_observed).reshape((n,)).copy().astype(int))
    # No left truncation unless stated otherwise.
    entry = np.zeros(n) if entry is None else np.asarray(entry).reshape((n,))
    event_table = survival_table_from_events(durations, event_observed, entry)
    timeline = event_table.index.values if timeline is None else np.asarray(timeline)
    return durations, event_observed, timeline.astype(float), entry, event_table
def _additive_estimate(events, timeline, _additive_f, _additive_var, reverse):
    """
    Called to compute the Kaplan-Meier and Nelson-Aalen estimates.

    events: event table with 'entrance', 'removed' and 'observed' counts
        per time.
    _additive_f / _additive_var: per-time increment functions for the
        estimate and its variance; their outputs are cumulatively summed.
    reverse: if True, accumulate from the latest time backwards (the
        population and deaths series are shifted accordingly).
    """
    if reverse:
        events = events.sort_index(ascending=False)
        # Population at risk when walking backwards in time.
        population = events['entrance'].sum() - events['removed'].cumsum().shift(1).fillna(0)
        deaths = events['observed'].shift(1).fillna(0)
        estimate_ = np.cumsum(_additive_f(population, deaths)).ffill().sort_index()
        var_ = np.cumsum(_additive_var(population, deaths)).ffill().sort_index()
    else:
        deaths = events['observed']
        population = events['entrance'].cumsum() - events['removed'].cumsum().shift(1).fillna(0)  # slowest line here.
        estimate_ = np.cumsum(_additive_f(population, deaths))
        var_ = np.cumsum(_additive_var(population, deaths))
    # Align both series onto the requested (sorted) timeline, padding forward.
    timeline = sorted(timeline)
    estimate_ = estimate_.reindex(timeline, method='pad').fillna(0)
    var_ = var_.reindex(timeline, method='pad')
    var_.index.name = 'timeline'
    estimate_.index.name = 'timeline'
    return estimate_, var_
def qth_survival_times(q, survival_functions):
    """
    Return, for each survival curve, the first time its value drops below q.

    Parameters:
      q: a float between 0 and 1.
      survival_functions: a (n,d) DataFrame or numpy array, one curve per
          column with rows ordered by time.
          If a DataFrame, index values (actual times) are returned.
          If a numpy array, row indices are returned.
    Returns:
      v: an array containing the first time/index each curve crossed below
         q; np.inf for curves that never cross.
    """
    assert 0. <= q <= 1., "q must be between 0. and 1."
    # Cumulative-sum down each column (axis 0) so a curve stays "crossed"
    # once it first dips below q.  The previous `.cumsum()` (no axis)
    # flattened numpy inputs (numpy defaults to axis=None), which broke the
    # array branch below.
    sv_b = (1.0 * (survival_functions < q)).cumsum(0) > 0
    try:
        # DataFrame branch: idxmax returns the first True index label per column.
        v = sv_b.idxmax(0)
        v[sv_b.iloc[-1, :] == 0] = np.inf
    except AttributeError:
        # ndarray branch (no .idxmax).  Cast to float so np.inf can be assigned.
        v = sv_b.argmax(0).astype(float)
        v[sv_b[-1, :] == 0] = np.inf
    return v
def median_survival_times(survival_functions):
    """Return the times at which each survival curve first drops below 0.5."""
    return qth_survival_times(0.5, survival_functions)
def asymmetric_epanechnikov_kernel(q, x):
    """Boundary-corrected (asymmetric) Epanechnikov kernel weight.

    q parameterizes the boundary correction and x is the evaluation point.
    """
    numerator = 64 * (2 - 4 * q + 6 * q * q - 3 * q ** 3) + 240 * (1 - q) ** 2 * x
    denominator = (1 + q) ** 4 * (19 - 18 * q + 3 * q ** 2)
    return numerator / denominator
"""
References:
[1] Aalen, O. O., Borgan, Ø., Gjessing, H. K., 2008. Survival and Event History Analysis
"""
| [
"numpy.sqrt",
"lifelines.utils.epanechnikov_kernel",
"numpy.log",
"lifelines.utils.StatError",
"numpy.array",
"numpy.linalg.norm",
"numpy.arange",
"lifelines.progress_bar.progress_bar",
"scipy.stats.chi2.cdf",
"numpy.asarray",
"numpy.zeros_like",
"numpy.exp",
"numpy.dot",
"numpy.concatenat... | [((45479, 45539), 'lifelines.utils.survival_table_from_events', 'survival_table_from_events', (['durations', 'event_observed', 'entry'], {}), '(durations, event_observed, entry)\n', (45505, 45539), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, epanechnikov_kernel, StatError, coalesce\n'), ((3514, 3563), 'pandas.DataFrame', 'pd.DataFrame', (['cumulative_hazard_'], {'columns': '[label]'}), '(cumulative_hazard_, columns=[label])\n', (3526, 3563), True, 'import pandas as pd\n'), ((3985, 4026), 'lifelines.plotting.plot_estimate', 'plot_estimate', (['self', '"""cumulative_hazard_"""'], {}), "(self, 'cumulative_hazard_')\n", (3998, 4026), False, 'from lifelines.plotting import plot_estimate, plot_regressions\n'), ((4102, 4132), 'lifelines.plotting.plot_estimate', 'plot_estimate', (['self', '"""hazard_"""'], {}), "(self, 'hazard_')\n", (4115, 4132), False, 'from lifelines.plotting import plot_estimate, plot_regressions\n'), ((4229, 4264), 'lifelines.utils.inv_normal_cdf', 'inv_normal_cdf', (['(1 - (1 - alpha) / 2)'], {}), '(1 - (1 - alpha) / 2)\n', (4243, 4264), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, epanechnikov_kernel, StatError, coalesce\n'), ((4278, 4311), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self.timeline'}), '(index=self.timeline)\n', (4290, 4311), True, 'import pandas as pd\n'), ((4989, 5033), 'pandas.DataFrame', 'pd.DataFrame', (["{'N': population, 'd': deaths}"], {}), "({'N': population, 'd': deaths})\n", (5001, 5033), True, 'import pandas as pd\n'), ((5334, 5378), 'pandas.DataFrame', 'pd.DataFrame', (["{'N': population, 'd': deaths}"], {}), "({'N': population, 'd': deaths})\n", (5346, 5378), True, 'import pandas as pd\n'), ((6790, 6830), 'lifelines.utils.inv_normal_cdf', 'inv_normal_cdf', (['(1 - (1 - self.alpha) / 2)'], {}), '(1 - (1 - self.alpha) / 2)\n', (6804, 6830), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, 
epanechnikov_kernel, StatError, coalesce\n'), ((7398, 7434), 'pandas.DataFrame', 'pd.DataFrame', (['values'], {'index': 'timeline'}), '(values, index=timeline)\n', (7410, 7434), True, 'import pandas as pd\n'), ((11216, 11250), 'lifelines.plotting.plot_estimate', 'plot_estimate', (['self', 'estimate_name'], {}), '(self, estimate_name)\n', (11229, 11250), False, 'from lifelines.plotting import plot_estimate, plot_regressions\n'), ((11495, 11530), 'lifelines.utils.inv_normal_cdf', 'inv_normal_cdf', (['((1.0 + alpha) / 2.0)'], {}), '((1.0 + alpha) / 2.0)\n', (11509, 11530), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, epanechnikov_kernel, StatError, coalesce\n'), ((11542, 11575), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self.timeline'}), '(index=self.timeline)\n', (11554, 11575), True, 'import pandas as pd\n'), ((11630, 11660), 'numpy.log', 'np.log', (['self.__estimate.values'], {}), '(self.__estimate.values)\n', (11636, 11660), True, 'import numpy as np\n'), ((12132, 12159), 'numpy.seterr', 'np.seterr', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (12141, 12159), True, 'import numpy as np\n'), ((12284, 12310), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (12293, 12310), True, 'import numpy as np\n'), ((14811, 14842), 'numpy.exp', 'np.exp', (['(-naf.cumulative_hazard_)'], {}), '(-naf.cumulative_hazard_)\n', (14817, 14842), True, 'import numpy as np\n'), ((14879, 14912), 'numpy.exp', 'np.exp', (['(-naf.confidence_interval_)'], {}), '(-naf.confidence_interval_)\n', (14885, 14912), True, 'import numpy as np\n'), ((15250, 15291), 'lifelines.plotting.plot_estimate', 'plot_estimate', (['self', '"""survival_function_"""'], {}), "(self, 'survival_function_')\n", (15263, 15291), False, 'from lifelines.plotting import plot_estimate, plot_regressions\n'), ((22566, 22588), 'numpy.arange', 'np.arange', (['df.shape[0]'], {}), '(df.shape[0])\n', (22575, 22588), True, 'import numpy as 
np\n'), ((22812, 22866), 'pandas.Series', 'pd.Series', (['df[event_col].values'], {'dtype': 'bool', 'index': 'ids'}), '(df[event_col].values, dtype=bool, index=ids)\n', (22821, 22866), True, 'import pandas as pd\n'), ((22879, 22924), 'pandas.Series', 'pd.Series', (['df[duration_col].values'], {'index': 'ids'}), '(df[duration_col].values, index=ids)\n', (22888, 22924), True, 'import pandas as pd\n'), ((23744, 23766), 'lifelines.progress_bar.progress_bar', 'progress_bar', (['n_deaths'], {}), '(n_deaths)\n', (23756, 23766), False, 'from lifelines.progress_bar import progress_bar\n'), ((25739, 25761), 'lifelines.plotting.plot_regressions', 'plot_regressions', (['self'], {}), '(self)\n', (25755, 25761), False, 'from lifelines.plotting import plot_estimate, plot_regressions\n'), ((29146, 29168), 'lifelines.plotting.plot_regressions', 'plot_regressions', (['self'], {}), '(self)\n', (29162, 29168), False, 'from lifelines.plotting import plot_estimate, plot_regressions\n'), ((29619, 29659), 'lifelines.utils.inv_normal_cdf', 'inv_normal_cdf', (['(1 - (1 - self.alpha) / 2)'], {}), '(1 - (1 - self.alpha) / 2)\n', (29633, 29659), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, epanechnikov_kernel, StatError, coalesce\n'), ((33113, 33129), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (33121, 33129), True, 'import numpy as np\n'), ((33149, 33165), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (33157, 33165), True, 'import numpy as np\n'), ((33248, 33264), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (33256, 33264), True, 'import numpy as np\n'), ((36695, 36706), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (36703, 36706), True, 'import numpy as np\n'), ((36719, 36730), 'numpy.array', 'np.array', (['T'], {}), '(T)\n', (36727, 36730), True, 'import numpy as np\n'), ((36743, 36754), 'numpy.array', 'np.array', (['E'], {}), '(E)\n', (36751, 36754), True, 'import numpy as np\n'), ((39532, 39592), 
'pandas.DataFrame', 'pd.DataFrame', (['hazards_.T'], {'columns': 'df.columns', 'index': "['coef']"}), "(hazards_.T, columns=df.columns, index=['coef'])\n", (39544, 39592), True, 'import pandas as pd\n'), ((40183, 40223), 'lifelines.utils.inv_normal_cdf', 'inv_normal_cdf', (['((1.0 + self.alpha) / 2.0)'], {}), '((1.0 + self.alpha) / 2.0)\n', (40197, 40223), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, epanechnikov_kernel, StatError, coalesce\n'), ((40321, 40460), 'pandas.DataFrame', 'pd.DataFrame', (['np.r_[hazards - alpha2 * se, hazards + alpha2 * se]'], {'index': "['lower-bound', 'upper-bound']", 'columns': 'self.hazards_.columns'}), "(np.r_[hazards - alpha2 * se, hazards + alpha2 * se], index=[\n 'lower-bound', 'upper-bound'], columns=self.hazards_.columns)\n", (40333, 40460), True, 'import pandas as pd\n'), ((40656, 40726), 'pandas.DataFrame', 'pd.DataFrame', (['se[None, :]'], {'index': "['se']", 'columns': 'self.hazards_.columns'}), "(se[None, :], index=['se'], columns=self.hazards_.columns)\n", (40668, 40726), True, 'import pandas as pd\n'), ((41043, 41084), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self.hazards_.columns'}), '(index=self.hazards_.columns)\n', (41055, 41084), True, 'import pandas as pd\n'), ((41164, 41200), 'numpy.exp', 'exp', (["self.hazards_.ix['coef'].values"], {}), "(self.hazards_.ix['coef'].values)\n", (41167, 41200), False, 'from numpy import dot, exp\n'), ((45234, 45255), 'numpy.ones', 'np.ones', (['n'], {'dtype': 'int'}), '(n, dtype=int)\n', (45241, 45255), True, 'import numpy as np\n'), ((45390, 45401), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (45398, 45401), True, 'import numpy as np\n'), ((45639, 45659), 'numpy.asarray', 'np.asarray', (['timeline'], {}), '(timeline)\n', (45649, 45659), True, 'import numpy as np\n'), ((12176, 12203), 'numpy.log', 'np.log', (['(population - deaths)'], {}), '(population - deaths)\n', (12182, 12203), True, 'import numpy as np\n'), ((12206, 12224), 
'numpy.log', 'np.log', (['population'], {}), '(population)\n', (12212, 12224), True, 'import numpy as np\n'), ((17695, 17755), 'numpy.random.beta', 'beta', (['(0.01 + deaths)', '(0.01 + population - deaths)'], {'size': '(n, d)'}), '(0.01 + deaths, 0.01 + population - deaths, size=(n, d))\n', (17699, 17755), False, 'from numpy.random import beta\n'), ((23677, 23686), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (23683, 23686), True, 'import numpy as np\n'), ((24576, 24610), 'numpy.dot', 'dot', (['V', '(1.0 * relevant_individuals)'], {}), '(V, 1.0 * relevant_individuals)\n', (24579, 24610), False, 'from numpy import dot, exp\n'), ((27227, 27236), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (27233, 27236), True, 'import numpy as np\n'), ((27902, 27936), 'numpy.dot', 'dot', (['V', '(1.0 * relevant_individuals)'], {}), '(V, 1.0 * relevant_individuals)\n', (27905, 27936), False, 'from numpy import dot, exp\n'), ((29789, 29835), 'numpy.concatenate', 'np.concatenate', (['[self.timeline, self.timeline]'], {}), '([self.timeline, self.timeline])\n', (29803, 29835), True, 'import numpy as np\n'), ((29888, 29908), 'numpy.zeros', 'np.zeros', (['(2 * n, d)'], {}), '((2 * n, d))\n', (29896, 29908), True, 'import numpy as np\n'), ((30966, 31004), 'numpy.dot', 'np.dot', (['self.cumulative_hazards_', 'X_.T'], {}), '(self.cumulative_hazards_, X_.T)\n', (30972, 31004), True, 'import numpy as np\n'), ((33330, 33346), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (33338, 33346), True, 'import numpy as np\n'), ((33348, 33364), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (33356, 33364), True, 'import numpy as np\n'), ((33401, 33417), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (33409, 33417), True, 'import numpy as np\n'), ((33419, 33435), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (33427, 33435), True, 'import numpy as np\n'), ((33790, 33804), 'numpy.dot', 'dot', (['phi_i', 'xi'], {}), '(phi_i, xi)\n', (33793, 33804), False, 
'from numpy import dot, exp\n'), ((34645, 34661), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (34653, 34661), True, 'import numpy as np\n'), ((35431, 35447), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (35439, 35447), True, 'import numpy as np\n'), ((35496, 35512), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (35504, 35512), True, 'import numpy as np\n'), ((35539, 35555), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (35547, 35555), True, 'import numpy as np\n'), ((37002, 37018), 'numpy.zeros', 'np.zeros', (['(d, 1)'], {}), '((d, 1))\n', (37010, 37018), True, 'import numpy as np\n'), ((37393, 37432), 'numpy.linalg.solve', 'solve', (['(-hessian)', '(step_size * gradient.T)'], {}), '(-hessian, step_size * gradient.T)\n', (37398, 37432), False, 'from numpy.linalg import LinAlgError, inv, solve, norm\n'), ((40985, 41005), 'scipy.stats.chi2.cdf', 'stats.chi2.cdf', (['U', '(1)'], {}), '(U, 1)\n', (40999, 41005), True, 'import scipy.stats as stats\n'), ((41759, 41785), 'numpy.dot', 'np.dot', (['X', 'self.hazards_.T'], {}), '(X, self.hazards_.T)\n', (41765, 41785), True, 'import numpy as np\n'), ((41857, 41872), 'numpy.dot', 'np.dot', (['bh', 'v.T'], {}), '(bh, v.T)\n', (41863, 41872), True, 'import numpy as np\n'), ((42742, 42776), 'numpy.dot', 'np.dot', (['self.data', 'self.hazards_.T'], {}), '(self.data, self.hazards_.T)\n', (42748, 42776), True, 'import numpy as np\n'), ((42977, 43006), 'numpy.zeros_like', 'np.zeros_like', (['self.durations'], {}), '(self.durations)\n', (42990, 43006), True, 'import numpy as np\n'), ((43082, 43098), 'numpy.zeros', 'np.zeros', (['(n, 1)'], {}), '((n, 1))\n', (43090, 43098), True, 'import numpy as np\n'), ((45089, 45110), 'numpy.asarray', 'np.asarray', (['durations'], {}), '(durations)\n', (45099, 45110), True, 'import numpy as np\n'), ((7252, 7290), 'numpy.exp', 'np.exp', (['(alpha2 * std_hazard_ / hazard_)'], {}), '(alpha2 * std_hazard_ / hazard_)\n', (7258, 7290), True, 'import 
numpy as np\n'), ((7333, 7372), 'numpy.exp', 'np.exp', (['(-alpha2 * std_hazard_ / hazard_)'], {}), '(-alpha2 * std_hazard_ / hazard_)\n', (7339, 7372), True, 'import numpy as np\n'), ((10482, 10620), 'lifelines.utils.StatError', 'StatError', (["('There are too few early truncation times and too many events. S(t)==0 for all t>%.1f. Recommend BFH estimator.'\n % ix)"], {}), "(\n 'There are too few early truncation times and too many events. S(t)==0 for all t>%.1f. Recommend BFH estimator.'\n % ix)\n", (10491, 10620), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, epanechnikov_kernel, StatError, coalesce\n'), ((10687, 10716), 'numpy.exp', 'np.exp', (['log_survival_function'], {}), '(log_survival_function)\n', (10693, 10716), True, 'import numpy as np\n'), ((29382, 29451), 'lifelines.utils.epanechnikov_kernel', 'epanechnikov_kernel', (['self.timeline[:, None]', 'self.timeline', 'bandwidth'], {}), '(self.timeline[:, None], self.timeline, bandwidth)\n', (29401, 29451), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, epanechnikov_kernel, StatError, coalesce\n'), ((33753, 33766), 'numpy.dot', 'dot', (['xi', 'beta'], {}), '(xi, beta)\n', (33756, 33766), False, 'from numpy import dot, exp\n'), ((33829, 33845), 'numpy.dot', 'np.dot', (['xi.T', 'xi'], {}), '(xi.T, xi)\n', (33835, 33845), True, 'import numpy as np\n'), ((37605, 37616), 'numpy.linalg.norm', 'norm', (['delta'], {}), '(delta)\n', (37609, 37616), False, 'from numpy.linalg import LinAlgError, inv, solve, norm\n'), ((45428, 45445), 'numpy.asarray', 'np.asarray', (['entry'], {}), '(entry)\n', (45438, 45445), True, 'import numpy as np\n'), ((6180, 6251), 'lifelines.utils.epanechnikov_kernel', 'epanechnikov_kernel', (['timeline[:, None]', 'timeline[C][None, :]', 'bandwidth'], {}), '(timeline[:, None], timeline[C][None, :], bandwidth)\n', (6199, 6251), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, epanechnikov_kernel, 
StatError, coalesce\n'), ((23301, 23324), 'numpy.zeros', 'np.zeros', (['(n_deaths, d)'], {}), '((n_deaths, d))\n', (23309, 23324), True, 'import numpy as np\n'), ((23465, 23488), 'numpy.zeros', 'np.zeros', (['(n_deaths, d)'], {}), '((n_deaths, d))\n', (23473, 23488), True, 'import numpy as np\n'), ((30921, 30936), 'numpy.ones', 'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (30928, 30936), True, 'import numpy as np\n'), ((35017, 35028), 'numpy.dot', 'dot', (['z.T', 'z'], {}), '(z.T, z)\n', (35020, 35028), False, 'from numpy import dot, exp\n'), ((35153, 35166), 'numpy.log', 'np.log', (['denom'], {}), '(denom)\n', (35159, 35166), True, 'import numpy as np\n'), ((40608, 40628), 'numpy.linalg.inv', 'inv', (['(-self._hessian_)'], {}), '(-self._hessian_)\n', (40611, 40628), False, 'from numpy.linalg import LinAlgError, inv, solve, norm\n'), ((4696, 4719), 'numpy.sqrt', 'np.sqrt', (['cumulative_sq_'], {}), '(cumulative_sq_)\n', (4703, 4719), True, 'import numpy as np\n'), ((4845, 4868), 'numpy.sqrt', 'np.sqrt', (['cumulative_sq_'], {}), '(cumulative_sq_)\n', (4852, 4868), True, 'import numpy as np\n'), ((7092, 7163), 'lifelines.utils.epanechnikov_kernel', 'epanechnikov_kernel', (['timeline[:, None]', 'timeline[C][None, :]', 'bandwidth'], {}), '(timeline[:, None], timeline[C][None, :], bandwidth)\n', (7111, 7163), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, epanechnikov_kernel, StatError, coalesce\n'), ((11912, 11922), 'numpy.log', 'np.log', (['(-v)'], {}), '(-v)\n', (11918, 11922), True, 'import numpy as np\n'), ((12006, 12016), 'numpy.log', 'np.log', (['(-v)'], {}), '(-v)\n', (12012, 12016), True, 'import numpy as np\n'), ((35320, 35340), 'numpy.dot', 'dot', (['x_tie_sum', 'beta'], {}), '(x_tie_sum, beta)\n', (35323, 35340), False, 'from numpy import dot, exp\n'), ((37480, 37496), 'pandas.isnull', 'pd.isnull', (['delta'], {}), '(delta)\n', (37489, 37496), True, 'import pandas as pd\n'), ((17799, 17814), 'numpy.log', 'np.log', 
(['samples'], {}), '(samples)\n', (17805, 17814), True, 'import numpy as np\n'), ((24411, 24422), 'numpy.dot', 'dot', (['X.T', 'X'], {}), '(X.T, X)\n', (24414, 24422), False, 'from numpy import dot, exp\n'), ((27737, 27748), 'numpy.dot', 'dot', (['X.T', 'X'], {}), '(X.T, X)\n', (27740, 27748), False, 'from numpy import dot, exp\n'), ((37767, 37778), 'numpy.linalg.norm', 'norm', (['delta'], {}), '(delta)\n', (37771, 37778), False, 'from numpy.linalg import LinAlgError, inv, solve, norm\n'), ((11934, 11957), 'numpy.sqrt', 'np.sqrt', (['cumulative_sq_'], {}), '(cumulative_sq_)\n', (11941, 11957), True, 'import numpy as np\n'), ((12028, 12051), 'numpy.sqrt', 'np.sqrt', (['cumulative_sq_'], {}), '(cumulative_sq_)\n', (12035, 12051), True, 'import numpy as np\n'), ((45291, 45317), 'numpy.asarray', 'np.asarray', (['event_observed'], {}), '(event_observed)\n', (45301, 45317), True, 'import numpy as np\n')] |
import numpy as np

# Demo: round-trip a plain Python list through a NumPy array.
X = [1, 2, 3, 4]

# list -> ndarray
X_array = np.array(X)
print(X_array)

# ndarray -> list
X_list = X_array.tolist()
print(X_list)
| [
"numpy.array"
] | [((41, 52), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (49, 52), True, 'import numpy as np\n')] |
import unittest
from dlgo.data.processor import GoDataProcessor
from dlgo.agent.predict import DeepLearningAgent
from dlgo.networks.alphago import alphago_model
from dlgo.agent.pg import PolicyAgent
from dlgo.agent.predict import load_prediction_agent
from dlgo.encoders.alphago import AlphaGoEncoder
from dlgo.rl.simulate import experience_simulation
from dlgo.networks.alphago import alphago_model
from dlgo.rl import ValueAgent, load_experience
from dlgo.agent import load_prediction_agent, load_policy_agent, AlphaGoMCTS
from dlgo.rl import load_value_agent
from dlgo.goboard_fast import GameState
from keras.callbacks import ModelCheckpoint
import h5py
import numpy as np
class AlphaGoAgentTest(unittest.TestCase):
    """End-to-end smoke tests for the AlphaGo-style training pipeline.

    NOTE(review): the methods are order-dependent — test_2 reads the HDF5
    file written by test_1, test_3 reads test_2's output, and test_4 reads
    all three.  The numeric prefixes rely on unittest's name-ordered
    execution.
    """
    def test_1_supervised_learning(self):
        # Build a supervised-learning policy network on a 19x19 board.
        rows, cols = 19, 19
        encoder = AlphaGoEncoder()
        input_shape = (encoder.num_planes, rows, cols)
        alphago_sl_policy = alphago_model(input_shape, is_policy_net=True)
        alphago_sl_policy.compile('sgd', 'categorical_crossentropy', metrics=['accuracy'])
        alphago_sl_agent = DeepLearningAgent(alphago_sl_policy, encoder)
        # Sanity-check the output layer: one probability per board point
        # (19 * 19 == 361).
        inputs = np.ones((10,) + input_shape)
        outputs = alphago_sl_policy.predict(inputs)
        assert(outputs.shape == (10, 361))
        # Persist the SL agent for the later tests.
        with h5py.File('test_alphago_sl_policy.h5', 'w') as sl_agent_out:
            alphago_sl_agent.serialize(sl_agent_out)
    def test_2_reinforcement_learning(self):
        # Self-play between two copies of the SL policy from test_1.
        encoder = AlphaGoEncoder()
        sl_agent = load_prediction_agent(h5py.File('test_alphago_sl_policy.h5'))
        sl_opponent = load_prediction_agent(h5py.File('test_alphago_sl_policy.h5'))
        alphago_rl_agent = PolicyAgent(sl_agent.model, encoder)
        opponent = PolicyAgent(sl_opponent.model, encoder)
        # A single simulated game keeps the test fast.
        num_games = 1
        experience = experience_simulation(num_games, alphago_rl_agent, opponent)
        alphago_rl_agent.train(experience)
        # Persist both the RL policy and the experience buffer.
        with h5py.File('test_alphago_rl_policy.h5', 'w') as rl_agent_out:
            alphago_rl_agent.serialize(rl_agent_out)
        with h5py.File('test_alphago_rl_experience.h5', 'w') as exp_out:
            experience.serialize(exp_out)
    def test_3_alphago_value(self):
        # Train a value network on the experience produced in test_2.
        rows, cols = 19, 19
        encoder = AlphaGoEncoder()
        input_shape = (encoder.num_planes, rows, cols)
        alphago_value_network = alphago_model(input_shape)
        alphago_value = ValueAgent(alphago_value_network, encoder)
        experience = load_experience(h5py.File('test_alphago_rl_experience.h5', 'r'))
        alphago_value.train(experience)
        with h5py.File('test_alphago_value.h5', 'w') as value_agent_out:
            alphago_value.serialize(value_agent_out)
    def test_4_alphago_mcts(self):
        # Combine the SL (fast rollout), RL (strong) and value networks
        # into an MCTS agent and make a single move from an empty board.
        fast_policy = load_prediction_agent(h5py.File('test_alphago_sl_policy.h5', 'r'))
        strong_policy = load_policy_agent(h5py.File('test_alphago_rl_policy.h5', 'r'))
        value = load_value_agent(h5py.File('test_alphago_value.h5', 'r'))
        alphago = AlphaGoMCTS(strong_policy, fast_policy, value,
                              num_simulations=20, depth=5, rollout_limit=10)
        start = GameState.new_game(19)
        alphago.select_move(start)
# Run the pipeline tests (name-ordered: test_1 .. test_4) when executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"dlgo.goboard_fast.GameState.new_game",
"numpy.ones",
"dlgo.rl.ValueAgent",
"h5py.File",
"dlgo.encoders.alphago.AlphaGoEncoder",
"dlgo.agent.predict.DeepLearningAgent",
"dlgo.agent.pg.PolicyAgent",
"dlgo.networks.alphago.alphago_model",
"unittest.main",
"dlgo.agent.AlphaGoMCTS",
"dlgo.rl.simulat... | [((3239, 3254), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3252, 3254), False, 'import unittest\n'), ((811, 827), 'dlgo.encoders.alphago.AlphaGoEncoder', 'AlphaGoEncoder', ([], {}), '()\n', (825, 827), False, 'from dlgo.encoders.alphago import AlphaGoEncoder\n'), ((912, 958), 'dlgo.networks.alphago.alphago_model', 'alphago_model', (['input_shape'], {'is_policy_net': '(True)'}), '(input_shape, is_policy_net=True)\n', (925, 958), False, 'from dlgo.networks.alphago import alphago_model\n'), ((1079, 1124), 'dlgo.agent.predict.DeepLearningAgent', 'DeepLearningAgent', (['alphago_sl_policy', 'encoder'], {}), '(alphago_sl_policy, encoder)\n', (1096, 1124), False, 'from dlgo.agent.predict import DeepLearningAgent\n'), ((1143, 1171), 'numpy.ones', 'np.ones', (['((10,) + input_shape)'], {}), '((10,) + input_shape)\n', (1150, 1171), True, 'import numpy as np\n'), ((1459, 1475), 'dlgo.encoders.alphago.AlphaGoEncoder', 'AlphaGoEncoder', ([], {}), '()\n', (1473, 1475), False, 'from dlgo.encoders.alphago import AlphaGoEncoder\n'), ((1670, 1706), 'dlgo.agent.pg.PolicyAgent', 'PolicyAgent', (['sl_agent.model', 'encoder'], {}), '(sl_agent.model, encoder)\n', (1681, 1706), False, 'from dlgo.agent.pg import PolicyAgent\n'), ((1726, 1765), 'dlgo.agent.pg.PolicyAgent', 'PolicyAgent', (['sl_opponent.model', 'encoder'], {}), '(sl_opponent.model, encoder)\n', (1737, 1765), False, 'from dlgo.agent.pg import PolicyAgent\n'), ((1810, 1870), 'dlgo.rl.simulate.experience_simulation', 'experience_simulation', (['num_games', 'alphago_rl_agent', 'opponent'], {}), '(num_games, alphago_rl_agent, opponent)\n', (1831, 1870), False, 'from dlgo.rl.simulate import experience_simulation\n'), ((2249, 2265), 'dlgo.encoders.alphago.AlphaGoEncoder', 'AlphaGoEncoder', ([], {}), '()\n', (2263, 2265), False, 'from dlgo.encoders.alphago import AlphaGoEncoder\n'), ((2353, 2379), 'dlgo.networks.alphago.alphago_model', 'alphago_model', (['input_shape'], {}), '(input_shape)\n', (2366, 
2379), False, 'from dlgo.networks.alphago import alphago_model\n'), ((2405, 2447), 'dlgo.rl.ValueAgent', 'ValueAgent', (['alphago_value_network', 'encoder'], {}), '(alphago_value_network, encoder)\n', (2415, 2447), False, 'from dlgo.rl import ValueAgent, load_experience\n'), ((3008, 3105), 'dlgo.agent.AlphaGoMCTS', 'AlphaGoMCTS', (['strong_policy', 'fast_policy', 'value'], {'num_simulations': '(20)', 'depth': '(5)', 'rollout_limit': '(10)'}), '(strong_policy, fast_policy, value, num_simulations=20, depth=5,\n rollout_limit=10)\n', (3019, 3105), False, 'from dlgo.agent import load_prediction_agent, load_policy_agent, AlphaGoMCTS\n'), ((3148, 3170), 'dlgo.goboard_fast.GameState.new_game', 'GameState.new_game', (['(19)'], {}), '(19)\n', (3166, 3170), False, 'from dlgo.goboard_fast import GameState\n'), ((1281, 1324), 'h5py.File', 'h5py.File', (['"""test_alphago_sl_policy.h5"""', '"""w"""'], {}), "('test_alphago_sl_policy.h5', 'w')\n", (1290, 1324), False, 'import h5py\n'), ((1518, 1556), 'h5py.File', 'h5py.File', (['"""test_alphago_sl_policy.h5"""'], {}), "('test_alphago_sl_policy.h5')\n", (1527, 1556), False, 'import h5py\n'), ((1602, 1640), 'h5py.File', 'h5py.File', (['"""test_alphago_sl_policy.h5"""'], {}), "('test_alphago_sl_policy.h5')\n", (1611, 1640), False, 'import h5py\n'), ((1929, 1972), 'h5py.File', 'h5py.File', (['"""test_alphago_rl_policy.h5"""', '"""w"""'], {}), "('test_alphago_rl_policy.h5', 'w')\n", (1938, 1972), False, 'import h5py\n'), ((2057, 2104), 'h5py.File', 'h5py.File', (['"""test_alphago_rl_experience.h5"""', '"""w"""'], {}), "('test_alphago_rl_experience.h5', 'w')\n", (2066, 2104), False, 'import h5py\n'), ((2486, 2533), 'h5py.File', 'h5py.File', (['"""test_alphago_rl_experience.h5"""', '"""r"""'], {}), "('test_alphago_rl_experience.h5', 'r')\n", (2495, 2533), False, 'import h5py\n'), ((2590, 2629), 'h5py.File', 'h5py.File', (['"""test_alphago_value.h5"""', '"""w"""'], {}), "('test_alphago_value.h5', 'w')\n", (2599, 2629), False, 'import 
h5py\n'), ((2783, 2826), 'h5py.File', 'h5py.File', (['"""test_alphago_sl_policy.h5"""', '"""r"""'], {}), "('test_alphago_sl_policy.h5', 'r')\n", (2792, 2826), False, 'import h5py\n'), ((2870, 2913), 'h5py.File', 'h5py.File', (['"""test_alphago_rl_policy.h5"""', '"""r"""'], {}), "('test_alphago_rl_policy.h5', 'r')\n", (2879, 2913), False, 'import h5py\n'), ((2948, 2987), 'h5py.File', 'h5py.File', (['"""test_alphago_value.h5"""', '"""r"""'], {}), "('test_alphago_value.h5', 'r')\n", (2957, 2987), False, 'import h5py\n')] |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Provile Envelop QC test
I believe that this test was first described by GTSPP, which define minimum
and maximum ranges for different depth layers. The concept is that near the
surface one should expect more variability, thus a wider range.
"""
import logging
import numpy as np
from numpy import ma
from .qctests import QCCheckVar
# Module-level logger named after this module, per the stdlib logging convention.
module_logger = logging.getLogger(__name__)
class ProfileEnvelop(QCCheckVar):
    """Flag measurements outside depth-layered min/max envelopes.

    For each layer defined in ``cfg['layers']``, samples whose pressure
    (``PRES``) satisfies the layer's depth condition are flagged good when the
    measured value lies inside the layer's envelope and bad otherwise.
    Masked or non-finite values are flagged 9 (missing).

    Results are stored in ``self.flags['profile_envelop']``.
    """

    def test(self):
        self.flags = {}

        x = self.data[self.varname]
        if isinstance(x, ma.MaskedArray):
            # Replace masked values with NaN so they drop out of comparisons.
            x[x.mask] = np.nan
            x = x.data
        x = np.atleast_1d(x)

        z = self.data["PRES"]
        if isinstance(z, ma.MaskedArray):
            if z.mask.any():
                # Propagate the mask as NaN so masked depths never match a layer.
                mask = np.ones_like(z, dtype="float32")
                mask[z.mask] = np.nan
                z = z * mask
            z = z.data
        z = np.atleast_1d(z)

        assert np.shape(z) == np.shape(x)
        assert "layers" in self.cfg, "Profile envelop cfg requires layers"

        flag = np.zeros(np.shape(x), dtype="i1")
        for layer in self.cfg["layers"]:
            # NOTE(security): eval() on config strings -- cfg must come from a
            # trusted source. Each layer looks like (z_cond, z_cond, vmin, vmax).
            ind = np.nonzero(eval("(z %s) & (z %s)" % (layer[0], layer[1])))[0]
            f = eval("(x[ind] > %s) & (x[ind] < %s)" % (layer[2], layer[3]))

            # f is a boolean array: index with it directly instead of the
            # non-idiomatic `f == True` / `f == False` comparisons (PEP 8 E712).
            flag[ind[f]] = self.flag_good
            flag[ind[~f]] = self.flag_bad

        # Masked or non-finite inputs get flag 9 (missing value).
        flag[ma.getmaskarray(x) | ~np.isfinite(x)] = 9

        self.flags["profile_envelop"] = flag
| [
"logging.getLogger",
"numpy.ones_like",
"numpy.ma.getmaskarray",
"numpy.isfinite",
"numpy.shape",
"numpy.atleast_1d"
] | [((444, 471), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (461, 471), False, 'import logging\n'), ((697, 713), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (710, 713), True, 'import numpy as np\n'), ((974, 990), 'numpy.atleast_1d', 'np.atleast_1d', (['z'], {}), '(z)\n', (987, 990), True, 'import numpy as np\n'), ((1007, 1018), 'numpy.shape', 'np.shape', (['z'], {}), '(z)\n', (1015, 1018), True, 'import numpy as np\n'), ((1022, 1033), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (1030, 1033), True, 'import numpy as np\n'), ((1135, 1146), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (1143, 1146), True, 'import numpy as np\n'), ((839, 871), 'numpy.ones_like', 'np.ones_like', (['z'], {'dtype': '"""float32"""'}), "(z, dtype='float32')\n", (851, 871), True, 'import numpy as np\n'), ((1473, 1491), 'numpy.ma.getmaskarray', 'ma.getmaskarray', (['x'], {}), '(x)\n', (1488, 1491), False, 'from numpy import ma\n'), ((1495, 1509), 'numpy.isfinite', 'np.isfinite', (['x'], {}), '(x)\n', (1506, 1509), True, 'import numpy as np\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Core module of the pulse drawer.
This module provides the `DrawerCanvas` which is a collection of `Chart` object.
The `Chart` object is a collection of drawings. A user can assign multiple channels
to a single chart instance. For example, we can define a chart for specific qubit
and assign all related channels to the chart. This chart-channel mapping is defined by
the function specified by ``layout.chart_channel_map`` of the stylesheet.
Because this chart instance is decoupled from the coordinate system of the plotter,
we can arbitrarily place charts on the plotter canvas, i.e. if we want to create 3D plot,
each chart may be placed on the X-Z plane and charts are arranged along the Y-axis.
Thus this data model maximizes the flexibility to generate an output image.
The chart instance is not just a container of drawings, as it also performs
data processing like binding abstract coordinates and truncating long pulses for an axis break.
Each chart object has `.parent` which points to the `DrawerCanvas` instance so that
each child chart can refer to the global figure settings such as time range and axis break.
Initialization
~~~~~~~~~~~~~~
The `DataCanvas` and `Chart` are not exposed to users as they are implicitly
initialized in the interface function. It is noteworthy that the data canvas is agnostic
to plotters. This means once the canvas instance is initialized we can reuse this data
among multiple plotters. The canvas is initialized with a stylesheet and quantum backend
information :py:class:~`qiskit.visualization.pulse_v2.device_info.DrawerBackendInfo`.
Chart instances are automatically generated when pulse program is loaded.
```python
canvas = DrawerCanvas(stylesheet=stylesheet, device=device)
canvas.load_program(sched)
canvas.update()
```
Once all properties are set, `.update` method is called to apply changes to drawings.
If the `DrawDataContainer` is initialized without backend information, the output shows
the time in units of the system cycle time `dt` and the frequencies are initialized to zero.
Update
~~~~~~
To update the image, a user can set new values to canvas and then call the `.update` method.
```python
canvas.set_time_range(2000, 3000, seconds=False)
canvas.update()
```
All stored drawings are updated accordingly. The plotter API can access to
drawings with `.collections` property of chart instance. This returns
an iterator of drawing with the unique data key.
If a plotter provides object handler for plotted shapes, the plotter API can manage
the lookup table of the handler and the drawing by using this data key.
"""
from copy import deepcopy
from enum import Enum
from functools import partial
from itertools import chain
from typing import Union, List, Tuple, Iterator, Optional
import numpy as np
from qiskit import pulse
from qiskit.pulse.transforms import target_qobj_transform
from qiskit.visualization.exceptions import VisualizationError
from qiskit.visualization.pulse_v2 import events, types, drawings, device_info
from qiskit.visualization.pulse_v2.stylesheet import QiskitPulseStyle
class DrawerCanvas:
    """Collection of `Chart` and configuration data.

    Pulse channels are associated with some `Chart` instance and
    drawing data object are stored in the `Chart` instance.

    Device, stylesheet, and some user generators are stored in the `DrawingCanvas`
    and `Chart` instances are also attached to the `DrawerCanvas` as children.
    Global configurations are accessed by those children to modify
    the appearance of the `Chart` output.
    """

    def __init__(self,
                 stylesheet: QiskitPulseStyle,
                 device: device_info.DrawerBackendInfo):
        """Create new data container with backend system information.

        Args:
            stylesheet: Stylesheet to decide appearance of output image.
            device: Backend information to run the program.
        """
        # stylesheet
        self.formatter = stylesheet.formatter
        self.generator = stylesheet.generator
        self.layout = stylesheet.layout

        # device info
        self.device = device

        # chart; the global chart holds drawings not tied to a specific channel
        self.global_charts = Chart(parent=self, name='global')
        self.charts = []

        # visible controls (block lists consulted by child charts)
        self.disable_chans = set()
        self.disable_types = set()

        # data scaling (per-channel vertical scale factors)
        self.chan_scales = dict()

        # global time
        self._time_range = (0, 0)
        self._time_breaks = []

        # title
        self.fig_title = ''

    @property
    def time_range(self) -> Tuple[int, int]:
        """Return current time range to draw.

        Calculate net duration and add side margin to edge location.

        Returns:
            Time window considering side margin.
        """
        t0, t1 = self._time_range

        # subtract the total length of visible axis breaks so margins are
        # proportional to the duration actually drawn
        total_time_elimination = 0
        for t0b, t1b in self.time_breaks:
            if t1b > t0 and t0b < t1:
                total_time_elimination += t1b - t0b
        net_duration = t1 - t0 - total_time_elimination

        new_t0 = t0 - net_duration * self.formatter['margin.left_percent']
        new_t1 = t1 + net_duration * self.formatter['margin.right_percent']

        return new_t0, new_t1

    @time_range.setter
    def time_range(self, new_range: Tuple[int, int]):
        """Update time range to draw."""
        self._time_range = new_range

    @property
    def time_breaks(self) -> List[Tuple[int, int]]:
        """Return time breaks with time range.

        If an edge of time range is in the axis break period,
        the axis break period is recalculated.

        Raises:
            VisualizationError: When axis break is greater than time window.

        Returns:
            List of axis break periods considering the time window edges.
        """
        t0, t1 = self._time_range

        axis_breaks = []
        for t0b, t1b in self._time_breaks:
            if t0b >= t1 or t1b <= t0:
                # skip because break period is outside of time window
                continue

            if t0b < t0 and t1b > t1:
                raise VisualizationError('Axis break is greater than time window. '
                                         'Nothing will be drawn.')
            if t0b < t0 < t1b:
                # window starts inside the break: keep only the part right of t0,
                # and only if the remainder is still long enough to be worth a break
                if t1b - t0 > self.formatter['axis_break.length']:
                    new_t0 = t0 + 0.5 * self.formatter['axis_break.max_length']
                    axis_breaks.append((new_t0, t1b))
                continue
            if t0b < t1 < t1b:
                # window ends inside the break: keep only the part left of t1
                if t1 - t0b > self.formatter['axis_break.length']:
                    new_t1 = t1 - 0.5 * self.formatter['axis_break.max_length']
                    axis_breaks.append((t0b, new_t1))
                continue
            axis_breaks.append((t0b, t1b))

        return axis_breaks

    @time_breaks.setter
    def time_breaks(self, new_breaks: List[Tuple[int, int]]):
        """Set new time breaks."""
        # keep breaks sorted by start time; downstream code relies on the order
        self._time_breaks = sorted(new_breaks, key=lambda x: x[0])

    def load_program(self, program: Union[pulse.Waveform, pulse.ParametricPulse, pulse.Schedule]):
        """Load a program to draw.

        Args:
            program: `Waveform`, `ParametricPulse`, or `Schedule` to draw.

        Raises:
            VisualizationError: When input program is invalid data format.
        """
        if isinstance(program, (pulse.Schedule, pulse.ScheduleBlock)):
            self._schedule_loader(program)
        elif isinstance(program, (pulse.Waveform, pulse.ParametricPulse)):
            self._waveform_loader(program)
        else:
            raise VisualizationError('Data type %s is not supported.' % type(program))

        # update time range
        self.set_time_range(0, program.duration, seconds=False)

        # set title
        self.fig_title = self.layout['figure_title'](program=program, device=self.device)

    def _waveform_loader(self, program: Union[pulse.Waveform, pulse.ParametricPulse]):
        """Load Waveform instance.

        This function is sub-routine of py:method:`load_program`.

        Args:
            program: `Waveform` to draw.
        """
        chart = Chart(parent=self)

        # add waveform data
        # wrap the standalone pulse in a fake Play instruction on a dummy
        # channel so the schedule-oriented generators can consume it
        fake_inst = pulse.Play(program, types.WaveformChannel())
        inst_data = types.PulseInstruction(t0=0,
                                           dt=self.device.dt,
                                           frame=types.PhaseFreqTuple(phase=0, freq=0),
                                           inst=fake_inst,
                                           is_opaque=program.is_parameterized())
        for gen in self.generator['waveform']:
            obj_generator = partial(gen,
                                    formatter=self.formatter,
                                    device=self.device)
            for data in obj_generator(inst_data):
                chart.add_data(data)

        self.charts.append(chart)

    def _schedule_loader(self, program: Union[pulse.Schedule, pulse.ScheduleBlock]):
        """Load Schedule instance.

        This function is sub-routine of py:method:`load_program`.

        Args:
            program: `Schedule` to draw.
        """
        program = target_qobj_transform(program, remove_directives=False)

        # initialize scale values per channel kind
        self.chan_scales = {}
        for chan in program.channels:
            if isinstance(chan, pulse.channels.DriveChannel):
                self.chan_scales[chan] = self.formatter['channel_scaling.drive']
            elif isinstance(chan, pulse.channels.MeasureChannel):
                self.chan_scales[chan] = self.formatter['channel_scaling.measure']
            elif isinstance(chan, pulse.channels.ControlChannel):
                self.chan_scales[chan] = self.formatter['channel_scaling.control']
            elif isinstance(chan, pulse.channels.AcquireChannel):
                self.chan_scales[chan] = self.formatter['channel_scaling.acquire']
            else:
                self.chan_scales[chan] = 1.0

        # create charts; the stylesheet layout decides which channels share a chart
        mapper = self.layout['chart_channel_map']
        for name, chans in mapper(channels=program.channels,
                                  formatter=self.formatter,
                                  device=self.device):

            chart = Chart(parent=self, name=name)

            # add standard pulse instructions
            for chan in chans:
                chart.load_program(program=program, chan=chan)

            # add barriers
            barrier_sched = program.filter(instruction_types=[pulse.instructions.RelativeBarrier],
                                           channels=chans)
            for t0, _ in barrier_sched.instructions:
                inst_data = types.BarrierInstruction(t0, self.device.dt, chans)

                for gen in self.generator['barrier']:
                    obj_generator = partial(gen,
                                            formatter=self.formatter,
                                            device=self.device)
                    for data in obj_generator(inst_data):
                        chart.add_data(data)

            # add chart axis
            chart_axis = types.ChartAxis(name=chart.name, channels=chart.channels)
            for gen in self.generator['chart']:
                obj_generator = partial(gen,
                                        formatter=self.formatter,
                                        device=self.device)
                for data in obj_generator(chart_axis):
                    chart.add_data(data)

            self.charts.append(chart)

        # add snapshot data to global chart (snapshots are not channel-bound)
        snapshot_sched = program.filter(instruction_types=[pulse.instructions.Snapshot])
        for t0, inst in snapshot_sched.instructions:
            inst_data = types.SnapshotInstruction(t0, self.device.dt, inst.label, inst.channels)

            for gen in self.generator['snapshot']:
                obj_generator = partial(gen,
                                        formatter=self.formatter,
                                        device=self.device)
                for data in obj_generator(inst_data):
                    self.global_charts.add_data(data)

        # calculate axis break
        self.time_breaks = self._calculate_axis_break(program)

    def _calculate_axis_break(self, program: pulse.Schedule) -> List[Tuple[int, int]]:
        """A helper function to calculate axis break of long pulse sequence.

        Args:
            program: A schedule to calculate axis break.

        Returns:
            List of axis break periods.
        """
        axis_breaks = []

        # collect edges of all occupied timeslots; a long gap between two
        # consecutive edges becomes an axis break candidate
        edges = set()
        for t0, t1 in chain.from_iterable(program.timeslots.values()):
            if t1 - t0 > 0:
                edges.add(t0)
                edges.add(t1)
        edges = sorted(edges)

        for t0, t1 in zip(edges[:-1], edges[1:]):
            if t1 - t0 > self.formatter['axis_break.length']:
                # keep half of `axis_break.max_length` visible on each side
                t_l = t0 + 0.5 * self.formatter['axis_break.max_length']
                t_r = t1 - 0.5 * self.formatter['axis_break.max_length']
                axis_breaks.append((t_l, t_r))

        return axis_breaks

    def set_time_range(self,
                       t_start: Union[int, float],
                       t_end: Union[int, float],
                       seconds: bool = True):
        """Set time range to draw.

        All child chart instances are updated when time range is updated.

        Args:
            t_start: Left boundary of drawing in units of cycle time or real time.
            t_end: Right boundary of drawing in units of cycle time or real time.
            seconds: Set `True` if times are given in SI unit rather than dt.

        Raises:
            VisualizationError: When times are given in float without specifying dt.
        """
        # convert into nearest cycle time
        if seconds:
            if self.device.dt is not None:
                t_start = int(np.round(t_start / self.device.dt))
                t_end = int(np.round(t_end / self.device.dt))
            else:
                raise VisualizationError('Setting time range with SI units requires '
                                         'backend `dt` information.')
        self.time_range = (t_start, t_end)

    def set_disable_channel(self,
                            channel: pulse.channels.Channel,
                            remove: bool = True):
        """Interface method to control visibility of pulse channels.

        Specified object in the blocked list will not be shown.

        Args:
            channel: A pulse channel object to disable.
            remove: Set `True` to disable, set `False` to enable.
        """
        if remove:
            self.disable_chans.add(channel)
        else:
            self.disable_chans.discard(channel)

    def set_disable_type(self,
                         data_type: types.DataTypes,
                         remove: bool = True):
        """Interface method to control visibility of data types.

        Specified object in the blocked list will not be shown.

        Args:
            data_type: A drawing data type to disable.
            remove: Set `True` to disable, set `False` to enable.
        """
        # normalize enum members to their string value so lookups are uniform
        if isinstance(data_type, Enum):
            data_type_str = str(data_type.value)
        else:
            data_type_str = data_type

        if remove:
            self.disable_types.add(data_type_str)
        else:
            self.disable_types.discard(data_type_str)

    def update(self):
        """Update all associated charts and generate actual drawing data from template object.

        This method should be called before the canvas is passed to the plotter.
        """
        for chart in self.charts:
            chart.update()
class Chart:
    """A collection of drawing to be shown on the same line.

    Multiple pulse channels can be assigned to a single `Chart`.
    The parent `DrawerCanvas` should be specified to refer to the current user preference.

    The vertical value of each `Chart` should be in the range [-1, 1].
    This truncation should be performed in the plotter interface.
    """
    # unique index of chart (class-level counter shared by all instances)
    chart_index = 0

    # list of waveform type names
    waveform_types = [str(types.WaveformType.REAL.value),
                      str(types.WaveformType.IMAG.value),
                      str(types.WaveformType.OPAQUE.value)]

    def __init__(self, parent: DrawerCanvas, name: Optional[str] = None):
        """Create new chart.

        Args:
            parent: `DrawerCanvas` that this `Chart` instance belongs to.
            name: Name of this `Chart` instance.
        """
        self.parent = parent

        # data stored in this channel
        self._collections = dict()
        self._output_dataset = dict()

        # channel metadata
        self.index = self._cls_index()
        self.name = name or ''
        self._channels = set()

        # vertical axis information
        self.vmax = 0
        self.vmin = 0
        self.scale = 1.0

        self._increment_cls_index()

    def add_data(self, data: drawings.ElementaryData):
        """Add drawing to collections.

        If the given object already exists in the collections,
        this interface replaces the old object instead of adding new entry.

        Args:
            data: New drawing to add.
        """
        self._collections[data.data_key] = data

    def load_program(self,
                     program: pulse.Schedule,
                     chan: pulse.channels.Channel):
        """Load pulse schedule.

        This method internally generates `ChannelEvents` to parse the program
        for the specified pulse channel. This method is called once

        Args:
            program: Pulse schedule to load.
            chan: A pulse channels associated with this instance.
        """
        chan_events = events.ChannelEvents.load_program(program, chan)
        chan_events.set_config(dt=self.parent.device.dt,
                               init_frequency=self.parent.device.get_channel_frequency(chan),
                               init_phase=0)

        # create objects associated with waveform
        for gen in self.parent.generator['waveform']:
            waveforms = chan_events.get_waveforms()
            obj_generator = partial(gen,
                                    formatter=self.parent.formatter,
                                    device=self.parent.device)
            drawing_items = [obj_generator(waveform) for waveform in waveforms]
            for drawing_item in list(chain.from_iterable(drawing_items)):
                self.add_data(drawing_item)

        # create objects associated with frame change
        for gen in self.parent.generator['frame']:
            frames = chan_events.get_frame_changes()
            obj_generator = partial(gen,
                                    formatter=self.parent.formatter,
                                    device=self.parent.device)
            drawing_items = [obj_generator(frame) for frame in frames]
            for drawing_item in list(chain.from_iterable(drawing_items)):
                self.add_data(drawing_item)

        self._channels.add(chan)

    def update(self):
        """Update vertical data range and scaling factor of this chart.

        Those parameters are updated based on current time range in the parent canvas.
        """
        self._output_dataset.clear()
        self.vmax = 0
        self.vmin = 0

        # waveform (processed first so vmax/vmin reflect only waveform data)
        for key, data in self._collections.items():
            if data.data_type not in Chart.waveform_types:
                continue

            # truncate, assume no abstract coordinate in waveform sample
            trunc_x, trunc_y = self._truncate_data(data)

            # no available data points
            if trunc_x.size == 0 or trunc_y.size == 0:
                continue

            # update y range
            scale = min(self.parent.chan_scales.get(chan, 1.0) for chan in data.channels)
            self.vmax = max(scale * np.max(trunc_y), self.vmax)
            self.vmin = min(scale * np.min(trunc_y), self.vmin)

            # generate new data
            new_data = deepcopy(data)
            new_data.xvals = trunc_x
            new_data.yvals = trunc_y

            self._output_dataset[key] = new_data

        # calculate chart level scaling factor
        if self.parent.formatter['control.auto_chart_scaling']:
            max_val = max(abs(self.vmax),
                          abs(self.vmin),
                          self.parent.formatter['general.vertical_resolution'])
            self.scale = min(1.0 / max_val, self.parent.formatter['general.max_scale'])
        else:
            self.scale = 1.0

        # update vertical range with scaling and limitation
        self.vmax = max(self.scale * self.vmax,
                        self.parent.formatter['channel_scaling.pos_spacing'])
        self.vmin = min(self.scale * self.vmin,
                        self.parent.formatter['channel_scaling.neg_spacing'])

        # other data
        for key, data in self._collections.items():
            if data.data_type in Chart.waveform_types:
                continue

            # truncate
            trunc_x, trunc_y = self._truncate_data(data)

            # no available data points
            if trunc_x.size == 0 or trunc_y.size == 0:
                continue

            # generate new data
            new_data = deepcopy(data)
            new_data.xvals = trunc_x
            new_data.yvals = trunc_y

            self._output_dataset[key] = new_data

    @property
    def is_active(self) -> bool:
        """Check if there is any active waveform data in this entry.

        Returns:
            Return `True` if there is any visible waveform in this chart.
        """
        for data in self._output_dataset.values():
            if data.data_type in Chart.waveform_types and self._check_visible(data):
                return True
        return False

    @property
    def collections(self) -> Iterator[Tuple[str, drawings.ElementaryData]]:
        """Return currently active entries from drawing data collection.

        The object is returned with unique name as a key of an object handler.
        When the horizontal coordinate contains `AbstractCoordinate`,
        the value is substituted by current time range preference.
        """
        for name, data in self._output_dataset.items():
            # prepare unique name
            unique_id = 'chart{ind:d}_{key}'.format(ind=self.index, key=name)
            if self._check_visible(data):
                yield unique_id, data

    @property
    def channels(self) -> List[pulse.channels.Channel]:
        """Return a list of channels associated with this chart.

        Returns:
            List of channels associated with this chart.
        """
        return list(self._channels)

    def _truncate_data(self,
                       data: drawings.ElementaryData) -> Tuple[np.ndarray, np.ndarray]:
        """A helper function to truncate drawings according to time breaks.

        # TODO: move this function to common module to support axis break for timeline.

        Args:
            data: Drawing object to truncate.

        Returns:
            Set of truncated numpy arrays for x and y coordinate.
        """
        xvals = self._bind_coordinate(data.xvals)
        yvals = self._bind_coordinate(data.yvals)

        if isinstance(data, drawings.BoxData):
            # truncate box data. these object don't require interpolation at axis break.
            return self._truncate_boxes(xvals, yvals)
        elif data.data_type in [types.LabelType.PULSE_NAME, types.LabelType.OPAQUE_BOXTEXT]:
            # truncate pulse labels. these objects are not removed by truncation.
            return self._truncate_pulse_labels(xvals, yvals)
        else:
            # other objects
            return self._truncate_vectors(xvals, yvals)

    def _truncate_pulse_labels(self,
                               xvals: np.ndarray,
                               yvals: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """A helper function to remove text according to time breaks.

        Args:
            xvals: Time points.
            yvals: Data points.

        Returns:
            Set of truncated numpy arrays for x and y coordinate.
        """
        xpos = xvals[0]
        t0, t1 = self.parent.time_range

        if xpos < t0 or xpos > t1:
            return np.array([]), np.array([])

        # shift the label left by the accumulated length of breaks before it;
        # a label inside a break is clamped to the break's left edge
        offset_accumulation = 0
        for tl, tr in self.parent.time_breaks:
            if xpos < tl:
                return np.array([xpos - offset_accumulation]), yvals
            if tl < xpos < tr:
                return np.array([tl - offset_accumulation]), yvals
            else:
                offset_accumulation += tr - tl
        return np.array([xpos - offset_accumulation]), yvals

    def _truncate_boxes(self,
                        xvals: np.ndarray,
                        yvals: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """A helper function to clip box object according to time breaks.

        Args:
            xvals: Time points.
            yvals: Data points.

        Returns:
            Set of truncated numpy arrays for x and y coordinate.
        """
        x0, x1 = xvals
        t0, t1 = self.parent.time_range

        if x1 < t0 or x0 > t1:
            # out of drawing range
            return np.array([]), np.array([])

        # clip outside
        x0 = max(t0, x0)
        x1 = min(t1, x1)

        offset_accumulate = 0
        for tl, tr in self.parent.time_breaks:
            tl -= offset_accumulate
            tr -= offset_accumulate

            #
            # truncate, there are 5 patterns wrt the relative position of truncation and xvals
            #
            if x1 < tl:
                break

            if tl < x0 and tr > x1:
                # case 1: all data points are truncated
                #      :   +-----+   :
                #      :   |/////|   :
                # -----:---+-----+---:-----
                #      l   0     1   r
                return np.array([]), np.array([])
            elif tl < x1 < tr:
                # case 2: t < tl, right side is truncated
                #      +---:-----+   :
                #      |   ://///|   :
                # -----+---:-----+---:-----
                #      0   l     1   r
                x1 = tl
            elif tl < x0 < tr:
                # case 3: tr > t, left side is truncated
                #      :   +-----:---+
                #      :   |/////:   |
                # -----:---+-----:---+-----
                #      l   0     r   1
                x0 = tl
                x1 = tl + t1 - tr
            elif tl > x0 and tr < x1:
                # case 4: tr > t > tl, middle part is truncated
                #      +---:-----:---+
                #      |   ://///:   |
                # -----+---:-----:---+-----
                #      0   l     r   1
                x1 -= tr - tl
            elif tr < x0:
                # case 5: tr > t > tl, nothing truncated but need time shift
                #      :   :     +---+
                #      :   :     |   |
                # -----:---:-----+---+-----
                #      l   r     0   1
                x0 -= tr - tl
                x1 -= tr - tl

            offset_accumulate += tr - tl

        return np.asarray([x0, x1], dtype=float), yvals

    def _truncate_vectors(self,
                          xvals: np.ndarray,
                          yvals: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """A helper function to remove sequential data points according to time breaks.

        Args:
            xvals: Time points.
            yvals: Data points.

        Returns:
            Set of truncated numpy arrays for x and y coordinate.
        """
        xvals = np.asarray(xvals, dtype=float)
        yvals = np.asarray(yvals, dtype=float)
        t0, t1 = self.parent.time_range

        if max(xvals) < t0 or min(xvals) > t1:
            # out of drawing range
            return np.array([]), np.array([])

        if min(xvals) < t0:
            # truncate x less than left limit; interpolate y at the boundary
            inds = xvals > t0
            yvals = np.append(np.interp(t0, xvals, yvals), yvals[inds])
            xvals = np.append(t0, xvals[inds])

        if max(xvals) > t1:
            # truncate x larger than right limit; interpolate y at the boundary
            inds = xvals < t1
            yvals = np.append(yvals[inds], np.interp(t1, xvals, yvals))
            xvals = np.append(xvals[inds], t1)

        # time breaks: each break splits the vector, interpolating y at the
        # break edges and shifting later segments left by the break length
        trunc_xvals = [xvals]
        trunc_yvals = [yvals]
        offset_accumulate = 0
        for tl, tr in self.parent.time_breaks:
            sub_xs = trunc_xvals.pop()
            sub_ys = trunc_yvals.pop()
            tl -= offset_accumulate
            tr -= offset_accumulate

            #
            # truncate, there are 5 patterns wrt the relative position of truncation and xvals
            #
            min_xs = min(sub_xs)
            max_xs = max(sub_xs)
            if max_xs < tl:
                trunc_xvals.append(sub_xs)
                trunc_yvals.append(sub_ys)
                break

            if tl < min_xs and tr > max_xs:
                # case 1: all data points are truncated
                #      :   +-----+   :
                #      :   |/////|   :
                # -----:---+-----+---:-----
                #      l   min   max r
                return np.array([]), np.array([])
            elif tl < max_xs < tr:
                # case 2: t < tl, right side is truncated
                #      +---:-----+   :
                #      |   ://///|   :
                # -----+---:-----+---:-----
                #      min l     max r
                inds = sub_xs > tl
                trunc_xvals.append(np.append(tl, sub_xs[inds]) - (tl - min_xs))
                trunc_yvals.append(np.append(np.interp(tl, sub_xs, sub_ys), sub_ys[inds]))
            elif tl < min_xs < tr:
                # case 3: tr > t, left side is truncated
                #      :   +-----:---+
                #      :   |/////:   |
                # -----:---+-----:---+-----
                #      l   min   r   max
                inds = sub_xs < tr
                trunc_xvals.append(np.append(sub_xs[inds], tr))
                trunc_yvals.append(np.append(sub_ys[inds], np.interp(tr, sub_xs, sub_ys)))
            elif tl > min_xs and tr < max_xs:
                # case 4: tr > t > tl, middle part is truncated
                #      +---:-----:---+
                #      |   ://///:   |
                # -----+---:-----:---+-----
                #      min l     r   max
                inds0 = sub_xs < tl
                trunc_xvals.append(np.append(sub_xs[inds0], tl))
                trunc_yvals.append(np.append(sub_ys[inds0], np.interp(tl, sub_xs, sub_ys)))
                inds1 = sub_xs > tr
                trunc_xvals.append(np.append(tr, sub_xs[inds1]) - (tr - tl))
                trunc_yvals.append(np.append(np.interp(tr, sub_xs, sub_ys), sub_ys[inds1]))
            elif tr < min_xs:
                # case 5: tr > t > tl, nothing truncated but need time shift
                #      :   :     +---+
                #      :   :     |   |
                # -----:---:-----+---+-----
                #      l   r     0   1
                trunc_xvals.append(sub_xs - (tr - tl))
                trunc_yvals.append(sub_ys)
            else:
                # no need to truncate
                trunc_xvals.append(sub_xs)
                trunc_yvals.append(sub_ys)
            offset_accumulate += tr - tl

        new_x = np.concatenate(trunc_xvals)
        new_y = np.concatenate(trunc_yvals)

        return np.asarray(new_x, dtype=float), np.asarray(new_y, dtype=float)

    def _bind_coordinate(self, vals: Iterator[types.Coordinate]) -> np.ndarray:
        """A helper function to bind actual coordinates to an `AbstractCoordinate`.

        Args:
            vals: Sequence of coordinate objects associated with a drawing.

        Returns:
            Numpy data array with substituted values.
        """
        def substitute(val: types.Coordinate):
            if val == types.AbstractCoordinate.LEFT:
                return self.parent.time_range[0]
            if val == types.AbstractCoordinate.RIGHT:
                return self.parent.time_range[1]
            if val == types.AbstractCoordinate.TOP:
                return self.vmax
            if val == types.AbstractCoordinate.BOTTOM:
                return self.vmin
            raise VisualizationError('Coordinate {name} is not supported.'.format(name=val))

        try:
            # fast path: all coordinates are already numeric
            return np.asarray(vals, dtype=float)
        except (TypeError, ValueError):
            return np.asarray(list(map(substitute, vals)), dtype=float)

    def _check_visible(self, data: drawings.ElementaryData) -> bool:
        """A helper function to check if the data is visible.

        Args:
            data: Drawing object to test.

        Returns:
            Return `True` if the data is visible.
        """
        is_active_type = data.data_type not in self.parent.disable_types
        is_active_chan = any(chan not in self.parent.disable_chans for chan in data.channels)
        if not (is_active_type and is_active_chan):
            return False

        return True

    @classmethod
    def _increment_cls_index(cls):
        """Increment counter of the chart."""
        cls.chart_index += 1

    @classmethod
    def _cls_index(cls) -> int:
        """Return counter index of the chart."""
        return cls.chart_index
| [
"numpy.array",
"copy.deepcopy",
"qiskit.pulse.transforms.target_qobj_transform",
"numpy.asarray",
"numpy.max",
"qiskit.visualization.pulse_v2.types.ChartAxis",
"itertools.chain.from_iterable",
"numpy.concatenate",
"numpy.min",
"numpy.round",
"qiskit.visualization.pulse_v2.types.SnapshotInstructi... | [((9694, 9749), 'qiskit.pulse.transforms.target_qobj_transform', 'target_qobj_transform', (['program'], {'remove_directives': '(False)'}), '(program, remove_directives=False)\n', (9715, 9749), False, 'from qiskit.pulse.transforms import target_qobj_transform\n'), ((18398, 18446), 'qiskit.visualization.pulse_v2.events.ChannelEvents.load_program', 'events.ChannelEvents.load_program', (['program', 'chan'], {}), '(program, chan)\n', (18431, 18446), False, 'from qiskit.visualization.pulse_v2 import events, types, drawings, device_info\n'), ((28462, 28492), 'numpy.asarray', 'np.asarray', (['xvals'], {'dtype': 'float'}), '(xvals, dtype=float)\n', (28472, 28492), True, 'import numpy as np\n'), ((28509, 28539), 'numpy.asarray', 'np.asarray', (['yvals'], {'dtype': 'float'}), '(yvals, dtype=float)\n', (28519, 28539), True, 'import numpy as np\n'), ((32248, 32275), 'numpy.concatenate', 'np.concatenate', (['trunc_xvals'], {}), '(trunc_xvals)\n', (32262, 32275), True, 'import numpy as np\n'), ((32292, 32319), 'numpy.concatenate', 'np.concatenate', (['trunc_yvals'], {}), '(trunc_yvals)\n', (32306, 32319), True, 'import numpy as np\n'), ((8728, 8751), 'qiskit.visualization.pulse_v2.types.WaveformChannel', 'types.WaveformChannel', ([], {}), '()\n', (8749, 8751), False, 'from qiskit.visualization.pulse_v2 import events, types, drawings, device_info\n'), ((9167, 9225), 'functools.partial', 'partial', (['gen'], {'formatter': 'self.formatter', 'device': 'self.device'}), '(gen, formatter=self.formatter, device=self.device)\n', (9174, 9225), False, 'from functools import partial\n'), ((11663, 11720), 'qiskit.visualization.pulse_v2.types.ChartAxis', 'types.ChartAxis', ([], {'name': 'chart.name', 'channels': 'chart.channels'}), '(name=chart.name, channels=chart.channels)\n', (11678, 11720), False, 'from qiskit.visualization.pulse_v2 import events, types, drawings, device_info\n'), ((12280, 12352), 
'qiskit.visualization.pulse_v2.types.SnapshotInstruction', 'types.SnapshotInstruction', (['t0', 'self.device.dt', 'inst.label', 'inst.channels'], {}), '(t0, self.device.dt, inst.label, inst.channels)\n', (12305, 12352), False, 'from qiskit.visualization.pulse_v2 import events, types, drawings, device_info\n'), ((18828, 18900), 'functools.partial', 'partial', (['gen'], {'formatter': 'self.parent.formatter', 'device': 'self.parent.device'}), '(gen, formatter=self.parent.formatter, device=self.parent.device)\n', (18835, 18900), False, 'from functools import partial\n'), ((19358, 19430), 'functools.partial', 'partial', (['gen'], {'formatter': 'self.parent.formatter', 'device': 'self.parent.device'}), '(gen, formatter=self.parent.formatter, device=self.parent.device)\n', (19365, 19430), False, 'from functools import partial\n'), ((20713, 20727), 'copy.deepcopy', 'deepcopy', (['data'], {}), '(data)\n', (20721, 20727), False, 'from copy import deepcopy\n'), ((21984, 21998), 'copy.deepcopy', 'deepcopy', (['data'], {}), '(data)\n', (21992, 21998), False, 'from copy import deepcopy\n'), ((25409, 25447), 'numpy.array', 'np.array', (['[xpos - offset_accumulation]'], {}), '([xpos - offset_accumulation])\n', (25417, 25447), True, 'import numpy as np\n'), ((27985, 28018), 'numpy.asarray', 'np.asarray', (['[x0, x1]'], {'dtype': 'float'}), '([x0, x1], dtype=float)\n', (27995, 28018), True, 'import numpy as np\n'), ((28906, 28932), 'numpy.append', 'np.append', (['t0', 'xvals[inds]'], {}), '(t0, xvals[inds])\n', (28915, 28932), True, 'import numpy as np\n'), ((29133, 29159), 'numpy.append', 'np.append', (['xvals[inds]', 't1'], {}), '(xvals[inds], t1)\n', (29142, 29159), True, 'import numpy as np\n'), ((32336, 32366), 'numpy.asarray', 'np.asarray', (['new_x'], {'dtype': 'float'}), '(new_x, dtype=float)\n', (32346, 32366), True, 'import numpy as np\n'), ((32368, 32398), 'numpy.asarray', 'np.asarray', (['new_y'], {'dtype': 'float'}), '(new_y, dtype=float)\n', (32378, 32398), True, 
'import numpy as np\n'), ((33290, 33319), 'numpy.asarray', 'np.asarray', (['vals'], {'dtype': 'float'}), '(vals, dtype=float)\n', (33300, 33319), True, 'import numpy as np\n'), ((6597, 6686), 'qiskit.visualization.exceptions.VisualizationError', 'VisualizationError', (['"""Axis break is greater than time window. Nothing will be drawn."""'], {}), "(\n 'Axis break is greater than time window. Nothing will be drawn.')\n", (6615, 6686), False, 'from qiskit.visualization.exceptions import VisualizationError\n'), ((8913, 8950), 'qiskit.visualization.pulse_v2.types.PhaseFreqTuple', 'types.PhaseFreqTuple', ([], {'phase': '(0)', 'freq': '(0)'}), '(phase=0, freq=0)\n', (8933, 8950), False, 'from qiskit.visualization.pulse_v2 import events, types, drawings, device_info\n'), ((11216, 11267), 'qiskit.visualization.pulse_v2.types.BarrierInstruction', 'types.BarrierInstruction', (['t0', 'self.device.dt', 'chans'], {}), '(t0, self.device.dt, chans)\n', (11240, 11267), False, 'from qiskit.visualization.pulse_v2 import events, types, drawings, device_info\n'), ((11801, 11859), 'functools.partial', 'partial', (['gen'], {'formatter': 'self.formatter', 'device': 'self.device'}), '(gen, formatter=self.formatter, device=self.device)\n', (11808, 11859), False, 'from functools import partial\n'), ((12436, 12494), 'functools.partial', 'partial', (['gen'], {'formatter': 'self.formatter', 'device': 'self.device'}), '(gen, formatter=self.formatter, device=self.device)\n', (12443, 12494), False, 'from functools import partial\n'), ((14587, 14681), 'qiskit.visualization.exceptions.VisualizationError', 'VisualizationError', (['"""Setting time range with SI units requires backend `dt` information."""'], {}), "(\n 'Setting time range with SI units requires backend `dt` information.')\n", (14605, 14681), False, 'from qiskit.visualization.exceptions import VisualizationError\n'), ((19090, 19124), 'itertools.chain.from_iterable', 'chain.from_iterable', (['drawing_items'], {}), '(drawing_items)\n', 
(19109, 19124), False, 'from itertools import chain\n'), ((19611, 19645), 'itertools.chain.from_iterable', 'chain.from_iterable', (['drawing_items'], {}), '(drawing_items)\n', (19630, 19645), False, 'from itertools import chain\n'), ((25030, 25042), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (25038, 25042), True, 'import numpy as np\n'), ((25044, 25056), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (25052, 25056), True, 'import numpy as np\n'), ((26004, 26016), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (26012, 26016), True, 'import numpy as np\n'), ((26018, 26030), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (26026, 26030), True, 'import numpy as np\n'), ((28682, 28694), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (28690, 28694), True, 'import numpy as np\n'), ((28696, 28708), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (28704, 28708), True, 'import numpy as np\n'), ((28844, 28871), 'numpy.interp', 'np.interp', (['t0', 'xvals', 'yvals'], {}), '(t0, xvals, yvals)\n', (28853, 28871), True, 'import numpy as np\n'), ((29084, 29111), 'numpy.interp', 'np.interp', (['t1', 'xvals', 'yvals'], {}), '(t1, xvals, yvals)\n', (29093, 29111), True, 'import numpy as np\n'), ((11358, 11416), 'functools.partial', 'partial', (['gen'], {'formatter': 'self.formatter', 'device': 'self.device'}), '(gen, formatter=self.formatter, device=self.device)\n', (11365, 11416), False, 'from functools import partial\n'), ((14449, 14483), 'numpy.round', 'np.round', (['(t_start / self.device.dt)'], {}), '(t_start / self.device.dt)\n', (14457, 14483), True, 'import numpy as np\n'), ((14513, 14545), 'numpy.round', 'np.round', (['(t_end / self.device.dt)'], {}), '(t_end / self.device.dt)\n', (14521, 14545), True, 'import numpy as np\n'), ((20565, 20580), 'numpy.max', 'np.max', (['trunc_y'], {}), '(trunc_y)\n', (20571, 20580), True, 'import numpy as np\n'), ((20629, 20644), 'numpy.min', 'np.min', (['trunc_y'], {}), '(trunc_y)\n', (20635, 20644), True, 'import 
numpy as np\n'), ((25185, 25223), 'numpy.array', 'np.array', (['[xpos - offset_accumulation]'], {}), '([xpos - offset_accumulation])\n', (25193, 25223), True, 'import numpy as np\n'), ((25285, 25321), 'numpy.array', 'np.array', (['[tl - offset_accumulation]'], {}), '([tl - offset_accumulation])\n', (25293, 25321), True, 'import numpy as np\n'), ((26702, 26714), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (26710, 26714), True, 'import numpy as np\n'), ((26716, 26728), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (26724, 26728), True, 'import numpy as np\n'), ((30081, 30093), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (30089, 30093), True, 'import numpy as np\n'), ((30095, 30107), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (30103, 30107), True, 'import numpy as np\n'), ((30432, 30459), 'numpy.append', 'np.append', (['tl', 'sub_xs[inds]'], {}), '(tl, sub_xs[inds])\n', (30441, 30459), True, 'import numpy as np\n'), ((30522, 30551), 'numpy.interp', 'np.interp', (['tl', 'sub_xs', 'sub_ys'], {}), '(tl, sub_xs, sub_ys)\n', (30531, 30551), True, 'import numpy as np\n'), ((30892, 30919), 'numpy.append', 'np.append', (['sub_xs[inds]', 'tr'], {}), '(sub_xs[inds], tr)\n', (30901, 30919), True, 'import numpy as np\n'), ((30980, 31009), 'numpy.interp', 'np.interp', (['tr', 'sub_xs', 'sub_ys'], {}), '(tr, sub_xs, sub_ys)\n', (30989, 31009), True, 'import numpy as np\n'), ((31355, 31383), 'numpy.append', 'np.append', (['sub_xs[inds0]', 'tl'], {}), '(sub_xs[inds0], tl)\n', (31364, 31383), True, 'import numpy as np\n'), ((31445, 31474), 'numpy.interp', 'np.interp', (['tl', 'sub_xs', 'sub_ys'], {}), '(tl, sub_xs, sub_ys)\n', (31454, 31474), True, 'import numpy as np\n'), ((31548, 31576), 'numpy.append', 'np.append', (['tr', 'sub_xs[inds1]'], {}), '(tr, sub_xs[inds1])\n', (31557, 31576), True, 'import numpy as np\n'), ((31635, 31664), 'numpy.interp', 'np.interp', (['tr', 'sub_xs', 'sub_ys'], {}), '(tr, sub_xs, sub_ys)\n', (31644, 31664), True, 'import numpy 
as np\n')] |
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
import os
import glob
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
class PilotNetDataset(Dataset):
"""PilotNet dataset class that preserver temporal continuity. Returns
images and ground truth values when the object is indexed.
Parameters
----------
path : str
Path of the dataset folder. If the folder does not exists, the folder
is created and the dataset is downloaded and extracted to the folder.
Defaults to '../data'.
sequence : int
Length of temporal sequence to preserve. Default is 16.
transform : lambda
Transformation function to be applied to the input image.
Defaults to None.
train : bool
Flag to indicate training or testing set. Defaults to True.
visualize : bool
If true, the train/test split is ignored and the temporal sequence of
the data is preserved. Defaults to False.
Examples
--------
>>> dataset = PilotNetDataset()
>>> images, gts = dataeset[0]
>>> num_samples = len(dataset)
"""
def __init__(
self, path='data', sequence=16,
train=True, visualize=False, transform=None,
extract=True, download=True,
):
self.path = path + '/driving_dataset/'
id = '1Ue4XohCOV5YXy57S_5tDfCVqzLr101M7'
dataset_link = 'https://docs.google.com/uc?export=download&id={id}'
download_msg = f'''Please download dataset form \n{dataset_link}')
and copy driving_dataset.zip to {path}/
Note: create the folder if it does not exist.'''.replace(' ' * 8, '')
# check if dataset is available in path. If not download it
if len(glob.glob(self.path)) == 0:
if download is True:
os.makedirs(path, exist_ok=True)
print('Dataset not available locally. Starting download ...')
download_cmd = 'wget --load-cookies /tmp/cookies.txt '\
+ '"https://docs.google.com/uc?export=download&confirm='\
+ '$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate '\
+ f"'https://docs.google.com/uc?export=download&id={id}' -O- | "\
+ f"sed -rn \'s/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p\')&id={id}"\
+ f'" -O {path}/driving_dataset.zip && rm -rf /tmp/cookies.txt'
print(download_cmd)
exec_id = os.system(download_cmd + f' >> {path}/download.log')
if exec_id == 0:
print('Download complete.')
else:
raise Exception(download_msg)
if extract is True:
if os.path.exists(path + '/driving_dataset.zip'):
print('Extracting data (this may take a while) ...')
exec_id = os.system(
f'unzip {path}/driving_dataset.zip -d {path} '
f'>> {path}/unzip.log'
)
if exec_id == 0:
print('Extraction complete.')
else:
print(
f'Could not extract file '
f'{path + "/driving_dataset.zip"}. '
f'Please extract it manually.'
)
else:
print(f'Could not find {path + "/driving_dataset.zip"}.')
raise Exception(download_msg)
else:
print('Dataset does not exist. set extract=True')
if not os.path.exists(path + '/driving_dataset.zip'):
raise Exception(download_msg)
with open(self.path + '/data.txt', 'r') as data:
all_samples = [line.split() for line in data]
self.samples = all_samples
if visualize is True:
inds = np.arange(len(all_samples)//sequence)
else:
inds = np.random.RandomState(
seed=42
).permutation(len(all_samples)//sequence)
if train is True:
self.ind_map = inds[
:int(len(all_samples) / sequence * 0.8)
] * sequence
else:
self.ind_map = inds[
-int(len(all_samples) / sequence * 0.2):
] * sequence
self.sequence = sequence
self.transform = transform
def __getitem__(self, index: int):
images = []
gts = []
for i in range(self.sequence):
path, gt = self.samples[self.ind_map[index] + i]
if np.abs(float(gt)) < 1e-5 and i != 0 and i != len(self.samples)-1:
gt = 0.5 * ( # removing dataset anomalities
float(self.samples[self.ind_map[index] + i-1][1]) +
float(self.samples[self.ind_map[index] + i+1][1])
)
image = Image.open(self.path + path)
gt_val = float(gt) * np.pi / 180
if self.transform is not None:
image = self.transform(image)
images.append(image)
gts.append(torch.tensor(gt_val, dtype=image.dtype))
images = torch.stack(images, dim=3)
gts = torch.stack(gts, dim=0)
return images, gts
def __len__(self):
return len(self.ind_map)
| [
"os.path.exists",
"PIL.Image.open",
"os.makedirs",
"torch.stack",
"torch.tensor",
"numpy.random.RandomState",
"os.system",
"glob.glob"
] | [((5351, 5377), 'torch.stack', 'torch.stack', (['images'], {'dim': '(3)'}), '(images, dim=3)\n', (5362, 5377), False, 'import torch\n'), ((5392, 5415), 'torch.stack', 'torch.stack', (['gts'], {'dim': '(0)'}), '(gts, dim=0)\n', (5403, 5415), False, 'import torch\n'), ((5073, 5101), 'PIL.Image.open', 'Image.open', (['(self.path + path)'], {}), '(self.path + path)\n', (5083, 5101), False, 'from PIL import Image\n'), ((1784, 1804), 'glob.glob', 'glob.glob', (['self.path'], {}), '(self.path)\n', (1793, 1804), False, 'import glob\n'), ((1861, 1893), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (1872, 1893), False, 'import os\n'), ((2560, 2612), 'os.system', 'os.system', (["(download_cmd + f' >> {path}/download.log')"], {}), "(download_cmd + f' >> {path}/download.log')\n", (2569, 2612), False, 'import os\n'), ((2818, 2863), 'os.path.exists', 'os.path.exists', (["(path + '/driving_dataset.zip')"], {}), "(path + '/driving_dataset.zip')\n", (2832, 2863), False, 'import os\n'), ((5292, 5331), 'torch.tensor', 'torch.tensor', (['gt_val'], {'dtype': 'image.dtype'}), '(gt_val, dtype=image.dtype)\n', (5304, 5331), False, 'import torch\n'), ((2968, 3044), 'os.system', 'os.system', (['f"""unzip {path}/driving_dataset.zip -d {path} >> {path}/unzip.log"""'], {}), "(f'unzip {path}/driving_dataset.zip -d {path} >> {path}/unzip.log')\n", (2977, 3044), False, 'import os\n'), ((3729, 3774), 'os.path.exists', 'os.path.exists', (["(path + '/driving_dataset.zip')"], {}), "(path + '/driving_dataset.zip')\n", (3743, 3774), False, 'import os\n'), ((4099, 4129), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(42)'}), '(seed=42)\n', (4120, 4129), True, 'import numpy as np\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Operator construction, including OpPrimitives and singletons. """
import unittest
from test.aqua import QiskitAquaTestCase
import itertools
import scipy
from scipy.stats import unitary_group
import numpy as np
from ddt import ddt, data
from qiskit import QiskitError
from qiskit.aqua import AquaError
from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector
from qiskit.extensions.exceptions import ExtensionError
from qiskit.quantum_info import Operator, Pauli, Statevector
from qiskit.circuit.library import CZGate, ZGate
from qiskit.aqua.operators import (
X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn,
CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp,
SummedOp, OperatorBase, Zero
)
from qiskit.aqua.operators import MatrixOperator
# pylint: disable=invalid-name
@ddt
class TestOpConstruction(QiskitAquaTestCase):
"""Operator Construction tests."""
def test_pauli_primitives(self):
""" from to file test """
newop = X ^ Y ^ Z ^ I
self.assertEqual(newop.primitive, Pauli(label='XYZI'))
kpower_op = (Y ^ 5) ^ (I ^ 3)
self.assertEqual(kpower_op.primitive, Pauli(label='YYYYYIII'))
kpower_op2 = (Y ^ I) ^ 4
self.assertEqual(kpower_op2.primitive, Pauli(label='YIYIYIYI'))
# Check immutability
self.assertEqual(X.primitive, Pauli(label='X'))
self.assertEqual(Y.primitive, Pauli(label='Y'))
self.assertEqual(Z.primitive, Pauli(label='Z'))
self.assertEqual(I.primitive, Pauli(label='I'))
def test_composed_eval(self):
""" Test eval of ComposedOp """
self.assertAlmostEqual(Minus.eval('1'), -.5 ** .5)
def test_evals(self):
""" evals test """
# pylint: disable=no-member
# TODO: Think about eval names
self.assertEqual(Z.eval('0').eval('0'), 1)
self.assertEqual(Z.eval('1').eval('0'), 0)
self.assertEqual(Z.eval('0').eval('1'), 0)
self.assertEqual(Z.eval('1').eval('1'), -1)
self.assertEqual(X.eval('0').eval('0'), 0)
self.assertEqual(X.eval('1').eval('0'), 1)
self.assertEqual(X.eval('0').eval('1'), 1)
self.assertEqual(X.eval('1').eval('1'), 0)
self.assertEqual(Y.eval('0').eval('0'), 0)
self.assertEqual(Y.eval('1').eval('0'), -1j)
self.assertEqual(Y.eval('0').eval('1'), 1j)
self.assertEqual(Y.eval('1').eval('1'), 0)
with self.assertRaises(ValueError):
Y.eval('11')
with self.assertRaises(ValueError):
(X ^ Y).eval('1111')
with self.assertRaises(ValueError):
Y.eval((X ^ X).to_matrix_op())
# Check that Pauli logic eval returns same as matrix logic
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('0').eval('0'), 1)
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('1').eval('0'), 0)
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('0').eval('1'), 0)
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('1').eval('1'), -1)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('0').eval('0'), 0)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('1').eval('0'), 1)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('0').eval('1'), 1)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('1').eval('1'), 0)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('0').eval('0'), 0)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('1').eval('0'), -1j)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('0').eval('1'), 1j)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('1').eval('1'), 0)
pauli_op = Z ^ I ^ X ^ Y
mat_op = PrimitiveOp(pauli_op.to_matrix())
full_basis = list(map(''.join, itertools.product('01', repeat=pauli_op.num_qubits)))
for bstr1, bstr2 in itertools.product(full_basis, full_basis):
# print('{} {} {} {}'.format(bstr1, bstr2, pauli_op.eval(bstr1, bstr2),
# mat_op.eval(bstr1, bstr2)))
np.testing.assert_array_almost_equal(pauli_op.eval(bstr1).eval(bstr2),
mat_op.eval(bstr1).eval(bstr2))
gnarly_op = SummedOp([(H ^ I ^ Y).compose(X ^ X ^ Z).tensor(Z),
PrimitiveOp(Operator.from_label('+r0I')),
3 * (X ^ CX ^ T)], coeff=3 + .2j)
gnarly_mat_op = PrimitiveOp(gnarly_op.to_matrix())
full_basis = list(map(''.join, itertools.product('01', repeat=gnarly_op.num_qubits)))
for bstr1, bstr2 in itertools.product(full_basis, full_basis):
np.testing.assert_array_almost_equal(gnarly_op.eval(bstr1).eval(bstr2),
gnarly_mat_op.eval(bstr1).eval(bstr2))
def test_circuit_construction(self):
""" circuit construction test """
hadq2 = H ^ I
cz = hadq2.compose(CX).compose(hadq2)
qc = QuantumCircuit(2)
qc.append(cz.primitive, qargs=range(2))
ref_cz_mat = PrimitiveOp(CZGate()).to_matrix()
np.testing.assert_array_almost_equal(cz.to_matrix(), ref_cz_mat)
def test_io_consistency(self):
""" consistency test """
new_op = X ^ Y ^ I
label = 'XYI'
# label = new_op.primitive.to_label()
self.assertEqual(str(new_op.primitive), label)
np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(),
Operator.from_label(label).data)
self.assertEqual(new_op.primitive, Pauli(label=label))
x_mat = X.primitive.to_matrix()
y_mat = Y.primitive.to_matrix()
i_mat = np.eye(2, 2)
np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(),
np.kron(np.kron(x_mat, y_mat), i_mat))
hi = np.kron(H.to_matrix(), I.to_matrix())
hi2 = Operator.from_label('HI').data
hi3 = (H ^ I).to_matrix()
np.testing.assert_array_almost_equal(hi, hi2)
np.testing.assert_array_almost_equal(hi2, hi3)
xy = np.kron(X.to_matrix(), Y.to_matrix())
xy2 = Operator.from_label('XY').data
xy3 = (X ^ Y).to_matrix()
np.testing.assert_array_almost_equal(xy, xy2)
np.testing.assert_array_almost_equal(xy2, xy3)
# Check if numpy array instantiation is the same as from Operator
matrix_op = Operator.from_label('+r')
np.testing.assert_array_almost_equal(PrimitiveOp(matrix_op).to_matrix(),
PrimitiveOp(matrix_op.data).to_matrix())
# Ditto list of lists
np.testing.assert_array_almost_equal(PrimitiveOp(matrix_op.data.tolist()).to_matrix(),
PrimitiveOp(matrix_op.data).to_matrix())
# TODO make sure this works once we resolve endianness mayhem
# qc = QuantumCircuit(3)
# qc.x(2)
# qc.y(1)
# from qiskit import BasicAer, QuantumCircuit, execute
# unitary = execute(qc, BasicAer.get_backend('unitary_simulator')).result().get_unitary()
# np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(), unitary)
def test_to_matrix(self):
"""to matrix text """
np.testing.assert_array_equal(X.to_matrix(), Operator.from_label('X').data)
np.testing.assert_array_equal(Y.to_matrix(), Operator.from_label('Y').data)
np.testing.assert_array_equal(Z.to_matrix(), Operator.from_label('Z').data)
op1 = Y + H
np.testing.assert_array_almost_equal(op1.to_matrix(), Y.to_matrix() + H.to_matrix())
op2 = op1 * .5
np.testing.assert_array_almost_equal(op2.to_matrix(), op1.to_matrix() * .5)
op3 = (4 - .6j) * op2
np.testing.assert_array_almost_equal(op3.to_matrix(), op2.to_matrix() * (4 - .6j))
op4 = op3.tensor(X)
np.testing.assert_array_almost_equal(op4.to_matrix(),
np.kron(op3.to_matrix(), X.to_matrix()))
op5 = op4.compose(H ^ I)
np.testing.assert_array_almost_equal(op5.to_matrix(), np.dot(op4.to_matrix(),
(H ^ I).to_matrix()))
op6 = op5 + PrimitiveOp(Operator.from_label('+r').data)
np.testing.assert_array_almost_equal(
op6.to_matrix(), op5.to_matrix() + Operator.from_label('+r').data)
param = Parameter("α")
m = np.array([[0, -1j], [1j, 0]])
op7 = MatrixOp(m, param)
np.testing.assert_array_equal(op7.to_matrix(), m * param)
param = Parameter("β")
op8 = PauliOp(primitive=Pauli(label="Y"), coeff=param)
np.testing.assert_array_equal(op8.to_matrix(), m * param)
param = Parameter("γ")
qc = QuantumCircuit(1)
qc.h(0)
op9 = CircuitOp(qc, coeff=param)
m = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
np.testing.assert_array_equal(op9.to_matrix(), m * param)
def test_circuit_op_to_matrix(self):
""" test CircuitOp.to_matrix """
qc = QuantumCircuit(1)
qc.rz(1.0, 0)
qcop = CircuitOp(qc)
np.testing.assert_array_almost_equal(
qcop.to_matrix(), scipy.linalg.expm(-0.5j * Z.to_matrix()))
def test_matrix_to_instruction(self):
"""Test MatrixOp.to_instruction yields an Instruction object."""
matop = (H ^ 3).to_matrix_op()
with self.subTest('assert to_instruction returns Instruction'):
self.assertIsInstance(matop.to_instruction(), Instruction)
matop = ((H ^ 3) + (Z ^ 3)).to_matrix_op()
with self.subTest('matrix operator is not unitary'):
with self.assertRaises(ExtensionError):
matop.to_instruction()
def test_adjoint(self):
""" adjoint test """
gnarly_op = 3 * (H ^ I ^ Y).compose(X ^ X ^ Z).tensor(T ^ Z) + \
PrimitiveOp(Operator.from_label('+r0IX').data)
np.testing.assert_array_almost_equal(np.conj(np.transpose(gnarly_op.to_matrix())),
gnarly_op.adjoint().to_matrix())
def test_primitive_strings(self):
""" get primitives test """
self.assertEqual(X.primitive_strings(), {'Pauli'})
gnarly_op = 3 * (H ^ I ^ Y).compose(X ^ X ^ Z).tensor(T ^ Z) + \
PrimitiveOp(Operator.from_label('+r0IX').data)
self.assertEqual(gnarly_op.primitive_strings(), {'QuantumCircuit', 'Matrix'})
def test_to_pauli_op(self):
""" Test to_pauli_op method """
gnarly_op = 3 * (H ^ I ^ Y).compose(X ^ X ^ Z).tensor(T ^ Z) + \
PrimitiveOp(Operator.from_label('+r0IX').data)
mat_op = gnarly_op.to_matrix_op()
pauli_op = gnarly_op.to_pauli_op()
self.assertIsInstance(pauli_op, SummedOp)
for p in pauli_op:
self.assertIsInstance(p, PauliOp)
np.testing.assert_array_almost_equal(mat_op.to_matrix(), pauli_op.to_matrix())
def test_circuit_permute(self):
r""" Test the CircuitOp's .permute method """
perm = range(7)[::-1]
c_op = (((CX ^ 3) ^ X) @
(H ^ 7) @
(X ^ Y ^ Z ^ I ^ X ^ X ^ X) @
(Y ^ (CX ^ 3)) @
(X ^ Y ^ Z ^ I ^ X ^ X ^ X))
c_op_perm = c_op.permute(perm)
self.assertNotEqual(c_op, c_op_perm)
c_op_id = c_op_perm.permute(perm)
self.assertEqual(c_op, c_op_id)
def test_summed_op_reduce(self):
"""Test SummedOp"""
sum_op = (X ^ X * 2) + (Y ^ Y) # type: PauliSumOp
sum_op = sum_op.to_pauli_op() # type: SummedOp[PauliOp]
with self.subTest('SummedOp test 1'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [2, 1])
sum_op = (X ^ X * 2) + (Y ^ Y)
sum_op += Y ^ Y
sum_op = sum_op.to_pauli_op() # type: SummedOp[PauliOp]
with self.subTest('SummedOp test 2-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [2, 1, 1])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 2-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [2, 2])
sum_op = (X ^ X * 2) + (Y ^ Y)
sum_op += (Y ^ Y) + (X ^ X * 2)
sum_op = sum_op.to_pauli_op() # type: SummedOp[PauliOp]
with self.subTest('SummedOp test 3-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'YY', 'XX'])
self.assertListEqual([op.coeff for op in sum_op], [2, 1, 1, 2])
sum_op = sum_op.reduce().to_pauli_op()
with self.subTest('SummedOp test 3-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2])
sum_op = SummedOp([X ^ X * 2, Y ^ Y], 2)
with self.subTest('SummedOp test 4-a'):
self.assertEqual(sum_op.coeff, 2)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [2, 1])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 4-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2])
sum_op = SummedOp([X ^ X * 2, Y ^ Y], 2)
sum_op += Y ^ Y
with self.subTest('SummedOp test 5-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2, 1])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 5-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 3])
sum_op = SummedOp([X ^ X * 2, Y ^ Y], 2)
sum_op += ((X ^ X) * 2 + (Y ^ Y)).to_pauli_op()
with self.subTest('SummedOp test 6-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2, 2, 1])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 6-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [6, 3])
sum_op = SummedOp([X ^ X * 2, Y ^ Y], 2)
sum_op += sum_op
with self.subTest('SummedOp test 7-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2, 4, 2])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 7-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [8, 4])
sum_op = SummedOp([X ^ X * 2, Y ^ Y], 2) + SummedOp([X ^ X * 2, Z ^ Z], 3)
with self.subTest('SummedOp test 8-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'XX', 'ZZ'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2, 6, 3])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 8-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'ZZ'])
self.assertListEqual([op.coeff for op in sum_op], [10, 2, 3])
def test_compose_op_of_different_dim(self):
"""
Test if smaller operator expands to correct dim when composed with bigger operator.
Test if PrimitiveOps compose methods are consistent.
"""
# PauliOps of different dim
xy_p = (X ^ Y)
xyz_p = (X ^ Y ^ Z)
pauli_op = xy_p @ xyz_p
expected_result = (I ^ I ^ Z)
self.assertEqual(pauli_op, expected_result)
# MatrixOps of different dim
xy_m = xy_p.to_matrix_op()
xyz_m = xyz_p.to_matrix_op()
matrix_op = xy_m @ xyz_m
self.assertEqual(matrix_op, expected_result.to_matrix_op())
# CircuitOps of different dim
xy_c = xy_p.to_circuit_op()
xyz_c = xyz_p.to_circuit_op()
circuit_op = xy_c @ xyz_c
self.assertTrue(np.array_equal(pauli_op.to_matrix(), matrix_op.to_matrix()))
self.assertTrue(np.allclose(pauli_op.to_matrix(), circuit_op.to_matrix(), rtol=1e-14))
self.assertTrue(np.allclose(matrix_op.to_matrix(), circuit_op.to_matrix(), rtol=1e-14))
def test_permute_on_primitive_op(self):
""" Test if permute methods of PrimitiveOps are consistent and work as expected. """
indices = [1, 2, 4]
# PauliOp
pauli_op = (X ^ Y ^ Z)
permuted_pauli_op = pauli_op.permute(indices)
expected_pauli_op = (X ^ I ^ Y ^ Z ^ I)
self.assertEqual(permuted_pauli_op, expected_pauli_op)
# CircuitOp
circuit_op = pauli_op.to_circuit_op()
permuted_circuit_op = circuit_op.permute(indices)
expected_circuit_op = expected_pauli_op.to_circuit_op()
self.assertEqual(permuted_circuit_op.primitive.__str__(),
expected_circuit_op.primitive.__str__())
# MatrixOp
matrix_op = pauli_op.to_matrix_op()
permuted_matrix_op = matrix_op.permute(indices)
expected_matrix_op = expected_pauli_op.to_matrix_op()
equal = np.allclose(permuted_matrix_op.to_matrix(), expected_matrix_op.to_matrix())
self.assertTrue(equal)
def test_permute_on_list_op(self):
""" Test if ListOp permute method is consistent with PrimitiveOps permute methods. """
op1 = (X ^ Y ^ Z).to_circuit_op()
op2 = (Z ^ X ^ Y)
# ComposedOp
indices = [1, 2, 0]
primitive_op = op1 @ op2
primitive_op_perm = primitive_op.permute(indices) # CircuitOp.permute
composed_op = ComposedOp([op1, op2])
composed_op_perm = composed_op.permute(indices)
# reduce the ListOp to PrimitiveOp
to_primitive = composed_op_perm.oplist[0] @ composed_op_perm.oplist[1]
# compare resulting PrimitiveOps
equal = np.allclose(primitive_op_perm.to_matrix(), to_primitive.to_matrix())
self.assertTrue(equal)
# TensoredOp
indices = [3, 5, 4, 0, 2, 1]
primitive_op = op1 ^ op2
primitive_op_perm = primitive_op.permute(indices)
tensored_op = TensoredOp([op1, op2])
tensored_op_perm = tensored_op.permute(indices)
# reduce the ListOp to PrimitiveOp
composed_oplist = tensored_op_perm.oplist
to_primitive = \
composed_oplist[0] @ (composed_oplist[1].oplist[0] ^ composed_oplist[1].oplist[1]) @ \
composed_oplist[2]
# compare resulting PrimitiveOps
equal = np.allclose(primitive_op_perm.to_matrix(), to_primitive.to_matrix())
self.assertTrue(equal)
# SummedOp
primitive_op = (X ^ Y ^ Z)
summed_op = SummedOp([primitive_op])
indices = [1, 2, 0]
primitive_op_perm = primitive_op.permute(indices) # PauliOp.permute
summed_op_perm = summed_op.permute(indices)
# reduce the ListOp to PrimitiveOp
to_primitive = summed_op_perm.oplist[0] @ primitive_op @ summed_op_perm.oplist[2]
# compare resulting PrimitiveOps
equal = np.allclose(primitive_op_perm.to_matrix(), to_primitive.to_matrix())
self.assertTrue(equal)
def test_expand_on_list_op(self):
""" Test if expanded ListOp has expected num_qubits. """
add_qubits = 3
# ComposedOp
composed_op = ComposedOp([(X ^ Y ^ Z), (H ^ T), (Z ^ X ^ Y ^ Z).to_matrix_op()])
expanded = composed_op._expand_dim(add_qubits)
self.assertEqual(composed_op.num_qubits + add_qubits, expanded.num_qubits)
# TensoredOp
tensored_op = TensoredOp([(X ^ Y), (Z ^ I)])
expanded = tensored_op._expand_dim(add_qubits)
self.assertEqual(tensored_op.num_qubits + add_qubits, expanded.num_qubits)
# SummedOp
summed_op = SummedOp([(X ^ Y), (Z ^ I ^ Z)])
expanded = summed_op._expand_dim(add_qubits)
self.assertEqual(summed_op.num_qubits + add_qubits, expanded.num_qubits)
def test_expand_on_state_fn(self):
""" Test if expanded StateFn has expected num_qubits. """
num_qubits = 3
add_qubits = 2
# case CircuitStateFn, with primitive QuantumCircuit
qc2 = QuantumCircuit(num_qubits)
qc2.cx(0, 1)
cfn = CircuitStateFn(qc2, is_measurement=True)
cfn_exp = cfn._expand_dim(add_qubits)
self.assertEqual(cfn_exp.num_qubits, add_qubits + num_qubits)
# case OperatorStateFn, with OperatorBase primitive, in our case CircuitStateFn
osfn = OperatorStateFn(cfn)
osfn_exp = osfn._expand_dim(add_qubits)
self.assertEqual(osfn_exp.num_qubits, add_qubits + num_qubits)
# case DictStateFn
dsfn = DictStateFn('1'*num_qubits, is_measurement=True)
self.assertEqual(dsfn.num_qubits, num_qubits)
dsfn_exp = dsfn._expand_dim(add_qubits)
self.assertEqual(dsfn_exp.num_qubits, num_qubits + add_qubits)
# case VectorStateFn
vsfn = VectorStateFn(np.ones(2**num_qubits, dtype=complex))
self.assertEqual(vsfn.num_qubits, num_qubits)
vsfn_exp = vsfn._expand_dim(add_qubits)
self.assertEqual(vsfn_exp.num_qubits, num_qubits + add_qubits)
def test_permute_on_state_fn(self):
""" Test if StateFns permute are consistent. """
num_qubits = 4
dim = 2**num_qubits
primitive_list = [1.0/(i+1) for i in range(dim)]
primitive_dict = {format(i, 'b').zfill(num_qubits): 1.0/(i+1) for i in range(dim)}
dict_fn = DictStateFn(primitive=primitive_dict, is_measurement=True)
vec_fn = VectorStateFn(primitive=primitive_list, is_measurement=True)
# check if dict_fn and vec_fn are equivalent
equivalent = np.allclose(dict_fn.to_matrix(), vec_fn.to_matrix())
self.assertTrue(equivalent)
# permute
indices = [2, 3, 0, 1]
permute_dict = dict_fn.permute(indices)
permute_vect = vec_fn.permute(indices)
equivalent = np.allclose(permute_dict.to_matrix(), permute_vect.to_matrix())
self.assertTrue(equivalent)
def test_compose_consistency(self):
"""Test if PrimitiveOp @ ComposedOp is consistent with ComposedOp @ PrimitiveOp."""
# PauliOp
op1 = (X ^ Y ^ Z)
op2 = (X ^ Y ^ Z)
op3 = (X ^ Y ^ Z).to_circuit_op()
comp1 = op1 @ ComposedOp([op2, op3])
comp2 = ComposedOp([op3, op2]) @ op1
self.assertListEqual(comp1.oplist, list(reversed(comp2.oplist)))
# CircitOp
op1 = op1.to_circuit_op()
op2 = op2.to_circuit_op()
op3 = op3.to_matrix_op()
comp1 = op1 @ ComposedOp([op2, op3])
comp2 = ComposedOp([op3, op2]) @ op1
self.assertListEqual(comp1.oplist, list(reversed(comp2.oplist)))
# MatrixOp
op1 = op1.to_matrix_op()
op2 = op2.to_matrix_op()
op3 = op3.to_pauli_op()
comp1 = op1 @ ComposedOp([op2, op3])
comp2 = ComposedOp([op3, op2]) @ op1
self.assertListEqual(comp1.oplist, list(reversed(comp2.oplist)))
    def test_compose_with_indices(self):
        """ Test compose method using its permutation feature.

        Exercises ``compose(..., permutation=..., front=True)`` across
        PrimitiveOps, ListOps and StateFns, checking that the resulting
        operator width (num_qubits) matches the largest permuted index + 1.
        """
        pauli_op = (X ^ Y ^ Z)
        circuit_op = (T ^ H)
        matrix_op = (X ^ Y ^ H ^ T).to_matrix_op()
        evolved_op = EvolvedOp(matrix_op)
        # composition of PrimitiveOps without any permutation keeps the width
        num_qubits = 4
        primitive_op = pauli_op @ circuit_op @ matrix_op
        composed_op = pauli_op @ circuit_op @ evolved_op
        self.assertEqual(primitive_op.num_qubits, num_qubits)
        self.assertEqual(composed_op.num_qubits, num_qubits)
        # with permutation: compose(front=True, permutation=...) must equal
        # permuting the operand explicitly before composing
        num_qubits = 5
        indices = [1, 4]
        permuted_primitive_op = evolved_op @ circuit_op.permute(indices) @ pauli_op @ matrix_op
        composed_primitive_op = \
            evolved_op @ pauli_op.compose(circuit_op, permutation=indices, front=True) @ matrix_op
        self.assertTrue(np.allclose(permuted_primitive_op.to_matrix(),
                                    composed_primitive_op.to_matrix()))
        self.assertEqual(num_qubits, permuted_primitive_op.num_qubits)
        # ListOp: permutation index 5 forces the composite onto 6 qubits
        num_qubits = 6
        tensored_op = TensoredOp([pauli_op, circuit_op])
        summed_op = pauli_op + circuit_op.permute([2, 1])
        composed_op = circuit_op @ evolved_op @ matrix_op
        list_op = summed_op @ composed_op.compose(tensored_op, permutation=[1, 2, 3, 5, 4],
                                                  front=True)
        self.assertEqual(num_qubits, list_op.num_qubits)
        # StateFn composition without permutation keeps the original width
        num_qubits = 4
        circuit_fn = CircuitStateFn(primitive=circuit_op.primitive, is_measurement=True)
        operator_fn = OperatorStateFn(primitive=circuit_op ^ circuit_op, is_measurement=True)
        no_perm_op = circuit_fn @ operator_fn
        self.assertEqual(no_perm_op.num_qubits, num_qubits)
        # with permutation the width grows to max(indices) + 1
        indices = [0, 4]
        perm_op = operator_fn.compose(circuit_fn, permutation=indices, front=True)
        self.assertEqual(perm_op.num_qubits, max(indices) + 1)
        # StateFn chain composition: widest member (op_state_fn) sets the width
        num_qubits = 3
        dim = 2**num_qubits
        vec = [1.0/(i+1) for i in range(dim)]
        dic = {format(i, 'b').zfill(num_qubits): 1.0/(i+1) for i in range(dim)}
        is_measurement = True
        op_state_fn = OperatorStateFn(matrix_op, is_measurement=is_measurement)  # num_qubit = 4
        vec_state_fn = VectorStateFn(vec, is_measurement=is_measurement)  # 3
        dic_state_fn = DictStateFn(dic, is_measurement=is_measurement)  # 3
        circ_state_fn = CircuitStateFn(circuit_op.to_circuit(), is_measurement=is_measurement)  # 2
        composed_op = op_state_fn @ vec_state_fn @ dic_state_fn @ circ_state_fn
        self.assertEqual(composed_op.num_qubits, op_state_fn.num_qubits)
        # with permutation: index 6 widens the composition to 7 qubits
        perm = [2, 4, 6]
        composed = \
            op_state_fn @ dic_state_fn.compose(vec_state_fn, permutation=perm, front=True) @ \
            circ_state_fn
        self.assertEqual(composed.num_qubits, max(perm) + 1)
    def test_summed_op_equals(self):
        """Test corner cases of SummedOp's equals function.

        Equality must hold across coefficients, operand order, mixed
        primitive representations (Pauli / circuit / matrix), and
        aggregation of repeated summands.
        """
        with self.subTest('multiplicative factor'):
            self.assertEqual(2 * X, X + X)

        with self.subTest('commutative'):
            self.assertEqual(X + Z, Z + X)

        with self.subTest('circuit and paulis'):
            z = CircuitOp(ZGate())
            self.assertEqual(Z + z, z + Z)

        with self.subTest('matrix op and paulis'):
            z = MatrixOp([[1, 0], [0, -1]])
            self.assertEqual(Z + z, z + Z)

        with self.subTest('matrix multiplicative'):
            z = MatrixOp([[1, 0], [0, -1]])
            self.assertEqual(2 * z, z + z)

        with self.subTest('parameter coefficients'):
            expr = Parameter('theta')
            z = MatrixOp([[1, 0], [0, -1]])
            # NOTE(review): both sides are constructed identically, so this only
            # checks reflexive equality with a parameterized coefficient — it can
            # never fail for a different reason. Confirm whether a comparison of
            # two *separately built* expr * z instances was intended.
            self.assertEqual(expr * z, expr * z)

        with self.subTest('different coefficient types'):
            expr = Parameter('theta')
            z = MatrixOp([[1, 0], [0, -1]])
            self.assertNotEqual(expr * z, 2 * z)

        with self.subTest('additions aggregation'):
            z = MatrixOp([[1, 0], [0, -1]])
            a = z + z + Z
            b = 2 * z + Z
            c = z + Z + z
            self.assertEqual(a, b)
            self.assertEqual(b, c)
            self.assertEqual(a, c)
def test_circuit_compose_register_independent(self):
"""Test that CircuitOp uses combines circuits independent of the register.
I.e. that is uses ``QuantumCircuit.compose`` over ``combine`` or ``extend``.
"""
op = Z ^ 2
qr = QuantumRegister(2, 'my_qr')
circuit = QuantumCircuit(qr)
composed = op.compose(CircuitOp(circuit))
self.assertEqual(composed.num_qubits, 2)
def test_matrix_op_conversions(self):
"""Test to reveal QiskitError when to_instruction or to_circuit method is called on
parametrized matrix op."""
m = np.array([[0, 0, 1, 0], [0, 0, 0, -1], [1, 0, 0, 0], [0, -1, 0, 0]])
matrix_op = MatrixOp(m, Parameter('beta'))
for method in ['to_instruction', 'to_circuit']:
with self.subTest(method):
# QiskitError: multiplication of Operator with ParameterExpression isn't implemented
self.assertRaises(QiskitError, getattr(matrix_op, method))
    def test_list_op_to_circuit(self):
        """ Test if unitary ListOps transpile to circuit.

        Builds the same composite unitary twice — once with plain numpy
        matrix algebra, once with PrimitiveOps — and checks that
        ``to_circuit()`` of the op expression is equivalent to the
        numpy-built operator.
        """
        # generate unitary matrices of dimension 2,4,8; seed fixed for reproducibility
        np.random.seed(233423)
        u2 = unitary_group.rvs(2)
        u4 = unitary_group.rvs(4)
        u8 = unitary_group.rvs(8)
        # pauli matrices as numpy.arrays
        x = np.array([[0.0, 1.0], [1.0, 0.0]])
        y = np.array([[0.0, -1.0j], [1.0j, 0.0]])
        z = np.array([[1.0, 0.0], [0.0, -1.0]])
        # create MatrixOp and CircuitOp out of matrices
        op2 = MatrixOp(u2)
        op4 = MatrixOp(u4)
        op8 = MatrixOp(u8)
        c2 = op2.to_circuit_op()
        # reference algorithm using only matrix operations on numpy.arrays:
        # (X⊗u4) · (Z⊗u2⊗Y) · u8, then tensored with u2
        xu4 = np.kron(x, u4)
        zc2 = np.kron(z, u2)
        zc2y = np.kron(zc2, y)
        matrix = np.matmul(xu4, zc2y)
        matrix = np.matmul(matrix, u8)
        matrix = np.kron(matrix, u2)
        operator = Operator(matrix)
        # same algorithm as above, but using PrimitiveOps (^ is tensor, @ is compose)
        list_op = ((X ^ op4) @ (Z ^ c2 ^ Y) @ op8) ^ op2
        circuit = list_op.to_circuit()
        # verify that ListOp.to_circuit() outputs the correct quantum circuit
        self.assertTrue(operator.equiv(circuit), "ListOp.to_circuit() outputs wrong circuit!")
    def test_composed_op_to_circuit(self):
        """
        Test if unitary ComposedOp transpile to circuit and represents expected operator.
        Test if to_circuit on non-unitary ListOp raises exception.

        m1 and m2 are individually non-unitary, so tensoring either with
        Paulis must fail to transpile; their sum, however, is unitary and
        must transpile to a circuit equivalent to the numpy-built matrix.
        """
        x = np.array([[0.0, 1.0], [1.0, 0.0]])  # Pauli X as numpy array
        y = np.array([[0.0, -1.0j], [1.0j, 0.0]])  # Pauli Y as numpy array
        m1 = np.array([[0, 0, 1, 0], [0, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0]])  # non-unitary
        m2 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, -1, 0, 0]])  # non-unitary
        m_op1 = MatrixOp(m1)
        m_op2 = MatrixOp(m2)
        pm1 = (X ^ Y) ^ m_op1  # non-unitary TensoredOp
        pm2 = (X ^ Y) ^ m_op2  # non-unitary TensoredOp
        # non-unitary operators cannot be converted to a circuit
        self.assertRaises(ExtensionError, pm1.to_circuit)
        self.assertRaises(ExtensionError, pm2.to_circuit)
        summed_op = pm1 + pm2  # unitary SummedOp([TensoredOp, TensoredOp])
        circuit = summed_op.to_circuit()  # should transpile without any exception
        # same algorithm that leads to summed_op above, but using only arrays and matrix operations
        unitary = np.kron(np.kron(x, y), m1 + m2)
        self.assertTrue(Operator(unitary).equiv(circuit))
def test_op_to_circuit_with_parameters(self):
"""On parametrized SummedOp, to_matrix_op returns ListOp, instead of MatrixOp. To avoid
the infinite recursion, AquaError is raised. """
m1 = np.array([[0, 0, 1, 0], [0, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0]]) # non-unitary
m2 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, -1, 0, 0]]) # non-unitary
op1_with_param = MatrixOp(m1, Parameter('alpha')) # non-unitary
op2_with_param = MatrixOp(m2, Parameter('beta')) # non-unitary
summed_op_with_param = op1_with_param + op2_with_param # unitary
self.assertRaises(AquaError, summed_op_with_param.to_circuit) # should raise Aqua error
def test_permute_list_op_with_inconsistent_num_qubits(self):
"""Test if permute raises error if ListOp contains operators with different num_qubits."""
list_op = ListOp([X, X ^ X])
self.assertRaises(AquaError, list_op.permute, [0, 1])
@data(Z, CircuitOp(ZGate()), MatrixOp([[1, 0], [0, -1]]))
def test_op_indent(self, op):
"""Test that indentation correctly adds INDENTATION at the beginning of each line"""
initial_str = str(op)
indented_str = op._indent(initial_str)
starts_with_indent = indented_str.startswith(op.INDENTATION)
self.assertTrue(starts_with_indent)
indented_str_content = (
indented_str[len(op.INDENTATION):]
).split("\n{}".format(op.INDENTATION))
self.assertListEqual(indented_str_content, initial_str.split("\n"))
def test_composed_op_immutable_under_eval(self):
"""Test ``ComposedOp.eval`` does not change the operator instance."""
op = 2 * ComposedOp([X])
_ = op.eval()
# previous bug: after op.eval(), op was 2 * ComposedOp([2 * X])
self.assertEqual(op, 2 * ComposedOp([X]))
def test_op_parameters(self):
"""Test that Parameters are stored correctly"""
phi = Parameter('φ')
theta = ParameterVector(name='θ',
length=2)
qc = QuantumCircuit(2)
qc.rz(phi, 0)
qc.rz(phi, 1)
for i in range(2):
qc.rx(theta[i], i)
qc.h(0)
qc.x(1)
l = Parameter('λ')
op = PrimitiveOp(qc,
coeff=l)
params = set([phi, l, *theta.params])
self.assertEqual(params, op.parameters)
self.assertEqual(params, StateFn(op).parameters)
self.assertEqual(params, StateFn(qc, coeff=l).parameters)
def test_list_op_parameters(self):
"""Test that Parameters are stored correctly in a List Operator"""
lam = Parameter('λ')
phi = Parameter('φ')
omega = Parameter('ω')
mat_op = PrimitiveOp([[0, 1],
[1, 0]],
coeff=omega)
qc = QuantumCircuit(1)
qc.rx(phi, 0)
qc_op = PrimitiveOp(qc)
op1 = SummedOp([mat_op, qc_op])
params = [phi, omega]
self.assertEqual(op1.parameters, set(params))
# check list nesting case
op2 = PrimitiveOp([[1, 0],
[0, -1]],
coeff=lam)
list_op = ListOp([op1, op2])
params.append(lam)
self.assertEqual(list_op.parameters, set(params))
    # One fixture per StateFn flavor, all representing the |0> state.
    @data(VectorStateFn([1, 0]),
          DictStateFn({'0': 1}),
          CircuitStateFn(QuantumCircuit(1)),
          OperatorStateFn(I),
          OperatorStateFn(MatrixOp([[1, 0], [0, 1]])),
          OperatorStateFn(CircuitOp(QuantumCircuit(1))))
    def test_statefn_eval(self, op):
        """Test calling eval on StateFn returns the statevector."""
        # Every fixture above encodes |0>, so eval() must yield Statevector([1, 0]).
        expected = Statevector([1, 0])
        self.assertEqual(op.eval().primitive, expected)
def test_to_circuit_op(self):
"""Test to_circuit_op method."""
vector = np.array([2, 2])
vsfn = VectorStateFn([1, 1], coeff=2)
dsfn = DictStateFn({'0': 1, '1': 1}, coeff=2)
for sfn in [vsfn, dsfn]:
np.testing.assert_array_almost_equal(
sfn.to_circuit_op().eval().primitive.data, vector
)
def test_invalid_primitive(self):
"""Test invalid MatrixOp construction"""
msg = "MatrixOp can only be instantiated with " \
"['list', 'ndarray', 'spmatrix', 'Operator'], not "
with self.assertRaises(TypeError) as cm:
_ = MatrixOp('invalid')
self.assertEqual(str(cm.exception), msg + "'str'")
with self.assertRaises(TypeError) as cm:
_ = MatrixOp(MatrixOperator(np.eye(2)))
self.assertEqual(str(cm.exception), msg + "'MatrixOperator'")
with self.assertRaises(TypeError) as cm:
_ = MatrixOp(None)
self.assertEqual(str(cm.exception), msg + "'NoneType'")
with self.assertRaises(TypeError) as cm:
_ = MatrixOp(2.0)
self.assertEqual(str(cm.exception), msg + "'float'")
    def test_summedop_equals(self):
        """Test SummedOp.equals

        Sums built from the same operand list must compare equal (also under
        scalar multiples), while sums differing in any operand or in nesting
        must compare unequal.
        """
        ops = [Z, CircuitOp(ZGate()), MatrixOp([[1, 0], [0, -1]]), Zero, Minus]
        sum_op = sum(ops + [ListOp(ops)])
        self.assertEqual(sum_op, sum_op)
        self.assertEqual(sum_op + sum_op, 2 * sum_op)
        self.assertEqual(sum_op + sum_op + sum_op, 3 * sum_op)

        # ops2 differs from ops only in the MatrixOp sign.
        ops2 = [Z, CircuitOp(ZGate()), MatrixOp([[1, 0], [0, 1]]), Zero, Minus]
        # NOTE(review): this nests ListOp(ops) — not ListOp(ops2) — so sum_op2
        # shares the nested ListOp with sum_op. Confirm whether ListOp(ops2)
        # was intended; the inequality below holds either way.
        sum_op2 = sum(ops2 + [ListOp(ops)])
        self.assertNotEqual(sum_op, sum_op2)
        self.assertEqual(sum_op2, sum_op2)

        # sum_op3 lacks the nested ListOp entirely.
        sum_op3 = sum(ops)
        self.assertNotEqual(sum_op, sum_op3)
        self.assertNotEqual(sum_op2, sum_op3)
        self.assertEqual(sum_op3, sum_op3)
class TestOpMethods(QiskitAquaTestCase):
    """Basic method tests."""

    def test_listop_num_qubits(self):
        """Test that ListOp.num_qubits checks that all operators have the same number of qubits."""
        op = ListOp([X ^ Y, Y ^ Z])
        with self.subTest('All operators have the same numbers of qubits'):
            self.assertEqual(op.num_qubits, 2)

        # A ListOp mixing 2-qubit and 1-qubit members has no well-defined width.
        op = ListOp([X ^ Y, Y])
        with self.subTest('Operators have different numbers of qubits'):
            # Even reading num_qubits must raise, hence the pointless statement.
            with self.assertRaises(ValueError):
                op.num_qubits  # pylint: disable=pointless-statement

            # Composition with the inconsistent ListOp must fail the same way.
            with self.assertRaises(ValueError):
                X @ op  # pylint: disable=pointless-statement
@ddt
class TestListOpMethods(QiskitAquaTestCase):
    """Test ListOp accessing methods"""

    @data(ListOp, SummedOp, ComposedOp, TensoredOp)
    def test_indexing(self, list_op_type):
        """Integer indexing unwraps a member; slicing keeps the ListOp subtype and coeff."""
        coefficient = 3 + .2j
        operators = list_op_type([X, Y, Z, I], coeff=coefficient)

        # Indexing with an int yields the bare member, not a ListOp.
        element = operators[1]
        self.assertIsInstance(element, OperatorBase)
        self.assertNotIsInstance(element, ListOp)

        # A one-element slice stays a list-type operator of the same class.
        middle = operators[1:2]
        self.assertIsInstance(middle, list_op_type)
        self.assertEqual(len(middle), 1)
        self.assertEqual(middle[0], Y)

        # A strided slice also preserves the class and selects X and Z.
        strided = operators[::2]
        self.assertIsInstance(strided, list_op_type)
        self.assertEqual(len(strided), 2)
        self.assertEqual(strided[0], X)
        self.assertEqual(strided[1], Z)

        # Slicing carries the original coefficient along.
        self.assertEqual(middle.coeff, coefficient)
        self.assertEqual(strided.coeff, coefficient)
class TestListOpComboFn(QiskitAquaTestCase):
    """Test combo fn is propagated.

    Every transformation of a ListOp (conversion, scalar multiplication,
    traversal, adjoint, reduction) must keep the custom ``combo_fn`` intact.
    """

    def setUp(self):
        super().setUp()
        # Elementwise-square combo function used as a recognizable marker.
        self.combo_fn = lambda x: [x_i ** 2 for x_i in x]
        self.listop = ListOp([X], combo_fn=self.combo_fn)

    def assertComboFnPreserved(self, processed_op):
        """Assert the quadratic combo_fn is preserved."""
        # If the combo_fn survived, it squares each element of a sample input.
        x = [1, 2, 3]
        self.assertListEqual(processed_op.combo_fn(x), self.combo_fn(x))

    def test_at_conversion(self):
        """Test after conversion the combo_fn is preserved."""
        for method in ['to_matrix_op', 'to_pauli_op', 'to_circuit_op']:
            with self.subTest(method):
                converted = getattr(self.listop, method)()
                self.assertComboFnPreserved(converted)

    def test_after_mul(self):
        """Test after multiplication the combo_fn is preserved."""
        self.assertComboFnPreserved(2 * self.listop)

    def test_at_traverse(self):
        """Test after traversing the combo_fn is preserved."""
        def traverse_fn(op):
            return -op

        traversed = self.listop.traverse(traverse_fn)
        self.assertComboFnPreserved(traversed)

    def test_after_adjoint(self):
        """Test after traversing the combo_fn is preserved."""
        self.assertComboFnPreserved(self.listop.adjoint())

    def test_after_reduce(self):
        """Test after reducing the combo_fn is preserved."""
        self.assertComboFnPreserved(self.listop.reduce())
# Allow running this test module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.sqrt",
"qiskit.circuit.QuantumCircuit",
"qiskit.aqua.operators.X.primitive.to_matrix",
"qiskit.circuit.ParameterVector",
"qiskit.circuit.Parameter",
"numpy.array",
"unittest.main",
"qiskit.aqua.operators.ComposedOp",
"qiskit.aqua.operators.X.primitive_strings",
"qiskit.aqua.operators.X.eval... | [((39404, 39450), 'ddt.data', 'data', (['ListOp', 'SummedOp', 'ComposedOp', 'TensoredOp'], {}), '(ListOp, SummedOp, ComposedOp, TensoredOp)\n', (39408, 39450), False, 'from ddt import ddt, data\n'), ((41858, 41873), 'unittest.main', 'unittest.main', ([], {}), '()\n', (41871, 41873), False, 'import unittest\n'), ((4443, 4484), 'itertools.product', 'itertools.product', (['full_basis', 'full_basis'], {}), '(full_basis, full_basis)\n', (4460, 4484), False, 'import itertools\n'), ((5166, 5207), 'itertools.product', 'itertools.product', (['full_basis', 'full_basis'], {}), '(full_basis, full_basis)\n', (5183, 5207), False, 'import itertools\n'), ((5546, 5563), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['(2)'], {}), '(2)\n', (5560, 5563), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((6193, 6216), 'qiskit.aqua.operators.X.primitive.to_matrix', 'X.primitive.to_matrix', ([], {}), '()\n', (6214, 6216), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((6233, 6256), 'qiskit.aqua.operators.Y.primitive.to_matrix', 'Y.primitive.to_matrix', ([], {}), '()\n', (6254, 6256), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((6273, 6285), 'numpy.eye', 'np.eye', (['(2)', '(2)'], {}), '(2, 2)\n', (6279, 6285), True, 'import numpy as np\n'), ((6584, 6629), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['hi', 'hi2'], {}), '(hi, hi2)\n', (6620, 6629), True, 'import numpy as np\n'), ((6638, 6684), 
'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['hi2', 'hi3'], {}), '(hi2, hi3)\n', (6674, 6684), True, 'import numpy as np\n'), ((6824, 6869), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['xy', 'xy2'], {}), '(xy, xy2)\n', (6860, 6869), True, 'import numpy as np\n'), ((6878, 6924), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['xy2', 'xy3'], {}), '(xy2, xy3)\n', (6914, 6924), True, 'import numpy as np\n'), ((7020, 7045), 'qiskit.quantum_info.Operator.from_label', 'Operator.from_label', (['"""+r"""'], {}), "('+r')\n", (7039, 7045), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((9063, 9077), 'qiskit.circuit.Parameter', 'Parameter', (['"""α"""'], {}), "('α')\n", (9072, 9077), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((9090, 9123), 'numpy.array', 'np.array', (['[[0, -1.0j], [1.0j, 0]]'], {}), '([[0, -1.0j], [1.0j, 0]])\n', (9098, 9123), True, 'import numpy as np\n'), ((9134, 9152), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['m', 'param'], {}), '(m, param)\n', (9142, 9152), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((9236, 9250), 'qiskit.circuit.Parameter', 'Parameter', (['"""β"""'], {}), "('β')\n", (9245, 9250), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((9397, 9411), 'qiskit.circuit.Parameter', 'Parameter', (['"""γ"""'], {}), "('γ')\n", (9406, 9411), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((9425, 9442), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['(1)'], {}), '(1)\n', (9439, 9442), 
False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((9473, 9499), 'qiskit.aqua.operators.CircuitOp', 'CircuitOp', (['qc'], {'coeff': 'param'}), '(qc, coeff=param)\n', (9482, 9499), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((9715, 9732), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['(1)'], {}), '(1)\n', (9729, 9732), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((9770, 9783), 'qiskit.aqua.operators.CircuitOp', 'CircuitOp', (['qc'], {}), '(qc)\n', (9779, 9783), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((13931, 13962), 'qiskit.aqua.operators.SummedOp', 'SummedOp', (['[X ^ X * 2, Y ^ Y]', '(2)'], {}), '([X ^ X * 2, Y ^ Y], 2)\n', (13939, 13962), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((14524, 14555), 'qiskit.aqua.operators.SummedOp', 'SummedOp', (['[X ^ X * 2, Y ^ Y]', '(2)'], {}), '([X ^ X * 2, Y ^ Y], 2)\n', (14532, 14555), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((15150, 15181), 'qiskit.aqua.operators.SummedOp', 'SummedOp', (['[X ^ X * 2, Y ^ Y]', '(2)'], {}), '([X 
^ X * 2, Y ^ Y], 2)\n', (15158, 15181), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((15817, 15848), 'qiskit.aqua.operators.SummedOp', 'SummedOp', (['[X ^ X * 2, Y ^ Y]', '(2)'], {}), '([X ^ X * 2, Y ^ Y], 2)\n', (15825, 15848), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((19562, 19584), 'qiskit.aqua.operators.ComposedOp', 'ComposedOp', (['[op1, op2]'], {}), '([op1, op2])\n', (19572, 19584), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((20094, 20116), 'qiskit.aqua.operators.TensoredOp', 'TensoredOp', (['[op1, op2]'], {}), '([op1, op2])\n', (20104, 20116), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((20655, 20679), 'qiskit.aqua.operators.SummedOp', 'SummedOp', (['[primitive_op]'], {}), '([primitive_op])\n', (20663, 20679), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((21550, 21576), 'qiskit.aqua.operators.TensoredOp', 'TensoredOp', (['[X ^ Y, Z ^ I]'], {}), '([X ^ Y, Z ^ I])\n', (21560, 21576), 
False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((21759, 21787), 'qiskit.aqua.operators.SummedOp', 'SummedOp', (['[X ^ Y, Z ^ I ^ Z]'], {}), '([X ^ Y, Z ^ I ^ Z])\n', (21767, 21787), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((22154, 22180), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['num_qubits'], {}), '(num_qubits)\n', (22168, 22180), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((22217, 22257), 'qiskit.aqua.operators.CircuitStateFn', 'CircuitStateFn', (['qc2'], {'is_measurement': '(True)'}), '(qc2, is_measurement=True)\n', (22231, 22257), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((22479, 22499), 'qiskit.aqua.operators.OperatorStateFn', 'OperatorStateFn', (['cfn'], {}), '(cfn)\n', (22494, 22499), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((22663, 22713), 'qiskit.aqua.operators.DictStateFn', 'DictStateFn', (["('1' * num_qubits)"], {'is_measurement': '(True)'}), "('1' * num_qubits, is_measurement=True)\n", (22674, 22713), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, 
EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((23475, 23533), 'qiskit.aqua.operators.DictStateFn', 'DictStateFn', ([], {'primitive': 'primitive_dict', 'is_measurement': '(True)'}), '(primitive=primitive_dict, is_measurement=True)\n', (23486, 23533), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((23551, 23611), 'qiskit.aqua.operators.VectorStateFn', 'VectorStateFn', ([], {'primitive': 'primitive_list', 'is_measurement': '(True)'}), '(primitive=primitive_list, is_measurement=True)\n', (23564, 23611), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((25261, 25281), 'qiskit.aqua.operators.EvolvedOp', 'EvolvedOp', (['matrix_op'], {}), '(matrix_op)\n', (25270, 25281), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((26164, 26198), 'qiskit.aqua.operators.TensoredOp', 'TensoredOp', (['[pauli_op, circuit_op]'], {}), '([pauli_op, circuit_op])\n', (26174, 26198), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((26572, 26639), 'qiskit.aqua.operators.CircuitStateFn', 'CircuitStateFn', ([], {'primitive': 'circuit_op.primitive', 
'is_measurement': '(True)'}), '(primitive=circuit_op.primitive, is_measurement=True)\n', (26586, 26639), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((26662, 26733), 'qiskit.aqua.operators.OperatorStateFn', 'OperatorStateFn', ([], {'primitive': '(circuit_op ^ circuit_op)', 'is_measurement': '(True)'}), '(primitive=circuit_op ^ circuit_op, is_measurement=True)\n', (26677, 26733), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((27262, 27319), 'qiskit.aqua.operators.OperatorStateFn', 'OperatorStateFn', (['matrix_op'], {'is_measurement': 'is_measurement'}), '(matrix_op, is_measurement=is_measurement)\n', (27277, 27319), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((27360, 27409), 'qiskit.aqua.operators.VectorStateFn', 'VectorStateFn', (['vec'], {'is_measurement': 'is_measurement'}), '(vec, is_measurement=is_measurement)\n', (27373, 27409), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((27438, 27485), 'qiskit.aqua.operators.DictStateFn', 'DictStateFn', (['dic'], {'is_measurement': 'is_measurement'}), '(dic, is_measurement=is_measurement)\n', (27449, 27485), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, 
Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((29616, 29643), 'qiskit.circuit.QuantumRegister', 'QuantumRegister', (['(2)', '"""my_qr"""'], {}), "(2, 'my_qr')\n", (29631, 29643), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((29662, 29680), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['qr'], {}), '(qr)\n', (29676, 29680), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((29963, 30031), 'numpy.array', 'np.array', (['[[0, 0, 1, 0], [0, 0, 0, -1], [1, 0, 0, 0], [0, -1, 0, 0]]'], {}), '([[0, 0, 1, 0], [0, 0, 0, -1], [1, 0, 0, 0], [0, -1, 0, 0]])\n', (29971, 30031), True, 'import numpy as np\n'), ((30534, 30556), 'numpy.random.seed', 'np.random.seed', (['(233423)'], {}), '(233423)\n', (30548, 30556), True, 'import numpy as np\n'), ((30570, 30590), 'scipy.stats.unitary_group.rvs', 'unitary_group.rvs', (['(2)'], {}), '(2)\n', (30587, 30590), False, 'from scipy.stats import unitary_group\n'), ((30604, 30624), 'scipy.stats.unitary_group.rvs', 'unitary_group.rvs', (['(4)'], {}), '(4)\n', (30621, 30624), False, 'from scipy.stats import unitary_group\n'), ((30638, 30658), 'scipy.stats.unitary_group.rvs', 'unitary_group.rvs', (['(8)'], {}), '(8)\n', (30655, 30658), False, 'from scipy.stats import unitary_group\n'), ((30713, 30747), 'numpy.array', 'np.array', (['[[0.0, 1.0], [1.0, 0.0]]'], {}), '([[0.0, 1.0], [1.0, 0.0]])\n', (30721, 30747), True, 'import numpy as np\n'), ((30760, 30797), 'numpy.array', 'np.array', (['[[0.0, -1.0j], [1.0j, 0.0]]'], {}), '([[0.0, -1.0j], [1.0j, 0.0]])\n', (30768, 30797), True, 'import numpy as np\n'), ((30810, 30845), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, -1.0]]'], {}), '([[1.0, 0.0], [0.0, -1.0]])\n', (30818, 30845), True, 'import numpy as np\n'), 
((30917, 30929), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['u2'], {}), '(u2)\n', (30925, 30929), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((30944, 30956), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['u4'], {}), '(u4)\n', (30952, 30956), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((30971, 30983), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['u8'], {}), '(u8)\n', (30979, 30983), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((31097, 31111), 'numpy.kron', 'np.kron', (['x', 'u4'], {}), '(x, u4)\n', (31104, 31111), True, 'import numpy as np\n'), ((31126, 31140), 'numpy.kron', 'np.kron', (['z', 'u2'], {}), '(z, u2)\n', (31133, 31140), True, 'import numpy as np\n'), ((31156, 31171), 'numpy.kron', 'np.kron', (['zc2', 'y'], {}), '(zc2, y)\n', (31163, 31171), True, 'import numpy as np\n'), ((31189, 31209), 'numpy.matmul', 'np.matmul', (['xu4', 'zc2y'], {}), '(xu4, zc2y)\n', (31198, 31209), True, 'import numpy as np\n'), ((31227, 31248), 'numpy.matmul', 'np.matmul', (['matrix', 'u8'], {}), '(matrix, u8)\n', (31236, 31248), True, 'import numpy as np\n'), ((31266, 31285), 'numpy.kron', 'np.kron', (['matrix', 'u2'], {}), '(matrix, u2)\n', (31273, 31285), True, 'import numpy as np\n'), ((31305, 31321), 'qiskit.quantum_info.Operator', 'Operator', (['matrix'], {}), '(matrix)\n', (31313, 31321), False, 'from qiskit.quantum_info 
import Operator, Pauli, Statevector\n'), ((31885, 31919), 'numpy.array', 'np.array', (['[[0.0, 1.0], [1.0, 0.0]]'], {}), '([[0.0, 1.0], [1.0, 0.0]])\n', (31893, 31919), True, 'import numpy as np\n'), ((31958, 31995), 'numpy.array', 'np.array', (['[[0.0, -1.0j], [1.0j, 0.0]]'], {}), '([[0.0, -1.0j], [1.0j, 0.0]])\n', (31966, 31995), True, 'import numpy as np\n'), ((32036, 32103), 'numpy.array', 'np.array', (['[[0, 0, 1, 0], [0, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 1, 0], [0, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (32044, 32103), True, 'import numpy as np\n'), ((32132, 32199), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, -1, 0, 0]]'], {}), '([[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, -1, 0, 0]])\n', (32140, 32199), True, 'import numpy as np\n'), ((32232, 32244), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['m1'], {}), '(m1)\n', (32240, 32244), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((32261, 32273), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['m2'], {}), '(m2)\n', (32269, 32273), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((33091, 33158), 'numpy.array', 'np.array', (['[[0, 0, 1, 0], [0, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 1, 0], [0, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (33099, 33158), True, 'import numpy as np\n'), ((33187, 33254), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, -1, 0, 0]]'], {}), '([[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, -1, 0, 0]])\n', (33195, 33254), True, 'import numpy as np\n'), 
((33771, 33789), 'qiskit.aqua.operators.ListOp', 'ListOp', (['[X, X ^ X]'], {}), '([X, X ^ X])\n', (33777, 33789), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((33886, 33913), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (33894, 33913), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((34849, 34863), 'qiskit.circuit.Parameter', 'Parameter', (['"""φ"""'], {}), "('φ')\n", (34858, 34863), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((34880, 34915), 'qiskit.circuit.ParameterVector', 'ParameterVector', ([], {'name': '"""θ"""', 'length': '(2)'}), "(name='θ', length=2)\n", (34895, 34915), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((34962, 34979), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['(2)'], {}), '(2)\n', (34976, 34979), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((35127, 35141), 'qiskit.circuit.Parameter', 'Parameter', (['"""λ"""'], {}), "('λ')\n", (35136, 35141), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((35155, 35179), 'qiskit.aqua.operators.PrimitiveOp', 'PrimitiveOp', (['qc'], {'coeff': 'l'}), '(qc, coeff=l)\n', (35166, 35179), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, 
DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((35553, 35567), 'qiskit.circuit.Parameter', 'Parameter', (['"""λ"""'], {}), "('λ')\n", (35562, 35567), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((35582, 35596), 'qiskit.circuit.Parameter', 'Parameter', (['"""φ"""'], {}), "('φ')\n", (35591, 35596), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((35613, 35627), 'qiskit.circuit.Parameter', 'Parameter', (['"""ω"""'], {}), "('ω')\n", (35622, 35627), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((35646, 35688), 'qiskit.aqua.operators.PrimitiveOp', 'PrimitiveOp', (['[[0, 1], [1, 0]]'], {'coeff': 'omega'}), '([[0, 1], [1, 0]], coeff=omega)\n', (35657, 35688), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((35762, 35779), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['(1)'], {}), '(1)\n', (35776, 35779), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((35818, 35833), 'qiskit.aqua.operators.PrimitiveOp', 'PrimitiveOp', (['qc'], {}), '(qc)\n', (35829, 35833), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((35849, 35874), 'qiskit.aqua.operators.SummedOp', 'SummedOp', (['[mat_op, qc_op]'], {}), '([mat_op, qc_op])\n', (35857, 35874), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, 
MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((36009, 36050), 'qiskit.aqua.operators.PrimitiveOp', 'PrimitiveOp', (['[[1, 0], [0, -1]]'], {'coeff': 'lam'}), '([[1, 0], [0, -1]], coeff=lam)\n', (36020, 36050), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((36123, 36141), 'qiskit.aqua.operators.ListOp', 'ListOp', (['[op1, op2]'], {}), '([op1, op2])\n', (36129, 36141), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((36606, 36625), 'qiskit.quantum_info.Statevector', 'Statevector', (['[1, 0]'], {}), '([1, 0])\n', (36617, 36625), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((36239, 36260), 'qiskit.aqua.operators.VectorStateFn', 'VectorStateFn', (['[1, 0]'], {}), '([1, 0])\n', (36252, 36260), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((36272, 36293), 'qiskit.aqua.operators.DictStateFn', 'DictStateFn', (["{'0': 1}"], {}), "({'0': 1})\n", (36283, 36293), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((36350, 36368), 'qiskit.aqua.operators.OperatorStateFn', 'OperatorStateFn', (['I'], 
{}), '(I)\n', (36365, 36368), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((36775, 36791), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (36783, 36791), True, 'import numpy as np\n'), ((36807, 36837), 'qiskit.aqua.operators.VectorStateFn', 'VectorStateFn', (['[1, 1]'], {'coeff': '(2)'}), '([1, 1], coeff=2)\n', (36820, 36837), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((36853, 36891), 'qiskit.aqua.operators.DictStateFn', 'DictStateFn', (["{'0': 1, '1': 1}"], {'coeff': '(2)'}), "({'0': 1, '1': 1}, coeff=2)\n", (36864, 36891), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((38826, 38848), 'qiskit.aqua.operators.ListOp', 'ListOp', (['[X ^ Y, Y ^ Z]'], {}), '([X ^ Y, Y ^ Z])\n', (38832, 38848), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((38986, 39004), 'qiskit.aqua.operators.ListOp', 'ListOp', (['[X ^ Y, Y]'], {}), '([X ^ Y, Y])\n', (38992, 39004), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), 
((40549, 40584), 'qiskit.aqua.operators.ListOp', 'ListOp', (['[X]'], {'combo_fn': 'self.combo_fn'}), '([X], combo_fn=self.combo_fn)\n', (40555, 40584), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((1647, 1666), 'qiskit.quantum_info.Pauli', 'Pauli', ([], {'label': '"""XYZI"""'}), "(label='XYZI')\n", (1652, 1666), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((1753, 1776), 'qiskit.quantum_info.Pauli', 'Pauli', ([], {'label': '"""YYYYYIII"""'}), "(label='YYYYYIII')\n", (1758, 1776), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((1859, 1882), 'qiskit.quantum_info.Pauli', 'Pauli', ([], {'label': '"""YIYIYIYI"""'}), "(label='YIYIYIYI')\n", (1864, 1882), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((1952, 1968), 'qiskit.quantum_info.Pauli', 'Pauli', ([], {'label': '"""X"""'}), "(label='X')\n", (1957, 1968), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((2008, 2024), 'qiskit.quantum_info.Pauli', 'Pauli', ([], {'label': '"""Y"""'}), "(label='Y')\n", (2013, 2024), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((2064, 2080), 'qiskit.quantum_info.Pauli', 'Pauli', ([], {'label': '"""Z"""'}), "(label='Z')\n", (2069, 2080), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((2120, 2136), 'qiskit.quantum_info.Pauli', 'Pauli', ([], {'label': '"""I"""'}), "(label='I')\n", (2125, 2136), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((2244, 2259), 'qiskit.aqua.operators.Minus.eval', 'Minus.eval', (['"""1"""'], {}), "('1')\n", (2254, 2259), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, 
CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((3074, 3086), 'qiskit.aqua.operators.Y.eval', 'Y.eval', (['"""11"""'], {}), "('11')\n", (3080, 3086), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((6156, 6174), 'qiskit.quantum_info.Pauli', 'Pauli', ([], {'label': 'label'}), '(label=label)\n', (6161, 6174), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((6467, 6480), 'qiskit.aqua.operators.H.to_matrix', 'H.to_matrix', ([], {}), '()\n', (6478, 6480), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((6482, 6495), 'qiskit.aqua.operators.I.to_matrix', 'I.to_matrix', ([], {}), '()\n', (6493, 6495), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((6511, 6536), 'qiskit.quantum_info.Operator.from_label', 'Operator.from_label', (['"""HI"""'], {}), "('HI')\n", (6530, 6536), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((6707, 6720), 'qiskit.aqua.operators.X.to_matrix', 'X.to_matrix', ([], {}), '()\n', (6718, 6720), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((6722, 6735), 
'qiskit.aqua.operators.Y.to_matrix', 'Y.to_matrix', ([], {}), '()\n', (6733, 6735), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((6751, 6776), 'qiskit.quantum_info.Operator.from_label', 'Operator.from_label', (['"""XY"""'], {}), "('XY')\n", (6770, 6776), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((7910, 7923), 'qiskit.aqua.operators.X.to_matrix', 'X.to_matrix', ([], {}), '()\n', (7921, 7923), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((7994, 8007), 'qiskit.aqua.operators.Y.to_matrix', 'Y.to_matrix', ([], {}), '()\n', (8005, 8007), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((8078, 8091), 'qiskit.aqua.operators.Z.to_matrix', 'Z.to_matrix', ([], {}), '()\n', (8089, 8091), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((9512, 9539), 'numpy.array', 'np.array', (['[[1, 1], [1, -1]]'], {}), '([[1, 1], [1, -1]])\n', (9520, 9539), True, 'import numpy as np\n'), ((9542, 9552), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9549, 9552), True, 'import numpy as np\n'), ((10863, 10884), 'qiskit.aqua.operators.X.primitive_strings', 'X.primitive_strings', ([], {}), '()\n', (10882, 10884), 
False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((16453, 16484), 'qiskit.aqua.operators.SummedOp', 'SummedOp', (['[X ^ X * 2, Y ^ Y]', '(2)'], {}), '([X ^ X * 2, Y ^ Y], 2)\n', (16461, 16484), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((16487, 16518), 'qiskit.aqua.operators.SummedOp', 'SummedOp', (['[X ^ X * 2, Z ^ Z]', '(3)'], {}), '([X ^ X * 2, Z ^ Z], 3)\n', (16495, 16518), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((22945, 22984), 'numpy.ones', 'np.ones', (['(2 ** num_qubits)'], {'dtype': 'complex'}), '(2 ** num_qubits, dtype=complex)\n', (22952, 22984), True, 'import numpy as np\n'), ((24312, 24334), 'qiskit.aqua.operators.ComposedOp', 'ComposedOp', (['[op2, op3]'], {}), '([op2, op3])\n', (24322, 24334), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((24351, 24373), 'qiskit.aqua.operators.ComposedOp', 'ComposedOp', (['[op3, op2]'], {}), '([op3, op2])\n', (24361, 24373), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, 
OperatorBase, Zero\n'), ((24597, 24619), 'qiskit.aqua.operators.ComposedOp', 'ComposedOp', (['[op2, op3]'], {}), '([op2, op3])\n', (24607, 24619), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((24636, 24658), 'qiskit.aqua.operators.ComposedOp', 'ComposedOp', (['[op3, op2]'], {}), '([op3, op2])\n', (24646, 24658), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((24879, 24901), 'qiskit.aqua.operators.ComposedOp', 'ComposedOp', (['[op2, op3]'], {}), '([op2, op3])\n', (24889, 24901), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((24918, 24940), 'qiskit.aqua.operators.ComposedOp', 'ComposedOp', (['[op3, op2]'], {}), '([op3, op2])\n', (24928, 24940), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((28479, 28506), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (28487, 28506), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((28619, 28646), 
'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (28627, 28646), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((28763, 28781), 'qiskit.circuit.Parameter', 'Parameter', (['"""theta"""'], {}), "('theta')\n", (28772, 28781), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((28798, 28825), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (28806, 28825), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((28953, 28971), 'qiskit.circuit.Parameter', 'Parameter', (['"""theta"""'], {}), "('theta')\n", (28962, 28971), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((28988, 29015), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (28996, 29015), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((29134, 29161), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (29142, 29161), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, 
Zero\n'), ((29711, 29729), 'qiskit.aqua.operators.CircuitOp', 'CircuitOp', (['circuit'], {}), '(circuit)\n', (29720, 29729), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((30064, 30081), 'qiskit.circuit.Parameter', 'Parameter', (['"""beta"""'], {}), "('beta')\n", (30073, 30081), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((32791, 32804), 'numpy.kron', 'np.kron', (['x', 'y'], {}), '(x, y)\n', (32798, 32804), True, 'import numpy as np\n'), ((33309, 33327), 'qiskit.circuit.Parameter', 'Parameter', (['"""alpha"""'], {}), "('alpha')\n", (33318, 33327), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((33382, 33399), 'qiskit.circuit.Parameter', 'Parameter', (['"""beta"""'], {}), "('beta')\n", (33391, 33399), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((33876, 33883), 'qiskit.circuit.library.ZGate', 'ZGate', ([], {}), '()\n', (33881, 33883), False, 'from qiskit.circuit.library import CZGate, ZGate\n'), ((34584, 34599), 'qiskit.aqua.operators.ComposedOp', 'ComposedOp', (['[X]'], {}), '([X])\n', (34594, 34599), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((36320, 36337), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['(1)'], {}), '(1)\n', (36334, 36337), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((36396, 36422), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['[[1, 0], [0, 
1]]'], {}), '([[1, 0], [0, 1]])\n', (36404, 36422), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((37334, 37353), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['"""invalid"""'], {}), "('invalid')\n", (37342, 37353), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((37653, 37667), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['None'], {}), '(None)\n', (37661, 37667), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((37799, 37812), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['(2.0)'], {}), '(2.0)\n', (37807, 37812), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((37986, 38013), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (37994, 38013), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((38267, 38293), 'qiskit.aqua.operators.MatrixOp', 'MatrixOp', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (38275, 38293), False, 'from qiskit.aqua.operators import 
X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((4361, 4412), 'itertools.product', 'itertools.product', (['"""01"""'], {'repeat': 'pauli_op.num_qubits'}), "('01', repeat=pauli_op.num_qubits)\n", (4378, 4412), False, 'import itertools\n'), ((5083, 5135), 'itertools.product', 'itertools.product', (['"""01"""'], {'repeat': 'gnarly_op.num_qubits'}), "('01', repeat=gnarly_op.num_qubits)\n", (5100, 5135), False, 'import itertools\n'), ((6080, 6106), 'qiskit.quantum_info.Operator.from_label', 'Operator.from_label', (['label'], {}), '(label)\n', (6099, 6106), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((6414, 6435), 'numpy.kron', 'np.kron', (['x_mat', 'y_mat'], {}), '(x_mat, y_mat)\n', (6421, 6435), True, 'import numpy as np\n'), ((7925, 7949), 'qiskit.quantum_info.Operator.from_label', 'Operator.from_label', (['"""X"""'], {}), "('X')\n", (7944, 7949), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((8009, 8033), 'qiskit.quantum_info.Operator.from_label', 'Operator.from_label', (['"""Y"""'], {}), "('Y')\n", (8028, 8033), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((8093, 8117), 'qiskit.quantum_info.Operator.from_label', 'Operator.from_label', (['"""Z"""'], {}), "('Z')\n", (8112, 8117), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((8207, 8220), 'qiskit.aqua.operators.Y.to_matrix', 'Y.to_matrix', ([], {}), '()\n', (8218, 8220), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((8223, 8236), 'qiskit.aqua.operators.H.to_matrix', 'H.to_matrix', ([], {}), '()\n', (8234, 8236), False, 
'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((8629, 8642), 'qiskit.aqua.operators.X.to_matrix', 'X.to_matrix', ([], {}), '()\n', (8640, 8642), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((9283, 9299), 'qiskit.quantum_info.Pauli', 'Pauli', ([], {'label': '"""Y"""'}), "(label='Y')\n", (9288, 9299), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((28359, 28366), 'qiskit.circuit.library.ZGate', 'ZGate', ([], {}), '()\n', (28364, 28366), False, 'from qiskit.circuit.library import CZGate, ZGate\n'), ((34727, 34742), 'qiskit.aqua.operators.ComposedOp', 'ComposedOp', (['[X]'], {}), '([X])\n', (34737, 34742), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((35334, 35345), 'qiskit.aqua.operators.StateFn', 'StateFn', (['op'], {}), '(op)\n', (35341, 35345), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((35391, 35411), 'qiskit.aqua.operators.StateFn', 'StateFn', (['qc'], {'coeff': 'l'}), '(qc, coeff=l)\n', (35398, 35411), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, 
OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((36461, 36478), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['(1)'], {}), '(1)\n', (36475, 36478), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector\n'), ((37976, 37983), 'qiskit.circuit.library.ZGate', 'ZGate', ([], {}), '()\n', (37981, 37983), False, 'from qiskit.circuit.library import CZGate, ZGate\n'), ((38257, 38264), 'qiskit.circuit.library.ZGate', 'ZGate', ([], {}), '()\n', (38262, 38264), False, 'from qiskit.circuit.library import CZGate, ZGate\n'), ((2426, 2437), 'qiskit.aqua.operators.Z.eval', 'Z.eval', (['"""0"""'], {}), "('0')\n", (2432, 2437), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((2477, 2488), 'qiskit.aqua.operators.Z.eval', 'Z.eval', (['"""1"""'], {}), "('1')\n", (2483, 2488), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((2528, 2539), 'qiskit.aqua.operators.Z.eval', 'Z.eval', (['"""0"""'], {}), "('0')\n", (2534, 2539), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((2579, 2590), 'qiskit.aqua.operators.Z.eval', 'Z.eval', (['"""1"""'], {}), "('1')\n", (2585, 2590), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, 
ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((2631, 2642), 'qiskit.aqua.operators.X.eval', 'X.eval', (['"""0"""'], {}), "('0')\n", (2637, 2642), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((2682, 2693), 'qiskit.aqua.operators.X.eval', 'X.eval', (['"""1"""'], {}), "('1')\n", (2688, 2693), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((2733, 2744), 'qiskit.aqua.operators.X.eval', 'X.eval', (['"""0"""'], {}), "('0')\n", (2739, 2744), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((2784, 2795), 'qiskit.aqua.operators.X.eval', 'X.eval', (['"""1"""'], {}), "('1')\n", (2790, 2795), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((2835, 2846), 'qiskit.aqua.operators.Y.eval', 'Y.eval', (['"""0"""'], {}), "('0')\n", (2841, 2846), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((2886, 2897), 'qiskit.aqua.operators.Y.eval', 'Y.eval', (['"""1"""'], {}), "('1')\n", (2892, 2897), False, 'from 
qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((2939, 2950), 'qiskit.aqua.operators.Y.eval', 'Y.eval', (['"""0"""'], {}), "('0')\n", (2945, 2950), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((2991, 3002), 'qiskit.aqua.operators.Y.eval', 'Y.eval', (['"""1"""'], {}), "('1')\n", (2997, 3002), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((4891, 4918), 'qiskit.quantum_info.Operator.from_label', 'Operator.from_label', (['"""+r0I"""'], {}), "('+r0I')\n", (4910, 4918), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((5646, 5654), 'qiskit.circuit.library.CZGate', 'CZGate', ([], {}), '()\n', (5652, 5654), False, 'from qiskit.circuit.library import CZGate, ZGate\n'), ((7091, 7113), 'qiskit.aqua.operators.PrimitiveOp', 'PrimitiveOp', (['matrix_op'], {}), '(matrix_op)\n', (7102, 7113), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((7172, 7199), 'qiskit.aqua.operators.PrimitiveOp', 'PrimitiveOp', (['matrix_op.data'], {}), '(matrix_op.data)\n', (7183, 7199), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, 
VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((7383, 7410), 'qiskit.aqua.operators.PrimitiveOp', 'PrimitiveOp', (['matrix_op.data'], {}), '(matrix_op.data)\n', (7394, 7410), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((8889, 8914), 'qiskit.quantum_info.Operator.from_label', 'Operator.from_label', (['"""+r"""'], {}), "('+r')\n", (8908, 8914), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((9014, 9039), 'qiskit.quantum_info.Operator.from_label', 'Operator.from_label', (['"""+r"""'], {}), "('+r')\n", (9033, 9039), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((9886, 9899), 'qiskit.aqua.operators.Z.to_matrix', 'Z.to_matrix', ([], {}), '()\n', (9897, 9899), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((10559, 10587), 'qiskit.quantum_info.Operator.from_label', 'Operator.from_label', (['"""+r0IX"""'], {}), "('+r0IX')\n", (10578, 10587), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((10995, 11023), 'qiskit.quantum_info.Operator.from_label', 'Operator.from_label', (['"""+r0IX"""'], {}), "('+r0IX')\n", (11014, 11023), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((11286, 11314), 'qiskit.quantum_info.Operator.from_label', 'Operator.from_label', (['"""+r0IX"""'], {}), "('+r0IX')\n", (11305, 11314), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((32840, 32857), 'qiskit.quantum_info.Operator', 'Operator', (['unitary'], {}), '(unitary)\n', (32848, 
32857), False, 'from qiskit.quantum_info import Operator, Pauli, Statevector\n'), ((37504, 37513), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (37510, 37513), True, 'import numpy as np\n'), ((38056, 38067), 'qiskit.aqua.operators.ListOp', 'ListOp', (['ops'], {}), '(ops)\n', (38062, 38067), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((38338, 38349), 'qiskit.aqua.operators.ListOp', 'ListOp', (['ops'], {}), '(ops)\n', (38344, 38349), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((3358, 3371), 'qiskit.aqua.operators.Z.to_matrix', 'Z.to_matrix', ([], {}), '()\n', (3369, 3371), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((3434, 3447), 'qiskit.aqua.operators.Z.to_matrix', 'Z.to_matrix', ([], {}), '()\n', (3445, 3447), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((3510, 3523), 'qiskit.aqua.operators.Z.to_matrix', 'Z.to_matrix', ([], {}), '()\n', (3521, 3523), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), 
((3586, 3599), 'qiskit.aqua.operators.Z.to_matrix', 'Z.to_matrix', ([], {}), '()\n', (3597, 3599), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((3663, 3676), 'qiskit.aqua.operators.X.to_matrix', 'X.to_matrix', ([], {}), '()\n', (3674, 3676), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((3739, 3752), 'qiskit.aqua.operators.X.to_matrix', 'X.to_matrix', ([], {}), '()\n', (3750, 3752), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((3815, 3828), 'qiskit.aqua.operators.X.to_matrix', 'X.to_matrix', ([], {}), '()\n', (3826, 3828), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((3891, 3904), 'qiskit.aqua.operators.X.to_matrix', 'X.to_matrix', ([], {}), '()\n', (3902, 3904), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((3967, 3980), 'qiskit.aqua.operators.Y.to_matrix', 'Y.to_matrix', ([], {}), '()\n', (3978, 3980), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, 
CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((4043, 4056), 'qiskit.aqua.operators.Y.to_matrix', 'Y.to_matrix', ([], {}), '()\n', (4054, 4056), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((4121, 4134), 'qiskit.aqua.operators.Y.to_matrix', 'Y.to_matrix', ([], {}), '()\n', (4132, 4134), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n'), ((4198, 4211), 'qiskit.aqua.operators.Y.to_matrix', 'Y.to_matrix', ([], {}), '()\n', (4209, 4211), False, 'from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn, CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp, SummedOp, OperatorBase, Zero\n')] |
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
Different FCI solvers are implemented to support different type of symmetry.
Symmetry
File Point group Spin singlet Real hermitian* Alpha/beta degeneracy
direct_spin0_symm Yes Yes Yes Yes
direct_spin1_symm Yes No Yes Yes
direct_spin0 No Yes Yes Yes
direct_spin1 No No Yes Yes
direct_uhf No No Yes No
direct_nosym No No No** Yes
* Real hermitian Hamiltonian implies (ij|kl) = (ji|kl) = (ij|lk) = (ji|lk)
** Hamiltonian is real but not hermitian, (ij|kl) != (ji|kl) ...
'''
import sys
import ctypes
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf import symm
from pyscf.fci import cistring
from pyscf.fci import direct_spin0
from pyscf.fci import direct_spin1
from pyscf.fci import direct_spin1_symm
from pyscf.fci import addons
from pyscf.fci.spin_op import contract_ss
from pyscf import __config__
libfci = lib.load_library('libfci')
TOTIRREPS = 8
def contract_1e(f1e, fcivec, norb, nelec, link_index=None, orbsym=None):
return direct_spin0.contract_1e(f1e, fcivec, norb, nelec, link_index)
# Note eri is NOT the 2e hamiltonian matrix, the 2e hamiltonian is
# h2e = eri_{pq,rs} p^+ q r^+ s
# = (pq|rs) p^+ r^+ s q - (pq|rs) \delta_{qr} p^+ s
# so eri is defined as
# eri_{pq,rs} = (pq|rs) - (1/Nelec) \sum_q (pq|qs)
# to restore the symmetry between pq and rs,
# eri_{pq,rs} = (pq|rs) - (.5/Nelec) [\sum_q (pq|qs) + \sum_p (pq|rp)]
# Please refer to the treatment in direct_spin1.absorb_h1e
# the input fcivec should be symmetrized
def contract_2e(eri, fcivec, norb, nelec, link_index=None, orbsym=None, wfnsym=0):
if orbsym is None:
return direct_spin0.contract_2e(eri, fcivec, norb, nelec, link_index)
eri = ao2mo.restore(4, eri, norb)
neleca, nelecb = direct_spin1._unpack_nelec(nelec)
assert(neleca == nelecb)
link_indexa = direct_spin0._unpack(norb, nelec, link_index)
na, nlinka = link_indexa.shape[:2]
eri_irs, rank_eri, irrep_eri = direct_spin1_symm.reorder_eri(eri, norb, orbsym)
strsa = numpy.asarray(cistring.gen_strings4orblist(range(norb), neleca))
aidx, link_indexa = direct_spin1_symm.gen_str_irrep(strsa, orbsym, link_indexa,
rank_eri, irrep_eri)
Tirrep = ctypes.c_void_p*TOTIRREPS
linka_ptr = Tirrep(*[x.ctypes.data_as(ctypes.c_void_p) for x in link_indexa])
eri_ptrs = Tirrep(*[x.ctypes.data_as(ctypes.c_void_p) for x in eri_irs])
dimirrep = (ctypes.c_int*TOTIRREPS)(*[x.shape[0] for x in eri_irs])
fcivec_shape = fcivec.shape
fcivec = fcivec.reshape((na,na), order='C')
ci1new = numpy.zeros_like(fcivec)
nas = (ctypes.c_int*TOTIRREPS)(*[x.size for x in aidx])
ci0 = []
ci1 = []
for ir in range(TOTIRREPS):
ma, mb = aidx[ir].size, aidx[wfnsym ^ ir].size
ci0.append(numpy.zeros((ma,mb)))
ci1.append(numpy.zeros((ma,mb)))
if ma > 0 and mb > 0:
lib.take_2d(fcivec, aidx[ir], aidx[wfnsym ^ ir], out=ci0[ir])
ci0_ptrs = Tirrep(*[x.ctypes.data_as(ctypes.c_void_p) for x in ci0])
ci1_ptrs = Tirrep(*[x.ctypes.data_as(ctypes.c_void_p) for x in ci1])
libfci.FCIcontract_2e_symm1(eri_ptrs, ci0_ptrs, ci1_ptrs,
ctypes.c_int(norb), nas, nas,
ctypes.c_int(nlinka), ctypes.c_int(nlinka),
linka_ptr, linka_ptr, dimirrep,
ctypes.c_int(wfnsym))
for ir in range(TOTIRREPS):
if ci0[ir].size > 0:
lib.takebak_2d(ci1new, ci1[ir], aidx[ir], aidx[wfnsym ^ ir])
return lib.transpose_sum(ci1new, inplace=True).reshape(fcivec_shape)
def kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,
lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,
ecore=0, **kwargs):
assert(len(orbsym) == norb)
cis = FCISolver(None)
cis.level_shift = level_shift
cis.conv_tol = tol
cis.lindep = lindep
cis.max_cycle = max_cycle
cis.max_space = max_space
cis.nroots = nroots
cis.davidson_only = davidson_only
cis.pspace_size = pspace_size
cis.orbsym = orbsym
cis.wfnsym = wfnsym
unknown = {}
for k, v in kwargs.items():
if not hasattr(cis, k):
unknown[k] = v
setattr(cis, k, v)
if unknown:
sys.stderr.write('Unknown keys %s for FCI kernel %s\n' %
(str(unknown.keys()), __name__))
wfnsym = direct_spin1_symm._id_wfnsym(cis, norb, nelec, cis.orbsym,
cis.wfnsym)
if cis.wfnsym is not None and ci0 is None:
ci0 = addons.symm_initguess(norb, nelec, orbsym, wfnsym)
e, c = cis.kernel(h1e, eri, norb, nelec, ci0, ecore=ecore, **unknown)
return e, c
make_rdm1 = direct_spin0.make_rdm1
make_rdm1s = direct_spin0.make_rdm1s
make_rdm12 = direct_spin0.make_rdm12
trans_rdm1s = direct_spin0.trans_rdm1s
trans_rdm1 = direct_spin0.trans_rdm1
trans_rdm12 = direct_spin0.trans_rdm12
def energy(h1e, eri, fcivec, norb, nelec, link_index=None, orbsym=None, wfnsym=0):
h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec) * .5
ci1 = contract_2e(h2e, fcivec, norb, nelec, link_index, orbsym, wfnsym)
return numpy.dot(fcivec.ravel(), ci1.ravel())
def get_init_guess(norb, nelec, nroots, hdiag, orbsym, wfnsym=0):
neleca, nelecb = direct_spin1._unpack_nelec(nelec)
assert(neleca == nelecb)
strsa = cistring.gen_strings4orblist(range(norb), neleca)
airreps = direct_spin1_symm._gen_strs_irrep(strsa, orbsym)
na = nb = len(airreps)
init_strs = []
iroot = 0
for addr in numpy.argsort(hdiag):
addra = addr // nb
addrb = addr % nb
if airreps[addra] ^ airreps[addrb] == wfnsym:
if (addrb,addra) not in init_strs:
init_strs.append((addra,addrb))
iroot += 1
if iroot >= nroots:
break
ci0 = []
for addra,addrb in init_strs:
x = numpy.zeros((na,nb))
if addra == addrb == 0:
x[addra,addrb] = 1
else:
x[addra,addrb] = x[addrb,addra] = numpy.sqrt(.5)
ci0.append(x.ravel())
# Add noise
#ci0[0][0 ] += 1e-5
#ci0[0][-1] -= 1e-5
if len(ci0) == 0:
raise RuntimeError('No determinant matches the target symmetry %s' %
wfnsym)
return ci0
class FCISolver(direct_spin0.FCISolver):
davidson_only = getattr(__config__, 'fci_direct_spin1_symm_FCI_davidson_only', True)
# pspace may break point group symmetry
pspace_size = getattr(__config__, 'fci_direct_spin1_symm_FCI_pspace_size', 0)
def __init__(self, mol=None, **kwargs):
direct_spin0.FCISolver.__init__(self, mol, **kwargs)
# wfnsym will be guessed based on initial guess if it is None
self.wfnsym = None
def dump_flags(self, verbose=None):
direct_spin0.FCISolver.dump_flags(self, verbose)
log = logger.new_logger(self, verbose)
if isinstance(self.wfnsym, str):
log.info('specified CI wfn symmetry = %s', self.wfnsym)
elif isinstance(self.wfnsym, (int, numpy.number)):
log.info('specified CI wfn symmetry = %s',
symm.irrep_id2name(self.mol.groupname, self.wfnsym))
def absorb_h1e(self, h1e, eri, norb, nelec, fac=1):
return direct_spin1.absorb_h1e(h1e, eri, norb, nelec, fac)
def make_hdiag(self, h1e, eri, norb, nelec):
return direct_spin0.make_hdiag(h1e, eri, norb, nelec)
def pspace(self, h1e, eri, norb, nelec, hdiag, np=400):
return direct_spin0.pspace(h1e, eri, norb, nelec, hdiag, np)
def contract_1e(self, f1e, fcivec, norb, nelec, link_index=None, **kwargs):
return contract_1e(f1e, fcivec, norb, nelec, link_index, **kwargs)
def contract_2e(self, eri, fcivec, norb, nelec, link_index=None,
orbsym=None, wfnsym=None, **kwargs):
if orbsym is None: orbsym = self.orbsym
if wfnsym is None: wfnsym = self.wfnsym
wfnsym = direct_spin1_symm._id_wfnsym(self, norb, nelec, orbsym,
wfnsym)
return contract_2e(eri, fcivec, norb, nelec, link_index, orbsym, wfnsym, **kwargs)
def get_init_guess(self, norb, nelec, nroots, hdiag):
wfnsym = direct_spin1_symm._id_wfnsym(self, norb, nelec, self.orbsym,
self.wfnsym)
return get_init_guess(norb, nelec, nroots, hdiag, self.orbsym, wfnsym)
def guess_wfnsym(self, norb, nelec, fcivec=None, orbsym=None, wfnsym=None,
**kwargs):
if orbsym is None:
orbsym = self.orbsym
if fcivec is None:
wfnsym = direct_spin1_symm._id_wfnsym(self, norb, nelec, orbsym,
wfnsym)
else:
wfnsym = addons.guess_wfnsym(fcivec, norb, nelec, orbsym)
verbose = kwargs.get('verbose', None)
log = logger.new_logger(self, verbose)
log.debug('Guessing CI wfn symmetry = %s', wfnsym)
return wfnsym
def kernel(self, h1e, eri, norb, nelec, ci0=None,
tol=None, lindep=None, max_cycle=None, max_space=None,
nroots=None, davidson_only=None, pspace_size=None,
orbsym=None, wfnsym=None, ecore=0, **kwargs):
if nroots is None: nroots = self.nroots
if orbsym is None: orbsym = self.orbsym
if wfnsym is None: wfnsym = self.wfnsym
if self.verbose >= logger.WARN:
self.check_sanity()
self.norb = norb
self.nelec = nelec
wfnsym = self.guess_wfnsym(norb, nelec, ci0, orbsym, wfnsym, **kwargs)
with lib.temporary_env(self, orbsym=orbsym, wfnsym=wfnsym):
e, c = direct_spin0.kernel_ms0(self, h1e, eri, norb, nelec, ci0, None,
tol, lindep, max_cycle, max_space,
nroots, davidson_only, pspace_size,
ecore=ecore, **kwargs)
self.eci, self.ci = e, c
return e, c
FCI = FCISolver
if __name__ == '__main__':
from functools import reduce
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = {'H': 'sto-3g',
'O': 'sto-3g',}
mol.symmetry = 1
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
norb = m.mo_coeff.shape[1]
nelec = mol.nelectron
h1e = reduce(numpy.dot, (m.mo_coeff.T, scf.hf.get_hcore(mol), m.mo_coeff))
eri = ao2mo.incore.full(m._eri, m.mo_coeff)
numpy.random.seed(1)
na = cistring.num_strings(norb, nelec//2)
fcivec = numpy.random.random((na,na))
fcivec = fcivec + fcivec.T
orbsym = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb, m.mo_coeff)
print(numpy.allclose(orbsym, [0, 0, 2, 0, 3, 0, 2]))
cis = FCISolver(mol)
cis.orbsym = orbsym
fcivec = addons.symmetrize_wfn(fcivec, norb, nelec, cis.orbsym, wfnsym=0)
ci1 = cis.contract_2e(eri, fcivec, norb, nelec)
ci1ref = direct_spin0.contract_2e(eri, fcivec, norb, nelec)
print(numpy.allclose(ci1ref, ci1))
e = cis.kernel(h1e, eri, norb, nelec, ecore=m.energy_nuc(), davidson_only=True)[0]
print(e, e - -75.012647118991595)
mol.atom = [['H', (0, 0, i)] for i in range(8)]
mol.basis = {'H': 'sto-3g'}
mol.symmetry = True
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
norb = m.mo_coeff.shape[1]
nelec = mol.nelectron
eri = ao2mo.incore.full(m._eri, m.mo_coeff)
na = cistring.num_strings(norb, nelec//2)
fcivec = numpy.random.random((na,na))
fcivec = fcivec + fcivec.T
orbsym = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb, m.mo_coeff)
cis = FCISolver(mol)
cis.orbsym = orbsym
fcivec = addons.symmetrize_wfn(fcivec, norb, nelec, cis.orbsym, wfnsym=0)
ci1 = cis.contract_2e(eri, fcivec, norb, nelec)
ci1ref = direct_spin0.contract_2e(eri, fcivec, norb, nelec)
print(numpy.allclose(ci1ref, ci1))
| [
"pyscf.lib.takebak_2d",
"pyscf.lib.logger.new_logger",
"numpy.sqrt",
"pyscf.fci.addons.guess_wfnsym",
"pyscf.fci.direct_spin0.pspace",
"pyscf.lib.take_2d",
"numpy.argsort",
"pyscf.fci.direct_spin1.absorb_h1e",
"pyscf.symm.label_orb_symm",
"pyscf.ao2mo.restore",
"pyscf.lib.load_library",
"pyscf... | [((1830, 1856), 'pyscf.lib.load_library', 'lib.load_library', (['"""libfci"""'], {}), "('libfci')\n", (1846, 1856), False, 'from pyscf import lib\n'), ((1957, 2019), 'pyscf.fci.direct_spin0.contract_1e', 'direct_spin0.contract_1e', (['f1e', 'fcivec', 'norb', 'nelec', 'link_index'], {}), '(f1e, fcivec, norb, nelec, link_index)\n', (1981, 2019), False, 'from pyscf.fci import direct_spin0\n'), ((2673, 2700), 'pyscf.ao2mo.restore', 'ao2mo.restore', (['(4)', 'eri', 'norb'], {}), '(4, eri, norb)\n', (2686, 2700), False, 'from pyscf import ao2mo\n'), ((2722, 2755), 'pyscf.fci.direct_spin1._unpack_nelec', 'direct_spin1._unpack_nelec', (['nelec'], {}), '(nelec)\n', (2748, 2755), False, 'from pyscf.fci import direct_spin1\n'), ((2803, 2848), 'pyscf.fci.direct_spin0._unpack', 'direct_spin0._unpack', (['norb', 'nelec', 'link_index'], {}), '(norb, nelec, link_index)\n', (2823, 2848), False, 'from pyscf.fci import direct_spin0\n'), ((2923, 2971), 'pyscf.fci.direct_spin1_symm.reorder_eri', 'direct_spin1_symm.reorder_eri', (['eri', 'norb', 'orbsym'], {}), '(eri, norb, orbsym)\n', (2952, 2971), False, 'from pyscf.fci import direct_spin1_symm\n'), ((3074, 3159), 'pyscf.fci.direct_spin1_symm.gen_str_irrep', 'direct_spin1_symm.gen_str_irrep', (['strsa', 'orbsym', 'link_indexa', 'rank_eri', 'irrep_eri'], {}), '(strsa, orbsym, link_indexa, rank_eri, irrep_eri\n )\n', (3105, 3159), False, 'from pyscf.fci import direct_spin1_symm\n'), ((3575, 3599), 'numpy.zeros_like', 'numpy.zeros_like', (['fcivec'], {}), '(fcivec)\n', (3591, 3599), False, 'import numpy\n'), ((5507, 5577), 'pyscf.fci.direct_spin1_symm._id_wfnsym', 'direct_spin1_symm._id_wfnsym', (['cis', 'norb', 'nelec', 'cis.orbsym', 'cis.wfnsym'], {}), '(cis, norb, nelec, cis.orbsym, cis.wfnsym)\n', (5535, 5577), False, 'from pyscf.fci import direct_spin1_symm\n'), ((6409, 6442), 'pyscf.fci.direct_spin1._unpack_nelec', 'direct_spin1._unpack_nelec', (['nelec'], {}), '(nelec)\n', (6435, 6442), False, 'from pyscf.fci import 
direct_spin1\n'), ((6548, 6596), 'pyscf.fci.direct_spin1_symm._gen_strs_irrep', 'direct_spin1_symm._gen_strs_irrep', (['strsa', 'orbsym'], {}), '(strsa, orbsym)\n', (6581, 6596), False, 'from pyscf.fci import direct_spin1_symm\n'), ((6674, 6694), 'numpy.argsort', 'numpy.argsort', (['hdiag'], {}), '(hdiag)\n', (6687, 6694), False, 'import numpy\n'), ((11354, 11364), 'pyscf.gto.Mole', 'gto.Mole', ([], {}), '()\n', (11362, 11364), False, 'from pyscf import gto\n'), ((11646, 11658), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (11653, 11658), False, 'from pyscf import scf\n'), ((11824, 11861), 'pyscf.ao2mo.incore.full', 'ao2mo.incore.full', (['m._eri', 'm.mo_coeff'], {}), '(m._eri, m.mo_coeff)\n', (11841, 11861), False, 'from pyscf import ao2mo\n'), ((11866, 11886), 'numpy.random.seed', 'numpy.random.seed', (['(1)'], {}), '(1)\n', (11883, 11886), False, 'import numpy\n'), ((11896, 11934), 'pyscf.fci.cistring.num_strings', 'cistring.num_strings', (['norb', '(nelec // 2)'], {}), '(norb, nelec // 2)\n', (11916, 11934), False, 'from pyscf.fci import cistring\n'), ((11946, 11975), 'numpy.random.random', 'numpy.random.random', (['(na, na)'], {}), '((na, na))\n', (11965, 11975), False, 'import numpy\n'), ((12020, 12084), 'pyscf.symm.label_orb_symm', 'symm.label_orb_symm', (['mol', 'mol.irrep_id', 'mol.symm_orb', 'm.mo_coeff'], {}), '(mol, mol.irrep_id, mol.symm_orb, m.mo_coeff)\n', (12039, 12084), False, 'from pyscf import symm\n'), ((12204, 12268), 'pyscf.fci.addons.symmetrize_wfn', 'addons.symmetrize_wfn', (['fcivec', 'norb', 'nelec', 'cis.orbsym'], {'wfnsym': '(0)'}), '(fcivec, norb, nelec, cis.orbsym, wfnsym=0)\n', (12225, 12268), False, 'from pyscf.fci import addons\n'), ((12334, 12384), 'pyscf.fci.direct_spin0.contract_2e', 'direct_spin0.contract_2e', (['eri', 'fcivec', 'norb', 'nelec'], {}), '(eri, fcivec, norb, nelec)\n', (12358, 12384), False, 'from pyscf.fci import direct_spin0\n'), ((12682, 12694), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', 
(12689, 12694), False, 'from pyscf import scf\n'), ((12781, 12818), 'pyscf.ao2mo.incore.full', 'ao2mo.incore.full', (['m._eri', 'm.mo_coeff'], {}), '(m._eri, m.mo_coeff)\n', (12798, 12818), False, 'from pyscf import ao2mo\n'), ((12828, 12866), 'pyscf.fci.cistring.num_strings', 'cistring.num_strings', (['norb', '(nelec // 2)'], {}), '(norb, nelec // 2)\n', (12848, 12866), False, 'from pyscf.fci import cistring\n'), ((12878, 12907), 'numpy.random.random', 'numpy.random.random', (['(na, na)'], {}), '((na, na))\n', (12897, 12907), False, 'import numpy\n'), ((12951, 13015), 'pyscf.symm.label_orb_symm', 'symm.label_orb_symm', (['mol', 'mol.irrep_id', 'mol.symm_orb', 'm.mo_coeff'], {}), '(mol, mol.irrep_id, mol.symm_orb, m.mo_coeff)\n', (12970, 13015), False, 'from pyscf import symm\n'), ((13078, 13142), 'pyscf.fci.addons.symmetrize_wfn', 'addons.symmetrize_wfn', (['fcivec', 'norb', 'nelec', 'cis.orbsym'], {'wfnsym': '(0)'}), '(fcivec, norb, nelec, cis.orbsym, wfnsym=0)\n', (13099, 13142), False, 'from pyscf.fci import addons\n'), ((13208, 13258), 'pyscf.fci.direct_spin0.contract_2e', 'direct_spin0.contract_2e', (['eri', 'fcivec', 'norb', 'nelec'], {}), '(eri, fcivec, norb, nelec)\n', (13232, 13258), False, 'from pyscf.fci import direct_spin0\n'), ((2599, 2661), 'pyscf.fci.direct_spin0.contract_2e', 'direct_spin0.contract_2e', (['eri', 'fcivec', 'norb', 'nelec', 'link_index'], {}), '(eri, fcivec, norb, nelec, link_index)\n', (2623, 2661), False, 'from pyscf.fci import direct_spin0\n'), ((4200, 4218), 'ctypes.c_int', 'ctypes.c_int', (['norb'], {}), '(norb)\n', (4212, 4218), False, 'import ctypes\n'), ((4262, 4282), 'ctypes.c_int', 'ctypes.c_int', (['nlinka'], {}), '(nlinka)\n', (4274, 4282), False, 'import ctypes\n'), ((4284, 4304), 'ctypes.c_int', 'ctypes.c_int', (['nlinka'], {}), '(nlinka)\n', (4296, 4304), False, 'import ctypes\n'), ((4402, 4422), 'ctypes.c_int', 'ctypes.c_int', (['wfnsym'], {}), '(wfnsym)\n', (4414, 4422), False, 'import ctypes\n'), ((5681, 5731), 
'pyscf.fci.addons.symm_initguess', 'addons.symm_initguess', (['norb', 'nelec', 'orbsym', 'wfnsym'], {}), '(norb, nelec, orbsym, wfnsym)\n', (5702, 5731), False, 'from pyscf.fci import addons\n'), ((6143, 6189), 'pyscf.fci.direct_spin1.absorb_h1e', 'direct_spin1.absorb_h1e', (['h1e', 'eri', 'norb', 'nelec'], {}), '(h1e, eri, norb, nelec)\n', (6166, 6189), False, 'from pyscf.fci import direct_spin1\n'), ((7046, 7067), 'numpy.zeros', 'numpy.zeros', (['(na, nb)'], {}), '((na, nb))\n', (7057, 7067), False, 'import numpy\n'), ((7762, 7814), 'pyscf.fci.direct_spin0.FCISolver.__init__', 'direct_spin0.FCISolver.__init__', (['self', 'mol'], {}), '(self, mol, **kwargs)\n', (7793, 7814), False, 'from pyscf.fci import direct_spin0\n'), ((7961, 8009), 'pyscf.fci.direct_spin0.FCISolver.dump_flags', 'direct_spin0.FCISolver.dump_flags', (['self', 'verbose'], {}), '(self, verbose)\n', (7994, 8009), False, 'from pyscf.fci import direct_spin0\n'), ((8024, 8056), 'pyscf.lib.logger.new_logger', 'logger.new_logger', (['self', 'verbose'], {}), '(self, verbose)\n', (8041, 8056), False, 'from pyscf.lib import logger\n'), ((8426, 8477), 'pyscf.fci.direct_spin1.absorb_h1e', 'direct_spin1.absorb_h1e', (['h1e', 'eri', 'norb', 'nelec', 'fac'], {}), '(h1e, eri, norb, nelec, fac)\n', (8449, 8477), False, 'from pyscf.fci import direct_spin1\n'), ((8543, 8589), 'pyscf.fci.direct_spin0.make_hdiag', 'direct_spin0.make_hdiag', (['h1e', 'eri', 'norb', 'nelec'], {}), '(h1e, eri, norb, nelec)\n', (8566, 8589), False, 'from pyscf.fci import direct_spin0\n'), ((8666, 8719), 'pyscf.fci.direct_spin0.pspace', 'direct_spin0.pspace', (['h1e', 'eri', 'norb', 'nelec', 'hdiag', 'np'], {}), '(h1e, eri, norb, nelec, hdiag, np)\n', (8685, 8719), False, 'from pyscf.fci import direct_spin0\n'), ((9116, 9179), 'pyscf.fci.direct_spin1_symm._id_wfnsym', 'direct_spin1_symm._id_wfnsym', (['self', 'norb', 'nelec', 'orbsym', 'wfnsym'], {}), '(self, norb, nelec, orbsym, wfnsym)\n', (9144, 9179), False, 'from pyscf.fci import 
direct_spin1_symm\n'), ((9393, 9466), 'pyscf.fci.direct_spin1_symm._id_wfnsym', 'direct_spin1_symm._id_wfnsym', (['self', 'norb', 'nelec', 'self.orbsym', 'self.wfnsym'], {}), '(self, norb, nelec, self.orbsym, self.wfnsym)\n', (9421, 9466), False, 'from pyscf.fci import direct_spin1_symm\n'), ((10071, 10103), 'pyscf.lib.logger.new_logger', 'logger.new_logger', (['self', 'verbose'], {}), '(self, verbose)\n', (10088, 10103), False, 'from pyscf.lib import logger\n'), ((12095, 12140), 'numpy.allclose', 'numpy.allclose', (['orbsym', '[0, 0, 2, 0, 3, 0, 2]'], {}), '(orbsym, [0, 0, 2, 0, 3, 0, 2])\n', (12109, 12140), False, 'import numpy\n'), ((12395, 12422), 'numpy.allclose', 'numpy.allclose', (['ci1ref', 'ci1'], {}), '(ci1ref, ci1)\n', (12409, 12422), False, 'import numpy\n'), ((13269, 13296), 'numpy.allclose', 'numpy.allclose', (['ci1ref', 'ci1'], {}), '(ci1ref, ci1)\n', (13283, 13296), False, 'import numpy\n'), ((3793, 3814), 'numpy.zeros', 'numpy.zeros', (['(ma, mb)'], {}), '((ma, mb))\n', (3804, 3814), False, 'import numpy\n'), ((3834, 3855), 'numpy.zeros', 'numpy.zeros', (['(ma, mb)'], {}), '((ma, mb))\n', (3845, 3855), False, 'import numpy\n'), ((3898, 3959), 'pyscf.lib.take_2d', 'lib.take_2d', (['fcivec', 'aidx[ir]', 'aidx[wfnsym ^ ir]'], {'out': 'ci0[ir]'}), '(fcivec, aidx[ir], aidx[wfnsym ^ ir], out=ci0[ir])\n', (3909, 3959), False, 'from pyscf import lib\n'), ((4497, 4557), 'pyscf.lib.takebak_2d', 'lib.takebak_2d', (['ci1new', 'ci1[ir]', 'aidx[ir]', 'aidx[wfnsym ^ ir]'], {}), '(ci1new, ci1[ir], aidx[ir], aidx[wfnsym ^ ir])\n', (4511, 4557), False, 'from pyscf import lib\n'), ((4569, 4608), 'pyscf.lib.transpose_sum', 'lib.transpose_sum', (['ci1new'], {'inplace': '(True)'}), '(ci1new, inplace=True)\n', (4586, 4608), False, 'from pyscf import lib\n'), ((7190, 7205), 'numpy.sqrt', 'numpy.sqrt', (['(0.5)'], {}), '(0.5)\n', (7200, 7205), False, 'import numpy\n'), ((9812, 9875), 'pyscf.fci.direct_spin1_symm._id_wfnsym', 'direct_spin1_symm._id_wfnsym', (['self', 
'norb', 'nelec', 'orbsym', 'wfnsym'], {}), '(self, norb, nelec, orbsym, wfnsym)\n', (9840, 9875), False, 'from pyscf.fci import direct_spin1_symm\n'), ((9961, 10009), 'pyscf.fci.addons.guess_wfnsym', 'addons.guess_wfnsym', (['fcivec', 'norb', 'nelec', 'orbsym'], {}), '(fcivec, norb, nelec, orbsym)\n', (9980, 10009), False, 'from pyscf.fci import addons\n'), ((10798, 10851), 'pyscf.lib.temporary_env', 'lib.temporary_env', (['self'], {'orbsym': 'orbsym', 'wfnsym': 'wfnsym'}), '(self, orbsym=orbsym, wfnsym=wfnsym)\n', (10815, 10851), False, 'from pyscf import lib\n'), ((10872, 11037), 'pyscf.fci.direct_spin0.kernel_ms0', 'direct_spin0.kernel_ms0', (['self', 'h1e', 'eri', 'norb', 'nelec', 'ci0', 'None', 'tol', 'lindep', 'max_cycle', 'max_space', 'nroots', 'davidson_only', 'pspace_size'], {'ecore': 'ecore'}), '(self, h1e, eri, norb, nelec, ci0, None, tol, lindep,\n max_cycle, max_space, nroots, davidson_only, pspace_size, ecore=ecore,\n **kwargs)\n', (10895, 11037), False, 'from pyscf.fci import direct_spin0\n'), ((11778, 11799), 'pyscf.scf.hf.get_hcore', 'scf.hf.get_hcore', (['mol'], {}), '(mol)\n', (11794, 11799), False, 'from pyscf import scf\n'), ((8301, 8352), 'pyscf.symm.irrep_id2name', 'symm.irrep_id2name', (['self.mol.groupname', 'self.wfnsym'], {}), '(self.mol.groupname, self.wfnsym)\n', (8319, 8352), False, 'from pyscf import symm\n')] |
import types
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def get_mask(in_features, out_features, in_flow_features, mask_type=None):
"""
mask_type: input | None | output
See Figure 1 for a better illustration:
https://arxiv.org/pdf/1502.03509.pdf
"""
if mask_type == 'input':
in_degrees = torch.arange(in_features) % in_flow_features
else:
in_degrees = torch.arange(in_features) % (in_flow_features - 1)
if mask_type == 'output':
out_degrees = torch.arange(out_features) % in_flow_features - 1
else:
out_degrees = torch.arange(out_features) % (in_flow_features - 1)
return (out_degrees.unsqueeze(-1) >= in_degrees.unsqueeze(0)).float()
class MaskedLinear(nn.Module):
def __init__(self, in_features, out_features, mask, cond_in_features=None, bias=True):
super(MaskedLinear, self).__init__()
self.linear = nn.Linear(in_features, out_features)
if cond_in_features is not None:
self.cond_linear = nn.Linear(cond_in_features, out_features, bias=False)
self.register_buffer('mask', mask)
def forward(self, inputs, cond_inputs=None):
output = F.linear(inputs, self.linear.weight * self.mask, self.linear.bias)
if cond_inputs is not None:
output += self.cond_linear(cond_inputs)
return output
nn.MaskedLinear = MaskedLinear
class MADE(nn.Module):
""" An implementation of MADE
(https://arxiv.org/abs/1502.03509s).
"""
def __init__(self, num_inputs, num_hidden, num_cond_inputs=None, act='relu'):
super(MADE, self).__init__()
activations = {
'relu': nn.ReLU,
'sigmoid': nn.Sigmoid,
'tanh': nn.Tanh
}
act_func = activations[act]
input_mask = get_mask(
num_inputs, num_hidden, num_inputs, mask_type='input')
hidden_mask = get_mask(num_hidden, num_hidden, num_inputs)
output_mask = get_mask(
num_hidden, num_inputs * 2, num_inputs, mask_type='output')
self.joiner = nn.MaskedLinear(num_inputs, num_hidden, input_mask, num_cond_inputs)
self.trunk = nn.Sequential(
act_func(),
nn.MaskedLinear(num_hidden, num_hidden, hidden_mask),
act_func(),
nn.MaskedLinear(num_hidden, num_inputs * 2, output_mask))
def forward(self, inputs, cond_inputs=None, mode='direct'):
if mode == 'direct':
h = self.joiner(inputs, cond_inputs)
m, a = self.trunk(h).chunk(2, 1)
u = (inputs - m) * torch.exp(-a)
return u, -a.sum(-1, keepdim=True)
else:
x = torch.zeros_like(inputs)
for i_col in range(inputs.shape[1]):
h = self.joiner(x, cond_inputs)
m, a = self.trunk(h).chunk(2, 1)
x[:, i_col] = inputs[:, i_col] * torch.exp(a[:, i_col]) + m[:, i_col]
return x, -a.sum(-1, keepdim=True)
class Sigmoid(nn.Module):
def __init__(self):
super(Sigmoid, self).__init__()
def forward(self, inputs, cond_inputs=None, mode='direct'):
if mode == 'direct':
s = torch.sigmoid
return s(inputs), torch.log(s(inputs) * (1 - s(inputs))).sum(-1, keepdim=True)
else:
return torch.log(inputs / (1 - inputs)), -torch.log(inputs - inputs ** 2).sum(-1, keepdim=True)
class Logit(Sigmoid):
def __init__(self):
super(Logit, self).__init__()
def forward(self, inputs, cond_inputs=None, mode='direct'):
if mode == 'direct':
return super(Logit, self).forward(inputs, 'inverse')
else:
return super(Logit, self).forward(inputs, 'direct')
class BatchNormFlow(nn.Module):
""" An implementation of a batch normalization layer from
Density estimation using Real NVP
(https://arxiv.org/abs/1605.08803).
"""
def __init__(self, num_inputs, momentum=0.0, eps=1e-5):
super(BatchNormFlow, self).__init__()
self.log_gamma = nn.Parameter(torch.zeros(num_inputs))
self.beta = nn.Parameter(torch.zeros(num_inputs))
self.momentum = momentum
self.eps = eps
self.register_buffer('running_mean', torch.zeros(num_inputs))
self.register_buffer('running_var', torch.ones(num_inputs))
def forward(self, inputs, cond_inputs=None, mode='direct'):
if mode == 'direct':
if self.training:
self.batch_mean = inputs.mean(0)
self.batch_var = (
inputs - self.batch_mean).pow(2).mean(0) + self.eps
self.running_mean.mul_(self.momentum)
self.running_var.mul_(self.momentum)
self.running_mean.add_(self.batch_mean.data *
(1 - self.momentum))
self.running_var.add_(self.batch_var.data *
(1 - self.momentum))
mean = self.batch_mean
var = self.batch_var
else:
mean = self.running_mean
var = self.running_var
x_hat = (inputs - mean) / var.sqrt()
y = torch.exp(self.log_gamma) * x_hat + self.beta
return y, (self.log_gamma - 0.5 * torch.log(var)).sum(
-1, keepdim=True)
else:
if self.training:
mean = self.batch_mean
var = self.batch_var
else:
mean = self.running_mean
var = self.running_var
x_hat = (inputs - self.beta) / torch.exp(self.log_gamma)
y = x_hat * var.sqrt() + mean
return y, (-self.log_gamma + 0.5 * torch.log(var)).sum(
-1, keepdim=True)
class ActNorm(nn.Module):
""" An implementation of a activation normalization layer
from Glow: Generative Flow with Invertible 1x1 Convolutions
(https://arxiv.org/abs/1807.03039).
"""
def __init__(self, num_inputs):
super(ActNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(num_inputs))
self.bias = nn.Parameter(torch.zeros(num_inputs))
self.initialized = False
def forward(self, inputs, cond_inputs=None, mode='direct'):
if self.initialized == False:
self.weight.data.copy_(torch.log(1.0 / (inputs.std(0) + 1e-12)))
self.bias.data.copy_(inputs.mean(0))
self.initialized = True
if mode == 'direct':
return (
inputs - self.bias) * torch.exp(self.weight), self.weight.sum(
-1, keepdim=True).unsqueeze(0).repeat(inputs.size(0), 1)
else:
return inputs * torch.exp(
-self.weight) + self.bias, -self.weight.sum(
-1, keepdim=True).unsqueeze(0).repeat(inputs.size(0), 1)
class InvertibleMM(nn.Module):
""" An implementation of a invertible matrix multiplication
layer from Glow: Generative Flow with Invertible 1x1 Convolutions
(https://arxiv.org/abs/1807.03039).
"""
def __init__(self, num_inputs):
super(InvertibleMM, self).__init__()
self.W = nn.Parameter(torch.Tensor(num_inputs, num_inputs))
nn.init.orthogonal_(self.W)
def forward(self, inputs, cond_inputs=None, mode='direct'):
if mode == 'direct':
return inputs @ self.W, torch.log(torch.abs(torch.det(
self.W))).unsqueeze(0).unsqueeze(0).repeat(inputs.size(0), 1)
else:
return inputs @ torch.inverse(self.W), -torch.log(
torch.abs(torch.det(self.W))).unsqueeze(0).unsqueeze(0).repeat(
inputs.size(0), 1)
class Shuffle(nn.Module):
""" An implementation of a shuffling layer from
Density estimation using Real NVP
(https://arxiv.org/abs/1605.08803).
"""
def __init__(self, num_inputs):
super(Shuffle, self).__init__()
self.perm = np.random.permutation(num_inputs)
self.inv_perm = np.argsort(self.perm)
def forward(self, inputs, cond_inputs=None, mode='direct'):
if mode == 'direct':
return inputs[:, self.perm], torch.zeros(
inputs.size(0), 1, device=inputs.device)
else:
return inputs[:, self.inv_perm], torch.zeros(
inputs.size(0), 1, device=inputs.device)
class Reverse(nn.Module):
""" An implementation of a reversing layer from
Density estimation using Real NVP
(https://arxiv.org/abs/1605.08803).
"""
def __init__(self, num_inputs):
super(Reverse, self).__init__()
self.perm = np.array(np.arange(0, num_inputs)[::-1])
self.inv_perm = np.argsort(self.perm)
def forward(self, inputs, cond_inputs=None, mode='direct'):
if mode == 'direct':
return inputs[:, self.perm], torch.zeros(
inputs.size(0), 1, device=inputs.device)
else:
return inputs[:, self.inv_perm], torch.zeros(
inputs.size(0), 1, device=inputs.device)
class CouplingLayer(nn.Module):
""" An implementation of a coupling layer
from RealNVP (https://arxiv.org/abs/1605.08803).
"""
def __init__(self, num_inputs, num_hidden=64, s_act='tanh', t_act='relu'):
super(CouplingLayer, self).__init__()
self.num_inputs = num_inputs
activations = {
'relu': nn.ReLU,
'sigmoid': nn.Sigmoid,
'tanh': nn.Tanh
}
s_act_func = activations[s_act]
t_act_func = activations[t_act]
self.scale_net = nn.Sequential(
nn.Linear(num_inputs // 2, num_hidden), s_act_func(),
nn.Linear(num_hidden, num_hidden), s_act_func(),
nn.Linear(num_hidden, self.num_inputs - num_inputs // 2))
self.translate_net = nn.Sequential(
nn.Linear(num_inputs // 2, num_hidden), t_act_func(),
nn.Linear(num_hidden, num_hidden), t_act_func(),
nn.Linear(num_hidden, self.num_inputs - num_inputs // 2))
def init(m):
if isinstance(m, nn.Linear):
m.bias.data.fill_(0)
nn.init.orthogonal_(m.weight.data)
def forward(self, inputs, cond_inputs=None, mode='direct'):
if mode == 'direct':
x_a, x_b = inputs.chunk(2, dim=-1)
log_s = self.scale_net(x_b)
t = self.translate_net(x_b)
s = torch.exp(log_s)
y_a = x_a * s + t
y_b = x_b
return torch.cat([y_a, y_b], dim=-1), log_s.sum(-1, keepdim=True)
else:
y_a, y_b = inputs.chunk(2, dim=-1)
log_s, t = self.main(y_b).chunk(2, dim=-1)
s = torch.exp(-log_s)
x_a = (y_a - t) * s
x_b = y_b
return torch.cat([x_a, x_b], dim=-1), -log_s.sum(-1, keepdim=True)
class FlowSequential(nn.Sequential):
""" A sequential container for flows.
In addition to a forward pass it implements a backward pass and
computes log jacobians.
"""
def forward(self, inputs, cond_inputs=None, mode='direct', logdets=None):
""" Performs a forward or backward pass for flow modules.
Args:
inputs: a tuple of inputs and logdets
mode: to run direct computation or inverse
"""
if logdets is None:
logdets = torch.zeros(inputs.size(0), 1, device=inputs.device)
assert mode in ['direct', 'inverse']
if mode == 'direct':
for module in self._modules.values():
inputs, logdet = module(inputs, cond_inputs, mode)
logdets += logdet
else:
for module in reversed(self._modules.values()):
inputs, logdet = module(inputs, cond_inputs, mode)
logdets += logdet
return inputs, logdets
| [
"torch.nn.functional.linear",
"torch.nn.MaskedLinear",
"torch.ones",
"torch.log",
"numpy.arange",
"torch.Tensor",
"torch.det",
"torch.exp",
"torch.nn.init.orthogonal_",
"numpy.argsort",
"torch.cat",
"torch.nn.Linear",
"torch.inverse",
"torch.zeros_like",
"torch.zeros",
"torch.arange",
... | [((950, 986), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (959, 986), True, 'import torch.nn as nn\n'), ((1224, 1290), 'torch.nn.functional.linear', 'F.linear', (['inputs', '(self.linear.weight * self.mask)', 'self.linear.bias'], {}), '(inputs, self.linear.weight * self.mask, self.linear.bias)\n', (1232, 1290), True, 'import torch.nn.functional as F\n'), ((2118, 2186), 'torch.nn.MaskedLinear', 'nn.MaskedLinear', (['num_inputs', 'num_hidden', 'input_mask', 'num_cond_inputs'], {}), '(num_inputs, num_hidden, input_mask, num_cond_inputs)\n', (2133, 2186), True, 'import torch.nn as nn\n'), ((7297, 7324), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['self.W'], {}), '(self.W)\n', (7316, 7324), True, 'import torch.nn as nn\n'), ((8023, 8056), 'numpy.random.permutation', 'np.random.permutation', (['num_inputs'], {}), '(num_inputs)\n', (8044, 8056), True, 'import numpy as np\n'), ((8081, 8102), 'numpy.argsort', 'np.argsort', (['self.perm'], {}), '(self.perm)\n', (8091, 8102), True, 'import numpy as np\n'), ((8765, 8786), 'numpy.argsort', 'np.argsort', (['self.perm'], {}), '(self.perm)\n', (8775, 8786), True, 'import numpy as np\n'), ((370, 395), 'torch.arange', 'torch.arange', (['in_features'], {}), '(in_features)\n', (382, 395), False, 'import torch\n'), ((446, 471), 'torch.arange', 'torch.arange', (['in_features'], {}), '(in_features)\n', (458, 471), False, 'import torch\n'), ((632, 658), 'torch.arange', 'torch.arange', (['out_features'], {}), '(out_features)\n', (644, 658), False, 'import torch\n'), ((1059, 1112), 'torch.nn.Linear', 'nn.Linear', (['cond_in_features', 'out_features'], {'bias': '(False)'}), '(cond_in_features, out_features, bias=False)\n', (1068, 1112), True, 'import torch.nn as nn\n'), ((2260, 2312), 'torch.nn.MaskedLinear', 'nn.MaskedLinear', (['num_hidden', 'num_hidden', 'hidden_mask'], {}), '(num_hidden, num_hidden, hidden_mask)\n', (2275, 2312), True, 'import torch.nn as nn\n'), 
((2350, 2406), 'torch.nn.MaskedLinear', 'nn.MaskedLinear', (['num_hidden', '(num_inputs * 2)', 'output_mask'], {}), '(num_hidden, num_inputs * 2, output_mask)\n', (2365, 2406), True, 'import torch.nn as nn\n'), ((2719, 2743), 'torch.zeros_like', 'torch.zeros_like', (['inputs'], {}), '(inputs)\n', (2735, 2743), False, 'import torch\n'), ((4102, 4125), 'torch.zeros', 'torch.zeros', (['num_inputs'], {}), '(num_inputs)\n', (4113, 4125), False, 'import torch\n'), ((4160, 4183), 'torch.zeros', 'torch.zeros', (['num_inputs'], {}), '(num_inputs)\n', (4171, 4183), False, 'import torch\n'), ((4287, 4310), 'torch.zeros', 'torch.zeros', (['num_inputs'], {}), '(num_inputs)\n', (4298, 4310), False, 'import torch\n'), ((4356, 4378), 'torch.ones', 'torch.ones', (['num_inputs'], {}), '(num_inputs)\n', (4366, 4378), False, 'import torch\n'), ((6146, 6168), 'torch.ones', 'torch.ones', (['num_inputs'], {}), '(num_inputs)\n', (6156, 6168), False, 'import torch\n'), ((6203, 6226), 'torch.zeros', 'torch.zeros', (['num_inputs'], {}), '(num_inputs)\n', (6214, 6226), False, 'import torch\n'), ((7251, 7287), 'torch.Tensor', 'torch.Tensor', (['num_inputs', 'num_inputs'], {}), '(num_inputs, num_inputs)\n', (7263, 7287), False, 'import torch\n'), ((9686, 9724), 'torch.nn.Linear', 'nn.Linear', (['(num_inputs // 2)', 'num_hidden'], {}), '(num_inputs // 2, num_hidden)\n', (9695, 9724), True, 'import torch.nn as nn\n'), ((9752, 9785), 'torch.nn.Linear', 'nn.Linear', (['num_hidden', 'num_hidden'], {}), '(num_hidden, num_hidden)\n', (9761, 9785), True, 'import torch.nn as nn\n'), ((9813, 9869), 'torch.nn.Linear', 'nn.Linear', (['num_hidden', '(self.num_inputs - num_inputs // 2)'], {}), '(num_hidden, self.num_inputs - num_inputs // 2)\n', (9822, 9869), True, 'import torch.nn as nn\n'), ((9927, 9965), 'torch.nn.Linear', 'nn.Linear', (['(num_inputs // 2)', 'num_hidden'], {}), '(num_inputs // 2, num_hidden)\n', (9936, 9965), True, 'import torch.nn as nn\n'), ((9993, 10026), 'torch.nn.Linear', 
'nn.Linear', (['num_hidden', 'num_hidden'], {}), '(num_hidden, num_hidden)\n', (10002, 10026), True, 'import torch.nn as nn\n'), ((10054, 10110), 'torch.nn.Linear', 'nn.Linear', (['num_hidden', '(self.num_inputs - num_inputs // 2)'], {}), '(num_hidden, self.num_inputs - num_inputs // 2)\n', (10063, 10110), True, 'import torch.nn as nn\n'), ((10500, 10516), 'torch.exp', 'torch.exp', (['log_s'], {}), '(log_s)\n', (10509, 10516), False, 'import torch\n'), ((10780, 10797), 'torch.exp', 'torch.exp', (['(-log_s)'], {}), '(-log_s)\n', (10789, 10797), False, 'import torch\n'), ((550, 576), 'torch.arange', 'torch.arange', (['out_features'], {}), '(out_features)\n', (562, 576), False, 'import torch\n'), ((2627, 2640), 'torch.exp', 'torch.exp', (['(-a)'], {}), '(-a)\n', (2636, 2640), False, 'import torch\n'), ((3363, 3395), 'torch.log', 'torch.log', (['(inputs / (1 - inputs))'], {}), '(inputs / (1 - inputs))\n', (3372, 3395), False, 'import torch\n'), ((5660, 5685), 'torch.exp', 'torch.exp', (['self.log_gamma'], {}), '(self.log_gamma)\n', (5669, 5685), False, 'import torch\n'), ((8709, 8733), 'numpy.arange', 'np.arange', (['(0)', 'num_inputs'], {}), '(0, num_inputs)\n', (8718, 8733), True, 'import numpy as np\n'), ((10228, 10262), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['m.weight.data'], {}), '(m.weight.data)\n', (10247, 10262), True, 'import torch.nn as nn\n'), ((10589, 10618), 'torch.cat', 'torch.cat', (['[y_a, y_b]'], {'dim': '(-1)'}), '([y_a, y_b], dim=-1)\n', (10598, 10618), False, 'import torch\n'), ((10871, 10900), 'torch.cat', 'torch.cat', (['[x_a, x_b]'], {'dim': '(-1)'}), '([x_a, x_b], dim=-1)\n', (10880, 10900), False, 'import torch\n'), ((5251, 5276), 'torch.exp', 'torch.exp', (['self.log_gamma'], {}), '(self.log_gamma)\n', (5260, 5276), False, 'import torch\n'), ((6615, 6637), 'torch.exp', 'torch.exp', (['self.weight'], {}), '(self.weight)\n', (6624, 6637), False, 'import torch\n'), ((7606, 7627), 'torch.inverse', 'torch.inverse', (['self.W'], {}), 
'(self.W)\n', (7619, 7627), False, 'import torch\n'), ((2939, 2961), 'torch.exp', 'torch.exp', (['a[:, i_col]'], {}), '(a[:, i_col])\n', (2948, 2961), False, 'import torch\n'), ((6775, 6798), 'torch.exp', 'torch.exp', (['(-self.weight)'], {}), '(-self.weight)\n', (6784, 6798), False, 'import torch\n'), ((3398, 3429), 'torch.log', 'torch.log', (['(inputs - inputs ** 2)'], {}), '(inputs - inputs ** 2)\n', (3407, 3429), False, 'import torch\n'), ((5343, 5357), 'torch.log', 'torch.log', (['var'], {}), '(var)\n', (5352, 5357), False, 'import torch\n'), ((5777, 5791), 'torch.log', 'torch.log', (['var'], {}), '(var)\n', (5786, 5791), False, 'import torch\n'), ((7475, 7492), 'torch.det', 'torch.det', (['self.W'], {}), '(self.W)\n', (7484, 7492), False, 'import torch\n'), ((7667, 7684), 'torch.det', 'torch.det', (['self.W'], {}), '(self.W)\n', (7676, 7684), False, 'import torch\n')] |
import os
import numpy as np
import flask
import pickle
from flask import Flask, render_template, request
# def create_app(test_config=None):
# # create and configure the app
# app = Flask(__name__, instance_relative_config=True)
# app.config.from_mapping(
# SECRET_KEY='dev',
# DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
# )
# if test_config is None:
# # load the instance config, if it exists, when not testing
# app.config.from_pyfile('config.py', silent=True)
# else:
# # load the test config if passed in
# app.config.from_mapping(test_config)
# # ensure the instance folder exists
# try:
# os.makedirs(app.instance_path)
# except OSError:
# pass
# # a simple page that says hello
# @app.route('/')
# @app.route('/index')
# def index():
# return flask.render_template('index.html')
# def ValuePredictor(to_predict_list):
# to_predict = np.array(to_predict_list).reshape(1,12)
# loaded_model = pickle.load(open("model.pkl","rb"))
# result = loaded_model.predict(to_predict)
# return result[0]
# @app.route('/result',methods = ['POST', 'GET'])
# def result():
# if request.method == 'POST':
# to_predict_list = request.form.to_dict()
# to_predict_list=list(to_predict_list.values())
# to_predict_list = list(map(int, to_predict_list))
# result = ValuePredictor(to_predict_list)
# if int(result)==1:
# prediction='Income more than 50K'
# else:
# prediction='Income less that 50K'
# return render_template("result.html",prediction=prediction)
# return app
app=Flask(__name__)
@app.route('/')
@app.route('/index')
def index():
return flask.render_template('index.html')
def ValuePredictor(to_predict_list):
to_predict = np.array(to_predict_list).reshape(1,12)
loaded_model = pickle.load(open("model1.pkl","rb"))
result = loaded_model.predict(to_predict)
return result[0]
@app.route('/result',methods = ['POST'])
def result():
if request.method == 'POST':
to_predict_list = request.form.to_dict()
to_predict_list=list(to_predict_list.values())
to_predict_list = list(map(int, to_predict_list))
result = ValuePredictor(to_predict_list)
if int(result)==1:
prediction='Income more than 50K'
else:
prediction='Income less that 50K'
return render_template("result.html",prediction=prediction)
# if __name__ == '__main__':
# app.debug = True
# app.run() | [
"flask.render_template",
"numpy.array",
"flask.request.form.to_dict",
"flask.Flask"
] | [((1795, 1810), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1800, 1810), False, 'from flask import Flask, render_template, request\n'), ((1874, 1909), 'flask.render_template', 'flask.render_template', (['"""index.html"""'], {}), "('index.html')\n", (1895, 1909), False, 'import flask\n'), ((2244, 2266), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {}), '()\n', (2264, 2266), False, 'from flask import Flask, render_template, request\n'), ((2577, 2630), 'flask.render_template', 'render_template', (['"""result.html"""'], {'prediction': 'prediction'}), "('result.html', prediction=prediction)\n", (2592, 2630), False, 'from flask import Flask, render_template, request\n'), ((1966, 1991), 'numpy.array', 'np.array', (['to_predict_list'], {}), '(to_predict_list)\n', (1974, 1991), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- Coding: utf-8 -*-
import sys
import os
import signal
import pickle
import gensim
import numpy as np
signal.signal(signal.SIGINT, signal.SIG_DFL)
num_clusters = int(sys.argv[1])
dimension = int(sys.argv[2])
wikiFile = sys.argv[3]
baseFile = os.path.splitext(wikiFile)[0]
idfFile = baseFile + '.idf'
modelFile = baseFile + '.vec'
probaFile = baseFile + '.proba'
probaVecFile = baseFile + '.pvec'
model = gensim.models.KeyedVectors.load_word2vec_format(modelFile, binary=False)
idx_proba_dict = pickle.load(open(probaFile, 'rb'))
word_idf_dict = pickle.load(open(idfFile, 'rb'))
proba_wordvecs = {}
for word in idx_proba_dict:
proba_wordvecs[word] = np.zeros(num_clusters * dimension, dtype=np.float32)
if word in model and word in idx_proba_dict and word in word_idf_dict:
for index in range(0, num_clusters):
proba_wordvecs[word][index*dimension:(index+1)*dimension] = model[word] * idx_proba_dict[word][index] * word_idf_dict[word]
with open(probaVecFile, 'wb') as f:
pickle.dump(proba_wordvecs, f)
| [
"signal.signal",
"pickle.dump",
"os.path.splitext",
"gensim.models.KeyedVectors.load_word2vec_format",
"numpy.zeros"
] | [((131, 175), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal.SIG_DFL'], {}), '(signal.SIGINT, signal.SIG_DFL)\n', (144, 175), False, 'import signal\n'), ((436, 508), 'gensim.models.KeyedVectors.load_word2vec_format', 'gensim.models.KeyedVectors.load_word2vec_format', (['modelFile'], {'binary': '(False)'}), '(modelFile, binary=False)\n', (483, 508), False, 'import gensim\n'), ((273, 299), 'os.path.splitext', 'os.path.splitext', (['wikiFile'], {}), '(wikiFile)\n', (289, 299), False, 'import os\n'), ((684, 736), 'numpy.zeros', 'np.zeros', (['(num_clusters * dimension)'], {'dtype': 'np.float32'}), '(num_clusters * dimension, dtype=np.float32)\n', (692, 736), True, 'import numpy as np\n'), ((1020, 1050), 'pickle.dump', 'pickle.dump', (['proba_wordvecs', 'f'], {}), '(proba_wordvecs, f)\n', (1031, 1050), False, 'import pickle\n')] |
import os
from flask import *
from werkzeug.utils import secure_filename
from flask_restful import Api, Resource
from flask_cors import CORS, cross_origin
from fastai.vision.all import *
from PIL import Image
import numpy as np
import base64
from io import BytesIO
import json
import logging
app = Flask(__name__)
CORS(app)
api = Api(app)
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.DEBUG)
model = load_learner('model/model_v0.pkl')
UPLOAD_FOLDER = 'uploads'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
ALLOWED_EXTENSIONS = (['png', 'jpg', 'jpeg'])
def is_allowed_filename(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@cross_origin()
@app.route('/upload', methods=['POST'])
def upload():
if 'file' not in request.files:
image = json.dumps(request.get_json())
im = Image.open(BytesIO(base64.b64decode(image.split(',')[1])))
im.save('image.png')
image_np = np.array(im)
image_without_alpha = image_np[:, :, :3]
is_clean, _, probs = model.predict(image_without_alpha)
prob = float(list(probs.numpy())[1])
return {"is_clean": is_clean, "predictedVal": prob}
file = request.files['file']
if file.filename == '':
resp = jsonify({'message': 'No file selected for uploading'})
resp.status_code = 400
return resp
if file and is_allowed_filename(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
path = UPLOAD_FOLDER + '/' + filename
resp = predict(path)
resp.status_code = 201
return resp
else:
resp = jsonify({'message': 'Allowed file types are png, jpg, jpeg'})
resp.status_code = 400
return resp
@cross_origin()
def predict(img_path):
img = Image.open(img_path)
print(img)
img_np = np.array(img)
is_clean, _ ,probs = model.predict(img_np)
prob = float(list(probs.numpy())[1])
return {"is_clean": is_clean , "predictedVal": prob}
if __name__ == "__main__":
app.run(debug=True)
| [
"logging.StreamHandler",
"PIL.Image.open",
"flask_cors.CORS",
"flask_restful.Api",
"os.path.join",
"flask_cors.cross_origin",
"numpy.array",
"werkzeug.utils.secure_filename"
] | [((316, 325), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (320, 325), False, 'from flask_cors import CORS, cross_origin\n'), ((332, 340), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (335, 340), False, 'from flask_restful import Api, Resource\n'), ((718, 732), 'flask_cors.cross_origin', 'cross_origin', ([], {}), '()\n', (730, 732), False, 'from flask_cors import CORS, cross_origin\n'), ((1708, 1722), 'flask_cors.cross_origin', 'cross_origin', ([], {}), '()\n', (1720, 1722), False, 'from flask_cors import CORS, cross_origin\n'), ((363, 396), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (384, 396), False, 'import logging\n'), ((1753, 1773), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1763, 1773), False, 'from PIL import Image\n'), ((1796, 1809), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1804, 1809), True, 'import numpy as np\n'), ((963, 975), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (971, 975), True, 'import numpy as np\n'), ((1391, 1421), 'werkzeug.utils.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (1406, 1421), False, 'from werkzeug.utils import secure_filename\n'), ((1434, 1485), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'filename'], {}), "(app.config['UPLOAD_FOLDER'], filename)\n", (1446, 1485), False, 'import os\n')] |
#!/usr/bin/env python
# coding=utf8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""BFE 논문의 성능 비교를 위해 CUB BIRD200-2011 를 생성한다."""
# Translation of the string above: "Generates CUB BIRD200-2011 for the
# performance comparison in the BFE paper."  Note it is not a real module
# docstring (it follows the __future__ imports), so it acts as a comment.
import os
import random
import argparse
import sys
# Allow running this script directly from either the repository root or a
# sub-directory by adding both to the module search path.
sys.path.append('./')
sys.path.append('../')
from datetime import datetime
from datasets import dataset_utils
from datasets import image_coder as coder
from utils import data_util
import tensorflow as tf
import numpy as np
# Just disables the warning, doesn't enable AVX/FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Total number of bird classes in CUB-200-2011.
_NUM_CLASSES = 200
# Per the BFE paper protocol the first half of the classes is the training
# split and the second half is the validation/test split.
_TRAIN_CLASS_RANGE = list(range(_NUM_CLASSES // 2))
_VALIDATION_CLASS_RANGE = list(range(_NUM_CLASSES // 2, _NUM_CLASSES))
def _write_label_id_to_name(name, data_dir, id_to_name):
    """Write one '<label-id>:<label-name>' line per class to '<name>_labels.txt'.

    Lines are emitted in ascending label-id order so the output file is
    stable across runs.
    """
    label_file = os.path.join(data_dir, '%s_labels.txt' % name)
    lines = ['%d:%s\n' % (label_id, id_to_name[label_id])
             for label_id in sorted(id_to_name)]
    with open(label_file, 'w') as fp:
        fp.writelines(lines)
def _get_bbox_info(input_dir):
    """Load CUB bounding boxes keyed by image file id (basename, no extension).

    Reads 'images.txt' (<image_id> <relative_path>) and 'bounding_boxes.txt'
    (<image_id> <x> <y> <w> <h>) from `input_dir` and returns a dict mapping
    each image's file id to an integer [xmin, ymin, xmax, ymax] box
    (coordinates are truncated toward zero).
    """
    id_to_path = {}
    with open(os.path.join(input_dir, 'images.txt')) as images_fp:
        for record in images_fp:
            fields = record.strip().split()
            id_to_path[fields[0]] = fields[1]
    bbox_info = {}
    with open(os.path.join(input_dir, 'bounding_boxes.txt')) as boxes_fp:
        for record in boxes_fp:
            image_id, x, y, w, h = record.strip().split()
            xmin, ymin = float(x), float(y)
            xmax, ymax = xmin + float(w), ymin + float(h)
            # Key by the file basename without its extension so the lookup
            # matches the ids produced during TFRecord conversion.
            file_id = os.path.splitext(os.path.basename(id_to_path[image_id]))[0]
            bbox_info[file_id] = [int(xmin), int(ymin), int(xmax), int(ymax)]
    return bbox_info
def _find_image_files(name, data_dir):
    """
    Build a list of all image files and labels in the data set.

    :param name: string, split identifier; must be 'train' or 'validation'.
    :param data_dir: string, path to the root directory of images; one
        sub-directory per class, scanned in sorted order.
    :return:
        filenames: list of strings; each string is a path to an image file.
        labels: list of integers; each integer is the ground-truth label of
            the corresponding entry in `filenames`.
        id_to_name: dict mapping integer label ids to class directory names.
        total: total number of images that was found inside data_dir.
    """
    print('Determining list of input files and labels from %s.' % data_dir)
    # Following the BFE paper, classes 0-99 are used for training and
    # classes 100-199 for validation/testing.
    if name == 'train':
        label_index_range = _TRAIN_CLASS_RANGE
    elif name == 'validation':
        label_index_range = _VALIDATION_CLASS_RANGE
    else:
        raise ValueError('Invalid index range')
    labels = []
    filenames = []
    total = 0
    label_index = 0
    id_to_name = {}
    for label_name in sorted(os.listdir(data_dir)):
        filenames_in_label = []
        labels_in_label = []
        # Skip class directories that belong to the other split.
        if label_index not in label_index_range:
            label_index += 1
            continue
        path = os.path.join(data_dir, label_name)
        if os.path.isdir(path):
            image_file_path = '%s/*' % (path)
            matching_files = tf.gfile.Glob(image_file_path)
            id_to_name[label_index] = label_name
            total += len(matching_files)
            labels_in_label.extend([label_index] * len(matching_files))
            filenames_in_label.extend(matching_files)
            # NOTE(review): label_index only advances for directory entries,
            # so a stray plain file in data_dir would not consume an index.
            label_index += 1
        # Shuffle only the images within this label; the fixed seed makes
        # the order reproducible across runs.
        shuffled_index = list(range(len(filenames_in_label)))
        random.seed(12345)
        random.shuffle(shuffled_index)
        filenames_in_label = [filenames_in_label[i] for i in shuffled_index]
        labels_in_label = [labels_in_label[i] for i in shuffled_index]
        filenames.extend(filenames_in_label)
        labels.extend(labels_in_label)
    print('Found %d image files across %d labels inside %s.' % (total, label_index, data_dir))
    # Final deterministic shuffle across all collected files and labels.
    shuffled_index = list(range(len(filenames)))
    random.seed(12345)
    random.shuffle(shuffled_index)
    filenames = [filenames[i] for i in shuffled_index]
    labels = [labels[i] for i in shuffled_index]
    return filenames, labels, id_to_name, total
def _process_image(filename, bbox=None):
    """Read an image file and optionally crop it to a bounding box.

    :param filename: string, path of the image to read,
        e.g. '/path/to/example.JPG'.
    :param bbox: [xmin, ymin, xmax, ymax] crop box, or None for no crop.
    :return:
        image_data: JPEG bytes (re-encoded when a crop was applied).
        height: image height (crop height when a bbox was given).
        width: image width (crop width when a bbox was given).
        image_format: image format string ('jpg' when a crop was applied).
    """
    with tf.gfile.GFile(filename, 'rb') as image_file:
        raw_data = image_file.read()
    fmt = dataset_utils.get_image_file_format(filename)
    # Decode unconditionally so invalid images fail here, even without bbox.
    decoded = coder.decode_jpg(raw_data)
    img_height = decoded.shape[0]
    img_width = decoded.shape[1]
    if bbox is None:
        return raw_data, img_height, img_width, fmt
    # Convert [xmin, ymin, xmax, ymax] into the coder's [y, x, h, w] layout.
    xmin, ymin, xmax, ymax = bbox
    window = [ymin, xmin, ymax - ymin, xmax - xmin]
    # Clip the window so it never extends past the image borders; the crop
    # operation fails when the box overruns the frame.
    height_overrun = window[0] + window[2] - img_height
    width_overrun = window[1] + window[3] - img_width
    if height_overrun > 0:
        window[2] -= height_overrun
    if width_overrun > 0:
        window[3] -= width_overrun
    assert window[2] > 0
    assert window[3] > 0
    cropped = coder.crop_bbox(decoded, window)
    return coder.encode_jpg(cropped), window[2], window[3], 'jpg'
def _process_image_files_batch(thread_index, offsets, output_filenames, filenames, labels, bbox_info):
    """
    Convert a batch of images into TFRecord shards within a single thread.

    :param thread_index: index of the worker thread doing the conversion.
    :param offsets: list of (start, end) offset pairs into
        `filenames`/`labels`, one pair per shard assigned to this thread.
    :param output_filenames: list of output shard file names, parallel to
        `offsets`.
    :param filenames: full list of image file paths to process.
    :param labels: full list of integer image labels to process.
    :param bbox_info: dict mapping file ids to [xmin, ymin, xmax, ymax]
        boxes, or None to skip cropping.
    """
    assert len(offsets) == len(output_filenames)
    assert len(filenames) == len(labels)
    num_files_in_thread = offsets[-1][1] - offsets[0][0]
    counter = 0
    # A single thread may be assigned several shards.
    for offset, output_filename in zip(offsets, output_filenames):
        output_file = os.path.join(FLAGS.output_dir, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)
        # `offset` holds this shard's (start, end) index range.
        files_in_shard = np.arange(offset[0], offset[1], dtype=int)
        shard_counter = 0
        for i in files_in_shard:
            filename = filenames[i]
            label = labels[i]
            file_id = os.path.splitext(os.path.basename(filename))[0]
            if bbox_info is None:
                bbox = None
            else:
                bbox = bbox_info[file_id]
            # NOTE(review): the disabled try/except below used to skip
            # unreadable images; with it off, a bad image aborts the thread.
            # try:
            image_data, height, width, image_format = _process_image(filename, bbox)
            # except ValueError:
            #     dataset_utils.log('[thread %2d]: Invalid image found. %s - [skip].' % (thread_index, filename))
            #     continue
            # NOTE(review): image_format is unused; the example always
            # records 'jpg' — confirm this is intended.
            example = data_util.convert_to_example_without_bbox(image_data, 'jpg', label, height, width)
            writer.write(example.SerializeToString())
            counter += 1
            shard_counter += 1
            if not counter % 1000:
                dataset_utils.log('%s [thread %2d]: Processed %d of %d images in thread batch.' %
                                  (datetime.now(), thread_index, counter, num_files_in_thread))
        writer.close()
        dataset_utils.log('%s [thread %2d]: Wrote %d images to %s' %
                          (datetime.now(), thread_index, shard_counter, output_file))
def _process_dataset(name, filenames, labels, bbox_info, num_shards=128):
    """Shard a split's images across worker threads and write TFRecords.

    Args:
        name: split identifier embedded in shard filenames (e.g. 'train').
        filenames: full list of image paths for the split.
        labels: integer label per image, parallel to `filenames`.
        bbox_info: optional mapping of file id -> bounding box, or None.
        num_shards: total number of output shards for the split.
    """
    assert len(filenames) == len(labels)
    num_images = len(filenames)
    per_thread_offsets = dataset_utils.make_shard_offsets(num_images, FLAGS.num_threads, num_shards)
    per_thread_outputs = dataset_utils.make_shard_filenames(name, num_images, FLAGS.num_threads, num_shards)

    def _run_one_thread(thread_index):
        # Each worker processes only its own slice of shards.
        _process_image_files_batch(thread_index,
                                   per_thread_offsets[thread_index],
                                   per_thread_outputs[thread_index],
                                   filenames, labels, bbox_info)

    dataset_utils.thread_execute(FLAGS.num_threads, _run_one_thread)
    dataset_utils.log('%s: Finished writing all %d images in data set.' % (datetime.now(), len(filenames)))
def main(unused_argv):
    """Entry point: convert the Naver-Food image tree into TFRecord shards.

    Requires --data_dir and --output_dir; prints usage and returns otherwise.
    Writes 128 shards for the train split and 16 for validation.
    """
    if FLAGS.data_dir is None or FLAGS.output_dir is None:
        parser.print_help()
        return
    dataset_utils.log('Make Naver-Food TFRecord dataset by label.')
    if not os.path.exists(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)
    bbox_info = None
    if FLAGS.use_bbox:
        bbox_info = _get_bbox_info(FLAGS.data_dir)
        dataset_utils.log(' - Use bounding box info. (opt. ON)')
    source_dir = os.path.join(FLAGS.data_dir, 'images')
    train_files, train_labels, id_to_name, total = _find_image_files('train', source_dir)
    dataset_utils.log('Convert [train] dataset.')
    _process_dataset('train', train_files, train_labels, bbox_info, 128)
    val_files, val_labels, id_to_name, total = _find_image_files('validation', source_dir)
    dataset_utils.log('Convert [validation] dataset.')
    _process_dataset('validation', val_files, val_labels, bbox_info, 16)
# Command-line interface for the conversion script.
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data_dir', type=str, default=None, help='Input data directory.')
parser.add_argument('-o', '--output_dir', type=str, default=None, help='Output data directory.')
parser.add_argument('--num_threads', type=int, default=16, help='Number of threads to preprocess the images.')
# str2bool lets the flag be passed bare (--use_bbox) or with an explicit value.
parser.add_argument(
    '--use_bbox', type=dataset_utils.str2bool, nargs='?', const=True, default=False,
    help='Whether to use bounding boxes or not.')
if __name__ == '__main__':
    # Unrecognized args are forwarded to tf.app.run so TensorFlow flags still work.
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(argv=[sys.argv[0]] + unparsed)
| [
"utils.data_util.convert_to_example_without_bbox",
"datasets.image_coder.decode_jpg",
"tensorflow.gfile.GFile",
"sys.path.append",
"numpy.arange",
"tensorflow.app.run",
"datasets.dataset_utils.thread_execute",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"os.path.isdir",
"tensorf... | [((248, 269), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (263, 269), False, 'import sys\n'), ((270, 292), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (285, 292), False, 'import sys\n'), ((9187, 9212), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9210, 9212), False, 'import argparse\n'), ((795, 834), 'os.path.join', 'os.path.join', (['data_dir', 'output_filename'], {}), '(data_dir, output_filename)\n', (807, 834), False, 'import os\n'), ((1008, 1053), 'os.path.join', 'os.path.join', (['input_dir', '"""bounding_boxes.txt"""'], {}), "(input_dir, 'bounding_boxes.txt')\n", (1020, 1053), False, 'import os\n'), ((1071, 1108), 'os.path.join', 'os.path.join', (['input_dir', '"""images.txt"""'], {}), "(input_dir, 'images.txt')\n", (1083, 1108), False, 'import os\n'), ((3671, 3689), 'random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (3682, 3689), False, 'import random\n'), ((3692, 3722), 'random.shuffle', 'random.shuffle', (['shuffled_index'], {}), '(shuffled_index)\n', (3706, 3722), False, 'import random\n'), ((8089, 8152), 'datasets.dataset_utils.thread_execute', 'dataset_utils.thread_execute', (['FLAGS.num_threads', '_process_batch'], {}), '(FLAGS.num_threads, _process_batch)\n', (8117, 8152), False, 'from datasets import dataset_utils\n'), ((8382, 8445), 'datasets.dataset_utils.log', 'dataset_utils.log', (['"""Make Naver-Food TFRecord dataset by label."""'], {}), "('Make Naver-Food TFRecord dataset by label.')\n", (8399, 8445), False, 'from datasets import dataset_utils\n'), ((8699, 8737), 'os.path.join', 'os.path.join', (['FLAGS.data_dir', '"""images"""'], {}), "(FLAGS.data_dir, 'images')\n", (8711, 8737), False, 'import os\n'), ((8833, 8878), 'datasets.dataset_utils.log', 'dataset_utils.log', (['"""Convert [train] dataset."""'], {}), "('Convert [train] dataset.')\n", (8850, 8878), False, 'from datasets import dataset_utils\n'), ((9050, 9100), 
'datasets.dataset_utils.log', 'dataset_utils.log', (['"""Convert [validation] dataset."""'], {}), "('Convert [validation] dataset.')\n", (9067, 9100), False, 'from datasets import dataset_utils\n'), ((9745, 9786), 'tensorflow.app.run', 'tf.app.run', ([], {'argv': '([sys.argv[0]] + unparsed)'}), '(argv=[sys.argv[0]] + unparsed)\n', (9755, 9786), True, 'import tensorflow as tf\n'), ((2587, 2607), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (2597, 2607), False, 'import os\n'), ((2759, 2793), 'os.path.join', 'os.path.join', (['data_dir', 'label_name'], {}), '(data_dir, label_name)\n', (2771, 2793), False, 'import os\n'), ((2801, 2820), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2814, 2820), False, 'import os\n'), ((4194, 4224), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['filename', '"""rb"""'], {}), "(filename, 'rb')\n", (4208, 4224), True, 'import tensorflow as tf\n'), ((4276, 4321), 'datasets.dataset_utils.get_image_file_format', 'dataset_utils.get_image_file_format', (['filename'], {}), '(filename)\n', (4311, 4321), False, 'from datasets import dataset_utils\n'), ((4346, 4374), 'datasets.image_coder.decode_jpg', 'coder.decode_jpg', (['image_data'], {}), '(image_data)\n', (4362, 4374), True, 'from datasets import image_coder as coder\n'), ((5076, 5111), 'datasets.image_coder.crop_bbox', 'coder.crop_bbox', (['image', 'crop_window'], {}), '(image, crop_window)\n', (5091, 5111), True, 'from datasets import image_coder as coder\n'), ((5129, 5152), 'datasets.image_coder.encode_jpg', 'coder.encode_jpg', (['image'], {}), '(image)\n', (5145, 5152), True, 'from datasets import image_coder as coder\n'), ((5959, 6006), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', 'output_filename'], {}), '(FLAGS.output_dir, output_filename)\n', (5971, 6006), False, 'import os\n'), ((6020, 6060), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['output_file'], {}), '(output_file)\n', (6047, 6060), True, 'import tensorflow 
as tf\n'), ((6143, 6185), 'numpy.arange', 'np.arange', (['offset[0]', 'offset[1]'], {'dtype': 'int'}), '(offset[0], offset[1], dtype=int)\n', (6152, 6185), True, 'import numpy as np\n'), ((8456, 8488), 'os.path.exists', 'os.path.exists', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (8470, 8488), False, 'import os\n'), ((8494, 8523), 'os.makedirs', 'os.makedirs', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (8505, 8523), False, 'import os\n'), ((8597, 8653), 'datasets.dataset_utils.log', 'dataset_utils.log', (['""" - Use bounding box info. (opt. ON)"""'], {}), "(' - Use bounding box info. (opt. ON)')\n", (8614, 8653), False, 'from datasets import dataset_utils\n'), ((2885, 2915), 'tensorflow.gfile.Glob', 'tf.gfile.Glob', (['image_file_path'], {}), '(image_file_path)\n', (2898, 2915), True, 'import tensorflow as tf\n'), ((3245, 3263), 'random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (3256, 3263), False, 'import random\n'), ((3270, 3300), 'random.shuffle', 'random.shuffle', (['shuffled_index'], {}), '(shuffled_index)\n', (3284, 3300), False, 'import random\n'), ((6712, 6798), 'utils.data_util.convert_to_example_without_bbox', 'data_util.convert_to_example_without_bbox', (['image_data', '"""jpg"""', 'label', 'height', 'width'], {}), "(image_data, 'jpg', label, height,\n width)\n", (6753, 6798), False, 'from utils import data_util\n'), ((8226, 8240), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8238, 8240), False, 'from datetime import datetime\n'), ((1635, 1661), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (1651, 1661), False, 'import os\n'), ((6325, 6351), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (6341, 6351), False, 'import os\n'), ((7203, 7217), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7215, 7217), False, 'from datetime import datetime\n'), ((7034, 7048), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7046, 7048), False, 'from 
datetime import datetime\n')] |
# Create by Packetsss
# Personal use is allowed
# Commercial use is prohibited
import cv2
import numpy as np
# Open the default camera (index 0) via the DirectShow backend.
capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)

# HSV bounds for "blue" pixels -- loop-invariant, so build them once up front
# instead of re-allocating the arrays on every frame.
lower_blue = np.array([90, 50, 50])
upper_blue = np.array([130, 255, 255])

while True:
    _, frame = capture.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # mask is a single-channel 0/255 image marking the in-range (blue) pixels.
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    # Bitwise AND of the frame with itself, gated by the mask:
    # keeps only the pixels where the mask is set, blacks out the rest.
    result = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow("frame", result)
    # Quit on 'q'.
    if cv2.waitKey(1) == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()
| [
"cv2.inRange",
"cv2.bitwise_and",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.waitKey"
] | [((122, 156), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)', 'cv2.CAP_DSHOW'], {}), '(0, cv2.CAP_DSHOW)\n', (138, 156), False, 'import cv2\n'), ((780, 803), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (801, 803), False, 'import cv2\n'), ((274, 312), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (286, 312), False, 'import cv2\n'), ((330, 352), 'numpy.array', 'np.array', (['[90, 50, 50]'], {}), '([90, 50, 50])\n', (338, 352), True, 'import numpy as np\n'), ((370, 395), 'numpy.array', 'np.array', (['[130, 255, 255]'], {}), '([130, 255, 255])\n', (378, 395), True, 'import numpy as np\n'), ((408, 448), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_blue', 'upper_blue'], {}), '(hsv, lower_blue, upper_blue)\n', (419, 448), False, 'import cv2\n'), ((541, 581), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (556, 581), False, 'import cv2\n'), ((684, 711), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'result'], {}), "('frame', result)\n", (694, 711), False, 'import cv2\n'), ((719, 733), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (730, 733), False, 'import cv2\n')] |
import numpy as np
import torch
import torch.utils.data as data
import glob
# import tifffile as tiff
# from torchvision import transforms as T
def is_image_file(filename):
    """Return True if `filename` ends in a supported array/image extension.

    Accepted extensions: .tif, .png, .jpg, .npy.
    """
    # str.endswith accepts a tuple of suffixes -- one C-level call instead of
    # a generator over per-extension checks.
    return filename.endswith((".tif", '.png', '.jpg', '.npy'))
class train_dataset(data.Dataset):
    """Dataset pairing image arrays (imgs/*.npy) with mask arrays (masks/*.npy).

    Each image file is loaded as a (C, H, W) array and transposed to (H, W, C)
    before the optional `transform` is applied.  Images and masks are matched
    by sorted filename order.

    Bug fixed vs. the original: with the default ``transform=None`` the loaders
    called ``.numpy()`` on a plain numpy array and crashed with AttributeError;
    the conversion is now applied only to the transform's (tensor) output.
    """

    def __init__(self, data_path='', size_w=256, size_h=256, flip=0,
                 batch_size=1, transform=None):
        super().__init__()
        # Sorted so images and masks line up pairwise by filename.
        self.src_list = np.array(sorted(glob.glob(data_path + 'imgs/' + '*.npy')))
        self.lab_list = np.array(sorted(glob.glob(data_path + 'masks/' + '*.npy')))
        self.data_path = data_path
        self.size_w = size_w
        self.size_h = size_h
        self.flip = flip
        self.index = 0  # running count of samples loaded so far
        self.batch_size = batch_size
        self.transform = transform

    def _build_loader(self, src_paths, lab_paths):
        """Load the given image/mask files and wrap them in a DataLoader.

        Returns (None, None) if any file cannot be read (OSError), preserving
        the original error contract of `data_iter` / `data_iter_index`.
        """
        data_series = []
        label_series = []
        try:
            for src, lab in zip(src_paths, lab_paths):
                im = np.load(src).transpose([1, 2, 0])  # (C,H,W) -> (H,W,C)
                if self.transform:
                    # Transforms (e.g. torchvision) return a torch tensor;
                    # convert back to numpy for uniform stacking below.
                    im = self.transform(im).numpy()
                data_series.append(im)
                label_series.append(np.load(lab))
                self.index += 1
        except OSError:
            return None, None
        xs = torch.from_numpy(np.array(data_series)).type(torch.FloatTensor)
        ys = torch.from_numpy(np.array(label_series)).type(torch.FloatTensor)
        torch_data = data.TensorDataset(xs, ys)
        return data.DataLoader(
            dataset=torch_data,    # torch TensorDataset format
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=0,
        )

    def data_iter_index(self, index=1000):
        """Return a DataLoader over `index` samples drawn at random (with replacement)."""
        batch_index = np.random.choice(len(self.src_list), index)
        return self._build_loader(self.src_list[batch_index], self.lab_list[batch_index])

    def data_iter(self):
        """Return a DataLoader over the full dataset."""
        return self._build_loader(self.src_list, self.lab_list)
"torch.utils.data.TensorDataset",
"numpy.array",
"torch.utils.data.DataLoader",
"numpy.load",
"glob.glob"
] | [((2122, 2167), 'torch.utils.data.TensorDataset', 'data.TensorDataset', (['data_series', 'label_series'], {}), '(data_series, label_series)\n', (2140, 2167), True, 'import torch.utils.data as data\n'), ((2188, 2285), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'torch_data', 'batch_size': 'self.batch_size', 'shuffle': '(True)', 'num_workers': '(0)'}), '(dataset=torch_data, batch_size=self.batch_size, shuffle=\n True, num_workers=0)\n', (2203, 2285), True, 'import torch.utils.data as data\n'), ((3467, 3512), 'torch.utils.data.TensorDataset', 'data.TensorDataset', (['data_series', 'label_series'], {}), '(data_series, label_series)\n', (3485, 3512), True, 'import torch.utils.data as data\n'), ((3533, 3630), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'torch_data', 'batch_size': 'self.batch_size', 'shuffle': '(True)', 'num_workers': '(0)'}), '(dataset=torch_data, batch_size=self.batch_size, shuffle=\n True, num_workers=0)\n', (3548, 3630), True, 'import torch.utils.data as data\n'), ((512, 552), 'glob.glob', 'glob.glob', (["(data_path + 'imgs/' + '*.npy')"], {}), "(data_path + 'imgs/' + '*.npy')\n", (521, 552), False, 'import glob\n'), ((595, 636), 'glob.glob', 'glob.glob', (["(data_path + 'masks/' + '*.npy')"], {}), "(data_path + 'masks/' + '*.npy')\n", (604, 636), False, 'import glob\n'), ((1676, 1695), 'numpy.load', 'np.load', (['y_batch[i]'], {}), '(y_batch[i])\n', (1683, 1695), True, 'import numpy as np\n'), ((1850, 1871), 'numpy.array', 'np.array', (['data_series'], {}), '(data_series)\n', (1858, 1871), True, 'import numpy as np\n'), ((2053, 2075), 'numpy.array', 'np.array', (['label_series'], {}), '(label_series)\n', (2061, 2075), True, 'import numpy as np\n'), ((3100, 3125), 'numpy.load', 'np.load', (['self.lab_list[i]'], {}), '(self.lab_list[i])\n', (3107, 3125), True, 'import numpy as np\n'), ((3255, 3276), 'numpy.array', 'np.array', (['data_series'], {}), '(data_series)\n', (3263, 3276), True, 'import numpy as 
np\n'), ((3398, 3420), 'numpy.array', 'np.array', (['label_series'], {}), '(label_series)\n', (3406, 3420), True, 'import numpy as np\n'), ((1428, 1447), 'numpy.load', 'np.load', (['x_batch[i]'], {}), '(x_batch[i])\n', (1435, 1447), True, 'import numpy as np\n'), ((2861, 2886), 'numpy.load', 'np.load', (['self.src_list[i]'], {}), '(self.src_list[i])\n', (2868, 2886), True, 'import numpy as np\n')] |
import argparse
import time
import numpy as np
from ssn_dataset import SSNDataSet
from transforms import *
from ops.utils import temporal_nms
import pandas as pd
from multiprocessing import Pool
from terminaltables import *
import sys
sys.path.append('./anet_toolkit/Evaluation')
from anet_toolkit.Evaluation.eval_detection import compute_average_precision_detection
from ops.utils import softmax
import os
import os.path
import pickle
from ops.utils import get_configs
import evaluate
import math
# options
# CLI: which benchmark to evaluate, the detection-score pickles to fuse, and
# post-processing knobs (NMS threshold, top-k filtering, score fusion weights).
parser = argparse.ArgumentParser(
    description="Evaluate detection performance metrics")
parser.add_argument('dataset', type=str, choices=['activitynet1.2', 'thumos14', 'coin_small'])
parser.add_argument('detection_pickles', type=str, nargs='+')
parser.add_argument('--nms_threshold', type=float, default=None)
parser.add_argument('--no_regression', default=False, action="store_true")
parser.add_argument('--softmax_before_filter', default=False, action="store_true")
parser.add_argument('-j', '--ap_workers', type=int, default=32)
parser.add_argument('--top_k', type=int, default=None)
parser.add_argument('--cls_scores', type=str, default=None)
parser.add_argument('--cls_top_k', type=int, default=1)
parser.add_argument('--score_weights', type=float, default=None, nargs='+')
# NOTE(review): flag keeps the original spelling 'externel_score' (sic);
# renaming it would break existing invocation scripts.
parser.add_argument('--externel_score', type=str, default='test_gt_score_combined_refined_fusion')
args = parser.parse_args()
# Dataset-specific settings; CLI flags override the config-file defaults below.
dataset_configs = get_configs(args.dataset)
num_class = dataset_configs['num_class']
test_prop_file = 'data/{}_proposal_list.txt'.format(dataset_configs['test_list'])
evaluate.number_label = num_class
# Fall back to the per-dataset evaluation defaults when a flag is unset
# (note: falsy explicit values such as 0 also trigger the fallback).
nms_threshold = args.nms_threshold if args.nms_threshold else dataset_configs['evaluation']['nms_threshold']
top_k = args.top_k if args.top_k else dataset_configs['evaluation']['top_k']
softmax_bf = args.softmax_before_filter \
    if args.softmax_before_filter else dataset_configs['evaluation']['softmax_before_filter']
print("initiating evaluation of detection results {}".format(args.detection_pickles))
# Load every detection-score pickle that will be fused together.
score_pickle_list = []
for pc in args.detection_pickles:
    score_pickle_list.append(pickle.load(open(pc, 'rb')))
# Normalize fusion weights; default to a uniform average over the sources.
if args.score_weights:
    weights = np.array(args.score_weights) / sum(args.score_weights)
else:
    weights = [1.0/len(score_pickle_list) for _ in score_pickle_list]
def merge_scores(vid):
    """Fuse one video's proposal scores across all loaded score sources.

    Returns (rel_props, actionness, completeness, regression) where the last
    three are weighted sums over `score_pickle_list` using the module-level
    fusion `weights`; a component that is None in the first source stays None.
    """
    per_source = [pc[vid] for pc in score_pickle_list]

    def _weighted_sum(index, wts):
        # Propagate a missing score component untouched.
        if per_source[0][index] is None:
            return None
        return np.sum([src[index] * w for src, w in zip(per_source, wts)], axis=0)

    rel_props = score_pickle_list[0][vid][0]
    return (rel_props,
            _weighted_sum(1, weights),
            _weighted_sum(2, weights),
            _weighted_sum(3, weights))
print('Merge detection scores from {} sources...'.format(len(score_pickle_list)))
# video id -> (rel_props, act, comp, reg) score tuple, fused across sources.
detection_scores = {k: merge_scores(k) for k in score_pickle_list[0]}
print('Done.')
dataset = SSNDataSet("", test_prop_file, verbose=False)
# One dict per class: video id -> detection array for that class.
dataset_detections = [dict() for i in range(num_class)]
if args.cls_scores:
    print('Using classifier scores from {}'.format(args.cls_scores))
    # The pickle was written with bytes keys; decode and strip path/extension
    # so keys match the video ids used elsewhere in this script.
    cls_score_pc = pickle.load(open(args.cls_scores, 'rb'), encoding='bytes')
    cls_score_dict = {os.path.splitext(os.path.basename(k.decode('utf-8')))[0]:v for k, v in cls_score_pc.items()}
else:
    cls_score_dict = None
# generate detection results
def gen_detection_results(video_id, score_tp):
    """Convert one video's fused score tuple into per-class detection rows.

    `score_tp` is (rel_props, act_scores, comp_scores, reg_scores) as built by
    `merge_scores`.  Rows are accumulated into the module-level
    `dataset_detections[cls][video_id]` arrays with columns
    [t-start, t-end, combined score, location offset, duration offset].

    The filtering mode depends on module-level `top_k` / `cls_score_dict`:
      * top_k <= 0 and no classifier scores: keep every proposal for every class.
      * top_k > 0 and no classifier scores: read externally computed combined
        scores from --externel_score and keep only the global top-k
        (proposal, class) pairs.
      * classifier scores present: keep all proposals, but only for the
        --cls_top_k classes ranked highest by the video-level classifier.
    """
    # Some sources store proposals with a leading batch axis; drop it.
    if len(score_tp[0].shape) == 3:
        rel_prop = np.squeeze(score_tp[0], 0)
    else:
        rel_prop = score_tp[0]
    # standardize regression scores
    reg_scores = score_tp[3]
    if reg_scores is None:
        reg_scores = np.zeros((len(rel_prop), num_class, 2), dtype=np.float32)
    reg_scores = reg_scores.reshape((-1, num_class, 2))
    if top_k <= 0 and cls_score_dict is None:
        # Actionness softmax (column 0 dropped -- presumably background;
        # confirm against SSN's class layout) times exp(completeness).
        combined_scores = softmax(score_tp[1])[:, 1:] * np.exp(score_tp[2])
        for i in range(num_class):
            loc_scores = reg_scores[:, i, 0][:, None]
            dur_scores = reg_scores[:, i, 1][:, None]
            try:
                dataset_detections[i][video_id] = np.concatenate((
                    rel_prop, combined_scores[:, i][:, None], loc_scores, dur_scores), axis=1)
            except:
                print(i, rel_prop.shape, combined_scores.shape, reg_scores.shape)
                raise
    elif cls_score_dict is None:
        #combined_scores = softmax(score_tp[1][:, 1:]) * np.exp(score_tp[2])
        # load combined scores from external numpys
        ex_vid = video_id.split("/")[-1]
        ex_scores = np.load(os.path.join(args.externel_score,"proposal_" + ex_vid + ".npy"))
        # Channel 4 of the external array is used as the combined score --
        # TODO confirm against the producer of these .npy files.
        combined_scores = ex_scores[:,:,4]
        # Global top-k over the flattened (proposal, class) score matrix.
        keep_idx = np.argsort(combined_scores.ravel())[-top_k:]
        for k in keep_idx:
            # Recover the (proposal, class) pair from the C-order flat index.
            cls = k % num_class
            prop_idx = k // num_class
            if video_id not in dataset_detections[cls]:
                dataset_detections[cls][video_id] = np.array([
                    [rel_prop[prop_idx, 0], rel_prop[prop_idx, 1], combined_scores[prop_idx, cls],
                     reg_scores[prop_idx, cls, 0], reg_scores[prop_idx, cls, 1]]
                ])
            else:
                dataset_detections[cls][video_id] = np.vstack(
                    [dataset_detections[cls][video_id],
                     [rel_prop[prop_idx, 0], rel_prop[prop_idx, 1], combined_scores[prop_idx, cls],
                      reg_scores[prop_idx, cls, 0], reg_scores[prop_idx, cls, 1]]])
    else:
        # Optionally softmax the actionness scores before combining.
        if softmax_bf:
            combined_scores = softmax(score_tp[1])[:, 1:] * np.exp(score_tp[2])
        else:
            combined_scores = score_tp[1][:, 1:] * np.exp(score_tp[2])
        video_cls_score = cls_score_dict[os.path.splitext(os.path.basename(video_id))[0]]
        # Keep only the --cls_top_k classes ranked highest by the classifier.
        for video_cls in np.argsort(video_cls_score,)[-args.cls_top_k:]:
            loc_scores = reg_scores[:, video_cls, 0][:, None]
            dur_scores = reg_scores[:, video_cls, 1][:, None]
            try:
                dataset_detections[video_cls][video_id] = np.concatenate((
                    rel_prop, combined_scores[:, video_cls][:, None], loc_scores, dur_scores), axis=1)
            except:
                print(video_cls, rel_prop.shape, combined_scores.shape, reg_scores.shape, loc_scores.shape, dur_scores.shape)
                raise
print("Preprocessing detections...")
for k, v in detection_scores.items():
gen_detection_results(k, v)
print('Done.')
# perform NMS
print("Performing nms...")
for cls in range(num_class):
dataset_detections[cls] = {
k: temporal_nms(v, nms_threshold) for k,v in dataset_detections[cls].items()
}
print("NMS Done.")
def perform_regression(detections):
    """Apply predicted center/duration offsets to proposal boundaries.

    `detections` columns: [t-start, t-end, score, center offset, log-duration
    offset], with times normalized to [0, 1].  Returns a new array with the
    regressed, clipped [t-start, t-end] and the remaining columns unchanged.
    """
    starts = detections[:, 0]
    ends = detections[:, 1]
    span = ends - starts
    # Shift the midpoint by a span-relative offset; rescale span by exp(reg).
    midpoint = (starts + ends) / 2 + span * detections[:, 3]
    span = span * np.exp(detections[:, 4])
    lo = np.clip(midpoint - span / 2, 0, 1)
    hi = np.clip(midpoint + span / 2, 0, 1)
    return np.concatenate((lo[:, None], hi[:, None], detections[:, 2:]), axis=1)
# perform regression
# Refine every surviving detection's boundaries with the regressed offsets.
if not args.no_regression:
    print("Performing location regression...")
    for cls in range(num_class):
        dataset_detections[cls] = {
            k: perform_regression(v) for k, v in dataset_detections[cls].items()
        }
    print("Regression Done.")
else:
    # NOTE(review): 'regresssion' typo is in the original runtime message.
    print("Skip regresssion as requested by --no_regression")
# ravel test detections
def ravel_detections(detection_db, cls):
    """Flatten one class's per-video detection arrays into a DataFrame.

    Keeps only the first three detection columns (t-start, t-end, score) and
    prefixes each row with the video id and class index.
    """
    rows = []
    for vid, dets in detection_db[cls].items():
        for det in dets.tolist():
            rows.append([vid, cls] + det[:3])
    return pd.DataFrame(rows, columns=["video-id", "cls","t-start", "t-end", "score"])
# Flatten per-class detections into one DataFrame per class.
plain_detections = [ravel_detections(dataset_detections, cls) for cls in range(num_class)]

# get gt
all_gt = pd.DataFrame(dataset.get_all_gt(), columns=["video-id", "cls","t-start", "t-end"])
gt_by_cls = []
for cls in range(num_class):
    # Keyword `axis=1`: the positional form `.drop('cls', 1)` is deprecated
    # and removed in pandas 2.x.
    gt_by_cls.append(all_gt[all_gt.cls == cls].reset_index(drop=True).drop('cls', axis=1))

# Dump intermediates for offline inspection/debugging.
pickle.dump(gt_by_cls, open('gt_dump.pc', 'wb'), pickle.HIGHEST_PROTOCOL)
pickle.dump(plain_detections, open('pred_dump.pc', 'wb'), pickle.HIGHEST_PROTOCOL)

print("Calling mean AP calculator from toolkit with {} workers...".format(args.ap_workers))

# IoU thresholds follow each benchmark's standard evaluation protocol.
if args.dataset == 'activitynet1.2':
    iou_range = np.arange(0.5, 1.0, 0.05)
elif args.dataset == 'thumos14':
    iou_range = np.arange(0.1, 1.0, 0.1)
elif args.dataset == 'coin_small':
    iou_range = np.arange(0.1, 1.0, 0.1)
else:
    raise ValueError("unknown dataset {}".format(args.dataset))

# (class, IoU threshold) result tables filled by the worker callback below.
ap_values = np.zeros((num_class, len(iou_range)))
ar_values = np.zeros((num_class, len(iou_range)))
def eval_ap(iou, iou_idx, cls, gt, predition):
    """Worker: compute the AP/AR metrics for one (class, IoU threshold) pair.

    Returns (cls, iou_idx, metrics) so the async callback can route the
    result into the right cell of the result tables.
    """
    metrics = evaluate.ap(predition,iou[0],gt)
    sys.stdout.flush()
    return cls, iou_idx, metrics
def callback(rst):
    """Store a finished worker's (cls, iou_idx, (ap, ar)) result tuple."""
    sys.stdout.flush()
    cls_idx, iou_idx, metrics = rst
    ap_values[cls_idx, iou_idx] = metrics[0]
    ar_values[cls_idx, iou_idx] = metrics[1]
# Per-class mIoU; index 0 is excluded from the final average below.
zdy_miou = np.zeros((num_class,)) # used to store the mIoU of each classes
gt_by_class = [[] for i in range(num_class)]
prediction_by_class = [[] for i in range(num_class)]
gt = []
prediction = []
# Reshape the DataFrames into the flat row format expected by `evaluate`:
#   gt rows:         [cls, t-start, t-end, 1, video-id]
#   prediction rows: [cls, t-start, t-end, score, video-id]
for cls in range(num_class):
    for zdy_record in gt_by_cls[cls].itertuples():
        gt_by_class[cls].append([cls,zdy_record[2],zdy_record[3],1,zdy_record[1]])
    gt += gt_by_class[cls]
    for zdy_record in plain_detections[cls].itertuples():
        prediction_by_class[cls].append([zdy_record[2],zdy_record[3],zdy_record[4],zdy_record[5],zdy_record[1]])
    prediction += prediction_by_class[cls]
    if cls!=0:
        zdy_miou[cls] = evaluate.miou(prediction_by_class[cls],gt_by_class[cls])
miou = zdy_miou[1:].mean()
print(str(len(gt)))
print(str(len(prediction)))
f1_values = np.zeros((len(iou_range),))
# Fan AP computation out over (IoU, class) pairs; `callback` writes each
# finished result into ap_values / ar_values.
pool = Pool(args.ap_workers)
jobs = []
for iou_idx, min_overlap in enumerate(iou_range):
    for cls in range(num_class):
        jobs.append(pool.apply_async(eval_ap, args=([min_overlap], iou_idx, cls, gt_by_class[cls], prediction_by_class[cls],),callback=callback))
    # F1 is computed over all classes at once, in the main process.
    f1 = evaluate.f1(prediction,min_overlap,gt)
    f1_values[iou_idx] = f1
pool.close()
pool.join()
print("Evaluation done.\n\n")
# Average over classes to get mAP / mAR per IoU threshold.
map_iou = ap_values.mean(axis=0)
mar = ar_values.mean(axis=0)
# Render a summary table: one column per IoU threshold plus the average.
display_title = "Detection Performance on {}".format(args.dataset)
display_data = [["IoU thresh"], ["mean AP"], ["mean AR"], ["F1 criterion"]]
for i in range(len(iou_range)):
    display_data[0].append("{:.02f}".format(iou_range[i]))
    display_data[1].append("{:.04f}".format(map_iou[i]))
    display_data[2].append("{:.04f}".format(mar[i]))
    display_data[3].append("{:.04f}".format(f1_values[i]))
display_data[0].append('Average')
display_data[1].append("{:.04f}".format(map_iou.mean()))
display_data[2].append("{:.04f}".format(mar.mean()))
display_data[3].append("{:.04f}".format(f1_values.mean()))
table = AsciiTable(display_data, display_title)
table.justify_columns[-1] = 'right'
table.inner_footing_row_border = True
print(table.table)
print("mIoU: {:.4f}".format(miou))
"numpy.clip",
"evaluate.f1",
"numpy.argsort",
"numpy.array",
"evaluate.miou",
"sys.path.append",
"ops.utils.temporal_nms",
"numpy.arange",
"argparse.ArgumentParser",
"numpy.exp",
"numpy.vstack",
"numpy.concatenate",
"pandas.DataFrame",
"sys.stdout.flush",
"evaluate.ap",
"ops.utils.get_... | [((237, 281), 'sys.path.append', 'sys.path.append', (['"""./anet_toolkit/Evaluation"""'], {}), "('./anet_toolkit/Evaluation')\n", (252, 281), False, 'import sys\n'), ((522, 599), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate detection performance metrics"""'}), "(description='Evaluate detection performance metrics')\n", (545, 599), False, 'import argparse\n'), ((1439, 1464), 'ops.utils.get_configs', 'get_configs', (['args.dataset'], {}), '(args.dataset)\n', (1450, 1464), False, 'from ops.utils import get_configs\n'), ((2985, 3030), 'ssn_dataset.SSNDataSet', 'SSNDataSet', (['""""""', 'test_prop_file'], {'verbose': '(False)'}), "('', test_prop_file, verbose=False)\n", (2995, 3030), False, 'from ssn_dataset import SSNDataSet\n'), ((8524, 8546), 'numpy.zeros', 'np.zeros', (['(num_class,)'], {}), '((num_class,))\n', (8532, 8546), True, 'import numpy as np\n'), ((9303, 9324), 'multiprocessing.Pool', 'Pool', (['args.ap_workers'], {}), '(args.ap_workers)\n', (9307, 9324), False, 'from multiprocessing import Pool\n'), ((7202, 7292), 'pandas.DataFrame', 'pd.DataFrame', (['detection_list'], {'columns': "['video-id', 'cls', 't-start', 't-end', 'score']"}), "(detection_list, columns=['video-id', 'cls', 't-start', 't-end',\n 'score'])\n", (7214, 7292), True, 'import pandas as pd\n'), ((7922, 7947), 'numpy.arange', 'np.arange', (['(0.5)', '(1.0)', '(0.05)'], {}), '(0.5, 1.0, 0.05)\n', (7931, 7947), True, 'import numpy as np\n'), ((8315, 8349), 'evaluate.ap', 'evaluate.ap', (['predition', 'iou[0]', 'gt'], {}), '(predition, iou[0], gt)\n', (8326, 8349), False, 'import evaluate\n'), ((8349, 8367), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8365, 8367), False, 'import sys\n'), ((8415, 8433), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8431, 8433), False, 'import sys\n'), ((9561, 9601), 'evaluate.f1', 'evaluate.f1', (['prediction', 'min_overlap', 'gt'], {}), '(prediction, min_overlap, gt)\n', 
(9572, 9601), False, 'import evaluate\n'), ((2176, 2204), 'numpy.array', 'np.array', (['args.score_weights'], {}), '(args.score_weights)\n', (2184, 2204), True, 'import numpy as np\n'), ((3515, 3541), 'numpy.squeeze', 'np.squeeze', (['score_tp[0]', '(0)'], {}), '(score_tp[0], 0)\n', (3525, 3541), True, 'import numpy as np\n'), ((6142, 6172), 'ops.utils.temporal_nms', 'temporal_nms', (['v', 'nms_threshold'], {}), '(v, nms_threshold)\n', (6154, 6172), False, 'from ops.utils import temporal_nms\n'), ((6447, 6471), 'numpy.exp', 'np.exp', (['detections[:, 4]'], {}), '(detections[:, 4])\n', (6453, 6471), True, 'import numpy as np\n'), ((7994, 8018), 'numpy.arange', 'np.arange', (['(0.1)', '(1.0)', '(0.1)'], {}), '(0.1, 1.0, 0.1)\n', (8003, 8018), True, 'import numpy as np\n'), ((9121, 9178), 'evaluate.miou', 'evaluate.miou', (['prediction_by_class[cls]', 'gt_by_class[cls]'], {}), '(prediction_by_class[cls], gt_by_class[cls])\n', (9134, 9178), False, 'import evaluate\n'), ((3878, 3897), 'numpy.exp', 'np.exp', (['score_tp[2]'], {}), '(score_tp[2])\n', (3884, 3897), True, 'import numpy as np\n'), ((8067, 8091), 'numpy.arange', 'np.arange', (['(0.1)', '(1.0)', '(0.1)'], {}), '(0.1, 1.0, 0.1)\n', (8076, 8091), True, 'import numpy as np\n'), ((3848, 3868), 'ops.utils.softmax', 'softmax', (['score_tp[1]'], {}), '(score_tp[1])\n', (3855, 3868), False, 'from ops.utils import softmax\n'), ((4063, 4157), 'numpy.concatenate', 'np.concatenate', (['(rel_prop, combined_scores[:, i][:, None], loc_scores, dur_scores)'], {'axis': '(1)'}), '((rel_prop, combined_scores[:, i][:, None], loc_scores,\n dur_scores), axis=1)\n', (4077, 4157), True, 'import numpy as np\n'), ((4456, 4520), 'os.path.join', 'os.path.join', (['args.externel_score', "('proposal_' + ex_vid + '.npy')"], {}), "(args.externel_score, 'proposal_' + ex_vid + '.npy')\n", (4468, 4520), False, 'import os\n'), ((5468, 5495), 'numpy.argsort', 'np.argsort', (['video_cls_score'], {}), '(video_cls_score)\n', (5478, 5495), True, 
'import numpy as np\n'), ((6510, 6554), 'numpy.clip', 'np.clip', (['(new_center - new_duration / 2)', '(0)', '(1)'], {}), '(new_center - new_duration / 2, 0, 1)\n', (6517, 6554), True, 'import numpy as np\n'), ((6565, 6609), 'numpy.clip', 'np.clip', (['(new_center + new_duration / 2)', '(0)', '(1)'], {}), '(new_center + new_duration / 2, 0, 1)\n', (6572, 6609), True, 'import numpy as np\n'), ((4777, 4936), 'numpy.array', 'np.array', (['[[rel_prop[prop_idx, 0], rel_prop[prop_idx, 1], combined_scores[prop_idx,\n cls], reg_scores[prop_idx, cls, 0], reg_scores[prop_idx, cls, 1]]]'], {}), '([[rel_prop[prop_idx, 0], rel_prop[prop_idx, 1], combined_scores[\n prop_idx, cls], reg_scores[prop_idx, cls, 0], reg_scores[prop_idx, cls,\n 1]]])\n', (4785, 4936), True, 'import numpy as np\n'), ((4994, 5189), 'numpy.vstack', 'np.vstack', (['[dataset_detections[cls][video_id], [rel_prop[prop_idx, 0], rel_prop[\n prop_idx, 1], combined_scores[prop_idx, cls], reg_scores[prop_idx, cls,\n 0], reg_scores[prop_idx, cls, 1]]]'], {}), '([dataset_detections[cls][video_id], [rel_prop[prop_idx, 0],\n rel_prop[prop_idx, 1], combined_scores[prop_idx, cls], reg_scores[\n prop_idx, cls, 0], reg_scores[prop_idx, cls, 1]]])\n', (5003, 5189), True, 'import numpy as np\n'), ((5274, 5293), 'numpy.exp', 'np.exp', (['score_tp[2]'], {}), '(score_tp[2])\n', (5280, 5293), True, 'import numpy as np\n'), ((5344, 5363), 'numpy.exp', 'np.exp', (['score_tp[2]'], {}), '(score_tp[2])\n', (5350, 5363), True, 'import numpy as np\n'), ((5676, 5778), 'numpy.concatenate', 'np.concatenate', (['(rel_prop, combined_scores[:, video_cls][:, None], loc_scores, dur_scores)'], {'axis': '(1)'}), '((rel_prop, combined_scores[:, video_cls][:, None],\n loc_scores, dur_scores), axis=1)\n', (5690, 5778), True, 'import numpy as np\n'), ((5244, 5264), 'ops.utils.softmax', 'softmax', (['score_tp[1]'], {}), '(score_tp[1])\n', (5251, 5264), False, 'from ops.utils import softmax\n'), ((5416, 5442), 'os.path.basename', 'os.path.basename', 
(['video_id'], {}), '(video_id)\n', (5432, 5442), False, 'import os\n')] |
from typing import Union
import numpy as np
from numba import njit
from jesse.helpers import get_candle_source
from jesse.helpers import get_config
from .supersmoother import supersmoother_fast
def trendflex(candles: np.ndarray, period: int = 20, source_type: str = "close", sequential: bool = False) -> Union[
        float, np.ndarray]:
    """
    TrendFlex indicator by John F. Ehlers.
    Smooths the chosen candle source with a SuperSmoother filter, then
    normalizes the accumulated slope into an oscillator around zero.
    :param candles: np.ndarray
    :param period: int - default=20
    :param source_type: str - default: "close"
    :param sequential: bool - default=False
    :return: float | np.ndarray
    """
    # Trim history to the configured warm-up window when only the most
    # recent value is requested.
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]
    src = get_candle_source(candles, source_type=source_type)
    smoothed = supersmoother_fast(src, period / 2)
    res = trendflex_fast(smoothed, period)
    if sequential:
        return res
    last = res[-1]
    return None if np.isnan(last) else last
@njit
def trendflex_fast(ssf, period):
    """Numba-compiled core loop of the TrendFlex oscillator.
    For every index past the warm-up window, averages the difference between
    the current smoothed value and each of the previous `period` values, then
    normalizes that average by an exponentially smoothed mean square.
    :param ssf: np.ndarray - SuperSmoother-filtered source series
    :param period: int - lookback length
    :return: np.ndarray - TrendFlex values (0 during the warm-up window)
    """
    tf = np.full_like(ssf, 0)
    ms = np.full_like(ssf, 0)
    sums = np.full_like(ssf, 0)
    for i in range(ssf.shape[0]):
        # Not enough history yet for a full lookback window.
        if i < period:
            continue
        # Accumulated slope over the window (renamed from `sum`, which
        # shadowed the builtin).
        diff_total = 0.0
        for t in range(1, period + 1):
            diff_total = diff_total + ssf[i] - ssf[i - t]
        sums[i] = diff_total / period
        # Exponentially smoothed mean square used for normalization.
        ms[i] = 0.04 * sums[i] * sums[i] + 0.96 * ms[i - 1]
        if ms[i] != 0:
            tf[i] = sums[i] / np.sqrt(ms[i])
    return tf
| [
"numpy.sqrt",
"numpy.full_like",
"numpy.isnan",
"jesse.helpers.get_config",
"jesse.helpers.get_candle_source"
] | [((607, 653), 'jesse.helpers.get_config', 'get_config', (['"""env.data.warmup_candles_num"""', '(240)'], {}), "('env.data.warmup_candles_num', 240)\n", (617, 653), False, 'from jesse.helpers import get_config\n'), ((777, 828), 'jesse.helpers.get_candle_source', 'get_candle_source', (['candles'], {'source_type': 'source_type'}), '(candles, source_type=source_type)\n', (794, 828), False, 'from jesse.helpers import get_candle_source\n'), ((1067, 1087), 'numpy.full_like', 'np.full_like', (['ssf', '(0)'], {}), '(ssf, 0)\n', (1079, 1087), True, 'import numpy as np\n'), ((1097, 1117), 'numpy.full_like', 'np.full_like', (['ssf', '(0)'], {}), '(ssf, 0)\n', (1109, 1117), True, 'import numpy as np\n'), ((1129, 1149), 'numpy.full_like', 'np.full_like', (['ssf', '(0)'], {}), '(ssf, 0)\n', (1141, 1149), True, 'import numpy as np\n'), ((988, 1004), 'numpy.isnan', 'np.isnan', (['tf[-1]'], {}), '(tf[-1])\n', (996, 1004), True, 'import numpy as np\n'), ((1508, 1522), 'numpy.sqrt', 'np.sqrt', (['ms[i]'], {}), '(ms[i])\n', (1515, 1522), True, 'import numpy as np\n')] |
from static.simulation.plot import create_plot, create_plot2
import numpy as np
from static.simulation.country import CountryCreator
from static.simulation.seir import seibqhr
from static.simulation.real_data import download
# TODO Add True Recovered
# Real-world reference series: confirmed (rc), recovered (rr), deaths (rd).
rc, rr, rd = download()
# Country objects keyed by ISO code, plus the ordered list of country codes.
countries_arr, countries_keys = CountryCreator.initialization()
FATALITY_RATE = 0.01
# Fraction of departures travelling by air vs road.
AIR_TRANSPORT_USAGE = 0.6
ROAD_TRANSPORT_USAGE = 1 - AIR_TRANSPORT_USAGE
INCUBATION_PERIOD = 5.2
INCUBATION_RATE = 1 / INCUBATION_PERIOD
QUARANTINE_DURATION = 14
QUARANTINE_RATE = 1 / QUARANTINE_DURATION
RECOVERY_RATE_INFECTED = 0.08 # 0.33
RECOVERY_RATE_CONFIRMED = 0.04 # 0.15
total_road_arrives = 0
total_air_arrives = 0
# Build the cumulative distribution of air-travel arrivals over all
# countries: probability_arr[i] ends up being the probability that a random
# air traveller lands in a country with index < i.
probability_arr = [0]
for _, target_country in countries_arr.items():
    probability_arr.append(target_country.arrive)
    total_air_arrives += probability_arr[-1]
probability_arr = list(map(lambda x: x / total_air_arrives, probability_arr))
for prob_i in range(1, len(probability_arr)):
    probability_arr[prob_i] = probability_arr[prob_i] + probability_arr[prob_i - 1]
# World-wide per-day totals produced by the simulation.
total_cases_arr = []
true_cases_arr = []
total_deaths_arr = []
total_recovered_arr = []
# Reference ("real") per-day totals assembled in main().
r_total_cases_arr = []
# r_true_cases_arr = []
r_total_deaths_arr = []
r_total_recovered_arr = []
total_true_recovered_arr = []
# ISO codes of countries that have at least one case.
infected_countries_arr = []
# Generator handle shared between connect() calls.
data_transmitter = 0
# Seed the outbreak: patient zero in China.
countries_arr['CHN'].infected = 1
# countries_arr['CHN'].exposed = 4000
# countries_arr['CHN'].suspected = 800
# countries_arr['CHN'].quarantined = 2132
# countries_arr['CHN'].confirmed = 494
# countries_arr['CHN'].susceptible = countries_arr['CHN'].population - 8361
# countries_arr['CHN'].contact_rate_exp_rate = 0.15
# countries_arr['CHN'].quarantined_rate_exp_rate = 0.1531
# countries_arr['CHN'].diagnose_speed_exp_rate = 0.2
infected_countries_arr.append('CHN')
def infec(code, day):
    """Spread infection from country `code` to other countries via travel.
    Draws one Bernoulli trial per departing traveller (road and air handled
    separately). An infected traveller's destination is sampled from the
    source country's border CDF (road) or from the global air-arrival CDF
    (air). Mutates the module-level country registry and
    `infected_countries_arr` in place.
    NOTE(review): the air branch moves population between countries while
    the road branch does not — presumably intentional; confirm.
    """
    target = countries_arr[code]
    # Expected number of departures today, split by transport mode.
    road_dep = countries_arr[code].departure * ROAD_TRANSPORT_USAGE
    air_dep = countries_arr[code].departure * AIR_TRANSPORT_USAGE
    pop = countries_arr[code].population
    infec_people = countries_arr[code].infected + countries_arr[code].exposed
    # Probability that a single traveller is infectious (infected or exposed).
    infec_prob = infec_people / pop
    # Road travel: destination chosen from this country's border distribution.
    for _ in range(int(road_dep)):
        if np.random.sample() < infec_prob:
            target_prob = np.random.sample()
            for prob_i in range(1, len(target.borders_prob)):
                if target.borders_prob[prob_i - 1] < target_prob < target.borders_prob[prob_i]:
                    if countries_arr[code].borders[prob_i - 1] not in infected_countries_arr:
                        # First case in that neighbour: record outbreak start day.
                        print(countries_arr[countries_arr[code].borders[prob_i - 1]].name + " INFECTED")
                        infected_countries_arr.append(countries_arr[code].borders[prob_i - 1])
                        countries_arr[countries_arr[code].borders[prob_i - 1]].day_when_infected = day
                    countries_arr[countries_arr[code].borders[prob_i - 1]].infected += 1
                    countries_arr[code].infected -= 1
                    break
    # Air travel: destination chosen from the global arrival distribution.
    for _ in range(int(air_dep)):
        if np.random.sample() < infec_prob:
            target_prob = np.random.sample()
            for prob_i in range(1, len(probability_arr) - 1):
                if probability_arr[prob_i - 1] < target_prob < probability_arr[prob_i]:
                    if countries_keys[prob_i] not in infected_countries_arr:
                        print(countries_arr[countries_keys[prob_i]].name + " INFECTED")
                        infected_countries_arr.append(countries_keys[prob_i])
                        countries_arr[countries_keys[prob_i]].day_when_infected = day
                    # Traveller emigrates: adjust both populations.
                    countries_arr[countries_keys[prob_i]].infected += 1
                    countries_arr[countries_keys[prob_i]].population += 1
                    countries_arr[code].infected -= 1
                    countries_arr[code].population -= 1
                    break
def main(data):
    """Advance the global SEIBQHR epidemic simulation one day at a time.
    Generator: each iteration updates every infected country's compartments,
    spreads infection via road/air travel, accumulates world totals alongside
    the downloaded real-world reference data, and yields a result dictionary
    with totals, rendered plots and the list of infected country codes.
    Parameters
    ----------
    data : int or str
        Number of days to simulate.
    """
    for day in range(1, int(data) + 1):
        print("DAY " + str(day))
        day_deaths = 0
        day_cases = 0
        true_cases = 0
        day_recovered = 0
        # Intervention rates applied to a country once its outbreak becomes
        # visible (confirmed + recovered fraction above the threshold below).
        oc_contact_rate_exp_rate = 0.004
        oc_quarantined_rate_exp_rate = 0.005
        oc_diagnose_speed_exp_rate = 0.05
        if day == 47:
            # Day 47: China enters quarantine mode with stronger measures.
            countries_arr['CHN'].contact_rate_exp_rate = 0.1
            countries_arr['CHN'].quarantined_rate_exp_rate = 0.1
            countries_arr['CHN'].diagnose_speed_exp_rate = 0.1
            countries_arr['CHN'].day_when_infected = day
            countries_arr['CHN'].quarantine_mode = True
        for code, country in countries_arr.items():
            if country.infected > 0 or country.exposed > 0:
                if not country.quarantine_mode and (
                        country.confirmed + country.recovered) / country.population > 0.000001:
                    country.contact_rate_exp_rate = oc_contact_rate_exp_rate
                    country.quarantined_rate_exp_rate = oc_quarantined_rate_exp_rate
                    country.diagnose_speed_exp_rate = oc_diagnose_speed_exp_rate
                # One SEIBQHR integration step for this country.
                country.susceptible, country.exposed, country.infected, country.suspected, country.quarantined, \
                country.confirmed, country.recovered, country.auto_recovered = seibqhr(
                    day_after_infected=day - country.day_when_infected,
                    c0=country.contact_rate_0, cb=country.contact_rate_min,
                    r1=country.contact_rate_exp_rate,
                    beta=country.transmission_prob,
                    q0=country.quarantined_rate_exposed_0,
                    qm=country.quarantined_rate_exposed_max,
                    r2=country.quarantined_rate_exp_rate,
                    m=country.susceptible_to_suspected_rate,
                    b=country.detection_rate,
                    f0=country.suspected_to_confirmed_0,
                    fm=country.suspected_to_confirmed_max,
                    r4=country.suspected_to_confirmed_exp_rate,
                    sigma=INCUBATION_RATE,
                    lamb=QUARANTINE_RATE,
                    deltaI0=country.infected_to_confirmed_min,
                    deltaIf=country.infected_to_confirmed_max,
                    r3=country.diagnose_speed_exp_rate,
                    gammaI=RECOVERY_RATE_INFECTED,
                    gammaH=RECOVERY_RATE_CONFIRMED,
                    alpha=country.death_rate,
                    S0=country.susceptible, E0=country.exposed,
                    I0=country.infected,
                    B0=country.suspected, Q0=country.quarantined,
                    H0=country.confirmed,
                    R0=country.recovered,
                    A0=country.auto_recovered)
                # Deaths = population not accounted for by any compartment.
                country.deaths = country.population - country.confirmed - country.exposed - country.infected - \
                                 country.recovered - country.quarantined - country.suspected - country.susceptible - country.auto_recovered
                country.infected_arr.append(country.confirmed + country.infected + country.exposed)
                country.deaths_arr.append(country.deaths)
                country.exposed_arr.append(country.exposed)
                country.recovered_arr.append(country.recovered)
                # Export infections to other countries via travel.
                infec(code, day)
            else:
                country.infected_arr.append(0)
                country.deaths_arr.append(0)
                country.exposed_arr.append(0)
                country.recovered_arr.append(0)
            day_cases = day_cases + country.confirmed
            true_cases = true_cases + country.confirmed + country.infected
            day_deaths += country.deaths
            day_recovered += country.recovered
        total_cases_arr.append(day_cases)
        true_cases_arr.append(true_cases)
        total_deaths_arr.append(day_deaths)
        total_recovered_arr.append(day_recovered)
        # Reference curves: simulated values before real data starts (day 48),
        # downloaded data while available, then hold the last value.
        if day < 48:
            r_total_cases_arr.append(day_cases)
            r_total_deaths_arr.append(day_deaths)
            r_total_recovered_arr.append(day_recovered)
        elif day - 48 < len(rc):
            r_total_cases_arr.append(rc[day - 48] - rd[day - 48] - rr[day - 48])
            r_total_deaths_arr.append(rd[day - 48])
            r_total_recovered_arr.append(rr[day - 48])
        else:
            r_total_cases_arr.append(r_total_cases_arr[-1])
            r_total_deaths_arr.append(r_total_deaths_arr[-1])
            r_total_recovered_arr.append(r_total_recovered_arr[-1])
        total_cases = total_cases_arr[-1]
        true_cases = true_cases_arr[-1]
        total_deaths = total_deaths_arr[-1]
        total_recovered = total_recovered_arr[-1]
        print(countries_arr["CHN"].infected)
        print(countries_arr["CHN"].confirmed)
        print(countries_arr["CHN"].recovered)
        print(countries_arr["CHN"].quarantined)
        print(countries_arr["CHN"].exposed)
        print(countries_arr["CHN"].suspected)
        print(day - countries_arr["CHN"].day_when_infected)
        days = day
        result = {
            "confirmed": int(total_cases),
            "true_cases": int(true_cases),
            "deaths": int(total_deaths),
            "recovered": int(total_recovered),
            "confirmed2": int(r_total_cases_arr[-1]),
            "deaths2": int(r_total_deaths_arr[-1]),
            "recovered2": int(r_total_recovered_arr[-1]),
            # Bug fix: a stray "rc" string literal here used to concatenate
            # with the "plot" key, producing a bogus "rcplot" entry.
            "plot": "0",
            "plot2": "0",
            "infected_countries_arr": infected_countries_arr
        }
        plot_data = [days, total_cases_arr, r_total_cases_arr]
        r_plot_data = [days, total_deaths_arr, r_total_deaths_arr]
        result["plot"] = create_plot(r_plot_data)
        result["plot2"] = create_plot2(plot_data)
        yield result
def connect(get):
    """Handle one front-end request string.
    A request containing "init" (re)starts the simulation generator with the
    requested day count; every call then advances the shared generator by one
    simulated day and returns its result dictionary.
    """
    global data_transmitter
    if "init" in get:
        requested_days = get.split()[1]
        print(requested_days)
        data_transmitter = main(requested_days)
        print(data_transmitter)
    return next(data_transmitter)
def testing():
    """Run a 120-day simulation to completion, discarding yielded results."""
    global data_transmitter
    data_transmitter = main(120)
    for _ in data_transmitter:
        pass
# testing()
| [
"static.simulation.real_data.download",
"static.simulation.seir.seibqhr",
"static.simulation.plot.create_plot2",
"static.simulation.country.CountryCreator.initialization",
"numpy.random.sample",
"static.simulation.plot.create_plot"
] | [((265, 275), 'static.simulation.real_data.download', 'download', ([], {}), '()\n', (273, 275), False, 'from static.simulation.real_data import download\n'), ((308, 339), 'static.simulation.country.CountryCreator.initialization', 'CountryCreator.initialization', ([], {}), '()\n', (337, 339), False, 'from static.simulation.country import CountryCreator\n'), ((9818, 9842), 'static.simulation.plot.create_plot', 'create_plot', (['r_plot_data'], {}), '(r_plot_data)\n', (9829, 9842), False, 'from static.simulation.plot import create_plot, create_plot2\n'), ((9869, 9892), 'static.simulation.plot.create_plot2', 'create_plot2', (['plot_data'], {}), '(plot_data)\n', (9881, 9892), False, 'from static.simulation.plot import create_plot, create_plot2\n'), ((2197, 2215), 'numpy.random.sample', 'np.random.sample', ([], {}), '()\n', (2213, 2215), True, 'import numpy as np\n'), ((2256, 2274), 'numpy.random.sample', 'np.random.sample', ([], {}), '()\n', (2272, 2274), True, 'import numpy as np\n'), ((3047, 3065), 'numpy.random.sample', 'np.random.sample', ([], {}), '()\n', (3063, 3065), True, 'import numpy as np\n'), ((3106, 3124), 'numpy.random.sample', 'np.random.sample', ([], {}), '()\n', (3122, 3124), True, 'import numpy as np\n'), ((5259, 6247), 'static.simulation.seir.seibqhr', 'seibqhr', ([], {'day_after_infected': '(day - country.day_when_infected)', 'c0': 'country.contact_rate_0', 'cb': 'country.contact_rate_min', 'r1': 'country.contact_rate_exp_rate', 'beta': 'country.transmission_prob', 'q0': 'country.quarantined_rate_exposed_0', 'qm': 'country.quarantined_rate_exposed_max', 'r2': 'country.quarantined_rate_exp_rate', 'm': 'country.susceptible_to_suspected_rate', 'b': 'country.detection_rate', 'f0': 'country.suspected_to_confirmed_0', 'fm': 'country.suspected_to_confirmed_max', 'r4': 'country.suspected_to_confirmed_exp_rate', 'sigma': 'INCUBATION_RATE', 'lamb': 'QUARANTINE_RATE', 'deltaI0': 'country.infected_to_confirmed_min', 'deltaIf': 
'country.infected_to_confirmed_max', 'r3': 'country.diagnose_speed_exp_rate', 'gammaI': 'RECOVERY_RATE_INFECTED', 'gammaH': 'RECOVERY_RATE_CONFIRMED', 'alpha': 'country.death_rate', 'S0': 'country.susceptible', 'E0': 'country.exposed', 'I0': 'country.infected', 'B0': 'country.suspected', 'Q0': 'country.quarantined', 'H0': 'country.confirmed', 'R0': 'country.recovered', 'A0': 'country.auto_recovered'}), '(day_after_infected=day - country.day_when_infected, c0=country.\n contact_rate_0, cb=country.contact_rate_min, r1=country.\n contact_rate_exp_rate, beta=country.transmission_prob, q0=country.\n quarantined_rate_exposed_0, qm=country.quarantined_rate_exposed_max, r2\n =country.quarantined_rate_exp_rate, m=country.\n susceptible_to_suspected_rate, b=country.detection_rate, f0=country.\n suspected_to_confirmed_0, fm=country.suspected_to_confirmed_max, r4=\n country.suspected_to_confirmed_exp_rate, sigma=INCUBATION_RATE, lamb=\n QUARANTINE_RATE, deltaI0=country.infected_to_confirmed_min, deltaIf=\n country.infected_to_confirmed_max, r3=country.diagnose_speed_exp_rate,\n gammaI=RECOVERY_RATE_INFECTED, gammaH=RECOVERY_RATE_CONFIRMED, alpha=\n country.death_rate, S0=country.susceptible, E0=country.exposed, I0=\n country.infected, B0=country.suspected, Q0=country.quarantined, H0=\n country.confirmed, R0=country.recovered, A0=country.auto_recovered)\n', (5266, 6247), False, 'from static.simulation.seir import seibqhr\n')] |
"""Generate observation list files based on default values and APT output files.
Authors
-------
- <NAME>
- <NAME>
Use
---
::
from mirage.yaml import generate_observationlist
generate_observationlist.get_observation_dict(xml_file, yaml_file, catalogs,
parameter_defaults=None, verbose=False):
TODO
----
- Determine solution to set default parameters explicitly in configuration file or similar.
- Clarify use and role of FilterConfig
"""
import collections
import copy
import logging
import os
from astropy.table import Table, vstack
import numpy as np
from ..apt import read_apt_xml
from ..logging import logging_functions
from ..utils.constants import LOG_CONFIG_FILENAME, STANDARD_LOGFILE_NAME
# Package root directory, used to locate the bundled logging configuration.
classpath = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
log_config_file = os.path.join(classpath, 'logging', LOG_CONFIG_FILENAME)
logging_functions.create_logger(log_config_file, STANDARD_LOGFILE_NAME)
# Get the mapping between the user-input catalog dictionary keys and
# the keys used in the full table of observation parameters
CAT_TYPE_MAPPING = {'point_source': 'PointsourceCatalog', 'galaxy': 'GalaxyCatalog',
                    'extended': 'ExtendedCatalog', 'moving_pointsource': 'MovingTargetList',
                    'moving_sersic': 'MovingTargetSersic', 'moving_extended': 'MovingTargetExtended',
                    'moving_target_to_track': 'MovingTargetToTrack',
                    'tso_imaging_catalog': 'ImagingTSOCatalog',
                    'tso_grism_catalog': 'GrismTSOCatalog'}
# Recognized user-facing (short) catalog-type key names.
POSSIBLE_CATS = list(CAT_TYPE_MAPPING.keys())
def catalog_dictionary_per_observation(cats, obs_nums, targets, defaults):
    """Reorganize a user-supplied catalog dictionary into a per-observation,
    per-instrument dictionary.
    The input may be keyed either by target name only (the same catalogs are
    then applied to every instrument) or by target name and instrument. The
    output is keyed by observation number and instrument, using the long
    catalog key names from CAT_TYPE_MAPPING. MIRI and NIRSpec entries are
    always 'None'.
    Parameters
    ----------
    cats : dict
        User-supplied catalogs, keyed by target name, with an optional
        second level of instrument names.
    obs_nums : numpy.ndarray
        1D array of observation ID numbers.
    targets : numpy.ndarray
        1D array of target names, one per entry in ``obs_nums``.
    defaults : dict
        Default catalog values, keyed by long catalog key names.
    Returns
    -------
    obs_cats : dict
        Catalogs per observation and instrument, e.g.
        obs_cats['001']['nircam']['PointsourceCatalog'] = 'ptsrc1.cat'
    """
    supported = ('nircam', 'niriss', 'fgs')
    unsupported = ('miri', 'nirspec')
    # Seed every observation with defaults for the supported instruments and
    # 'None' for the unsupported ones; user entries overwrite these below.
    obs_cats = {}
    for obs_num in obs_nums:
        entry = {inst: {} for inst in supported + unsupported}
        for short_key in POSSIBLE_CATS:
            long_key = CAT_TYPE_MAPPING[short_key]
            for inst in supported:
                entry[inst][long_key] = defaults[long_key]
            for inst in unsupported:
                entry[inst][long_key] = 'None'
        obs_cats[obs_num] = entry
    for target_name, target_cats in cats.items():
        # Boolean mask of the observations pointing at this target.
        uses_target = np.array(targets) == target_name
        second_level = target_cats.keys()
        if any(short in second_level for short in POSSIBLE_CATS):
            # Second level holds catalog types directly: apply the same
            # catalogs to every supported instrument.
            for obs_num in obs_nums[uses_target]:
                for cat_type in second_level:
                    long_key = CAT_TYPE_MAPPING[cat_type]
                    for inst in supported:
                        obs_cats[obs_num][inst][long_key] = target_cats[cat_type]
        else:
            # Second level holds instrument names: instrument-specific catalogs.
            for obs_num in obs_nums[uses_target]:
                for inst in second_level:
                    for cat_type, cat_file in target_cats[inst].items():
                        obs_cats[obs_num][inst][CAT_TYPE_MAPPING[cat_type]] = cat_file
    return obs_cats
def convert_background_dict(bkgd):
    """Expand a flat per-observation background dictionary so each entry has
    one value per instrument (and per channel for NIRCam).
    Lets users give a single background level (e.g. 'high', 'medium', or a
    number) per observation that applies to all instruments.
    Parameters
    ----------
    bkgd : dict
        e.g. {'001': 'high', '002': 'medium', '003': 2.3}
    Returns
    -------
    dict
        e.g. {'001': {'nircam': {'sw': 'high', 'lw': 'high'},
                      'niriss': 'high', 'fgs': 'high'}, ...}
    """
    return {obs_num: {'nircam': {'sw': level, 'lw': level},
                      'niriss': level,
                      'fgs': level}
            for obs_num, level in bkgd.items()}
def dictionary_slice(dictionary, index):
    """Return a dictionary keeping only the ``index``-th element of every
    list stored under each key, each wrapped in a one-element list.
    Parameters
    ----------
    dictionary : dict
        Dictionary whose values are lists.
    index : int
        Position to extract from each list.
    Returns
    -------
    dict
        Dictionary with single-element list values.
    """
    return {key: [entries[index]] for key, entries in dictionary.items()}
def ensure_lower_case_background_keys(dictionary):
    """Return a copy of a nested background-level dictionary with all
    instrument (and, for NIRCam, channel) keys converted to lower case.
    Top-level observation-number keys are left untouched.
    Parameters
    ----------
    dictionary : dict
        Nested dictionary of background values, e.g.
        {'001': {'NIRCam': {'SW': 0.2, 'LW': 0.3}, 'NIRISS': 0.4, 'FGS': 0.2}}
    Returns
    -------
    dict
        Same structure with instrument/channel keys lower-cased.
    """
    lowered = {}
    for obs_num, inst_dict in dictionary.items():
        converted = {}
        for inst_name, inst_value in inst_dict.items():
            # NIRCam holds a further {channel: value} mapping; other
            # instruments (if present) map directly to a value.
            if isinstance(inst_value, collections.abc.Mapping):
                converted[inst_name.lower()] = {chan.lower(): rate
                                                for chan, rate in inst_value.items()}
            else:
                converted[inst_name.lower()] = inst_value
        lowered[obs_num] = converted
    return lowered
def ensure_lower_case_keys(dictionary):
    """Return a copy of a nested dictionary in which all second- and
    third-level keys are lower case; top-level keys are preserved.
    Parameters
    ----------
    dictionary : dict
        Two-level nested dictionary whose second-level values may themselves
        be mappings (e.g. the NIRCam {channel: value} dictionary).
    Returns
    -------
    dict
        Same structure with all non-top-level keys lower-cased.
    """
    def _lower_one_level(value):
        # Lower-case the keys of a nested mapping; pass scalars through.
        if isinstance(value, collections.abc.Mapping):
            return {inner.lower(): val for inner, val in value.items()}
        return value
    return {top_key: {mid_key.lower(): _lower_one_level(mid_val)
                      for mid_key, mid_val in sub_dict.items()}
            for top_key, sub_dict in dictionary.items()}
def ensure_lower_case_catalogs_keys(dictionary):
    """Return a copy of a user-supplied catalog dictionary with every key
    below the target-name level converted to lower case. Target names
    themselves are preserved.
    Parameters
    ----------
    dictionary : dict
        Catalogs keyed by target name. The second level may contain either
        catalog-type keys mapping directly to file names, or instrument
        names whose values are {catalog_type: file} mappings.
    Returns
    -------
    dict
        Same structure with instrument and catalog-type keys lower-cased.
    """
    output = {}
    for target_name, target_entry in dictionary.items():
        lowered_entry = {}
        for mid_key, mid_value in target_entry.items():
            # Instrument entries carry another mapping of catalog types;
            # plain catalog entries map straight to a file name.
            if isinstance(mid_value, collections.abc.Mapping):
                lowered_entry[mid_key.lower()] = {inner_key.lower(): inner_val
                                                  for inner_key, inner_val in mid_value.items()}
            else:
                lowered_entry[mid_key.lower()] = mid_value
        output[target_name] = lowered_entry
    return output
def expand_for_dithers(indict, verbose=True):
    """Expand a given dictionary to create one entry for each dither.
    Supports parallel observations: a prime row immediately followed by a
    parallel-instrument row is replicated as a pair so the two stay in sync.
    Moved here and modified from apt_inputs.py
    Parameters
    ----------
    indict : dict
        dictionary of observations; each key maps to a list with one entry
        per exposure row
    verbose : bool
        If True, log summary information about the expansion
    Returns
    -------
    expanded : dict
        Dictionary, expanded to include a separate entry for
        each dither
    """
    # Initialize logger
    logger = logging.getLogger('mirage.yaml.generate_observationlist.expand_for_dithers')
    expanded = {}
    for key in indict:
        expanded[key] = []
    # use astropy table operations to expand dithers while maintaining parallels in sync
    # implementation assumes that only one instrument is used in parallel
    table = Table(indict)
    # Keep the original row index so rows can be iterated positionally.
    table['row'] = np.arange(len(table))
    expanded_table = None #copy.deepcopy(table)
    # complication here is to handle cases with unsupported instruments (MIRI, NIRSpec) in parallel
    for i, row in enumerate(table['row']):
        number_of_dithers = int(table['number_of_dithers'][i])
        expand_prime_dithers_only = False
        expand_parallel_dithers = False
        # skip over parallel observations because they are already accounted for
        if table['ParallelInstrument'][i]:
            continue
        try:
            # A prime row followed by a parallel row of a different
            # instrument must be expanded as a synchronized pair.
            if (table['CoordinatedParallel'][i] == 'true') and (not table['ParallelInstrument'][i]) \
                    and (table['ParallelInstrument'][i + 1]) and (table['Instrument'][i] != table['Instrument'][i + 1]):
                expand_parallel_dithers = True
            else:
                expand_prime_dithers_only = True
        except IndexError: # last row in table is not a parallel
            expand_prime_dithers_only = True
        if (table['CoordinatedParallel'][i] == 'false'):
            expand_prime_dithers_only = True
        if expand_prime_dithers_only and expand_parallel_dithers:
            raise RuntimeError('Possible conflict found when expanding for dithers.')
        if expand_parallel_dithers:
            # Take the prime row and its parallel companion together.
            dither_table = table[i:i + 2]
            if (number_of_dithers > 1):
                #replicate parallel observation n times
                dither_table = vstack([dither_table]*number_of_dithers)
            if expanded_table is None:
                expanded_table = dither_table
            else:
                # if verbose:
                #     print('Parallel: Adding {:>3d} rows to table with {:>3d} rows'.format(len(dither_table), len(expanded_table)))
                expanded_table = vstack((expanded_table, dither_table))
        elif expand_prime_dithers_only:
            # add row multiplied by number of dithers
            dither_table = vstack([table[i]]*number_of_dithers)
            if expanded_table is None:
                expanded_table = dither_table
            else:
                # print('Prime: Adding {:>3d} rows to table with {:>3d} rows'.format(len(dither_table),
                #                                                                    len(expanded_table)))
                expanded_table = vstack((expanded_table, dither_table))
    # set number of dithers to 1 after expansion
    expanded_table['number_of_dithers'] = np.ones(len(expanded_table)).astype(int)
    # NIRCam cannot handle when PrimaryDithers=None
    for index, value in enumerate(expanded_table['PrimaryDithers']):
        if value == 'None':
            expanded_table['PrimaryDithers'][index] = expanded_table['number_of_dithers'][index]
    # Convert the expanded table back to the dictionary-of-lists format.
    expanded = {}
    for key in expanded_table.colnames:
        expanded[key] = np.array(expanded_table[key]).tolist()
    if verbose:
        logger.info('Number of entries before expanding dithers: {}'.format(len(table)))
        logger.info('Number of entries after expanding dithers: {}'.format(len(expanded_table)))
    if verbose:
        for obs_id in list(dict.fromkeys(expanded_table['ObservationID'])):
            logger.info('Expanded table for Observation {} has {} entries'.format(obs_id, len(np.where(expanded_table['ObservationID']==obs_id)[0])))
    return expanded
def get_observation_dict(xml_file, yaml_file, catalogs,
                         parameter_overrides=None,
                         verbose=False):
    """Write observation list file (required mirage input) on the basis of APT files.

    Parameters
    ----------
    xml_file : str
        path to APT .xml file
    yaml_file : str
        output_file
    catalogs : dict
        Dictionary of catalog files, one entry per instrument. For NIRCam the entry has to be a
        dictionary itself, e.g. catalogs['nircam']['lw'] = somefile
        If the user provides a list of catalogs, that list has to have one entry per observation in
        the program, accounting for any instrument used.
    parameter_overrides : dict or None
        Dictionary of default parameter values, e.g. date, roll angle, ...
        Recognized keys: 'cosmic_rays', 'background', 'roll_angle', 'dates'.
        Missing keys are treated as None (i.e. use built-in defaults).
    verbose : bool
        If True, additional information is logged while reading the APT file

    Returns
    -------
    return_dict : dict or None
        Expanded dictionary that holds exposure information. None if the
        program contains no supported observations.
    skipped_obs_numbers : list
        List of observation numbers with unsupported observation templates. These observations
        were not added to the dictionary.

    TODO
    ----
    Read default values from configuration file
    """
    # Initialize logger
    logger = logging.getLogger('mirage.yaml.generate_observationlist.get_observation_dict')

    # Avoid a mutable default argument: build the overrides dictionary here.
    # Using .get() below also tolerates partial override dictionaries.
    if parameter_overrides is None:
        parameter_overrides = {'cosmic_rays': None, 'background': None,
                               'roll_angle': None, 'dates': None}

    # Read in filters from APT .xml file
    readxml_obj = read_apt_xml.ReadAPTXML()
    xml_dict = readxml_obj.read_xml(xml_file, verbose=verbose)

    # create an expanded dictionary that contains lists of parameters expanded for dithers
    xml_dict = expand_for_dithers(xml_dict, verbose=verbose)
    return_dict = None

    # array of unique instrument names
    all_observation_ids = xml_dict['ObservationID']
    # List of target names from the proposal
    all_targets = xml_dict['TargetID']

    # Set default values. These are overwritten if there is an appropriate
    # entry in parameter_overrides
    default_time = '00:00:00'
    default_values = {}
    default_values['Date'] = '2022-10-04T00:00:00'
    default_values['PAV3'] = '0.'
    default_values['PointsourceCatalog'] = 'None'
    default_values['GalaxyCatalog'] = 'None'
    default_values['ExtendedCatalog'] = 'None'
    default_values['ExtendedScale'] = '1.0'
    default_values['ExtendedCenter'] = '1024,1024'
    default_values['MovingTargetList'] = 'None'
    default_values['MovingTargetSersic'] = 'None'
    default_values['MovingTargetExtended'] = 'None'
    default_values['MovingTargetConvolveExtended'] = 'True'
    default_values['MovingTargetToTrack'] = 'None'
    default_values['ImagingTSOCatalog'] = 'None'
    default_values['GrismTSOCatalog'] = 'None'
    default_values['BackgroundRate_sw'] = 'low'
    default_values['BackgroundRate_lw'] = 'low'
    default_values['BackgroundRate'] = 'low'
    default_values['CosmicRayLibrary'] = 'SUNMAX'
    default_values['CosmicRayScale'] = 1.0
    default_parameter_name_list = ['MovingTargetConvolveExtended', 'ExtendedScale', 'ExtendedCenter']

    # Cosmic rays
    # Can be:
    # cr = {'library': 'SUNMAX', 'scale': 1.0}
    # cr = {'001': {'library': 'SUNMAX', 'scale': 1.2}}
    cosmic_rays = parameter_overrides.get('cosmic_rays')
    # Case where one value of library and scale are to be used for
    # all observations
    if cosmic_rays is not None:
        if 'library' in cosmic_rays.keys():
            default_values['CosmicRayLibrary'] = cosmic_rays['library']
            default_values['CosmicRayScale'] = cosmic_rays['scale']
            # Now set cosmic_rays to None so that it won't be used when looping
            # over observations below
            cosmic_rays = None
        else:
            # Case where different values are given for different observations
            # Just use cosmic_rays below when looping over observations
            pass

    # Background levels
    # background = 'high'
    # background = 22.2
    # background = {'001': 'high', '002': 'medium', '003': 22.3}
    # background = {'001': {'nircam': {'sw': 0.2, 'lw':0.3}, 'niriss': 0.4, 'fgs': 0.2}}
    background = parameter_overrides.get('background')
    if background is not None:
        if isinstance(background, str) or isinstance(background, float) or isinstance(background, int):
            default_values['BackgroundRate_sw'] = background
            default_values['BackgroundRate_lw'] = background
            default_values['BackgroundRate'] = background
            # Now set background to None so that it won't be used when looping
            # over observations below
            background = None
        else:
            bkeys = list(background.keys())
            if (isinstance(background[bkeys[0]], str) or isinstance(background[bkeys[0]], float) or isinstance(background[bkeys[0]], int)):
                # Case where one background is specified for all instruments
                # in each observation
                background = convert_background_dict(background)
            else:
                # Case where the user inputs the full dictionary, with a
                # background value for each instrument and observation.
                # Force all dictionary keys to be lower case.
                background = ensure_lower_case_keys(background)

    # Dates
    # dates = '2019-5-25'
    # dates = {'001': '2019-05-25', '002': '2019-11-15T12:13:14'}
    dates = parameter_overrides.get('dates')
    if dates is not None:
        if isinstance(dates, str):
            if 'T' in dates:
                # In the end we need dates in the format of YYYY-MM-DDTHH:MM:SS
                default_values['Date'] = dates
            else:
                # If the time part is not present in the input, then add it.
                default_values['Date'] = '{}T{}'.format(dates, default_time)
            # Now set dates to None so that it won't be used when looping
            # over observations below
            dates = None
        else:
            # Just use dates below when looping over observations
            pass

    # Roll angle, aka PAV3
    # pav3 = 34.5
    # pav3 = {'001': 34.5, '002': 154.5}
    pav3 = parameter_overrides.get('roll_angle')
    if pav3 is not None:
        if isinstance(pav3, float) or isinstance(pav3, int):
            default_values['PAV3'] = pav3
            # Now set pav3 to None so that it won't be used when looping
            # over observations below
            pav3 = None
        else:
            # Just use pav3 below when looping over observations
            pass

    # Catalogs
    # In the easy case where the same catalogs are to be used,
    # just populate the default values
    if catalogs is not None:
        cat_keys = catalogs.keys()
        if any(poss in cat_keys for poss in POSSIBLE_CATS):
            # Map the user-facing catalog keys onto the yaml keywords
            catalog_keywords = {'point_source': 'PointsourceCatalog',
                                'galaxy': 'GalaxyCatalog',
                                'extended': 'ExtendedCatalog',
                                'moving_pointsource': 'MovingTargetList',
                                'moving_sersic': 'MovingTargetSersic',
                                'moving_extended': 'MovingTargetExtended',
                                'moving_target_to_track': 'MovingTargetToTrack',
                                'tso_imaging_catalog': 'ImagingTSOCatalog',
                                'tso_grism_catalog': 'GrismTSOCatalog'}
            for cat_key in cat_keys:
                if cat_key in catalog_keywords:
                    default_values[catalog_keywords[cat_key]] = catalogs[cat_key]
            # Now that we have modified the default values, set catalogs to
            # None so that it is not accessed later
            catalogs_per_observation = None
        else:
            # If the catalog dictionary is more complex, specifying different
            # catalogs for each target, or different catalogs for each instrument
            # and target, then translate this dictionary into a dictionary of
            # catalogs for each observation and instrument
            catalogs = ensure_lower_case_keys(catalogs)
            catalogs_per_observation = catalog_dictionary_per_observation(catalogs,
                                                                          np.array(all_observation_ids),
                                                                          np.array(all_targets),
                                                                          default_values)
    else:
        catalogs_per_observation = None

    # assemble string that will constitute the yaml content
    text_out = ["# Observation list created by generate_observationlist.py\n\n"]
    text = ['']
    entry_number = 0  # running number for every entry in the observation list
    entry_numbers = []
    observation_numbers = list(dict.fromkeys(xml_dict['ObservationID']))
    for observation_number in observation_numbers:
        first_index = xml_dict['ObservationID'].index(observation_number)
        text += [
            "Observation{}:\n".format(observation_number),
            "  Name: '{}'\n".format(xml_dict['ObservationName'][first_index])
        ]
        observation_rows = np.where(np.array(xml_dict['ObservationID']) == observation_number)[0]
        for index in observation_rows:
            number_of_dithers = int(xml_dict['number_of_dithers'][index])
            instrument = xml_dict['Instrument'][index]
            for dither_index in range(number_of_dithers):
                # Get the proper date value
                if dates is None:
                    date_value = default_values['Date']
                else:
                    try:
                        value = dates[observation_number]
                        if 'T' in value:
                            date_value = dates[observation_number]
                        else:
                            date_value = '{}T{}'.format(dates[observation_number], default_time)
                    except KeyError:
                        # BUGFIX: .format must apply to the whole concatenated
                        # string, not only to the second literal
                        logger.error(("\n\nERROR: No date value specified for Observation {} in date dictionary. "
                                      "Quitting.\n\n").format(observation_number))
                        raise KeyError

                # Get the proper PAV3 value
                if pav3 is None:
                    pav3_value = default_values['PAV3']
                else:
                    try:
                        pav3_value = pav3[observation_number]
                    except KeyError:
                        # BUGFIX: same .format-binding correction as for dates
                        logger.error(("\n\nERROR: No roll angle value specified for Observation {} in roll_angle "
                                      "dictionary. Quitting.\n\n").format(observation_number))
                        raise KeyError

                # Get the proper catalog values
                if catalogs_per_observation is None:
                    ptsrc_catalog_value = default_values['PointsourceCatalog']
                    galaxy_catalog_value = default_values['GalaxyCatalog']
                    extended_catalog_value = default_values['ExtendedCatalog']
                    mov_ptsrc_catalog_value = default_values['MovingTargetList']
                    mov_sersic_catalog_value = default_values['MovingTargetSersic']
                    mov_extended_catalog_value = default_values['MovingTargetExtended']
                    mov_tracked_catalog_value = default_values['MovingTargetToTrack']
                    im_tso_catalog_value = default_values['ImagingTSOCatalog']
                    gr_tso_catalog_value = default_values['GrismTSOCatalog']
                else:
                    try:
                        catalogs_to_use = catalogs_per_observation[observation_number][instrument.lower()]
                    except KeyError:
                        logger.error(("\n\nERROR: Missing observation number or instrument entry in catalog "
                                      "dictionary. Failed to find catalogs[{}][{}]\n\n").format(observation_number,
                                                                                                instrument.lower()))
                        raise KeyError
                    ptsrc_catalog_value = catalogs_to_use['PointsourceCatalog']
                    galaxy_catalog_value = catalogs_to_use['GalaxyCatalog']
                    extended_catalog_value = catalogs_to_use['ExtendedCatalog']
                    mov_ptsrc_catalog_value = catalogs_to_use['MovingTargetList']
                    mov_sersic_catalog_value = catalogs_to_use['MovingTargetSersic']
                    mov_extended_catalog_value = catalogs_to_use['MovingTargetExtended']
                    mov_tracked_catalog_value = catalogs_to_use['MovingTargetToTrack']
                    im_tso_catalog_value = catalogs_to_use['ImagingTSOCatalog']
                    gr_tso_catalog_value = catalogs_to_use['GrismTSOCatalog']

                # Get the proper cosmic ray values
                if cosmic_rays is None:
                    cr_library_value = default_values['CosmicRayLibrary']
                    cr_scale_value = default_values['CosmicRayScale']
                else:
                    try:
                        cr_library_value = cosmic_rays[observation_number]['library']
                        cr_scale_value = cosmic_rays[observation_number]['scale']
                    except KeyError:
                        logger.error(("\n\nERROR: No cosmic ray library and/or scale value specified for "
                                      "Observation {} in cosmic_ray dictionary. Quitting.\n\n"
                                      ).format(observation_number))
                        raise KeyError

                text += [
                    "  EntryNumber{}:\n".format(entry_number),
                    "    Instrument: {}\n".format(instrument),
                    "    Date: {}\n".format(date_value),
                    "    PAV3: {}\n".format(pav3_value),
                    "    DitherIndex: {}\n".format(dither_index),
                    "    CosmicRayLibrary: {}\n".format(cr_library_value),
                    "    CosmicRayScale: {}\n".format(cr_scale_value),
                ]
                if return_dict is None:
                    return_dict = dictionary_slice(xml_dict, index)
                else:
                    return_dict = read_apt_xml.append_dictionary(return_dict, dictionary_slice(xml_dict, index))
                if instrument.lower() in ['nircam', 'wfsc']:
                    sw_filt = xml_dict['ShortFilter'][index]
                    lw_filt = xml_dict['LongFilter'][index]

                    # Get the proper background rate
                    if background is None:
                        background_sw_value = default_values['BackgroundRate_sw']
                        background_lw_value = default_values['BackgroundRate_lw']
                    else:
                        try:
                            background_sw_value = background[observation_number]['nircam']['sw']
                            background_lw_value = background[observation_number]['nircam']['lw']
                        except KeyError:
                            logger.error(("\n\nERROR: Missing entry in the background dictionary for NIRCam SW and/or "
                                          "LW channels, observation number: {}\n\n").format(observation_number))
                            raise KeyError

                    text += [
                        "    FilterConfig:\n",
                        "      SW:\n",
                        "        Filter: {}\n".format(sw_filt),
                        "        PointSourceCatalog: {}\n".format(ptsrc_catalog_value),
                        "        GalaxyCatalog: {}\n".format(galaxy_catalog_value),
                        "        ExtendedCatalog: {}\n".format(extended_catalog_value),
                        "        MovingTargetList: {}\n".format(mov_ptsrc_catalog_value),
                        "        MovingTargetSersic: {}\n".format(mov_sersic_catalog_value),
                        "        MovingTargetExtended: {}\n".format(mov_extended_catalog_value),
                        "        MovingTargetToTrack: {}\n".format(mov_tracked_catalog_value),
                        "        ImagingTSOCatalog: {}\n".format(im_tso_catalog_value),
                        "        GrismTSOCatalog: {}\n".format(gr_tso_catalog_value),
                        "        BackgroundRate: {}\n".format(background_sw_value),
                    ]
                    for key in default_parameter_name_list:
                        text += ["        {}: {}\n".format(key, default_values[key])]
                    text += [
                        "      LW:\n",
                        "        Filter: {}\n".format(lw_filt),
                        "        PointSourceCatalog: {}\n".format(ptsrc_catalog_value),
                        "        GalaxyCatalog: {}\n".format(galaxy_catalog_value),
                        "        ExtendedCatalog: {}\n".format(extended_catalog_value),
                        "        MovingTargetList: {}\n".format(mov_ptsrc_catalog_value),
                        "        MovingTargetSersic: {}\n".format(mov_sersic_catalog_value),
                        "        MovingTargetExtended: {}\n".format(mov_extended_catalog_value),
                        "        MovingTargetToTrack: {}\n".format(mov_tracked_catalog_value),
                        "        ImagingTSOCatalog: {}\n".format(im_tso_catalog_value),
                        "        GrismTSOCatalog: {}\n".format(gr_tso_catalog_value),
                        "        BackgroundRate: {}\n".format(background_lw_value),
                    ]
                    for key in default_parameter_name_list:
                        text += ["        {}: {}\n".format(key, default_values[key])]
                elif instrument.lower() in ['niriss', 'fgs', 'nirspec', 'miri']:
                    if (instrument.lower() == 'niriss') and (xml_dict['APTTemplate'][index] in ['NirissExternalCalibration']):
                        filter_wheel_value = xml_dict['FilterWheel'][index]
                        pupil_wheel_value = xml_dict['PupilWheel'][index]
                        text += [
                            "    FilterWheel: {}\n".format(filter_wheel_value),
                            "    PupilWheel: {}\n".format(pupil_wheel_value),
                        ]
                        # The non-CLEAR element of the two wheels is the one
                        # that defines the effective filter
                        if 'CLEAR' in filter_wheel_value:
                            filter_value = pupil_wheel_value
                        elif ('CLEAR' in pupil_wheel_value) or ('NRM' in pupil_wheel_value):
                            filter_value = filter_wheel_value
                    else:
                        filter_value = xml_dict['Filter'][index]

                    # Get the proper background rate
                    if background is None:
                        background_value = default_values['BackgroundRate']
                    else:
                        try:
                            background_value = background[observation_number][instrument.lower()]
                        except KeyError:
                            logger.error(("\n\nERROR: Missing entry in the background dictionary for observation "
                                          "number: {}, instrument: {}\n\n").format(observation_number, instrument))
                            raise KeyError

                    text += [
                        "    Filter: {}\n".format(filter_value),
                        "    PointSourceCatalog: {}\n".format(ptsrc_catalog_value),
                        "    GalaxyCatalog: {}\n".format(galaxy_catalog_value),
                        "    ExtendedCatalog: {}\n".format(extended_catalog_value),
                        "    MovingTargetList: {}\n".format(mov_ptsrc_catalog_value),
                        "    MovingTargetSersic: {}\n".format(mov_sersic_catalog_value),
                        "    MovingTargetExtended: {}\n".format(mov_extended_catalog_value),
                        "    MovingTargetToTrack: {}\n".format(mov_tracked_catalog_value),
                        "    ImagingTSOCatalog: {}\n".format(im_tso_catalog_value),
                        "    GrismTSOCatalog: {}\n".format(gr_tso_catalog_value),
                        "    BackgroundRate: {}\n".format(background_value),
                    ]
                    for key in default_parameter_name_list:
                        text += ["    {}: {}\n".format(key, default_values[key])]
                entry_numbers.append(entry_number)
                entry_number += 1
    text_out += text

    # Guard against programs with no supported observations (return_dict
    # stays None in that case; previously this raised a TypeError)
    if return_dict is not None:
        return_dict['entry_number'] = entry_numbers

    # If the directory to hold the observation file does not yet exist, create it
    obs_dir = os.path.dirname(yaml_file)
    if obs_dir != '' and not os.path.isdir(obs_dir):
        try:
            os.mkdir(obs_dir)
        except OSError:
            logger.error("Creation of the directory {} failed".format(obs_dir))
        else:
            logger.info("Successfully created the directory {} to hold the observation list file.".format(obs_dir))

    # Context manager guarantees the file handle is closed even on error
    with open(yaml_file, 'w') as f:
        f.writelines(text_out)

    logger.info('Wrote {} observations and {} entries to {}'.format(len(observation_numbers), entry_number, yaml_file))
    return return_dict, readxml_obj.skipped_observations
| [
"logging.getLogger",
"numpy.unique",
"astropy.table.Table",
"numpy.where",
"os.path.join",
"os.path.dirname",
"numpy.array",
"os.path.isdir",
"os.mkdir",
"astropy.table.vstack"
] | [((847, 902), 'os.path.join', 'os.path.join', (['classpath', '"""logging"""', 'LOG_CONFIG_FILENAME'], {}), "(classpath, 'logging', LOG_CONFIG_FILENAME)\n", (859, 902), False, 'import os\n'), ((16348, 16424), 'logging.getLogger', 'logging.getLogger', (['"""mirage.yaml.generate_observationlist.expand_for_dithers"""'], {}), "('mirage.yaml.generate_observationlist.expand_for_dithers')\n", (16365, 16424), False, 'import logging\n'), ((16670, 16683), 'astropy.table.Table', 'Table', (['indict'], {}), '(indict)\n', (16675, 16683), False, 'from astropy.table import Table, vstack\n'), ((21289, 21367), 'logging.getLogger', 'logging.getLogger', (['"""mirage.yaml.generate_observationlist.get_observation_dict"""'], {}), "('mirage.yaml.generate_observationlist.get_observation_dict')\n", (21306, 21367), False, 'import logging\n'), ((29695, 29728), 'numpy.unique', 'np.unique', (["xml_dict['Instrument']"], {}), "(xml_dict['Instrument'])\n", (29704, 29728), True, 'import numpy as np\n'), ((42044, 42070), 'os.path.dirname', 'os.path.dirname', (['yaml_file'], {}), '(yaml_file)\n', (42059, 42070), False, 'import os\n'), ((794, 819), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (809, 819), False, 'import os\n'), ((5306, 5323), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (5314, 5323), True, 'import numpy as np\n'), ((42096, 42118), 'os.path.isdir', 'os.path.isdir', (['obs_dir'], {}), '(obs_dir)\n', (42109, 42118), False, 'import os\n'), ((42154, 42171), 'os.mkdir', 'os.mkdir', (['obs_dir'], {}), '(obs_dir)\n', (42162, 42171), False, 'import os\n'), ((18134, 18176), 'astropy.table.vstack', 'vstack', (['([dither_table] * number_of_dithers)'], {}), '([dither_table] * number_of_dithers)\n', (18140, 18176), False, 'from astropy.table import Table, vstack\n'), ((18475, 18513), 'astropy.table.vstack', 'vstack', (['(expanded_table, dither_table)'], {}), '((expanded_table, dither_table))\n', (18481, 18513), False, 'from astropy.table import Table, 
vstack\n'), ((18636, 18674), 'astropy.table.vstack', 'vstack', (['([table[i]] * number_of_dithers)'], {}), '([table[i]] * number_of_dithers)\n', (18642, 18674), False, 'from astropy.table import Table, vstack\n'), ((19519, 19548), 'numpy.array', 'np.array', (['expanded_table[key]'], {}), '(expanded_table[key])\n', (19527, 19548), True, 'import numpy as np\n'), ((29080, 29109), 'numpy.array', 'np.array', (['all_observation_ids'], {}), '(all_observation_ids)\n', (29088, 29109), True, 'import numpy as np\n'), ((29185, 29206), 'numpy.array', 'np.array', (['all_targets'], {}), '(all_targets)\n', (29193, 29206), True, 'import numpy as np\n'), ((19017, 19055), 'astropy.table.vstack', 'vstack', (['(expanded_table, dither_table)'], {}), '((expanded_table, dither_table))\n', (19023, 19055), False, 'from astropy.table import Table, vstack\n'), ((30214, 30249), 'numpy.array', 'np.array', (["xml_dict['ObservationID']"], {}), "(xml_dict['ObservationID'])\n", (30222, 30249), True, 'import numpy as np\n'), ((19949, 20000), 'numpy.where', 'np.where', (["(expanded_table['ObservationID'] == obs_id)"], {}), "(expanded_table['ObservationID'] == obs_id)\n", (19957, 20000), True, 'import numpy as np\n')] |
"""Multi-dimentional Gaussian copula mutual information estimation."""
import numpy as np
from scipy.special import psi
from itertools import product
from frites.core.copnorm import copnorm_nd
###############################################################################
###############################################################################
# N-D TOOLS
###############################################################################
###############################################################################
def nd_reshape(x, mvaxis=None, traxis=-1):
    """Move the multi-variate and trial axes to the last two positions.

    Guarantees that an nd array ends up with a shape of
    (..., mvaxis, traxis).

    Parameters
    ----------
    x : array_like
        Multi-dimentional array
    mvaxis : int | None
        Spatial location of the axis to consider if multi-variate analysis
        is needed. If None, a singleton multi-variate axis is appended.
    traxis : int | -1
        Spatial location of the trial axis. By default the last axis is
        considered

    Returns
    -------
    x_rsh : array_like
        The reshaped multi-dimentional array of shape (..., mvaxis, traxis)
    """
    assert isinstance(traxis, int)
    # resolve a (possibly negative) trial axis to its positive index before
    # any axis is added
    tr_pos = np.arange(x.ndim)[traxis]
    # no multi-variate axis given -> append a singleton one at the end
    if not isinstance(mvaxis, int):
        x = x[..., np.newaxis]
        mvaxis = -1
    assert isinstance(mvaxis, int)
    mv_pos = np.arange(x.ndim)[mvaxis]
    # bring (mv, trial) to the two trailing positions
    return np.moveaxis(x, (mv_pos, tr_pos), (-2, -1))
def nd_shape_checking(x, y, mvaxis, traxis):
    """Assert that two ndarray shapes agree everywhere but on the mv axis.

    Both inputs must satisfy :
    x.shape = (nx_1, ..., n_xn, x_mvaxis, traxis)
    y.shape = (nx_1, ..., n_xn, y_mvaxis, traxis)
    i.e. every dimension except the one before last must match.
    """
    assert x.ndim == y.ndim
    # all axes except the second-to-last (multi-variate) one
    to_check = np.delete(np.arange(x.ndim), -2)
    assert all(x.shape[int(d)] == y.shape[int(d)] for d in to_check)
###############################################################################
###############################################################################
# MUTUAL INFORMATION
###############################################################################
###############################################################################
def mi_nd_gg(x, y, mvaxis=None, traxis=-1, biascorrect=True, demeaned=False,
             shape_checking=True):
    """Multi-dimentional MI between two Gaussian variables in bits.

    Parameters
    ----------
    x, y : array_like
        Arrays to consider for computing the Mutual Information. The two input
        variables x and y should have the same shape except on the mvaxis
        (if needed).
    mvaxis : int | None
        Spatial location of the axis to consider if multi-variate analysis
        is needed
    traxis : int | -1
        Spatial location of the trial axis. By default the last axis is
        considered
    biascorrect : bool | True
        Specifies whether bias correction should be applied to the estimated MI
    demeaned : bool | False
        Specifies whether the input data already has zero mean (true if it has
        been copula-normalized)
    shape_checking : bool | True
        Perform a reshape and check that x and y shapes are consistents. For
        high performances and to avoid extensive memory usage, it's better to
        already have x and y with a shape of (..., mvaxis, traxis) and to set
        this parameter to False

    Returns
    -------
    mi : array_like
        The mutual information with the same shape as x and y, without the
        mvaxis and traxis
    """
    # optionally bring both inputs to a (..., mvaxis, traxis) layout
    if shape_checking:
        x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
        y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)
        nd_shape_checking(x, y, mvaxis, traxis)

    n_trials = x.shape[-1]
    nvarx, nvary = x.shape[-2], y.shape[-2]
    nvarxy = nvarx + nvary

    # stack both variables along the mv axis and (optionally) remove the mean
    xy = np.concatenate((x, y), axis=-2)
    if not demeaned:
        xy = xy - xy.mean(axis=-1, keepdims=True)
    # joint covariance estimate
    cxy = np.einsum('...ij, ...kj->...ik', xy, xy) / float(n_trials - 1.)

    def _half_logdet(c):
        # sum of the log of the Cholesky diagonal = .5 * log(det(c));
        # normalization terms cancel in the MI difference below
        return np.log(np.einsum('...ii->...i', np.linalg.cholesky(c))).sum(-1)

    # (unnormalized) entropies of the marginals and of the joint variable
    hx = _half_logdet(cxy[..., :nvarx, :nvarx])
    hy = _half_logdet(cxy[..., nvarx:, nvarx:])
    hxy = _half_logdet(cxy)

    ln2 = np.log(2)
    if biascorrect:
        # analytic small-sample bias correction (digamma terms)
        dims = np.arange(1, nvarxy + 1)
        psiterms = psi((n_trials - dims).astype(float) / 2.0) / 2.0
        dterm = (ln2 - np.log(n_trials - 1.0)) / 2.0
        hx = hx - nvarx * dterm - psiterms[:nvarx].sum()
        hy = hy - nvary * dterm - psiterms[:nvary].sum()
        hxy = hxy - nvarxy * dterm - psiterms[:nvarxy].sum()

    # MI in bits
    return (hx + hy - hxy) / ln2
def mi_model_nd_gd(x, y, mvaxis=None, traxis=-1, biascorrect=True,
                   demeaned=False, shape_checking=True):
    """Multi-dimentional MI between a Gaussian and a discret variables in bits.

    This function is based on ANOVA style model comparison.

    Parameters
    ----------
    x : array_like
        Continuous (Gaussian) variable
    y : array_like
        Discret variable of shape (n_trials,)
    mvaxis : int | None
        Spatial location of the axis to consider if multi-variate analysis
        is needed
    traxis : int | -1
        Spatial location of the trial axis. By default the last axis is
        considered
    biascorrect : bool | True
        Specifies whether bias correction should be applied to the estimated MI
    demeaned : bool | False
        Specifies whether the input data already has zero mean (true if it has
        been copula-normalized)
    shape_checking : bool | True
        Perform a reshape of x to (..., mvaxis, traxis). Set to False for
        high performances if x already has this layout

    Returns
    -------
    mi : array_like
        The mutual information with the same shape as x, without the
        mvaxis and traxis
    """
    if shape_checking:
        x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
    assert isinstance(y, np.ndarray) and (y.ndim == 1)
    assert x.shape[-1] == len(y)
    # x.shape (..., x_mvaxis, traxis)
    nvarx, ntrl = x.shape[-2], x.shape[-1]
    labels = np.unique(y)
    lead_shape = x.shape[:-2]

    if not demeaned:
        x = x - x.mean(axis=-1, keepdims=True)

    # per-class trial counts and class-conditional entropies
    ntrl_y = np.zeros((len(labels),), dtype=int)
    hcond = np.zeros(list(lead_shape) + [len(labels)], dtype=float)
    for num, lab in enumerate(labels):
        sel = y == lab
        ntrl_y[num] = sel.sum()
        xm = x[..., sel]
        xm = xm - xm.mean(axis=-1, keepdims=True)
        cm = np.einsum('...ij, ...kj->...ik', xm, xm) / float(ntrl_y[num] - 1.)
        diag = np.einsum('...ii->...i', np.linalg.cholesky(cm))
        hcond[..., num] = np.log(diag).sum(-1)

    # class weights
    w = ntrl_y / float(ntrl)

    # unconditional entropy from unconditional Gaussian fit
    cx = np.einsum('...ij, ...kj->...ik', x, x) / float(ntrl - 1.)
    hunc = np.log(np.einsum('...ii->...i', np.linalg.cholesky(cx))).sum(-1)

    ln2 = np.log(2)
    if biascorrect:
        # analytic bias correction; 'dims' avoids shadowing builtin vars()
        dims = np.arange(1, nvarx + 1)
        psiterms = psi((ntrl - dims).astype(float) / 2.) / 2.
        dterm = (ln2 - np.log(float(ntrl - 1))) / 2.
        hunc = hunc - nvarx * dterm - psiterms.sum()
        dterm = (ln2 - np.log((ntrl_y - 1).astype(float))) / 2.
        psi_y = np.zeros_like(ntrl_y, dtype=float)
        for vi in dims:
            psi_y = psi_y + psi((ntrl_y - vi).astype(float) / 2.)
        hcond = hcond - nvarx * dterm - (psi_y / 2.)

    # MI in bits
    return (hunc - np.einsum('i, ...i', w, hcond)) / ln2
def cmi_nd_ggg(x, y, z, mvaxis=None, traxis=-1, biascorrect=True,
               demeaned=False, shape_checking=True):
    """Multi-dimentional conditional MI I(x;y|z) between Gaussian variables in bits.

    (Docstring fixed: this computes the CMI between x and y conditioned on z,
    not an ANOVA-style model comparison.)

    Parameters
    ----------
    x, y, z : array_like
        Arrays to consider for computing the Mutual Information. The three
        input variables x, y and z should have the same shape except on the
        mvaxis (if needed).
    mvaxis : int | None
        Spatial location of the axis to consider if multi-variate analysis
        is needed
    traxis : int | -1
        Spatial location of the trial axis. By default the last axis is
        considered
    biascorrect : bool | True
        Specifies whether bias correction should be applied to the estimated MI
    demeaned : bool | False
        Specifies whether the input data already has zero mean (true if it has
        been copula-normalized)
    shape_checking : bool | True
        Perform a reshape and check that x, y and z shapes are consistents.
        For high performances and to avoid extensive memory usage, it's better
        to already have the inputs with a shape of (..., mvaxis, traxis) and
        to set this parameter to False

    Returns
    -------
    cmi : array_like
        The conditional mutual information with the same shape as x, y and z
        without the mvaxis and traxis
    """
    # Multi-dimentional shape checking
    if shape_checking:
        x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
        y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)
        z = nd_reshape(z, mvaxis=mvaxis, traxis=traxis)
        nd_shape_checking(x, y, mvaxis, traxis)
        nd_shape_checking(x, z, mvaxis, traxis)

    # x.shape == y.shape == z.shape (..., x_mvaxis, traxis)
    ntrl = x.shape[-1]
    nvarx, nvary, nvarz = x.shape[-2], y.shape[-2], z.shape[-2]
    # sizes of the joint sub-variables (duplicate nvarxy assignment removed)
    nvarxy = nvarx + nvary
    nvaryz = nvary + nvarz
    nvarxz = nvarx + nvarz
    nvarxyz = nvarx + nvaryz

    # joint variable along the mvaxis
    xyz = np.concatenate((x, y, z), axis=-2)
    if not demeaned:
        xyz -= xyz.mean(axis=-1, keepdims=True)
    cxyz = np.einsum('...ij, ...kj->...ik', xyz, xyz)
    cxyz /= float(ntrl - 1.)

    # submatrices of joint covariance
    cz = cxyz[..., nvarxy:, nvarxy:]
    cyz = cxyz[..., nvarx:, nvarx:]
    # the (x, z) covariance is assembled by deleting the y rows/columns
    sh = list(cxyz.shape)
    sh[-1], sh[-2] = nvarxz, nvarxz
    cxz = np.zeros(tuple(sh), dtype=float)
    cxz[..., :nvarx, :nvarx] = cxyz[..., :nvarx, :nvarx]
    cxz[..., :nvarx, nvarx:] = cxyz[..., :nvarx, nvarxy:]
    cxz[..., nvarx:, :nvarx] = cxyz[..., nvarxy:, :nvarx]
    cxz[..., nvarx:, nvarx:] = cxyz[..., nvarxy:, nvarxy:]

    # Cholesky decomposition
    chcz = np.linalg.cholesky(cz)
    chcxz = np.linalg.cholesky(cxz)
    chcyz = np.linalg.cholesky(cyz)
    chcxyz = np.linalg.cholesky(cxyz)

    # entropies in nats
    # normalizations cancel for mutual information
    hz = np.log(np.einsum('...ii->...i', chcz)).sum(-1)
    hxz = np.log(np.einsum('...ii->...i', chcxz)).sum(-1)
    hyz = np.log(np.einsum('...ii->...i', chcyz)).sum(-1)
    hxyz = np.log(np.einsum('...ii->...i', chcxyz)).sum(-1)

    ln2 = np.log(2)
    if biascorrect:
        vec = np.arange(1, nvarxyz + 1)
        psiterms = psi((ntrl - vec).astype(float) / 2.0) / 2.0
        dterm = (ln2 - np.log(ntrl - 1.0)) / 2.0
        hz = hz - nvarz * dterm - psiterms[:nvarz].sum()
        hxz = hxz - nvarxz * dterm - psiterms[:nvarxz].sum()
        hyz = hyz - nvaryz * dterm - psiterms[:nvaryz].sum()
        hxyz = hxyz - nvarxyz * dterm - psiterms[:nvarxyz].sum()

    # CMI in bits
    return (hxz + hyz - hxyz - hz) / ln2
###############################################################################
###############################################################################
# GAUSSIAN COPULA MUTUAL INFORMATION
###############################################################################
###############################################################################
def gcmi_nd_cc(x, y, mvaxis=None, traxis=-1, shape_checking=True, gcrn=True):
    """Gaussian-copula MI between two continuous variables.

    Same estimator as :func:`mi_nd_gg`, except that each continuous variable
    is first (optionally) rank-normalized through a Gaussian copula.

    Parameters
    ----------
    x, y : array_like
        Continuous variables
    mvaxis : int | None
        Spatial location of the axis to consider if multi-variate analysis
        is needed
    traxis : int | -1
        Spatial location of the trial axis. By default the last axis is
        considered
    shape_checking : bool | True
        Perform a reshape and check that x and y shapes are consistents. For
        high performances and to avoid extensive memory usage, it's better to
        already have x and y with a shape of (..., mvaxis, traxis) and to set
        this parameter to False
    gcrn : bool | True
        Apply a Gaussian Copula rank normalization. This operation is
        relatively slow for big arrays.

    Returns
    -------
    mi : array_like
        The mutual information with the same shape as x and y, without the
        mvaxis and traxis
    """
    if shape_checking:
        x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
        y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)
        nd_shape_checking(x, y, mvaxis, traxis)
    # optional Gaussian-copula rank normalization of each variable
    if gcrn:
        x = copnorm_nd(x, axis=-1)
        y = copnorm_nd(y, axis=-1)
    # copnormed data is already zero-mean, hence demeaned=True
    return mi_nd_gg(x, y, mvaxis=-2, traxis=-1, biascorrect=True,
                    demeaned=True, shape_checking=False)
def gcmi_model_nd_cd(x, y, mvaxis=None, traxis=-1, shape_checking=True,
                     gcrn=True):
    """Gaussian-copula MI between a continuous and a discret variable.

    Same estimator as :func:`mi_model_nd_gd`, except that the continuous
    variable is first (optionally) rank-normalized through a Gaussian copula.

    Parameters
    ----------
    x : array_like
        Continuous variable
    y : array_like
        Discret variable of shape (n_trials,)
    mvaxis : int | None
        Spatial location of the axis to consider if multi-variate analysis
        is needed
    traxis : int | -1
        Spatial location of the trial axis. By default the last axis is
        considered
    shape_checking : bool | True
        Perform a reshape of x to (..., mvaxis, traxis). Set to False for
        high performances if x already has this layout
    gcrn : bool | True
        Apply a Gaussian Copula rank normalization. This operation is
        relatively slow for big arrays.

    Returns
    -------
    mi : array_like
        The mutual information with the same shape as x, without the mvaxis
        and traxis
    """
    if shape_checking:
        x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
    # optional Gaussian-copula rank normalization of the continuous variable
    if gcrn:
        x = copnorm_nd(x, axis=-1)
    # copnormed data is already zero-mean, hence demeaned=True
    return mi_model_nd_gd(x, y, mvaxis=-2, traxis=-1, biascorrect=True,
                          demeaned=True, shape_checking=False)
###############################################################################
###############################################################################
# GAUSSIAN COPULA CONTIONAL MUTUAL INFORMATION
###############################################################################
###############################################################################
def gccmi_nd_ccnd(x, y, *z, mvaxis=None, traxis=-1, gcrn=True,
                  shape_checking=True, biascorrect=True, demeaned=True):
    """Gaussian-Copula CMI between two continuous variables.

    The conditional mutual information is obtained by computing a GCMI
    inside each combination of the discrete conditions ``z`` and taking
    the probability-weighted sum over combinations.

    Parameters
    ----------
    x, y : array_like
        Continuous variables with identical shapes except, possibly, on
        the mvaxis
    z : list | array_like
        One or several 1-D integer arrays of shape (n_trials,) describing
        the discrete conditions (e.g. [0, 0, ..., 1, 1, 2, 2])
    mvaxis : int | None
        Location of the multi-variate axis, if any
    traxis : int | -1
        Location of the trial axis (last axis by default)
    gcrn : bool | True
        Apply a Gaussian-copula rank normalization (relatively slow for
        large arrays)
    shape_checking : bool | True
        Reshape x and y into the (..., mvaxis, traxis) layout and validate
        them. Set to False if the inputs already have that layout

    Returns
    -------
    cmi : array_like
        Conditional mutual information, with the same shape as x and y
        minus the mvaxis and traxis
    """
    # Put both inputs into the canonical (..., mvaxis, traxis) layout
    if shape_checking:
        x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
        y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)
        nd_shape_checking(x, y, mvaxis, traxis)
    n_trials = x.shape[-1]
    # One boolean trial-mask per combination of discrete conditions
    cond_masks = discret_to_index(*z)
    n_cond = len(cond_masks)
    # Per-condition MI and per-condition trial counts
    icond = np.zeros(list(x.shape[:-2]) + [n_cond], dtype=float)
    pz = np.zeros((n_cond,), dtype=float)
    for n_c, sel in enumerate(cond_masks):
        pz[n_c] = sel.sum()
        x_c = copnorm_nd(x[..., sel], axis=-1) if gcrn else x[..., sel]
        y_c = copnorm_nd(y[..., sel], axis=-1) if gcrn else y[..., sel]
        icond[..., n_c] = mi_nd_gg(x_c, y_c, mvaxis=-2, traxis=-1,
                                   biascorrect=biascorrect,
                                   demeaned=demeaned, shape_checking=False)
    # Turn counts into probabilities and average over conditions
    pz /= n_trials
    return np.sum(pz * icond, axis=-1)
def cmi_nd_ggd(x, y, z, mvaxis=None, traxis=-1, shape_checking=True,
               biascorrect=True, demeaned=False):
    """CMI between two continuous variables given one discrete variable.

    The Gaussian MI is computed inside each level of ``z`` and summed with
    the level probabilities as weights.

    Parameters
    ----------
    x : array_like
        Continuous variable
    y : array_like
        Continuous variable
    z : array_like
        1-D array of shape (n_trials,) describing the discrete condition
    mvaxis : int | None
        Location of the multi-variate axis, if any
    traxis : int | -1
        Location of the trial axis (last axis by default)
    shape_checking : bool | True
        Reshape x and y into the (..., mvaxis, traxis) layout and validate
        them. Set to False if the inputs already have that layout
    demeaned : bool | False
        True if the inputs already have zero mean (e.g. after a copula
        normalization)

    Returns
    -------
    cmi : array_like
        Conditional mutual information, with the same shape as x and y
        minus the mvaxis and traxis
    """
    # Put both inputs into the canonical (..., mvaxis, traxis) layout
    if shape_checking:
        x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
        y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)
        nd_shape_checking(x, y, mvaxis, traxis)
    n_trials = x.shape[-1]
    # z must carry exactly one label per trial
    assert (z.ndim == 1) and (len(z) == n_trials)
    z_values = np.unique(z)
    n_cond = len(z_values)
    # Per-level MI and per-level trial counts
    icond = np.zeros(list(x.shape[:-2]) + [n_cond], dtype=float)
    pz = np.zeros((n_cond,), dtype=float)
    for n_c, z_val in enumerate(z_values):
        sel = z == z_val
        pz[n_c] = sel.sum()
        icond[..., n_c] = mi_nd_gg(x[..., sel], y[..., sel], mvaxis=-2,
                                   traxis=-1, biascorrect=biascorrect,
                                   demeaned=demeaned, shape_checking=False)
    # Turn counts into probabilities and average over levels
    pz /= n_trials
    return np.sum(pz * icond, axis=-1)
def gccmi_model_nd_cdnd(x, y, *z, mvaxis=None, traxis=-1, gcrn=True,
                        shape_checking=True):
    """Gaussian-Copula CMI between a continuous and a discrete variable.

    A GC-CMI between the continuous ``x`` and the discrete ``y``, computed
    inside every combination of the discrete conditions ``z`` and summed
    with the combination probabilities as weights.

    Parameters
    ----------
    x : array_like
        Continuous variable
    y : array_like
        Discrete variable of shape (n_trials,)
    z : list | array_like
        One or several 1-D integer arrays of shape (n_trials,) describing
        the discrete conditions (e.g. [0, 0, ..., 1, 1, 2, 2])
    mvaxis : int | None
        Location of the multi-variate axis, if any
    traxis : int | -1
        Location of the trial axis (last axis by default)
    gcrn : bool | True
        Apply a Gaussian-copula rank normalization (relatively slow for
        large arrays)
    shape_checking : bool | True
        Reshape x into the (..., mvaxis, traxis) layout and validate it.
        Set to False if the input already has that layout

    Returns
    -------
    cmi : array_like
        Conditional mutual information, with the same shape as x minus the
        mvaxis and traxis
    """
    # Put x into the canonical (..., mvaxis, traxis) layout
    if shape_checking:
        x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
    assert isinstance(y, np.ndarray) and (y.ndim == 1)
    assert x.shape[-1] == len(y)
    n_trials = x.shape[-1]
    # One boolean trial-mask per combination of discrete conditions
    cond_masks = discret_to_index(*z)
    n_cond = len(cond_masks)
    # Per-condition MI and per-condition trial counts
    icond = np.zeros(list(x.shape[:-2]) + [n_cond], dtype=float)
    pz = np.zeros((n_cond,), dtype=float)
    for n_c, sel in enumerate(cond_masks):
        pz[n_c] = sel.sum()
        x_c = copnorm_nd(x[..., sel], axis=-1) if gcrn else x[..., sel]
        icond[..., n_c] = mi_model_nd_gd(x_c, y[sel], mvaxis=-2, traxis=-1,
                                         biascorrect=True, demeaned=True,
                                         shape_checking=False)
    # Turn counts into probabilities and average over conditions
    pz /= n_trials
    return np.sum(pz * icond, axis=-1)
def discret_to_index(*z):
    """Convert discrete variables into a list of boolean trial indices.

    Parameters
    ----------
    z : tuple | list
        One or several 1-D integer arrays of identical shape (n_trials,)
        describing discrete conditions (e.g. [0, 0, ..., 1, 1, 2, 2])

    Returns
    -------
    idx : list
        List of boolean arrays. Each array selects the trials of one
        combination of condition values; combinations that never occur are
        dropped.

    Raises
    ------
    TypeError
        If the inputs are not 1-D arrays of identical shape.
    """
    # Bug fix: because of the *z packing, z is always a tuple here, so the
    # original `isinstance(z, np.ndarray)` test could never be true and the
    # single-array fast path was dead code. It is restored below (the
    # general path yields the same result, this one just skips the product).
    if len(z) == 1 and isinstance(z[0], np.ndarray) and (z[0].ndim == 1):
        return [z[0] == k for k in np.unique(z[0])]
    # sanity checks: all inputs must be 1-D arrays with the same shape
    is_array = all(isinstance(k, np.ndarray) for k in z)
    is_vec = all(k.ndim == 1 for k in z)
    is_shape = all(z[0].shape == k.shape for k in z)
    if not (is_array and is_vec and is_shape):
        raise TypeError("z should be a list of 1-D array, all with the "
                        "same shape")
    # cartesian product of the unique values of every variable
    u_z = tuple(tuple(np.unique(k)) for k in z)
    idx = []
    for combo in product(*u_z):
        _idx = [_c == _k for _c, _k in zip(z, combo)]
        # trials where every variable matches its value in the combination
        _idx_bool = np.all(np.c_[_idx], axis=0)
        if _idx_bool.any():
            idx += [_idx_bool]
    return idx
def gccmi_nd_ccc(x, y, z, mvaxis=None, traxis=-1, shape_checking=True,
                 gcrn=True):
    """GC-CMI between two continuous variables given a continuous one.

    Parameters
    ----------
    x, y, z : array_like
        Continuous variables; ``z`` is the conditioning variable
    mvaxis : int | None
        Location of the multi-variate axis, if any
    traxis : int | -1
        Location of the trial axis (last axis by default)
    shape_checking : bool | True
        Reshape the inputs into the (..., mvaxis, traxis) layout and
        validate them. Set to False if they already have that layout
    gcrn : bool | True
        Apply a Gaussian-copula rank normalization (relatively slow for
        large arrays)

    Returns
    -------
    mi : array_like
        Conditional mutual information, with the same shape as x and y
        minus the mvaxis and traxis
    """
    # Put all three inputs into the canonical (..., mvaxis, traxis) layout
    if shape_checking:
        x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
        y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)
        z = nd_reshape(z, mvaxis=mvaxis, traxis=traxis)
        nd_shape_checking(x, y, mvaxis, traxis)
        nd_shape_checking(x, z, mvaxis, traxis)
    # Optional Gaussian-copula rank normalization of every variable
    if gcrn:
        x = copnorm_nd(x, axis=-1)
        y = copnorm_nd(y, axis=-1)
        z = copnorm_nd(z, axis=-1)
    return cmi_nd_ggg(x, y, z, mvaxis=-2, traxis=-1, biascorrect=True,
                      demeaned=True, shape_checking=False)
| [
"numpy.multiply",
"numpy.unique",
"numpy.log",
"numpy.zeros_like",
"frites.core.copnorm.copnorm_nd",
"itertools.product",
"numpy.sum",
"numpy.zeros",
"numpy.einsum",
"numpy.concatenate",
"numpy.moveaxis",
"numpy.all",
"numpy.linalg.cholesky",
"numpy.arange"
] | [((1512, 1554), 'numpy.moveaxis', 'np.moveaxis', (['x', '(mvaxis, traxis)', '(-2, -1)'], {}), '(x, (mvaxis, traxis), (-2, -1))\n', (1523, 1554), True, 'import numpy as np\n'), ((4065, 4096), 'numpy.concatenate', 'np.concatenate', (['(x, y)'], {'axis': '(-2)'}), '((x, y), axis=-2)\n', (4079, 4096), True, 'import numpy as np\n'), ((4174, 4214), 'numpy.einsum', 'np.einsum', (['"""...ij, ...kj->...ik"""', 'xy', 'xy'], {}), "('...ij, ...kj->...ik', xy, xy)\n", (4183, 4214), True, 'import numpy as np\n'), ((4392, 4415), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['cxy'], {}), '(cxy)\n', (4410, 4415), True, 'import numpy as np\n'), ((4427, 4449), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['cx'], {}), '(cx)\n', (4445, 4449), True, 'import numpy as np\n'), ((4461, 4483), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['cy'], {}), '(cy)\n', (4479, 4483), True, 'import numpy as np\n'), ((4741, 4750), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (4747, 4750), True, 'import numpy as np\n'), ((6881, 6893), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (6890, 6893), True, 'import numpy as np\n'), ((7153, 7184), 'numpy.zeros', 'np.zeros', (['zm_shape'], {'dtype': 'float'}), '(zm_shape, dtype=float)\n', (7161, 7184), True, 'import numpy as np\n'), ((7768, 7790), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['cx'], {}), '(cx)\n', (7786, 7790), True, 'import numpy as np\n'), ((7859, 7868), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (7865, 7868), True, 'import numpy as np\n'), ((10553, 10587), 'numpy.concatenate', 'np.concatenate', (['(x, y, z)'], {'axis': '(-2)'}), '((x, y, z), axis=-2)\n', (10567, 10587), True, 'import numpy as np\n'), ((10668, 10710), 'numpy.einsum', 'np.einsum', (['"""...ij, ...kj->...ik"""', 'xyz', 'xyz'], {}), "('...ij, ...kj->...ik', xyz, xyz)\n", (10677, 10710), True, 'import numpy as np\n'), ((11230, 11252), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['cz'], {}), '(cz)\n', (11248, 11252), True, 'import numpy as np\n'), 
((11265, 11288), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['cxz'], {}), '(cxz)\n', (11283, 11288), True, 'import numpy as np\n'), ((11301, 11324), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['cyz'], {}), '(cyz)\n', (11319, 11324), True, 'import numpy as np\n'), ((11338, 11362), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['cxyz'], {}), '(cxyz)\n', (11356, 11362), True, 'import numpy as np\n'), ((11682, 11691), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (11688, 11691), True, 'import numpy as np\n'), ((18293, 18324), 'numpy.zeros', 'np.zeros', (['zm_shape'], {'dtype': 'float'}), '(zm_shape, dtype=float)\n', (18301, 18324), True, 'import numpy as np\n'), ((18859, 18886), 'numpy.sum', 'np.sum', (['(pz * icond)'], {'axis': '(-1)'}), '(pz * icond, axis=-1)\n', (18865, 18886), True, 'import numpy as np\n'), ((20692, 20704), 'numpy.unique', 'np.unique', (['z'], {}), '(z)\n', (20701, 20704), True, 'import numpy as np\n'), ((20859, 20890), 'numpy.zeros', 'np.zeros', (['zm_shape'], {'dtype': 'float'}), '(zm_shape, dtype=float)\n', (20867, 20890), True, 'import numpy as np\n'), ((23353, 23384), 'numpy.zeros', 'np.zeros', (['zm_shape'], {'dtype': 'float'}), '(zm_shape, dtype=float)\n', (23361, 23384), True, 'import numpy as np\n'), ((23865, 23892), 'numpy.sum', 'np.sum', (['(pz * icond)'], {'axis': '(-1)'}), '(pz * icond, axis=-1)\n', (23871, 23892), True, 'import numpy as np\n'), ((1237, 1254), 'numpy.arange', 'np.arange', (['x.ndim'], {}), '(x.ndim)\n', (1246, 1254), True, 'import numpy as np\n'), ((1433, 1450), 'numpy.arange', 'np.arange', (['x.ndim'], {}), '(x.ndim)\n', (1442, 1450), True, 'import numpy as np\n'), ((1836, 1853), 'numpy.arange', 'np.arange', (['x.ndim'], {}), '(x.ndim)\n', (1845, 1853), True, 'import numpy as np\n'), ((4785, 4809), 'numpy.arange', 'np.arange', (['(1)', '(nvarxy + 1)'], {}), '(1, nvarxy + 1)\n', (4794, 4809), True, 'import numpy as np\n'), ((7484, 7506), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['cm'], {}), 
'(cm)\n', (7502, 7506), True, 'import numpy as np\n'), ((7700, 7738), 'numpy.einsum', 'np.einsum', (['"""...ij, ...kj->...ik"""', 'x', 'x'], {}), "('...ij, ...kj->...ik', x, x)\n", (7709, 7738), True, 'import numpy as np\n'), ((7904, 7927), 'numpy.arange', 'np.arange', (['(1)', '(nvarx + 1)'], {}), '(1, nvarx + 1)\n', (7913, 7927), True, 'import numpy as np\n'), ((8181, 8215), 'numpy.zeros_like', 'np.zeros_like', (['ntrl_y'], {'dtype': 'float'}), '(ntrl_y, dtype=float)\n', (8194, 8215), True, 'import numpy as np\n'), ((11726, 11751), 'numpy.arange', 'np.arange', (['(1)', '(nvarxyz + 1)'], {}), '(1, nvarxyz + 1)\n', (11735, 11751), True, 'import numpy as np\n'), ((15611, 15633), 'frites.core.copnorm.copnorm_nd', 'copnorm_nd', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (15621, 15633), False, 'from frites.core.copnorm import copnorm_nd\n'), ((21297, 21319), 'numpy.multiply', 'np.multiply', (['pz', 'icond'], {}), '(pz, icond)\n', (21308, 21319), True, 'import numpy as np\n'), ((26698, 26720), 'frites.core.copnorm.copnorm_nd', 'copnorm_nd', (['z'], {'axis': '(-1)'}), '(z, axis=-1)\n', (26708, 26720), False, 'from frites.core.copnorm import copnorm_nd\n'), ((7402, 7442), 'numpy.einsum', 'np.einsum', (['"""...ij, ...kj->...ik"""', 'xm', 'xm'], {}), "('...ij, ...kj->...ik', xm, xm)\n", (7411, 7442), True, 'import numpy as np\n'), ((8422, 8452), 'numpy.einsum', 'np.einsum', (['"""i, ...i"""', 'w', 'hcond'], {}), "('i, ...i', w, hcond)\n", (8431, 8452), True, 'import numpy as np\n'), ((14014, 14036), 'frites.core.copnorm.copnorm_nd', 'copnorm_nd', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (14024, 14036), False, 'from frites.core.copnorm import copnorm_nd\n'), ((14038, 14060), 'frites.core.copnorm.copnorm_nd', 'copnorm_nd', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (14048, 14060), False, 'from frites.core.copnorm import copnorm_nd\n'), ((18430, 18462), 'frites.core.copnorm.copnorm_nd', 'copnorm_nd', (['x[..., idx]'], {'axis': '(-1)'}), '(x[..., idx], axis=-1)\n', 
(18440, 18462), False, 'from frites.core.copnorm import copnorm_nd\n'), ((18482, 18514), 'frites.core.copnorm.copnorm_nd', 'copnorm_nd', (['y[..., idx]'], {'axis': '(-1)'}), '(y[..., idx], axis=-1)\n', (18492, 18514), False, 'from frites.core.copnorm import copnorm_nd\n'), ((23490, 23522), 'frites.core.copnorm.copnorm_nd', 'copnorm_nd', (['x[..., idx]'], {'axis': '(-1)'}), '(x[..., idx], axis=-1)\n', (23500, 23522), False, 'from frites.core.copnorm import copnorm_nd\n'), ((24828, 24841), 'itertools.product', 'product', (['*u_z'], {}), '(*u_z)\n', (24835, 24841), False, 'from itertools import product\n'), ((26638, 26660), 'frites.core.copnorm.copnorm_nd', 'copnorm_nd', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (26648, 26660), False, 'from frites.core.copnorm import copnorm_nd\n'), ((26662, 26684), 'frites.core.copnorm.copnorm_nd', 'copnorm_nd', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (26672, 26684), False, 'from frites.core.copnorm import copnorm_nd\n'), ((4576, 4606), 'numpy.einsum', 'np.einsum', (['"""...ii->...i"""', 'chcx'], {}), "('...ii->...i', chcx)\n", (4585, 4606), True, 'import numpy as np\n'), ((4632, 4662), 'numpy.einsum', 'np.einsum', (['"""...ii->...i"""', 'chcy'], {}), "('...ii->...i', chcy)\n", (4641, 4662), True, 'import numpy as np\n'), ((4689, 4720), 'numpy.einsum', 'np.einsum', (['"""...ii->...i"""', 'chcxy'], {}), "('...ii->...i', chcxy)\n", (4698, 4720), True, 'import numpy as np\n'), ((4896, 4914), 'numpy.log', 'np.log', (['(ntrl - 1.0)'], {}), '(ntrl - 1.0)\n', (4902, 4914), True, 'import numpy as np\n'), ((7809, 7838), 'numpy.einsum', 'np.einsum', (['"""...ii->...i"""', 'chc'], {}), "('...ii->...i', chc)\n", (7818, 7838), True, 'import numpy as np\n'), ((11455, 11485), 'numpy.einsum', 'np.einsum', (['"""...ii->...i"""', 'chcz'], {}), "('...ii->...i', chcz)\n", (11464, 11485), True, 'import numpy as np\n'), ((11512, 11543), 'numpy.einsum', 'np.einsum', (['"""...ii->...i"""', 'chcxz'], {}), "('...ii->...i', chcxz)\n", (11521, 11543), 
True, 'import numpy as np\n'), ((11570, 11601), 'numpy.einsum', 'np.einsum', (['"""...ii->...i"""', 'chcyz'], {}), "('...ii->...i', chcyz)\n", (11579, 11601), True, 'import numpy as np\n'), ((11629, 11661), 'numpy.einsum', 'np.einsum', (['"""...ii->...i"""', 'chcxyz'], {}), "('...ii->...i', chcxyz)\n", (11638, 11661), True, 'import numpy as np\n'), ((11838, 11856), 'numpy.log', 'np.log', (['(ntrl - 1.0)'], {}), '(ntrl - 1.0)\n', (11844, 11856), True, 'import numpy as np\n'), ((24291, 24303), 'numpy.unique', 'np.unique', (['z'], {}), '(z)\n', (24300, 24303), True, 'import numpy as np\n'), ((24961, 24988), 'numpy.all', 'np.all', (['np.c_[_idx]'], {'axis': '(0)'}), '(np.c_[_idx], axis=0)\n', (24967, 24988), True, 'import numpy as np\n'), ((7540, 7570), 'numpy.einsum', 'np.einsum', (['"""...ii->...i"""', 'chcm'], {}), "('...ii->...i', chcm)\n", (7549, 7570), True, 'import numpy as np\n'), ((24767, 24779), 'numpy.unique', 'np.unique', (['k'], {}), '(k)\n', (24776, 24779), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 18 09:31:17 2018
@author: philt
"""
# Regression Template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
# Use 1:2 to create a matrix vers 1 which creates a vector
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# EDA - plot raw data
#plt.scatter(X, y, color = 'purple')
#plt.title('EDA - Raw Data Plot')
#plt.xlabel('Position level')
#plt.ylabel('Salary')
#plt.show()
# Splitting the dataset into the Training set and Test set
# Don't have enough data. Want to make an accurate prediction
# Feature Scaling
# No need here - Library habdles this
# Fitting Linear Regression to the dataset
from sklearn.linear_model import LinearRegression
LinReg = LinearRegression()
LinReg.fit(X, y)
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
PolyReg = PolynomialFeatures(degree=4) # Start with degree = 2
X_poly = PolyReg.fit_transform(X)
PolyLinReg = LinearRegression()
PolyLinReg.fit(X_poly, y)
# Plot residuals
''' Linear Residual plot shows a U shape suggesting a better fit
using a non-linear model'''
res_lin = y - LinReg.predict(X)
res_poly = y - PolyLinReg.predict(PolyReg.fit_transform(X))
plt.figure(figsize=(12,8), facecolor='1.0')
plt.scatter(X, res_lin)
plt.title('EDA - Residual Data Plot (Simple Linear)', size=28)
plt.xlabel('Position level', size=24)
plt.ylabel('y-yhat', size=24)
plt.show()
''' Polynomial Residual plot shows a random pattern'''
plt.figure(figsize=(12,8), facecolor='1.0')
plt.scatter(X, res_poly)
plt.title('EDA - Residual Data Plot (Polynimial)', size=28)
plt.xlabel('Position level', size=24)
plt.ylabel('y-yhat', size=24)
plt.show()
# Check Q-Q plot (Normally Distributed)
import numpy.random as random
'''Plot shows the one outlier, but besides that has a
normal distribution'''
#y_test = y[0:9] # Removed outlier for testing
y_test.sort()
norm = random.normal(0, 2, len(y))
norm.sort()
plt.figure(figsize=(12,8), facecolor='1.0')
plt.plot(norm, y, "o")
#Generate a trend line
z = np.polyfit(norm, y, 1)
p = np.poly1d(z)
plt.plot(norm, p(norm), "--", linewidth=2)
plt.title("Normal Q-Q Plot", size = 28)
plt.xlabel("Theoretical Quantiles", size=24)
plt.ylabel("Salary Quantiles", size=24)
plt.tick_params(labelsize=16)
plt.show()
## Visualizing the Linear Regression
#plt.scatter(X, y, color = 'red')
#plt.plot(X, LinReg.predict(X), color='blue')
#plt.title('Truth or Bluff (Linear Regression)')
#plt.xlabel('Position level')
#plt.ylabel('Salary')
#
## Visualizing the Poly Regression
#X_grid = np.arange(min(X), max(X), 0.1)
#X_grid = X_grid.reshape((len(X_grid), 1))
#plt.scatter(X, y, color = 'red')
#plt.plot(X_grid, PolyLinReg.predict(PolyReg.fit_transform(X_grid)),
# color='green')
#plt.title('Truth or Bluff (Polynomial Regression)')
#plt.xlabel('Position level')
#plt.ylabel('Salary')
#plt.show()
# Predictin a new result with Linear Regression
print("Linear Regression Result for a 6.5 Level: ", LinReg.predict(6.5))
# Predicting a new result with Polynimial Regression
print("Poly Regression Result for a 6.5 Level: ",
PolyLinReg.predict(PolyReg.fit_transform(6.5)))
| [
"sklearn.preprocessing.PolynomialFeatures",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.polyfit",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"numpy.poly1d",
"matplotlib.pyplot.title",
... | [((243, 279), 'pandas.read_csv', 'pd.read_csv', (['"""Position_Salaries.csv"""'], {}), "('Position_Salaries.csv')\n", (254, 279), True, 'import pandas as pd\n'), ((843, 861), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (859, 861), False, 'from sklearn.linear_model import LinearRegression\n'), ((990, 1018), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(4)'}), '(degree=4)\n', (1008, 1018), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1091, 1109), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1107, 1109), False, 'from sklearn.linear_model import LinearRegression\n'), ((1340, 1384), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)', 'facecolor': '"""1.0"""'}), "(figsize=(12, 8), facecolor='1.0')\n", (1350, 1384), True, 'import matplotlib.pyplot as plt\n'), ((1384, 1407), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'res_lin'], {}), '(X, res_lin)\n', (1395, 1407), True, 'import matplotlib.pyplot as plt\n'), ((1408, 1470), 'matplotlib.pyplot.title', 'plt.title', (['"""EDA - Residual Data Plot (Simple Linear)"""'], {'size': '(28)'}), "('EDA - Residual Data Plot (Simple Linear)', size=28)\n", (1417, 1470), True, 'import matplotlib.pyplot as plt\n'), ((1471, 1508), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position level"""'], {'size': '(24)'}), "('Position level', size=24)\n", (1481, 1508), True, 'import matplotlib.pyplot as plt\n'), ((1509, 1538), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y-yhat"""'], {'size': '(24)'}), "('y-yhat', size=24)\n", (1519, 1538), True, 'import matplotlib.pyplot as plt\n'), ((1539, 1549), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1547, 1549), True, 'import matplotlib.pyplot as plt\n'), ((1606, 1650), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)', 'facecolor': '"""1.0"""'}), "(figsize=(12, 8), facecolor='1.0')\n", (1616, 1650), True, 
'import matplotlib.pyplot as plt\n'), ((1650, 1674), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'res_poly'], {}), '(X, res_poly)\n', (1661, 1674), True, 'import matplotlib.pyplot as plt\n'), ((1675, 1734), 'matplotlib.pyplot.title', 'plt.title', (['"""EDA - Residual Data Plot (Polynimial)"""'], {'size': '(28)'}), "('EDA - Residual Data Plot (Polynimial)', size=28)\n", (1684, 1734), True, 'import matplotlib.pyplot as plt\n'), ((1735, 1772), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position level"""'], {'size': '(24)'}), "('Position level', size=24)\n", (1745, 1772), True, 'import matplotlib.pyplot as plt\n'), ((1773, 1802), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y-yhat"""'], {'size': '(24)'}), "('y-yhat', size=24)\n", (1783, 1802), True, 'import matplotlib.pyplot as plt\n'), ((1803, 1813), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1811, 1813), True, 'import matplotlib.pyplot as plt\n'), ((2071, 2115), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)', 'facecolor': '"""1.0"""'}), "(figsize=(12, 8), facecolor='1.0')\n", (2081, 2115), True, 'import matplotlib.pyplot as plt\n'), ((2115, 2137), 'matplotlib.pyplot.plot', 'plt.plot', (['norm', 'y', '"""o"""'], {}), "(norm, y, 'o')\n", (2123, 2137), True, 'import matplotlib.pyplot as plt\n'), ((2166, 2188), 'numpy.polyfit', 'np.polyfit', (['norm', 'y', '(1)'], {}), '(norm, y, 1)\n', (2176, 2188), True, 'import numpy as np\n'), ((2193, 2205), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (2202, 2205), True, 'import numpy as np\n'), ((2249, 2286), 'matplotlib.pyplot.title', 'plt.title', (['"""Normal Q-Q Plot"""'], {'size': '(28)'}), "('Normal Q-Q Plot', size=28)\n", (2258, 2286), True, 'import matplotlib.pyplot as plt\n'), ((2289, 2333), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Theoretical Quantiles"""'], {'size': '(24)'}), "('Theoretical Quantiles', size=24)\n", (2299, 2333), True, 'import matplotlib.pyplot as plt\n'), ((2334, 2373), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Salary Quantiles"""'], {'size': '(24)'}), "('Salary Quantiles', size=24)\n", (2344, 2373), True, 'import matplotlib.pyplot as plt\n'), ((2374, 2403), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(16)'}), '(labelsize=16)\n', (2389, 2403), True, 'import matplotlib.pyplot as plt\n'), ((2404, 2414), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2412, 2414), True, 'import matplotlib.pyplot as plt\n')] |
from SkateUtils.NonHolonomicWorld import NHWorld, NHWorldV2
from SkateUtils.DartMotionEdit import DartSkelMotion
import numpy as np
from math import exp, pi, log
from PyCommon.modules.Math import mmMath as mm
from random import random, randrange
import gym
import gym.spaces
from gym.utils import seeding
import pydart2 as pydart
def exp_reward_term(w, exp_w, v):
    """Gaussian-shaped reward term: ``w * exp(-exp_w * |v|**2)``.

    ``v`` is reduced with its Euclidean norm, so the term is ``w`` when the
    error vector is zero and decays with the squared error magnitude.
    """
    magnitude = np.linalg.norm(v)
    return w * exp(-exp_w * magnitude * magnitude)
class SkateDartEnv(gym.Env):
    """Gym environment for imitation learning of a skating motion.

    A PD-controlled skater skeleton (pydart2, non-holonomic contact world)
    tracks a reference motion clip; the reward is a weighted sum of pose,
    velocity, end-effector and center-of-mass terms, all shaped through
    ``exp_reward_term``.
    """

    def __init__(self, ):
        pydart.init()
        # Resolve data paths relative to this source file.
        cur_path = '/'.join(__file__.split('/')[:-1])
        # Simulated world (1/1200 s timestep); skeleton 1 is the skater.
        self.world = NHWorldV2(1./1200., cur_path+'/../../data/skel/skater_3dof_with_ground.skel')
        self.world.control_skel = self.world.skeletons[1]
        self.skel = self.world.skeletons[1]
        # PD gains used by the SPD tracking controller in step().
        self.Kp, self.Kd = 1000., 60.
        # A second, independent world holds the kinematic reference pose.
        self.ref_world = NHWorldV2(1./1200., cur_path+'/../../data/skel/skater_3dof_with_ground.skel')
        self.ref_skel = self.ref_world.skeletons[1]
        self.ref_motion = DartSkelMotion()
        self.ref_motion.load(cur_path+'/skate_ref.skmo')
        self.ref_motion.refine_dqs(self.ref_skel)
        # Number of physics substeps per control frame.
        self.step_per_frame = 40
        # Reference State Initialization flag (see flag_rsi()).
        self.rsi = True
        # Reward weights (pose, velocity, end-effector, COM) ...
        self.w_p = 0.65
        self.w_v = 0.1
        self.w_e = 0.15
        self.w_c = 0.1
        # ... and the matching exponential sharpness factors.
        self.exp_p = 2.
        self.exp_v = 0.1
        self.exp_e = 40.
        self.exp_c = 10.
        # Bodies used in the observation/reward: every body node except
        # the two skate blades.
        self.body_num = self.skel.num_bodynodes() - 2
        self.reward_bodies = [body for body in self.skel.bodynodes]
        self.reward_bodies.pop(self.reward_bodies.index(self.skel.body('h_blade_left')))
        self.reward_bodies.pop(self.reward_bodies.index(self.skel.body('h_blade_right')))
        # End-effector body indices (hands and blades) on both skeletons.
        self.idx_e = [self.skel.bodynode_index('h_hand_left'), self.skel.bodynode_index('h_hand_right'),
                      self.skel.bodynode_index('h_blade_left'), self.skel.bodynode_index('h_blade_right')]
        self.body_e = list(map(self.skel.body, self.idx_e))
        self.ref_body_e = list(map(self.ref_skel.body, self.idx_e))
        self.motion_len = len(self.ref_motion)
        self.motion_time = len(self.ref_motion) / self.ref_motion.fps
        # Observation size: phase scalar + (3 positions + 3 linear + 3
        # angular velocity components + 4 quaternion values) per body.
        state_num = 1 + (3*3 + 4) * self.body_num
        # Action size: every DOF except the 6 unactuated root DOFs.
        action_num = self.skel.num_dofs() - 6
        state_high = np.array([np.finfo(np.float32).max] * state_num)
        action_high = np.array([pi*10./2.] * action_num)
        self.action_space = gym.spaces.Box(-action_high, action_high, dtype=np.float32)
        self.observation_space = gym.spaces.Box(-state_high, state_high, dtype=np.float32)
        self.viewer = None
        self.current_frame = 0
        self.count_frame = 0
        # Hard episode cap in frames (presumably 10 s at 30 fps -- TODO
        # confirm against self.ref_motion.fps).
        self.max_frame = 30*10

    def state(self):
        """Return the observation vector.

        Layout: [phase, body positions, body orientations (quaternions),
        linear velocities, angular velocities], with all spatial
        quantities expressed in the pelvis frame.
        """
        pelvis = self.skel.body(0)
        p_pelvis = pelvis.world_transform()[:3, 3]
        R_pelvis = pelvis.world_transform()[:3, :3]
        # phase = min(1., (self.world.time() + self.time_offset)/self.motion_time)
        phase = self.ref_motion.get_frame_looped(self.current_frame)
        state = [phase]
        # Rotate every body quantity into the pelvis frame so the state is
        # invariant to the global heading/position of the character.
        p = np.array([np.dot(R_pelvis.T, body.to_world() - p_pelvis) for body in self.reward_bodies]).flatten()
        R = np.array([mm.rot2quat(np.dot(R_pelvis.T, body.world_transform()[:3, :3])) for body in self.reward_bodies]).flatten()
        v = np.array([np.dot(R_pelvis.T, body.world_linear_velocity()) for body in self.reward_bodies]).flatten()
        # Angular velocities are scaled by 1/20 (presumably to keep their
        # magnitude comparable to the other features -- TODO confirm).
        w = np.array([np.dot(R_pelvis.T, body.world_angular_velocity())/20. for body in self.reward_bodies]).flatten()
        state.extend(p)
        state.extend(R)
        state.extend(v)
        state.extend(w)
        return np.asarray(state).flatten()

    def reward(self):
        """Imitation reward against the current reference frame.

        Sum of four exponential terms: pose, velocity, end-effector
        position, and center-of-mass differences between the simulated and
        the reference skeleton.
        """
        # Pose the reference skeleton at the current reference frame.
        self.ref_skel.set_positions(self.ref_motion.qs[self.current_frame])
        self.ref_skel.set_velocities(self.ref_motion.dqs[self.current_frame])
        # End-effector world positions: reference vs simulated.
        p_e_hat = np.asarray([body.world_transform()[:3, 3] for body in self.ref_body_e]).flatten()
        p_e = np.asarray([body.world_transform()[:3, 3] for body in self.body_e]).flatten()
        r_p = exp_reward_term(self.w_p, self.exp_p, self.skel.position_differences(self.skel.q, self.ref_skel.q))
        r_v = exp_reward_term(self.w_v, self.exp_v, self.skel.velocity_differences(self.skel.dq, self.ref_skel.dq))
        r_e = exp_reward_term(self.w_e, self.exp_e, p_e - p_e_hat)
        r_com = exp_reward_term(self.w_c, self.exp_c, self.skel.com() - self.ref_skel.com())
        return r_p + r_v + r_e + r_com

    def is_done(self):
        """Episode termination: fall, head contact, NaN state, or timeout."""
        # Fallen: center of mass too close to the ground.
        if self.skel.com()[1] < 0.2:
            # print('fallen')
            return True
        elif self.skel.body('h_head') in self.world.collision_result.contacted_bodies:
            return True
        elif True in np.isnan(np.asarray(self.skel.q)) or True in np.isnan(np.asarray(self.skel.dq)):
            # print('nan')
            return True
        elif self.ref_motion.has_loop and self.count_frame >= self.max_frame:
            # print('timeout')
            return True
        elif self.current_frame == self.motion_len - 1 or self.count_frame >= self.max_frame:
            # print('timeout')
            return True
        return False

    def step(self, _action):
        """Advance one control frame.

        The (scaled-down) action is an offset added to the next reference
        pose; the resulting target is tracked with SPD control for
        ``step_per_frame`` physics substeps.

        Returns (observation, reward, done, info).
        """
        # Root (first 6 DOFs) is unactuated; the policy output is scaled
        # by 1/10 before being applied as a pose offset.
        action = np.hstack((np.zeros(6), _action/10.))

        next_frame = self.current_frame + 1
        self.ref_skel.set_positions(self.ref_motion.qs[next_frame])
        self.ref_skel.set_velocities(self.ref_motion.dqs[next_frame])

        h = self.world.time_step()
        # Desired pose = reference pose + policy offset.
        q_des = self.ref_skel.q + action

        for i in range(self.step_per_frame):
            # Stable PD (SPD) torques toward the desired pose.
            self.skel.set_forces(self.skel.get_spd(q_des, h, self.Kp, self.Kd))
            self.world.step()

        self.current_frame = next_frame
        self.count_frame += 1

        return tuple([self.state(), self.reward(), self.is_done(), dict()])

    def continue_from_frame(self, frame):
        """Restart reference tracking at ``frame``.

        The reference motion is translated horizontally so its pelvis
        matches the simulated skeleton's current pelvis position (height
        is left unchanged).
        """
        self.current_frame = frame
        self.ref_skel.set_positions(self.ref_motion.qs[self.current_frame])
        skel_pelvis_offset = self.skel.joint(0).position_in_world_frame() - self.ref_skel.joint(0).position_in_world_frame()
        skel_pelvis_offset[1] = 0.
        self.ref_motion.translate_by_offset(skel_pelvis_offset)

    def reset(self):
        """Reset the world and start from a random reference frame.

        NOTE(review): the random start frame is applied unconditionally;
        the ``self.rsi`` flag set by flag_rsi() is not consulted here --
        confirm whether that is intended.
        """
        self.world.reset()
        self.continue_from_frame(randrange(self.motion_len-1))
        # Snap the simulated skeleton onto the chosen reference frame.
        self.skel.set_positions(self.ref_motion.qs[self.current_frame])
        self.skel.set_velocities(np.asarray(self.ref_motion.dqs[self.current_frame]))
        self.count_frame = 0

        return self.state()

    def render(self, mode='human', close=False):
        """Rendering is not implemented for this environment."""
        return None

    def close(self):
        """Nothing to clean up."""
        pass

    def seed(self, seed=None):
        """Seed the gym RNG and return the seed list (gym convention)."""
        self.np_random, seed = gym.utils.seeding.np_random(seed)
        return [seed]

    def flag_rsi(self, rsi=True):
        """Enable/disable Reference State Initialization."""
        self.rsi = rsi
| [
"random.randrange",
"numpy.asarray",
"SkateUtils.DartMotionEdit.DartSkelMotion",
"gym.spaces.Box",
"pydart2.init",
"numpy.array",
"numpy.zeros",
"numpy.linalg.norm",
"SkateUtils.NonHolonomicWorld.NHWorldV2",
"numpy.finfo",
"math.exp",
"gym.utils.seeding.np_random"
] | [((377, 394), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (391, 394), True, 'import numpy as np\n'), ((410, 435), 'math.exp', 'exp', (['(-exp_w * norm * norm)'], {}), '(-exp_w * norm * norm)\n', (413, 435), False, 'from math import exp, pi, log\n'), ((501, 514), 'pydart2.init', 'pydart.init', ([], {}), '()\n', (512, 514), True, 'import pydart2 as pydart\n'), ((590, 677), 'SkateUtils.NonHolonomicWorld.NHWorldV2', 'NHWorldV2', (['(1.0 / 1200.0)', "(cur_path + '/../../data/skel/skater_3dof_with_ground.skel')"], {}), "(1.0 / 1200.0, cur_path +\n '/../../data/skel/skater_3dof_with_ground.skel')\n", (599, 677), False, 'from SkateUtils.NonHolonomicWorld import NHWorld, NHWorldV2\n'), ((834, 921), 'SkateUtils.NonHolonomicWorld.NHWorldV2', 'NHWorldV2', (['(1.0 / 1200.0)', "(cur_path + '/../../data/skel/skater_3dof_with_ground.skel')"], {}), "(1.0 / 1200.0, cur_path +\n '/../../data/skel/skater_3dof_with_ground.skel')\n", (843, 921), False, 'from SkateUtils.NonHolonomicWorld import NHWorld, NHWorldV2\n'), ((990, 1006), 'SkateUtils.DartMotionEdit.DartSkelMotion', 'DartSkelMotion', ([], {}), '()\n', (1004, 1006), False, 'from SkateUtils.DartMotionEdit import DartSkelMotion\n'), ((2316, 2356), 'numpy.array', 'np.array', (['([pi * 10.0 / 2.0] * action_num)'], {}), '([pi * 10.0 / 2.0] * action_num)\n', (2324, 2356), True, 'import numpy as np\n'), ((2380, 2439), 'gym.spaces.Box', 'gym.spaces.Box', (['(-action_high)', 'action_high'], {'dtype': 'np.float32'}), '(-action_high, action_high, dtype=np.float32)\n', (2394, 2439), False, 'import gym\n'), ((2473, 2530), 'gym.spaces.Box', 'gym.spaces.Box', (['(-state_high)', 'state_high'], {'dtype': 'np.float32'}), '(-state_high, state_high, dtype=np.float32)\n', (2487, 2530), False, 'import gym\n'), ((6610, 6643), 'gym.utils.seeding.np_random', 'gym.utils.seeding.np_random', (['seed'], {}), '(seed)\n', (6637, 6643), False, 'import gym\n'), ((6196, 6226), 'random.randrange', 'randrange', (['(self.motion_len - 1)'], {}), 
'(self.motion_len - 1)\n', (6205, 6226), False, 'from random import random, randrange\n'), ((6331, 6382), 'numpy.asarray', 'np.asarray', (['self.ref_motion.dqs[self.current_frame]'], {}), '(self.ref_motion.dqs[self.current_frame])\n', (6341, 6382), True, 'import numpy as np\n'), ((3576, 3593), 'numpy.asarray', 'np.asarray', (['state'], {}), '(state)\n', (3586, 3593), True, 'import numpy as np\n'), ((5145, 5156), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (5153, 5156), True, 'import numpy as np\n'), ((2255, 2275), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (2263, 2275), True, 'import numpy as np\n'), ((4661, 4684), 'numpy.asarray', 'np.asarray', (['self.skel.q'], {}), '(self.skel.q)\n', (4671, 4684), True, 'import numpy as np\n'), ((4706, 4730), 'numpy.asarray', 'np.asarray', (['self.skel.dq'], {}), '(self.skel.dq)\n', (4716, 4730), True, 'import numpy as np\n')] |
import os
import sys
import cv2
import pathlib
import subprocess
import time
import numpy as np
def main():
	"""Pre-process the KTH action dataset.

	For every .avi clip under ./kth/<class>/, build a small stack of
	motion-augmented grayscale frames (|frame_a - frame_b| + frame_c over
	frame triplets 7..21) and save it as a .npy array under
	./processed/<class>/.
	"""
	network_length = 5
	curr_pat = "*.avi"
	# Collect class names from the immediate sub-directories of ./kth.
	classes = []
	for root,dirs,file in os.walk("./kth"):
		for dir_name in dirs:
			classes.append(dir_name)
	#print(classes)
	def image_name(filename):
		# Derive the output file stem from the video path components.
		# NOTE(review): assumes the path contains at least two '/' and the
		# usual KTH 'personXX_<action>_dY_uncomp.avi' naming -- verify for
		# other directory layouts.
		#print(filename)
		lister = filename.split('/')[2].split('_')
		final_name = lister[0][6:]+lister[2]
		#print(final_name)
		return(final_name)
	dirName = 'processed'
	if not os.path.exists(dirName):
		os.mkdir(dirName)
	print("Pre-processing ...")
	# One output folder per action class (fails if it already exists).
	for curr_class in classes:
		os.mkdir(dirName+"/"+curr_class)
	for curr_class in classes:
		curr_dir = pathlib.Path('./kth/'+curr_class)
		for curr_file in curr_dir.glob(curr_pat):
			list_arr= []
			vidObj = cv2.VideoCapture(str(curr_file))
			try :
				# Count frames via an external ffmpeg/grep/cut pipeline; clips
				# whose output cannot be parsed as an int are skipped.
				frames = int(subprocess.check_output("ffmpeg -i "+str(curr_file)+" -vcodec copy -acodec copy -f null /dev/null 2>&1 | grep 'frame=' | cut -f 3 -d ' '", shell=True))
			except ValueError:
				continue
			else :
				# Walk frames 7..21 in triplets: x%3==1 stores `one`, x%3==2
				# stores `two`, x%3==0 appends gray(|one-two| + current).
				for x in range(7,22):
					vidObj.set(1,x)
					success, image = vidObj.read()
					if success:
						if x%3 == 0:
							# NOTE(review): uint8 arithmetic wraps on
							# overflow here -- confirm this is intended.
							average = abs(one-two) + image
							average = np.uint8(average)
							gray = cv2.cvtColor(average, cv2.COLOR_BGR2GRAY)
							list_arr.append(gray)
						elif x%3 == 1 :
							one = image
						elif x%3 == 2 :
							two = image
				video_array = np.stack((list_arr),axis = 0)
				# np.save returns None, so this branch body never executes;
				# the save itself still happens when the condition is evaluated.
				if np.save("./processed/"+curr_class+"/"+image_name(str(curr_file)),video_array):
					#time(0.1)
					pass
				vidObj.release()
			#print(str(curr_file)," : ",frames)
	#vidObj.release()
	'''
	#vidObj.release()
	for x in range(network_length):
		success, image = vidObj.read()
		cv2.imwrite("frame%d.jpg" % count, image)
		count += 1
	'''
	print("Pre-Processing done!")
# Entry point: run pre-processing only if no ./processed directory exists;
# otherwise ask the user before wiping and regenerating it.
if not os.path.exists('processed'):
	main()
else:
	print("Pre-processed Directory already exists.\nPlease input 'yes' to delete existing folder and start pre processing again : ")
	com = input()
	if com == 'yes':
		# NOTE(review): deletion via an external shell command; shutil.rmtree
		# would be the portable alternative.
		subprocess.check_output("rm -rf processed", shell=True)
		main()
	else:
		print("Exiting")
#ffmpeg -i 00000.avi -vcodec copy -acodec copy -f null /dev/null 2>&1 | grep 'frame=' | cut -f 3 -d ' '
| [
"subprocess.check_output",
"os.path.exists",
"numpy.uint8",
"pathlib.Path",
"numpy.stack",
"os.mkdir",
"cv2.cvtColor",
"os.walk"
] | [((190, 206), 'os.walk', 'os.walk', (['"""./kth"""'], {}), "('./kth')\n", (197, 206), False, 'import os\n'), ((1903, 1930), 'os.path.exists', 'os.path.exists', (['"""processed"""'], {}), "('processed')\n", (1917, 1930), False, 'import os\n'), ((483, 506), 'os.path.exists', 'os.path.exists', (['dirName'], {}), '(dirName)\n', (497, 506), False, 'import os\n'), ((510, 527), 'os.mkdir', 'os.mkdir', (['dirName'], {}), '(dirName)\n', (518, 527), False, 'import os\n'), ((2117, 2172), 'subprocess.check_output', 'subprocess.check_output', (['"""rm -rf processed"""'], {'shell': '(True)'}), "('rm -rf processed', shell=True)\n", (2140, 2172), False, 'import subprocess\n'), ((590, 626), 'os.mkdir', 'os.mkdir', (["(dirName + '/' + curr_class)"], {}), "(dirName + '/' + curr_class)\n", (598, 626), False, 'import os\n'), ((676, 711), 'pathlib.Path', 'pathlib.Path', (["('./kth/' + curr_class)"], {}), "('./kth/' + curr_class)\n", (688, 711), False, 'import pathlib\n'), ((1437, 1463), 'numpy.stack', 'np.stack', (['list_arr'], {'axis': '(0)'}), '(list_arr, axis=0)\n', (1445, 1463), True, 'import numpy as np\n'), ((1227, 1244), 'numpy.uint8', 'np.uint8', (['average'], {}), '(average)\n', (1235, 1244), True, 'import numpy as np\n'), ((1260, 1301), 'cv2.cvtColor', 'cv2.cvtColor', (['average', 'cv2.COLOR_BGR2GRAY'], {}), '(average, cv2.COLOR_BGR2GRAY)\n', (1272, 1301), False, 'import cv2\n')] |
from __future__ import division
from __future__ import print_function
import time
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from pygcn.utils import load_data, accuracy
from pygcn.models import GCN, MLP
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# Training settings
# NOTE(review): appears to implement random-propagation + consistency
# training (GRAND-style) on citation graphs -- confirm against the paper.
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='Disables CUDA training.')
parser.add_argument('--fastmode', action='store_true', default=False,
                    help='Validate during training pass.')
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=5000,
                    help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.01,
                    help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
                    help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=32,
                    help='Number of hidden units.')
parser.add_argument('--input_droprate', type=float, default=0.5,
                    help='Dropout rate of the input layer (1 - keep probability).')
parser.add_argument('--hidden_droprate', type=float, default=0.5,
                    help='Dropout rate of the hidden layer (1 - keep probability).')
parser.add_argument('--dropnode_rate', type=float, default=0.5,
                    help='Dropnode rate (1 - keep probability).')
parser.add_argument('--patience', type=int, default=100, help='Patience')
parser.add_argument('--order', type=int, default=5, help='Propagation step')
parser.add_argument('--sample', type=int, default=4, help='Sampling times of dropnode')
parser.add_argument('--tem', type=float, default=0.5, help='Sharpening temperature')
parser.add_argument('--lam', type=float, default=1., help='Lamda')
parser.add_argument('--dataset', type=str, default='cora', help='Data set')
parser.add_argument('--cuda_device', type=int, default=4, help='Cuda device')
parser.add_argument('--use_bn', action='store_true', default=False, help='Using Batch Normalization')
#dataset = 'citeseer'
#dataset = 'pubmed'
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.cuda.set_device(args.cuda_device)
dataset = args.dataset
# Seed Python/NumPy/torch RNGs for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
# Load data
A, features, labels, idx_train, idx_val, idx_test = load_data(dataset)
# Unlabeled nodes are everything after the training indices.
idx_unlabel = torch.arange(idx_train.shape[0], labels.shape[0], dtype=int)
# Model and optimizer
model = MLP(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            input_droprate=args.input_droprate,
            hidden_droprate=args.hidden_droprate,
            use_bn = args.use_bn)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr, weight_decay=args.weight_decay)
if args.cuda:
    # Move model and every graph tensor to the selected GPU.
    model.cuda()
    features = features.cuda()
    A = A.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()
    idx_unlabel = idx_unlabel.cuda()
def propagate(feature, A, order):
    """Average `feature` with its multi-hop graph propagations.

    Computes (1/(order+1)) * sum_{k=0..order} A^k @ feature, detaching each
    hop from autograd so gradients do not flow through the propagation.

    Bug fix: the original bound the accumulator to `feature` itself
    (`y = feature`) and then used in-place `add_`/`div_`, silently mutating
    the caller's tensor. The accumulator is now a clone, so the input is
    left untouched and only a new tensor is returned.

    Args:
        feature: node feature matrix, shape (n_nodes, n_feats).
        A: adjacency matrix usable with torch.spmm (typically sparse,
           normalized).
        order: number of propagation hops (>= 0).

    Returns:
        The averaged propagated features as a new tensor.
    """
    hop = feature
    acc = feature.clone()
    for _ in range(order):
        # k-th hop: A^k @ feature, detached from the autograd graph.
        hop = torch.spmm(A, hop).detach_()
        acc.add_(hop)
    return acc.div_(order + 1.0).detach_()
def rand_prop(features, training):
    """DropNode followed by graph propagation.

    During training, whole node rows are randomly zeroed via a Bernoulli
    mask; at evaluation time the features are instead scaled by the keep
    probability (the mask's expectation). The result is then smoothed over
    the graph with `propagate` using the module-level adjacency `A`.
    """
    num_nodes = features.shape[0]
    p_drop = args.dropnode_rate
    rates = torch.FloatTensor(np.ones(num_nodes) * p_drop)
    if training:
        # One keep/drop decision per node, broadcast across feature dims.
        node_mask = torch.bernoulli(1. - rates).unsqueeze(1)
        scaled = node_mask.cuda() * features
    else:
        # Deterministic surrogate: scale by the expected keep probability.
        scaled = features * (1. - p_drop)
    return propagate(scaled, A, args.order)
def consis_loss(logps, temp=args.tem):
    """Consistency regularization over multiple augmented predictions.

    Averages the predicted distributions, sharpens the average with
    temperature `temp` (renormalized per row, detached as a fixed target),
    and returns args.lam times the mean squared distance of each
    prediction to that target.
    """
    probs = [torch.exp(logp) for logp in logps]
    avg_p = sum(probs) / len(probs)
    # Temperature sharpening; the target must not receive gradients.
    powered = torch.pow(avg_p, 1. / temp)
    sharp_p = (powered / torch.sum(powered, dim=1, keepdim=True)).detach()
    loss = sum(torch.mean((p - sharp_p).pow(2).sum(1)) for p in probs) / len(probs)
    return args.lam * loss
def train(epoch):
    """Run one training epoch and return (val_loss, val_acc).

    Draws K DropNode-augmented views of the features, averages the
    supervised NLL loss over them, adds the consistency loss, and takes one
    optimizer step. Validation uses deterministic DropNode.
    """
    t = time.time()
    X = features
    model.train()
    optimizer.zero_grad()
    X_list = []
    K = args.sample
    # K independent random propagations of the same features.
    for k in range(K):
        X_list.append(rand_prop(X, training=True))
    output_list = []
    for k in range(K):
        output_list.append(torch.log_softmax(model(X_list[k]), dim=-1))
    # Supervised loss averaged over the K augmented views.
    loss_train = 0.
    for k in range(K):
        loss_train += F.nll_loss(output_list[k][idx_train], labels[idx_train])
    loss_train = loss_train/K
    #loss_train = F.nll_loss(output_1[idx_train], labels[idx_train]) + F.nll_loss(output_1[idx_train], labels[idx_train])
    #loss_js = js_loss(output_1[idx_unlabel], output_2[idx_unlabel])
    #loss_en = entropy_loss(output_1[idx_unlabel]) + entropy_loss(output_2[idx_unlabel])
    loss_consis = consis_loss(output_list)
    loss_train = loss_train + loss_consis
    acc_train = accuracy(output_list[0][idx_train], labels[idx_train])
    loss_train.backward()
    optimizer.step()
    # NOTE(review): if --fastmode is set, `output` is never assigned and the
    # loss_val line below raises NameError -- confirm intended usage.
    if not args.fastmode:
        model.eval()
        X = rand_prop(X,training=False)
        output = model(X)
        output = torch.log_softmax(output, dim=-1)
    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])
    print('Epoch: {:04d}'.format(epoch+1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()),
          'time: {:.4f}s'.format(time.time() - t))
    return loss_val.item(), acc_val.item()
def Train():
    """Full training loop with early stopping.

    Tracks both the running minimum validation loss and maximum validation
    accuracy; an epoch counts as "good" (resets the patience counter) if it
    matches either. The best-loss model is checkpointed to '<dataset>.pkl'
    and restored at the end.
    """
    # Train model
    t_total = time.time()
    loss_values = []
    acc_values = []
    bad_counter = 0
    # best = args.epochs + 1
    loss_best = np.inf
    acc_best = 0.0
    loss_mn = np.inf
    acc_mx = 0.0
    best_epoch = 0
    for epoch in range(args.epochs):
        # if epoch < 200:
        #     l, a = train(epoch, True)
        #     loss_values.append(l)
        #     acc_values.append(a)
        #     continue
        l, a = train(epoch)
        loss_values.append(l)
        acc_values.append(a)
        print(bad_counter)
        if loss_values[-1] <= loss_mn or acc_values[-1] >= acc_mx:# or epoch < 400:
            # Checkpoint only when the validation loss improves on the best.
            if loss_values[-1] <= loss_best: #and acc_values[-1] >= acc_best:
                loss_best = loss_values[-1]
                acc_best = acc_values[-1]
                best_epoch = epoch
                torch.save(model.state_dict(), dataset +'.pkl')
            loss_mn = np.min((loss_values[-1], loss_mn))
            acc_mx = np.max((acc_values[-1], acc_mx))
            bad_counter = 0
        else:
            bad_counter += 1
            # print(bad_counter, loss_mn, acc_mx, loss_best, acc_best, best_epoch)
            if bad_counter == args.patience:
                print('Early stop! Min loss: ', loss_mn, ', Max accuracy: ', acc_mx)
                print('Early stop model validation loss: ', loss_best, ', accuracy: ', acc_best)
                break
    print("Optimization Finished!")
    print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
    # Restore best model
    print('Loading {}th epoch'.format(best_epoch))
    model.load_state_dict(torch.load(dataset +'.pkl'))
def test():
    """Evaluate the (restored) model on the test indices and print metrics."""
    model.eval()
    # Deterministic DropNode (expectation scaling) + graph propagation.
    smoothed = rand_prop(features, training=False)
    log_probs = torch.log_softmax(model(smoothed), dim=-1)
    loss_test = F.nll_loss(log_probs[idx_test], labels[idx_test])
    acc_test = accuracy(log_probs[idx_test], labels[idx_test])
    print("Test set results:",
          "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))
# Kick off training, then evaluate the best checkpoint on the test split.
Train()
test()
| [
"torch.log_softmax",
"torch.exp",
"torch.pow",
"torch.cuda.is_available",
"pygcn.utils.accuracy",
"torch.arange",
"argparse.ArgumentParser",
"torch.nn.functional.nll_loss",
"numpy.max",
"numpy.random.seed",
"numpy.min",
"numpy.ones",
"time.time",
"torch.cuda.set_device",
"torch.manual_se... | [((329, 345), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (343, 345), False, 'from sklearn.preprocessing import StandardScaler\n'), ((375, 400), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (398, 400), False, 'import argparse\n'), ((2381, 2420), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.cuda_device'], {}), '(args.cuda_device)\n', (2402, 2420), False, 'import torch\n'), ((2444, 2469), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2458, 2469), True, 'import numpy as np\n'), ((2470, 2498), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2487, 2498), False, 'import torch\n'), ((2616, 2634), 'pygcn.utils.load_data', 'load_data', (['dataset'], {}), '(dataset)\n', (2625, 2634), False, 'from pygcn.utils import load_data, accuracy\n'), ((2649, 2709), 'torch.arange', 'torch.arange', (['idx_train.shape[0]', 'labels.shape[0]'], {'dtype': 'int'}), '(idx_train.shape[0], labels.shape[0], dtype=int)\n', (2661, 2709), False, 'import torch\n'), ((2355, 2380), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2378, 2380), False, 'import torch\n'), ((2517, 2550), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2539, 2550), False, 'import torch\n'), ((4482, 4493), 'time.time', 'time.time', ([], {}), '()\n', (4491, 4493), False, 'import time\n'), ((5322, 5376), 'pygcn.utils.accuracy', 'accuracy', (['output_list[0][idx_train]', 'labels[idx_train]'], {}), '(output_list[0][idx_train], labels[idx_train])\n', (5330, 5376), False, 'from pygcn.utils import load_data, accuracy\n'), ((5605, 5649), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output[idx_val]', 'labels[idx_val]'], {}), '(output[idx_val], labels[idx_val])\n', (5615, 5649), True, 'import torch.nn.functional as F\n'), ((5664, 5706), 'pygcn.utils.accuracy', 'accuracy', (['output[idx_val]', 
'labels[idx_val]'], {}), '(output[idx_val], labels[idx_val])\n', (5672, 5706), False, 'from pygcn.utils import load_data, accuracy\n'), ((6109, 6120), 'time.time', 'time.time', ([], {}), '()\n', (6118, 6120), False, 'import time\n'), ((7823, 7856), 'torch.log_softmax', 'torch.log_softmax', (['output'], {'dim': '(-1)'}), '(output, dim=-1)\n', (7840, 7856), False, 'import torch\n'), ((7873, 7919), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output[idx_test]', 'labels[idx_test]'], {}), '(output[idx_test], labels[idx_test])\n', (7883, 7919), True, 'import torch.nn.functional as F\n'), ((7935, 7979), 'pygcn.utils.accuracy', 'accuracy', (['output[idx_test]', 'labels[idx_test]'], {}), '(output[idx_test], labels[idx_test])\n', (7943, 7979), False, 'from pygcn.utils import load_data, accuracy\n'), ((4067, 4079), 'torch.exp', 'torch.exp', (['p'], {}), '(p)\n', (4076, 4079), False, 'import torch\n'), ((4851, 4907), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output_list[k][idx_train]', 'labels[idx_train]'], {}), '(output_list[k][idx_train], labels[idx_train])\n', (4861, 4907), True, 'import torch.nn.functional as F\n'), ((5555, 5588), 'torch.log_softmax', 'torch.log_softmax', (['output'], {'dim': '(-1)'}), '(output, dim=-1)\n', (5572, 5588), False, 'import torch\n'), ((7673, 7701), 'torch.load', 'torch.load', (["(dataset + '.pkl')"], {}), "(dataset + '.pkl')\n", (7683, 7701), False, 'import torch\n'), ((3739, 3749), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (3746, 3749), True, 'import numpy as np\n'), ((6989, 7023), 'numpy.min', 'np.min', (['(loss_values[-1], loss_mn)'], {}), '((loss_values[-1], loss_mn))\n', (6995, 7023), True, 'import numpy as np\n'), ((7045, 7077), 'numpy.max', 'np.max', (['(acc_values[-1], acc_mx)'], {}), '((acc_values[-1], acc_mx))\n', (7051, 7077), True, 'import numpy as np\n'), ((3496, 3512), 'torch.spmm', 'torch.spmm', (['A', 'x'], {}), '(A, x)\n', (3506, 3512), False, 'import torch\n'), ((3798, 3831), 'torch.bernoulli', 
'torch.bernoulli', (['(1.0 - drop_rates)'], {}), '(1.0 - drop_rates)\n', (3813, 3831), False, 'import torch\n'), ((4223, 4251), 'torch.pow', 'torch.pow', (['avg_p', '(1.0 / temp)'], {}), '(avg_p, 1.0 / temp)\n', (4232, 4251), False, 'import torch\n'), ((6003, 6014), 'time.time', 'time.time', ([], {}), '()\n', (6012, 6014), False, 'import time\n'), ((7546, 7557), 'time.time', 'time.time', ([], {}), '()\n', (7555, 7557), False, 'import time\n'), ((4261, 4289), 'torch.pow', 'torch.pow', (['avg_p', '(1.0 / temp)'], {}), '(avg_p, 1.0 / temp)\n', (4270, 4289), False, 'import torch\n')] |
"""Split-Mix Federated Learning"""
import sys, os, argparse, copy, time
import numpy as np
import wandb
from tqdm import tqdm
import torch
from torch import nn, optim
from torch.nn.modules.batchnorm import _NormBase
# federated
from federated.learning import train_slimmable, test, fed_test_model, refresh_bn, test_dbn
# model and data
from nets.models import ScalableModule
from nets.slimmable_models import EnsembleNet, EnsembleSubnet
# utils
from utils.utils import set_seed, AverageMeter, CosineAnnealingLR, \
MultiStepLR, LocalMaskCrossEntropyLoss, str2bool
from utils.config import CHECKPOINT_ROOT
# NOTE import desired federation
from federated.core import SplitFederation as Federation, AdversaryCreator
def render_run_name(args, exp_folder):
    """Return a unique run_name from given args.

    Side effects: resolves args.model == 'default' to a per-dataset model
    name, sets args.save_path, and creates the checkpoint directory if it
    does not exist. Returns (run_name, SAVE_FILE) where SAVE_FILE is the
    full checkpoint path for this run.
    """
    if args.model == 'default':
        args.model = {'Digits': 'ens_digit', 'Cifar10': 'ens_preresnet18', 'DomainNet': 'ens_alex'}[args.data]
    run_name = f'{args.model}'
    # Federation-specific args are encoded by the federation itself.
    run_name += Federation.render_run_name(args)
    # log non-default args
    if args.seed != 1: run_name += f'__seed_{args.seed}'
    # opt
    if args.lr_sch != 'none': run_name += f'__lrs_{args.lr_sch}'
    if args.opt != 'sgd': run_name += f'__opt_{args.opt}'
    if args.batch != 32: run_name += f'__batch_{args.batch}'
    if args.wk_iters != 1: run_name += f'__wk_iters_{args.wk_iters}'
    # slimmable
    if args.no_track_stat: run_name += f"__nts"
    # split-mix
    if not args.rescale_init: run_name += '__nri'
    if not args.rescale_layer: run_name += '__nrl'
    if args.loss_temp != 'none': run_name += f'__lt{args.loss_temp}'
    if args.lbn: run_name += '__lbn'
    # adv train
    if args.adv_lmbd > 0:
        run_name += f'__at{args.adv_lmbd}'
    args.save_path = os.path.join(CHECKPOINT_ROOT, exp_folder)
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    SAVE_FILE = os.path.join(args.save_path, run_name)
    return run_name, SAVE_FILE
def get_model_fh(data, model, atom_slim_ratio):
    """Return a model factory (class or callable) for the given dataset/model.

    For 'ens_*' models the factory wraps the base network in an EnsembleNet
    of width-`atom_slim_ratio` base nets. Reads the module-level `args` for
    rescale_init/rescale_layer/no_track_stat. Raises ValueError on unknown
    dataset or model names.
    """
    # FIXME Only use EnsembleNet or Slimmable model.
    if data == 'Digits':
        if model in ['digit']:
            from nets.slimmable_models import SlimmableDigitModel
            # TODO remove. Function the same as ens_digit
            ModelClass = SlimmableDigitModel
        elif model == 'ens_digit':
            from nets.models import DigitModel
            ModelClass = lambda **kwargs: EnsembleNet(
                base_net=DigitModel, atom_slim_ratio=atom_slim_ratio,
                rescale_init=args.rescale_init, rescale_layer=args.rescale_layer, **kwargs)
        else:
            raise ValueError(f"Invalid model: {model}")
    elif data in ['DomainNet']:
        if model in ['alex']:
            from nets.slimmable_models import SlimmableAlexNet
            ModelClass = SlimmableAlexNet
        elif model == 'ens_alex':
            from nets.models import AlexNet
            ModelClass = lambda **kwargs: EnsembleNet(
                base_net=AlexNet, atom_slim_ratio=atom_slim_ratio,
                rescale_init=args.rescale_init, rescale_layer=args.rescale_layer, **kwargs)
        else:
            raise ValueError(f"Invalid model: {model}")
    elif data == 'Cifar10':
        if model in ['preresnet18']:  # From heteroFL
            from nets.HeteFL.slimmable_preresne import resnet18
            ModelClass = resnet18
        elif model in ['ens_preresnet18']:
            # Variant without BN running stats vs. the standard one.
            if args.no_track_stat:
                # FIXME remove on release
                from nets.HeteFL.preresne import resnet18
            else:
                from nets.HeteFL.preresnet import resnet18
            ModelClass = lambda **kwargs: EnsembleNet(
                base_net=resnet18, atom_slim_ratio=atom_slim_ratio,
                rescale_init=args.rescale_init, rescale_layer=args.rescale_layer, **kwargs)
        else:
            raise ValueError(f"Invalid model: {model}")
    else:
        raise ValueError(f"Unknown dataset: {data}")
    return ModelClass
def fed_test(fed, running_model, verbose, adversary=None, val_mix_model=None):
    """Validate every client at every validation slim ratio.

    With `adversary` set, metrics are logged as robust ('r'); otherwise as
    standard ('s'). Returns (val_acc_list, avg_val_loss) where each entry of
    val_acc_list is the accuracy at the LAST slim ratio evaluated for that
    client. Logs per-client and per-width accuracies to wandb (commit=False).
    """
    mark = 's' if adversary is None else 'r'
    val_acc_list = [None for _ in range(fed.client_num)]
    val_loss_mt = AverageMeter()
    slim_val_acc_mt = {slim_ratio: AverageMeter() for slim_ratio in fed.val_slim_ratios}
    for client_idx in range(fed.client_num):
        fed.download(running_model, client_idx)
        for i_slim_ratio, slim_ratio in enumerate(fed.val_slim_ratios):
            # Load and set slim ratio
            if isinstance(running_model, EnsembleNet):
                running_model.switch_slim_mode(slim_ratio)
                val_mix_model = running_model
            else:
                # FIXME ad-hoc for SlimmableNet
                running_model.switch_slim_mode(1.0)  # full net should load the full net
                val_mix_model.full_net.load_state_dict(running_model.state_dict())
                val_mix_model.set_total_slim_ratio(slim_ratio)
            # Test (dual-BN models go through test_dbn)
            if running_model.bn_type.startswith('d'):
                val_loss, val_acc = test_dbn(val_mix_model, val_loaders[client_idx], loss_fun, device,
                                             adversary=adversary, att_BNn=True, detector='gt')
            else:
                val_loss, val_acc = test(val_mix_model, val_loaders[client_idx], loss_fun, device,
                                         adversary=adversary)
            # Log
            val_loss_mt.append(val_loss)
            val_acc_list[client_idx] = val_acc  # NOTE only record the last slim_ratio.
            if verbose > 0:
                print(' {:<19s} slim {:.2f}| Val {:s}Loss: {:.4f} | Val {:s}Acc: {:.4f}'.format(
                    'User-' + fed.clients[client_idx] if i_slim_ratio == 0 else ' ', slim_ratio,
                    mark.upper(), val_loss, mark.upper(), val_acc))
            wandb.log({
                f"{fed.clients[client_idx]} sm{slim_ratio:.2f} val_s-acc": val_acc,
            }, commit=False)
            # Also log under the client's own maximum supported width.
            if slim_ratio == fed.user_max_slim_ratios[client_idx]:
                wandb.log({
                    f"{fed.clients[client_idx]} val_{mark}-acc": val_acc,
                }, commit=False)
            slim_val_acc_mt[slim_ratio].append(val_acc)
    slim_val_acc_dict = {k: mt.avg if len(mt) > 0 else None for k, mt in slim_val_acc_mt.items()}
    wandb.log({
        f"slim{k:.2f} val_sacc": acc for k, acc in slim_val_acc_dict.items()
    }, commit=False)
    return val_acc_list, val_loss_mt.avg
if __name__ == '__main__':
    # Entry point: parse CLI args, build the federation, model, adversary
    # and loss, then either train or evaluate a checkpoint.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    np.seterr(all='raise')  # make sure warning are raised as exception
    parser = argparse.ArgumentParser()
    # basic problem setting
    parser.add_argument('--seed', type=int, default=1, help='random seed')
    parser.add_argument('--data', type=str, default='Digits', help='data name')
    parser.add_argument('--model', type=str.lower, default='default', help='model name')
    parser.add_argument('--no_track_stat', action='store_true', help='disable BN tracking')
    parser.add_argument('--test_refresh_bn', action='store_true', help='refresh BN before test')
    # control
    parser.add_argument('--no_log', action='store_true', help='disable wandb log')
    parser.add_argument('--test', action='store_true', help='test the pretrained model')
    parser.add_argument('--resume', action='store_true', help='resume training from checkpoint')
    parser.add_argument('--verbose', type=int, default=0, help='verbose level: 0 or 1')
    # federated
    Federation.add_argument(parser)
    # optimization
    parser.add_argument('--lr', type=float, default=1e-2, help='learning rate')
    parser.add_argument('--lr_sch', type=str, default='none', help='learning rate schedule')
    parser.add_argument('--opt', type=str.lower, default='sgd', help='optimizer')
    parser.add_argument('--iters', type=int, default=300, help='#iterations for communication')
    parser.add_argument('--wk_iters', type=int, default=1, help='#epochs in local train')
    # slimmable test
    parser.add_argument('--test_slim_ratio', type=float, default=1.,
                        help='slim_ratio of model at testing.')
    parser.add_argument('--sort_bases', action='store_true', help='sort base models by val acc.')
    # split-mix
    parser.add_argument('--rescale_init', type=str2bool, default=True, help='rescale init after slim')
    parser.add_argument('--rescale_layer', type=str2bool, default=True, help='rescale layer outputs after slim')
    parser.add_argument('--loss_temp', type=str, default='none',
                        help='temper cross-entropy loss (str|float):'
                             ' auto - set temp as the width scale; none - no temper; '
                             'other float values.')
    parser.add_argument('--lbn', type=str2bool, default=False, help='use client-local BN stats (valid if tracking stats)')
    # adversarial train
    parser.add_argument('--adv_lmbd', type=float, default=0.,
                        help='adv coefficient in [0,1]; default 0 for standard training.')
    parser.add_argument('--test_noise', choices=['none', 'LinfPGD'], default='none')
    parser.add_argument('--test_adv_lmbd', type=float, default=0.)
    args = parser.parse_args()
    set_seed(args.seed)
    # set experiment files, wandb
    exp_folder = f'SplitMix_{args.data}'
    run_name, SAVE_FILE = render_run_name(args, exp_folder)
    wandb.init(group=run_name[:120], project=exp_folder, mode='offline' if args.no_log else 'online',
               config={**vars(args), 'save_file': SAVE_FILE})
    # /////////////////////////////////
    # ///// Fed Dataset and Model /////
    # /////////////////////////////////
    fed = Federation(args.data, args)
    # Data
    train_loaders, val_loaders, test_loaders = fed.get_data()
    mean_batch_iters = int(np.mean([len(tl) for tl in train_loaders]))
    print(f"  mean_batch_iters: {mean_batch_iters}")
    # Model
    ModelClass = get_model_fh(args.data, args.model, args.atom_slim_ratio)
    # Dual BN ('dbn') is used only for mixed standard/adversarial training.
    running_model = ModelClass(
        track_running_stats=not args.no_track_stat or (args.test and args.test_refresh_bn), num_classes=fed.num_classes,
        bn_type='dbn' if 0. < args.adv_lmbd < 1. else 'bn',
        slimmable_ratios=fed.train_slim_ratios,
    ).to(device)
    # mixed model for validation.
    val_mix_model = running_model if isinstance(running_model, EnsembleNet) \
        else EnsembleSubnet(copy.deepcopy(running_model), args.atom_slim_ratio)
    # adversary
    if args.adv_lmbd > 0. or args.test:
        assert isinstance(running_model, EnsembleNet), "Did not create adv for val_mix_model"
        make_adv = AdversaryCreator(args.test_noise if args.test else 'LinfPGD')
        adversary = make_adv(running_model)
    else:
        adversary = None
    # Loss
    if args.pu_nclass > 0:  # niid
        loss_fun = LocalMaskCrossEntropyLoss(fed.num_classes)
    else:
        loss_fun = nn.CrossEntropyLoss()
    # Use running model to init a fed aggregator
    fed.make_aggregator(running_model, local_bn=args.lbn)
    # /////////////////
    # //// Resume /////
    # /////////////////
    # log the best for each model on all datasets
    best_epoch = 0
    best_acc = [0. for j in range(fed.client_num)]
    train_elapsed = [[] for _ in range(fed.client_num)]
    start_epoch = 0
    if args.resume or args.test:
        if os.path.exists(SAVE_FILE):
            # Restore server weights, best metrics and the epoch counter.
            print(f'Loading chkpt from {SAVE_FILE}')
            checkpoint = torch.load(SAVE_FILE)
            best_epoch, best_acc = checkpoint['best_epoch'], checkpoint['best_acc']
            train_elapsed = checkpoint['train_elapsed']
            start_epoch = int(checkpoint['a_iter']) + 1
            fed.model_accum.load_state_dict(checkpoint['server_model'])
            print('Resume training from epoch {} with best acc:'.format(start_epoch))
            for client_idx, acc in enumerate(best_acc):
                print(' Best user-{:<10s}| Epoch:{} | Val Acc: {:.4f}'.format(
                    fed.clients[client_idx], best_epoch, acc))
        else:
            # Testing requires a checkpoint; resuming falls back to scratch.
            if args.test:
                raise FileNotFoundError(f"Not found checkpoint at {SAVE_FILE}")
            else:
                print(f"Not found checkpoint at {SAVE_FILE}\n **Continue without resume.**")
    # ///////////////
    # //// Test /////
    # ///////////////
    if args.test:
        # Evaluation-only path: assemble a test-time (sub)ensemble at the
        # requested width, evaluate every client, profile, and exit.
        wandb.summary[f'best_epoch'] = best_epoch
        # wandb.summary[f'per_epoch_train_elapsed'] = np.sum([np.mean(client_ts) for client_ts in train_elapsed])
        # val to select base models
        if args.sort_bases and isinstance(running_model, EnsembleNet):
            # Rank atom base models by validation accuracy (descending).
            base_accs = []
            print(f"Evaluate base models..")
            for base_idx in tqdm(range(fed.num_base), file=sys.stdout):
                running_model.switch_slim_mode(fed.args.atom_slim_ratio, base_idx)
                val_acc = fed_test_model(fed, running_model, val_loaders, loss_fun, device)
                base_accs.append(val_acc)
            print(f" Base Accs: {', '.join([f'{a:.3f}' for a in base_accs])}")
            base_idxs = np.argsort(base_accs)[::-1]
            print(f" Sorted base indexes: {base_idxs}")
            running_model.base_idxs = base_idxs
            # fed.download()
        # Set up model with specified width
        print(f" Test model: {args.model} x{args.test_slim_ratio} lmbd{args.test_adv_lmbd}"
              + ('' if args.test_noise == 'none' else f' with {args.test_noise} noise'))
        assert args.atom_slim_ratio > 0, "When ensemble, the atom ratio has to be defined by" \
                                         f" args.slim_ratio > 0. But got {args.atom_slim_ratio}"
        print(f" Ensemble {int(args.test_slim_ratio / args.atom_slim_ratio)} "
              f"{args.atom_slim_ratio} base nets")
        if not isinstance(running_model, EnsembleNet):
            assert args.adv_lmbd == 0, "Not create adversary for EnsembleSubnet."
            running_model.switch_slim_mode(1.)
            test_model = EnsembleSubnet(running_model, subnet_ratio=args.atom_slim_ratio,
                                        ensemble_num=int(
                                            args.test_slim_ratio / args.atom_slim_ratio))
        else:
            running_model.switch_slim_mode(args.test_slim_ratio)
            test_model = running_model
        # Test on clients
        if isinstance(running_model, EnsembleNet):
            print(f"### current slice: {running_model.current_slice()}")
        test_acc_mt = AverageMeter()
        for test_idx, test_loader in enumerate(test_loaders):
            fed.download(running_model, test_idx, strict=not args.test_refresh_bn)
            if running_model.bn_type.startswith('d'):
                _, test_acc = test_dbn(test_model, test_loader, loss_fun, device,
                                       adversary=adversary,
                                       detector='clean',  # FIXME does this really matter?
                                       att_BNn=True,  # args.te_att_BNn,  # FIXME we shall remove this since we will attack the mixed output.
                                       adversary_name=args.test_noise,
                                       mix_dual_logit_lmbd=args.test_adv_lmbd,
                                       attack_mix_dual_logit_lmbd=args.test_adv_lmbd,
                                       deep_mix=True,
                                       )
            else:
                if args.test_refresh_bn:
                    # Re-estimate BN running stats on the client's train data.
                    # test_model.base_net.rescale_layer = False
                    def set_rescale_layer_and_bn(m):
                        if isinstance(m, ScalableModule):
                            m.rescale_layer = False
                        if isinstance(m, _NormBase):
                            m.reset_running_stats()
                            m.momentum = None
                    test_model.apply(set_rescale_layer_and_bn)
                    for ep in tqdm(range(20), desc='refresh bn', leave=False):
                        refresh_bn(test_model, train_loaders[test_idx], device)
                _, test_acc = test(test_model, test_loader, loss_fun, device, adversary=adversary)
            print(' {:<11s}| Test Acc: {:.4f}'.format(fed.clients[test_idx], test_acc))
            wandb.summary[f'{fed.clients[test_idx]} test acc'] = test_acc
            test_acc_mt.append(test_acc)
        # Profile model FLOPs, sizes (#param)
        from nets.profile_func import profile_model
        flops, params = profile_model(test_model, device=device)
        wandb.summary['GFLOPs'] = flops / 1e9
        wandb.summary['model size (MB)'] = params / 1e6
        print('GFLOPS: %.4f, model size: %.4fMB' % (flops / 1e9, params / 1e6))
        print(f"\n Average Test Acc: {test_acc_mt.avg}")
        wandb.summary[f'avg test acc'] = test_acc_mt.avg
        wandb.finish()
        exit(0)
    # ////////////////
    # //// Train /////
    # ////////////////
    # LR scheduler
    if args.lr_sch == 'cos':
        lr_sch = CosineAnnealingLR(args.iters, eta_max=args.lr, last_epoch=start_epoch)
    elif args.lr_sch == 'multi_step':
        lr_sch = MultiStepLR(args.lr, milestones=[150, 250], gamma=0.1, last_epoch=start_epoch)
    elif args.lr_sch == 'multi_step50':
        lr_sch = MultiStepLR(args.lr, milestones=[150+50, 250+50], gamma=0.1, last_epoch=start_epoch)
    elif args.lr_sch == 'multi_step100':
        lr_sch = MultiStepLR(args.lr, milestones=[150+100, 250+100], gamma=0.1, last_epoch=start_epoch)
    else:
        assert args.lr_sch == 'none', f'Invalid lr_sch: {args.lr_sch}'
        lr_sch = None
    shift_tr_cnt_mt = [0 for _ in range(fed.num_base)]  # count of trained times for each base model
    # Main federated rounds: sample bases per client, train locally, upload,
    # aggregate, validate, checkpoint on improvement.
    for a_iter in range(start_epoch, args.iters):
        # set global lr
        global_lr = args.lr if lr_sch is None else lr_sch.step()
        wandb.log({'global lr': global_lr}, commit=False)
        # ----------- Train Client ---------------
        train_loss_mt, train_acc_mt = AverageMeter(), AverageMeter()
        print("============ Train epoch {} ============".format(a_iter))
        for client_idx in fed.client_sampler.iter():
            # (Alg 2) Sample base models defined by shift index.
            slim_ratios, slim_shifts = fed.sample_bases(client_idx)
            start_time = time.process_time()
            # Download global model to local
            fed.download(running_model, client_idx)
            # (Alg 3) Local Train
            if args.opt == 'sgd':
                optimizer = optim.SGD(params=running_model.parameters(), lr=global_lr,
                                      momentum=0.9, weight_decay=5e-4)
            elif args.opt == 'adam':
                optimizer = optim.Adam(params=running_model.parameters(), lr=global_lr)
            else:
                raise ValueError(f"Invalid optimizer: {args.opt}")
            # Equalize local work unless the partition is already uniform.
            local_iters = mean_batch_iters * args.wk_iters if args.partition_mode != 'uni' \
                else len(train_loaders[client_idx]) * args.wk_iters
            train_loss, train_acc = train_slimmable(
                running_model, train_loaders[client_idx], optimizer, loss_fun, device,
                max_iter=local_iters,
                slim_ratios=slim_ratios, slim_shifts=slim_shifts, progress=args.verbose > 0,
                loss_temp=args.loss_temp,
                adversary=adversary, adv_lmbd=args.adv_lmbd, att_BNn=True,
            )
            # Upload
            fed.upload(running_model, client_idx,
                       max_slim_ratio=max(slim_ratios), slim_bias_idx=slim_shifts)
            # Log
            client_name = fed.clients[client_idx]
            elapsed = time.process_time() - start_time
            wandb.log({f'{client_name}_train_elapsed': elapsed}, commit=False)
            train_elapsed[client_idx].append(elapsed)
            train_loss_mt.append(train_loss), train_acc_mt.append(train_acc)
            for slim_shift in slim_shifts:
                shift_tr_cnt_mt[slim_shift] += 1
            print(f' User-{client_name:<10s} Train | Loss: {train_loss:.4f} |'
                  f' Acc: {train_acc:.4f} | Elapsed: {elapsed:.2f} s')
            wandb.log({
                f"{client_name} train_loss": train_loss,
                f"{client_name} train_acc": train_acc,
            }, commit=False)
        # Use accumulated model to update server model
        fed.aggregate()
        # ----------- Validation ---------------
        val_acc_list, val_loss = fed_test(
            fed, running_model, args.verbose, val_mix_model=val_mix_model, adversary=None)
        if args.adv_lmbd > 0:
            # Blend standard and robust validation metrics by adv_lmbd.
            print(f' Avg Val SAcc {np.mean(val_acc_list) * 100:.2f}%')
            wandb.log({'val_sacc': np.mean(val_acc_list)}, commit=False)
            val_racc_list, val_rloss = fed_test(
                fed, running_model, args.verbose, val_mix_model=val_mix_model, adversary=adversary)
            print(f' Avg Val RAcc {np.mean(val_racc_list) * 100:.2f}%')
            wandb.log({'val_racc': np.mean(val_racc_list)}, commit=False)
            val_acc_list = [(1-args.adv_lmbd) * sa_ + args.adv_lmbd * ra_
                            for sa_, ra_ in zip(val_acc_list, val_racc_list)]
            val_loss = (1-args.adv_lmbd) * val_loss + args.adv_lmbd * val_rloss
        # Log averaged
        print(f' [Overall] Train Loss {train_loss_mt.avg:.4f} Acc {train_acc_mt.avg*100:.1f}% '
              f'| Val Acc {np.mean(val_acc_list)*100:.2f}%')
        wandb.log({
            f"train_loss": train_loss_mt.avg,
            f"train_acc": train_acc_mt.avg,
            f"val_loss": val_loss,
            f"val_acc": np.mean(val_acc_list),
        }, commit=False)
        wandb.log({
            f"shift{s} train cnt": cnt for s, cnt in enumerate(shift_tr_cnt_mt)
        }, commit=False)
        # ----------- Save checkpoint -----------
        if np.mean(val_acc_list) > np.mean(best_acc):
            best_epoch = a_iter
            for client_idx in range(fed.client_num):
                best_acc[client_idx] = val_acc_list[client_idx]
                if args.verbose > 0:
                    print(' Best site-{:<10s}| Epoch:{} | Val Acc: {:.4f}'.format(
                        fed.clients[client_idx], best_epoch, best_acc[client_idx]))
            print(' [Best Val] Acc {:.4f}'.format(np.mean(val_acc_list)))
            # Save
            print(f' Saving the local and server checkpoint to {SAVE_FILE}')
            save_dict = {
                'server_model': fed.model_accum.state_dict(),
                'best_epoch': best_epoch,
                'best_acc': best_acc,
                'a_iter': a_iter,
                'all_domains': fed.all_domains,
                'train_elapsed': train_elapsed,
            }
            torch.save(save_dict, SAVE_FILE)
        wandb.log({
            f"best_val_acc": np.mean(best_acc),
        }, commit=True)
| [
"wandb.log",
"torch.nn.CrossEntropyLoss",
"federated.learning.train_slimmable",
"federated.core.AdversaryCreator",
"numpy.argsort",
"torch.cuda.is_available",
"copy.deepcopy",
"utils.utils.CosineAnnealingLR",
"time.process_time",
"os.path.exists",
"numpy.mean",
"argparse.ArgumentParser",
"fe... | [((1000, 1032), 'federated.core.SplitFederation.render_run_name', 'Federation.render_run_name', (['args'], {}), '(args)\n', (1026, 1032), True, 'from federated.core import SplitFederation as Federation, AdversaryCreator\n'), ((1774, 1815), 'os.path.join', 'os.path.join', (['CHECKPOINT_ROOT', 'exp_folder'], {}), '(CHECKPOINT_ROOT, exp_folder)\n', (1786, 1815), False, 'import sys, os, argparse, copy, time\n'), ((1911, 1949), 'os.path.join', 'os.path.join', (['args.save_path', 'run_name'], {}), '(args.save_path, run_name)\n', (1923, 1949), False, 'import sys, os, argparse, copy, time\n'), ((4213, 4227), 'utils.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4225, 4227), False, 'from utils.utils import set_seed, AverageMeter, CosineAnnealingLR, MultiStepLR, LocalMaskCrossEntropyLoss, str2bool\n'), ((6625, 6647), 'numpy.seterr', 'np.seterr', ([], {'all': '"""raise"""'}), "(all='raise')\n", (6634, 6647), True, 'import numpy as np\n'), ((6707, 6732), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6730, 6732), False, 'import sys, os, argparse, copy, time\n'), ((7585, 7616), 'federated.core.SplitFederation.add_argument', 'Federation.add_argument', (['parser'], {}), '(parser)\n', (7608, 7616), True, 'from federated.core import SplitFederation as Federation, AdversaryCreator\n'), ((9323, 9342), 'utils.utils.set_seed', 'set_seed', (['args.seed'], {}), '(args.seed)\n', (9331, 9342), False, 'from utils.utils import set_seed, AverageMeter, CosineAnnealingLR, MultiStepLR, LocalMaskCrossEntropyLoss, str2bool\n'), ((9775, 9802), 'federated.core.SplitFederation', 'Federation', (['args.data', 'args'], {}), '(args.data, args)\n', (9785, 9802), True, 'from federated.core import SplitFederation as Federation, AdversaryCreator\n'), ((1827, 1857), 'os.path.exists', 'os.path.exists', (['args.save_path'], {}), '(args.save_path)\n', (1841, 1857), False, 'import sys, os, argparse, copy, time\n'), ((1867, 1894), 'os.makedirs', 'os.makedirs', 
(['args.save_path'], {}), '(args.save_path)\n', (1878, 1894), False, 'import sys, os, argparse, copy, time\n'), ((4263, 4277), 'utils.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4275, 4277), False, 'from utils.utils import set_seed, AverageMeter, CosineAnnealingLR, MultiStepLR, LocalMaskCrossEntropyLoss, str2bool\n'), ((10728, 10789), 'federated.core.AdversaryCreator', 'AdversaryCreator', (["(args.test_noise if args.test else 'LinfPGD')"], {}), "(args.test_noise if args.test else 'LinfPGD')\n", (10744, 10789), False, 'from federated.core import SplitFederation as Federation, AdversaryCreator\n'), ((10935, 10977), 'utils.utils.LocalMaskCrossEntropyLoss', 'LocalMaskCrossEntropyLoss', (['fed.num_classes'], {}), '(fed.num_classes)\n', (10960, 10977), False, 'from utils.utils import set_seed, AverageMeter, CosineAnnealingLR, MultiStepLR, LocalMaskCrossEntropyLoss, str2bool\n'), ((11007, 11028), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (11026, 11028), False, 'from torch import nn, optim\n'), ((11451, 11476), 'os.path.exists', 'os.path.exists', (['SAVE_FILE'], {}), '(SAVE_FILE)\n', (11465, 11476), False, 'import sys, os, argparse, copy, time\n'), ((14609, 14623), 'utils.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (14621, 14623), False, 'from utils.utils import set_seed, AverageMeter, CosineAnnealingLR, MultiStepLR, LocalMaskCrossEntropyLoss, str2bool\n'), ((16615, 16655), 'nets.profile_func.profile_model', 'profile_model', (['test_model'], {'device': 'device'}), '(test_model, device=device)\n', (16628, 16655), False, 'from nets.profile_func import profile_model\n'), ((16961, 16975), 'wandb.finish', 'wandb.finish', ([], {}), '()\n', (16973, 16975), False, 'import wandb\n'), ((17129, 17199), 'utils.utils.CosineAnnealingLR', 'CosineAnnealingLR', (['args.iters'], {'eta_max': 'args.lr', 'last_epoch': 'start_epoch'}), '(args.iters, eta_max=args.lr, last_epoch=start_epoch)\n', (17146, 17199), False, 'from utils.utils import 
set_seed, AverageMeter, CosineAnnealingLR, MultiStepLR, LocalMaskCrossEntropyLoss, str2bool\n'), ((17972, 18021), 'wandb.log', 'wandb.log', (["{'global lr': global_lr}"], {'commit': '(False)'}), "({'global lr': global_lr}, commit=False)\n", (17981, 18021), False, 'import wandb\n'), ((5883, 5980), 'wandb.log', 'wandb.log', (["{f'{fed.clients[client_idx]} sm{slim_ratio:.2f} val_s-acc': val_acc}"], {'commit': '(False)'}), "({f'{fed.clients[client_idx]} sm{slim_ratio:.2f} val_s-acc':\n val_acc}, commit=False)\n", (5892, 5980), False, 'import wandb\n'), ((6583, 6608), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6606, 6608), False, 'import torch\n'), ((10506, 10534), 'copy.deepcopy', 'copy.deepcopy', (['running_model'], {}), '(running_model)\n', (10519, 10534), False, 'import sys, os, argparse, copy, time\n'), ((11556, 11577), 'torch.load', 'torch.load', (['SAVE_FILE'], {}), '(SAVE_FILE)\n', (11566, 11577), False, 'import torch\n'), ((17255, 17333), 'utils.utils.MultiStepLR', 'MultiStepLR', (['args.lr'], {'milestones': '[150, 250]', 'gamma': '(0.1)', 'last_epoch': 'start_epoch'}), '(args.lr, milestones=[150, 250], gamma=0.1, last_epoch=start_epoch)\n', (17266, 17333), False, 'from utils.utils import set_seed, AverageMeter, CosineAnnealingLR, MultiStepLR, LocalMaskCrossEntropyLoss, str2bool\n'), ((18112, 18126), 'utils.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (18124, 18126), False, 'from utils.utils import set_seed, AverageMeter, CosineAnnealingLR, MultiStepLR, LocalMaskCrossEntropyLoss, str2bool\n'), ((18128, 18142), 'utils.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (18140, 18142), False, 'from utils.utils import set_seed, AverageMeter, CosineAnnealingLR, MultiStepLR, LocalMaskCrossEntropyLoss, str2bool\n'), ((18428, 18447), 'time.process_time', 'time.process_time', ([], {}), '()\n', (18445, 18447), False, 'import sys, os, argparse, copy, time\n'), ((19180, 19463), 'federated.learning.train_slimmable', 
'train_slimmable', (['running_model', 'train_loaders[client_idx]', 'optimizer', 'loss_fun', 'device'], {'max_iter': 'local_iters', 'slim_ratios': 'slim_ratios', 'slim_shifts': 'slim_shifts', 'progress': '(args.verbose > 0)', 'loss_temp': 'args.loss_temp', 'adversary': 'adversary', 'adv_lmbd': 'args.adv_lmbd', 'att_BNn': '(True)'}), '(running_model, train_loaders[client_idx], optimizer,\n loss_fun, device, max_iter=local_iters, slim_ratios=slim_ratios,\n slim_shifts=slim_shifts, progress=args.verbose > 0, loss_temp=args.\n loss_temp, adversary=adversary, adv_lmbd=args.adv_lmbd, att_BNn=True)\n', (19195, 19463), False, 'from federated.learning import train_slimmable, test, fed_test_model, refresh_bn, test_dbn\n'), ((19837, 19903), 'wandb.log', 'wandb.log', (["{f'{client_name}_train_elapsed': elapsed}"], {'commit': '(False)'}), "({f'{client_name}_train_elapsed': elapsed}, commit=False)\n", (19846, 19903), False, 'import wandb\n'), ((20290, 20399), 'wandb.log', 'wandb.log', (["{f'{client_name} train_loss': train_loss, f'{client_name} train_acc': train_acc\n }"], {'commit': '(False)'}), "({f'{client_name} train_loss': train_loss,\n f'{client_name} train_acc': train_acc}, commit=False)\n", (20299, 20399), False, 'import wandb\n'), ((21995, 22016), 'numpy.mean', 'np.mean', (['val_acc_list'], {}), '(val_acc_list)\n', (22002, 22016), True, 'import numpy as np\n'), ((22019, 22036), 'numpy.mean', 'np.mean', (['best_acc'], {}), '(best_acc)\n', (22026, 22036), True, 'import numpy as np\n'), ((22888, 22920), 'torch.save', 'torch.save', (['save_dict', 'SAVE_FILE'], {}), '(save_dict, SAVE_FILE)\n', (22898, 22920), False, 'import torch\n'), ((5092, 5212), 'federated.learning.test_dbn', 'test_dbn', (['val_mix_model', 'val_loaders[client_idx]', 'loss_fun', 'device'], {'adversary': 'adversary', 'att_BNn': '(True)', 'detector': '"""gt"""'}), "(val_mix_model, val_loaders[client_idx], loss_fun, device,\n adversary=adversary, att_BNn=True, detector='gt')\n", (5100, 5212), False, 'from 
federated.learning import train_slimmable, test, fed_test_model, refresh_bn, test_dbn\n'), ((5308, 5396), 'federated.learning.test', 'test', (['val_mix_model', 'val_loaders[client_idx]', 'loss_fun', 'device'], {'adversary': 'adversary'}), '(val_mix_model, val_loaders[client_idx], loss_fun, device, adversary=\n adversary)\n', (5312, 5396), False, 'from federated.learning import train_slimmable, test, fed_test_model, refresh_bn, test_dbn\n'), ((6091, 6170), 'wandb.log', 'wandb.log', (["{f'{fed.clients[client_idx]} val_{mark}-acc': val_acc}"], {'commit': '(False)'}), "({f'{fed.clients[client_idx]} val_{mark}-acc': val_acc}, commit=False)\n", (6100, 6170), False, 'import wandb\n'), ((12973, 13038), 'federated.learning.fed_test_model', 'fed_test_model', (['fed', 'running_model', 'val_loaders', 'loss_fun', 'device'], {}), '(fed, running_model, val_loaders, loss_fun, device)\n', (12987, 13038), False, 'from federated.learning import train_slimmable, test, fed_test_model, refresh_bn, test_dbn\n'), ((13184, 13205), 'numpy.argsort', 'np.argsort', (['base_accs'], {}), '(base_accs)\n', (13194, 13205), True, 'import numpy as np\n'), ((14853, 15104), 'federated.learning.test_dbn', 'test_dbn', (['test_model', 'test_loader', 'loss_fun', 'device'], {'adversary': 'adversary', 'detector': '"""clean"""', 'att_BNn': '(True)', 'adversary_name': 'args.test_noise', 'mix_dual_logit_lmbd': 'args.test_adv_lmbd', 'attack_mix_dual_logit_lmbd': 'args.test_adv_lmbd', 'deep_mix': '(True)'}), "(test_model, test_loader, loss_fun, device, adversary=adversary,\n detector='clean', att_BNn=True, adversary_name=args.test_noise,\n mix_dual_logit_lmbd=args.test_adv_lmbd, attack_mix_dual_logit_lmbd=args\n .test_adv_lmbd, deep_mix=True)\n", (14861, 15104), False, 'from federated.learning import train_slimmable, test, fed_test_model, refresh_bn, test_dbn\n'), ((16218, 16286), 'federated.learning.test', 'test', (['test_model', 'test_loader', 'loss_fun', 'device'], {'adversary': 'adversary'}), '(test_model, 
test_loader, loss_fun, device, adversary=adversary)\n', (16222, 16286), False, 'from federated.learning import train_slimmable, test, fed_test_model, refresh_bn, test_dbn\n'), ((17391, 17484), 'utils.utils.MultiStepLR', 'MultiStepLR', (['args.lr'], {'milestones': '[150 + 50, 250 + 50]', 'gamma': '(0.1)', 'last_epoch': 'start_epoch'}), '(args.lr, milestones=[150 + 50, 250 + 50], gamma=0.1, last_epoch\n =start_epoch)\n', (17402, 17484), False, 'from utils.utils import set_seed, AverageMeter, CosineAnnealingLR, MultiStepLR, LocalMaskCrossEntropyLoss, str2bool\n'), ((19792, 19811), 'time.process_time', 'time.process_time', ([], {}), '()\n', (19809, 19811), False, 'import sys, os, argparse, copy, time\n'), ((21759, 21780), 'numpy.mean', 'np.mean', (['val_acc_list'], {}), '(val_acc_list)\n', (21766, 21780), True, 'import numpy as np\n'), ((22970, 22987), 'numpy.mean', 'np.mean', (['best_acc'], {}), '(best_acc)\n', (22977, 22987), True, 'import numpy as np\n'), ((2433, 2578), 'nets.slimmable_models.EnsembleNet', 'EnsembleNet', ([], {'base_net': 'DigitModel', 'atom_slim_ratio': 'atom_slim_ratio', 'rescale_init': 'args.rescale_init', 'rescale_layer': 'args.rescale_layer'}), '(base_net=DigitModel, atom_slim_ratio=atom_slim_ratio,\n rescale_init=args.rescale_init, rescale_layer=args.rescale_layer, **kwargs)\n', (2444, 2578), False, 'from nets.slimmable_models import EnsembleNet, EnsembleSubnet\n'), ((17534, 17628), 'utils.utils.MultiStepLR', 'MultiStepLR', (['args.lr'], {'milestones': '[150 + 100, 250 + 100]', 'gamma': '(0.1)', 'last_epoch': 'start_epoch'}), '(args.lr, milestones=[150 + 100, 250 + 100], gamma=0.1,\n last_epoch=start_epoch)\n', (17545, 17628), False, 'from utils.utils import set_seed, AverageMeter, CosineAnnealingLR, MultiStepLR, LocalMaskCrossEntropyLoss, str2bool\n'), ((20844, 20865), 'numpy.mean', 'np.mean', (['val_acc_list'], {}), '(val_acc_list)\n', (20851, 20865), True, 'import numpy as np\n'), ((21138, 21160), 'numpy.mean', 'np.mean', 
(['val_racc_list'], {}), '(val_racc_list)\n', (21145, 21160), True, 'import numpy as np\n'), ((22443, 22464), 'numpy.mean', 'np.mean', (['val_acc_list'], {}), '(val_acc_list)\n', (22450, 22464), True, 'import numpy as np\n'), ((2965, 3108), 'nets.slimmable_models.EnsembleNet', 'EnsembleNet', ([], {'base_net': 'AlexNet', 'atom_slim_ratio': 'atom_slim_ratio', 'rescale_init': 'args.rescale_init', 'rescale_layer': 'args.rescale_layer'}), '(base_net=AlexNet, atom_slim_ratio=atom_slim_ratio, rescale_init\n =args.rescale_init, rescale_layer=args.rescale_layer, **kwargs)\n', (2976, 3108), False, 'from nets.slimmable_models import EnsembleNet, EnsembleSubnet\n'), ((16132, 16187), 'federated.learning.refresh_bn', 'refresh_bn', (['test_model', 'train_loaders[test_idx]', 'device'], {}), '(test_model, train_loaders[test_idx], device)\n', (16142, 16187), False, 'from federated.learning import train_slimmable, test, fed_test_model, refresh_bn, test_dbn\n'), ((21556, 21577), 'numpy.mean', 'np.mean', (['val_acc_list'], {}), '(val_acc_list)\n', (21563, 21577), True, 'import numpy as np\n'), ((3684, 3827), 'nets.slimmable_models.EnsembleNet', 'EnsembleNet', ([], {'base_net': 'resnet18', 'atom_slim_ratio': 'atom_slim_ratio', 'rescale_init': 'args.rescale_init', 'rescale_layer': 'args.rescale_layer'}), '(base_net=resnet18, atom_slim_ratio=atom_slim_ratio,\n rescale_init=args.rescale_init, rescale_layer=args.rescale_layer, **kwargs)\n', (3695, 3827), False, 'from nets.slimmable_models import EnsembleNet, EnsembleSubnet\n'), ((20773, 20794), 'numpy.mean', 'np.mean', (['val_acc_list'], {}), '(val_acc_list)\n', (20780, 20794), True, 'import numpy as np\n'), ((21066, 21088), 'numpy.mean', 'np.mean', (['val_racc_list'], {}), '(val_racc_list)\n', (21073, 21088), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances
def gradient_descent(D, x0, loss_f, grad_f, lr, tol, max_iter):
    """Minimize ``loss_f`` by plain (fixed step size) gradient descent.

    Parameters
    ----------
    D: distance matrix, passed through unchanged to ``loss_f``/``grad_f``
    x0: initial point
    loss_f: callable(D, y) -> scalar stress value
    grad_f: callable(D, y) -> gradient with the same shape as y
    lr: learning rate (step size)
    tol: stop as soon as the stress drops below this value
    max_iter: maximum number of iterations

    Returns
    -------
    y: final iterate
    losses: (k,) array with one stress value per performed iteration;
        k == max_iter when the tolerance was never reached.
    """
    losses = np.zeros(max_iter)
    y = x0
    for i in range(max_iter):
        # one descent step, then evaluate the new stress
        y = y - lr * grad_f(D, y)
        stress = loss_f(D, y)
        losses[i] = stress
        # progress report every 50 iterations, on convergence and on the
        # final iteration (the original printed the last one twice)
        if i % 50 == 0 or stress < tol or i == max_iter - 1:
            print("\riter: {0}, stress: {1:}".format(i, stress),
                  flush=True, end="\t")
        if stress < tol:
            # keep the converged value: the original sliced to losses[:i],
            # dropping the very stress that met the tolerance (off-by-one)
            losses = losses[:i + 1]
            break
    print('\n')
    return y, losses
'''
Input and output data are column vectors: a data matrix X holding N samples
with V features each (X in R^{N x V}) is stored with X.shape == (V, N),
not (N, V).
'''
class MDS:
    """Multidimensional scaling, classical ('cmds') or stress-based (Sammon).

    Data convention: samples are column vectors, i.e. a matrix of n samples
    with d features has shape (d, n). Alternatively a precomputed (n, n)
    distance matrix can be supplied (``input_type='distance'``).
    """
    def __init__(self,
                 n_dim=2,
                 input_type='raw'):
        # n_dim: dimensionality of the target (embedding) space.
        # input_type: 'raw' for a (d, n) data matrix, 'distance' for a
        # precomputed (n, n) pairwise distance matrix.
        if input_type not in ['distance', 'raw']:
            raise RuntimeError('Not implement type !')
        self.input_type = input_type
        self.n_dim = n_dim
    def fit(self, X,
            method='cmds', # 'cmds' = classical MDS; anything else = stress-based
            lr=0.5):
        # Dispatch to the requested variant; lr is only used by the
        # stress-based (gradient descent) solver.
        if method == 'cmds':
            return self._cmds(X)
        else:
            return self._stress_based_mds(X, lr=lr)
    def _cmds(self, X):
        """
        Classical(linear) multidimensional scaling (MDS)
        Parameters
        ----------
        X: (d, n) array or (n,n) array
            input data. The data are placed in column-major order.
            That is, samples are placed in the matrix (X) as column vectors
            d: dimension of points
            n: number of points
        n_dim: dimension of target space
        input_type: it indicates whether data are raw or distance
            - raw: raw data. (d,n) array.
            - distance: precomputed distances between the data. (n,n) array.
        Returns
        -------
        Y: (n_dim, n) array. projected embeddings.
        The top n_dim eigenvalues / eigenvectors are also stored on the
        instance as ``eigen_values`` / ``eigen_vectors``.
        """
        Y = None
        evals = None
        evecs = None
        # Obtain the pairwise distance matrix D.
        if self.input_type == 'distance':
            D = X
        elif self.input_type == 'raw':
            Xt = X.T
            D = euclidean_distances(Xt, Xt)
        n = len(D)
        # Centering matrix H = I - (1/n) * 1 1^T
        H = np.eye(n) - (1/n)*np.ones((n, n))
        D = (D**2).astype(np.float64)
        D = np.nan_to_num(D)
        # Double centering of squared distances yields the Gram matrix G.
        G = -(1/2) * (H.dot(D).dot(H))
        # Eigendecomposition of the symmetric Gram matrix...
        evals, evecs = np.linalg.eigh(G)
        # ...np.linalg.eigh returns ascending order, so sort descending
        # and keep the top n_dim components.
        index = evals.argsort()[::-1]
        evals = evals[index]
        evecs = evecs[:, index]
        evals = evals[:self.n_dim]
        evecs = evecs[:, :self.n_dim]
        self.eigen_vectors = evecs
        self.eigen_values = evals
        # Embedding: Y = Lambda^(1/2) * V^T (coordinates as columns).
        Y = np.diag(evals**(1/2)) @ evecs.T
        assert Y.shape[0] == self.n_dim
        return Y
    def _loss_sammon(self, D, y):
        """
        Loss function (stress) - Sammon
        Parameters
        ----------
        D: (n,n) array. distance matrix in original space
            This is a symmetric matrix
        y: (d,n) array
            d is the dimensionality of target space.
            n is the number of points.
        Returns
        -------
        stress: scalar. stress
        """
        yt = y.T
        n = D.shape[0]
        # Pairwise distances between points in the embedding space.
        Delta = euclidean_distances(yt, yt)
        stress = 0
        for i in range(n):
            f = 0
            s = 0
            for j in range(n):
                # squared mismatch between original and embedded distance
                s += (D[i, j] - Delta[i, j])**2
                f += Delta[i, j]
            stress += (s/f)
        return stress
    def _grad_sammon(self, D, y):
        """
        Gradient function (first derivative) - Sammon
        Parameters
        ----------
        D: (n,n) array. distance matrix in original space
            This is a symmetric matrix
        y: (d,n) array
            d is the dimensionality of target space.
            n is the number of points.
        Returns
        -------
        g: (k,n) array.
            Gradient matrix.
            k is the dimensionality of target space.
            n is the number of points.
        """
        # Pairwise distances between the current embedded points.
        D2 = euclidean_distances(y.T, y.T)
        n = len(D)
        def grid(k):
            # Gradient of the stress with respect to embedded point k.
            s = np.zeros(y[:, k].shape)
            for j in range(n):
                if j != k:
                    s += (D2[k, j] - D[k, j])*(y[:, k] - y[:, j])/(D2[k, j])
            return s
        # Normalisation constant: 1 / sum of below-diagonal distances.
        N = 1/np.tril(D, -1).sum()
        g = np.zeros((y.shape[0], n))
        for i in range(n):
            g[:, i] = grid(i)
        return N*g
    def _stress_based_mds(self, x,
                          lr, tol=1e-9, max_iter=6000):
        """
        Stress-based MDS
        Parameters
        ----------
        x: (d,n) array or (n,n) array
            If it is raw data -> (d,n) array
            otherwise, (n,n) array (distance matrix)
            n is the number of points
            d is the dimensionality of original space
        lr: learning rate for gradient descent
        tol: convergence tolerance on the stress
        max_iter: maximum iteration of optimization
        Returns
        -------
        y: (n_dim,n) array. Embedded coordinates in target space.
        (The stress history returned by the optimiser is discarded.)
        """
        # obtain distance
        if self.input_type == 'raw':
            x_t = x.T
            D = euclidean_distances(x_t, x_t)
        elif self.input_type == 'distance':
            D = x
        else:
            raise ValueError('inappropriate input_type')
        # Remaining initialisation
        N = x.shape[1]
        # Fixed seed so repeated fits give reproducible embeddings.
        np.random.seed(10)
        # Initialise y randomly
        y = np.random.normal(0.0, 1.0, [self.n_dim, N])
        # calculate optimal solution (embedded coordinates)
        y, _ = gradient_descent(D, y, self._loss_sammon,
                               self._grad_sammon, lr, tol, max_iter)
        return y
| [
"numpy.random.normal",
"numpy.eye",
"numpy.ones",
"sklearn.metrics.pairwise.euclidean_distances",
"numpy.diag",
"numpy.zeros",
"numpy.random.seed",
"numpy.tril",
"numpy.linalg.eigh",
"numpy.nan_to_num"
] | [((155, 173), 'numpy.zeros', 'np.zeros', (['max_iter'], {}), '(max_iter)\n', (163, 173), True, 'import numpy as np\n'), ((2586, 2602), 'numpy.nan_to_num', 'np.nan_to_num', (['D'], {}), '(D)\n', (2599, 2602), True, 'import numpy as np\n'), ((2666, 2683), 'numpy.linalg.eigh', 'np.linalg.eigh', (['G'], {}), '(G)\n', (2680, 2683), True, 'import numpy as np\n'), ((3498, 3525), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['yt', 'yt'], {}), '(yt, yt)\n', (3517, 3525), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((4330, 4359), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['y.T', 'y.T'], {}), '(y.T, y.T)\n', (4349, 4359), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((4645, 4670), 'numpy.zeros', 'np.zeros', (['(y.shape[0], n)'], {}), '((y.shape[0], n))\n', (4653, 4670), True, 'import numpy as np\n'), ((5975, 5993), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (5989, 5993), True, 'import numpy as np\n'), ((6038, 6081), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', '[self.n_dim, N]'], {}), '(0.0, 1.0, [self.n_dim, N])\n', (6054, 6081), True, 'import numpy as np\n'), ((2500, 2509), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (2506, 2509), True, 'import numpy as np\n'), ((2940, 2965), 'numpy.diag', 'np.diag', (['(evals ** (1 / 2))'], {}), '(evals ** (1 / 2))\n', (2947, 2965), True, 'import numpy as np\n'), ((4417, 4440), 'numpy.zeros', 'np.zeros', (['y[:, k].shape'], {}), '(y[:, k].shape)\n', (4425, 4440), True, 'import numpy as np\n'), ((5744, 5773), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['x_t', 'x_t'], {}), '(x_t, x_t)\n', (5763, 5773), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((2439, 2466), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['Xt', 'Xt'], {}), '(Xt, Xt)\n', (2458, 2466), False, 'from sklearn.metrics.pairwise import 
euclidean_distances\n'), ((2518, 2533), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (2525, 2533), True, 'import numpy as np\n'), ((4612, 4626), 'numpy.tril', 'np.tril', (['D', '(-1)'], {}), '(D, -1)\n', (4619, 4626), True, 'import numpy as np\n')] |
"""
Utils to replicate IHDP experiments with catenets
"""
# Author: <NAME>
import csv
import os
from pathlib import Path
from typing import Optional, Union
import numpy as np
from sklearn import clone
from catenets.datasets.dataset_ihdp import get_one_data_set, load_raw, prepare_ihdp_data
from catenets.experiment_utils.base import eval_root_mse
from catenets.models.jax import RNet, TARNet, TNet
from catenets.models.jax import RNET_NAME, TARNET_NAME, T_NAME
DATA_DIR = Path("catenets/datasets/data/")  # location of the raw IHDP files
RESULT_DIR = Path("results/experiments_benchmarking/ihdp/")  # output CSVs
SEP = "_"  # separator used when composing output file names
# Shared network depths: 3 representation layers, 2 output layers; the _2
# variant additionally sets the second-stage ('_t') network depths.
PARAMS_DEPTH = {'n_layers_r': 3, 'n_layers_out': 2}
PARAMS_DEPTH_2 = {'n_layers_r': 3, 'n_layers_out': 2, 'n_layers_r_t': 3, 'n_layers_out_t': 2}
# Default estimators benchmarked in the experiments, keyed by model name.
ALL_MODELS = {T_NAME: TNet(**PARAMS_DEPTH),
              TARNET_NAME: TARNet(**PARAMS_DEPTH),
              RNET_NAME: RNet(**PARAMS_DEPTH_2)
              }
def do_ihdp_experiments(
    n_exp: Union[int, list] = 100,
    n_reps: int = 5,
    file_name: str = "ihdp_all",
    model_params: Optional[dict] = None,
    models: Optional[dict] = None,
    setting: str = "original",
) -> None:
    """Run the IHDP CATE benchmark and write per-run results to a CSV.

    For every simulation draw and repetition, each model is cloned, fitted,
    and evaluated via root-PEHE on in-sample and out-of-sample data; one CSV
    row is written per (draw, repetition).

    Parameters
    ----------
    n_exp: number of simulation draws to use (1..n_exp), or an explicit
        list of draw indices.
    n_reps: number of repetitions (seeds) per draw.
    file_name: stem of the CSV written into RESULT_DIR.
    model_params: optional parameter overrides applied to every model.
    models: mapping name -> estimator; defaults to ALL_MODELS.
    setting: 'original'/'C' or 'modified'/'D' response surface.
    """
    if models is None:
        models = ALL_MODELS

    # Normalise the verbose setting names to the single-letter codes.
    if (setting == 'original') or (setting == 'C'):
        setting = 'C'
    elif (setting == 'modified') or (setting == 'D'):
        setting = 'D'
    else:
        raise ValueError('Setting should be one of original or modified. You passed {}.'.format(
            setting))

    # get file to write in
    if not os.path.isdir(RESULT_DIR):
        os.makedirs(RESULT_DIR)

    # Line-buffered so partial results survive a crash; the context manager
    # guarantees the handle is closed even when an experiment raises
    # (the original leaked the file descriptor in that case).
    with open(RESULT_DIR / (file_name + SEP + setting + ".csv"), "w", buffering=1) as out_file:
        writer = csv.writer(out_file)
        header = (['exp', 'run', 'cate_var_in', 'cate_var_out', 'y_var_in']
                  + [name + "_in" for name in models.keys()]
                  + [name + "_out" for name in models.keys()])
        writer.writerow(header)

        # get data
        data_train, data_test = load_raw(DATA_DIR)

        if isinstance(n_exp, int):
            experiment_loop = list(range(1, n_exp + 1))
        elif isinstance(n_exp, list):
            experiment_loop = n_exp
        else:
            raise ValueError("n_exp should be either an integer or a list of integers.")

        for i_exp in experiment_loop:
            # get data for this simulation draw
            data_exp = get_one_data_set(data_train, i_exp=i_exp, get_po=True)
            data_exp_test = get_one_data_set(data_test, i_exp=i_exp, get_po=True)

            X, y, w, cate_true_in, X_t, cate_true_out = prepare_ihdp_data(
                data_exp, data_exp_test, setting=setting
            )

            # variance statistics, logged alongside the errors
            cate_var_in = np.var(cate_true_in)
            cate_var_out = np.var(cate_true_out)
            y_var_in = np.var(y)

            for k in range(n_reps):
                pehe_in = []
                pehe_out = []

                for model_name, estimator in models.items():
                    print(f"Experiment {i_exp}, run {k}, with {model_name}")
                    # clone so repeated fits never share state across runs
                    estimator_temp = clone(estimator)
                    estimator_temp.set_params(seed=k)
                    if model_params is not None:
                        estimator_temp.set_params(**model_params)

                    # fit estimator
                    estimator_temp.fit(X=X, y=y, w=w)

                    cate_pred_in = estimator_temp.predict(X, return_po=False)
                    cate_pred_out = estimator_temp.predict(X_t, return_po=False)

                    pehe_in.append(eval_root_mse(cate_pred_in, cate_true_in))
                    pehe_out.append(eval_root_mse(cate_pred_out, cate_true_out))

                writer.writerow([i_exp, k, cate_var_in, cate_var_out, y_var_in] + pehe_in + pehe_out)
| [
"catenets.models.jax.TNet",
"os.makedirs",
"pathlib.Path",
"catenets.datasets.dataset_ihdp.get_one_data_set",
"catenets.models.jax.TARNet",
"csv.writer",
"catenets.experiment_utils.base.eval_root_mse",
"catenets.datasets.dataset_ihdp.load_raw",
"catenets.models.jax.RNet",
"os.path.isdir",
"caten... | [((475, 506), 'pathlib.Path', 'Path', (['"""catenets/datasets/data/"""'], {}), "('catenets/datasets/data/')\n", (479, 506), False, 'from pathlib import Path\n'), ((520, 566), 'pathlib.Path', 'Path', (['"""results/experiments_benchmarking/ihdp/"""'], {}), "('results/experiments_benchmarking/ihdp/')\n", (524, 566), False, 'from pathlib import Path\n'), ((747, 767), 'catenets.models.jax.TNet', 'TNet', ([], {}), '(**PARAMS_DEPTH)\n', (751, 767), False, 'from catenets.models.jax import RNet, TARNet, TNet\n'), ((796, 818), 'catenets.models.jax.TARNet', 'TARNet', ([], {}), '(**PARAMS_DEPTH)\n', (802, 818), False, 'from catenets.models.jax import RNet, TARNet, TNet\n'), ((845, 867), 'catenets.models.jax.RNet', 'RNet', ([], {}), '(**PARAMS_DEPTH_2)\n', (849, 867), False, 'from catenets.models.jax import RNet, TARNet, TNet\n'), ((1650, 1670), 'csv.writer', 'csv.writer', (['out_file'], {}), '(out_file)\n', (1660, 1670), False, 'import csv\n'), ((1931, 1949), 'catenets.datasets.dataset_ihdp.load_raw', 'load_raw', (['DATA_DIR'], {}), '(DATA_DIR)\n', (1939, 1949), False, 'from catenets.datasets.dataset_ihdp import get_one_data_set, load_raw, prepare_ihdp_data\n'), ((1488, 1513), 'os.path.isdir', 'os.path.isdir', (['RESULT_DIR'], {}), '(RESULT_DIR)\n', (1501, 1513), False, 'import os\n'), ((1523, 1546), 'os.makedirs', 'os.makedirs', (['RESULT_DIR'], {}), '(RESULT_DIR)\n', (1534, 1546), False, 'import os\n'), ((2268, 2322), 'catenets.datasets.dataset_ihdp.get_one_data_set', 'get_one_data_set', (['data_train'], {'i_exp': 'i_exp', 'get_po': '(True)'}), '(data_train, i_exp=i_exp, get_po=True)\n', (2284, 2322), False, 'from catenets.datasets.dataset_ihdp import get_one_data_set, load_raw, prepare_ihdp_data\n'), ((2347, 2400), 'catenets.datasets.dataset_ihdp.get_one_data_set', 'get_one_data_set', (['data_test'], {'i_exp': 'i_exp', 'get_po': '(True)'}), '(data_test, i_exp=i_exp, get_po=True)\n', (2363, 2400), False, 'from catenets.datasets.dataset_ihdp import 
get_one_data_set, load_raw, prepare_ihdp_data\n'), ((2454, 2513), 'catenets.datasets.dataset_ihdp.prepare_ihdp_data', 'prepare_ihdp_data', (['data_exp', 'data_exp_test'], {'setting': 'setting'}), '(data_exp, data_exp_test, setting=setting)\n', (2471, 2513), False, 'from catenets.datasets.dataset_ihdp import get_one_data_set, load_raw, prepare_ihdp_data\n'), ((2588, 2608), 'numpy.var', 'np.var', (['cate_true_in'], {}), '(cate_true_in)\n', (2594, 2608), True, 'import numpy as np\n'), ((2632, 2653), 'numpy.var', 'np.var', (['cate_true_out'], {}), '(cate_true_out)\n', (2638, 2653), True, 'import numpy as np\n'), ((2673, 2682), 'numpy.var', 'np.var', (['y'], {}), '(y)\n', (2679, 2682), True, 'import numpy as np\n'), ((2931, 2947), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (2936, 2947), False, 'from sklearn import clone\n'), ((3372, 3413), 'catenets.experiment_utils.base.eval_root_mse', 'eval_root_mse', (['cate_pred_in', 'cate_true_in'], {}), '(cate_pred_in, cate_true_in)\n', (3385, 3413), False, 'from catenets.experiment_utils.base import eval_root_mse\n'), ((3447, 3490), 'catenets.experiment_utils.base.eval_root_mse', 'eval_root_mse', (['cate_pred_out', 'cate_true_out'], {}), '(cate_pred_out, cate_true_out)\n', (3460, 3490), False, 'from catenets.experiment_utils.base import eval_root_mse\n')] |
# doc-export: SendData
"""
Example that demonstrates sending (binary) array data from Python to JS.
The ``SendDataView`` widget (a ``JsComponent``) has an action to display the
data. This action is invoked from Python, with an array as input. The action
invocation is serialized with BSDF (http://bsdf.io), which natively supports
bytes and numpy arrays.
In this example we also provide a fallback for when Numpy is not available.
This fallback illustrates how any kind of data can be send to JS by providing
the serializer with an extension.
"""
from flexx import flx
# Prepare data array, preferably using Numpy
try:
    import numpy as np
    data_array = np.random.normal(0, 1, 1000)  # 1000 samples ~ N(0, 1)
except ImportError:
    # Fallback to ctypes when numpy is not available
    import random
    import ctypes
    from flexx.app import bsdf_lite
    # Create data array (uniform [0, 1) values rather than normal ones)
    data_array = (ctypes.c_double * 1000)()
    for i in range(len(data_array)):
        data_array[i] = random.random()
    # Add extension that encodes a ctypes array to ndarray extension data,
    # so the BSDF serializer can ship it to JS like a numpy array.
    @flx.serializer.add_extension
    class CtypesArrayExtension(bsdf_lite.Extension):
        # BSDF extension hooks: extension name, and the Python type it handles.
        name = 'ndarray'
        cls = ctypes.Array
        # Map ctypes element types to their numpy-style dtype names.
        typemap = {
            ctypes.c_bool: 'uint8', ctypes.c_int8: 'int8', ctypes.c_uint8: 'uint8',
            ctypes.c_int16: 'int16', ctypes.c_uint16: 'uint16',
            ctypes.c_int32: 'int32', ctypes.c_uint32: 'uint32',
            ctypes.c_int64: 'int64', ctypes.c_uint64: 'uint64',
            ctypes.c_float: 'float32', ctypes.c_double: 'float64',
        }
        def encode(self, s, v):
            # Encode as the ndarray extension payload: shape, dtype, raw bytes.
            return dict(shape=(len(v), ),
                        dtype=self.typemap[v._type_],
                        data=bytes(v))
class SendData(flx.PyComponent):
    """ A simple example demonstrating sending binary data from Python to JS.
    """
    def init(self):
        # Component initialisation hook: create the view and invoke its
        # set_data action, which carries data_array over to the JS side.
        self.view = SendDataView()
        self.view.set_data(data_array)
class SendDataView(flx.Widget):
    """ A widget that displays array data.
    """
    def init(self):
        self.label = flx.Label()
        self.apply_style('overflow-y: scroll;')  # enable scrolling
    @flx.action
    def set_data(self, data):
        """Render the received array as one indexed value per line."""
        # We receive the data as a typed array.
        # If we would send raw bytes, we would receive it as a DataView, which
        # we can map to e.g. a Int16Array like so:
        #   data = Int16Array(blob.buffer, blob.byteOffset, blob.byteLength/2)
        # Show the data as text. We could also e.g. plot it.
        text = ['%i: %f<br />' % (i, data[i]) for i in range(len(data))]
        header = 'This data (%i elements) was send in binary form:<br />' % len(data)
        self.label.set_html(header + ''.join(text))
if __name__ == '__main__':
    # Launch the app in a desktop window and enter the flexx event loop.
    m = flx.launch(SendData, 'app')
    flx.run()
| [
"numpy.random.normal",
"flexx.flx.Label",
"flexx.flx.launch",
"flexx.flx.run",
"random.random"
] | [((664, 692), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (680, 692), True, 'import numpy as np\n'), ((2783, 2810), 'flexx.flx.launch', 'flx.launch', (['SendData', '"""app"""'], {}), "(SendData, 'app')\n", (2793, 2810), False, 'from flexx import flx\n'), ((2815, 2824), 'flexx.flx.run', 'flx.run', ([], {}), '()\n', (2822, 2824), False, 'from flexx import flx\n'), ((2089, 2100), 'flexx.flx.Label', 'flx.Label', ([], {}), '()\n', (2098, 2100), False, 'from flexx import flx\n'), ((969, 984), 'random.random', 'random.random', ([], {}), '()\n', (982, 984), False, 'import random\n')] |
import sys
import csv
import cv2
import numpy as np
# Accumulators shared across all calls to load_data().
images = []
angles = []
def load_data(base_dir):
    """Append camera frames and steering angles from *base_dir* to the
    module-level ``images``/``angles`` lists.

    Each sample contributes two entries: the original frame and a
    horizontally mirrored copy with a negated steering angle (simple
    data augmentation).
    """
    with open("{}/driving_log.csv".format(base_dir)) as log_file:
        rows = [row for row in csv.reader(log_file)]
    for row in rows:
        # Rebuild the local image path from the basename recorded in the log.
        basename = row[0].split('/')[-1]
        local_path = "{}/IMG/{}".format(base_dir, basename)
        frame = cv2.imread(local_path)
        steering = float(row[3])
        # Original sample.
        images.append(frame)
        angles.append(steering)
        # Mirrored sample: flip left/right and negate the steering angle.
        images.append(np.fliplr(frame))
        angles.append(-steering)
# Load all recorded driving sessions; the running sample count is printed
# after each directory so growth can be monitored.
load_data('gs-data/basic1')
print(len(images))
load_data('gs-data/starter')
print(len(images))
load_data('gs-data/tricky')
print(len(images))
load_data('gs-data/track2x1')
print(len(images))
load_data('gs-data/track2x2')
print(len(images))
load_data('gs-data/recovery1')
print(len(images))
load_data('gs-data/recovery2')
print(len(images))
#sys.exit()
# Stack the accumulated samples into training arrays.
X_train = np.array(images)
y_train = np.array(angles)
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Lambda, Dropout
from keras.layers import Cropping2D
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
# NVIDIA-style convolutional steering model.
model = Sequential()
# Normalize pixel values to [-0.5, 0.5].
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))
# Crop sky (top 70 rows) and car hood (bottom 20 rows).
model.add(Cropping2D(cropping=((70, 20), (0,0))))
model.add(Conv2D(filters=24, kernel_size=(5,5), strides=(2,2), activation='relu'))
model.add(Conv2D(filters=36, kernel_size=(5,5), strides=(2,2), activation='relu'))
model.add(Conv2D(filters=48, kernel_size=(5,5), strides=(2,2), activation='relu'))
model.add(Dropout(0.5))
model.add(Conv2D(filters=64, kernel_size=(3,3), activation='relu'))
model.add(Conv2D(filters=64, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D())
model.add(Dropout(0.5))
model.add(Flatten())
# Fully connected head regressing a single steering value.
model.add(Dense(100))
model.add(Activation('relu'))
model.add(Dense(50))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('relu'))
model.add(Dense(1))
# Regression task: mean squared error on the steering angle.
model.compile(
    loss='mse',
    optimizer='adam',
)
history = model.fit(
    X_train,
    y_train,
    epochs=5,
    validation_split=0.25,
    shuffle=True,
)
model.save('model.h5')
| [
"keras.layers.core.Flatten",
"cv2.imread",
"keras.layers.core.Activation",
"keras.layers.pooling.MaxPooling2D",
"numpy.fliplr",
"keras.layers.core.Lambda",
"keras.models.Sequential",
"numpy.array",
"keras.layers.convolutional.Conv2D",
"csv.reader",
"keras.layers.core.Dropout",
"keras.layers.Cr... | [((1083, 1099), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1091, 1099), True, 'import numpy as np\n'), ((1110, 1126), 'numpy.array', 'np.array', (['angles'], {}), '(angles)\n', (1118, 1126), True, 'import numpy as np\n'), ((1375, 1387), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1385, 1387), False, 'from keras.models import Sequential\n'), ((1398, 1458), 'keras.layers.core.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (1404, 1458), False, 'from keras.layers.core import Dense, Activation, Flatten, Lambda, Dropout\n'), ((1472, 1511), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 20), (0, 0))'}), '(cropping=((70, 20), (0, 0)))\n', (1482, 1511), False, 'from keras.layers import Cropping2D\n'), ((1522, 1595), 'keras.layers.convolutional.Conv2D', 'Conv2D', ([], {'filters': '(24)', 'kernel_size': '(5, 5)', 'strides': '(2, 2)', 'activation': '"""relu"""'}), "(filters=24, kernel_size=(5, 5), strides=(2, 2), activation='relu')\n", (1528, 1595), False, 'from keras.layers.convolutional import Conv2D\n'), ((1605, 1678), 'keras.layers.convolutional.Conv2D', 'Conv2D', ([], {'filters': '(36)', 'kernel_size': '(5, 5)', 'strides': '(2, 2)', 'activation': '"""relu"""'}), "(filters=36, kernel_size=(5, 5), strides=(2, 2), activation='relu')\n", (1611, 1678), False, 'from keras.layers.convolutional import Conv2D\n'), ((1688, 1761), 'keras.layers.convolutional.Conv2D', 'Conv2D', ([], {'filters': '(48)', 'kernel_size': '(5, 5)', 'strides': '(2, 2)', 'activation': '"""relu"""'}), "(filters=48, kernel_size=(5, 5), strides=(2, 2), activation='relu')\n", (1694, 1761), False, 'from keras.layers.convolutional import Conv2D\n'), ((1771, 1783), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1778, 1783), False, 'from keras.layers.core import Dense, Activation, Flatten, Lambda, Dropout\n'), ((1795, 
1852), 'keras.layers.convolutional.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), activation='relu')\n", (1801, 1852), False, 'from keras.layers.convolutional import Conv2D\n'), ((1863, 1920), 'keras.layers.convolutional.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), activation='relu')\n", (1869, 1920), False, 'from keras.layers.convolutional import Conv2D\n'), ((1931, 1945), 'keras.layers.pooling.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (1943, 1945), False, 'from keras.layers.pooling import MaxPooling2D\n'), ((1957, 1969), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1964, 1969), False, 'from keras.layers.core import Dense, Activation, Flatten, Lambda, Dropout\n'), ((1981, 1990), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (1988, 1990), False, 'from keras.layers.core import Dense, Activation, Flatten, Lambda, Dropout\n'), ((2002, 2012), 'keras.layers.core.Dense', 'Dense', (['(100)'], {}), '(100)\n', (2007, 2012), False, 'from keras.layers.core import Dense, Activation, Flatten, Lambda, Dropout\n'), ((2024, 2042), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2034, 2042), False, 'from keras.layers.core import Dense, Activation, Flatten, Lambda, Dropout\n'), ((2054, 2063), 'keras.layers.core.Dense', 'Dense', (['(50)'], {}), '(50)\n', (2059, 2063), False, 'from keras.layers.core import Dense, Activation, Flatten, Lambda, Dropout\n'), ((2075, 2093), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2085, 2093), False, 'from keras.layers.core import Dense, Activation, Flatten, Lambda, Dropout\n'), ((2105, 2117), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2112, 2117), False, 'from keras.layers.core import Dense, Activation, Flatten, Lambda, Dropout\n'), ((2129, 
2138), 'keras.layers.core.Dense', 'Dense', (['(10)'], {}), '(10)\n', (2134, 2138), False, 'from keras.layers.core import Dense, Activation, Flatten, Lambda, Dropout\n'), ((2150, 2168), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2160, 2168), False, 'from keras.layers.core import Dense, Activation, Flatten, Lambda, Dropout\n'), ((2180, 2188), 'keras.layers.core.Dense', 'Dense', (['(1)'], {}), '(1)\n', (2185, 2188), False, 'from keras.layers.core import Dense, Activation, Flatten, Lambda, Dropout\n'), ((260, 280), 'csv.reader', 'csv.reader', (['csv_file'], {}), '(csv_file)\n', (270, 280), False, 'import csv\n'), ((501, 521), 'cv2.imread', 'cv2.imread', (['new_path'], {}), '(new_path)\n', (511, 521), False, 'import cv2\n'), ((635, 651), 'numpy.fliplr', 'np.fliplr', (['image'], {}), '(image)\n', (644, 651), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
"""
from __future__ import print_function
from openmdao.api import ExternalCode
import numpy as np
import os.path
import sys
from aerostructures.number_formatting.field_writer_8 import print_float_8
from aerostructures.number_formatting.is_number import isfloat, isint
class NastranStatic(ExternalCode):
    """OpenMDAO ``ExternalCode`` component running a Nastran linear static
    analysis.

    An input deck is generated from ``template_file`` by substituting nodal
    forces, nodal coordinates, panel thicknesses, concentrated masses and
    material properties; Nastran is then executed, and the punch (``.pnh``)
    and output (``.out``) files are parsed for nodal displacements, element
    Von Mises stresses and the total structural mass.
    """

    template_file = 'nastran_static_template.inp'

    def __init__(self, node_id, node_id_all, n_stress, tn, mn, case_name):
        """Set up parameters/outputs and the Nastran launch command.

        Args:
            node_id: IDs (strings) of the outer-surface nodes.
            node_id_all: IDs (strings) of all structural nodes.
            n_stress: number of stress outputs.
            tn: number of thickness regions.
            mn: number of concentrated masses.
            case_name: suffix used for the generated file names.
        """
        super(NastranStatic, self).__init__()
        #Identification number of the outer surface nodes
        self.node_id = node_id
        #Identification number of all the structural nodes
        self.node_id_all = node_id_all
        #Number of nodes on the outer surface
        self.ns = len(node_id)
        #Total number of structural nodes
        self.ns_all = len(node_id_all)
        #Number of stress outputs
        self.n_stress = n_stress
        #Number of regions where the thicknesses are defined
        self.tn = tn
        #Number of concentrated masses
        self.mn = mn
        #Case name (for file naming)
        self.case_name = case_name
        #Forces on the nodes of the outer surface
        self.add_param('f_node', val=np.zeros((self.ns, 3)))
        #Coordinates of all structural nodes
        self.add_param('node_coord_all', val=np.zeros((self.ns_all, 3)))
        #Vector containing the thickness of each region
        self.add_param('t', val=np.zeros(self.tn))
        #Vector containing the concentrated masses' values
        self.add_param('m', val=np.zeros(self.mn))
        #Young's modulus
        self.add_param('E', val=1.)
        #Poisson's ratio
        self.add_param('nu', val=0.3)
        #Material density
        self.add_param('rho_s', val=1.)
        #Displacements of the nodes on the outer surface
        self.add_output('u', val=np.zeros((self.ns, 3)))
        #Von Mises stress of all elements
        self.add_output('VMStress', val=np.zeros(self.n_stress))
        #Structural mass
        self.add_output('mass', val=1.)
        self.input_filepath = 'nastran_static_'+self.case_name+'.inp'
        self.output_filepath = 'nastran_static_'+self.case_name+'.pnh'
        self.output_file = 'nastran_static_'+self.case_name+'.out'
        #Check if the files exist (optional)
        self.options['external_input_files'] = [self.input_filepath,]
        #self.options['external_output_files'] = [self.output_filepath,]
        # Drop the '.inp' extension to build the Nastran job name.
        # NOTE: the previous code used str.rstrip('.inp'), which strips the
        # *character set* {'.', 'i', 'n', 'p'} and therefore mangled any
        # case_name ending in one of those characters (e.g. 'main').
        job_name = os.path.splitext(self.input_filepath)[0]
        #Command according to OS
        if sys.platform == 'win32':
            self.options['command'] = ['cmd.exe', '/c', r'nastran.bat', job_name]
        else:
            self.options['command'] = ['nastran.cmd', job_name]

    def solve_nonlinear(self, params, unknowns, resids):
        """Write the Nastran deck, run Nastran and harvest the results."""
        # Generate the input file for Nastran from the input file template
        # and the current parameter values.
        self.create_input_file(params)
        # Parent solve_nonlinear function actually runs the external code
        super(NastranStatic, self).solve_nonlinear(params, unknowns, resids)
        output_data = self.get_output_data()
        # Parse the output file from the external code and set the value of u
        unknowns['u'] = output_data['u']
        #Parse the output file from the external code and get the Von Mises Stresses
        unknowns['VMStress'] = output_data['VMStress']
        #Parse the output file from the external code and get the structural mass
        unknowns['mass'] = output_data['mass']

    def create_input_file(self, params):
        """Fill ``template_file`` with the current parameter values and write
        the resulting Nastran input deck to ``self.input_filepath``.

        The template is expected to contain ``{Fx<id>}``/``{x<id>}``-style
        placeholders matching the node/thickness/mass identifiers.
        """
        f_node = params['f_node']
        node_coord_all = params['node_coord_all']
        t = params['t']
        m = params['m']
        E = params['E']
        nu = params['nu']
        rho_s = params['rho_s']
        input_data = {}
        #Assign each force value to its corresponding node ID in the input data dictionary
        for i in range(len(f_node)):
            input_data['Fx'+self.node_id[i]] = print_float_8(f_node[i, 0])
            input_data['Fy'+self.node_id[i]] = print_float_8(f_node[i, 1])
            input_data['Fz'+self.node_id[i]] = print_float_8(f_node[i, 2])
        #Assign each node coordiantes to its corresponding node ID in the input data dictionary
        for i in range(len(node_coord_all)):
            input_data['x'+self.node_id_all[i]] = print_float_8(node_coord_all[i,0])
            input_data['y'+self.node_id_all[i]] = print_float_8(node_coord_all[i,1])
            input_data['z'+self.node_id_all[i]] = print_float_8(node_coord_all[i,2])
        #Assign each thickness value to its corresponding ID in the input data dictionary
        for i in range(len(t)):
            input_data['t'+str(i+1)] = print_float_8(t[i])
        #Assign each mass value to its corresponding ID in the input data dictionary
        for i in range(len(m)):
            input_data['m'+str(i+1)] = print_float_8(m[i])
        #Assign the Young's modulus to its input data dictionary key
        input_data['E'] = print_float_8(E)
        #Assign the Poisson's ratio to its input data dictionary key
        input_data['nu'] = print_float_8(nu)
        #Assign the material density to its input data dictionary key
        input_data['rho_s'] = print_float_8(rho_s)
        #Read the input file template
        f = open(self.template_file,'r')
        tmp = f.read()
        f.close()
        #Replace the input data contained in the dictionary onto the new input file
        new_file = tmp.format(**input_data)
        inp = open(self.input_filepath,'w')
        inp.write(new_file)
        inp.close()

    def get_output_data(self):
        """Parse the Nastran punch and output files.

        Returns:
            dict with keys ``'u'`` (outer-surface displacements),
            ``'VMStress'`` (per-face Von Mises stresses of shell elements)
            and ``'mass'`` (total structural mass).
        """
        #Read the punch and output files only if they exist and their last modified date is older than input file one
        # NOTE(review): these are busy-wait loops with no timeout; if Nastran
        # fails to produce the files this spins forever.
        while(not os.path.isfile(self.output_filepath)): pass
        while(os.path.getmtime(self.output_filepath) <= os.path.getmtime(self.input_filepath)): pass
        while(not os.path.isfile(self.output_file)): pass
        while(os.path.getmtime(self.output_file) <= os.path.getmtime(self.input_filepath)): pass
        u = np.zeros((self.ns,3))
        shell_stress = []
        mass = 0.
        #Read the Nastran punch file (.pnh) and extract displacement and stress data
        with open(self.output_filepath) as f:
            lines = f.readlines()
            lines = [i.split() for i in lines]
            for i in range(len(lines)):
                if len(lines[i]) > 1:
                    #Write nodal displacements onto u if the node belongs to the outer surface
                    if lines[i][0] in self.node_id and lines[i][1] == 'G':
                        u[self.node_id.index(lines[i][0])][0] = lines[i][2]
                        u[self.node_id.index(lines[i][0])][1] = lines[i][3]
                        u[self.node_id.index(lines[i][0])][2] = lines[i][4]
                    if isint(lines[i][0]) and isfloat(lines[i][1]):
                        #Store stresses only if the element is of shell type:
                        if lines[i+1][0] == '-CONT-' and lines[i+2][0] == '-CONT-' and lines[i+3][0] == '-CONT-' and lines[i+4][0] == '-CONT-' and lines[i+5][0] == '-CONT-':
                            #Write shell principal stresses onto a list (upper and lower shell faces)
                            shell_stress.append(((float(lines[i+1][3]), float(lines[i+2][1])), (float(lines[i+4][2]), float(lines[i+4][3]))))
        #Compute the Von Mises Stress on the structure
        # (plane-stress formula from the two principal stresses, for both
        # the upper and the lower shell face)
        VM = []
        for s in shell_stress:
            VM.append(np.sqrt(s[0][0]**2 - s[0][0]*s[0][1] + s[0][1]**2))
            VM.append(np.sqrt(s[1][0]**2 - s[1][0]*s[1][1] + s[1][1]**2))
        VMStress = np.asarray(VM)
        #Read the Nastran output file (.out) and extract the total mass of the structure (M)
        with open(self.output_file) as f:
            lines = f.readlines()
            lines = [i.split() for i in lines]
            for i in range(len(lines)):
                if len(lines[i]) > 4:
                    if lines[i][4] == 'MASS' and lines[i][5] == 'X-C.G.':
                        # Nastran prints doubles with 'D' exponents.
                        mass = float(lines[i+1][1].replace('D', 'E'))
        output_data = {}
        output_data['u'] = u
        output_data['VMStress'] = VMStress
        output_data['mass'] = mass
        return output_data
| [
"aerostructures.number_formatting.is_number.isint",
"numpy.sqrt",
"aerostructures.number_formatting.is_number.isfloat",
"numpy.asarray",
"numpy.zeros",
"aerostructures.number_formatting.field_writer_8.print_float_8"
] | [((5189, 5205), 'aerostructures.number_formatting.field_writer_8.print_float_8', 'print_float_8', (['E'], {}), '(E)\n', (5202, 5205), False, 'from aerostructures.number_formatting.field_writer_8 import print_float_8\n'), ((5306, 5323), 'aerostructures.number_formatting.field_writer_8.print_float_8', 'print_float_8', (['nu'], {}), '(nu)\n', (5319, 5323), False, 'from aerostructures.number_formatting.field_writer_8 import print_float_8\n'), ((5428, 5448), 'aerostructures.number_formatting.field_writer_8.print_float_8', 'print_float_8', (['rho_s'], {}), '(rho_s)\n', (5441, 5448), False, 'from aerostructures.number_formatting.field_writer_8 import print_float_8\n'), ((6306, 6328), 'numpy.zeros', 'np.zeros', (['(self.ns, 3)'], {}), '((self.ns, 3))\n', (6314, 6328), True, 'import numpy as np\n'), ((7935, 7949), 'numpy.asarray', 'np.asarray', (['VM'], {}), '(VM)\n', (7945, 7949), True, 'import numpy as np\n'), ((4140, 4167), 'aerostructures.number_formatting.field_writer_8.print_float_8', 'print_float_8', (['f_node[i, 0]'], {}), '(f_node[i, 0])\n', (4153, 4167), False, 'from aerostructures.number_formatting.field_writer_8 import print_float_8\n'), ((4216, 4243), 'aerostructures.number_formatting.field_writer_8.print_float_8', 'print_float_8', (['f_node[i, 1]'], {}), '(f_node[i, 1])\n', (4229, 4243), False, 'from aerostructures.number_formatting.field_writer_8 import print_float_8\n'), ((4292, 4319), 'aerostructures.number_formatting.field_writer_8.print_float_8', 'print_float_8', (['f_node[i, 2]'], {}), '(f_node[i, 2])\n', (4305, 4319), False, 'from aerostructures.number_formatting.field_writer_8 import print_float_8\n'), ((4516, 4551), 'aerostructures.number_formatting.field_writer_8.print_float_8', 'print_float_8', (['node_coord_all[i, 0]'], {}), '(node_coord_all[i, 0])\n', (4529, 4551), False, 'from aerostructures.number_formatting.field_writer_8 import print_float_8\n'), ((4602, 4637), 'aerostructures.number_formatting.field_writer_8.print_float_8', 
'print_float_8', (['node_coord_all[i, 1]'], {}), '(node_coord_all[i, 1])\n', (4615, 4637), False, 'from aerostructures.number_formatting.field_writer_8 import print_float_8\n'), ((4688, 4723), 'aerostructures.number_formatting.field_writer_8.print_float_8', 'print_float_8', (['node_coord_all[i, 2]'], {}), '(node_coord_all[i, 2])\n', (4701, 4723), False, 'from aerostructures.number_formatting.field_writer_8 import print_float_8\n'), ((4889, 4908), 'aerostructures.number_formatting.field_writer_8.print_float_8', 'print_float_8', (['t[i]'], {}), '(t[i])\n', (4902, 4908), False, 'from aerostructures.number_formatting.field_writer_8 import print_float_8\n'), ((5070, 5089), 'aerostructures.number_formatting.field_writer_8.print_float_8', 'print_float_8', (['m[i]'], {}), '(m[i])\n', (5083, 5089), False, 'from aerostructures.number_formatting.field_writer_8 import print_float_8\n'), ((1285, 1307), 'numpy.zeros', 'np.zeros', (['(self.ns, 3)'], {}), '((self.ns, 3))\n', (1293, 1307), True, 'import numpy as np\n'), ((1403, 1429), 'numpy.zeros', 'np.zeros', (['(self.ns_all, 3)'], {}), '((self.ns_all, 3))\n', (1411, 1429), True, 'import numpy as np\n'), ((1523, 1540), 'numpy.zeros', 'np.zeros', (['self.tn'], {}), '(self.tn)\n', (1531, 1540), True, 'import numpy as np\n'), ((1637, 1654), 'numpy.zeros', 'np.zeros', (['self.mn'], {}), '(self.mn)\n', (1645, 1654), True, 'import numpy as np\n'), ((1952, 1974), 'numpy.zeros', 'np.zeros', (['(self.ns, 3)'], {}), '((self.ns, 3))\n', (1960, 1974), True, 'import numpy as np\n'), ((2062, 2085), 'numpy.zeros', 'np.zeros', (['self.n_stress'], {}), '(self.n_stress)\n', (2070, 2085), True, 'import numpy as np\n'), ((7786, 7842), 'numpy.sqrt', 'np.sqrt', (['(s[0][0] ** 2 - s[0][0] * s[0][1] + s[0][1] ** 2)'], {}), '(s[0][0] ** 2 - s[0][0] * s[0][1] + s[0][1] ** 2)\n', (7793, 7842), True, 'import numpy as np\n'), ((7861, 7917), 'numpy.sqrt', 'np.sqrt', (['(s[1][0] ** 2 - s[1][0] * s[1][1] + s[1][1] ** 2)'], {}), '(s[1][0] ** 2 - s[1][0] * 
s[1][1] + s[1][1] ** 2)\n', (7868, 7917), True, 'import numpy as np\n'), ((7109, 7127), 'aerostructures.number_formatting.is_number.isint', 'isint', (['lines[i][0]'], {}), '(lines[i][0])\n', (7114, 7127), False, 'from aerostructures.number_formatting.is_number import isfloat, isint\n'), ((7132, 7152), 'aerostructures.number_formatting.is_number.isfloat', 'isfloat', (['lines[i][1]'], {}), '(lines[i][1])\n', (7139, 7152), False, 'from aerostructures.number_formatting.is_number import isfloat, isint\n')] |
import glob
from logging import getLogger
import os
import shutil
import tarfile
import tempfile
from chainer.dataset import download
import numpy
import pandas
from tqdm import tqdm
from chainer_chemistry.dataset.parsers.csv_file_parser import CSVFileParser
from chainer_chemistry.dataset.preprocessors.atomic_number_preprocessor import AtomicNumberPreprocessor # NOQA
# Figshare mirror of the raw QM9 .xyz archive.
download_url = 'https://ndownloader.figshare.com/files/3195389'
# Name of the csv cache produced by download_and_extract_qm9.
file_name = 'qm9.csv'
# Sub-directory under the chainer dataset root used for caching.
_root = 'pfnet/chainer/qm9'
# Property labels, in the column order of the generated csv.
_label_names = ['A', 'B', 'C', 'mu', 'alpha', 'homo', 'lumo', 'gap', 'r2',
                'zpve', 'U0', 'U', 'H', 'G', 'Cv']
# The raw files carry two SMILES strings per molecule.
_smiles_column_names = ['SMILES1', 'SMILES2']
def get_qm9_label_names():
    """Return the list of property-label names of the QM9 dataset."""
    label_names = _label_names
    return label_names
def get_qm9(preprocessor=None, labels=None, return_smiles=False):
    """Downloads, caches and preprocesses QM9 dataset.

    Args:
        preprocessor (BasePreprocessor): Preprocessor.
            This should be chosen based on the network to be trained.
            If it is None, default `AtomicNumberPreprocessor` is used.
        labels (str or list): List of target labels.
        return_smiles (bool): If set to ``True``,
            smiles array is also returned.

    Returns:
        dataset, which is composed of `features`, which depends on
        `preprocess_method`.
    """
    if not labels:
        labels = get_qm9_label_names()
    if isinstance(labels, str):
        labels = [labels]
    if preprocessor is None:
        preprocessor = AtomicNumberPreprocessor()

    def cast_labels(label_list):
        # Regression task: targets are cast to float32.
        return numpy.asarray(label_list, dtype=numpy.float32)

    parser = CSVFileParser(preprocessor, postprocess_label=cast_labels,
                          labels=labels, smiles_col='SMILES1')
    result = parser.parse(get_qm9_filepath(), return_smiles=return_smiles)
    if return_smiles:
        return result['dataset'], result['smiles']
    return result['dataset']
def get_qm9_filepath(download_if_not_exist=True):
    """Return the filepath of the cached QM9 csv file.

    This method checks whether the file exists or not, and downloads
    (and extracts) it if necessary.

    Args:
        download_if_not_exist (bool): download the dataset when the cache
            file is missing.

    Returns (str): filepath for qm9 dataset
    """
    cache_path = _get_qm9_filepath()
    if not os.path.exists(cache_path):
        if download_if_not_exist:
            is_successful = download_and_extract_qm9(save_filepath=cache_path)
            if not is_successful:
                logger = getLogger(__name__)
                logger.warning('Download failed.')
    return cache_path
def _get_qm9_filepath():
    """Construct a filepath which stores QM9 dataset in csv

    This method does not check if the file is already downloaded or not.

    Returns (str): filepath for the QM9 dataset csv cache
    """
    cache_root = download.get_dataset_directory(_root)
    cache_path = os.path.join(cache_root, file_name)
    return cache_path
def download_and_extract_qm9(save_filepath):
    """Download the raw QM9 archive, convert it to csv and save it.

    Each extracted .xyz file is parsed as: line 0 holds the atom count,
    line 1 the tab-separated property values, and line ``3 + num_atom``
    the tab-separated SMILES strings.

    Args:
        save_filepath (str): destination path of the generated csv file.

    Returns (bool): ``True`` on completion.
    """
    logger = getLogger(__name__)
    logger.warning('Extracting QM9 dataset, it takes time...')
    download_file_path = download.cached_download(download_url)
    tf = tarfile.open(download_file_path, 'r')
    temp_dir = tempfile.mkdtemp()
    tf.extractall(temp_dir)
    file_re = os.path.join(temp_dir, '*.xyz')
    file_pathes = glob.glob(file_re)
    # Make sure the order is sorted
    file_pathes.sort()
    ls = []
    for path in tqdm(file_pathes):
        with open(path, 'r') as f:
            data = [line.strip() for line in f]
        num_atom = int(data[0])
        properties = list(map(float, data[1].split('\t')[1:]))
        smiles = data[3 + num_atom].split('\t')
        # One csv row: SMILES columns followed by the property columns.
        new_ls = smiles + properties
        ls.append(new_ls)
    df = pandas.DataFrame(ls, columns=_smiles_column_names + _label_names)
    df.to_csv(save_filepath)
    shutil.rmtree(temp_dir)
    return True
| [
"logging.getLogger",
"os.path.exists",
"tarfile.open",
"tqdm.tqdm",
"os.path.join",
"chainer.dataset.download.get_dataset_directory",
"numpy.asarray",
"tempfile.mkdtemp",
"chainer.dataset.download.cached_download",
"shutil.rmtree",
"pandas.DataFrame",
"chainer_chemistry.dataset.preprocessors.a... | [((1713, 1819), 'chainer_chemistry.dataset.parsers.csv_file_parser.CSVFileParser', 'CSVFileParser', (['preprocessor'], {'postprocess_label': 'postprocess_label', 'labels': 'labels', 'smiles_col': '"""SMILES1"""'}), "(preprocessor, postprocess_label=postprocess_label, labels=\n labels, smiles_col='SMILES1')\n", (1726, 1819), False, 'from chainer_chemistry.dataset.parsers.csv_file_parser import CSVFileParser\n'), ((2940, 2977), 'chainer.dataset.download.get_dataset_directory', 'download.get_dataset_directory', (['_root'], {}), '(_root)\n', (2970, 2977), False, 'from chainer.dataset import download\n'), ((2995, 3030), 'os.path.join', 'os.path.join', (['cache_root', 'file_name'], {}), '(cache_root, file_name)\n', (3007, 3030), False, 'import os\n'), ((3113, 3132), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (3122, 3132), False, 'from logging import getLogger\n'), ((3221, 3259), 'chainer.dataset.download.cached_download', 'download.cached_download', (['download_url'], {}), '(download_url)\n', (3245, 3259), False, 'from chainer.dataset import download\n'), ((3269, 3306), 'tarfile.open', 'tarfile.open', (['download_file_path', '"""r"""'], {}), "(download_file_path, 'r')\n", (3281, 3306), False, 'import tarfile\n'), ((3322, 3340), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3338, 3340), False, 'import tempfile\n'), ((3383, 3414), 'os.path.join', 'os.path.join', (['temp_dir', '"""*.xyz"""'], {}), "(temp_dir, '*.xyz')\n", (3395, 3414), False, 'import os\n'), ((3433, 3451), 'glob.glob', 'glob.glob', (['file_re'], {}), '(file_re)\n', (3442, 3451), False, 'import glob\n'), ((3539, 3556), 'tqdm.tqdm', 'tqdm', (['file_pathes'], {}), '(file_pathes)\n', (3543, 3556), False, 'from tqdm import tqdm\n'), ((3858, 3923), 'pandas.DataFrame', 'pandas.DataFrame', (['ls'], {'columns': '(_smiles_column_names + _label_names)'}), '(ls, columns=_smiles_column_names + _label_names)\n', (3874, 3923), False, 
'import pandas\n'), ((3957, 3980), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (3970, 3980), False, 'import shutil\n'), ((1573, 1619), 'numpy.asarray', 'numpy.asarray', (['label_list'], {'dtype': 'numpy.float32'}), '(label_list, dtype=numpy.float32)\n', (1586, 1619), False, 'import numpy\n'), ((1673, 1699), 'chainer_chemistry.dataset.preprocessors.atomic_number_preprocessor.AtomicNumberPreprocessor', 'AtomicNumberPreprocessor', ([], {}), '()\n', (1697, 1699), False, 'from chainer_chemistry.dataset.preprocessors.atomic_number_preprocessor import AtomicNumberPreprocessor\n'), ((2413, 2439), 'os.path.exists', 'os.path.exists', (['cache_path'], {}), '(cache_path)\n', (2427, 2439), False, 'import os\n'), ((2613, 2632), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (2622, 2632), False, 'from logging import getLogger\n')] |
from pymks.datasets import make_elastic_FE_strain_random
from pymks.datasets import make_elastic_FE_strain_delta
from pymks.datasets import make_elastic_stress_random
import numpy as np
def test_make_elastic_FE_strain_delta():
    """Smoke test: generating a delta-microstructure strain dataset runs."""
    moduli = (1., 2.)
    ratios = (0.3, 0.3)
    X, y = make_elastic_FE_strain_delta(
        elastic_modulus=moduli, poissons_ratio=ratios, size=(5, 5))
def test_make_elastic_FE_strain_random():
    """Smoke test: generating a random-microstructure strain dataset runs."""
    moduli = (1., 2.)
    ratios = (0.3, 0.3)
    X, y = make_elastic_FE_strain_random(
        n_samples=1, elastic_modulus=moduli,
        poissons_ratio=ratios, size=(5, 5))
def test_make_elastic_stress_randome():
    """Check stress values and microstructures for seeded random datasets."""
    # Identical phases everywhere: the stress must equal the macro strain.
    X, y = make_elastic_stress_random(n_samples=1, elastic_modulus=(1, 1),
                               poissons_ratio=(1, 1),
                               grain_size=(3, 3), macro_strain=1.0)
    assert np.allclose(y, np.ones(y.shape))
    # 2D case with a fixed seed: pin the microstructure and the stress.
    X, y = make_elastic_stress_random(n_samples=1, grain_size=(1, 1),
                               elastic_modulus=(100, 200),
                               size=(2, 2), poissons_ratio=(1, 3),
                               macro_strain=1., seed=4)
    X_result = np.array([[[1, 1],
                        [0, 1]]])
    assert float(np.round(y, decimals=5)[0]) == 228.74696
    assert np.allclose(X, X_result)
    # 3D case with a fixed seed.
    X, y = make_elastic_stress_random(n_samples=1, grain_size=(1, 1, 1),
                               elastic_modulus=(100, 200),
                               poissons_ratio=(1, 3), seed=5,
                               macro_strain=1., size=(2, 2, 2))
    X_result = np.array([[[1, 1],
                        [1, 0]],
                       [[1, 1],
                        [0, 0]]])
    assert np.allclose(X, X_result)
    assert y.astype(int) == 145
| [
"pymks.datasets.make_elastic_FE_strain_delta",
"pymks.datasets.make_elastic_FE_strain_random",
"numpy.allclose",
"numpy.ones",
"pymks.datasets.make_elastic_stress_random",
"numpy.array",
"numpy.round"
] | [((303, 412), 'pymks.datasets.make_elastic_FE_strain_delta', 'make_elastic_FE_strain_delta', ([], {'elastic_modulus': 'elastic_modulus', 'poissons_ratio': 'poissons_ratio', 'size': '(5, 5)'}), '(elastic_modulus=elastic_modulus,\n poissons_ratio=poissons_ratio, size=(5, 5))\n', (331, 412), False, 'from pymks.datasets import make_elastic_FE_strain_delta\n'), ((607, 730), 'pymks.datasets.make_elastic_FE_strain_random', 'make_elastic_FE_strain_random', ([], {'n_samples': '(1)', 'elastic_modulus': 'elastic_modulus', 'poissons_ratio': 'poissons_ratio', 'size': '(5, 5)'}), '(n_samples=1, elastic_modulus=elastic_modulus,\n poissons_ratio=poissons_ratio, size=(5, 5))\n', (636, 730), False, 'from pymks.datasets import make_elastic_FE_strain_random\n'), ((903, 1030), 'pymks.datasets.make_elastic_stress_random', 'make_elastic_stress_random', ([], {'n_samples': '(1)', 'elastic_modulus': '(1, 1)', 'poissons_ratio': '(1, 1)', 'grain_size': '(3, 3)', 'macro_strain': '(1.0)'}), '(n_samples=1, elastic_modulus=(1, 1),\n poissons_ratio=(1, 1), grain_size=(3, 3), macro_strain=1.0)\n', (929, 1030), False, 'from pymks.datasets import make_elastic_stress_random\n'), ((1158, 1311), 'pymks.datasets.make_elastic_stress_random', 'make_elastic_stress_random', ([], {'n_samples': '(1)', 'grain_size': '(1, 1)', 'elastic_modulus': '(100, 200)', 'size': '(2, 2)', 'poissons_ratio': '(1, 3)', 'macro_strain': '(1.0)', 'seed': '(4)'}), '(n_samples=1, grain_size=(1, 1), elastic_modulus=\n (100, 200), size=(2, 2), poissons_ratio=(1, 3), macro_strain=1.0, seed=4)\n', (1184, 1311), False, 'from pymks.datasets import make_elastic_stress_random\n'), ((1435, 1463), 'numpy.array', 'np.array', (['[[[1, 1], [0, 1]]]'], {}), '([[[1, 1], [0, 1]]])\n', (1443, 1463), True, 'import numpy as np\n'), ((1559, 1583), 'numpy.allclose', 'np.allclose', (['X', 'X_result'], {}), '(X, X_result)\n', (1570, 1583), True, 'import numpy as np\n'), ((1595, 1758), 'pymks.datasets.make_elastic_stress_random', 
'make_elastic_stress_random', ([], {'n_samples': '(1)', 'grain_size': '(1, 1, 1)', 'elastic_modulus': '(100, 200)', 'poissons_ratio': '(1, 3)', 'seed': '(5)', 'macro_strain': '(1.0)', 'size': '(2, 2, 2)'}), '(n_samples=1, grain_size=(1, 1, 1),\n elastic_modulus=(100, 200), poissons_ratio=(1, 3), seed=5, macro_strain\n =1.0, size=(2, 2, 2))\n', (1621, 1758), False, 'from pymks.datasets import make_elastic_stress_random\n'), ((1879, 1925), 'numpy.array', 'np.array', (['[[[1, 1], [1, 0]], [[1, 1], [0, 0]]]'], {}), '([[[1, 1], [1, 0]], [[1, 1], [0, 0]]])\n', (1887, 1925), True, 'import numpy as np\n'), ((2014, 2038), 'numpy.allclose', 'np.allclose', (['X', 'X_result'], {}), '(X, X_result)\n', (2025, 2038), True, 'import numpy as np\n'), ((1129, 1145), 'numpy.ones', 'np.ones', (['y.shape'], {}), '(y.shape)\n', (1136, 1145), True, 'import numpy as np\n'), ((1507, 1530), 'numpy.round', 'np.round', (['y'], {'decimals': '(5)'}), '(y, decimals=5)\n', (1515, 1530), True, 'import numpy as np\n')] |
import cv2
import numpy
from numba import jit
def set_brightness(img, val, flag):
    r"""
    Brighten or darken an image by a constant per-pixel offset.

    :param img: The image you want to alter (uint8 array as read by cv2)
    :param val: The value by which you want to alter the image
    :param flag: 0 to brighten the image, 1 to darken it
    :return: The new edited image
    :raises ValueError: If ``flag`` is neither 0 nor 1.
    """
    matrix = numpy.ones(img.shape, dtype=numpy.uint8) * val
    if flag == 0:
        # cv2.add saturates at 255 instead of wrapping around.
        return cv2.add(img, matrix)
    if flag == 1:
        # cv2.subtract saturates at 0.
        return cv2.subtract(img, matrix)
    # Previously any other flag silently returned None; fail loudly instead.
    raise ValueError("flag must be 0 (brighten) or 1 (darken), got {!r}".format(flag))
def rescale(scale, frame):
    r"""
    Rescale an img/frame by a uniform factor.
    :param scale: The scale factor applied to both dimensions
    :param frame: The frame/img that you want to resize
    :return: The resized frame
    """
    # frame.shape is (rows, cols[, channels]) == (height, width); the old
    # code unpacked it as (w, h) and therefore passed dsize=(height, width)
    # to cv2.resize, transposing the scaling for non-square frames.
    # Using shape[:2] also supports grayscale (2-D) frames.
    height, width = frame.shape[:2]
    # cv2.resize expects dsize in (width, height) order.
    dimensions = (int(width * scale), int(height * scale))
    return cv2.resize(frame, dimensions, interpolation=cv2.INTER_AREA)
def translate(img, x, y):
    r"""
    Shift an img/frame by (x, y) pixels.
    :param img: The img/frame you want to translate
    :param x: Horizontal shift in pixels
    :param y: Vertical shift in pixels
    :return: The translated image
    """
    # 2x3 affine matrix encoding a pure translation.
    shift_matrix = numpy.float32([[1, 0, x], [0, 1, y]])
    # warpAffine expects the output size as (width, height).
    height, width = img.shape[0], img.shape[1]
    return cv2.warpAffine(img, shift_matrix, (width, height))
def rotate(img, angle, rotation_point=None, rot_scale=1.0):
    r"""
    Rotate an img/frame around a point.
    :param img: The img you want to rotate
    :param angle: The angle for the image rotation (degrees)
    :param rotation_point: The (x, y) point of rotation; defaults to the
        image centre when None
    :param rot_scale: The scale of the rotation
    :return: The rotated image
    """
    # Bug fixes versus the previous version:
    # 1. ``rot_point`` was only assigned inside the ``rotation_point is
    #    None`` branch, so passing an explicit rotation point raised a
    #    NameError.
    # 2. ``img.shape[:2]`` is (height, width); the old unpacking swapped
    #    the two, which also transposed the dsize given to warpAffine.
    height, width = img.shape[:2]
    if rotation_point is None:
        rotation_point = (width // 2, height // 2)
    rot_mat = cv2.getRotationMatrix2D(rotation_point, angle, rot_scale)
    # warpAffine expects the output size as (width, height).
    return cv2.warpAffine(img, rot_mat, (width, height))
@jit(nopython=True)
def process(frame, box_height=6, box_width=16):
    r"""
    Pixelate a frame in place by averaging rectangular blocks.
    Each ``box_height`` x ``box_width`` block of the frame is replaced,
    channel by channel, with its mean value. The frame is modified in
    place and also returned. Kept as explicit loops for numba's
    nopython mode.
    :param frame: The frame to pixelate (rows x cols x 3 array)
    :param box_height: Height of each averaging block
    :param box_width: Width of each averaging block
    :return: The pixelated frame
    """
    rows, cols, _ = frame.shape
    for top in range(0, rows, box_height):
        for left in range(0, cols, box_width):
            block = frame[top:top + box_height, left:left + box_width]
            for channel in range(3):
                block[:, :, channel] = numpy.mean(block[:, :, channel])
    return frame
| [
"numpy.mean",
"cv2.warpAffine",
"numpy.ones",
"numba.jit",
"cv2.getRotationMatrix2D",
"cv2.resize",
"cv2.subtract",
"numpy.float32",
"cv2.add"
] | [((2063, 2081), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (2066, 2081), False, 'from numba import jit\n'), ((960, 1019), 'cv2.resize', 'cv2.resize', (['frame', 'dimensions'], {'interpolation': 'cv2.INTER_AREA'}), '(frame, dimensions, interpolation=cv2.INTER_AREA)\n', (970, 1019), False, 'import cv2\n'), ((1317, 1354), 'numpy.float32', 'numpy.float32', (['[[1, 0, x], [0, 1, y]]'], {}), '([[1, 0, x], [0, 1, y]])\n', (1330, 1354), False, 'import numpy\n'), ((1411, 1452), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'trans_mat', 'dimension'], {}), '(img, trans_mat, dimension)\n', (1425, 1452), False, 'import cv2\n'), ((1955, 2007), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['rot_point', 'angle', 'rot_scale'], {}), '(rot_point, angle, rot_scale)\n', (1978, 2007), False, 'import cv2\n'), ((2019, 2059), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'rot_mat', 'dimensions'], {}), '(img, rot_mat, dimensions)\n', (2033, 2059), False, 'import cv2\n'), ((432, 472), 'numpy.ones', 'numpy.ones', (['img.shape'], {'dtype': 'numpy.uint8'}), '(img.shape, dtype=numpy.uint8)\n', (442, 472), False, 'import numpy\n'), ((521, 541), 'cv2.add', 'cv2.add', (['img', 'matrix'], {}), '(img, matrix)\n', (528, 541), False, 'import cv2\n'), ((611, 636), 'cv2.subtract', 'cv2.subtract', (['img', 'matrix'], {}), '(img, matrix)\n', (623, 636), False, 'import cv2\n'), ((2352, 2376), 'numpy.mean', 'numpy.mean', (['roi[:, :, 0]'], {}), '(roi[:, :, 0])\n', (2362, 2376), False, 'import numpy\n'), ((2398, 2422), 'numpy.mean', 'numpy.mean', (['roi[:, :, 1]'], {}), '(roi[:, :, 1])\n', (2408, 2422), False, 'import numpy\n'), ((2444, 2468), 'numpy.mean', 'numpy.mean', (['roi[:, :, 2]'], {}), '(roi[:, :, 2])\n', (2454, 2468), False, 'import numpy\n')] |
# Regression test: the DaCe TensorFlow frontend must reproduce
# TensorFlow's own results for reduce_mean (several axis/keepdims
# combinations) and add_n.
try:
    import tensorflow as tf
except ImportError:
    print("WARNING: Tensorflow not found, skipping test")
    exit(0)

import numpy as np
import dace
from dace.frontend.tensorflow import TFSession

shape = [10, 11, 12, 13]

inp = tf.placeholder(tf.float64, shape)
outp_1 = tf.reduce_mean(inp, keepdims=True)
outp_3 = tf.reduce_mean(inp, axis=[0, 2], keepdims=True)
outp_0 = tf.reduce_mean(inp, axis=[0, 2])
outp_2 = tf.reduce_mean(inp, axis=[-2, -1])
outp_4 = tf.reduce_mean(inp, axis=[0, -1], keepdims=True)

sess_tf = tf.Session()
sess_dace = TFSession()

real_inp = np.random.rand(*shape)
for index, op in enumerate([outp_0, outp_1, outp_2, outp_3, outp_4]):
    output_tf = sess_tf.run(op, feed_dict={inp: real_inp})
    output_dace = sess_dace.run(op, feed_dict={inp: real_inp})
    try:
        assert tf.norm(output_dace - output_tf).eval(session=sess_tf) < 1e-10
    # Was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        print(output_dace)
        print(output_tf)
        print(tf.norm(output_dace - output_tf).eval(session=sess_tf))
        raise AssertionError("mean test {i} failed".format(i=index))
print("mean tests passed!")

inputs = [np.random.rand(*shape) for _ in range(10)]
addn_test_0 = tf.add_n(inputs)
output_tf = sess_tf.run(addn_test_0)
output_dace = sess_dace.run(addn_test_0)
try:
    assert tf.norm(output_dace - output_tf).eval(session=sess_tf) < 1e-10
# Was a bare ``except:``; narrowed to Exception (see above).
except Exception:
    print(output_dace)
    print(output_tf)
    print(tf.norm(output_dace - output_tf).eval(session=sess_tf))
    raise AssertionError("AddN test failed")
print("AddN test passed!")
| [
"numpy.random.rand",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.add_n",
"tensorflow.reduce_mean",
"dace.frontend.tensorflow.TFSession",
"tensorflow.norm"
] | [((235, 268), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64', 'shape'], {}), '(tf.float64, shape)\n', (249, 268), True, 'import tensorflow as tf\n'), ((278, 312), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['inp'], {'keepdims': '(True)'}), '(inp, keepdims=True)\n', (292, 312), True, 'import tensorflow as tf\n'), ((322, 369), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['inp'], {'axis': '[0, 2]', 'keepdims': '(True)'}), '(inp, axis=[0, 2], keepdims=True)\n', (336, 369), True, 'import tensorflow as tf\n'), ((379, 411), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['inp'], {'axis': '[0, 2]'}), '(inp, axis=[0, 2])\n', (393, 411), True, 'import tensorflow as tf\n'), ((421, 455), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['inp'], {'axis': '[-2, -1]'}), '(inp, axis=[-2, -1])\n', (435, 455), True, 'import tensorflow as tf\n'), ((465, 513), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['inp'], {'axis': '[0, -1]', 'keepdims': '(True)'}), '(inp, axis=[0, -1], keepdims=True)\n', (479, 513), True, 'import tensorflow as tf\n'), ((525, 537), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (535, 537), True, 'import tensorflow as tf\n'), ((550, 561), 'dace.frontend.tensorflow.TFSession', 'TFSession', ([], {}), '()\n', (559, 561), False, 'from dace.frontend.tensorflow import TFSession\n'), ((573, 595), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (587, 595), True, 'import numpy as np\n'), ((1174, 1190), 'tensorflow.add_n', 'tf.add_n', (['inputs'], {}), '(inputs)\n', (1182, 1190), True, 'import tensorflow as tf\n'), ((1117, 1139), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (1131, 1139), True, 'import numpy as np\n'), ((1285, 1317), 'tensorflow.norm', 'tf.norm', (['(output_dace - output_tf)'], {}), '(output_dace - output_tf)\n', (1292, 1317), True, 'import tensorflow as tf\n'), ((812, 844), 'tensorflow.norm', 'tf.norm', (['(output_dace - output_tf)'], {}), '(output_dace - output_tf)\n', (819, 844), True, 
'import tensorflow as tf\n'), ((1410, 1442), 'tensorflow.norm', 'tf.norm', (['(output_dace - output_tf)'], {}), '(output_dace - output_tf)\n', (1417, 1442), True, 'import tensorflow as tf\n'), ((953, 985), 'tensorflow.norm', 'tf.norm', (['(output_dace - output_tf)'], {}), '(output_dace - output_tf)\n', (960, 985), True, 'import tensorflow as tf\n')] |
# Copyright <NAME> 2019
# Author: <NAME>
"""
This script contains the utilities for plotting kernel functions.
"""
from matplotlib import pyplot as plt
import numpy as np
def rational_quadratic(alpha, lengthscale, kernel_variance, r):
    """
    Evaluate the rational quadratic kernel (equation 4.19, pg. 86 of
    Rasmussen and Williams). The RQ kernel is a scale mixture (an infinite
    sum) of squared exponential kernels with different characteristic
    lengthscales.
    :param alpha: as alpha goes to infinity the RQ kernel becomes the SQE kernel.
    :param lengthscale: the lengthscale
    :param kernel_variance: the kernel variance
    :param r: The absolute distance in input space (scalar or array)
    :return: The kernel function evaluated at a list of values r.
    """
    scaled_sq_dist = np.square(r / lengthscale) / (2.0 * alpha)
    return kernel_variance * (1.0 + scaled_sq_dist) ** -alpha
def ornstein_uhlenbeck(lengthscale, kernel_variance, r):
    """
    Evaluate the Ornstein-Uhlenbeck kernel (special case of the exponential
    kernel in 1 dimension, pg. 85 of Rasmussen and Williams).
    :param lengthscale: The lengthscale
    :param kernel_variance: The kernel variance
    :param r: The absolute distance in input space (scalar or array)
    :return: The kernel function evaluated at a list of values r.
    """
    decay = np.exp(-(r / lengthscale))
    return kernel_variance * decay
def squared_exponential(lengthscale, kernel_variance, r):
    """
    Evaluate the squared exponential (RBF) kernel.
    :param lengthscale: The lengthscale
    :param kernel_variance: The kernel variance
    :param r: The absolute distance in input space (scalar or array)
    :return: The kernel function evaluated at a list of values r.
    """
    exponent = -0.5 * np.square(r / lengthscale)
    return kernel_variance * np.exp(exponent)
def matern12(lengthscale, kernel_variance, r):
    """
    Evaluate the Matern-1/2 kernel (exponential decay of the covariance
    with distance).
    :param lengthscale: The lengthscale
    :param kernel_variance: The kernel variance
    :param r: The absolute distance in input space (scalar or array)
    :return: The kernel function evaluated at a list of values r.
    """
    return kernel_variance * np.exp(-(r / lengthscale))
# Which fitted kernel to plot. One of ['Matern', 'RQ'].
kernel = 'RQ' # One of ['Matern', 'RQ']
if __name__ == '__main__':
    # Lag values at which to evaluate the autocovariance
    # (in days, per the x-axis label below -- TODO confirm units).
    r_vals = np.arange(0, 10000, 10)
    if kernel == 'Matern':
        # Hyperparameters below appear to be hard-coded fit results for the
        # Mrk-335 X-ray dataset; the commented alternatives are earlier fits.
        #autocorr_vals = matern12(0.010366143599172154, 0.9868114735198913, r_vals)
        autocorr_vals = matern12(13.1488, 0.428, r_vals) # in days
        #autocorr_vals = matern12(1136056, 0.428, r_vals) # in seconds
    else:
        # autocorr_vals = rational_quadratic(0.00321, 10e-5, 10.115, r_vals)
        autocorr_vals = rational_quadratic(0.00321, 0.000954, 10.115, r_vals)
    # Plot on log-log axes and save under figures/<kernel>.png.
    plt.loglog(r_vals, autocorr_vals, label=f'{kernel}')
    #plt.yticks(np.arange(0, 1, 5))
    plt.title(f'{kernel} Covariance for Mrk-335 X-ray Dataset')
    #plt.title('Rational Quadratic Covariance for Mrk-335 X-ray Dataset')
    plt.xlabel('Days')
    plt.ylabel('Autocorrelation')
    plt.tick_params(axis='both', which='minor', labelsize=7)
    plt.yticks([])
    plt.legend()
    plt.savefig(f'figures/{kernel}.png')
    plt.close()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tick_params",
"numpy.exp",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.title",
"numpy.arange"
] | [((1352, 1376), 'numpy.exp', 'np.exp', (['(-r / lengthscale)'], {}), '(-r / lengthscale)\n', (1358, 1376), True, 'import numpy as np\n'), ((1765, 1799), 'numpy.exp', 'np.exp', (['(-0.5 * scaled_squared_dist)'], {}), '(-0.5 * scaled_squared_dist)\n', (1771, 1799), True, 'import numpy as np\n'), ((2333, 2356), 'numpy.arange', 'np.arange', (['(0)', '(10000)', '(10)'], {}), '(0, 10000, 10)\n', (2342, 2356), True, 'import numpy as np\n'), ((2778, 2830), 'matplotlib.pyplot.loglog', 'plt.loglog', (['r_vals', 'autocorr_vals'], {'label': 'f"""{kernel}"""'}), "(r_vals, autocorr_vals, label=f'{kernel}')\n", (2788, 2830), True, 'from matplotlib import pyplot as plt\n'), ((2871, 2930), 'matplotlib.pyplot.title', 'plt.title', (['f"""{kernel} Covariance for Mrk-335 X-ray Dataset"""'], {}), "(f'{kernel} Covariance for Mrk-335 X-ray Dataset')\n", (2880, 2930), True, 'from matplotlib import pyplot as plt\n'), ((3009, 3027), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Days"""'], {}), "('Days')\n", (3019, 3027), True, 'from matplotlib import pyplot as plt\n'), ((3032, 3061), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Autocorrelation"""'], {}), "('Autocorrelation')\n", (3042, 3061), True, 'from matplotlib import pyplot as plt\n'), ((3066, 3122), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""minor"""', 'labelsize': '(7)'}), "(axis='both', which='minor', labelsize=7)\n", (3081, 3122), True, 'from matplotlib import pyplot as plt\n'), ((3127, 3141), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (3137, 3141), True, 'from matplotlib import pyplot as plt\n'), ((3146, 3158), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3156, 3158), True, 'from matplotlib import pyplot as plt\n'), ((3163, 3199), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""figures/{kernel}.png"""'], {}), "(f'figures/{kernel}.png')\n", (3174, 3199), True, 'from matplotlib import pyplot as plt\n'), ((3204, 3215), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3213, 3215), True, 'from matplotlib import pyplot as plt\n'), ((2208, 2232), 'numpy.exp', 'np.exp', (['(-scaled_distance)'], {}), '(-scaled_distance)\n', (2214, 2232), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -
"""This module is designed to hold components with their classes and
associated individual constraints (blocks) and groupings. Therefore this
module holds the class definition and the block directly located by each other.
SPDX-FileCopyrightText: <NAME> <<EMAIL>>
SPDX-FileCopyrightText: <NAME>
SPDX-FileCopyrightText: <NAME>
SPDX-FileCopyrightText: <NAME>
SPDX-FileCopyrightText: FranziPl
SPDX-FileCopyrightText: jnnr
SPDX-FileCopyrightText: <NAME>
SPDX-FileCopyrightText: FabianTU
SPDX-FileCopyrightText: <NAME>
SPDX-License-Identifier: MIT
"""
import numpy as np
from oemof.network import network
from oemof.solph.network import Transformer as solph_Transformer
from oemof.solph.options import Investment
from oemof.solph.plumbing import sequence as solph_sequence
from pyomo.core.base.block import SimpleBlock
from pyomo.environ import Binary
from pyomo.environ import BuildAction
from pyomo.environ import Constraint
from pyomo.environ import Expression
from pyomo.environ import NonNegativeReals
from pyomo.environ import Set
from pyomo.environ import Var
class GenericStorage(network.Transformer):
    r"""
    Component `GenericStorage` to model with basic characteristics of storages.
    Parameters
    ----------
    nominal_storage_capacity : numeric, :math:`E_{nom}`
        Absolute nominal capacity of the storage
    invest_relation_input_capacity : numeric or None, :math:`r_{cap,in}`
        Ratio between the investment variable of the input Flow and the
        investment variable of the storage:
        :math:`\dot{E}_{in,invest} = E_{invest} \cdot r_{cap,in}`
    invest_relation_output_capacity : numeric or None, :math:`r_{cap,out}`
        Ratio between the investment variable of the output Flow and the
        investment variable of the storage:
        :math:`\dot{E}_{out,invest} = E_{invest} \cdot r_{cap,out}`
    invest_relation_input_output : numeric or None, :math:`r_{in,out}`
        Ratio between the investment variable of the output Flow and the
        investment variable of the input flow. This ratio used to fix the
        flow investments to each other.
        Values < 1 set the input flow lower than the output and > 1 will
        set the input flow higher than the output flow. If None no relation
        will be set:
        :math:`\dot{E}_{in,invest} = \dot{E}_{out,invest} \cdot r_{in,out}`
    initial_storage_level : numeric, :math:`c(-1)`
        The relative storage content in the timestep before the first
        time step of optimization (between 0 and 1).
    balanced : boolean
        Couple storage level of first and last time step.
        (Total inflow and total outflow are balanced.)
    loss_rate : numeric (iterable or scalar)
        The relative loss of the storage content per time unit.
    fixed_losses_relative : numeric (iterable or scalar), :math:`\gamma(t)`
        Losses independent of state of charge between two consecutive
        timesteps relative to nominal storage capacity.
    fixed_losses_absolute : numeric (iterable or scalar), :math:`\delta(t)`
        Losses independent of state of charge and independent of
        nominal storage capacity between two consecutive timesteps.
    inflow_conversion_factor : numeric (iterable or scalar), :math:`\eta_i(t)`
        The relative conversion factor, i.e. efficiency associated with the
        inflow of the storage.
    outflow_conversion_factor : numeric (iterable or scalar), :math:`\eta_o(t)`
        see: inflow_conversion_factor
    min_storage_level : numeric (iterable or scalar), :math:`c_{min}(t)`
        The normed minimum storage content as fraction of the
        nominal storage capacity (between 0 and 1).
        To set different values in every time step use a sequence.
    max_storage_level : numeric (iterable or scalar), :math:`c_{max}(t)`
        see: min_storage_level
    investment : :class:`oemof.solph.options.Investment` object
        Object indicating if a nominal_value of the flow is determined by
        the optimization problem. Note: This will refer all attributes to an
        investment variable instead of to the nominal_storage_capacity. The
        nominal_storage_capacity should not be set (or set to None) if an
        investment object is used.
    Note
    ----
    The following sets, variables, constraints and objective parts are created
     * :py:class:`~oemof.solph.components.GenericStorageBlock` (if no
       Investment object present)
     * :py:class:`~oemof.solph.components.GenericInvestmentStorageBlock` (if
       Investment object present)
    Examples
    --------
    Basic usage examples of the GenericStorage with a random selection of
    attributes. See the Flow class for all Flow attributes.
    >>> from oemof import solph
    >>> my_bus = solph.Bus('my_bus')
    >>> my_storage = solph.components.GenericStorage(
    ...     label='storage',
    ...     nominal_storage_capacity=1000,
    ...     inputs={my_bus: solph.Flow(nominal_value=200, variable_costs=10)},
    ...     outputs={my_bus: solph.Flow(nominal_value=200)},
    ...     loss_rate=0.01,
    ...     initial_storage_level=0,
    ...     max_storage_level = 0.9,
    ...     inflow_conversion_factor=0.9,
    ...     outflow_conversion_factor=0.93)
    >>> my_investment_storage = solph.components.GenericStorage(
    ...     label='storage',
    ...     investment=solph.Investment(ep_costs=50),
    ...     inputs={my_bus: solph.Flow()},
    ...     outputs={my_bus: solph.Flow()},
    ...     loss_rate=0.02,
    ...     initial_storage_level=None,
    ...     invest_relation_input_capacity=1/6,
    ...     invest_relation_output_capacity=1/6,
    ...     inflow_conversion_factor=1,
    ...     outflow_conversion_factor=0.8)
    """

    def __init__(
        self, *args, max_storage_level=1, min_storage_level=0, **kwargs
    ):
        super().__init__(*args, **kwargs)
        # Scalar configuration taken straight from the keyword arguments.
        self.nominal_storage_capacity = kwargs.get("nominal_storage_capacity")
        self.initial_storage_level = kwargs.get("initial_storage_level")
        self.balanced = kwargs.get("balanced", True)
        # Time-dependent parameters are passed through solph_sequence so a
        # scalar or a per-timestep iterable can be supplied interchangeably.
        self.loss_rate = solph_sequence(kwargs.get("loss_rate", 0))
        self.fixed_losses_relative = solph_sequence(
            kwargs.get("fixed_losses_relative", 0)
        )
        self.fixed_losses_absolute = solph_sequence(
            kwargs.get("fixed_losses_absolute", 0)
        )
        self.inflow_conversion_factor = solph_sequence(
            kwargs.get("inflow_conversion_factor", 1)
        )
        self.outflow_conversion_factor = solph_sequence(
            kwargs.get("outflow_conversion_factor", 1)
        )
        self.max_storage_level = solph_sequence(max_storage_level)
        self.min_storage_level = solph_sequence(min_storage_level)
        # Investment-mode configuration (capacity becomes a decision
        # variable when an Investment object is given).
        self.investment = kwargs.get("investment")
        self.invest_relation_input_output = kwargs.get(
            "invest_relation_input_output"
        )
        self.invest_relation_input_capacity = kwargs.get(
            "invest_relation_input_capacity"
        )
        self.invest_relation_output_capacity = kwargs.get(
            "invest_relation_output_capacity"
        )
        self._invest_group = isinstance(self.investment, Investment)
        # Check attributes for the investment mode.
        if self._invest_group is True:
            self._check_invest_attributes()
        # Check for old parameter names. This is a temporary fix and should
        # be removed once a general solution is found.
        # TODO: https://github.com/oemof/oemof-solph/issues/560
        renamed_parameters = [
            ("nominal_capacity", "nominal_storage_capacity"),
            ("initial_capacity", "initial_storage_level"),
            ("capacity_loss", "loss_rate"),
            ("capacity_min", "min_storage_level"),
            ("capacity_max", "max_storage_level"),
        ]
        messages = [
            "`{0}` to `{1}`".format(old_name, new_name)
            for old_name, new_name in renamed_parameters
            if old_name in kwargs
        ]
        if messages:
            message = (
                "The following attributes have been renamed from v0.2 to v0.3:"
                "\n\n {}\n\n"
                "You are using the old names as parameters, thus setting "
                "deprecated\n"
                "attributes, which is not what you might have intended.\n"
                "Use the new names, or, if you know what you're doing, set "
                "these\n"
                "attributes explicitly after construction instead."
            )
            raise AttributeError(message.format("\n ".join(messages)))

    def _set_flows(self):
        # Give coupled input/output flows a default Investment object if
        # they do not carry one yet; the invest-relation constraints need
        # an invest variable on those flows.
        for flow in self.inputs.values():
            if (
                self.invest_relation_input_capacity is not None
                and not isinstance(flow.investment, Investment)
            ):
                flow.investment = Investment()
        for flow in self.outputs.values():
            if (
                self.invest_relation_output_capacity is not None
                and not isinstance(flow.investment, Investment)
            ):
                flow.investment = Investment()

    def _check_invest_attributes(self):
        # A fixed capacity and an investment variable are mutually
        # exclusive.
        if self.investment and self.nominal_storage_capacity is not None:
            e1 = (
                "If an investment object is defined the invest variable "
                "replaces the nominal_storage_capacity.\n Therefore the "
                "nominal_storage_capacity should be 'None'.\n"
            )
            raise AttributeError(e1)
        # Setting all three invest relations at once would over-determine
        # the coupled investment variables.
        if (
            self.invest_relation_input_output is not None
            and self.invest_relation_output_capacity is not None
            and self.invest_relation_input_capacity is not None
        ):
            e2 = (
                "Overdetermined. Three investment object will be coupled"
                "with three constraints. Set one invest relation to 'None'."
            )
            raise AttributeError(e2)
        # Absolute fixed losses scale with installed capacity; with zero
        # existing and zero minimum capacity they could never be covered.
        if (
            self.investment
            and sum(solph_sequence(self.fixed_losses_absolute)) != 0
            and self.investment.existing == 0
            and self.investment.minimum == 0
        ):
            e3 = (
                "With fixed_losses_absolute > 0, either investment.existing "
                "or investment.minimum has to be non-zero."
            )
            raise AttributeError(e3)
        self._set_flows()

    def constraint_group(self):
        # Selects which constraint block the model builds for this
        # component, depending on whether an Investment object is present.
        if self._invest_group is True:
            return GenericInvestmentStorageBlock
        else:
            return GenericStorageBlock
# TODO: document callers; this block is returned by
# GenericStorage.constraint_group() when no Investment object is present.
class GenericStorageBlock(SimpleBlock):
    r"""Storage without an :class:`.Investment` object.
    **The following sets are created:** (-> see basic sets at
    :class:`.Model` )
    STORAGES
        A set with all :class:`.Storage` objects, which do not have an
        attr:`investment` of type :class:`.Investment`.
    STORAGES_BALANCED
        A set of all :class:`.Storage` objects, with 'balanced' attribute set
        to True.
    STORAGES_WITH_INVEST_FLOW_REL
        A set with all :class:`.Storage` objects with two investment flows
        coupled with the 'invest_relation_input_output' attribute.
    **The following variables are created:**
    storage_content
        Storage content for every storage and timestep. The value for the
        storage content at the beginning is set by the parameter `initial_storage_level`
        or not set if `initial_storage_level` is None.
        The variable of storage s and timestep t can be accessed by:
        `om.Storage.storage_content[s, t]`
    **The following constraints are created:**
    Set storage_content of last time step to one at t=0 if :attr:`balanced == True`
        .. math::
          E(t_{last}) = &E(-1)
    Storage balance :attr:`om.Storage.balance[n, t]`
        .. math:: E(t) = &E(t-1) \cdot
          (1 - \beta(t)) ^{\tau(t)/(t_u)} \\
          &- \gamma(t)\cdot E_{nom} \cdot {\tau(t)/(t_u)}\\
          &- \delta(t) \cdot {\tau(t)/(t_u)}\\
          &- \frac{\dot{E}_o(t)}{\eta_o(t)} \cdot \tau(t)
          + \dot{E}_i(t) \cdot \eta_i(t) \cdot \tau(t)
    Connect the invest variables of the input and the output flow.
        .. math::
          InvestmentFlow.invest(source(n), n) + existing = \\
          (InvestmentFlow.invest(n, target(n)) + existing) * \\
          invest\_relation\_input\_output(n) \\
          \forall n \in \textrm{INVEST\_REL\_IN\_OUT}
    =========================== ======================= =========
    symbol                      explanation             attribute
    =========================== ======================= =========
    :math:`E(t)`                energy currently stored :py:obj:`storage_content`
    :math:`E_{nom}`             nominal capacity of     :py:obj:`nominal_storage_capacity`
                                the energy storage
    :math:`c(-1)`               state before            :py:obj:`initial_storage_level`
                                initial time step
    :math:`c_{min}(t)`          minimum allowed storage :py:obj:`min_storage_level[t]`
    :math:`c_{max}(t)`          maximum allowed storage :py:obj:`max_storage_level[t]`
    :math:`\beta(t)`            fraction of lost energy :py:obj:`loss_rate[t]`
                                as share of
                                :math:`E(t)`
                                per time unit
    :math:`\gamma(t)`           fixed loss of energy    :py:obj:`fixed_losses_relative[t]`
                                relative to
                                :math:`E_{nom}` per
                                time unit
    :math:`\delta(t)`           absolute fixed loss     :py:obj:`fixed_losses_absolute[t]`
                                of energy per
                                time unit
    :math:`\dot{E}_i(t)`        energy flowing in       :py:obj:`inputs`
    :math:`\dot{E}_o(t)`        energy flowing out      :py:obj:`outputs`
    :math:`\eta_i(t)`           conversion factor       :py:obj:`inflow_conversion_factor[t]`
                                (i.e. efficiency)
                                when storing energy
    :math:`\eta_o(t)`           conversion factor when  :py:obj:`outflow_conversion_factor[t]`
                                (i.e. efficiency)
                                taking stored energy
    :math:`\tau(t)`             duration of time step
    :math:`t_u`                 time unit of losses
                                :math:`\beta(t)`,
                                :math:`\gamma(t)`
                                :math:`\delta(t)` and
                                timeincrement
                                :math:`\tau(t)`
    =========================== ======================= =========
    **The following parts of the objective function are created:**
    Nothing added to the objective function.
    """  # noqa: E501

    CONSTRAINT_GROUP = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _create(self, group=None):
        """
        Parameters
        ----------
        group : list
            List containing storage objects.
            e.g. groups=[storage1, storage2,..]
        """
        # The parent model supplies TIMESTEPS, timeincrement and the flow
        # variables referenced below.
        m = self.parent_block()
        if group is None:
            return None
        # Map each storage to its (single) input and output flow; storages
        # are assumed to have exactly one of each.
        i = {n: [i for i in n.inputs][0] for n in group}
        o = {n: [o for o in n.outputs][0] for n in group}
        # ************* SETS *********************************
        self.STORAGES = Set(initialize=[n for n in group])
        self.STORAGES_BALANCED = Set(
            initialize=[n for n in group if n.balanced is True]
        )
        self.STORAGES_WITH_INVEST_FLOW_REL = Set(
            initialize=[
                n for n in group if n.invest_relation_input_output is not None
            ]
        )
        # ************* VARIABLES *****************************
        def _storage_content_bound_rule(block, n, t):
            """
            Rule definition for bounds of storage_content variable of
            storage n in timestep t.
            """
            bounds = (
                n.nominal_storage_capacity * n.min_storage_level[t],
                n.nominal_storage_capacity * n.max_storage_level[t],
            )
            return bounds

        self.storage_content = Var(
            self.STORAGES, m.TIMESTEPS, bounds=_storage_content_bound_rule
        )

        def _storage_init_content_bound_rule(block, n):
            return 0, n.nominal_storage_capacity

        self.init_content = Var(
            self.STORAGES,
            within=NonNegativeReals,
            bounds=_storage_init_content_bound_rule,
        )
        # set the initial storage content
        # (fixed to the given relative level; left free otherwise)
        for n in group:
            if n.initial_storage_level is not None:
                self.init_content[n] = (
                    n.initial_storage_level * n.nominal_storage_capacity
                )
                self.init_content[n].fix()
        # ************* Constraints ***************************
        # All timesteps except the first; t=0 is covered by balance_first.
        reduced_timesteps = [x for x in m.TIMESTEPS if x > 0]
        # storage balance constraint (first time step)
        def _storage_balance_first_rule(block, n):
            """
            Rule definition for the storage balance of every storage n for
            the first timestep.
            """
            expr = 0
            expr += block.storage_content[n, 0]
            expr += (
                -block.init_content[n]
                * (1 - n.loss_rate[0]) ** m.timeincrement[0]
            )
            expr += (
                n.fixed_losses_relative[0]
                * n.nominal_storage_capacity
                * m.timeincrement[0]
            )
            expr += n.fixed_losses_absolute[0] * m.timeincrement[0]
            expr += (
                -m.flow[i[n], n, 0] * n.inflow_conversion_factor[0]
            ) * m.timeincrement[0]
            expr += (
                m.flow[n, o[n], 0] / n.outflow_conversion_factor[0]
            ) * m.timeincrement[0]
            return expr == 0

        self.balance_first = Constraint(
            self.STORAGES, rule=_storage_balance_first_rule
        )
        # storage balance constraint (every time step but the first)
        def _storage_balance_rule(block, n, t):
            """
            Rule definition for the storage balance of every storage n and
            every timestep but the first (t > 0).
            """
            expr = 0
            expr += block.storage_content[n, t]
            expr += (
                -block.storage_content[n, t - 1]
                * (1 - n.loss_rate[t]) ** m.timeincrement[t]
            )
            expr += (
                n.fixed_losses_relative[t]
                * n.nominal_storage_capacity
                * m.timeincrement[t]
            )
            expr += n.fixed_losses_absolute[t] * m.timeincrement[t]
            expr += (
                -m.flow[i[n], n, t] * n.inflow_conversion_factor[t]
            ) * m.timeincrement[t]
            expr += (
                m.flow[n, o[n], t] / n.outflow_conversion_factor[t]
            ) * m.timeincrement[t]
            return expr == 0

        self.balance = Constraint(
            self.STORAGES, reduced_timesteps, rule=_storage_balance_rule
        )

        def _balanced_storage_rule(block, n):
            """
            Storage content of last time step == initial storage content
            if balanced.
            """
            return (
                block.storage_content[n, m.TIMESTEPS[-1]]
                == block.init_content[n]
            )

        self.balanced_cstr = Constraint(
            self.STORAGES_BALANCED, rule=_balanced_storage_rule
        )

        def _power_coupled(block, n):
            """
            Rule definition for constraint to connect the input power
            and output power
            """
            expr = (
                m.InvestmentFlow.invest[n, o[n]]
                + m.flows[n, o[n]].investment.existing
            ) * n.invest_relation_input_output == (
                m.InvestmentFlow.invest[i[n], n]
                + m.flows[i[n], n].investment.existing
            )
            return expr

        self.power_coupled = Constraint(
            self.STORAGES_WITH_INVEST_FLOW_REL, rule=_power_coupled
        )

    def _objective_expression(self):
        r"""
        Objective expression for storages with no investment.
        Note: This adds nothing as variable costs are already
        added in the Block :class:`Flow`.
        """
        if not hasattr(self, "STORAGES"):
            return 0
        return 0
class GenericInvestmentStorageBlock(SimpleBlock):
r"""
Block for all storages with :attr:`Investment` being not None.
See :class:`oemof.solph.options.Investment` for all parameters of the
Investment class.
**Variables**
All Storages are indexed by :math:`n`, which is omitted in the following
for the sake of convenience.
The following variables are created as attributes of
:attr:`om.InvestmentStorage`:
* :math:`P_i(t)`
Inflow of the storage
(created in :class:`oemof.solph.models.BaseModel`).
* :math:`P_o(t)`
Outflow of the storage
(created in :class:`oemof.solph.models.BaseModel`).
* :math:`E(t)`
Current storage content (Absolute level of stored energy).
* :math:`E_{invest}`
Invested (nominal) capacity of the storage.
* :math:`E(-1)`
Initial storage content (before timestep 0).
* :math:`b_{invest}`
Binary variable for the status of the investment, if
:attr:`nonconvex` is `True`.
**Constraints**
The following constraints are created for all investment storages:
Storage balance (Same as for :class:`.GenericStorageBlock`)
.. math:: E(t) = &E(t-1) \cdot
(1 - \beta(t)) ^{\tau(t)/(t_u)} \\
&- \gamma(t)\cdot (E_{exist} + E_{invest}) \cdot {\tau(t)/(t_u)}\\
&- \delta(t) \cdot {\tau(t)/(t_u)}\\
&- \frac{P_o(t)}{\eta_o(t)} \cdot \tau(t)
+ P_i(t) \cdot \eta_i(t) \cdot \tau(t)
Depending on the attribute :attr:`nonconvex`, the constraints for the
bounds of the decision variable :math:`E_{invest}` are different:\
* :attr:`nonconvex = False`
.. math::
E_{invest, min} \le E_{invest} \le E_{invest, max}
* :attr:`nonconvex = True`
.. math::
&
E_{invest, min} \cdot b_{invest} \le E_{invest}\\
&
E_{invest} \le E_{invest, max} \cdot b_{invest}\\
The following constraints are created depending on the attributes of
the :class:`.components.GenericStorage`:
* :attr:`initial_storage_level is None`
Constraint for a variable initial storage content:
.. math::
E(-1) \le E_{invest} + E_{exist}
* :attr:`initial_storage_level is not None`
An initial value for the storage content is given:
.. math::
E(-1) = (E_{invest} + E_{exist}) \cdot c(-1)
* :attr:`balanced=True`
The energy content of storage of the first and the last timestep
are set equal:
.. math::
E(-1) = E(t_{last})
* :attr:`invest_relation_input_capacity is not None`
Connect the invest variables of the storage and the input flow:
.. math::
P_{i,invest} + P_{i,exist} =
(E_{invest} + E_{exist}) \cdot r_{cap,in}
* :attr:`invest_relation_output_capacity is not None`
Connect the invest variables of the storage and the output flow:
.. math::
P_{o,invest} + P_{o,exist} =
(E_{invest} + E_{exist}) \cdot r_{cap,out}
* :attr:`invest_relation_input_output is not None`
Connect the invest variables of the input and the output flow:
.. math::
P_{i,invest} + P_{i,exist} =
(P_{o,invest} + P_{o,exist}) \cdot r_{in,out}
* :attr:`max_storage_level`
Rule for upper bound constraint for the storage content:
.. math::
E(t) \leq E_{invest} \cdot c_{max}(t)
* :attr:`min_storage_level`
Rule for lower bound constraint for the storage content:
.. math:: E(t) \geq E_{invest} \cdot c_{min}(t)
**Objective function**
The part of the objective function added by the investment storages
also depends on whether a convex or nonconvex
investment option is selected. The following parts of the objective
function are created:
* :attr:`nonconvex = False`
.. math::
E_{invest} \cdot c_{invest,var}
* :attr:`nonconvex = True`
.. math::
E_{invest} \cdot c_{invest,var}
+ c_{invest,fix} \cdot b_{invest}\\
The total value of all investment costs of all *InvestmentStorages*
can be retrieved calling
:meth:`om.GenericInvestmentStorageBlock.investment_costs.expr()`.
.. csv-table:: List of Variables
:header: "symbol", "attribute", "explanation"
:widths: 1, 1, 1
":math:`P_i(t)`", ":attr:`flow[i[n], n, t]`", "Inflow of the storage"
":math:`P_o(t)`", ":attr:`flow[n, o[n], t]`", "Outlfow of the storage"
":math:`E(t)`", ":attr:`storage_content[n, t]`", "Current storage
content (current absolute stored energy)"
":math:`E_{invest}`", ":attr:`invest[n, t]`", "Invested (nominal)
capacity of the storage"
":math:`E(-1)`", ":attr:`init_cap[n]`", "Initial storage capacity
(before timestep 0)"
":math:`b_{invest}`", ":attr:`invest_status[i, o]`", "Binary variable
for the status of investment"
":math:`P_{i,invest}`", ":attr:`InvestmentFlow.invest[i[n], n]`", "
Invested (nominal) inflow (Investmentflow)"
":math:`P_{o,invest}`", ":attr:`InvestmentFlow.invest[n, o[n]]`", "
Invested (nominal) outflow (Investmentflow)"
.. csv-table:: List of Parameters
:header: "symbol", "attribute", "explanation"
:widths: 1, 1, 1
":math:`E_{exist}`", ":py:obj:`flows[i, o].investment.existing`", "
Existing storage capacity"
":math:`E_{invest,min}`", ":py:obj:`flows[i, o].investment.minimum`", "
Minimum investment value"
":math:`E_{invest,max}`", ":py:obj:`flows[i, o].investment.maximum`", "
Maximum investment value"
":math:`P_{i,exist}`", ":py:obj:`flows[i[n], n].investment.existing`
", "Existing inflow capacity"
":math:`P_{o,exist}`", ":py:obj:`flows[n, o[n]].investment.existing`
", "Existing outlfow capacity"
":math:`c_{invest,var}`", ":py:obj:`flows[i, o].investment.ep_costs`
", "Variable investment costs"
":math:`c_{invest,fix}`", ":py:obj:`flows[i, o].investment.offset`", "
Fix investment costs"
":math:`r_{cap,in}`", ":attr:`invest_relation_input_capacity`", "
Relation of storage capacity and nominal inflow"
":math:`r_{cap,out}`", ":attr:`invest_relation_output_capacity`", "
Relation of storage capacity and nominal outflow"
":math:`r_{in,out}`", ":attr:`invest_relation_input_output`", "
Relation of nominal in- and outflow"
":math:`\beta(t)`", ":py:obj:`loss_rate[t]`", "Fraction of lost energy
as share of :math:`E(t)` per time unit"
":math:`\gamma(t)`", ":py:obj:`fixed_losses_relative[t]`", "Fixed loss
of energy relative to :math:`E_{invest} + E_{exist}` per time unit"
":math:`\delta(t)`", ":py:obj:`fixed_losses_absolute[t]`", "Absolute
fixed loss of energy per time unit"
":math:`\eta_i(t)`", ":py:obj:`inflow_conversion_factor[t]`", "
Conversion factor (i.e. efficiency) when storing energy"
":math:`\eta_o(t)`", ":py:obj:`outflow_conversion_factor[t]`", "
Conversion factor when (i.e. efficiency) taking stored energy"
":math:`c(-1)`", ":py:obj:`initial_storage_level`", "Initial relativ
storage content (before timestep 0)"
":math:`c_{max}`", ":py:obj:`flows[i, o].max[t]`", "Normed maximum
value of storage content"
":math:`c_{min}`", ":py:obj:`flows[i, o].min[t]`", "Normed minimum
value of storage content"
":math:`\tau(t)`", "", "Duration of time step"
":math:`t_u`", "", "Time unit of losses :math:`\beta(t)`,
:math:`\gamma(t)`, :math:`\delta(t)` and timeincrement :math:`\tau(t)`"
"""
    # Flag read by the solph model builder: blocks with CONSTRAINT_GROUP set
    # to True get their _create() invoked when the model is assembled.
    CONSTRAINT_GROUP = True
    def __init__(self, *args, **kwargs):
        """Forward all arguments to the parent block constructor."""
        super().__init__(*args, **kwargs)
    def _create(self, group=None):
        """Create sets, variables and constraints for all storages in
        *group* that carry an ``investment`` attribute.

        Parameters
        ----------
        group : list
            List of storage nodes for which the investment constraints
            are built, e.g. ``group=[storage1, storage2, ...]``.
        """
        m = self.parent_block()
        if group is None:
            return None
        # ########################## SETS #####################################
        self.INVESTSTORAGES = Set(initialize=[n for n in group])
        # Partition into convex / nonconvex investment storages; the
        # nonconvex ones additionally get a binary status variable below.
        self.CONVEX_INVESTSTORAGES = Set(
            initialize=[n for n in group if n.investment.nonconvex is False]
        )
        self.NON_CONVEX_INVESTSTORAGES = Set(
            initialize=[n for n in group if n.investment.nonconvex is True]
        )
        self.INVESTSTORAGES_BALANCED = Set(
            initialize=[n for n in group if n.balanced is True]
        )
        self.INVESTSTORAGES_NO_INIT_CONTENT = Set(
            initialize=[n for n in group if n.initial_storage_level is None]
        )
        self.INVESTSTORAGES_INIT_CONTENT = Set(
            initialize=[
                n for n in group if n.initial_storage_level is not None
            ]
        )
        self.INVEST_REL_CAP_IN = Set(
            initialize=[
                n
                for n in group
                if n.invest_relation_input_capacity is not None
            ]
        )
        self.INVEST_REL_CAP_OUT = Set(
            initialize=[
                n
                for n in group
                if n.invest_relation_output_capacity is not None
            ]
        )
        self.INVEST_REL_IN_OUT = Set(
            initialize=[
                n for n in group if n.invest_relation_input_output is not None
            ]
        )
        # The storage content is a non-negative variable, therefore it makes no
        # sense to create an additional constraint if the lower bound is zero
        # for all time steps.
        self.MIN_INVESTSTORAGES = Set(
            initialize=[
                n
                for n in group
                if sum([n.min_storage_level[t] for t in m.TIMESTEPS]) > 0
            ]
        )
        # ######################### Variables ################################
        self.storage_content = Var(
            self.INVESTSTORAGES, m.TIMESTEPS, within=NonNegativeReals
        )
        def _storage_investvar_bound_rule(block, n):
            """
            Rule definition to bound the invested storage capacity `invest`.
            """
            if n in self.CONVEX_INVESTSTORAGES:
                return n.investment.minimum, n.investment.maximum
            elif n in self.NON_CONVEX_INVESTSTORAGES:
                # Lower bound 0 here; the minimum is enforced via the
                # binary status variable in `smallest_invest` below.
                return 0, n.investment.maximum
        self.invest = Var(
            self.INVESTSTORAGES,
            within=NonNegativeReals,
            bounds=_storage_investvar_bound_rule,
        )
        self.init_content = Var(self.INVESTSTORAGES, within=NonNegativeReals)
        # create status variable for a non-convex investment storage
        self.invest_status = Var(self.NON_CONVEX_INVESTSTORAGES, within=Binary)
        # ######################### CONSTRAINTS ###############################
        # First input/output bus of each storage (assumes each storage has
        # exactly one input and one output flow — TODO confirm upstream).
        i = {n: [i for i in n.inputs][0] for n in group}
        o = {n: [o for o in n.outputs][0] for n in group}
        # All timesteps but the first; the first step has its own balance
        # rule using `init_content`.
        reduced_timesteps = [x for x in m.TIMESTEPS if x > 0]
        def _inv_storage_init_content_max_rule(block, n):
            """Constraint for a variable initial storage capacity."""
            return (
                block.init_content[n]
                <= n.investment.existing + block.invest[n]
            )
        self.init_content_limit = Constraint(
            self.INVESTSTORAGES_NO_INIT_CONTENT,
            rule=_inv_storage_init_content_max_rule,
        )
        def _inv_storage_init_content_fix_rule(block, n):
            """Constraint for a fixed initial storage capacity."""
            return block.init_content[n] == n.initial_storage_level * (
                n.investment.existing + block.invest[n]
            )
        self.init_content_fix = Constraint(
            self.INVESTSTORAGES_INIT_CONTENT,
            rule=_inv_storage_init_content_fix_rule,
        )
        def _storage_balance_first_rule(block, n):
            """
            Rule definition for the storage balance of every storage n for the
            first time step.
            """
            expr = 0
            expr += block.storage_content[n, 0]
            # carry-over from the initial content, reduced by relative losses
            expr += (
                -block.init_content[n]
                * (1 - n.loss_rate[0]) ** m.timeincrement[0]
            )
            expr += (
                n.fixed_losses_relative[0]
                * (n.investment.existing + self.invest[n])
                * m.timeincrement[0]
            )
            expr += n.fixed_losses_absolute[0] * m.timeincrement[0]
            # charging (inflow) and discharging (outflow) with efficiencies
            expr += (
                -m.flow[i[n], n, 0] * n.inflow_conversion_factor[0]
            ) * m.timeincrement[0]
            expr += (
                m.flow[n, o[n], 0] / n.outflow_conversion_factor[0]
            ) * m.timeincrement[0]
            return expr == 0
        self.balance_first = Constraint(
            self.INVESTSTORAGES, rule=_storage_balance_first_rule
        )
        def _storage_balance_rule(block, n, t):
            """
            Rule definition for the storage balance of every storage n for the
            every time step but the first.
            """
            expr = 0
            expr += block.storage_content[n, t]
            expr += (
                -block.storage_content[n, t - 1]
                * (1 - n.loss_rate[t]) ** m.timeincrement[t]
            )
            expr += (
                n.fixed_losses_relative[t]
                * (n.investment.existing + self.invest[n])
                * m.timeincrement[t]
            )
            expr += n.fixed_losses_absolute[t] * m.timeincrement[t]
            expr += (
                -m.flow[i[n], n, t] * n.inflow_conversion_factor[t]
            ) * m.timeincrement[t]
            expr += (
                m.flow[n, o[n], t] / n.outflow_conversion_factor[t]
            ) * m.timeincrement[t]
            return expr == 0
        self.balance = Constraint(
            self.INVESTSTORAGES, reduced_timesteps, rule=_storage_balance_rule
        )
        def _balanced_storage_rule(block, n):
            # Content of the last timestep must equal the initial content.
            return (
                block.storage_content[n, m.TIMESTEPS[-1]]
                == block.init_content[n]
            )
        self.balanced_cstr = Constraint(
            self.INVESTSTORAGES_BALANCED, rule=_balanced_storage_rule
        )
        def _power_coupled(block, n):
            """
            Rule definition for constraint to connect the input power
            and output power
            """
            expr = (
                m.InvestmentFlow.invest[n, o[n]]
                + m.flows[n, o[n]].investment.existing
            ) * n.invest_relation_input_output == (
                m.InvestmentFlow.invest[i[n], n]
                + m.flows[i[n], n].investment.existing
            )
            return expr
        self.power_coupled = Constraint(
            self.INVEST_REL_IN_OUT, rule=_power_coupled
        )
        def _storage_capacity_inflow_invest_rule(block, n):
            """
            Rule definition of constraint connecting the inflow
            `InvestmentFlow.invest of storage with invested capacity `invest`
            by nominal_storage_capacity__inflow_ratio
            """
            expr = (
                (
                    m.InvestmentFlow.invest[i[n], n]
                    + m.flows[i[n], n].investment.existing
                )
                == (n.investment.existing + self.invest[n])
                * n.invest_relation_input_capacity
            )
            return expr
        self.storage_capacity_inflow = Constraint(
            self.INVEST_REL_CAP_IN, rule=_storage_capacity_inflow_invest_rule
        )
        def _storage_capacity_outflow_invest_rule(block, n):
            """
            Rule definition of constraint connecting outflow
            `InvestmentFlow.invest` of storage and invested capacity `invest`
            by nominal_storage_capacity__outflow_ratio
            """
            expr = (
                (
                    m.InvestmentFlow.invest[n, o[n]]
                    + m.flows[n, o[n]].investment.existing
                )
                == (n.investment.existing + self.invest[n])
                * n.invest_relation_output_capacity
            )
            return expr
        self.storage_capacity_outflow = Constraint(
            self.INVEST_REL_CAP_OUT, rule=_storage_capacity_outflow_invest_rule
        )
        def _max_storage_content_invest_rule(block, n, t):
            """
            Rule definition for upper bound constraint for the
            storage content.
            """
            expr = (
                self.storage_content[n, t]
                <= (n.investment.existing + self.invest[n])
                * n.max_storage_level[t]
            )
            return expr
        self.max_storage_content = Constraint(
            self.INVESTSTORAGES,
            m.TIMESTEPS,
            rule=_max_storage_content_invest_rule,
        )
        def _min_storage_content_invest_rule(block, n, t):
            """
            Rule definition of lower bound constraint for the
            storage content.
            """
            expr = (
                self.storage_content[n, t]
                >= (n.investment.existing + self.invest[n])
                * n.min_storage_level[t]
            )
            return expr
        # Set the lower bound of the storage content if the attribute exists
        self.min_storage_content = Constraint(
            self.MIN_INVESTSTORAGES,
            m.TIMESTEPS,
            rule=_min_storage_content_invest_rule,
        )
        def maximum_invest_limit(block, n):
            """
            Constraint for the maximal investment in non convex investment
            storage.
            """
            return (
                n.investment.maximum * self.invest_status[n] - self.invest[n]
            ) >= 0
        self.limit_max = Constraint(
            self.NON_CONVEX_INVESTSTORAGES, rule=maximum_invest_limit
        )
        def smallest_invest(block, n):
            """
            Constraint for the minimal investment in non convex investment
            storage if the invest is greater than 0. So the invest variable
            can be either 0 or greater than the minimum.
            """
            return (
                self.invest[n] - (n.investment.minimum * self.invest_status[n])
                >= 0
            )
        self.limit_min = Constraint(
            self.NON_CONVEX_INVESTSTORAGES, rule=smallest_invest
        )
def _objective_expression(self):
"""Objective expression with fixed and investement costs."""
if not hasattr(self, "INVESTSTORAGES"):
return 0
investment_costs = 0
for n in self.CONVEX_INVESTSTORAGES:
investment_costs += self.invest[n] * n.investment.ep_costs
for n in self.NON_CONVEX_INVESTSTORAGES:
investment_costs += (
self.invest[n] * n.investment.ep_costs
+ self.invest_status[n] * n.investment.offset
)
self.investment_costs = Expression(expr=investment_costs)
return investment_costs
class GenericCHP(network.Transformer):
    r"""
    Component `GenericCHP` to model combined heat and power plants.
    Can be used to model (combined cycle) extraction or back-pressure turbines
    and uses a mixed-integer linear formulation. Thus, it induces more
    computational effort than the `ExtractionTurbineCHP` for the
    benefit of higher accuracy.
    The full set of equations is described in:
    <NAME>., <NAME>. & <NAME>.
    Evaluation of an energy- and exergy-based generic modeling
    approach of combined heat and power plants
    Int J Energy Environ Eng (2016) 7: 167.
    https://doi.org/10.1007/s40095-016-0204-6
    For a general understanding of (MI)LP CHP representation, see:
    <NAME>, P.
    Short - Term Operation Planning on Cogeneration Systems: A Survey
    Electric Power Systems Research (2007)
    Electric Power Systems Research
    Volume 78, Issue 5, May 2008, Pages 835-848
    https://doi.org/10.1016/j.epsr.2007.06.001
    Note
    ----
    An adaption for the flow parameter `H_L_FG_share_max` has been made to
    set the flue gas losses at maximum heat extraction `H_L_FG_max` as share of
    the fuel flow `H_F` e.g. for combined cycle extraction turbines.
    The flow parameter `H_L_FG_share_min` can be used to set the flue gas
    losses at minimum heat extraction `H_L_FG_min` as share of
    the fuel flow `H_F` e.g. for motoric CHPs.
    The boolean component parameter `back_pressure` can be set to model
    back-pressure characteristics.
    Also have a look at the examples on how to use it.
    Parameters
    ----------
    fuel_input : dict
        Dictionary with key-value-pair of `oemof.Bus` and `oemof.Flow` object
        for the fuel input.
    electrical_output : dict
        Dictionary with key-value-pair of `oemof.Bus` and `oemof.Flow` object
        for the electrical output. Related parameters like `P_max_woDH` are
        passed as attributes of the `oemof.Flow` object.
    heat_output : dict
        Dictionary with key-value-pair of `oemof.Bus` and `oemof.Flow` object
        for the heat output. Related parameters like `Q_CW_min` are passed as
        attributes of the `oemof.Flow` object.
    Beta : list of numerical values
        Beta values in same dimension as all other parameters (length of
        optimization period).
    back_pressure : boolean
        Flag to use back-pressure characteristics. Set to `True` and
        `Q_CW_min` to zero for back-pressure turbines. See paper above for more
        information.
    Note
    ----
    The following sets, variables, constraints and objective parts are created
    * :py:class:`~oemof.solph.components.GenericCHPBlock`
    Examples
    --------
    >>> from oemof import solph
    >>> bel = solph.Bus(label='electricityBus')
    >>> bth = solph.Bus(label='heatBus')
    >>> bgas = solph.Bus(label='commodityBus')
    >>> ccet = solph.components.GenericCHP(
    ...    label='combined_cycle_extraction_turbine',
    ...    fuel_input={bgas: solph.Flow(
    ...        H_L_FG_share_max=[0.183])},
    ...    electrical_output={bel: solph.Flow(
    ...        P_max_woDH=[155.946],
    ...        P_min_woDH=[68.787],
    ...        Eta_el_max_woDH=[0.525],
    ...        Eta_el_min_woDH=[0.444])},
    ...    heat_output={bth: solph.Flow(
    ...        Q_CW_min=[10.552])},
    ...    Beta=[0.122], back_pressure=False)
    >>> type(ccet)
    <class 'oemof.solph.components.GenericCHP'>
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Keep the user-supplied bus->flow dictionaries for later lookup.
        self.fuel_input = kwargs.get("fuel_input")
        self.electrical_output = kwargs.get("electrical_output")
        self.heat_output = kwargs.get("heat_output")
        self.Beta = solph_sequence(kwargs.get("Beta"))
        self.back_pressure = kwargs.get("back_pressure")
        # Lazily computed by _calculate_alphas() via the `alphas` property.
        self._alphas = None
        # map specific flows to standard API
        fuel_bus = list(self.fuel_input.keys())[0]
        fuel_flow = list(self.fuel_input.values())[0]
        fuel_bus.outputs.update({self: fuel_flow})
        self.outputs.update(kwargs.get("electrical_output"))
        self.outputs.update(kwargs.get("heat_output"))
    def _calculate_alphas(self):
        """
        Calculate alpha coefficients.
        A system of linear equations is created from passed capacities and
        efficiencies and solved to calculate both coefficients.
        """
        alphas = [[], []]
        # Electrical bus (assumes exactly one electrical output bus).
        eb = list(self.electrical_output.keys())[0]
        attrs = [
            self.electrical_output[eb].P_min_woDH,
            self.electrical_output[eb].Eta_el_min_woDH,
            self.electrical_output[eb].P_max_woDH,
            self.electrical_output[eb].Eta_el_max_woDH,
        ]
        # NOTE(review): if all four attributes were plain scalars, `length`
        # would be empty and max() below would raise ValueError; presumably
        # solph always wraps these as sequences — confirm upstream.
        length = [len(a) for a in attrs if not isinstance(a, (int, float))]
        max_length = max(length)
        if all(len(a) == max_length for a in attrs):
            if max_length == 0:
                max_length += 1  # increment dimension for scalars from 0 to 1
            for i in range(0, max_length):
                # Solve [1, P_min; 1, P_max] * [a0, a1]^T = [H_min, H_max]
                # for the fuel-consumption coefficients per timestep.
                A = np.array(
                    [
                        [1, self.electrical_output[eb].P_min_woDH[i]],
                        [1, self.electrical_output[eb].P_max_woDH[i]],
                    ]
                )
                b = np.array(
                    [
                        self.electrical_output[eb].P_min_woDH[i]
                        / self.electrical_output[eb].Eta_el_min_woDH[i],
                        self.electrical_output[eb].P_max_woDH[i]
                        / self.electrical_output[eb].Eta_el_max_woDH[i],
                    ]
                )
                x = np.linalg.solve(A, b)
                alphas[0].append(x[0])
                alphas[1].append(x[1])
        else:
            error_message = (
                "Attributes to calculate alphas "
                + "must be of same dimension."
            )
            raise ValueError(error_message)
        self._alphas = alphas
    @property
    def alphas(self):
        """Compute or return the _alphas attribute."""
        if self._alphas is None:
            self._calculate_alphas()
        return self._alphas
    def constraint_group(self):
        """Return the block class that creates this node's constraints."""
        return GenericCHPBlock
class GenericCHPBlock(SimpleBlock):
    r"""
    Block for the relation of the :math:`n` nodes with
    type class:`.GenericCHP`.
    **The following constraints are created:**
    .. _GenericCHP-equations1-10:
    .. math::
        &
        (1)\qquad \dot{H}_F(t) = fuel\ input \\
        &
        (2)\qquad \dot{Q}(t) = heat\ output \\
        &
        (3)\qquad P_{el}(t) = power\ output\\
        &
        (4)\qquad \dot{H}_F(t) = \alpha_0(t) \cdot Y(t) + \alpha_1(t) \cdot
        P_{el,woDH}(t)\\
        &
        (5)\qquad \dot{H}_F(t) = \alpha_0(t) \cdot Y(t) + \alpha_1(t) \cdot
        ( P_{el}(t) + \beta \cdot \dot{Q}(t) )\\
        &
        (6)\qquad \dot{H}_F(t) \leq Y(t) \cdot
        \frac{P_{el, max, woDH}(t)}{\eta_{el,max,woDH}(t)}\\
        &
        (7)\qquad \dot{H}_F(t) \geq Y(t) \cdot
        \frac{P_{el, min, woDH}(t)}{\eta_{el,min,woDH}(t)}\\
        &
        (8)\qquad \dot{H}_{L,FG,max}(t) = \dot{H}_F(t) \cdot
        \dot{H}_{L,FG,sharemax}(t)\\
        &
        (9)\qquad \dot{H}_{L,FG,min}(t) = \dot{H}_F(t) \cdot
        \dot{H}_{L,FG,sharemin}(t)\\
        &
        (10)\qquad P_{el}(t) + \dot{Q}(t) + \dot{H}_{L,FG,max}(t) +
        \dot{Q}_{CW, min}(t) \cdot Y(t) = / \leq \dot{H}_F(t)\\
    where :math:`= / \leq` depends on the CHP being back pressure or not.
    The coefficients :math:`\alpha_0` and :math:`\alpha_1`
    can be determined given the efficiencies maximal/minimal load:
    .. math::
        &
        \eta_{el,max,woDH}(t) = \frac{P_{el,max,woDH}(t)}{\alpha_0(t)
        \cdot Y(t) + \alpha_1(t) \cdot P_{el,max,woDH}(t)}\\
        &
        \eta_{el,min,woDH}(t) = \frac{P_{el,min,woDH}(t)}{\alpha_0(t)
        \cdot Y(t) + \alpha_1(t) \cdot P_{el,min,woDH}(t)}\\
    **For the attribute** :math:`\dot{H}_{L,FG,min}` **being not None**,
    e.g. for a motoric CHP, **the following is created:**
    **Constraint:**
    .. _GenericCHP-equations11:
    .. math::
        &
        (11)\qquad P_{el}(t) + \dot{Q}(t) + \dot{H}_{L,FG,min}(t) +
        \dot{Q}_{CW, min}(t) \cdot Y(t) \geq \dot{H}_F(t)\\[10pt]
    The symbols used are defined as follows (with Variables (V) and Parameters (P)):
    =============================== =============================== ==== =======================
    math. symbol                    attribute                       type explanation
    =============================== =============================== ==== =======================
    :math:`\dot{H}_{F}`             :py:obj:`H_F[n,t]`              V    input of enthalpy
                                                                         through fuel input
    :math:`P_{el}`                  :py:obj:`P[n,t]`                V    provided
                                                                         electric power
    :math:`P_{el,woDH}`             :py:obj:`P_woDH[n,t]`           V    electric power without
                                                                         district heating
    :math:`P_{el,min,woDH}`         :py:obj:`P_min_woDH[n,t]`       P    min. electric power
                                                                         without district heating
    :math:`P_{el,max,woDH}`         :py:obj:`P_max_woDH[n,t]`       P    max. electric power
                                                                         without district heating
    :math:`\dot{Q}`                 :py:obj:`Q[n,t]`                V    provided heat
    :math:`\dot{Q}_{CW, min}`       :py:obj:`Q_CW_min[n,t]`         P    minimal therm. condenser
                                                                         load to cooling water
    :math:`\dot{H}_{L,FG,min}`      :py:obj:`H_L_FG_min[n,t]`       V    flue gas enthalpy loss
                                                                         at min heat extraction
    :math:`\dot{H}_{L,FG,max}`      :py:obj:`H_L_FG_max[n,t]`       V    flue gas enthalpy loss
                                                                         at max heat extraction
    :math:`\dot{H}_{L,FG,sharemin}` :py:obj:`H_L_FG_share_min[n,t]` P    share of flue gas loss
                                                                         at min heat extraction
    :math:`\dot{H}_{L,FG,sharemax}` :py:obj:`H_L_FG_share_max[n,t]` P    share of flue gas loss
                                                                         at max heat extraction
    :math:`Y`                       :py:obj:`Y[n,t]`                V    status variable
                                                                         on/off
    :math:`\alpha_0`                :py:obj:`n.alphas[0][n,t]`      P    coefficient
                                                                         describing efficiency
    :math:`\alpha_1`                :py:obj:`n.alphas[1][n,t]`      P    coefficient
                                                                         describing efficiency
    :math:`\beta`                   :py:obj:`Beta[n,t]`             P    power loss index
    :math:`\eta_{el,min,woDH}`      :py:obj:`Eta_el_min_woDH[n,t]`  P    el. eff. at min. fuel
                                                                         flow w/o distr. heating
    :math:`\eta_{el,max,woDH}`      :py:obj:`Eta_el_max_woDH[n,t]`  P    el. eff. at max. fuel
                                                                         flow w/o distr. heating
    =============================== =============================== ==== =======================
    """  # noqa: E501
    CONSTRAINT_GROUP = True
    def __init__(self, *args, **kwargs):
        """Forward all arguments to the parent block constructor."""
        super().__init__(*args, **kwargs)
    def _create(self, group=None):
        """
        Create constraints for GenericCHPBlock.
        Parameters
        ----------
        group : list
            List containing `GenericCHP` objects.
            e.g. groups=[ghcp1, gchp2,..]
        """
        m = self.parent_block()
        if group is None:
            return None
        self.GENERICCHPS = Set(initialize=[n for n in group])
        # variables
        self.H_F = Var(self.GENERICCHPS, m.TIMESTEPS, within=NonNegativeReals)
        self.H_L_FG_max = Var(
            self.GENERICCHPS, m.TIMESTEPS, within=NonNegativeReals
        )
        self.H_L_FG_min = Var(
            self.GENERICCHPS, m.TIMESTEPS, within=NonNegativeReals
        )
        self.P_woDH = Var(
            self.GENERICCHPS, m.TIMESTEPS, within=NonNegativeReals
        )
        self.P = Var(self.GENERICCHPS, m.TIMESTEPS, within=NonNegativeReals)
        self.Q = Var(self.GENERICCHPS, m.TIMESTEPS, within=NonNegativeReals)
        # binary on/off status per CHP and timestep
        self.Y = Var(self.GENERICCHPS, m.TIMESTEPS, within=Binary)
        # constraint rules
        def _H_flow_rule(block, n, t):
            """Link fuel consumption to component inflow."""
            expr = 0
            expr += self.H_F[n, t]
            expr += -m.flow[list(n.fuel_input.keys())[0], n, t]
            return expr == 0
        self.H_flow = Constraint(
            self.GENERICCHPS, m.TIMESTEPS, rule=_H_flow_rule
        )
        def _Q_flow_rule(block, n, t):
            """Link heat flow to component outflow."""
            expr = 0
            expr += self.Q[n, t]
            expr += -m.flow[n, list(n.heat_output.keys())[0], t]
            return expr == 0
        self.Q_flow = Constraint(
            self.GENERICCHPS, m.TIMESTEPS, rule=_Q_flow_rule
        )
        def _P_flow_rule(block, n, t):
            """Link power flow to component outflow."""
            expr = 0
            expr += self.P[n, t]
            expr += -m.flow[n, list(n.electrical_output.keys())[0], t]
            return expr == 0
        self.P_flow = Constraint(
            self.GENERICCHPS, m.TIMESTEPS, rule=_P_flow_rule
        )
        def _H_F_1_rule(block, n, t):
            """Set P_woDH depending on H_F."""
            expr = 0
            expr += -self.H_F[n, t]
            expr += n.alphas[0][t] * self.Y[n, t]
            expr += n.alphas[1][t] * self.P_woDH[n, t]
            return expr == 0
        self.H_F_1 = Constraint(
            self.GENERICCHPS, m.TIMESTEPS, rule=_H_F_1_rule
        )
        def _H_F_2_rule(block, n, t):
            """Determine relation between H_F, P and Q."""
            expr = 0
            expr += -self.H_F[n, t]
            expr += n.alphas[0][t] * self.Y[n, t]
            expr += n.alphas[1][t] * (self.P[n, t] + n.Beta[t] * self.Q[n, t])
            return expr == 0
        self.H_F_2 = Constraint(
            self.GENERICCHPS, m.TIMESTEPS, rule=_H_F_2_rule
        )
        def _H_F_3_rule(block, n, t):
            """Set upper value of operating range via H_F."""
            expr = 0
            expr += self.H_F[n, t]
            expr += -self.Y[n, t] * (
                list(n.electrical_output.values())[0].P_max_woDH[t]
                / list(n.electrical_output.values())[0].Eta_el_max_woDH[t]
            )
            return expr <= 0
        self.H_F_3 = Constraint(
            self.GENERICCHPS, m.TIMESTEPS, rule=_H_F_3_rule
        )
        def _H_F_4_rule(block, n, t):
            """Set lower value of operating range via H_F."""
            expr = 0
            expr += self.H_F[n, t]
            expr += -self.Y[n, t] * (
                list(n.electrical_output.values())[0].P_min_woDH[t]
                / list(n.electrical_output.values())[0].Eta_el_min_woDH[t]
            )
            return expr >= 0
        self.H_F_4 = Constraint(
            self.GENERICCHPS, m.TIMESTEPS, rule=_H_F_4_rule
        )
        def _H_L_FG_max_rule(block, n, t):
            """Set max. flue gas loss as share fuel flow share."""
            expr = 0
            expr += -self.H_L_FG_max[n, t]
            expr += (
                self.H_F[n, t]
                * list(n.fuel_input.values())[0].H_L_FG_share_max[t]
            )
            return expr == 0
        self.H_L_FG_max_def = Constraint(
            self.GENERICCHPS, m.TIMESTEPS, rule=_H_L_FG_max_rule
        )
        def _Q_max_res_rule(block, n, t):
            """Set maximum Q depending on fuel and electrical flow."""
            expr = 0
            expr += self.P[n, t] + self.Q[n, t] + self.H_L_FG_max[n, t]
            expr += list(n.heat_output.values())[0].Q_CW_min[t] * self.Y[n, t]
            expr += -self.H_F[n, t]
            # back-pressure characteristics or one-segment model
            if n.back_pressure is True:
                return expr == 0
            else:
                return expr <= 0
        self.Q_max_res = Constraint(
            self.GENERICCHPS, m.TIMESTEPS, rule=_Q_max_res_rule
        )
        def _H_L_FG_min_rule(block, n, t):
            """Set min. flue gas loss as fuel flow share."""
            # minimum flue gas losses e.g. for motoric CHPs
            if getattr(
                list(n.fuel_input.values())[0], "H_L_FG_share_min", None
            ):
                expr = 0
                expr += -self.H_L_FG_min[n, t]
                expr += (
                    self.H_F[n, t]
                    * list(n.fuel_input.values())[0].H_L_FG_share_min[t]
                )
                return expr == 0
            else:
                return Constraint.Skip
        self.H_L_FG_min_def = Constraint(
            self.GENERICCHPS, m.TIMESTEPS, rule=_H_L_FG_min_rule
        )
        def _Q_min_res_rule(block, n, t):
            """Set minimum Q depending on fuel and electrical flow."""
            # minimum restriction for heat flows e.g. for motoric CHPs
            if getattr(
                list(n.fuel_input.values())[0], "H_L_FG_share_min", None
            ):
                expr = 0
                expr += self.P[n, t] + self.Q[n, t] + self.H_L_FG_min[n, t]
                expr += (
                    list(n.heat_output.values())[0].Q_CW_min[t] * self.Y[n, t]
                )
                expr += -self.H_F[n, t]
                return expr >= 0
            else:
                return Constraint.Skip
        self.Q_min_res = Constraint(
            self.GENERICCHPS, m.TIMESTEPS, rule=_Q_min_res_rule
        )
    def _objective_expression(self):
        r"""Objective expression for generic CHPs with no investment.
        Note: This adds nothing as variable costs are already
        added in the Block :class:`Flow`.
        """
        if not hasattr(self, "GENERICCHPS"):
            return 0
        return 0
class ExtractionTurbineCHP(solph_Transformer):
    r"""
    A CHP with an extraction turbine in a linear model. For more options see
    the :class:`~oemof.solph.components.GenericCHP` class.
    One main output flow has to be defined and is tapped by the remaining flow.
    The conversion factors have to be defined for the maximum tapped flow (
    full CHP mode) and for no tapped flow (full condensing mode). Even though
    it is possible to limit the variability of the tapped flow, so that the
    full condensing mode will never be reached.
    Parameters
    ----------
    conversion_factors : dict
        Dictionary containing conversion factors for conversion of inflow
        to specified outflow. Keys are output bus objects.
        The dictionary values can either be a scalar or a sequence with length
        of time horizon for simulation.
    conversion_factor_full_condensation : dict
        The efficiency of the main flow if there is no tapped flow. Only one
        key is allowed. Use one of the keys of the conversion factors. The key
        indicates the main flow. The other output flow is the tapped flow.
    Note
    ----
    The following sets, variables, constraints and objective parts are created
    * :py:class:`~oemof.solph.components.ExtractionTurbineCHPBlock`
    Examples
    --------
    >>> from oemof import solph
    >>> bel = solph.Bus(label='electricityBus')
    >>> bth = solph.Bus(label='heatBus')
    >>> bgas = solph.Bus(label='commodityBus')
    >>> et_chp = solph.components.ExtractionTurbineCHP(
    ...    label='variable_chp_gas',
    ...    inputs={bgas: solph.Flow(nominal_value=10e10)},
    ...    outputs={bel: solph.Flow(), bth: solph.Flow()},
    ...    conversion_factors={bel: 0.3, bth: 0.5},
    ...    conversion_factor_full_condensation={bel: 0.5})
    """

    def __init__(self, conversion_factor_full_condensation, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Wrap every full-condensation efficiency in a solph sequence so
        # scalars and time series can be treated alike later on.
        factors = {}
        for bus, factor in conversion_factor_full_condensation.items():
            factors[bus] = solph_sequence(factor)
        self.conversion_factor_full_condensation = factors

    def constraint_group(self):
        """Return the block class that creates this node's constraints."""
        return ExtractionTurbineCHPBlock
class ExtractionTurbineCHPBlock(SimpleBlock):
    r"""Block for the linear relation of nodes with type
    :class:`~oemof.solph.components.ExtractionTurbineCHP`
    **The following two constraints are created:**
    .. _ETCHP-equations:
        .. math::
            &
            (1)\dot H_{Fuel}(t) =
               \frac{P_{el}(t) + \dot Q_{th}(t) \cdot \beta(t)}
                 {\eta_{el,woExtr}(t)} \\
            &
            (2)P_{el}(t) \geq \dot Q_{th}(t) \cdot C_b =
               \dot Q_{th}(t) \cdot
               \frac{\eta_{el,maxExtr}(t)}
                 {\eta_{th,maxExtr}(t)}
    where :math:`\beta` is defined as:
         .. math::
            \beta(t) = \frac{\eta_{el,woExtr}(t) -
            \eta_{el,maxExtr}(t)}{\eta_{th,maxExtr}(t)}
    where the first equation is the result of the relation between the input
    flow and the two output flows, the second equation stems from how the two
    output flows relate to each other, and the symbols used are defined as
    follows (with Variables (V) and Parameters (P)):
    ========================= ==================================================== ==== =========
    symbol                    attribute                                            type explanation
    ========================= ==================================================== ==== =========
    :math:`\dot H_{Fuel}`     :py:obj:`flow[i, n, t]`                              V    fuel input flow
    :math:`P_{el}`            :py:obj:`flow[n, main_output, t]`                    V    electric power
    :math:`\dot Q_{th}`       :py:obj:`flow[n, tapped_output, t]`                  V    thermal output
    :math:`\beta`             :py:obj:`main_flow_loss_index[n, t]`                 P    power loss index
    :math:`\eta_{el,woExtr}`  :py:obj:`conversion_factor_full_condensation[n, t]`  P    electric efficiency
                                                                                        without heat extraction
    :math:`\eta_{el,maxExtr}` :py:obj:`conversion_factors[main_output][n, t]`      P    electric efficiency
                                                                                        with max heat extraction
    :math:`\eta_{th,maxExtr}` :py:obj:`conversion_factors[tapped_output][n, t]`    P    thermal efficiency with
                                                                                        maximal heat extraction
    ========================= ==================================================== ==== =========
    """  # noqa: E501
    CONSTRAINT_GROUP = True
    def __init__(self, *args, **kwargs):
        """Forward all arguments to the parent block constructor."""
        super().__init__(*args, **kwargs)
    def _create(self, group=None):
        """ Creates the linear constraint for the
        :class:`oemof.solph.Transformer` block.
        Parameters
        ----------
        group : list
            List of :class:`oemof.solph.ExtractionTurbineCHP` (trsf) objects
            for which the linear relation of inputs and outputs is created
            e.g. group = [trsf1, trsf2, trsf3, ...]. Note that the relation
            is created for all existing relations of the inputs and all outputs
            of the transformer. The components inside the list need to hold
            all needed attributes.
        """
        if group is None:
            return None
        m = self.parent_block()
        for n in group:
            # Identify the (single) inflow and split the two outputs into
            # the main and the tapped flow via the full-condensation key.
            n.inflow = list(n.inputs)[0]
            n.main_flow = [
                k for k, v in n.conversion_factor_full_condensation.items()
            ][0]
            n.main_output = [o for o in n.outputs if n.main_flow == o][0]
            n.tapped_output = [o for o in n.outputs if n.main_flow != o][0]
            n.conversion_factor_full_condensation_sq = (
                n.conversion_factor_full_condensation[n.main_output]
            )
            # C_b: ratio of electric to thermal efficiency at max extraction.
            n.flow_relation_index = [
                n.conversion_factors[n.main_output][t]
                / n.conversion_factors[n.tapped_output][t]
                for t in m.TIMESTEPS
            ]
            # beta: power loss index per timestep.
            n.main_flow_loss_index = [
                (
                    n.conversion_factor_full_condensation_sq[t]
                    - n.conversion_factors[n.main_output][t]
                )
                / n.conversion_factors[n.tapped_output][t]
                for t in m.TIMESTEPS
            ]
        def _input_output_relation_rule(block):
            """Connection between input, main output and tapped output.
            """
            for t in m.TIMESTEPS:
                for g in group:
                    lhs = m.flow[g.inflow, g, t]
                    rhs = (
                        m.flow[g, g.main_output, t]
                        + m.flow[g, g.tapped_output, t]
                        * g.main_flow_loss_index[t]
                    ) / g.conversion_factor_full_condensation_sq[t]
                    block.input_output_relation.add((g, t), (lhs == rhs))
        self.input_output_relation = Constraint(
            group, m.TIMESTEPS, noruleinit=True
        )
        self.input_output_relation_build = BuildAction(
            rule=_input_output_relation_rule
        )
        def _out_flow_relation_rule(block):
            """Relation between main and tapped output in full chp mode.
            """
            for t in m.TIMESTEPS:
                for g in group:
                    lhs = m.flow[g, g.main_output, t]
                    rhs = (
                        m.flow[g, g.tapped_output, t]
                        * g.flow_relation_index[t]
                    )
                    block.out_flow_relation.add((g, t), (lhs >= rhs))
        self.out_flow_relation = Constraint(
            group, m.TIMESTEPS, noruleinit=True
        )
        self.out_flow_relation_build = BuildAction(
            rule=_out_flow_relation_rule
        )
class OffsetTransformer(network.Transformer):
    """An object with one input and one output.
    Parameters
    ----------
    coefficients : tuple
        Tuple containing the first two polynomial coefficients
        i.e. the y-intersection and slope of a linear equation.
        The tuple values can either be a scalar or a sequence with length
        of time horizon for simulation.
    Notes
    -----
    The sets, variables, constraints and objective parts are created
    * :py:class:`~oemof.solph.components.OffsetTransformerBlock`
    Examples
    --------
    >>> from oemof import solph
    >>> bel = solph.Bus(label='bel')
    >>> bth = solph.Bus(label='bth')
    >>> ostf = solph.components.OffsetTransformer(
    ...    label='ostf',
    ...    inputs={bel: solph.Flow(
    ...        nominal_value=60, min=0.5, max=1.0,
    ...        nonconvex=solph.NonConvex())},
    ...    outputs={bth: solph.Flow()},
    ...    coefficients=(20, 0.5))
    >>> type(ostf)
    <class 'oemof.solph.components.OffsetTransformer'>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        coefficients = kwargs.get("coefficients")
        if coefficients is not None:
            # Store exactly two solph sequences: (y-intersection, slope).
            self.coefficients = tuple(
                solph_sequence(c) for c in coefficients
            )
            if len(self.coefficients) != 2:
                raise ValueError(
                    "Two coefficients or coefficient series have to be given."
                )
        # The single input flow must be nonconvex so its binary status
        # variable exists for the offset term.
        if len(self.inputs) == 1:
            for flow in self.inputs.values():
                if not flow.nonconvex:
                    raise TypeError(
                        "Input flows must be of type NonConvexFlow!"
                    )
        if len(self.inputs) > 1 or len(self.outputs) > 1:
            raise ValueError(
                "Component `OffsetTransformer` must not have "
                + "more than 1 input and 1 output!"
            )

    def constraint_group(self):
        """Return the block class that creates this node's constraints."""
        return OffsetTransformerBlock
class OffsetTransformerBlock(SimpleBlock):
    r"""Block for the relation of nodes with type
    :class:`~oemof.solph.components.OffsetTransformer`

    **The following constraints are created:**

    .. _OffsetTransformer-equations:

    .. math::
        &
        P_{out}(t) = C_1(t) \cdot P_{in}(t) + C_0(t) \cdot Y(t) \\

    .. csv-table:: Variables (V) and Parameters (P)
        :header: "symbol", "attribute", "type", "explanation"
        :widths: 1, 1, 1, 1

        ":math:`P_{out}(t)`", ":py:obj:`flow[n, o, t]`", "V", "Power of output"
        ":math:`P_{in}(t)`", ":py:obj:`flow[i, n, t]`", "V","Power of input"
        ":math:`Y(t)`", ":py:obj:`status[i, n, t]`", "V","binary
        status variable of nonconvex input flow "
        ":math:`C_1(t)`", ":py:obj:`coefficients[1][n, t]`", "P", "linear
        coefficient 1 (slope)"
        ":math:`C_0(t)`", ":py:obj:`coefficients[0][n, t]`", "P", "linear
        coefficient 0 (y-intersection)"
    """
    # Marks this block as a constraint group picked up by the model builder.
    CONSTRAINT_GROUP = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _create(self, group=None):
        """ Creates the relation for the class:`OffsetTransformer`.

        Parameters
        ----------
        group : list
            List of oemof.solph.custom.OffsetTransformer objects for which
            the relation of inputs and outputs is created
            e.g. group = [ostf1, ostf2, ostf3, ...]. The components inside
            the list need to hold an attribute `coefficients` of type dict
            containing the conversion factors for all inputs to outputs.
        """
        if group is None:
            return None

        m = self.parent_block()

        self.OFFSETTRANSFORMERS = Set(initialize=[n for n in group])

        def _relation_rule(block, n, t):
            """Link binary input and output flow to component outflow."""
            # P_out = C1 * P_in + C0 * status, written as '... == 0'.
            # Components have exactly one input and one output (validated
            # in OffsetTransformer.__init__), hence the [0] indexing.
            expr = 0
            expr += -m.flow[n, list(n.outputs.keys())[0], t]
            expr += (
                m.flow[list(n.inputs.keys())[0], n, t] * n.coefficients[1][t]
            )
            # The y-intersection only applies while the unit is on, so it
            # is multiplied with the nonconvex flow's binary status.
            expr += (
                m.NonConvexFlow.status[list(n.inputs.keys())[0], n, t]
                * n.coefficients[0][t]
            )
            return expr == 0

        self.relation = Constraint(
            self.OFFSETTRANSFORMERS, m.TIMESTEPS, rule=_relation_rule
        )
| [
"oemof.solph.plumbing.sequence",
"numpy.linalg.solve",
"pyomo.environ.BuildAction",
"numpy.array",
"pyomo.environ.Set",
"pyomo.environ.Var",
"oemof.solph.options.Investment",
"pyomo.environ.Expression",
"pyomo.environ.Constraint"
] | [((6681, 6714), 'oemof.solph.plumbing.sequence', 'solph_sequence', (['max_storage_level'], {}), '(max_storage_level)\n', (6695, 6714), True, 'from oemof.solph.plumbing import sequence as solph_sequence\n'), ((6748, 6781), 'oemof.solph.plumbing.sequence', 'solph_sequence', (['min_storage_level'], {}), '(min_storage_level)\n', (6762, 6781), True, 'from oemof.solph.plumbing import sequence as solph_sequence\n'), ((15605, 15639), 'pyomo.environ.Set', 'Set', ([], {'initialize': '[n for n in group]'}), '(initialize=[n for n in group])\n', (15608, 15639), False, 'from pyomo.environ import Set\n'), ((15674, 15730), 'pyomo.environ.Set', 'Set', ([], {'initialize': '[n for n in group if n.balanced is True]'}), '(initialize=[n for n in group if n.balanced is True])\n', (15677, 15730), False, 'from pyomo.environ import Set\n'), ((15799, 15884), 'pyomo.environ.Set', 'Set', ([], {'initialize': '[n for n in group if n.invest_relation_input_output is not None]'}), '(initialize=[n for n in group if n.invest_relation_input_output is not None]\n )\n', (15802, 15884), False, 'from pyomo.environ import Set\n'), ((16425, 16492), 'pyomo.environ.Var', 'Var', (['self.STORAGES', 'm.TIMESTEPS'], {'bounds': '_storage_content_bound_rule'}), '(self.STORAGES, m.TIMESTEPS, bounds=_storage_content_bound_rule)\n', (16428, 16492), False, 'from pyomo.environ import Var\n'), ((16650, 16739), 'pyomo.environ.Var', 'Var', (['self.STORAGES'], {'within': 'NonNegativeReals', 'bounds': '_storage_init_content_bound_rule'}), '(self.STORAGES, within=NonNegativeReals, bounds=\n _storage_init_content_bound_rule)\n', (16653, 16739), False, 'from pyomo.environ import Var\n'), ((18194, 18253), 'pyomo.environ.Constraint', 'Constraint', (['self.STORAGES'], {'rule': '_storage_balance_first_rule'}), '(self.STORAGES, rule=_storage_balance_first_rule)\n', (18204, 18253), False, 'from pyomo.environ import Constraint\n'), ((19298, 19370), 'pyomo.environ.Constraint', 'Constraint', (['self.STORAGES', 'reduced_timesteps'], 
{'rule': '_storage_balance_rule'}), '(self.STORAGES, reduced_timesteps, rule=_storage_balance_rule)\n', (19308, 19370), False, 'from pyomo.environ import Constraint\n'), ((19734, 19797), 'pyomo.environ.Constraint', 'Constraint', (['self.STORAGES_BALANCED'], {'rule': '_balanced_storage_rule'}), '(self.STORAGES_BALANCED, rule=_balanced_storage_rule)\n', (19744, 19797), False, 'from pyomo.environ import Constraint\n'), ((20339, 20406), 'pyomo.environ.Constraint', 'Constraint', (['self.STORAGES_WITH_INVEST_FLOW_REL'], {'rule': '_power_coupled'}), '(self.STORAGES_WITH_INVEST_FLOW_REL, rule=_power_coupled)\n', (20349, 20406), False, 'from pyomo.environ import Constraint\n'), ((29091, 29125), 'pyomo.environ.Set', 'Set', ([], {'initialize': '[n for n in group]'}), '(initialize=[n for n in group])\n', (29094, 29125), False, 'from pyomo.environ import Set\n'), ((29164, 29233), 'pyomo.environ.Set', 'Set', ([], {'initialize': '[n for n in group if n.investment.nonconvex is False]'}), '(initialize=[n for n in group if n.investment.nonconvex is False])\n', (29167, 29233), False, 'from pyomo.environ import Set\n'), ((29298, 29366), 'pyomo.environ.Set', 'Set', ([], {'initialize': '[n for n in group if n.investment.nonconvex is True]'}), '(initialize=[n for n in group if n.investment.nonconvex is True])\n', (29301, 29366), False, 'from pyomo.environ import Set\n'), ((29429, 29485), 'pyomo.environ.Set', 'Set', ([], {'initialize': '[n for n in group if n.balanced is True]'}), '(initialize=[n for n in group if n.balanced is True])\n', (29432, 29485), False, 'from pyomo.environ import Set\n'), ((29555, 29624), 'pyomo.environ.Set', 'Set', ([], {'initialize': '[n for n in group if n.initial_storage_level is None]'}), '(initialize=[n for n in group if n.initial_storage_level is None])\n', (29558, 29624), False, 'from pyomo.environ import Set\n'), ((29691, 29764), 'pyomo.environ.Set', 'Set', ([], {'initialize': '[n for n in group if n.initial_storage_level is not None]'}), '(initialize=[n 
for n in group if n.initial_storage_level is not None])\n', (29694, 29764), False, 'from pyomo.environ import Set\n'), ((29851, 29937), 'pyomo.environ.Set', 'Set', ([], {'initialize': '[n for n in group if n.invest_relation_input_capacity is not None]'}), '(initialize=[n for n in group if n.invest_relation_input_capacity is not\n None])\n', (29854, 29937), False, 'from pyomo.environ import Set\n'), ((30053, 30140), 'pyomo.environ.Set', 'Set', ([], {'initialize': '[n for n in group if n.invest_relation_output_capacity is not None]'}), '(initialize=[n for n in group if n.invest_relation_output_capacity is not\n None])\n', (30056, 30140), False, 'from pyomo.environ import Set\n'), ((30255, 30340), 'pyomo.environ.Set', 'Set', ([], {'initialize': '[n for n in group if n.invest_relation_input_output is not None]'}), '(initialize=[n for n in group if n.invest_relation_input_output is not None]\n )\n', (30258, 30340), False, 'from pyomo.environ import Set\n'), ((30900, 30962), 'pyomo.environ.Var', 'Var', (['self.INVESTSTORAGES', 'm.TIMESTEPS'], {'within': 'NonNegativeReals'}), '(self.INVESTSTORAGES, m.TIMESTEPS, within=NonNegativeReals)\n', (30903, 30962), False, 'from pyomo.environ import Var\n'), ((31386, 31478), 'pyomo.environ.Var', 'Var', (['self.INVESTSTORAGES'], {'within': 'NonNegativeReals', 'bounds': '_storage_investvar_bound_rule'}), '(self.INVESTSTORAGES, within=NonNegativeReals, bounds=\n _storage_investvar_bound_rule)\n', (31389, 31478), False, 'from pyomo.environ import Var\n'), ((31550, 31599), 'pyomo.environ.Var', 'Var', (['self.INVESTSTORAGES'], {'within': 'NonNegativeReals'}), '(self.INVESTSTORAGES, within=NonNegativeReals)\n', (31553, 31599), False, 'from pyomo.environ import Var\n'), ((31699, 31749), 'pyomo.environ.Var', 'Var', (['self.NON_CONVEX_INVESTSTORAGES'], {'within': 'Binary'}), '(self.NON_CONVEX_INVESTSTORAGES, within=Binary)\n', (31702, 31749), False, 'from pyomo.environ import Var\n'), ((32305, 32398), 'pyomo.environ.Constraint', 'Constraint', 
(['self.INVESTSTORAGES_NO_INIT_CONTENT'], {'rule': '_inv_storage_init_content_max_rule'}), '(self.INVESTSTORAGES_NO_INIT_CONTENT, rule=\n _inv_storage_init_content_max_rule)\n', (32315, 32398), False, 'from pyomo.environ import Constraint\n'), ((32730, 32820), 'pyomo.environ.Constraint', 'Constraint', (['self.INVESTSTORAGES_INIT_CONTENT'], {'rule': '_inv_storage_init_content_fix_rule'}), '(self.INVESTSTORAGES_INIT_CONTENT, rule=\n _inv_storage_init_content_fix_rule)\n', (32740, 32820), False, 'from pyomo.environ import Constraint\n'), ((33800, 33865), 'pyomo.environ.Constraint', 'Constraint', (['self.INVESTSTORAGES'], {'rule': '_storage_balance_first_rule'}), '(self.INVESTSTORAGES, rule=_storage_balance_first_rule)\n', (33810, 33865), False, 'from pyomo.environ import Constraint\n'), ((34852, 34930), 'pyomo.environ.Constraint', 'Constraint', (['self.INVESTSTORAGES', 'reduced_timesteps'], {'rule': '_storage_balance_rule'}), '(self.INVESTSTORAGES, reduced_timesteps, rule=_storage_balance_rule)\n', (34862, 34930), False, 'from pyomo.environ import Constraint\n'), ((35164, 35233), 'pyomo.environ.Constraint', 'Constraint', (['self.INVESTSTORAGES_BALANCED'], {'rule': '_balanced_storage_rule'}), '(self.INVESTSTORAGES_BALANCED, rule=_balanced_storage_rule)\n', (35174, 35233), False, 'from pyomo.environ import Constraint\n'), ((35775, 35830), 'pyomo.environ.Constraint', 'Constraint', (['self.INVEST_REL_IN_OUT'], {'rule': '_power_coupled'}), '(self.INVEST_REL_IN_OUT, rule=_power_coupled)\n', (35785, 35830), False, 'from pyomo.environ import Constraint\n'), ((36500, 36577), 'pyomo.environ.Constraint', 'Constraint', (['self.INVEST_REL_CAP_IN'], {'rule': '_storage_capacity_inflow_invest_rule'}), '(self.INVEST_REL_CAP_IN, rule=_storage_capacity_inflow_invest_rule)\n', (36510, 36577), False, 'from pyomo.environ import Constraint\n'), ((37248, 37327), 'pyomo.environ.Constraint', 'Constraint', (['self.INVEST_REL_CAP_OUT'], {'rule': '_storage_capacity_outflow_invest_rule'}), 
'(self.INVEST_REL_CAP_OUT, rule=_storage_capacity_outflow_invest_rule)\n', (37258, 37327), False, 'from pyomo.environ import Constraint\n'), ((37773, 37861), 'pyomo.environ.Constraint', 'Constraint', (['self.INVESTSTORAGES', 'm.TIMESTEPS'], {'rule': '_max_storage_content_invest_rule'}), '(self.INVESTSTORAGES, m.TIMESTEPS, rule=\n _max_storage_content_invest_rule)\n', (37783, 37861), False, 'from pyomo.environ import Constraint\n'), ((38403, 38495), 'pyomo.environ.Constraint', 'Constraint', (['self.MIN_INVESTSTORAGES', 'm.TIMESTEPS'], {'rule': '_min_storage_content_invest_rule'}), '(self.MIN_INVESTSTORAGES, m.TIMESTEPS, rule=\n _min_storage_content_invest_rule)\n', (38413, 38495), False, 'from pyomo.environ import Constraint\n'), ((38855, 38924), 'pyomo.environ.Constraint', 'Constraint', (['self.NON_CONVEX_INVESTSTORAGES'], {'rule': 'maximum_invest_limit'}), '(self.NON_CONVEX_INVESTSTORAGES, rule=maximum_invest_limit)\n', (38865, 38924), False, 'from pyomo.environ import Constraint\n'), ((39389, 39453), 'pyomo.environ.Constraint', 'Constraint', (['self.NON_CONVEX_INVESTSTORAGES'], {'rule': 'smallest_invest'}), '(self.NON_CONVEX_INVESTSTORAGES, rule=smallest_invest)\n', (39399, 39453), False, 'from pyomo.environ import Constraint\n'), ((40045, 40078), 'pyomo.environ.Expression', 'Expression', ([], {'expr': 'investment_costs'}), '(expr=investment_costs)\n', (40055, 40078), False, 'from pyomo.environ import Expression\n'), ((52398, 52432), 'pyomo.environ.Set', 'Set', ([], {'initialize': '[n for n in group]'}), '(initialize=[n for n in group])\n', (52401, 52432), False, 'from pyomo.environ import Set\n'), ((52473, 52532), 'pyomo.environ.Var', 'Var', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'within': 'NonNegativeReals'}), '(self.GENERICCHPS, m.TIMESTEPS, within=NonNegativeReals)\n', (52476, 52532), False, 'from pyomo.environ import Var\n'), ((52559, 52618), 'pyomo.environ.Var', 'Var', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'within': 'NonNegativeReals'}), 
'(self.GENERICCHPS, m.TIMESTEPS, within=NonNegativeReals)\n', (52562, 52618), False, 'from pyomo.environ import Var\n'), ((52667, 52726), 'pyomo.environ.Var', 'Var', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'within': 'NonNegativeReals'}), '(self.GENERICCHPS, m.TIMESTEPS, within=NonNegativeReals)\n', (52670, 52726), False, 'from pyomo.environ import Var\n'), ((52771, 52830), 'pyomo.environ.Var', 'Var', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'within': 'NonNegativeReals'}), '(self.GENERICCHPS, m.TIMESTEPS, within=NonNegativeReals)\n', (52774, 52830), False, 'from pyomo.environ import Var\n'), ((52870, 52929), 'pyomo.environ.Var', 'Var', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'within': 'NonNegativeReals'}), '(self.GENERICCHPS, m.TIMESTEPS, within=NonNegativeReals)\n', (52873, 52929), False, 'from pyomo.environ import Var\n'), ((52947, 53006), 'pyomo.environ.Var', 'Var', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'within': 'NonNegativeReals'}), '(self.GENERICCHPS, m.TIMESTEPS, within=NonNegativeReals)\n', (52950, 53006), False, 'from pyomo.environ import Var\n'), ((53024, 53073), 'pyomo.environ.Var', 'Var', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'within': 'Binary'}), '(self.GENERICCHPS, m.TIMESTEPS, within=Binary)\n', (53027, 53073), False, 'from pyomo.environ import Var\n'), ((53374, 53434), 'pyomo.environ.Constraint', 'Constraint', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'rule': '_H_flow_rule'}), '(self.GENERICCHPS, m.TIMESTEPS, rule=_H_flow_rule)\n', (53384, 53434), False, 'from pyomo.environ import Constraint\n'), ((53723, 53783), 'pyomo.environ.Constraint', 'Constraint', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'rule': '_Q_flow_rule'}), '(self.GENERICCHPS, m.TIMESTEPS, rule=_Q_flow_rule)\n', (53733, 53783), False, 'from pyomo.environ import Constraint\n'), ((54079, 54139), 'pyomo.environ.Constraint', 'Constraint', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'rule': '_P_flow_rule'}), '(self.GENERICCHPS, m.TIMESTEPS, rule=_P_flow_rule)\n', (54089, 54139), False, 'from 
pyomo.environ import Constraint\n'), ((54461, 54520), 'pyomo.environ.Constraint', 'Constraint', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'rule': '_H_F_1_rule'}), '(self.GENERICCHPS, m.TIMESTEPS, rule=_H_F_1_rule)\n', (54471, 54520), False, 'from pyomo.environ import Constraint\n'), ((54878, 54937), 'pyomo.environ.Constraint', 'Constraint', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'rule': '_H_F_2_rule'}), '(self.GENERICCHPS, m.TIMESTEPS, rule=_H_F_2_rule)\n', (54888, 54937), False, 'from pyomo.environ import Constraint\n'), ((55363, 55422), 'pyomo.environ.Constraint', 'Constraint', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'rule': '_H_F_3_rule'}), '(self.GENERICCHPS, m.TIMESTEPS, rule=_H_F_3_rule)\n', (55373, 55422), False, 'from pyomo.environ import Constraint\n'), ((55848, 55907), 'pyomo.environ.Constraint', 'Constraint', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'rule': '_H_F_4_rule'}), '(self.GENERICCHPS, m.TIMESTEPS, rule=_H_F_4_rule)\n', (55858, 55907), False, 'from pyomo.environ import Constraint\n'), ((56301, 56365), 'pyomo.environ.Constraint', 'Constraint', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'rule': '_H_L_FG_max_rule'}), '(self.GENERICCHPS, m.TIMESTEPS, rule=_H_L_FG_max_rule)\n', (56311, 56365), False, 'from pyomo.environ import Constraint\n'), ((56925, 56988), 'pyomo.environ.Constraint', 'Constraint', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'rule': '_Q_max_res_rule'}), '(self.GENERICCHPS, m.TIMESTEPS, rule=_Q_max_res_rule)\n', (56935, 56988), False, 'from pyomo.environ import Constraint\n'), ((57633, 57697), 'pyomo.environ.Constraint', 'Constraint', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'rule': '_H_L_FG_min_rule'}), '(self.GENERICCHPS, m.TIMESTEPS, rule=_H_L_FG_min_rule)\n', (57643, 57697), False, 'from pyomo.environ import Constraint\n'), ((58396, 58459), 'pyomo.environ.Constraint', 'Constraint', (['self.GENERICCHPS', 'm.TIMESTEPS'], {'rule': '_Q_min_res_rule'}), '(self.GENERICCHPS, m.TIMESTEPS, rule=_Q_min_res_rule)\n', (58406, 58459), False, 'from 
pyomo.environ import Constraint\n'), ((65974, 66021), 'pyomo.environ.Constraint', 'Constraint', (['group', 'm.TIMESTEPS'], {'noruleinit': '(True)'}), '(group, m.TIMESTEPS, noruleinit=True)\n', (65984, 66021), False, 'from pyomo.environ import Constraint\n'), ((66087, 66132), 'pyomo.environ.BuildAction', 'BuildAction', ([], {'rule': '_input_output_relation_rule'}), '(rule=_input_output_relation_rule)\n', (66098, 66132), False, 'from pyomo.environ import BuildAction\n'), ((66668, 66715), 'pyomo.environ.Constraint', 'Constraint', (['group', 'm.TIMESTEPS'], {'noruleinit': '(True)'}), '(group, m.TIMESTEPS, noruleinit=True)\n', (66678, 66715), False, 'from pyomo.environ import Constraint\n'), ((66777, 66818), 'pyomo.environ.BuildAction', 'BuildAction', ([], {'rule': '_out_flow_relation_rule'}), '(rule=_out_flow_relation_rule)\n', (66788, 66818), False, 'from pyomo.environ import BuildAction\n'), ((70600, 70634), 'pyomo.environ.Set', 'Set', ([], {'initialize': '[n for n in group]'}), '(initialize=[n for n in group])\n', (70603, 70634), False, 'from pyomo.environ import Set\n'), ((71147, 71216), 'pyomo.environ.Constraint', 'Constraint', (['self.OFFSETTRANSFORMERS', 'm.TIMESTEPS'], {'rule': '_relation_rule'}), '(self.OFFSETTRANSFORMERS, m.TIMESTEPS, rule=_relation_rule)\n', (71157, 71216), False, 'from pyomo.environ import Constraint\n'), ((60816, 60833), 'oemof.solph.plumbing.sequence', 'solph_sequence', (['v'], {}), '(v)\n', (60830, 60833), True, 'from oemof.solph.plumbing import sequence as solph_sequence\n'), ((8914, 8926), 'oemof.solph.options.Investment', 'Investment', ([], {}), '()\n', (8924, 8926), False, 'from oemof.solph.options import Investment\n'), ((9165, 9177), 'oemof.solph.options.Investment', 'Investment', ([], {}), '()\n', (9175, 9177), False, 'from oemof.solph.options import Investment\n'), ((45179, 45288), 'numpy.array', 'np.array', (['[[1, self.electrical_output[eb].P_min_woDH[i]], [1, self.electrical_output[\n eb].P_max_woDH[i]]]'], {}), '([[1, 
self.electrical_output[eb].P_min_woDH[i]], [1, self.\n electrical_output[eb].P_max_woDH[i]]])\n', (45187, 45288), True, 'import numpy as np\n'), ((45413, 45612), 'numpy.array', 'np.array', (['[self.electrical_output[eb].P_min_woDH[i] / self.electrical_output[eb].\n Eta_el_min_woDH[i], self.electrical_output[eb].P_max_woDH[i] / self.\n electrical_output[eb].Eta_el_max_woDH[i]]'], {}), '([self.electrical_output[eb].P_min_woDH[i] / self.electrical_output\n [eb].Eta_el_min_woDH[i], self.electrical_output[eb].P_max_woDH[i] /\n self.electrical_output[eb].Eta_el_max_woDH[i]])\n', (45421, 45612), True, 'import numpy as np\n'), ((45781, 45802), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (45796, 45802), True, 'import numpy as np\n'), ((10067, 10109), 'oemof.solph.plumbing.sequence', 'solph_sequence', (['self.fixed_losses_absolute'], {}), '(self.fixed_losses_absolute)\n', (10081, 10109), True, 'from oemof.solph.plumbing import sequence as solph_sequence\n'), ((68086, 68103), 'oemof.solph.plumbing.sequence', 'solph_sequence', (['i'], {}), '(i)\n', (68100, 68103), True, 'from oemof.solph.plumbing import sequence as solph_sequence\n')] |
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from fastestimator.op import NumpyOp
EPSILON = 1e-7
class Zscore(NumpyOp):
    """
    Standardize data using zscore method
    """
    def forward(self, data, state):
        """
        Standardizes the data

        Args:
            data: Data to be standardized
            state: A dictionary containing background information such as 'mode'

        Returns:
            Array containing standardized data
        """
        # Subtract the mean, then divide by the standard deviation; the
        # EPSILON floor guards against division by (near-)zero std.
        centered = data - np.mean(data)
        return centered / max(np.std(data), EPSILON)
| [
"numpy.mean",
"numpy.std"
] | [((1156, 1169), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (1163, 1169), True, 'import numpy as np\n'), ((1184, 1196), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (1190, 1196), True, 'import numpy as np\n')] |
#%%
import numpy as np
import pandas as pd
import torch
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
class dataloader():
    """Namespace class: loads the DAX price series, turns it into scaled
    (squared) log-return windows as torch tensors, and batches them."""

    @staticmethod
    def data_function(squared, trainsize, valsize, testsize, input_window, output_window):
        """Load '^GDAXI-10y.csv' and build windowed train/val/test tensors.

        Args:
            squared: if True use squared log returns, else raw log returns.
            trainsize/valsize/testsize: fractions of the series per split.
            input_window: length of each input sequence.
            output_window: shift between input and target sequence.

        Returns:
            (x_train, y_train, x_val, y_val, x_test, y_test,
             scaler1, scaler2, time, price)
        """
        df = pd.read_csv('^GDAXI-10y.csv', delimiter=',')
        # linear interpolation for missing values
        df['Adj Close'].interpolate(method='index', inplace=True)
        time = np.asarray(df['Date'])
        price = np.asarray(df['Adj Close'])
        log_returns = np.diff(np.log(price))
        data = log_returns ** 2 if squared else log_returns

        # split data set
        training_size = int(trainsize * len(data))
        test_size = int(testsize * len(data))
        validation_size = int(valsize * len(data))
        train_data = data[:training_size]
        val_data = data[training_size:len(data) - test_size]
        # NOTE(review): the '+2' offset is undocumented (window alignment?)
        # -- kept as-is, confirm against the original experiment setup.
        test_data = data[training_size + validation_size + 2:]
        train_val = data[:-len(test_data)]

        # Scalers are fitted on training data (train+val for the test
        # scaler) only, so no test information leaks into the scaling.
        scaler1 = MinMaxScaler(feature_range=(0, 1)).fit(train_data.reshape(-1, 1))
        train_arr = scaler1.transform(train_data.reshape(-1, 1))
        val_arr = scaler1.transform(val_data.reshape(-1, 1))
        scaler2 = MinMaxScaler(feature_range=(0, 1)).fit(train_val.reshape(-1, 1))
        test_arr = scaler2.transform(test_data.reshape(-1, 1))

        def _transform_data(arr, input_window, output_window):
            # Sliding windows: y is the x window shifted output_window steps.
            x = np.asarray([arr[i: i + input_window]
                            for i in range(len(arr) - input_window)])
            y = np.asarray([arr[i + output_window: i + input_window + output_window]
                            for i in range(len(arr) - input_window)])
            # from_numpy(...).float() already yields a FloatTensor; the old
            # extra torch.FloatTensor(...) wrapper was redundant.
            return torch.from_numpy(x).float(), torch.from_numpy(y).float()

        x_train, y_train = _transform_data(train_arr, input_window, output_window)
        x_val, y_val = _transform_data(val_arr, input_window, output_window)
        x_test, y_test = _transform_data(test_arr, input_window, output_window)
        return x_train, y_train, x_val, y_val, x_test, y_test, scaler1, scaler2, time, price

    @staticmethod
    def generate_batch_data(x, y, batch_size):
        """Yield successive (x_batch, y_batch) slices of length batch_size.

        Mirrors the original behaviour: because the range stops before
        len(x) - batch_size, a trailing partial batch (and possibly the
        final full batch) is not yielded.
        """
        for i in range(0, len(x) - batch_size, batch_size):
            yield x[i: i + batch_size], y[i: i + batch_size]
| [
"pandas.read_csv",
"numpy.log",
"numpy.asarray",
"torch.from_numpy",
"sklearn.preprocessing.MinMaxScaler"
] | [((311, 355), 'pandas.read_csv', 'pd.read_csv', (['"""^GDAXI-10y.csv"""'], {'delimiter': '""","""'}), "('^GDAXI-10y.csv', delimiter=',')\n", (322, 355), True, 'import pandas as pd\n'), ((488, 510), 'numpy.asarray', 'np.asarray', (["df['Date']"], {}), "(df['Date'])\n", (498, 510), True, 'import numpy as np\n'), ((527, 554), 'numpy.asarray', 'np.asarray', (["df['Adj Close']"], {}), "(df['Adj Close'])\n", (537, 554), True, 'import numpy as np\n'), ((585, 598), 'numpy.log', 'np.log', (['price'], {}), '(price)\n', (591, 598), True, 'import numpy as np\n'), ((1181, 1215), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (1193, 1215), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler\n'), ((1393, 1427), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (1405, 1427), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler\n'), ((1880, 1899), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (1896, 1899), False, 'import torch\n'), ((1947, 1966), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (1963, 1966), False, 'import torch\n')] |
import sys
sys.path.append('./fine-tuning')
sys.path.append('./gender')
from bias_rv.MutantGeneration import MutantGeneration
import random
import numpy as np
# Fix both RNGs so the random mutant sampling in biasRV.verify is reproducible.
seed = 42
random.seed(seed)
np.random.seed(seed)
def check_property_1(original_result, female_mut_results, male_mut_results, N):
    """True iff both genders' mutant predictions sum to the same total and
    that total equals the original prediction repeated over all N mutants."""
    female_total = sum(female_mut_results)
    male_total = sum(male_mut_results)
    return female_total == male_total and female_total == original_result * N
class biasRV():
    """Runtime verifier for gender fairness of a binary text classifier.

    Wraps a ``predict`` callable and checks its prediction on
    gender-swapped mutants of the input text (from ``MutantGeneration``).
    """

    def __init__(self, predict, X, Y, alpha):
        # predict: callable text -> prediction (summed below, so numeric)
        # X: number of mutants sampled per gender in stage one
        # Y: number of additional mutants sampled for stage two
        # alpha: tolerated |pos_M - pos_F| gap before flagging bias
        self.predict = predict
        self.X = X
        self.Y = Y
        self.alpha = alpha

    def set_predictor(self, predict):
        self.predict = predict

    def set_X(self, X):
        self.X = X

    def set_Y(self, Y):
        self.Y = Y

    def set_alpha(self, alpha):
        self.alpha = alpha

    def verify_only_property_2(self, text: str):
        '''
        Only verify using the distributional individual fairness.
        '''
        is_satisfy_prop_2 = True
        original_result = self.predict(text)
        mg = MutantGeneration(text)
        alpha = self.alpha
        # Without mutants there is nothing to compare, so the property
        # trivially holds (is_satisfy_prop_2 stays True).
        if len(mg.getMutants()) > 0:
            ### if there are mutants generated
            male_mutants = mg.get_male_mutants()
            female_mutants = mg.get_female_mutants()
            male_mut_results = []
            for each_text in male_mutants:
                male_mut_results.append(self.predict(each_text))
            female_mut_results = []
            for each_text in female_mutants:
                female_mut_results.append(self.predict(each_text))
            # Positive-prediction rates per gender over ALL mutants.
            pos_M = 1.0 * sum(male_mut_results) / (len(male_mut_results))
            pos_F = 1.0 * sum(female_mut_results) / (len(female_mut_results))
            is_satisfy_prop_2 = True if abs(pos_M - pos_F) < alpha else False
        return original_result, is_satisfy_prop_2

    def verify(self, text: str):
        # Two-stage check: property (1) on N samples per gender first,
        # then property (2) on N + L samples only if (1) fails.
        N = self.X
        L = self.Y
        alpha = self.alpha
        is_bias = False
        is_satisfy_prop_1 = True
        is_satisfy_prop_2 = True
        # generate mutants
        original_result = self.predict(text)
        mg = MutantGeneration(text)
        if len(mg.getMutants()) > 0:
            ### if there are mutants generated
            male_mutants = mg.get_male_mutants()
            female_mutants = mg.get_female_mutants()
            assert len(male_mutants) == len(female_mutants)
            # Clamp N and L so at most all available mutants are sampled.
            if N > len(female_mutants):
                N = len(female_mutants)
                L = 0
            elif N + L > len(female_mutants):
                L = len(female_mutants) - N
            ### select N mutants from each gender
            # random selection
            sampled_male_mutants = random.sample(male_mutants, N + L)
            sampled_female_mutants = random.sample(female_mutants, N + L)
            ## processing male_mutants
            male_mut_results = []
            for each_text in sampled_male_mutants[0: N]:
                male_mut_results.append(self.predict(each_text))
            ## processing female_mutants
            female_mut_results = []
            for each_text in sampled_female_mutants[0: N]:
                female_mut_results.append(self.predict(each_text))
            ### verify property (1)
            is_satisfy_prop_1 = check_property_1(original_result, female_mut_results, male_mut_results, N)
            if is_satisfy_prop_1:
                ### satisfy property (1), no bias
                pass
            else:
                ### progress to step (2)
                # compute pos_M for male over the extra L samples as well
                for each_text in sampled_male_mutants[N: N + L]:
                    male_mut_results.append(self.predict(each_text))
                pos_M = 1.0 * sum(male_mut_results) / (N + L)
                # compute pos_F for female
                for each_text in sampled_female_mutants[N: N + L]:
                    female_mut_results.append(self.predict(each_text))
                pos_F = 1.0 * sum(female_mut_results) / (N + L)
                ### verify property (2) |pos_M - pos_F| < alpha
                is_satisfy_prop_2 = True if abs(pos_M - pos_F) < alpha else False
                if not is_satisfy_prop_2:
                    is_bias = True
return original_result, is_bias | [
"random.sample",
"random.seed",
"numpy.random.seed",
"bias_rv.MutantGeneration.MutantGeneration",
"sys.path.append"
] | [((11, 43), 'sys.path.append', 'sys.path.append', (['"""./fine-tuning"""'], {}), "('./fine-tuning')\n", (26, 43), False, 'import sys\n'), ((44, 71), 'sys.path.append', 'sys.path.append', (['"""./gender"""'], {}), "('./gender')\n", (59, 71), False, 'import sys\n'), ((170, 187), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (181, 187), False, 'import random\n'), ((188, 208), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (202, 208), True, 'import numpy as np\n'), ((1015, 1037), 'bias_rv.MutantGeneration.MutantGeneration', 'MutantGeneration', (['text'], {}), '(text)\n', (1031, 1037), False, 'from bias_rv.MutantGeneration import MutantGeneration\n'), ((2135, 2157), 'bias_rv.MutantGeneration.MutantGeneration', 'MutantGeneration', (['text'], {}), '(text)\n', (2151, 2157), False, 'from bias_rv.MutantGeneration import MutantGeneration\n'), ((2725, 2759), 'random.sample', 'random.sample', (['male_mutants', '(N + L)'], {}), '(male_mutants, N + L)\n', (2738, 2759), False, 'import random\n'), ((2797, 2833), 'random.sample', 'random.sample', (['female_mutants', '(N + L)'], {}), '(female_mutants, N + L)\n', (2810, 2833), False, 'import random\n')] |
import numpy as np
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.spatial import ConvexHull
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
# Pick tensor types and seed the matching RNG depending on CUDA availability.
if torch.cuda.is_available():
    dtype = torch.cuda.FloatTensor
    dtype_l = torch.cuda.LongTensor
    torch.cuda.manual_seed(0)
else:
    dtype = torch.FloatTensor
    dtype_l = torch.LongTensor
    torch.manual_seed(0)


def compute_recovery_rate(pred, labels):
    """Mean fraction of positions whose argmax prediction matches the label.

    Args:
        pred: score tensor; argmax is taken over dimension 2.
        labels: integer label tensor matching pred's first two dimensions.

    Returns:
        A numpy scalar accuracy in [0, 1].
    """
    predicted = pred.max(2)[1]
    mismatch = 1 - torch.eq(predicted, labels).type(dtype)
    per_sample_error = mismatch.mean(1)
    mean_accuracy = (1 - per_sample_error).mean(0).squeeze()
    return mean_accuracy.data.cpu().numpy()
class Logger(object):
    """Collects train/test losses and accuracies, plots them to PNG files
    and saves/loads the model, all under a given logging directory:

      - plots/             loss and accuracy figures
      - parameters/gnn.pt  serialized model
      - experiment.txt     dump of the run arguments
      - results.npz        raw metric arrays
    """

    def __init__(self, path_logger):
        directory = os.path.join(path_logger, 'plots/')
        self.path = path_logger
        self.path_dir = directory
        # Create directory if necessary (no-op when it already exists).
        os.makedirs(directory, exist_ok=True)
        self.loss_train = []
        self.loss_test = []
        self.accuracy_train = []
        self.accuracy_test = []
        self.args = None

    def write_settings(self, args):
        """Write every argument to experiment.txt and cache them in
        ``self.args`` (the plot methods read e.g. 'test_freq' from it)."""
        self.args = {}
        path = os.path.join(self.path, 'experiment.txt')
        with open(path, 'w') as file:
            for arg in vars(args):
                file.write(str(arg) + ' : ' + str(getattr(args, arg)) + '\n')
                self.args[str(arg)] = getattr(args, arg)

    def save_model(self, model):
        """Serialize the whole model object to parameters/gnn.pt."""
        save_dir = os.path.join(self.path, 'parameters/')
        os.makedirs(save_dir, exist_ok=True)
        path = os.path.join(save_dir, 'gnn.pt')
        torch.save(model, path)
        print('Model Saved.')

    def load_model(self):
        """Load and return the model saved by :meth:`save_model`.

        Terminates the process when no saved parameters exist.
        """
        import sys  # BUG FIX: 'sys' was used below but never imported at module level
        load_dir = os.path.join(self.path, 'parameters/')
        # check if any training has been done before.
        if not os.path.isdir(load_dir):
            print("Training has not been done before testing. This session will be terminated.")
            sys.exit()
        path = os.path.join(load_dir, 'gnn.pt')
        print('Loading the most recent model...')
        siamese_gnn = torch.load(path)
        return siamese_gnn

    def add_train_loss(self, loss):
        # Detach the torch scalar into a numpy value before storing.
        self.loss_train.append(loss.data.cpu().numpy())

    def add_test_loss(self, loss):
        self.loss_test.append(loss)

    def add_train_accuracy(self, pred, labels):
        self.accuracy_train.append(compute_recovery_rate(pred, labels))

    def add_test_accuracy(self, pred, labels):
        self.accuracy_test.append(compute_recovery_rate(pred, labels))

    def plot_train_loss(self):
        plt.figure(0)
        plt.clf()
        iters = range(len(self.loss_train))
        plt.semilogy(iters, self.loss_train, 'b')
        plt.xlabel('iterations')
        plt.ylabel('Cross Entropy Loss')
        plt.title('Training Loss: p={}, p_e={}'
                  .format(self.args['edge_density'], self.args['noise']))
        path = os.path.join(self.path_dir, 'training_loss.png')
        plt.savefig(path)

    def plot_test_loss(self):
        plt.figure(1)
        plt.clf()
        test_freq = self.args['test_freq']
        # BUG FIX: 'test_freq * range(...)' raises TypeError on Python 3
        # (int * range is unsupported); scale each index explicitly.
        iters = [test_freq * i for i in range(len(self.loss_test))]
        plt.semilogy(iters, self.loss_test, 'b')
        plt.xlabel('iterations')
        plt.ylabel('Cross Entropy Loss')
        plt.title('Testing Loss: p={}, p_e={}'
                  .format(self.args['edge_density'], self.args['noise']))
        path = os.path.join(self.path_dir, 'testing_loss.png')
        plt.savefig(path)

    def plot_train_accuracy(self):
        plt.figure(0)
        plt.clf()
        iters = range(len(self.accuracy_train))
        plt.plot(iters, self.accuracy_train, 'b')
        plt.xlabel('iterations')
        plt.ylabel('Accuracy')
        plt.title('Training Accuracy: p={}, p_e={}'
                  .format(self.args['edge_density'], self.args['noise']))
        path = os.path.join(self.path_dir, 'training_accuracy.png')
        plt.savefig(path)

    def plot_test_accuracy(self):
        plt.figure(1)
        plt.clf()
        test_freq = self.args['test_freq']
        # BUG FIX: same 'int * range' TypeError as in plot_test_loss.
        iters = [test_freq * i for i in range(len(self.accuracy_test))]
        plt.plot(iters, self.accuracy_test, 'b')
        plt.xlabel('iterations')
        plt.ylabel('Accuracy')
        plt.title('Testing Accuracy: p={}, p_e={}'
                  .format(self.args['edge_density'], self.args['noise']))
        path = os.path.join(self.path_dir, 'testing_accuracy.png')
        plt.savefig(path)

    def save_results(self):
        """Dump all metric lists to results.npz for offline analysis."""
        path = os.path.join(self.path, 'results.npz')
        np.savez(path, accuracy_train=np.array(self.accuracy_train),
                 accuracy_test=np.array(self.accuracy_test),
                 loss_train=self.loss_train, loss_test=self.loss_test)
| [
"torch.manual_seed",
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"torch.load",
"matplotlib.pyplot.clf",
"os.path.join",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"torch.eq",
"numpy.array",
"matplotlib.pyplot.figure",
... | [((50, 71), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (64, 71), False, 'import matplotlib\n'), ((311, 336), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (334, 336), False, 'import torch\n'), ((416, 441), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(0)'], {}), '(0)\n', (438, 441), False, 'import torch\n'), ((517, 537), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (534, 537), False, 'import torch\n'), ((917, 952), 'os.path.join', 'os.path.join', (['path_logger', '"""plots/"""'], {}), "(path_logger, 'plots/')\n", (929, 952), False, 'import os\n'), ((1411, 1452), 'os.path.join', 'os.path.join', (['self.path', '"""experiment.txt"""'], {}), "(self.path, 'experiment.txt')\n", (1423, 1452), False, 'import os\n'), ((1721, 1759), 'os.path.join', 'os.path.join', (['self.path', '"""parameters/"""'], {}), "(self.path, 'parameters/')\n", (1733, 1759), False, 'import os\n'), ((1911, 1943), 'os.path.join', 'os.path.join', (['save_dir', '"""gnn.pt"""'], {}), "(save_dir, 'gnn.pt')\n", (1923, 1943), False, 'import os\n'), ((1953, 1976), 'torch.save', 'torch.save', (['model', 'path'], {}), '(model, path)\n', (1963, 1976), False, 'import torch\n'), ((2106, 2144), 'os.path.join', 'os.path.join', (['self.path', '"""parameters/"""'], {}), "(self.path, 'parameters/')\n", (2118, 2144), False, 'import os\n'), ((2400, 2432), 'os.path.join', 'os.path.join', (['load_dir', '"""gnn.pt"""'], {}), "(load_dir, 'gnn.pt')\n", (2412, 2432), False, 'import os\n'), ((2507, 2523), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (2517, 2523), False, 'import torch\n'), ((3070, 3083), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (3080, 3083), True, 'import matplotlib.pyplot as plt\n'), ((3093, 3102), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3100, 3102), True, 'import matplotlib.pyplot as plt\n'), ((3157, 3198), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['iters', 
'self.loss_train', '"""b"""'], {}), "(iters, self.loss_train, 'b')\n", (3169, 3198), True, 'import matplotlib.pyplot as plt\n'), ((3208, 3232), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations"""'], {}), "('iterations')\n", (3218, 3232), True, 'import matplotlib.pyplot as plt\n'), ((3242, 3274), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cross Entropy Loss"""'], {}), "('Cross Entropy Loss')\n", (3252, 3274), True, 'import matplotlib.pyplot as plt\n'), ((3415, 3463), 'os.path.join', 'os.path.join', (['self.path_dir', '"""training_loss.png"""'], {}), "(self.path_dir, 'training_loss.png')\n", (3427, 3463), False, 'import os\n'), ((3474, 3491), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (3485, 3491), True, 'import matplotlib.pyplot as plt\n'), ((3534, 3547), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3544, 3547), True, 'import matplotlib.pyplot as plt\n'), ((3557, 3566), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3564, 3566), True, 'import matplotlib.pyplot as plt\n'), ((3676, 3716), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['iters', 'self.loss_test', '"""b"""'], {}), "(iters, self.loss_test, 'b')\n", (3688, 3716), True, 'import matplotlib.pyplot as plt\n'), ((3726, 3750), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations"""'], {}), "('iterations')\n", (3736, 3750), True, 'import matplotlib.pyplot as plt\n'), ((3760, 3792), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cross Entropy Loss"""'], {}), "('Cross Entropy Loss')\n", (3770, 3792), True, 'import matplotlib.pyplot as plt\n'), ((3932, 3979), 'os.path.join', 'os.path.join', (['self.path_dir', '"""testing_loss.png"""'], {}), "(self.path_dir, 'testing_loss.png')\n", (3944, 3979), False, 'import os\n'), ((3990, 4007), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (4001, 4007), True, 'import matplotlib.pyplot as plt\n'), ((4055, 4068), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), 
'(0)\n', (4065, 4068), True, 'import matplotlib.pyplot as plt\n'), ((4078, 4087), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4085, 4087), True, 'import matplotlib.pyplot as plt\n'), ((4146, 4187), 'matplotlib.pyplot.plot', 'plt.plot', (['iters', 'self.accuracy_train', '"""b"""'], {}), "(iters, self.accuracy_train, 'b')\n", (4154, 4187), True, 'import matplotlib.pyplot as plt\n'), ((4197, 4221), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations"""'], {}), "('iterations')\n", (4207, 4221), True, 'import matplotlib.pyplot as plt\n'), ((4231, 4253), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (4241, 4253), True, 'import matplotlib.pyplot as plt\n'), ((4398, 4450), 'os.path.join', 'os.path.join', (['self.path_dir', '"""training_accuracy.png"""'], {}), "(self.path_dir, 'training_accuracy.png')\n", (4410, 4450), False, 'import os\n'), ((4461, 4478), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (4472, 4478), True, 'import matplotlib.pyplot as plt\n'), ((4525, 4538), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (4535, 4538), True, 'import matplotlib.pyplot as plt\n'), ((4548, 4557), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4555, 4557), True, 'import matplotlib.pyplot as plt\n'), ((4671, 4711), 'matplotlib.pyplot.plot', 'plt.plot', (['iters', 'self.accuracy_test', '"""b"""'], {}), "(iters, self.accuracy_test, 'b')\n", (4679, 4711), True, 'import matplotlib.pyplot as plt\n'), ((4721, 4745), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations"""'], {}), "('iterations')\n", (4731, 4745), True, 'import matplotlib.pyplot as plt\n'), ((4755, 4777), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (4765, 4777), True, 'import matplotlib.pyplot as plt\n'), ((4921, 4972), 'os.path.join', 'os.path.join', (['self.path_dir', '"""testing_accuracy.png"""'], {}), "(self.path_dir, 'testing_accuracy.png')\n", (4933, 4972), 
False, 'import os\n'), ((4983, 5000), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (4994, 5000), True, 'import matplotlib.pyplot as plt\n'), ((5048, 5086), 'os.path.join', 'os.path.join', (['self.path', '"""results.npz"""'], {}), "(self.path, 'results.npz')\n", (5060, 5086), False, 'import os\n'), ((1089, 1107), 'os.stat', 'os.stat', (['directory'], {}), '(directory)\n', (1096, 1107), False, 'import os\n'), ((1828, 1845), 'os.stat', 'os.stat', (['save_dir'], {}), '(save_dir)\n', (1835, 1845), False, 'import os\n'), ((2227, 2244), 'os.stat', 'os.stat', (['load_dir'], {}), '(load_dir)\n', (2234, 2244), False, 'import os\n'), ((626, 648), 'torch.eq', 'torch.eq', (['pred', 'labels'], {}), '(pred, labels)\n', (634, 648), False, 'import torch\n'), ((1138, 1157), 'os.mkdir', 'os.mkdir', (['directory'], {}), '(directory)\n', (1146, 1157), False, 'import os\n'), ((1876, 1894), 'os.mkdir', 'os.mkdir', (['save_dir'], {}), '(save_dir)\n', (1884, 1894), False, 'import os\n'), ((5126, 5155), 'numpy.array', 'np.array', (['self.accuracy_train'], {}), '(self.accuracy_train)\n', (5134, 5155), True, 'import numpy as np\n'), ((5189, 5217), 'numpy.array', 'np.array', (['self.accuracy_test'], {}), '(self.accuracy_test)\n', (5197, 5217), True, 'import numpy as np\n')] |
import io
import logging
import pathlib
from typing import Dict, Union
import numpy as np
import torchaudio
import torch
from pydub import AudioSegment
logger = logging.getLogger(__name__)
def get_waveform(file: Union[str, pathlib.Path, bytes], params: Dict, global_normalizer=None) -> torch.Tensor:
    """Decode an audio file or in-memory buffer into a float32 waveform tensor.

    The decoded samples are scaled to [-1, 1] and handed to
    ``waveform_preprocessing`` for resampling / channel remixing /
    normalization according to ``params``.

    :param file: path (str / pathlib.Path) or raw encoded bytes/bytearray
    :param params: preprocessing configuration (see waveform_preprocessing)
    :param global_normalizer: optional callable forwarded to preprocessing
    :return: tensor of shape (channels, samples)
    :raises TypeError: if ``file`` is not a supported input type
        (the original code left ``asegment`` unbound in that case).
    """
    if isinstance(file, (str, pathlib.Path)):
        logger.debug(f"Loading: {file}")
        asegment = AudioSegment.from_file(file)
    elif isinstance(file, (bytes, bytearray)):
        asegment = AudioSegment.from_file(io.BytesIO(file))
    else:
        raise TypeError(f"Unsupported audio input type: {type(file)!r}")
    origin_sr = asegment.frame_rate
    channel_sounds = asegment.split_to_mono()
    samples = [s.get_array_of_samples() for s in channel_sounds]
    # Convert integer PCM to float32 scaled to [-1, 1]
    fp_arr = np.array(samples).T.astype(np.float32)
    fp_arr /= np.iinfo(samples[0].typecode).max
    # Convert to tensor of shape (channels, samples)
    waveform = torch.from_numpy(fp_arr).T
    return waveform_preprocessing(waveform, origin_sr, params, global_normalizer)
def waveform_preprocessing(waveform: torch.Tensor, origin_sr: int, params: Dict, global_normalizer=None) -> torch.Tensor:
    """Resample, remix channels and optionally normalize a waveform.

    :param waveform: tensor of shape (channels, samples)
    :param origin_sr: sampling rate of the incoming waveform
    :param params: dict with "sampling_rate", "number_of_channels" and,
        when remixing stereo to mono, "combine_channels"; an optional
        "waveform_normalization" entry selects local/global normalization
    :param global_normalizer: callable applied when the normalization
        scope is "global"
    :return: preprocessed waveform tensor
    """
    in_channels = waveform.size()[0]
    target_sr = params["sampling_rate"]
    out_channels = params["number_of_channels"]

    # Resample only when the source rate differs from the requested one.
    if origin_sr != target_sr:
        logger.debug(f"Original shape: {waveform.shape} and sampling rate {origin_sr}")
        resampler = torchaudio.transforms.Resample(origin_sr, target_sr)
        waveform = resampler(waveform)
        logger.debug(f"Resampled shape: {waveform.shape} and sampling rate {target_sr}")

    # Stereo -> mono, using the configured combination strategy.
    # TODO: Separar las pistas como audios independientes (duplicar a nivel de dataset)
    if out_channels == 1 and in_channels == 2:
        strategy = params['combine_channels']
        if strategy == 'mean':
            waveform = torch.mean(waveform, dim=0, keepdim=True)
        elif strategy == 'left':
            waveform = waveform[0, :].view(1, -1)
        elif strategy == 'right':
            waveform = waveform[1, :].view(1, -1)

    # Mono -> stereo by duplicating the single channel.
    if out_channels == 2 and in_channels == 1:
        waveform = waveform.repeat(2, 1)

    if 'waveform_normalization' in params:
        scope = params['waveform_normalization']['scope']
        if scope == 'local':
            waveform = local_normalizer(waveform, params)
        elif scope == 'global' and global_normalizer is not None:
            waveform = global_normalizer(waveform)
    return waveform
def zscore(waveform):
    """Return (mean, std) of the whole waveform for z-score normalization."""
    # TODO: normalize per channel instead of over the whole tensor
    center = torch.mean(waveform)
    scale = torch.std(waveform)
    return center, scale
def minmax(waveform):
    """Return (minimum, range) of the waveform for min-max normalization."""
    # TODO: normalize per channel instead of over the whole tensor
    lo = torch.min(waveform)
    hi = torch.max(waveform)
    return lo, hi - lo
def local_normalizer(waveform, params):
    """Normalize a waveform using only its own statistics.

    The strategy is read from params['waveform_normalization']['type'].
    'zscore' and 'minmax' shift/scale the signal; 'rms', 'peak' and any
    unknown type currently fall back to the identity transform.
    """
    strategy = params['waveform_normalization']['type']
    if strategy == 'zscore':
        center, scale = zscore(waveform)
    elif strategy == 'minmax':
        center, scale = minmax(waveform)
    else:
        # 'rms' and 'peak' are placeholders in the original code: both
        # resolved to (0, 1), i.e. no change — kept identical here.
        center, scale = 0, 1
    return (waveform - center)/scale
| [
"logging.getLogger",
"torch.mean",
"torch.max",
"io.BytesIO",
"numpy.iinfo",
"torch.from_numpy",
"torch.min",
"torchaudio.transforms.Resample",
"numpy.array",
"pydub.AudioSegment.from_file",
"torch.std"
] | [((162, 189), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (179, 189), False, 'import logging\n'), ((2572, 2591), 'torch.min', 'torch.min', (['waveform'], {}), '(waveform)\n', (2581, 2591), False, 'import torch\n'), ((2602, 2621), 'torch.max', 'torch.max', (['waveform'], {}), '(waveform)\n', (2611, 2621), False, 'import torch\n'), ((424, 452), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', (['file'], {}), '(file)\n', (446, 452), False, 'from pydub import AudioSegment\n'), ((809, 838), 'numpy.iinfo', 'np.iinfo', (['samples[0].typecode'], {}), '(samples[0].typecode)\n', (817, 838), True, 'import numpy as np\n'), ((882, 906), 'torch.from_numpy', 'torch.from_numpy', (['fp_arr'], {}), '(fp_arr)\n', (898, 906), False, 'import torch\n'), ((2464, 2484), 'torch.mean', 'torch.mean', (['waveform'], {}), '(waveform)\n', (2474, 2484), False, 'import torch\n'), ((2486, 2505), 'torch.std', 'torch.std', (['waveform'], {}), '(waveform)\n', (2495, 2505), False, 'import torch\n'), ((1380, 1432), 'torchaudio.transforms.Resample', 'torchaudio.transforms.Resample', (['origin_sr', 'target_sr'], {}), '(origin_sr, target_sr)\n', (1410, 1432), False, 'import torchaudio\n'), ((1759, 1800), 'torch.mean', 'torch.mean', (['waveform'], {'dim': '(0)', 'keepdim': '(True)'}), '(waveform, dim=0, keepdim=True)\n', (1769, 1800), False, 'import torch\n'), ((553, 569), 'io.BytesIO', 'io.BytesIO', (['file'], {}), '(file)\n', (563, 569), False, 'import io\n'), ((756, 773), 'numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (764, 773), True, 'import numpy as np\n')] |
from datetime import timedelta
import numpy as np
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from fedot.core.data.data import InputData
from fedot.core.pipelines.node import PrimaryNode, SecondaryNode
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.pipelines.tuning.sequential import SequentialTuner
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.tasks import Task, TaskTypesEnum
from fedot.utilities.synth_dataset_generator import regression_dataset
np.random.seed(2020)
def get_regression_dataset(features_options, samples_amount=250,
                           features_amount=5):
    """Generate a synthetic regression dataset with mixed-scale features.

    Each feature column is multiplied by a random integer factor in
    [1, 100) so the columns have different scales, then 30% of the rows
    are held out for testing.

    :param features_options: dict with generator options:
        - informative: the amount of informative features;
        - bias: bias term in the underlying linear model;
    :param samples_amount: total number of samples in the dataset
    :param features_amount: total number of features per sample
    :return: (x_train, y_train, x_test, y_test) numpy arrays
    """
    x_data, y_data = regression_dataset(samples_amount=samples_amount,
                                        features_amount=features_amount,
                                        features_options=features_options,
                                        n_targets=1,
                                        noise=0.0, shuffle=True)
    # Rescale every feature column by its own random factor.
    scale_factors = np.random.randint(1, 100, features_amount)
    for column, factor in enumerate(scale_factors):
        x_data[:, column] = np.array(x_data[:, column]) * factor
    # Hold out 30% of the samples for testing.
    x_train, x_test, y_train, y_test = train_test_split(x_data, y_data,
                                                        test_size=0.3)
    return x_train, y_train, x_test, y_test
def run_experiment(pipeline, tuner):
    """Fit and score `pipeline` on three synthetic regression datasets.

    For every (samples, features, options) combination a dataset is
    generated, the same pipeline object is fitted from scratch and its
    MAE on the 30% hold-out is printed.  When `tuner` is not None the
    pipeline is additionally tuned (50 iterations, 50 s timeout) and
    re-scored.

    :param pipeline: FEDOT pipeline to fit; re-fitted for each dataset
    :param tuner: tuner class (e.g. SequentialTuner) or None to skip tuning
    """
    samples = [50, 250, 150]
    features = [1, 5, 10]
    options = [{'informative': 1, 'bias': 0.0},
               {'informative': 2, 'bias': 2.0},
               {'informative': 1, 'bias': 3.0}]
    for samples_amount, features_amount, features_options in zip(samples, features, options):
        print('=======================================')
        print(f'\nAmount of samples {samples_amount}, '
              f'amount of features {features_amount}, '
              f'additional options {features_options}')
        x_train, y_train, x_test, y_test = get_regression_dataset(features_options,
                                                                samples_amount,
                                                                features_amount)
        # Define regression task
        task = Task(TaskTypesEnum.regression)
        # Prepare data to train the model
        train_input = InputData(idx=np.arange(0, len(x_train)),
                                features=x_train,
                                target=y_train,
                                task=task,
                                data_type=DataTypesEnum.table)
        # target=None: predictions only; ground truth stays in y_test.
        predict_input = InputData(idx=np.arange(0, len(x_test)),
                                  features=x_test,
                                  target=None,
                                  task=task,
                                  data_type=DataTypesEnum.table)
        # Fit it
        pipeline.fit_from_scratch(train_input)
        # Predict
        predicted_values = pipeline.predict(predict_input)
        pipeline_prediction = predicted_values.predict
        mae_value = mean_absolute_error(y_test, pipeline_prediction)
        print(f'Mean absolute error - {mae_value:.4f}\n')
        if tuner is not None:
            print(f'Start tuning process ...')
            pipeline_tuner = tuner(pipeline=pipeline, task=task,
                                   iterations=50, timeout=timedelta(seconds=50))
            tuned_pipeline = pipeline_tuner.tune_pipeline(input_data=train_input,
                                                          loss_function=mean_absolute_error)
            # Predict with the tuned pipeline on the same hold-out set
            predicted_values_tuned = tuned_pipeline.predict(predict_input)
            preds_tuned = predicted_values_tuned.predict
            mae_value = mean_absolute_error(y_test, preds_tuned)
            print(f'Obtained metrics after tuning:')
            print(f'MAE - {mae_value:.4f}\n')
# Smoke test: checks that the pipeline can process different regression datasets
if __name__ == '__main__':
    # Chain: RANSAC-filtered linear regression -> scaling -> ridge
    ransac = PrimaryNode('ransac_lin_reg')
    scaler = SecondaryNode('scaling', nodes_from=[ransac])
    ridge = SecondaryNode('ridge', nodes_from=[scaler])
    run_experiment(Pipeline(ridge), tuner=SequentialTuner)
| [
"fedot.utilities.synth_dataset_generator.regression_dataset",
"fedot.core.pipelines.node.SecondaryNode",
"sklearn.model_selection.train_test_split",
"fedot.core.pipelines.pipeline.Pipeline",
"numpy.array",
"numpy.random.randint",
"numpy.random.seed",
"sklearn.metrics.mean_absolute_error",
"datetime.... | [((573, 593), 'numpy.random.seed', 'np.random.seed', (['(2020)'], {}), '(2020)\n', (587, 593), True, 'import numpy as np\n'), ((1341, 1506), 'fedot.utilities.synth_dataset_generator.regression_dataset', 'regression_dataset', ([], {'samples_amount': 'samples_amount', 'features_amount': 'features_amount', 'features_options': 'features_options', 'n_targets': '(1)', 'noise': '(0.0)', 'shuffle': '(True)'}), '(samples_amount=samples_amount, features_amount=\n features_amount, features_options=features_options, n_targets=1, noise=\n 0.0, shuffle=True)\n', (1359, 1506), False, 'from fedot.utilities.synth_dataset_generator import regression_dataset\n'), ((2052, 2099), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_data', 'y_data'], {'test_size': '(0.3)'}), '(x_data, y_data, test_size=0.3)\n', (2068, 2099), False, 'from sklearn.model_selection import train_test_split\n'), ((4865, 4894), 'fedot.core.pipelines.node.PrimaryNode', 'PrimaryNode', (['"""ransac_lin_reg"""'], {}), "('ransac_lin_reg')\n", (4876, 4894), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((4914, 4964), 'fedot.core.pipelines.node.SecondaryNode', 'SecondaryNode', (['"""scaling"""'], {'nodes_from': '[node_ransac]'}), "('scaling', nodes_from=[node_ransac])\n", (4927, 4964), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((4982, 5031), 'fedot.core.pipelines.node.SecondaryNode', 'SecondaryNode', (['"""ridge"""'], {'nodes_from': '[node_scaling]'}), "('ridge', nodes_from=[node_scaling])\n", (4995, 5031), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((5047, 5067), 'fedot.core.pipelines.pipeline.Pipeline', 'Pipeline', (['node_final'], {}), '(node_final)\n', (5055, 5067), False, 'from fedot.core.pipelines.pipeline import Pipeline\n'), ((1770, 1812), 'numpy.random.randint', 'np.random.randint', (['(1)', '(100)', 'features_amount'], {}), '(1, 100, features_amount)\n', (1787, 1812), True, 
'import numpy as np\n'), ((1854, 1876), 'numpy.array', 'np.array', (['x_data[:, i]'], {}), '(x_data[:, i])\n', (1862, 1876), True, 'import numpy as np\n'), ((3058, 3088), 'fedot.core.repository.tasks.Task', 'Task', (['TaskTypesEnum.regression'], {}), '(TaskTypesEnum.regression)\n', (3062, 3088), False, 'from fedot.core.repository.tasks import Task, TaskTypesEnum\n'), ((3893, 3941), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_test', 'pipeline_prediction'], {}), '(y_test, pipeline_prediction)\n', (3912, 3941), False, 'from sklearn.metrics import mean_absolute_error\n'), ((4570, 4610), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_test', 'preds_tuned'], {}), '(y_test, preds_tuned)\n', (4589, 4610), False, 'from sklearn.metrics import mean_absolute_error\n'), ((4201, 4222), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(50)'}), '(seconds=50)\n', (4210, 4222), False, 'from datetime import timedelta\n')] |
import cv2, sys, time
import numpy as np
import tensorflow as tf
from PIL import Image
class SelfieSegMNV2:
    """Selfie/person segmentation with a MobileNetV2-based TFLite model."""

    def __init__(self, width=320, height=240):
        """Load the TFLite model and prepare the colour overlay.

        :param width: width of the mask returned by seg(), in pixels
        :param height: height of the mask returned by seg(), in pixels
        """
        # Initialize tflite-interpreter
        self.width = width
        self.height = height
        self.interpreter = tf.lite.Interpreter(model_path="models/mnv2_seg/deconv_fin_munet.tflite")
        self.interpreter.allocate_tensors()
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
        # Spatial dims of the model input tensor (drops batch/channel axes).
        self.input_shape = self.input_details[0]['shape'][1:3]
        # Image overlay: constant colour multiplied into the mask before it
        # is converted to grayscale and thresholded in seg().
        self.overlay = np.zeros((self.input_shape[0], self.input_shape[1], 3), np.uint8)
        self.overlay[:] = (127, 0, 0)

    def seg(self, frame):
        """Return a binary (0/255) uint8 mask of size (height, width) for a
        BGR input `frame`."""
        # BGR->RGB, CV2->PIL
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(rgb)
        # Resize image to the model input size.
        # NOTE(review): PIL resize() takes (width, height) while TFLite
        # shapes are (height, width); safe only if the model input is square
        # — confirm.  Image.ANTIALIAS is deprecated (removed in Pillow 10);
        # use Image.Resampling.LANCZOS when upgrading Pillow.
        image = image.resize(self.input_shape, Image.ANTIALIAS)
        # Normalization to [0, 1] floats, then add a batch axis
        image = np.asarray(image)
        prepimg = image / 255.0
        prepimg = prepimg[np.newaxis, :, :, :]
        # Segmentation: run the interpreter on the preprocessed image
        self.interpreter.set_tensor(self.input_details[0]['index'], np.array(prepimg, dtype=np.float32))
        self.interpreter.invoke()
        outputs = self.interpreter.get_tensor(self.output_details[0]['index'])
        # Process the output: threshold per-pixel scores into a 0/1 mask
        output = np.uint8(outputs[0] > 0.5)
        res = np.reshape(output, self.input_shape)
        # Colourize via the overlay, upscale to the requested output size,
        # then reduce back to a clean binary 0/255 mask.
        mask = Image.fromarray(np.uint8(res), mode="P")
        mask = np.array(mask.convert("RGB")) * self.overlay
        mask = cv2.resize(np.asarray(mask), (self.width, self.height), interpolation=cv2.INTER_CUBIC)
        mask = cv2.cvtColor(mask, cv2.COLOR_RGB2GRAY)
        _, mask = cv2.threshold(mask, 10, 255, cv2.THRESH_BINARY)
        # frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_CUBIC)
        return mask
if __name__ == "__main__":
    # Demo: replace the webcam background using the segmentation mask.
    width = 320
    height = 240
    seg = SelfieSegMNV2(width, height)
    # Capture video from camera
    cap = cv2.VideoCapture(0)
    cap.set(3, width)   # 3 == CAP_PROP_FRAME_WIDTH
    cap.set(4, height)  # 4 == CAP_PROP_FRAME_HEIGHT
    # Load and resize the background image
    bgd = cv2.imread('./images/background.jpeg')
    bgd = cv2.resize(bgd, (width, height))
    # FPS bookkeeping
    elapsedTime = 0
    count = 0
    # waitKey(1) returns -1 while no key is pressed; any key stops the loop.
    while cv2.waitKey(1) < 0:
        t1 = time.time()
        # Read input frames
        success, frame = cap.read()
        if not success:
            # NOTE(review): cap.release() is also called after the loop, so
            # a failed read releases the capture twice (tolerated by OpenCV).
            cap.release()
            break
        # Get segmentation mask (binary 0/255 from seg())
        mask = seg.seg(frame)
        # Merge with background: keep frame where mask is set, bgd elsewhere
        fg = cv2.bitwise_or(frame, frame, mask=mask)
        bg = cv2.bitwise_or(bgd, bgd, mask=~mask)
        out = cv2.bitwise_or(fg, bg)
        elapsedTime += (time.time() - t1)
        count += 1
        fps = "{:.1f} FPS".format(count / elapsedTime)
        # Show output in window
        cv2.putText(out, fps, (10, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38, 255, 38), 1, cv2.LINE_AA)
        cv2.imshow('Selfie Segmentation', out)
    cv2.destroyAllWindows()
    cap.release()
| [
"tensorflow.lite.Interpreter",
"numpy.uint8",
"PIL.Image.fromarray",
"numpy.reshape",
"cv2.threshold",
"numpy.asarray",
"cv2.putText",
"cv2.imshow",
"numpy.array",
"numpy.zeros",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"time.time",
"cv2.bitwise_or",
... | [((2082, 2101), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (2098, 2101), False, 'import cv2, sys, time\n'), ((2201, 2239), 'cv2.imread', 'cv2.imread', (['"""./images/background.jpeg"""'], {}), "('./images/background.jpeg')\n", (2211, 2239), False, 'import cv2, sys, time\n'), ((2250, 2282), 'cv2.resize', 'cv2.resize', (['bgd', '(width, height)'], {}), '(bgd, (width, height))\n', (2260, 2282), False, 'import cv2, sys, time\n'), ((3045, 3068), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3066, 3068), False, 'import cv2, sys, time\n'), ((280, 353), 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_path': '"""models/mnv2_seg/deconv_fin_munet.tflite"""'}), "(model_path='models/mnv2_seg/deconv_fin_munet.tflite')\n", (299, 353), True, 'import tensorflow as tf\n'), ((643, 708), 'numpy.zeros', 'np.zeros', (['(self.input_shape[0], self.input_shape[1], 3)', 'np.uint8'], {}), '((self.input_shape[0], self.input_shape[1], 3), np.uint8)\n', (651, 708), True, 'import numpy as np\n'), ((817, 855), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (829, 855), False, 'import cv2, sys, time\n'), ((872, 892), 'PIL.Image.fromarray', 'Image.fromarray', (['rgb'], {}), '(rgb)\n', (887, 892), False, 'from PIL import Image\n'), ((1022, 1039), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1032, 1039), True, 'import numpy as np\n'), ((1408, 1434), 'numpy.uint8', 'np.uint8', (['(outputs[0] > 0.5)'], {}), '(outputs[0] > 0.5)\n', (1416, 1434), True, 'import numpy as np\n'), ((1449, 1485), 'numpy.reshape', 'np.reshape', (['output', 'self.input_shape'], {}), '(output, self.input_shape)\n', (1459, 1485), True, 'import numpy as np\n'), ((1719, 1757), 'cv2.cvtColor', 'cv2.cvtColor', (['mask', 'cv2.COLOR_RGB2GRAY'], {}), '(mask, cv2.COLOR_RGB2GRAY)\n', (1731, 1757), False, 'import cv2, sys, time\n'), ((1776, 1823), 'cv2.threshold', 'cv2.threshold', (['mask', '(10)', 
'(255)', 'cv2.THRESH_BINARY'], {}), '(mask, 10, 255, cv2.THRESH_BINARY)\n', (1789, 1823), False, 'import cv2, sys, time\n'), ((2329, 2343), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2340, 2343), False, 'import cv2, sys, time\n'), ((2362, 2373), 'time.time', 'time.time', ([], {}), '()\n', (2371, 2373), False, 'import cv2, sys, time\n'), ((2614, 2653), 'cv2.bitwise_or', 'cv2.bitwise_or', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (2628, 2653), False, 'import cv2, sys, time\n'), ((2667, 2703), 'cv2.bitwise_or', 'cv2.bitwise_or', (['bgd', 'bgd'], {'mask': '(~mask)'}), '(bgd, bgd, mask=~mask)\n', (2681, 2703), False, 'import cv2, sys, time\n'), ((2718, 2740), 'cv2.bitwise_or', 'cv2.bitwise_or', (['fg', 'bg'], {}), '(fg, bg)\n', (2732, 2740), False, 'import cv2, sys, time\n'), ((2899, 2997), 'cv2.putText', 'cv2.putText', (['out', 'fps', '(10, 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(38, 255, 38)', '(1)', 'cv2.LINE_AA'], {}), '(out, fps, (10, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38, 255, 38\n ), 1, cv2.LINE_AA)\n', (2910, 2997), False, 'import cv2, sys, time\n'), ((3001, 3039), 'cv2.imshow', 'cv2.imshow', (['"""Selfie Segmentation"""', 'out'], {}), "('Selfie Segmentation', out)\n", (3011, 3039), False, 'import cv2, sys, time\n'), ((1211, 1246), 'numpy.array', 'np.array', (['prepimg'], {'dtype': 'np.float32'}), '(prepimg, dtype=np.float32)\n', (1219, 1246), True, 'import numpy as np\n'), ((1517, 1530), 'numpy.uint8', 'np.uint8', (['res'], {}), '(res)\n', (1525, 1530), True, 'import numpy as np\n'), ((1628, 1644), 'numpy.asarray', 'np.asarray', (['mask'], {}), '(mask)\n', (1638, 1644), True, 'import numpy as np\n'), ((2766, 2777), 'time.time', 'time.time', ([], {}), '()\n', (2775, 2777), False, 'import cv2, sys, time\n')] |
import os
import uuid
from pathlib import Path
import semver
import streamlit as st
from configs.config import config
from utils.helpers import (create_dir, data_uploader, get_model_dir,
train_test_split, underscore_seperated_path)
import numpy as np
# Streamlit page: collect labelled images per class, choose a train/test
# split and save the dataset into the local workspace.
np.random.seed(43)  # fixed seed — presumably for a reproducible split; confirm in helpers
model_version = semver.VersionInfo.parse("1.0.0")
st.header("Data Uploader")
workspace_exists = os.path.exists(config.WORKSPACE)
# Check workspace exists; show a setup hint instead of crashing.
try:
    assert workspace_exists
except Exception as e:
    st.error(
        "Error: Local workspace not found. Did you setup enviornment with `make init`?"
    )
if (
    workspace_exists
):  # dont render UI without workspace init (helps reduce multiple folder exists checks)
    data = {}  # class name -> list of uploaded files
    # Input fields
    model_name = Path(st.text_input("Enter Model Name"))
    class_count = st.number_input("How many classes?", 1, 1000, value=2)
    # Save values in URL (query params survive Streamlit reruns)
    st.experimental_set_query_params(class_count=class_count, model_name=model_name)
    # Retrieve app state
    app_state = st.experimental_get_query_params()
    # Render file uploaders for each class
    if "class_count" in app_state:
        class_count = app_state["class_count"][0]
        for class_x in range(int(class_count)):
            st.markdown("---")
            st.subheader("Upload Class")
            class_name = st.text_input("Class Name", key=class_x)
            data[class_name] = st.file_uploader(
                "Upload images", key=class_x, accept_multiple_files=True
            )
    else:
        st.write("No result to display, compute a value first.")
    st.markdown("---")
    # Train test split ratio
    st.title("Train test split")
    train_split_percentage = st.slider(
        "Train data %", min_value=0, max_value=100, value=config.TRAIN_PERCENT, step=5
    )
    test_split_percentage = st.slider(
        "Test data %", min_value=0, max_value=100, value=100 - train_split_percentage
    )
    # Validate split: both sliders together must cover exactly 100%.
    total_data_coverage = train_split_percentage + test_split_percentage
    st.write("Data coverage:", total_data_coverage, "%")
    if total_data_coverage != 100:
        st.error("Split error, adjust train to cover 100%")
    train_split_percentage /= 100  # convert percent -> fraction for the split helper
    # Save dataset
    if st.button("Save Data"):
        local_model_path = underscore_seperated_path(str(model_name))
        model_path = config.WORKSPACE / local_model_path
        model_dir, model_version = get_model_dir(path=model_path, version=model_version)
        st.experimental_set_query_params(
            class_count=class_count,
            model_name=local_model_path,
            model_version=model_version,
        )
        # Create model folder in workspace
        create_dir(model_dir)
        # Create class folders with uploaded data
        df, csv_dir = data_uploader(data, model_dir)
        train_df, test_df = train_test_split(
            df, csv_path=csv_dir, train_percentage=train_split_percentage
        )
        st.success(f"Data saved successfully for {model_name} v{model_version} 🎊")
        st.markdown("---")
        # st.subheader('Add more data')
        # st.subheader("To append data, push new images to:")
        # st.write(model_dir.absolute())
        # st.markdown("---")
        # Next-step instructions for the user.
        st.title("Train")
        st.text("To start training, run")
        st.code(f"make train")
        st.text(f"And select from following in the dropdown")
        st.write("Model Name:", model_name)
        st.write("Model Version:", model_version)
| [
"utils.helpers.create_dir",
"streamlit.button",
"streamlit.code",
"streamlit.text_input",
"streamlit.header",
"streamlit.title",
"os.path.exists",
"streamlit.experimental_get_query_params",
"utils.helpers.data_uploader",
"utils.helpers.train_test_split",
"numpy.random.seed",
"utils.helpers.get... | [((282, 300), 'numpy.random.seed', 'np.random.seed', (['(43)'], {}), '(43)\n', (296, 300), True, 'import numpy as np\n'), ((318, 351), 'semver.VersionInfo.parse', 'semver.VersionInfo.parse', (['"""1.0.0"""'], {}), "('1.0.0')\n", (342, 351), False, 'import semver\n'), ((353, 379), 'streamlit.header', 'st.header', (['"""Data Uploader"""'], {}), "('Data Uploader')\n", (362, 379), True, 'import streamlit as st\n'), ((399, 431), 'os.path.exists', 'os.path.exists', (['config.WORKSPACE'], {}), '(config.WORKSPACE)\n', (413, 431), False, 'import os\n'), ((847, 901), 'streamlit.number_input', 'st.number_input', (['"""How many classes?"""', '(1)', '(1000)'], {'value': '(2)'}), "('How many classes?', 1, 1000, value=2)\n", (862, 901), True, 'import streamlit as st\n'), ((932, 1017), 'streamlit.experimental_set_query_params', 'st.experimental_set_query_params', ([], {'class_count': 'class_count', 'model_name': 'model_name'}), '(class_count=class_count, model_name=model_name\n )\n', (964, 1017), True, 'import streamlit as st\n'), ((1054, 1088), 'streamlit.experimental_get_query_params', 'st.experimental_get_query_params', ([], {}), '()\n', (1086, 1088), True, 'import streamlit as st\n'), ((1619, 1637), 'streamlit.markdown', 'st.markdown', (['"""---"""'], {}), "('---')\n", (1630, 1637), True, 'import streamlit as st\n'), ((1672, 1700), 'streamlit.title', 'st.title', (['"""Train test split"""'], {}), "('Train test split')\n", (1680, 1700), True, 'import streamlit as st\n'), ((1730, 1824), 'streamlit.slider', 'st.slider', (['"""Train data %"""'], {'min_value': '(0)', 'max_value': '(100)', 'value': 'config.TRAIN_PERCENT', 'step': '(5)'}), "('Train data %', min_value=0, max_value=100, value=config.\n TRAIN_PERCENT, step=5)\n", (1739, 1824), True, 'import streamlit as st\n'), ((1862, 1954), 'streamlit.slider', 'st.slider', (['"""Test data %"""'], {'min_value': '(0)', 'max_value': '(100)', 'value': '(100 - train_split_percentage)'}), "('Test data %', min_value=0, 
max_value=100, value=100 -\n train_split_percentage)\n", (1871, 1954), True, 'import streamlit as st\n'), ((2064, 2116), 'streamlit.write', 'st.write', (['"""Data coverage:"""', 'total_data_coverage', '"""%"""'], {}), "('Data coverage:', total_data_coverage, '%')\n", (2072, 2116), True, 'import streamlit as st\n'), ((2274, 2296), 'streamlit.button', 'st.button', (['"""Save Data"""'], {}), "('Save Data')\n", (2283, 2296), True, 'import streamlit as st\n'), ((518, 617), 'streamlit.error', 'st.error', (['"""Error: Local workspace not found. Did you setup enviornment with `make init`?"""'], {}), "(\n 'Error: Local workspace not found. Did you setup enviornment with `make init`?'\n )\n", (526, 617), True, 'import streamlit as st\n'), ((794, 827), 'streamlit.text_input', 'st.text_input', (['"""Enter Model Name"""'], {}), "('Enter Model Name')\n", (807, 827), True, 'import streamlit as st\n'), ((1558, 1614), 'streamlit.write', 'st.write', (['"""No result to display, compute a value first."""'], {}), "('No result to display, compute a value first.')\n", (1566, 1614), True, 'import streamlit as st\n'), ((2160, 2211), 'streamlit.error', 'st.error', (['"""Split error, adjust train to cover 100%"""'], {}), "('Split error, adjust train to cover 100%')\n", (2168, 2211), True, 'import streamlit as st\n'), ((2460, 2513), 'utils.helpers.get_model_dir', 'get_model_dir', ([], {'path': 'model_path', 'version': 'model_version'}), '(path=model_path, version=model_version)\n', (2473, 2513), False, 'from utils.helpers import create_dir, data_uploader, get_model_dir, train_test_split, underscore_seperated_path\n'), ((2523, 2643), 'streamlit.experimental_set_query_params', 'st.experimental_set_query_params', ([], {'class_count': 'class_count', 'model_name': 'local_model_path', 'model_version': 'model_version'}), '(class_count=class_count, model_name=\n local_model_path, model_version=model_version)\n', (2555, 2643), True, 'import streamlit as st\n'), ((2738, 2759), 
'utils.helpers.create_dir', 'create_dir', (['model_dir'], {}), '(model_dir)\n', (2748, 2759), False, 'from utils.helpers import create_dir, data_uploader, get_model_dir, train_test_split, underscore_seperated_path\n'), ((2833, 2863), 'utils.helpers.data_uploader', 'data_uploader', (['data', 'model_dir'], {}), '(data, model_dir)\n', (2846, 2863), False, 'from utils.helpers import create_dir, data_uploader, get_model_dir, train_test_split, underscore_seperated_path\n'), ((2893, 2972), 'utils.helpers.train_test_split', 'train_test_split', (['df'], {'csv_path': 'csv_dir', 'train_percentage': 'train_split_percentage'}), '(df, csv_path=csv_dir, train_percentage=train_split_percentage)\n', (2909, 2972), False, 'from utils.helpers import create_dir, data_uploader, get_model_dir, train_test_split, underscore_seperated_path\n'), ((3004, 3078), 'streamlit.success', 'st.success', (['f"""Data saved successfully for {model_name} v{model_version} 🎊"""'], {}), "(f'Data saved successfully for {model_name} v{model_version} 🎊')\n", (3014, 3078), True, 'import streamlit as st\n'), ((3087, 3105), 'streamlit.markdown', 'st.markdown', (['"""---"""'], {}), "('---')\n", (3098, 3105), True, 'import streamlit as st\n'), ((3286, 3303), 'streamlit.title', 'st.title', (['"""Train"""'], {}), "('Train')\n", (3294, 3303), True, 'import streamlit as st\n'), ((3312, 3345), 'streamlit.text', 'st.text', (['"""To start training, run"""'], {}), "('To start training, run')\n", (3319, 3345), True, 'import streamlit as st\n'), ((3354, 3376), 'streamlit.code', 'st.code', (['f"""make train"""'], {}), "(f'make train')\n", (3361, 3376), True, 'import streamlit as st\n'), ((3385, 3438), 'streamlit.text', 'st.text', (['f"""And select from following in the dropdown"""'], {}), "(f'And select from following in the dropdown')\n", (3392, 3438), True, 'import streamlit as st\n'), ((3447, 3482), 'streamlit.write', 'st.write', (['"""Model Name:"""', 'model_name'], {}), "('Model Name:', model_name)\n", (3455, 3482), 
True, 'import streamlit as st\n'), ((3491, 3532), 'streamlit.write', 'st.write', (['"""Model Version:"""', 'model_version'], {}), "('Model Version:', model_version)\n", (3499, 3532), True, 'import streamlit as st\n'), ((1278, 1296), 'streamlit.markdown', 'st.markdown', (['"""---"""'], {}), "('---')\n", (1289, 1296), True, 'import streamlit as st\n'), ((1309, 1337), 'streamlit.subheader', 'st.subheader', (['"""Upload Class"""'], {}), "('Upload Class')\n", (1321, 1337), True, 'import streamlit as st\n'), ((1363, 1403), 'streamlit.text_input', 'st.text_input', (['"""Class Name"""'], {'key': 'class_x'}), "('Class Name', key=class_x)\n", (1376, 1403), True, 'import streamlit as st\n'), ((1435, 1509), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload images"""'], {'key': 'class_x', 'accept_multiple_files': '(True)'}), "('Upload images', key=class_x, accept_multiple_files=True)\n", (1451, 1509), True, 'import streamlit as st\n')] |
import os
import json
import numpy
import math
from PIL import Image, ImageDraw, ImageFont
import copy
from tqdm import tqdm
# Colour palette keyed by category id, used when visualising detections.
type_dict = {0:(0,255,0),1:(255,0,0),2:(230,230,0),3:(230,0,233),4:(255,0,255),5:(125, 255, 233)}
def get_point(points, threshold):
    """Return the points whose detection score exceeds *threshold*.

    Args:
        points: iterable of dicts, each with at least a 'score' key.
        threshold: minimum (exclusive) score for a point to be kept.

    Returns:
        list of the qualifying point dicts, in their original order.
    """
    # The original also kept a running count, but it was never used.
    return [point for point in points if point['score'] > threshold]
def drawLine(im, x, y, w, h, type):
    '''
    Draw a rectangle outline (plus its left edge again) on the image.
    :param im: image (array-like; converted through Image.fromarray)
    :param x, y: top-left corner of the rectangle
    :param w, h: width and height of the rectangle
    :param type: fill colour passed to ImageDraw.line
    :return: None
    '''
    # NOTE(review): Image.fromarray builds a *new* image from im's array
    # interface, so when im is already a PIL Image the lines are drawn on a
    # copy and discarded — confirm the intended input type with the callers.
    im = Image.fromarray(im)
    draw = ImageDraw.Draw(im)
    # Open polyline around the four corners, then the left edge to close it.
    xy_list = [(x, y), (x+w, y), (x+w, y+h), (x, y+h)]
    xy_list2 = [(x, y), (x, y+h)]
    draw.line(xy_list, fill = type, width = 2)
    draw.line(xy_list2, fill= type , width= 2)
    del draw
def drawData(im, x, y, w, h, datum):
    """Write *datum* (two decimals) just above the point (x, y) on *im*."""
    text_color = "#66ccff"
    pen = ImageDraw.Draw(im)
    pen.text((int(x), int(y - 8)), u'%.2f' % datum, fill=text_color)
    del pen
def draw_group(groups, im):
    """Outline every (x0, y0, x1, y1) box in *groups* on image *im* in blue."""
    for x0, y0, x1, y1 in groups:
        drawLine(im, x0, y0, x1 - x0, y1 - y0, (0, 0, 255))
def draw_data(groups, im, min_value, max_value, plot_area):
    """Annotate each box in *groups* with its estimated data value.

    The bar height (y1 - y0) is converted into a fraction of the plot
    area's height and mapped linearly onto [min_value, max_value].

    Args:
        groups: list of (x0, y0, x1, y1) boxes.
        im: image to draw on.
        min_value, max_value: real value range of the y axis.
        plot_area: (x0, y0, x1, y1) of the chart's plotting region.
    """
    # Hoisted loop invariant; the original also computed an unused
    # horizontal fraction per box.
    plot_height = plot_area[3] - plot_area[1]
    for x0, y0, x1, y1 in groups:
        frac_y = (y1 - y0) / plot_height
        value = (max_value - min_value) * frac_y + min_value
        drawData(im, x0, y0, x1 - x0, y1 - y0, value)
def get_data(groups, plot_area):
    """Convert detected boxes into sorted fractional bar heights.

    Each box (x0, y0, x1, y1) yields an x-fraction (horizontal position in
    the plot area) and a y-fraction (bar height relative to the plot
    height).  Entries are ordered left-to-right (ties broken by height) and
    only the height fractions are returned.
    """
    plot_width = plot_area[2] - plot_area[0]
    plot_height = plot_area[3] - plot_area[1]
    data = [[(g[0] - plot_area[0]) / plot_width, (g[3] - g[1]) / plot_height]
            for g in groups]
    data.sort(key=lambda pair: pair[0] * 1000 + pair[1])
    return [pair[1] for pair in data]
def get_data_divided(groups, plot_area):
    """Apply the get_data conversion separately to each sub-list of boxes.

    Args:
        groups: list of box lists (one list per colour cluster).
        plot_area: (x0, y0, x1, y1) of the chart's plotting region.

    Returns:
        one sorted list of fractional bar heights per cluster.
    """
    plot_width = plot_area[2] - plot_area[0]
    plot_height = plot_area[3] - plot_area[1]
    data_divided = []
    for gset in groups:
        pairs = [[(g[0] - plot_area[0]) / plot_width,
                  (g[3] - g[1]) / plot_height] for g in gset]
        pairs.sort(key=lambda pair: pair[0] * 1000 + pair[1])
        data_divided.append([pair[1] for pair in pairs])
    return data_divided
def scale_adjust(data, x_min, x_max, y_min, y_max):
    """Map fractional (x, y) points onto the real axis ranges.

    Args:
        data: iterable of [frac_x, frac_y] pairs in [0, 1].
        x_min, x_max: real value range of the x axis.
        y_min, y_max: real value range of the y axis.

    Returns:
        list of [x, y] pairs in real coordinates.
    """
    true_data = []
    for point in data:
        true_x = (x_max - x_min) * point[0] + x_min
        # BUG FIX: the y coordinate must be scaled from point[1];
        # the original mistakenly reused point[0] here.
        true_y = (y_max - y_min) * point[1] + y_min
        true_data.append([true_x, true_y])
    return true_data
def cal_dis(a, b):
    """Signed displacement score between keypoints *a* and *b*.

    Larger values mean *b* lies further right of / below *a*; the
    horizontal offset dominates and the vertical one is weighted by 0.1.
    """
    ax, ay = a['bbox'][0], a['bbox'][1]
    bx, by = b['bbox'][0], b['bbox'][1]
    return -(ax - bx + 0.1 * (ay - by))
def estimate_zero_line(br_keys):
    """Estimate the y coordinate of the chart baseline (zero line).

    Computes the score-weighted mean of the bottom-right keypoints' y
    coordinates, then averages only the inliers that lie within one
    weighted standard deviation of that mean.

    Args:
        br_keys: list of dicts with 'bbox' ([x, y, ...]) and 'score' keys.

    Returns:
        float: estimated baseline y coordinate.
    """
    ys_sum = 0
    score_sum = 0
    for key in br_keys:
        ys_sum += key['score'] * key['bbox'][1]
        score_sum += key['score']
    mean = ys_sum / score_sum
    # Score-weighted variance of the y coordinates.
    # BUG FIX: the original measured the deviation of the *score*
    # (key['score'] - mean) instead of the y coordinate.
    temp = 0
    for key in br_keys:
        temp += math.pow(key['bbox'][1] - mean, 2) * key['score']
    temp /= score_sum
    std = math.sqrt(temp)
    new_ys = [key['bbox'][1] for key in br_keys if abs(key['bbox'][1] - mean) < std]
    if not new_ys:
        # Degenerate case (e.g. zero spread): fall back to the weighted mean
        # instead of returning NaN from an empty array.
        return mean
    return numpy.array(new_ys).mean()
def group_point(tl_keys, br_keys):
    """Pair every top-left keypoint with its best bottom-right keypoint.

    A candidate bottom-right point must lie more than 20px right of and
    20px below the top-left point; among the candidates, the one with the
    smallest cal_dis displacement score wins.

    Returns:
        list of [tl_x, tl_y, br_x, br_y] boxes, one per matched tl point.
    """
    pairs = []
    for tl in tl_keys:
        tl_x, tl_y = tl['bbox'][0], tl['bbox'][1]
        best = None
        best_score = 9999999999
        for br in br_keys:
            # Candidate must be sufficiently right of and below the corner.
            if br['bbox'][0] > tl_x + 20 and br['bbox'][1] > tl_y + 20:
                score = cal_dis(tl, br)
                if score < best_score:
                    best_score = score
                    best = br
        if best is not None:
            pairs.append([tl_x, tl_y, best['bbox'][0], best['bbox'][1]])
    return pairs
class UnionFindSet(object):
    """Union-find (disjoint sets) over the indices 0..len(data_list)-1.

    Uses path compression in find_head and union by size.
    """
    def __init__(self, data_list):
        self.father_dict = {}  # index -> parent index
        self.size_dict = {}    # root index -> size of its set
        for i in range(len(data_list)):
            self.father_dict[i] = i
            self.size_dict[i] = 1

    def find_head(self, ID):
        """Return the root of ID's set, compressing the path on the way."""
        father = self.father_dict[ID]
        if ID != father:
            father = self.find_head(father)
            self.father_dict[ID] = father
        return father

    def is_same_set(self, ID_a, ID_b):
        """True if both indices currently belong to the same set."""
        return self.find_head(ID_a) == self.find_head(ID_b)

    def union(self, ID_a, ID_b):
        """Merge the sets containing ID_a and ID_b (no-op if either is None)."""
        # BUG FIX: the original tested "ID_a is None or ID_a is None",
        # so a None ID_b slipped through and crashed find_head.
        if ID_a is None or ID_b is None:
            return
        a_head = self.find_head(ID_a)
        b_head = self.find_head(ID_b)
        if a_head != b_head:
            a_set_size = self.size_dict[a_head]
            b_set_size = self.size_dict[b_head]
            # Attach the smaller tree under the larger one.
            if a_set_size >= b_set_size:
                self.father_dict[b_head] = a_head
                self.size_dict[a_head] = a_set_size + b_set_size
            else:
                self.father_dict[a_head] = b_head
                self.size_dict[b_head] = a_set_size + b_set_size
def get_color_dis(bbox_a, bbox_b, image):
    """Mean per-channel colour distance between two box interiors, in [0, 1].

    Each bbox is (x0, y0, x1, y1); a one-pixel border is excluded before
    the pixel values are averaged down to one colour per box.
    """
    def _mean_color(bbox):
        # Crop the interior (rows first, then columns) and reduce it to a
        # single per-channel mean colour.
        patch = image[int(bbox[1] + 1):int(bbox[3] - 1),
                      int(bbox[0] + 1):int(bbox[2] - 1)]
        return patch.mean(axis=0).mean(axis=0)

    return numpy.abs(_mean_color(bbox_a) - _mean_color(bbox_b)).mean() / 255
def divided_by_color(groups, raw_image):
    """Cluster boxes whose interior colours are (nearly) identical.

    Pairwise colour distances (get_color_dis) below 0.1 are merged with a
    union-find structure; each returned cluster is a list of original boxes.

    Args:
        groups: list of (x0, y0, x1, y1) boxes.
        raw_image: image the boxes refer to (convertible via numpy.array).

    Returns:
        list of lists of boxes, one inner list per colour cluster.
    """
    # NOTE: the original wrote raw_image to 'debug.png' on every call;
    # that leftover debug side effect has been removed.
    threshold_color = 0.1
    raw_image = numpy.array(raw_image)
    # All pairwise colour distances, cheapest pairs first.
    dis_list = []
    for i in range(len(groups)):
        for j in range(i + 1, len(groups)):
            color_dis = get_color_dis(groups[i], groups[j], raw_image)
            dis_list.append([color_dis, i, j])
    dis_list.sort(key=lambda x: x[0])
    # Merge close pairs; the list is sorted, so stop at the first miss.
    unionset = UnionFindSet(groups)
    for dis_pair in dis_list:
        if dis_pair[0] > threshold_color:
            break
        unionset.union(dis_pair[1], dis_pair[2])
    grouped = {}
    for i in range(len(groups)):
        if unionset.size_dict[i] > 0:
            grouped[i] = []
    # BUG FIX: group members under the set *root* via find_head; the
    # original used father_dict[i], which can be an intermediate parent
    # and fragments clusters built from chained unions.
    for i in range(len(groups)):
        grouped[unionset.find_head(i)].append(groups[i])
    grouped = [x for x in grouped.values() if len(x) > 0]
    return grouped
def GroupBar(image, tls_raw, brs_raw, plot_area, min_value, max_value):
    """Detect bars from keypoint output and read off their data values.

    Args:
        image: image to annotate (drawn on in place by the helpers).
        tls_raw / brs_raw: dicts of top-left / bottom-right keypoint lists,
            each entry being (score, category_id, x, y).
        plot_area: (x0, y0, x1, y1) of the chart's plotting region.
        min_value, max_value: real data range of the y axis.

    Returns:
        (image, data_divided): the annotated image and, per colour cluster,
        the list of fractional bar heights.
    """
    image_raw = copy.deepcopy(image)

    def _collect(raw):
        # Flatten the raw keypoint dict into point records.
        points = []
        for temp in raw.values():
            for point in temp:
                points.append({
                    'bbox': [float(point[2]), float(point[3]), 6.0, 6.0],
                    'category_id': int(point[1]),
                    'score': float(point[0]),
                })
        return points

    tls = get_point(_collect(tls_raw), 0.4)
    brs = get_point(_collect(brs_raw), 0.4)
    # Visualize raw keypoints: red intensity = tl score, green = br score.
    for key in tls:
        drawLine(image, key['bbox'][0], key['bbox'][1], 3, 3, (int(255 * key['score']), 0, 0))
    for key in brs:
        drawLine(image, key['bbox'][0], key['bbox'][1], 3, 3, (0, int(255 * key['score']), 0))
    # ROBUSTNESS FIX: the original left data_divided undefined (NameError on
    # return) when no top-left keypoint survived the score threshold.
    data_divided = []
    if len(tls) > 0:
        for tar_id in range(1):
            tl_same = [tl for tl in tls if tl['category_id'] == tar_id]
            br_same = [br for br in brs if br['category_id'] == tar_id]
            groups = group_point(tl_same, br_same)
            draw_group(groups, image)
            data = get_data(groups, plot_area)
            draw_data(groups, image, min_value, max_value, plot_area)
            groups_divided = divided_by_color(groups, image_raw)
            data_divided = get_data_divided(groups_divided, plot_area)
    return image, data_divided
def GroupBarRaw(image, tls_raw, brs_raw):
    """Detect bar bounding boxes from raw keypoint output (no value reading).

    Like GroupBar, but with a much lower score threshold (0.05) and ending
    with the raw (x0, y0, x1, y1) groups instead of data values.  *image* is
    annotated in place; the trailing return yields None when no top-left
    keypoint survives the threshold.
    """
    # (The original also deep-copied the image and kept an empty info dict;
    # both were unused and have been removed, along with dead debug prints.)

    def _collect(raw):
        # Flatten the raw keypoint dict into point records.
        points = []
        for temp in raw.values():
            for point in temp:
                points.append({
                    'bbox': [float(point[2]), float(point[3]), 6.0, 6.0],
                    'category_id': int(point[1]),
                    'score': float(point[0]),
                })
        return points

    tls = get_point(_collect(tls_raw), 0.05)
    brs = get_point(_collect(brs_raw), 0.05)
    # Visualize raw keypoints: red intensity = tl score, green = br score.
    for key in tls:
        drawLine(image, key['bbox'][0], key['bbox'][1], 3, 3, (int(255 * key['score']), 0, 0))
    for key in brs:
        drawLine(image, key['bbox'][0], key['bbox'][1], 3, 3, (0, int(255 * key['score']), 0))
    groups = None
    if len(tls) > 0:
        for tar_id in range(1):
            tl_same = [tl for tl in tls if tl['category_id'] == tar_id]
            br_same = [br for br in brs if br['category_id'] == tar_id]
            groups = group_point(tl_same, br_same)
            draw_group(groups, image)
return groups | [
"numpy.abs",
"PIL.Image.fromarray",
"math.pow",
"math.sqrt",
"numpy.array",
"PIL.ImageDraw.Draw",
"copy.deepcopy"
] | [((600, 619), 'PIL.Image.fromarray', 'Image.fromarray', (['im'], {}), '(im)\n', (615, 619), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((631, 649), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im'], {}), '(im)\n', (645, 649), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((895, 913), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im'], {}), '(im)\n', (909, 913), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3250, 3265), 'math.sqrt', 'math.sqrt', (['temp'], {}), '(temp)\n', (3259, 3265), False, 'import math\n'), ((5902, 5924), 'numpy.array', 'numpy.array', (['raw_image'], {}), '(raw_image)\n', (5913, 5924), False, 'import numpy\n'), ((6720, 6740), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (6733, 6740), False, 'import copy\n'), ((8551, 8571), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (8564, 8571), False, 'import copy\n'), ((3158, 3190), 'math.pow', 'math.pow', (["(key['score'] - mean)", '(2)'], {}), "(key['score'] - mean, 2)\n", (3166, 3190), False, 'import math\n'), ((3380, 3399), 'numpy.array', 'numpy.array', (['new_ys'], {}), '(new_ys)\n', (3391, 3399), False, 'import numpy\n'), ((5730, 5756), 'numpy.abs', 'numpy.abs', (['(area_a - area_b)'], {}), '(area_a - area_b)\n', (5739, 5756), False, 'import numpy\n')] |
from collections import namedtuple
from collections.abc import Iterable
from scipy.stats import rv_discrete, rv_continuous, multivariate_normal, norm
from scipy.stats._distn_infrastructure import rv_sample
from numpy import interp
from os.path import dirname
import numpy as np
import pickle
import os
# Public API of this module.
__all__ = ['cum_probs', 'disc_gen', 'discrete', 'empiric', 'gaussian_mixture']
# For pickling distributions: lightweight record of a mixture's parameters
# (weights, means 'mu', covariances), used by gaussian_mixture.save/load.
Gaussian_Mixture = namedtuple('Gaussian_Mixture', 'weights mu cov')
def cum_probs(values) -> np.ndarray:
    """Collapse duplicate values by summing their probabilities.

    ``values`` holds two rows (values, probabilities); the result stacks
    the sorted unique values with their accumulated probabilities, e.g.
    ``val [1, 2, 1] / p [.2, .5, .3] -> val [1, 2] / p [.5, .5]``.
    """
    vals, probs = np.reshape(values, [2, -1])
    uniq = np.unique(vals)
    acc = np.zeros(uniq.shape)
    for idx, v in enumerate(uniq):
        acc[idx] = probs[np.where(vals == v)].sum()
    return np.stack((uniq, acc))
# Subclassing pattern taken from scipy's internal rv_sample implementation:
# https://github.com/scipy/scipy/blob/b225fd7a650c5beee18f26d98bd08d358f23a5d9/scipy/stats/_distn_infrastructure.py#L2675
class disc_gen(rv_sample):
    """Discrete distribution with reduced values and probabilities.
    example:
        val: [ 1, 2, 1] -> [ 1, 2]
        p: [.2, .5, .3] -> [.5, .5]
    """
    def __new__(cls, *args, **kwds):
        # rv_sample.__new__ expects extra constructor arguments; bypass them.
        return super(disc_gen, cls).__new__(cls)
    def __init__(self, **kwargs):
        # Collapse duplicate sample values before handing the (value,
        # probability) table to scipy's rv_sample machinery.
        kwargs['values'] = cum_probs(kwargs['values'])
        super().__init__(**kwargs)
def discrete(values) -> rv_discrete:
    """Build a scipy ``rv_discrete`` over a reduced (value, probability) table.

    Duplicate values in *values* are merged via cum_probs, e.g.
    val [1, 2, 1] / p [.2, .5, .3] -> val [1, 2] / p [.5, .5].

    Note: the original annotated the return type as np.ndarray, but the
    function has always returned an rv_discrete instance.
    """
    return rv_discrete(name='discrete', values=cum_probs(values))
class empiric(rv_sample):
    """Empiric distribution with linear interpolation.

    Builds a discrete distribution from observed samples and evaluates
    pdf/cdf/ppf/sf by linear interpolation between the sample points.
    """
    def __new__(cls, *args, **kwds):
        # rv_sample.__new__ expects extra constructor arguments; bypass them.
        return super(empiric, cls).__new__(cls)
    def __init__(self, reconstruct=False, **kwargs):
        # reconstruct=True means kwargs['values'] already holds a reduced
        # (values, probabilities) table; otherwise build it from the raw
        # samples by counting duplicates.
        if not reconstruct:
            xk, pk = np.unique(kwargs['values'], return_counts=True)
            kwargs['values'] = np.stack((xk, pk/pk.sum()))
        super().__init__(**kwargs)
        # Cumulative probabilities; the last entry is nudged slightly below
        # its exact value so the interpolation-based inverses stay finite.
        self.ck = np.cumsum(self.pk)
        self.ck[-1] = self.ck[-1] - min(0.000001, self.ck[0]/2)
    def pdf(self, x):
        # Linear interpolation of the point masses between sample values.
        return interp(x, self.xk, self.pk)
    def logpdf(self, x):
        return np.log(self.pdf(x))
    def cdf(self, x):
        return interp(x, self.xk, self.ck)
    def logcdf(self, x):
        return np.log(self.cdf(x))
    def sf(self, x):
        # Survival function 1 - cdf, forced to 0 at the last sample point.
        sf = 1-self.ck
        sf[-1] = 0
        return interp(x, self.xk, sf)
    def logsf(self, x):
        return np.log(self.sf(x))
    def isf(self, x):
        # Inverse survival: interpolate with the axes swapped; sf decreases,
        # so both arrays are reversed to keep interp's x-values ascending.
        sf = 1-self.ck
        sf[-1] = 0
        return interp(x, sf[::-1], self.xk[::-1])
    def ppf(self, x):
        return interp(x, self.ck, self.xk)
    def likelihood(self, x):
        return self.pdf(x).prod()
    def log_likelihood(self, x):
        return self.logpdf(x).sum()
    def uniformization(self, x, inv=False):
        # Map samples to uniform via the cdf (or back via the ppf).
        return self.ppf(x) if inv else self.cdf(x)
    def gaussianization(self, x, inv=False):
        # Map samples to a standard normal through the uniform domain
        # (or back when inv=True).
        a, b = (self, norm) if inv else (norm, self)
        return a.ppf(b.cdf(x))
return a.ppf(b.cdf(x))
class gaussian_mixture(rv_continuous):
    """Gaussian mixture distribution.

    A weighted sum of multivariate normals with means ``mean`` (w_dim x dim)
    and covariances ``cov`` (w_dim x dim x dim).
    """
    def __new__(cls, *args, **kwds):
        # rv_continuous.__new__ expects extra constructor arguments; bypass.
        return super(gaussian_mixture, cls).__new__(cls)
    def __init__(self, weights, mean=None, cov=None, **kwargs):
        """
        weights - mixing coefficients (must be in (0, 1) and sum to 1)
        mean - component means, shape (w_dim, dim); defaults to zeros
        cov - component covariances, shape (w_dim, dim, dim); required
        """
        self.weights, self.mean, self.cov = self.__prep(weights, mean, cov)
        super().__init__(**kwargs)
    def __prep(self, weights, mean, cov):
        """Handling data types, values and shapes.
        shapes:
            weights - w_dim
            mean - w_dim x dim
            cov - w_dim x dim x dim
        """
        if not isinstance(weights, Iterable):
            raise TypeError('weights must be iterable')
        # NOTE(review): the exact-equality sum check can reject valid float
        # weights due to rounding — consider math.isclose if that bites.
        if np.any(np.array(weights) < 0) or np.any(np.array(weights) > 1) or np.sum(weights) != 1:
            raise ValueError('weights must be in range (0, 1) and sum to 1')
        self.w_dim = len(weights)
        if cov is None or not isinstance(cov, np.ndarray):
            raise TypeError('cov must be numpy array/not None')
        if cov.ndim != 3 or cov.shape[1] != cov.shape[2] or cov.shape[0] != self.w_dim:
            raise IndexError('cov must be a list of %d square matrices' % (self.w_dim))
        self.dim = cov.shape[1]
        if mean is None:
            mean = np.zeros((self.w_dim, self.dim))
        if not isinstance(mean, np.ndarray):
            raise TypeError('mean must be numpy array')
        if mean.shape != (self.w_dim, self.dim):
            raise IndexError('mean must be of shape (%d, %d)' % (self.w_dim, self.dim))
        return weights, mean, cov
    def __prep_x(self, x):
        """Handling data types, values and shapes of x.
        shapes:
            x - (n, dim) or (dim)
        """
        if not isinstance(x, np.ndarray):
            raise TypeError('x must be numpy array')
        if x.ndim not in (1,2) or x.shape[-1] != self.dim:
            raise IndexError('x must be of shape (n, %d) or (%d,)' % (self.dim, self.dim))
    def __prep_k(self, x, k):
        """Handling data types, values and shapes of k.
        shapes:
            x - (n, dim) or (dim)
            k - (n) or scalar
        """
        self.__prep_x(x)
        if x.ndim == 2 and not isinstance(k, Iterable):
            raise TypeError('k must be of shape (%d,)' % (x.shape[0]))
        if x.ndim == 1 and (np.ndim(k) != 0 or k is None):
            raise TypeError('k must be scalar')
        if np.any(k < 0) or np.any(k >= self.w_dim):
            raise ValueError('k must be in range (0, %d)' % (self.w_dim))
    def pdf(self, x):
        """pdf from mix: weighted sum of the component pdfs."""
        self.__prep_x(x)
        p = 0
        if x.ndim > 1:
            # One density value per sample row.
            D = len(x)
            p = np.zeros(D)
        for k in range(self.w_dim):
            p += self.weights[k] * \
                multivariate_normal.pdf(x, mean=self.mean[k], cov=self.cov[k])
        return p
    def logpdf(self, x):
        """logpdf from mix."""
        return np.log(self.pdf(x))
    def pdf_w(self, x, k):
        """pdf from known distribution.
        k specifies which gaussian is used (scalar, or one index per row of x)
        """
        self.__prep_k(x, k)
        if np.ndim(k) == 0:
            return multivariate_normal.pdf(x, mean=self.mean[k], cov=self.cov[k])
        D = len(k)
        p = np.zeros(D)
        for i, k_ in enumerate(k):
            p[i] = multivariate_normal.pdf(x[i], mean=self.mean[k_], cov=self.cov[k_])
        return p
    def logpdf_w(self, x, k):
        """logpdf from known distribution.
        k specifies which gaussian is used (scalar, or one index per row of x)
        """
        self.__prep_k(x, k)
        if np.ndim(k) == 0:
            return multivariate_normal.logpdf(x, mean=self.mean[k], cov=self.cov[k])
        D = len(k)
        p = np.zeros(D)
        for i, k_ in enumerate(k):
            p[i] = multivariate_normal.logpdf(x[i], mean=self.mean[k_], cov=self.cov[k_])
        return p
    def rvs(self,
            size: int = 1,
            with_index: bool = False):
        """
        Draw random samples.
        ToDo: numpy.linalg.LinAlgError: SVD did not converge
        """
        # sample-wise weighted randomization
        r = discrete(values=(range(self.w_dim), self.weights)).rvs(size=size)
        # draw K samples and choose 1
        samples = np.zeros((self.w_dim, size, self.dim))
        for k in range(self.w_dim):
            samples[k] = multivariate_normal.rvs(
                mean=self.mean[k], cov=self.cov[k], size=size)
        # return with index of sampled distribution
        if with_index:
            return samples[r, range(size)], r
        # return without index
        return samples[r, range(size)]
    def logpdfrelevant(self, x,
            ndim: int = 10):
        """Likelihood of most relevant features.

        Splits the first w_dim*ndim features into w_dim chunks, picks the
        chunk with the highest variance per sample, and scores it under an
        independent normal built from the matching covariance diagonal.
        """
        self.__prep_x(x)
        if ndim > self.dim:
            ndim = self.dim
        nmix = self.w_dim
        # split first nmix x ndim dims in nmix parts
        if np.ndim(x) == 1:
            # shape: nmix x ndims
            mix = x[:nmix*ndim].reshape(nmix, ndim)
        else:
            # shape: nmix x nsamples x ndims
            mix = x[:, :nmix*ndim].reshape(nmix, -1, ndim)
        # return index of mix with highest variance
        var = np.var(mix, axis=-1)
        k = np.argmax(var, axis=0)
        # feature range covered by the selected chunk
        l, r = k*ndim, (k+1)*ndim
        # relevant features
        y = mix[k] if np.ndim(x) == 1 else mix[k, range(len(k))]
        def cov(k, l, r):
            """Retrieve relevant variances (diagonal slice of component k)."""
            return np.diag(np.diag(self.cov[k])[l:r])
        mv = multivariate_normal
        if np.ndim(x) == 1:
            return mv.logpdf(y, cov=cov(k, l, r))
        return np.array([mv.logpdf(Y, cov=cov(K, L, R)) for Y, K, L, R in zip(y, k, l, r)])
    def save(self, file):
        """Pickle the mixture parameters to *file* (directories are created)."""
        os.makedirs(dirname(file), exist_ok=True)
        with open(file, 'wb') as f:
            pickle.dump(Gaussian_Mixture(*[self.weights, self.mean, self.cov]), f)
    def load(file):
        # NOTE(review): defined without self and called as
        # gaussian_mixture.load(path) — effectively a static factory.
        with open(file, 'rb') as f:
            return gaussian_mixture(*pickle.load(f))
| [
"scipy.stats.multivariate_normal.rvs",
"numpy.array",
"numpy.reshape",
"numpy.where",
"numpy.ndim",
"numpy.stack",
"collections.namedtuple",
"pickle.load",
"numpy.argmax",
"numpy.any",
"os.path.dirname",
"numpy.interp",
"scipy.stats.multivariate_normal.logpdf",
"numpy.unique",
"scipy.sta... | [((433, 481), 'collections.namedtuple', 'namedtuple', (['"""Gaussian_Mixture"""', '"""weights mu cov"""'], {}), "('Gaussian_Mixture', 'weights mu cov')\n", (443, 481), False, 'from collections import namedtuple\n'), ((671, 698), 'numpy.reshape', 'np.reshape', (['values', '[2, -1]'], {}), '(values, [2, -1])\n', (681, 698), True, 'import numpy as np\n'), ((707, 719), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (716, 719), True, 'import numpy as np\n'), ((729, 746), 'numpy.zeros', 'np.zeros', (['y.shape'], {}), '(y.shape)\n', (737, 746), True, 'import numpy as np\n'), ((833, 850), 'numpy.stack', 'np.stack', (['(y, py)'], {}), '((y, py))\n', (841, 850), True, 'import numpy as np\n'), ((2072, 2090), 'numpy.cumsum', 'np.cumsum', (['self.pk'], {}), '(self.pk)\n', (2081, 2090), True, 'import numpy as np\n'), ((2193, 2220), 'numpy.interp', 'interp', (['x', 'self.xk', 'self.pk'], {}), '(x, self.xk, self.pk)\n', (2199, 2220), False, 'from numpy import interp\n'), ((2324, 2351), 'numpy.interp', 'interp', (['x', 'self.xk', 'self.ck'], {}), '(x, self.xk, self.ck)\n', (2330, 2351), False, 'from numpy import interp\n'), ((2496, 2518), 'numpy.interp', 'interp', (['x', 'self.xk', 'sf'], {}), '(x, self.xk, sf)\n', (2502, 2518), False, 'from numpy import interp\n'), ((2678, 2712), 'numpy.interp', 'interp', (['x', 'sf[::-1]', 'self.xk[::-1]'], {}), '(x, sf[::-1], self.xk[::-1])\n', (2684, 2712), False, 'from numpy import interp\n'), ((2751, 2778), 'numpy.interp', 'interp', (['x', 'self.ck', 'self.xk'], {}), '(x, self.ck, self.xk)\n', (2757, 2778), False, 'from numpy import interp\n'), ((6446, 6457), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (6454, 6457), True, 'import numpy as np\n'), ((6901, 6912), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (6909, 6912), True, 'import numpy as np\n'), ((7443, 7481), 'numpy.zeros', 'np.zeros', (['(self.w_dim, size, self.dim)'], {}), '((self.w_dim, size, self.dim))\n', (7451, 7481), True, 'import numpy as np\n'), 
((8413, 8433), 'numpy.var', 'np.var', (['mix'], {'axis': '(-1)'}), '(mix, axis=-1)\n', (8419, 8433), True, 'import numpy as np\n'), ((8446, 8468), 'numpy.argmax', 'np.argmax', (['var'], {'axis': '(0)'}), '(var, axis=0)\n', (8455, 8468), True, 'import numpy as np\n'), ((1912, 1959), 'numpy.unique', 'np.unique', (["kwargs['values']"], {'return_counts': '(True)'}), "(kwargs['values'], return_counts=True)\n", (1921, 1959), True, 'import numpy as np\n'), ((4465, 4497), 'numpy.zeros', 'np.zeros', (['(self.w_dim, self.dim)'], {}), '((self.w_dim, self.dim))\n', (4473, 4497), True, 'import numpy as np\n'), ((5601, 5614), 'numpy.any', 'np.any', (['(k < 0)'], {}), '(k < 0)\n', (5607, 5614), True, 'import numpy as np\n'), ((5618, 5641), 'numpy.any', 'np.any', (['(k >= self.w_dim)'], {}), '(k >= self.w_dim)\n', (5624, 5641), True, 'import numpy as np\n'), ((5878, 5889), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (5886, 5889), True, 'import numpy as np\n'), ((6315, 6325), 'numpy.ndim', 'np.ndim', (['k'], {}), '(k)\n', (6322, 6325), True, 'import numpy as np\n'), ((6351, 6413), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['x'], {'mean': 'self.mean[k]', 'cov': 'self.cov[k]'}), '(x, mean=self.mean[k], cov=self.cov[k])\n', (6374, 6413), False, 'from scipy.stats import rv_discrete, rv_continuous, multivariate_normal, norm\n'), ((6513, 6580), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['x[i]'], {'mean': 'self.mean[k_]', 'cov': 'self.cov[k_]'}), '(x[i], mean=self.mean[k_], cov=self.cov[k_])\n', (6536, 6580), False, 'from scipy.stats import rv_discrete, rv_continuous, multivariate_normal, norm\n'), ((6767, 6777), 'numpy.ndim', 'np.ndim', (['k'], {}), '(k)\n', (6774, 6777), True, 'import numpy as np\n'), ((6803, 6868), 'scipy.stats.multivariate_normal.logpdf', 'multivariate_normal.logpdf', (['x'], {'mean': 'self.mean[k]', 'cov': 'self.cov[k]'}), '(x, mean=self.mean[k], cov=self.cov[k])\n', (6829, 6868), False, 'from scipy.stats import 
rv_discrete, rv_continuous, multivariate_normal, norm\n'), ((6968, 7038), 'scipy.stats.multivariate_normal.logpdf', 'multivariate_normal.logpdf', (['x[i]'], {'mean': 'self.mean[k_]', 'cov': 'self.cov[k_]'}), '(x[i], mean=self.mean[k_], cov=self.cov[k_])\n', (6994, 7038), False, 'from scipy.stats import rv_discrete, rv_continuous, multivariate_normal, norm\n'), ((7543, 7613), 'scipy.stats.multivariate_normal.rvs', 'multivariate_normal.rvs', ([], {'mean': 'self.mean[k]', 'cov': 'self.cov[k]', 'size': 'size'}), '(mean=self.mean[k], cov=self.cov[k], size=size)\n', (7566, 7613), False, 'from scipy.stats import rv_discrete, rv_continuous, multivariate_normal, norm\n'), ((8126, 8136), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (8133, 8136), True, 'import numpy as np\n'), ((8787, 8797), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (8794, 8797), True, 'import numpy as np\n'), ((8993, 9006), 'os.path.dirname', 'dirname', (['file'], {}), '(file)\n', (9000, 9006), False, 'from os.path import dirname\n'), ((3939, 3954), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (3945, 3954), True, 'import numpy as np\n'), ((5980, 6042), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['x'], {'mean': 'self.mean[k]', 'cov': 'self.cov[k]'}), '(x, mean=self.mean[k], cov=self.cov[k])\n', (6003, 6042), False, 'from scipy.stats import rv_discrete, rv_continuous, multivariate_normal, norm\n'), ((8571, 8581), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (8578, 8581), True, 'import numpy as np\n'), ((795, 814), 'numpy.where', 'np.where', (['(x == y[i])'], {}), '(x == y[i])\n', (803, 814), True, 'import numpy as np\n'), ((3880, 3897), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (3888, 3897), True, 'import numpy as np\n'), ((3913, 3930), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (3921, 3930), True, 'import numpy as np\n'), ((5511, 5521), 'numpy.ndim', 'np.ndim', (['k'], {}), '(k)\n', (5518, 5521), True, 'import numpy as np\n'), 
((8715, 8735), 'numpy.diag', 'np.diag', (['self.cov[k]'], {}), '(self.cov[k])\n', (8722, 8735), True, 'import numpy as np\n'), ((9236, 9250), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9247, 9250), False, 'import pickle\n')] |
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
from random import randint
import numpy as np
import logging
from random import choice
import time
# Module-level logging setup: emit everything from DEBUG level upwards.
logging.basicConfig(level=logging.DEBUG)
class FieldGen:
    """Random text-field generator backed by a word list (Python 2 code:
    relies on the builtin xrange)."""
    def __init__(self, wordlist_path):
        """Load the word list from *wordlist_path* and the fixed ATECO code
        list from ../bin/ateco_codes.txt (path relative to the CWD)."""
        # Cache of zipf indexes reused by zipf_fast to patch out-of-range draws.
        self.last_zipf_indexes = None
        file = codecs.open(wordlist_path, 'rU', 'utf-8')
        raw = file.read()
        tokens = raw.split()
        #Swap comments if file contains frequencies after words
        #self.wf = [(tokens[i], tokens[i+1]) for i in range(0,len(tokens),2)]
        #self.w = [tokens[i] for i in range(0,len(tokens),2)]
        self.w = [tokens[i] for i in range(0,len(tokens))]
        # Same vocabulary as a numpy array (kept for fast fancy indexing).
        self.wnp = np.array(self.w)
        logging.info("numTerms: %s" % len(self.w))
        file.close()
        file = codecs.open("../bin/ateco_codes.txt", 'rU', 'utf-8')
        raw = file.read()
        tokens = raw.split()
        self.ateco_codes = [tokens[i] for i in range(0,len(tokens))]
        file.close()
    def random(self, num_words=50, word_source = None):
        """Return num_words uniformly drawn words joined by single spaces."""
        if word_source == None:
            word_source = self.w
        words = []
        text = " "
        for i in xrange(0,num_words):
            word = word_source[randint(0,len(word_source)-1)]
            words.append(word)
            #text += " " + word
        return text.join(words)
    def zipf(self, num_words=50):
        """Return num_words zipf-distributed words (s=1.2) joined by spaces.

        numpy.random.zipf can return indexes beyond the vocabulary, so we
        keep drawing batches and discard out-of-range indexes until enough
        words have been emitted.
        """
        words = []
        text = " "
        emitted = 0
        len_w = len(self.w)
        while True:
            indexes = np.random.zipf(1.2, num_words)
            filtered = indexes[indexes < len_w]
            for i in filtered:
                words.append(self.w[i])
                emitted += 1
                if emitted >= num_words:
                    #return text
                    return text.join(words)
    def zipf_fast(self, num_words=50):
        """Faster variant of zipf: draws one batch and substitutes any
        out-of-range indexes with cached in-range zipf indexes."""
        if (self.last_zipf_indexes is None or len(self.last_zipf_indexes)<num_words):
            self.last_zipf_indexes = np.array(self._zipf_indexes(num_words))
        len_w = len(self.w)
        indexes = np.random.zipf(1.2, num_words)
        bad_idxs = (indexes >= len_w)
        # Replace the out-of-vocabulary draws with cached valid indexes.
        indexes[bad_idxs] = self.last_zipf_indexes[bad_idxs]
        #words = self.wnp[indexes]
        '''
        TODO: improve performance
        currently it's a bit faster than normal zipf but:
        lot of time is spent conversion of words from ndarray to list
        if not even more time is spent joining words in ndarray
        this doesn't work
        np.core.defchararray.join("-", np.array(["1", "2"]))
        array(['1', '2'],
            dtype='|S1')
        '''
        #words_list = words.tolist()
        words_list = [self.w[i] for i in indexes]
        return " ".join(words_list)
    def _zipf_indexes(self, num_words):
        """Collect num_words in-vocabulary zipf indexes (s=1.1), drawing in
        batches and discarding out-of-range values."""
        emitted = 0
        len_w = len(self.w)
        res = []
        while True:
            indexes = np.random.zipf(1.1, num_words)
            filtered = indexes[indexes < len_w]
            for i in filtered:
                res.append(i)
                emitted += 1
                if emitted >= num_words:
                    #return text
                    return res
    def random_code(self, num_chars, letters="abcdefghilmnopqrstuvz1234567890"):
        """Return a random code of num_chars characters drawn from *letters*."""
        code = ''.join([choice(letters) for i in xrange(0,num_chars)])
        return code
    def random_tuple(self, num_el, el_source):
        """Return a list of num_el elements drawn (with repetition) from
        *el_source*."""
        res = [choice(el_source) for i in xrange(0, num_el)]
        return res
return res
def go():
    """Benchmark helper: generate 10000 zipf-distributed 400-word fields."""
    generator = FieldGen('../bin/wordlist_wiki.txt')
    for _ in xrange(0, 10000):
        generator.zipf_fast(400)
if __name__ == "__main__":
    # Simple wall-clock benchmark of zipf_fast over 10000 fields.
    start_time = time.time()
    tg = FieldGen('../bin/wordlist_wiki.txt')
    for i in xrange(0, 10000):
        value = tg.zipf_fast(400)
        #value = tg.random_tuple(5, tg.ateco_codes)
    print("%f seconds" % (time.time() - start_time))
    #print value
| [
"logging.basicConfig",
"numpy.random.zipf",
"random.choice",
"numpy.array",
"codecs.open",
"time.time"
] | [((681, 721), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (700, 721), False, 'import logging\n'), ((4162, 4173), 'time.time', 'time.time', ([], {}), '()\n', (4171, 4173), False, 'import time\n'), ((832, 873), 'codecs.open', 'codecs.open', (['wordlist_path', '"""rU"""', '"""utf-8"""'], {}), "(wordlist_path, 'rU', 'utf-8')\n", (843, 873), False, 'import codecs\n'), ((1212, 1228), 'numpy.array', 'np.array', (['self.w'], {}), '(self.w)\n', (1220, 1228), True, 'import numpy as np\n'), ((1317, 1369), 'codecs.open', 'codecs.open', (['"""../bin/ateco_codes.txt"""', '"""rU"""', '"""utf-8"""'], {}), "('../bin/ateco_codes.txt', 'rU', 'utf-8')\n", (1328, 1369), False, 'import codecs\n'), ((2582, 2612), 'numpy.random.zipf', 'np.random.zipf', (['(1.2)', 'num_words'], {}), '(1.2, num_words)\n', (2596, 2612), True, 'import numpy as np\n'), ((2034, 2064), 'numpy.random.zipf', 'np.random.zipf', (['(1.2)', 'num_words'], {}), '(1.2, num_words)\n', (2048, 2064), True, 'import numpy as np\n'), ((3420, 3450), 'numpy.random.zipf', 'np.random.zipf', (['(1.1)', 'num_words'], {}), '(1.1, num_words)\n', (3434, 3450), True, 'import numpy as np\n'), ((3930, 3947), 'random.choice', 'choice', (['el_source'], {}), '(el_source)\n', (3936, 3947), False, 'from random import choice\n'), ((3800, 3815), 'random.choice', 'choice', (['letters'], {}), '(letters)\n', (3806, 3815), False, 'from random import choice\n'), ((4361, 4372), 'time.time', 'time.time', ([], {}), '()\n', (4370, 4372), False, 'import time\n')] |
# Compare Algorithms
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score,confusion_matrix
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import seaborn as sns; sns.set()
from sklearn.ensemble import RandomForestClassifier
import plot
np.seterr(divide='ignore', invalid='ignore')
def fit_func(X, Y, k):
    """Cross-validate a suite of classifiers and report their accuracies.

    Parameters
    ----------
    X : array
        A 2-d array with the entropy and godel numbering features.
    Y : array
        The class labels.
    k : int
        K-mer value (used only to label the comparison plot).

    Returns
    -------
    list
        Mean cross-validation accuracy for each compared algorithm, in the
        same order as they appear in the comparison plot.
    """
    # The split itself is unused below, but it is kept so the global RNG is
    # consumed exactly as before.
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.20,
                                                        random_state=None)
    # Candidate models, evaluated on the full (X, Y) via k-fold CV.
    models = [
        ('LDA', LinearDiscriminantAnalysis()),
        ('KNN', KNeighborsClassifier()),
        ('CART', DecisionTreeClassifier()),
        ('NB', GaussianNB()),
        ('MUNB', MultinomialNB()),
    ]
    scoring = 'accuracy'
    results, mean_results, names = [], [], []
    for name, model in models:
        kfold = model_selection.KFold(n_splits=2, random_state=None)
        cv_scores = model_selection.cross_val_score(model, X, Y, cv=kfold,
                                                    scoring=scoring)
        results.append(cv_scores)
        names.append(name)
        mean_results.append(cv_scores.mean())
    # Box-plot of the per-fold accuracy distributions.
    plot.compare_algorithms(results, names, k)
    return mean_results
def pca(X, Y, k):
    """Reduce the features with PCA and score a random-forest classifier.

    Parameters
    ----------
    X : array
        A 2-d array with the entropy and godel numbering features.
    Y : array
        The class labels.
    k : int
        K-mer value (used to name the output plot directory).

    Returns
    -------
    float
        Accuracy of a RandomForestClassifier trained on the 1-component
        PCA projection of the standardized features.
    """
    train_X, test_X, train_y, test_y = train_test_split(X, Y, test_size=0.20,
                                                        random_state=0)
    # Standardize using statistics from the training split only.
    scaler = StandardScaler()
    scaler.fit(train_X)
    train_X = scaler.transform(train_X)
    test_X = scaler.transform(test_X)
    # Project both splits onto the single leading principal component.
    projector = PCA(n_components=1)
    projector.fit(train_X)
    train_X = projector.transform(train_X)
    test_X = projector.transform(test_X)
    # Map the 1-D projection back to feature space for visualization.
    X_back = projector.inverse_transform(train_X)
    plt.scatter(X[:, 0], X[:, 1], alpha=0.2)
    plt.scatter(X_back[:, 0], X_back[:, 1], alpha=0.8)
    plt.axis('equal')
    plt.savefig('k=%i/pca.png' % k)
    classifier = RandomForestClassifier(max_depth=2, random_state=0)
    classifier.fit(train_X, train_y)
    y_pred = classifier.predict(test_X)
    cm = confusion_matrix(test_y, y_pred)
    return accuracy_score(test_y, y_pred)
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.model_selection.KFold",
"seaborn.set",
"sklearn.decomposition.PCA",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.naive_bayes.MultinomialNB",
"matplotlib.pyplot.scatter",
"plot.compare_algorithms",
"matplotlib.pyplot.axis",
"sklearn.model_select... | [((841, 850), 'seaborn.set', 'sns.set', ([], {}), '()\n', (848, 850), True, 'import seaborn as sns\n'), ((918, 962), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (927, 962), True, 'import numpy as np\n'), ((1359, 1415), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)', 'random_state': 'None'}), '(X, Y, test_size=0.2, random_state=None)\n', (1375, 1415), False, 'from sklearn.model_selection import train_test_split\n'), ((2104, 2146), 'plot.compare_algorithms', 'plot.compare_algorithms', (['results', 'names', 'k'], {}), '(results, names, k)\n', (2127, 2146), False, 'import plot\n'), ((2617, 2670), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(X, Y, test_size=0.2, random_state=0)\n', (2633, 2670), False, 'from sklearn.model_selection import train_test_split\n'), ((2715, 2731), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2729, 2731), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2976, 2995), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(1)'}), '(n_components=1)\n', (2979, 2995), False, 'from sklearn.decomposition import PCA\n'), ((3237, 3277), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'alpha': '(0.2)'}), '(X[:, 0], X[:, 1], alpha=0.2)\n', (3248, 3277), True, 'import matplotlib.pyplot as plt\n'), ((3280, 3328), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_new[:, 0]', 'X_new[:, 1]'], {'alpha': '(0.8)'}), '(X_new[:, 0], X_new[:, 1], alpha=0.8)\n', (3291, 3328), True, 'import matplotlib.pyplot as plt\n'), ((3331, 3348), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (3339, 3348), True, 'import matplotlib.pyplot as plt\n'), ((3352, 3383), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('k=%i/pca.png' % k)"], 
{}), "('k=%i/pca.png' % k)\n", (3363, 3383), True, 'import matplotlib.pyplot as plt\n'), ((3400, 3451), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'max_depth': '(2)', 'random_state': '(0)'}), '(max_depth=2, random_state=0)\n', (3422, 3451), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3577, 3609), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_y', 'y_pred'], {}), '(test_y, y_pred)\n', (3593, 3609), False, 'from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix\n'), ((3622, 3652), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['test_y', 'y_pred'], {}), '(test_y, y_pred)\n', (3636, 3652), False, 'from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix\n'), ((1830, 1882), 'sklearn.model_selection.KFold', 'model_selection.KFold', ([], {'n_splits': '(2)', 'random_state': 'None'}), '(n_splits=2, random_state=None)\n', (1851, 1882), False, 'from sklearn import model_selection\n'), ((1899, 1970), 'sklearn.model_selection.cross_val_score', 'model_selection.cross_val_score', (['model', 'X', 'Y'], {'cv': 'kfold', 'scoring': 'scoring'}), '(model, X, Y, cv=kfold, scoring=scoring)\n', (1930, 1970), False, 'from sklearn import model_selection\n'), ((1473, 1501), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {}), '()\n', (1499, 1501), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((1528, 1550), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (1548, 1550), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1578, 1602), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (1600, 1602), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1628, 1640), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (1638, 1640), False, 'from 
sklearn.naive_bayes import GaussianNB\n'), ((1668, 1683), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (1681, 1683), False, 'from sklearn.naive_bayes import MultinomialNB\n')] |
#!/usr/bin/env python2
# -----------------------------------------------------------------------------
# @author:
# <NAME>, Jun 23rd, 2017
# -----------------------------------------------------------------------------
import graph_util.init_path as init_path
from util import logger
import graph_util.mujoco_parser as mujoco_parser
import numpy as np
_BASE_DIR = init_path.get_base_dir()
def map_output(transfer_env, i_value, added_constant, gnn_option_list):
    '''
    @brief:
        Map a policy-output tensor from the source environment of a transfer
        pair onto the target environment's action layout.

        i_value could be the logstd (1, num_action), policy_output/w
        (64, num_action), policy_output/b (1, num_action)

    @input:
        transfer_env: a string "<src>2<tgt>"; '-v1' is appended to each side.
        added_constant: scalar added to the mapped result before returning.
        gnn_option_list: the four graph-parser options, in the order
            [gnn_node_option, root_connection_option,
             gnn_output_option, gnn_embedding_option].

    @return:
        The mapped tensor with the same orientation as i_value
        (num_action on the trailing axis), plus added_constant.
    '''
    assert len(gnn_option_list) == 4

    i_value = np.transpose(i_value)  # make the num_action to the front
    ienv, oenv = [env + '-v1' for env in transfer_env.split('2')]
    # Parse both environments into graph descriptions with identical options.
    ienv_info = mujoco_parser.parse_mujoco_graph(
        ienv,
        gnn_node_option=gnn_option_list[0],
        root_connection_option=gnn_option_list[1],
        gnn_output_option=gnn_option_list[2],
        gnn_embedding_option=gnn_option_list[3]
    )
    oenv_info = mujoco_parser.parse_mujoco_graph(
        oenv,
        gnn_node_option=gnn_option_list[0],
        root_connection_option=gnn_option_list[1],
        gnn_output_option=gnn_option_list[2],
        gnn_embedding_option=gnn_option_list[3]
    )

    if len(i_value.shape) > 1:
        # weight matrix: one row per target output node
        o_value = np.zeros([len(oenv_info['output_list']), i_value.shape[1]])
    else:
        # the b matrix (bias vector): one entry per target output node
        o_value = np.zeros([len(oenv_info['output_list'])])
    assert len(i_value) == len(ienv_info['output_list'])

    ienv_node_name_list = [node['name'] for node in ienv_info['tree']]
    for output_id, output_node_id in enumerate(oenv_info['output_list']):
        # get the name of the joint
        node_name = oenv_info['tree'][output_node_id]['name']

        # if the node is already in the input environment, copy its row;
        # otherwise average over structurally equivalent nodes below
        if node_name in ienv_node_name_list:
            if ienv_node_name_list.index(node_name) not in \
                    ienv_info['output_list']:
                # the node exists but has no output slot in the source env
                logger.warning('Missing joint: {}'.format(node_name))
                continue
            o_value[output_id] = i_value[
                ienv_info['output_list'].index(
                    ienv_node_name_list.index(node_name)
                )
            ]
        else:
            # the name format: "@type_@name_@number", e.g.: joint_leg_1
            assert len(node_name.split('_')) == 3

            # find all the repetitive nodes (same middle name component,
            # e.g. every "leg") and average their rows
            repetitive_struct_node_list = [
                ienv_node_name_list.index(name)
                for name in ienv_node_name_list
                if node_name.split('_')[1] == name.split('_')[1]
            ]
            num_reptitive_nodes = float(len(repetitive_struct_node_list))
            assert len(repetitive_struct_node_list) >= 1

            for i_node_id in repetitive_struct_node_list:
                o_value[output_id] += i_value[
                    ienv_info['output_list'].index(i_node_id)
                ] / num_reptitive_nodes

    # restore the original orientation and apply the constant offset
    return np.transpose(o_value) + added_constant
def map_input(transfer_env, i_value, added_constant, gnn_option_list):
    """Map observation-input weights from a source env to a target env.

    transfer_env is a string "<src>2<tgt>"; '-v1' is appended to each side.
    Rows of i_value indexed by the source env's per-node observation digits
    are copied into the positions used by the matching node (same name) in
    the target env; nodes without a name match are left at zero.
    added_constant is accepted for signature parity but not used here.
    """
    assert len(gnn_option_list) == 4

    ienv, oenv = [env + '-v1' for env in transfer_env.split('2')]
    node_opt, root_opt, out_opt, embed_opt = gnn_option_list
    # Parse both environments with identical graph options.
    ienv_info = mujoco_parser.parse_mujoco_graph(
        ienv,
        gnn_node_option=node_opt,
        root_connection_option=root_opt,
        gnn_output_option=out_opt,
        gnn_embedding_option=embed_opt
    )
    oenv_info = mujoco_parser.parse_mujoco_graph(
        oenv,
        gnn_node_option=node_opt,
        root_connection_option=root_opt,
        gnn_output_option=out_opt,
        gnn_embedding_option=embed_opt
    )

    o_value = np.zeros([oenv_info['debug_info']['ob_size'], i_value.shape[1]])
    assert len(i_value) == ienv_info['debug_info']['ob_size']

    source_names = [node['name'] for node in ienv_info['tree']]
    for o_node, o_digits in oenv_info['input_dict'].iteritems():
        joint_name = oenv_info['tree'][o_node]['name']
        # Only nodes also present (by name) in the source env get values.
        if joint_name not in source_names:
            continue
        i_digits = ienv_info['input_dict'][source_names.index(joint_name)]
        o_value[o_digits] = i_value[i_digits]

    return o_value
def map_transfer_env_running_mean(ienv, oenv, running_mean_info,
                                  observation_size,
                                  gnn_node_option, root_connection_option,
                                  gnn_output_option, gnn_embedding_option):
    '''
    @brief:
        Transfer observation-normalization statistics (running mean/variance
        and their accumulators) from environment ienv to environment oenv.

        Statistics for a target node are copied from the identically named
        source node when one exists; otherwise they are averaged over all
        source nodes with the same structural name component
        (e.g. every "leg" node).

    @input:
        running_mean_info: dict with keys 'step', 'mean', 'variance',
            'square_sum', 'sum' for the source environment.
        observation_size: observation dimension of the target environment.

    @return:
        A running-mean dict of the same shape for the target environment.
    '''
    # parse the mujoco information
    ienv_info = mujoco_parser.parse_mujoco_graph(
        ienv,
        gnn_node_option=gnn_node_option,
        root_connection_option=root_connection_option,
        gnn_output_option=gnn_output_option,
        gnn_embedding_option=gnn_embedding_option
    )
    oenv_info = mujoco_parser.parse_mujoco_graph(
        oenv,
        gnn_node_option=gnn_node_option,
        root_connection_option=root_connection_option,
        gnn_output_option=gnn_output_option,
        gnn_embedding_option=gnn_embedding_option
    )
    i_running_mean_info = running_mean_info

    # weight applied to the transferred step count and accumulators
    # (1 keeps them unchanged; a smaller value would down-weight them)
    start_coeff = 1
    o_running_mean_info = {
        'step': i_running_mean_info['step'] * start_coeff,
        'mean': np.zeros([observation_size]),
        'variance': np.zeros([observation_size]),
        'square_sum': np.zeros([observation_size]),
        'sum': np.zeros([observation_size])
    }
    ienv_node_name_list = [node['name'] for node in ienv_info['tree']]
    for node, oenv_digit in oenv_info['input_dict'].iteritems():
        node_name = oenv_info['tree'][node]['name']

        # is the node already in the input environment?
        if node_name in ienv_node_name_list:
            ienv_digit = ienv_info['input_dict'][
                ienv_node_name_list.index(node_name)
            ]
            assert len(ienv_digit) == len(oenv_digit)
            # direct copy: accumulators are scaled, statistics are not
            for key in ['square_sum', 'sum']:
                o_running_mean_info[key][oenv_digit] = \
                    i_running_mean_info[key][ienv_digit] * start_coeff
            for key in ['mean', 'variance']:
                o_running_mean_info[key][oenv_digit] = \
                    i_running_mean_info[key][ienv_digit]
        else:
            # the name format: "@type_@name_@number", e.g.: joint_leg_1
            assert len(node_name.split('_')) == 3

            # find all the repetitive nodes (same middle name component)
            # and average their statistics into the target slot
            repetitive_struct_node_list = [
                ienv_node_name_list.index(name)
                for name in ienv_node_name_list
                if node_name.split('_')[1] == name.split('_')[1]
            ]
            assert len(repetitive_struct_node_list) >= 1
            num_reptitive_nodes = float(len(repetitive_struct_node_list))

            for i_node_id in repetitive_struct_node_list:
                ienv_digit = ienv_info['input_dict'][i_node_id]
                assert len(ienv_digit) == len(oenv_digit)
                # accumulate the average (note `+=`, unlike the copy above)
                for key in ['square_sum', 'sum']:
                    o_running_mean_info[key][oenv_digit] += \
                        i_running_mean_info[key][ienv_digit] * \
                        start_coeff / num_reptitive_nodes
                for key in ['mean', 'variance']:
                    o_running_mean_info[key][oenv_digit] += \
                        i_running_mean_info[key][ienv_digit] / \
                        num_reptitive_nodes

    return o_running_mean_info
| [
"numpy.zeros",
"numpy.transpose",
"graph_util.init_path.get_base_dir",
"graph_util.mujoco_parser.parse_mujoco_graph"
] | [((376, 400), 'graph_util.init_path.get_base_dir', 'init_path.get_base_dir', ([], {}), '()\n', (398, 400), True, 'import graph_util.init_path as init_path\n'), ((693, 714), 'numpy.transpose', 'np.transpose', (['i_value'], {}), '(i_value)\n', (705, 714), True, 'import numpy as np\n'), ((833, 1038), 'graph_util.mujoco_parser.parse_mujoco_graph', 'mujoco_parser.parse_mujoco_graph', (['ienv'], {'gnn_node_option': 'gnn_option_list[0]', 'root_connection_option': 'gnn_option_list[1]', 'gnn_output_option': 'gnn_option_list[2]', 'gnn_embedding_option': 'gnn_option_list[3]'}), '(ienv, gnn_node_option=gnn_option_list[0],\n root_connection_option=gnn_option_list[1], gnn_output_option=\n gnn_option_list[2], gnn_embedding_option=gnn_option_list[3])\n', (865, 1038), True, 'import graph_util.mujoco_parser as mujoco_parser\n'), ((1092, 1297), 'graph_util.mujoco_parser.parse_mujoco_graph', 'mujoco_parser.parse_mujoco_graph', (['oenv'], {'gnn_node_option': 'gnn_option_list[0]', 'root_connection_option': 'gnn_option_list[1]', 'gnn_output_option': 'gnn_option_list[2]', 'gnn_embedding_option': 'gnn_option_list[3]'}), '(oenv, gnn_node_option=gnn_option_list[0],\n root_connection_option=gnn_option_list[1], gnn_output_option=\n gnn_option_list[2], gnn_embedding_option=gnn_option_list[3])\n', (1124, 1297), True, 'import graph_util.mujoco_parser as mujoco_parser\n'), ((3327, 3532), 'graph_util.mujoco_parser.parse_mujoco_graph', 'mujoco_parser.parse_mujoco_graph', (['ienv'], {'gnn_node_option': 'gnn_option_list[0]', 'root_connection_option': 'gnn_option_list[1]', 'gnn_output_option': 'gnn_option_list[2]', 'gnn_embedding_option': 'gnn_option_list[3]'}), '(ienv, gnn_node_option=gnn_option_list[0],\n root_connection_option=gnn_option_list[1], gnn_output_option=\n gnn_option_list[2], gnn_embedding_option=gnn_option_list[3])\n', (3359, 3532), True, 'import graph_util.mujoco_parser as mujoco_parser\n'), ((3586, 3791), 'graph_util.mujoco_parser.parse_mujoco_graph', 
'mujoco_parser.parse_mujoco_graph', (['oenv'], {'gnn_node_option': 'gnn_option_list[0]', 'root_connection_option': 'gnn_option_list[1]', 'gnn_output_option': 'gnn_option_list[2]', 'gnn_embedding_option': 'gnn_option_list[3]'}), '(oenv, gnn_node_option=gnn_option_list[0],\n root_connection_option=gnn_option_list[1], gnn_output_option=\n gnn_option_list[2], gnn_embedding_option=gnn_option_list[3])\n', (3618, 3791), True, 'import graph_util.mujoco_parser as mujoco_parser\n'), ((3843, 3907), 'numpy.zeros', 'np.zeros', (["[oenv_info['debug_info']['ob_size'], i_value.shape[1]]"], {}), "([oenv_info['debug_info']['ob_size'], i_value.shape[1]])\n", (3851, 3907), True, 'import numpy as np\n'), ((4865, 5072), 'graph_util.mujoco_parser.parse_mujoco_graph', 'mujoco_parser.parse_mujoco_graph', (['ienv'], {'gnn_node_option': 'gnn_node_option', 'root_connection_option': 'root_connection_option', 'gnn_output_option': 'gnn_output_option', 'gnn_embedding_option': 'gnn_embedding_option'}), '(ienv, gnn_node_option=gnn_node_option,\n root_connection_option=root_connection_option, gnn_output_option=\n gnn_output_option, gnn_embedding_option=gnn_embedding_option)\n', (4897, 5072), True, 'import graph_util.mujoco_parser as mujoco_parser\n'), ((5126, 5333), 'graph_util.mujoco_parser.parse_mujoco_graph', 'mujoco_parser.parse_mujoco_graph', (['oenv'], {'gnn_node_option': 'gnn_node_option', 'root_connection_option': 'root_connection_option', 'gnn_output_option': 'gnn_output_option', 'gnn_embedding_option': 'gnn_embedding_option'}), '(oenv, gnn_node_option=gnn_node_option,\n root_connection_option=root_connection_option, gnn_output_option=\n gnn_output_option, gnn_embedding_option=gnn_embedding_option)\n', (5158, 5333), True, 'import graph_util.mujoco_parser as mujoco_parser\n'), ((3096, 3117), 'numpy.transpose', 'np.transpose', (['o_value'], {}), '(o_value)\n', (3108, 3117), True, 'import numpy as np\n'), ((5597, 5625), 'numpy.zeros', 'np.zeros', (['[observation_size]'], {}), 
'([observation_size])\n', (5605, 5625), True, 'import numpy as np\n'), ((5647, 5675), 'numpy.zeros', 'np.zeros', (['[observation_size]'], {}), '([observation_size])\n', (5655, 5675), True, 'import numpy as np\n'), ((5699, 5727), 'numpy.zeros', 'np.zeros', (['[observation_size]'], {}), '([observation_size])\n', (5707, 5727), True, 'import numpy as np\n'), ((5744, 5772), 'numpy.zeros', 'np.zeros', (['[observation_size]'], {}), '([observation_size])\n', (5752, 5772), True, 'import numpy as np\n')] |
# BSD 3-Clause License
#
# Copyright (c) 2016-19, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Parser module specific to CCMpred predictions
"""
from __future__ import division
__author__ = "<NAME>"
__date__ = "03 Aug 2016"
__version__ = "0.1"
import numpy as np
import sys
from conkit.io._parser import ContactFileParser
from conkit.core.contact import Contact
from conkit.core.contactmap import ContactMap
from conkit.core.contactfile import ContactFile
class CCMpredParser(ContactFileParser):
    """Parser for CCMpred contact-matrix predictions."""

    def __init__(self):
        super(CCMpredParser, self).__init__()

    def read(self, f_handle, f_id="ccmpred"):
        """Read a CCMpred contact matrix.

        Parameters
        ----------
        f_handle
            Open file handle [read permissions]
        f_id : str, optional
            Unique contact file identifier

        Returns
        -------
        :obj:`~conkit.core.contactfile.ContactFile`

        """
        contact_file = ContactFile(f_id)
        contact_file.method = "Contact map predicted using CCMpred"
        contact_map = ContactMap("map_1")
        contact_file.add(contact_map)

        # Load the raw score matrix; an empty file yields an empty map.
        mat = np.loadtxt(f_handle)
        if mat.size > 0:
            res1_idx, res2_idx = self._get_contact_pairs(mat)
            for res1_seq, res2_seq, raw_score in zip(res1_idx, res2_idx, mat[(res1_idx, res2_idx)]):
                # Keep only the upper triangle to avoid duplicate pairs.
                if res1_seq > res2_seq:
                    continue
                # Matrix indices are 0-based; residue numbering is 1-based.
                contact_map.add(Contact(int(res1_seq + 1), int(res2_seq + 1), float(raw_score)))

        return contact_file

    def _get_contact_pairs(self, mat):
        """Return all (res1, res2) index pairs sorted by descending score.

        Parameters
        ----------
        mat : :obj:`~numpy.ndarray`
            A :mod:`numpy` matrix

        Returns
        -------
        tuple
            Two index arrays (uint16) addressing the matrix entries in
            descending-score order.

        """
        # Flat indices of the matrix, highest score first.
        flat_order = mat.argsort(axis=None)[::-1]
        res1 = (flat_order % mat.shape[0]).astype(np.uint16)
        res2 = np.floor(flat_order / mat.shape[0]).astype(np.uint16)
        return res1, res2

    def write(self, f_handle, hierarchy):
        """Write a contact file instance to file.

        Parameters
        ----------
        f_handle
            Open file handle [write permissions]
        hierarchy : :obj:`~conkit.core.contactfile.ContactFile`, :obj:`~conkit.core.contactmap.ContactMap`
                    or :obj:`~conkit.core.contact.Contact`

        Raises
        ------
        :exc:`RuntimeError`
           More than one contact map in the hierarchy
        :exc:`TypeError`
           Python3 requires f_handle to be in `wb` or `ab` mode

        """
        # Python3 support requires bytes mode
        if sys.version_info.major == 3 and not (f_handle.mode == "wb" or f_handle.mode == "ab"):
            raise TypeError("Python3 requires f_handle to be in 'wb' or 'ab' mode")

        # Normalize whatever hierarchy level the caller handed us.
        contact_file = self._reconstruct(hierarchy)
        if len(contact_file) > 1:
            raise RuntimeError("More than one contact map provided")

        for contact_map in contact_file:
            # Matrix edge length = highest residue number present.
            len_mat = max([c.res1_seq for c in contact_map] + [c.res2_seq for c in contact_map])
            mat = np.zeros((len_mat, len_mat), np.float64)
            for contact in contact_map:
                i, j = contact.res1_seq - 1, contact.res2_seq - 1
                # Scores are written symmetrically.
                mat[i][j] = contact.raw_score
                mat[j][i] = contact.raw_score
            np.savetxt(f_handle, mat, delimiter="\t")
        return
| [
"numpy.floor",
"conkit.core.contactfile.ContactFile",
"numpy.zeros",
"conkit.core.contactmap.ContactMap",
"numpy.savetxt",
"numpy.loadtxt"
] | [((2494, 2511), 'conkit.core.contactfile.ContactFile', 'ContactFile', (['f_id'], {}), '(f_id)\n', (2505, 2511), False, 'from conkit.core.contactfile import ContactFile\n'), ((2602, 2621), 'conkit.core.contactmap.ContactMap', 'ContactMap', (['"""map_1"""'], {}), "('map_1')\n", (2612, 2621), False, 'from conkit.core.contactmap import ContactMap\n'), ((2739, 2759), 'numpy.loadtxt', 'np.loadtxt', (['f_handle'], {}), '(f_handle)\n', (2749, 2759), True, 'import numpy as np\n'), ((4934, 4974), 'numpy.zeros', 'np.zeros', (['(len_mat, len_mat)', 'np.float64'], {}), '((len_mat, len_mat), np.float64)\n', (4942, 4974), True, 'import numpy as np\n'), ((5197, 5238), 'numpy.savetxt', 'np.savetxt', (['f_handle', 'mat'], {'delimiter': '"""\t"""'}), "(f_handle, mat, delimiter='\\t')\n", (5207, 5238), True, 'import numpy as np\n'), ((3663, 3696), 'numpy.floor', 'np.floor', (['(contacts / mat.shape[0])'], {}), '(contacts / mat.shape[0])\n', (3671, 3696), True, 'import numpy as np\n')] |
import math
import gym
import numpy as np
class GridWorldSearch(gym.Env):
    """Continuous 2-D goal-search environment.

    State is [x, y, vx, vy].  The action [ax, ay] is clipped to
    +/-action_lim, turned into a velocity (v = dt * a, clipped to
    +/-velocity_lim) and applied to the position, which is clipped to the
    arena bounds.  A reward of 100 is granted on every step the agent is
    within goal_threshold of the goal; the episode ends after max_len steps.
    """

    metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 30}

    def __init__(self, xwidth=50, ywidth=50, velocity_lim=5, random_goal=False,
                 goal_position=(30, -45), action_lim=1, goal_threshold=1, max_len=1000):
        self.xwidth = xwidth
        self.ywidth = ywidth
        self.velocity_lim = velocity_lim
        self.random_goal = random_goal
        self.goal_position = goal_position
        self.action_lim = action_lim
        self.goal_threshold = goal_threshold
        self.max_len = max_len
        self.num_steps = 0
        self.dt = 1
        self.low_state = np.array([-self.xwidth, -self.ywidth,
                                   -self.velocity_lim, -self.velocity_lim])
        self.high_state = np.array([self.xwidth, self.ywidth,
                                    self.velocity_lim, self.velocity_lim])
        self.action_space = gym.spaces.Box(low=-self.action_lim, high=self.action_lim,
                                           shape=(2,), dtype=np.float32)
        self.observation_space = gym.spaces.Box(low=self.low_state, high=self.high_state,
                                                dtype=np.float32)
        self.viewer = None
        # Seed before any sampling so a randomly drawn goal is reproducible.
        self.seed()
        if self.random_goal:
            # BUG FIX: np.random.uniform has no `shape` keyword (the old call
            # raised TypeError); use `size` and draw from the seeded RNG.
            self.goal_position = self.np_random.uniform(
                low=np.array([-self.xwidth, -self.ywidth]),
                high=np.array([self.xwidth, self.ywidth]),
                size=(2,))
        self.goalx, self.goaly = self.goal_position
        self.reset()
        # Start from a deterministic zero state until the caller resets.
        self.state = np.zeros([4, ])
        self.done = False

    def seed(self, seed=None):
        """Seed the environment's private RNG; returns [seed]."""
        self.np_random, seed = gym.utils.seeding.np_random(seed)
        return [seed]

    def reset(self):
        """Reset to a near-origin state and restart the step counter."""
        # BUG FIX: a mid-episode reset previously kept the old step count.
        self.num_steps = 0
        self.state = self.np_random.normal(0, 0.01, size=(4,))
        self.done = False
        return self.state

    def step(self, action):
        """Apply one clipped action; returns (state, reward, done, info)."""
        a_x, a_y = action
        # clip the action to the allowed range
        a_x = min(max(a_x, -self.action_lim), self.action_lim)
        a_y = min(max(a_y, -self.action_lim), self.action_lim)
        xpos, ypos, velx, vely = self.state
        # NOTE: velocity is set directly from the action (not integrated),
        # then clipped to the velocity limit.
        velx = min(max(self.dt * a_x, -self.velocity_lim), self.velocity_lim)
        vely = min(max(self.dt * a_y, -self.velocity_lim), self.velocity_lim)
        # apply the update and clip positions to the arena
        xpos = min(max(xpos + velx, -self.xwidth), self.xwidth)
        ypos = min(max(ypos + vely, -self.ywidth), self.ywidth)
        # goal reward (the episode does not terminate on goal reach)
        reward = 0
        if abs(xpos - self.goalx) <= self.goal_threshold and \
                abs(ypos - self.goaly) <= self.goal_threshold:
            reward = 100
        # episode length bookkeeping
        self.num_steps += 1
        if self.num_steps >= self.max_len:
            self.done = True
            self.num_steps = 0
        self.state = np.array([xpos, ypos, velx, vely]).reshape((4,))
        return self.state, reward, self.done, {}

    def render(self, mode="human"):
        """Rendering is not supported for this environment."""
        raise NotImplementedError("Rendering not implemented for this custom environment")

    def close(self):
        """Release the viewer handle, if one was ever created."""
        if self.viewer:
            self.viewer.close()
            self.viewer = None
| [
"numpy.random.normal",
"gym.spaces.Box",
"numpy.array",
"numpy.zeros",
"gym.utils.seeding.np_random"
] | [((677, 755), 'numpy.array', 'np.array', (['[-self.xwidth, -self.ywidth, -self.velocity_lim, -self.velocity_lim]'], {}), '([-self.xwidth, -self.ywidth, -self.velocity_lim, -self.velocity_lim])\n', (685, 755), True, 'import numpy as np\n'), ((780, 854), 'numpy.array', 'np.array', (['[self.xwidth, self.ywidth, self.velocity_lim, self.velocity_lim]'], {}), '([self.xwidth, self.ywidth, self.velocity_lim, self.velocity_lim])\n', (788, 854), True, 'import numpy as np\n'), ((881, 973), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(-self.action_lim)', 'high': 'self.action_lim', 'shape': '(2,)', 'dtype': 'np.float32'}), '(low=-self.action_lim, high=self.action_lim, shape=(2,),\n dtype=np.float32)\n', (895, 973), False, 'import gym\n'), ((1002, 1076), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': 'self.low_state', 'high': 'self.high_state', 'dtype': 'np.float32'}), '(low=self.low_state, high=self.high_state, dtype=np.float32)\n', (1016, 1076), False, 'import gym\n'), ((1397, 1410), 'numpy.zeros', 'np.zeros', (['[4]'], {}), '([4])\n', (1405, 1410), True, 'import numpy as np\n'), ((1499, 1532), 'gym.utils.seeding.np_random', 'gym.utils.seeding.np_random', (['seed'], {}), '(seed)\n', (1526, 1532), False, 'import gym\n'), ((1598, 1634), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.01)'], {'size': '(4,)'}), '(0, 0.01, size=(4,))\n', (1614, 1634), True, 'import numpy as np\n'), ((2741, 2775), 'numpy.array', 'np.array', (['[xpos, ypos, velx, vely]'], {}), '([xpos, ypos, velx, vely])\n', (2749, 2775), True, 'import numpy as np\n'), ((1189, 1227), 'numpy.array', 'np.array', (['[-self.xwidth, -self.ywidth]'], {}), '([-self.xwidth, -self.ywidth])\n', (1197, 1227), True, 'import numpy as np\n'), ((1233, 1269), 'numpy.array', 'np.array', (['[self.xwidth, self.ywidth]'], {}), '([self.xwidth, self.ywidth])\n', (1241, 1269), True, 'import numpy as np\n')] |
import sys
from dataclasses import dataclass
import numpy as np
from scipy import spatial
from utils import MinHeap, Quadric, Plane
def quadric_error_function(src, tgt, halfedge):
    """Return (position, cost) for contracting the pair (src, tgt).

    If the pair lies on a mesh boundary, a heavily weighted boundary
    quadric is first added to both vertices to discourage moving the
    boundary.  The optimal position minimizes the summed quadric; when
    the quadric is singular, the cheapest of src, tgt, or their midpoint
    is returned instead.
    """
    # Boundary edge: penalize with a plane perpendicular to the boundary face.
    if halfedge is not None and (halfedge.is_boundary() or halfedge.twin.is_boundary()):
        # Use the normal of whichever side of the edge still has a face.
        if halfedge.is_boundary():
            face_normal = halfedge.twin.face.normal
        else:
            face_normal = halfedge.face.normal
        perp_normal = np.cross(tgt.XYZ - src.XYZ, face_normal)
        penalty = Quadric(Plane.from_pt_and_normal(perp_normal, src.XYZ),
                          weight=1e3)
        src.quadric += penalty
        tgt.quadric += penalty

    # The pair's quadric is the sum of both vertex quadrics.
    combined = src.quadric + tgt.quadric
    opt_xyz, opt_cost = combined.optimal_point()
    if opt_xyz is not None:
        return opt_xyz, opt_cost

    # Singular quadric: fall back to the cheapest of src / tgt / midpoint.
    mid_pt = (src.XYZ + tgt.XYZ) / 2
    src_cost = combined.apply(src.XYZ)
    tgt_cost = combined.apply(tgt.XYZ)
    mid_cost = combined.apply(mid_pt)
    if src_cost < tgt_cost and src_cost < mid_cost:
        return src.XYZ, src_cost
    if tgt_cost < src_cost and tgt_cost < mid_cost:
        return tgt.XYZ, tgt_cost
    return mid_pt, mid_cost
class Pair:
    """A candidate contraction pair of vertices (edge-connected or not)."""

    def __init__(self,
                 src: "HEVertex",
                 tgt: "HEVertex",
                 halfedge: "HalfEdge" = None,
                 cost_function=quadric_error_function):
        self.src = src
        self.tgt = tgt
        self.halfedge = halfedge  # None if the vertices are not connected
        # Optimal contraction position and its quadric-error cost
        self.opt_xyz, self.cost = cost_function(src, tgt, halfedge)

    def __lt__(self, other):
        # Ordering by cost lets Pair objects live directly in a min-heap
        return self.cost < other.cost

    def is_safe_merge(self):
        """Return True if contracting this pair preserves mesh topology."""
        # Unconnected vertices are generally safe to merge
        if self.halfedge is None:
            return True
        # Connected vertices must have exactly two edge-adjacent vertices in
        # common, so intersect the two adjacency id sets.
        src_adj_vx_id = {vx.id for vx in self.src.adjacent_vertices()}
        tgt_adj_vx_id = {vx.id for vx in self.tgt.adjacent_vertices()}
        return len(src_adj_vx_id & tgt_adj_vx_id) == 2

    def merge_vertices(self):
        """Contract the pair: move tgt's halfedges onto src, reposition src.

        Returns:
            (removed_faces, removed_halfedges, removed_vertices): mesh
            elements the caller must drop from its containers.
        """
        # Bug fix: the original referenced bare `src`/`tgt`, raising NameError
        src, tgt = self.src, self.tgt
        # =======================================
        # Note elements to remove
        # =======================================
        # The two faces incident to the edge vanish (only for edge collapses)
        if self.halfedge is not None:
            removed_faces = [self.halfedge.face, self.halfedge.twin.face]
        else:
            removed_faces = []
        # Halfedges interior to the removed faces also vanish
        removed_halfedges = []
        for face in removed_faces:
            for he in face.adjacent_halfedges():
                removed_halfedges.append(he)
        # Only the tgt vertex disappears
        removed_vertices = [tgt]
        # =======================================
        # Re-link halfedges
        # =======================================
        # Re-origin tgt's surviving outgoing halfedges onto src and refresh
        # src's halfedge pointer (its old one may be removed below). Guard
        # against empty removed_faces -- the original indexed
        # removed_faces[0]/[1] unconditionally, crashing for unconnected pairs.
        removed_face_ids = {face.id for face in removed_faces}
        for out_he in tgt.outgoing_halfedges():
            if out_he.face.id not in removed_face_ids:
                out_he.src = src
                src.halfedge = out_he
        if self.halfedge is not None:
            # For each removed face, the two exterior twins become twins of
            # each other (a picture really helps here).
            for he in (self.halfedge, self.halfedge.twin):
                inner = he.next_edge.twin
                outer = he.next_edge.next_edge.twin
                # Bug fix: the original read `x.twin.twin` *after* writing it,
                # making its second assignment a no-op; capture both sides
                # before mutating.
                inner.twin = outer
                outer.twin = inner
                # Point the wing vertex at a halfedge guaranteed to survive
                he.next_edge.tgt.halfedge = inner
        # =======================================
        # Update src vertex
        # =======================================
        src.XYZ = self.opt_xyz
        return removed_faces, removed_halfedges, removed_vertices
class EdgeCollapse:
    """Iterative mesh simplification via quadric-error pair contraction."""

    def __init__(self, threshold=0.0):
        # Min heap for the priority queue of candidate pairs, keyed on cost
        self.min_heap = MinHeap(key=lambda x: x.cost)
        # Distance under which unconnected boundary vertices also form pairs
        self.threshold = threshold
        self.kdtree_leaf_size = 1000

    def create_pair(self, mesh, src, src_idx, tgt, tgt_idx):
        """Build a Pair for two vertices by minimizing the summed quadric."""
        # Optimal quadric is the sum of the src and tgt quadrics
        opt_quadric = src.quadric + tgt.quadric
        # NOTE(review): this treats the quadric as a raw matrix, unlike
        # quadric_error_function which goes through Quadric.optimal_point();
        # confirm Quadric objects support np.linalg directly.
        # If summed quadric is invertible, compute optimal vertex solution
        if np.linalg.cond(opt_quadric) < 1 / sys.float_info.epsilon:
            opt_v = np.linalg.inv(opt_quadric)[:, -1]
        # Otherwise, take the homogeneous midpoint of the pair
        # TODO(Marc): Better minimizer?
        else:
            # Bug fix: ndarray has no .append() (and list.append returns
            # None) -- build the homogeneous coordinate with np.append.
            opt_v = np.append((src.XYZ + tgt.XYZ) / 2.0, 1.0)
        # Cost is the quadric form evaluated at the homogeneous point
        cost = opt_v.dot(opt_quadric).dot(opt_v)
        # De-homogenize back to 3D
        opt_v = opt_v[:3] / opt_v[-1]
        # Create a pair object. Always store edge vertices as ascending tuple.
        # NOTE(review): this call does not match Pair.__init__(src, tgt,
        # halfedge, cost_function); this method looks like legacy code --
        # verify before use.
        pair = Pair(src_idx if src_idx < tgt_idx else tgt_idx,
                    tgt_idx if src_idx < tgt_idx else src_idx, opt_v, cost)
        return pair

    def contract_edge(self, mesh, src_idx, tgt_idx, opt_XYZ):
        """
        Changes the XYZ of src_idx to be the new optimal value, attaches all halfedges of tgt_idx to those at src_idx, and removes the tgt_idx vertex
        """
        # NOTE(review): incomplete implementation -- only moves the src
        # vertex; the halfedge re-attachment below was never written.
        # Get the vertex elements
        src = mesh.vertex(src_idx)
        tgt = mesh.vertex(tgt_idx)
        # Change the src vertex to the new location
        src.XYZ = opt_XYZ
        # for outmesh.vertex_outgoing_halfedges(tgt_idx)

    def collapse(self, mesh):
        """Run pair selection and contraction on the mesh in place."""
        # ======================================================================
        # 1. Compute the error quadrics for all vertices
        # ======================================================================
        for _, v in mesh.vertices.items():
            v.compute_quadric()
            v.pairs = []  # Create list to hold pairs
        # ======================================================================
        # 2. Create the set of valid pairs defined by edge relations
        # ======================================================================
        edge_set = set()
        for _, he in mesh.halfedges.items():
            # Use a canonical (ascending vertex id) ordering for edges
            if he.src.id < he.tgt.id:
                src = he.src
                tgt = he.tgt
            else:
                src = he.tgt
                tgt = he.src
            edge = (src, tgt)
            # Skip edges already represented (each edge has two halfedges)
            if edge in edge_set:
                continue
            pair = Pair(src, tgt, he)
            # Add pair to priority queue
            self.min_heap.push(pair)
            # Also track the pair from within the vertices
            src.pairs.append(pair)
            tgt.pairs.append(pair)
            edge_set.add(edge)
        # ======================================================================
        # 3. Optionally pair up nearby but unconnected boundary vertices
        # ======================================================================
        if (self.threshold > 0.0) and (mesh.num_boundary_vertices() > 0):
            # Gather the boundary vertices and their indices (only these are
            # candidates for non-edge contraction)
            boundary_pts = np.array([
                v.XYZ for vx_id, v in mesh.vertices.items()
                if v.is_boundary_vertex()
            ])
            ids = np.array([
                vx_id for vx_id, v in mesh.vertices.items()
                if v.is_boundary_vertex()
            ])
            # KDTree for fast spatial lookup
            kdtree = spatial.KDTree(boundary_pts,
                                   leafsize=self.kdtree_leaf_size)
            # Query the boundary vertices against themselves for neighbors
            # within the threshold distance
            queries = kdtree.query_ball_tree(kdtree, r=self.threshold)
            # Go through each matched vertex (skipping the first match, which
            # is the query point itself)
            for src_id, matches in zip(ids, queries):
                for tgt_id in matches[1:]:
                    # Use a canonical (ascending vertex id) ordering
                    if src_id < tgt_id:
                        src = mesh.vertex(src_id)
                        tgt = mesh.vertex(tgt_id)
                    else:
                        src = mesh.vertex(tgt_id)
                        tgt = mesh.vertex(src_id)
                    edge = (src, tgt)
                    if edge in edge_set:
                        continue
                    # Unconnected pair: no halfedge argument
                    pair = Pair(src, tgt)
                    self.min_heap.push(pair)
                    src.pairs.append(pair)
                    tgt.pairs.append(pair)
                    edge_set.add(edge)
        # ======================================================================
        # 4. Contract the cheapest safe pairs
        # ======================================================================
        # NOTE(review): the fixed iteration count of 3 looks like a
        # development placeholder -- confirm the intended stopping criterion.
        for _ in range(3):
            min_cost_pair = self.min_heap.pop()
            if not min_cost_pair.is_safe_merge():
                continue
            # Bug fix: the original called `pair.merge_vertices()`, acting on
            # the stale loop variable from the setup loops above instead of
            # the pair just popped from the heap.
            removed_faces, removed_halfedges, removed_vertices = (
                min_cost_pair.merge_vertices())
            for face in removed_faces:
                mesh.faces.pop(face.id)
            for halfedge in removed_halfedges:
                mesh.halfedges.pop(halfedge.id)
            for vertex in removed_vertices:
                mesh.vertices.pop(vertex.id)
"numpy.cross",
"numpy.linalg.cond",
"ipdb.set_trace",
"utils.MinHeap",
"scipy.spatial.KDTree",
"utils.Plane.from_pt_and_normal",
"numpy.linalg.inv"
] | [((5895, 5924), 'utils.MinHeap', 'MinHeap', ([], {'key': '(lambda x: x.cost)'}), '(key=lambda x: x.cost)\n', (5902, 5924), False, 'from utils import MinHeap, Quadric, Plane\n'), ((10877, 10893), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (10891, 10893), False, 'import ipdb\n'), ((747, 787), 'numpy.cross', 'np.cross', (['(tgt.XYZ - src.XYZ)', 'face_normal'], {}), '(tgt.XYZ - src.XYZ, face_normal)\n', (755, 787), True, 'import numpy as np\n'), ((6261, 6288), 'numpy.linalg.cond', 'np.linalg.cond', (['opt_quadric'], {}), '(opt_quadric)\n', (6275, 6288), True, 'import numpy as np\n'), ((9364, 9424), 'scipy.spatial.KDTree', 'spatial.KDTree', (['boundary_pts'], {'leafsize': 'self.kdtree_leaf_size'}), '(boundary_pts, leafsize=self.kdtree_leaf_size)\n', (9378, 9424), False, 'from scipy import spatial\n'), ((893, 943), 'utils.Plane.from_pt_and_normal', 'Plane.from_pt_and_normal', (['boundary_normal', 'src.XYZ'], {}), '(boundary_normal, src.XYZ)\n', (917, 943), False, 'from utils import MinHeap, Quadric, Plane\n'), ((6339, 6365), 'numpy.linalg.inv', 'np.linalg.inv', (['opt_quadric'], {}), '(opt_quadric)\n', (6352, 6365), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.