code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np #(activate this if CPU is used)
# import cupy as np #(activate this if GPU is used)
from mlxtend.data import loadlocal_mnist
import json
def Read_MNIST(par, Agent):
################################################################################################################################################
##### MNIST
##### type=np.ndarray (x_train: 60000 x 784 (float:0.~1.), y_train: 60000 x 1 (int:0~9), ... )
################################################################################################################################################
par.num_features = 784 # 28*28
par.num_classes = 10 # 0 to 9 digits
par.split_number = int(Agent)
# Load MNIST
x_train, y_train = loadlocal_mnist(images_path='./Inputs/train-images-idx3-ubyte', labels_path='./Inputs/train-labels-idx1-ubyte')
x_test, y_test = loadlocal_mnist(images_path='./Inputs/t10k-images-idx3-ubyte', labels_path='./Inputs/t10k-labels-idx1-ubyte')
# Convert to float32.
x_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32)
y_train, y_test = np.array(y_train), np.array(y_test)
# Flatten images to 1-D vector of 784 features (28*28).
x_train, x_test = x_train.reshape([-1, par.num_features]), x_test.reshape([-1, par.num_features])
# Normalize images value from [0, 255] to [0, 1].
x_train, x_test = x_train / 255., x_test / 255.
# Split data per agent
x_train_agent = np.split(x_train, par.split_number)
y_train_agent = np.split(y_train, par.split_number)
x_train_agent, y_train_agent = np.array(x_train_agent), np.array(y_train_agent)
x_list = []; y_list = [];
for p in range(par.split_number):
x_list.append(x_train_agent[p])
y_list.append(y_train_agent[p])
x_train_new = np.concatenate( np.array(x_list) )
y_train_new = np.concatenate( np.array(y_list) )
return x_test, y_test, x_train_new, y_train_new, x_train_agent, y_train_agent
def Read_FEMNIST(par, Agent):
################################################################################################################################################
##### FEMNIST (datatype=dict)
##### Example: "users": ["f3795_00", "f3608_13"], "num_samples": [149, 162], "user_data": {"f3795_00": {"x": [], ..., []}, "y": [4, ..., 31]},
################################################################################################################################################
par.num_features = 784 # 28*28
par.num_classes = 62 # 0 to 9 digits + alphabet (26 + 26)
TS = 36 ## TS <= 36 for FEMNIST dataset
train_data={}; x_train_agent={}; y_train_agent={};
tmp_x_train=[]; tmp_y_train=[];
tmpcnt = 0
for testset in range(TS):
with open('./Inputs/Femnist_Train_%s/all_data_%s_niid_05_keep_0_train_9.json'%(Agent,testset)) as f:
train_data[testset] = json.load(f)
for user in train_data[testset]["users"]:
Temp_x_train = []; Temp_y_train = [];
## x_train
for x_elem in train_data[testset]["user_data"][user]["x"]:
tmp_x_train.append( 1.0-np.array(x_elem) )
Temp_x_train.append( 1.0-np.array(x_elem) )
Temp_x_train=np.array( Temp_x_train,np.float32 )
x_train_agent[tmpcnt] = Temp_x_train
## y_train
for y_elem in train_data[testset]["user_data"][user]["y"]:
tmp_y_train.append( np.array(y_elem) )
Temp_y_train.append( np.array(y_elem) )
Temp_y_train=np.array( Temp_y_train,np.uint8 )
y_train_agent[tmpcnt] = Temp_y_train
tmpcnt += 1
x_train_new = np.array(tmp_x_train,np.float32)
y_train_new = np.array(tmp_y_train,np.uint8)
x_train_new = np.array(tmp_x_train,np.float32)
y_train_new = np.array(tmp_y_train,np.uint8)
par.split_number = tmpcnt
## Testing
test_data={}
temp_x_test=[]; temp_y_test=[]; total_num_test_data=0;
for testset in range(TS):
with open('./Inputs/Femnist_Test_%s/all_data_%s_niid_05_keep_0_test_9.json'%(Agent,testset)) as f:
test_data[testset] = json.load(f)
for user in test_data[testset]["users"]:
total_num_test_data += len(test_data[testset]["user_data"][user]["y"])
## x_test
for x_elem in test_data[testset]["user_data"][user]["x"]:
temp_x_test.append( 1.0-np.array(x_elem) )
## y_test
for y_elem in test_data[testset]["user_data"][user]["y"]:
temp_y_test.append( y_elem )
x_test = np.array(temp_x_test,np.float32)
y_test = np.array(temp_y_test,np.uint8)
return x_test, y_test, x_train_new, y_train_new, x_train_agent, y_train_agent | [
"json.load",
"mlxtend.data.loadlocal_mnist",
"numpy.array",
"numpy.split"
] | [((734, 850), 'mlxtend.data.loadlocal_mnist', 'loadlocal_mnist', ([], {'images_path': '"""./Inputs/train-images-idx3-ubyte"""', 'labels_path': '"""./Inputs/train-labels-idx1-ubyte"""'}), "(images_path='./Inputs/train-images-idx3-ubyte', labels_path\n ='./Inputs/train-labels-idx1-ubyte')\n", (749, 850), False, 'from mlxtend.data import loadlocal_mnist\n'), ((865, 979), 'mlxtend.data.loadlocal_mnist', 'loadlocal_mnist', ([], {'images_path': '"""./Inputs/t10k-images-idx3-ubyte"""', 'labels_path': '"""./Inputs/t10k-labels-idx1-ubyte"""'}), "(images_path='./Inputs/t10k-images-idx3-ubyte', labels_path=\n './Inputs/t10k-labels-idx1-ubyte')\n", (880, 979), False, 'from mlxtend.data import loadlocal_mnist\n'), ((1450, 1485), 'numpy.split', 'np.split', (['x_train', 'par.split_number'], {}), '(x_train, par.split_number)\n', (1458, 1485), True, 'import numpy as np\n'), ((1504, 1539), 'numpy.split', 'np.split', (['y_train', 'par.split_number'], {}), '(y_train, par.split_number)\n', (1512, 1539), True, 'import numpy as np\n'), ((3639, 3672), 'numpy.array', 'np.array', (['tmp_x_train', 'np.float32'], {}), '(tmp_x_train, np.float32)\n', (3647, 3672), True, 'import numpy as np\n'), ((3688, 3719), 'numpy.array', 'np.array', (['tmp_y_train', 'np.uint8'], {}), '(tmp_y_train, np.uint8)\n', (3696, 3719), True, 'import numpy as np\n'), ((3736, 3769), 'numpy.array', 'np.array', (['tmp_x_train', 'np.float32'], {}), '(tmp_x_train, np.float32)\n', (3744, 3769), True, 'import numpy as np\n'), ((3785, 3816), 'numpy.array', 'np.array', (['tmp_y_train', 'np.uint8'], {}), '(tmp_y_train, np.uint8)\n', (3793, 3816), True, 'import numpy as np\n'), ((4540, 4573), 'numpy.array', 'np.array', (['temp_x_test', 'np.float32'], {}), '(temp_x_test, np.float32)\n', (4548, 4573), True, 'import numpy as np\n'), ((4584, 4615), 'numpy.array', 'np.array', (['temp_y_test', 'np.uint8'], {}), '(temp_y_test, np.uint8)\n', (4592, 4615), True, 'import numpy as np\n'), ((1022, 1051), 'numpy.array', 'np.array', 
(['x_train', 'np.float32'], {}), '(x_train, np.float32)\n', (1030, 1051), True, 'import numpy as np\n'), ((1053, 1081), 'numpy.array', 'np.array', (['x_test', 'np.float32'], {}), '(x_test, np.float32)\n', (1061, 1081), True, 'import numpy as np\n'), ((1104, 1121), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (1112, 1121), True, 'import numpy as np\n'), ((1123, 1139), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (1131, 1139), True, 'import numpy as np\n'), ((1575, 1598), 'numpy.array', 'np.array', (['x_train_agent'], {}), '(x_train_agent)\n', (1583, 1598), True, 'import numpy as np\n'), ((1600, 1623), 'numpy.array', 'np.array', (['y_train_agent'], {}), '(y_train_agent)\n', (1608, 1623), True, 'import numpy as np\n'), ((1800, 1816), 'numpy.array', 'np.array', (['x_list'], {}), '(x_list)\n', (1808, 1816), True, 'import numpy as np\n'), ((1851, 1867), 'numpy.array', 'np.array', (['y_list'], {}), '(y_list)\n', (1859, 1867), True, 'import numpy as np\n'), ((2873, 2885), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2882, 2885), False, 'import json\n'), ((3221, 3255), 'numpy.array', 'np.array', (['Temp_x_train', 'np.float32'], {}), '(Temp_x_train, np.float32)\n', (3229, 3255), True, 'import numpy as np\n'), ((3513, 3545), 'numpy.array', 'np.array', (['Temp_y_train', 'np.uint8'], {}), '(Temp_y_train, np.uint8)\n', (3521, 3545), True, 'import numpy as np\n'), ((4095, 4107), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4104, 4107), False, 'import json\n'), ((3424, 3440), 'numpy.array', 'np.array', (['y_elem'], {}), '(y_elem)\n', (3432, 3440), True, 'import numpy as np\n'), ((3473, 3489), 'numpy.array', 'np.array', (['y_elem'], {}), '(y_elem)\n', (3481, 3489), True, 'import numpy as np\n'), ((3110, 3126), 'numpy.array', 'np.array', (['x_elem'], {}), '(x_elem)\n', (3118, 3126), True, 'import numpy as np\n'), ((3163, 3179), 'numpy.array', 'np.array', (['x_elem'], {}), '(x_elem)\n', (3171, 3179), True, 'import numpy as np\n'), ((4367, 4383), 
'numpy.array', 'np.array', (['x_elem'], {}), '(x_elem)\n', (4375, 4383), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu May 13 13:43:45 2021
@author: Hatlab_3
"""
import numpy as np
import matplotlib.pyplot as plt
import sympy as sp
from data_processing.models.SNAIL_supporting_modules.Participation_and_Alpha_Fitter import slider_fit
from data_processing.fitting.QFit import fit, plotRes
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
from timeit import default_timer as timer
from data_processing.Helper_Functions import find_all_ddh5
from data_processing.ddh5_Plotting.TACO_multiplot_b1 import superTACO_Bars
import pandas as pd
from scipy.optimize import curve_fit
from scipy.signal import savgol_filter
def find_quanta(currents, res_freqs, smooth_window = 0):
if smooth_window != 0:
res_freqs = savgol_filter(res_freqs, smooth_window, 2)
max_res_current = currents[np.argmax(res_freqs)]
min_res_current = currents[np.argmin(res_freqs)]
quanta_size = 2*np.abs(min_res_current - max_res_current)
quanta_offset = max_res_current
current_to_quanta_conversion_function = lambda c: (c-quanta_offset)/quanta_size
quanta_to_current_function = lambda q: q*quanta_size+quanta_offset
return quanta_size, quanta_offset, current_to_quanta_conversion_function, quanta_to_current_function
def parallel(v1, v2):
return 1/(1/v1+1/v2)
def get_phi_min_funcs(alpha, phi_ext_arr):
a, Ej, phi_s, phi_e, phi_m = sp.symbols('alpha,E_j,phi_s,phi_e, phi_min')
U_snail_norm = -a*sp.cos(phi_s) - 3*sp.cos((phi_e-phi_s)/3)
c1 = sp.series(U_snail_norm, phi_s, x0 = phi_m, n = 2).removeO().coeff((phi_s-phi_m))
#generate a lambda function that outputs another lambda function for a given phi_ext
#which then depends on phi_m only
func_arr = []
for phi_ext in phi_ext_arr:
c1_num = sp.lambdify(phi_m, c1.subs(a, alpha).subs(phi_e, phi_ext), "numpy")
func_arr.append(c1_num)
return func_arr
def get_phi_min_fsolve(alpha, phi_ext_arr):
funcs = get_phi_min_funcs(alpha, phi_ext_arr)
sol_arr = np.ones(np.size(funcs))
for i, func in enumerate(funcs):
sol_arr[i] = fsolve(func, phi_ext_arr[i])
return sol_arr
def get_phi_min(alpha, phi_ext):
func = get_phi_min_funcs(alpha, [phi_ext])[0]
return(fsolve(func, phi_ext)[0])
def c4_func_gen_vectorize(alpha_val): #can be fed an array
a, Ej, phi_s, phi_e, phi_m = sp.symbols('alpha,E_j,phi_s,phi_e, phi_min')
U_snail = (-a*sp.cos(phi_s) - 3*sp.cos((phi_e-phi_s)/3))
expansion = sp.series(U_snail, phi_s, x0 = phi_m, n = 5)
coeff = expansion.removeO().coeff(sp.Pow(phi_s-phi_m, 4))*24
c4exp = lambda phi_ext: coeff.subs([(a, alpha_val), (phi_e, phi_ext), (phi_m, get_phi_min(alpha_val, phi_ext))])
return np.vectorize(c4exp)
def c3_func_gen_vectorize(alpha_val): #can be fed an array
a, Ej, phi_s, phi_e, phi_m = sp.symbols('alpha,E_j,phi_s,phi_e, phi_min')
U_snail = (-a*sp.cos(phi_s) - 3*sp.cos((phi_e-phi_s)/3))
expansion = sp.series(U_snail, phi_s, x0 = phi_m, n = 4)
coeff = expansion.removeO().coeff(sp.Pow(phi_s-phi_m, 3))*6
c3exp = lambda phi_ext: coeff.subs([(a, alpha_val), (phi_e, phi_ext), (phi_m, get_phi_min(alpha_val, phi_ext))])
return np.vectorize(c3exp)
def c2_func_gen_vectorize(alpha_val):
a, Ej, phi_s, phi_e, phi_m = sp.symbols('alpha,E_j,phi_s,phi_e, phi_min')
U_snail = (-a*sp.cos(phi_s) - 3*sp.cos((phi_e-phi_s)/3))
expansion = sp.series(U_snail, phi_s, x0 = phi_m, n = 3)
coeff = expansion.removeO().coeff(sp.Pow(phi_s-phi_m, 2))*2
c2exp = lambda phi_ext: coeff.subs([(a, alpha_val), (phi_e, phi_ext), (phi_m, get_phi_min(alpha_val, phi_ext))])
return np.vectorize(c2exp)
class SnailAmp():
def __init__(self): #uA/um^2
'''
Parameters
----------
junction_sizes : tuple
(small_size, large_size) in micrometers squared
quanta_start : float
0-flux point in Amps
quanta_size : float
quanta ize in Amps
Returns
-------
None.
'''
self.hbar = 1.0545718e-34
self.e = 1.60218e-19
self.phi0 = 2*np.pi*self.hbar/(2*self.e)
def generate_quanta_function(self, quanta_offset, quanta_size):
#function for converting bias currents to quanta fractions
self.quanta_offset = quanta_offset
self.quanta_size = quanta_size
self.conv_func = lambda c: (c-quanta_offset)/quanta_size
def info_from_junction_sizes(self, junction_sizes, res = 100, Jc = 0.8, verbose = False):
self.s_size, self.l_size = junction_sizes
self.alpha_from_sizes = self.s_size/self.l_size
self.I0s, self.I0l = Jc*self.s_size*1e-6, Jc*self.l_size*1e-6
self.Lss, self.Lsl = self.Ic_to_Lj(self.I0s), self.Ic_to_Lj(self.I0l)
self.Ejs, self.Ejl = self.Ic_to_Ej(self.I0s), self.Ic_to_Ej(self.I0l)
self.Ls0 = parallel(self.Lss, self.Lsl)
self.c2_func, self.c3_func, self.c4_func = self.generate_coefficient_functions(self.alpha_from_sizes, res = res, verbose = False)
return self.c2_func, self.c3_func, self.c4_func
def info_from_junction_i0(self, junction_i0_small, junction_i0_large, res = 100, Jc = 0.8, verbose = False):
'''
junction_i0_small: junction critical current in A
junction_i0_large: junction critical current in A
'''
self.I0s, self.I0l = junction_i0_small, junction_i0_large
self.Lss, self.Lsl = self.Ic_to_Lj(self.I0s), self.Ic_to_Lj(self.I0l)
self.Ejs, self.Ejl = self.Ic_to_Ej(self.I0s), self.Ic_to_Ej(self.I0l)
self.alpha_from_i0 = self.Ejs/self.Ejl
self.c2_func, self.c3_func, self.c4_func = self.generate_coefficient_functions(self.alpha_from_i0, res = res, verbose = False)
return self.c2_func, self.c3_func, self.c4_func
def Ic_to_Ej(self, Ic: float):
'''
Parameters
----------
Ic : float
critical current in amps
Returns
-------
Ej in Joules
src: https://en.wikipedia.org/wiki/Josephson_effect
'''
return Ic*self.phi0/(2*np.pi)
def Ic_to_Lj(self, Ic: float):
'''
Parameters
----------
Ic : float
critical current in amps
Returns
-------
Lj in Henries
src: https://en.wikipedia.org/wiki/Josephson_effect
'''
return self.phi0/(2*np.pi*Ic)
def generate_coefficient_functions(self, alpha_val, res = int(100), plot = False, show_coefficients = False, verbose = False):
'''
Parameters
----------
alpha_val : float
alpha value between 0 and 0.33
res : int, optional
number of points to base interpolation off of. The default is 100.
Returns
-------
c2_func : lambda function
function that will return the value of c2
c3_func : lambda function
DESCRIPTION.
c4_func : lambda function
DESCRIPTION.
'''
if verbose:
print("Calculating expansion coefficients")
start_time = timer()
phi_ext_arr = np.linspace(0,2*np.pi, res)
c4_arr = c4_func_gen_vectorize(alpha_val)(phi_ext_arr)
end_time = timer()
if verbose:
print(f"Elapsed time: {np.round(end_time-start_time, 2)} seconds")
c4_func = interp1d(phi_ext_arr, c4_arr, 'quadratic')
#c3:
start_time = timer()
phi_ext_arr = np.linspace(0,2*np.pi, res)
c3_arr = c3_func_gen_vectorize(alpha_val)(phi_ext_arr)
end_time = timer()
if verbose:
print(f"Elapsed time: {np.round(end_time-start_time, 2)} seconds")
c3_func = interp1d(phi_ext_arr, c3_arr, 'quadratic')
#c2:
start_time = timer()
phi_ext_arr = np.linspace(0,2*np.pi, res)
c2_arr = c2_func_gen_vectorize(alpha_val)(phi_ext_arr)
end_time = timer()
if verbose:
print(f"Elapsed time: {np.round(end_time-start_time, 2)} seconds")
c2_func = interp1d(phi_ext_arr, c2_arr, 'quadratic')
if plot:
plt.plot(phi_ext_arr, self.c2_func(phi_ext_arr), label = "c2")
plt.plot(phi_ext_arr, self.c3_func(phi_ext_arr), label = "c3")
plt.plot(phi_ext_arr, self.c4_func(phi_ext_arr), label = 'c4')
plt.legend()
return c2_func, c3_func, c4_func
def gradient_descent_participation_fitter(self, fitted_res_func, initial_p_guess, initial_alpha_guess, init_f0_guess, res = 100, bounds = None):
'''
Parameters
----------
fitted_res_func : function:ndarray->ndarray
function which takes in flux fraction in [0, 1] and produces the resonant frequency of the experimental device
initial_p_guess: float
guess for the participation ratio of the SNAIL at 0-flux
initial_alpha_guess: float
guess for the ratio of large junction inductance to small junciton inductance of the SNAIL
kwargs:
res - the number of points with which to do the fitting. Fewer is faster, more is better
Returns
-------
fitted alpha
fitted p
'''
fit_quanta = np.sort(np.append(np.append(np.linspace(0,1, int(res/4)), np.linspace(0.25,0.75, int(res/4))),np.linspace(0.45,0.55, int(res/2))))*2*np.pi
def fit_func(quanta_arr, alpha, p_rat, f0):
print(f"P: {p_rat}, alpha: {alpha}, f0: {f0}")
#make the c2 we need from the supplied alpha
c2_func = c2_func_gen_vectorize(alpha)
res_freqs = f0/(np.sqrt(1+p_rat/c2_func(quanta_arr).astype(float)))
return res_freqs
#fit the data
if bounds ==None:
bounds = [[0.1, 0.001, fitted_res_func(0)*0.7],
[0.33, 1, fitted_res_func(0)*1.3]]
popt, pcov = curve_fit(fit_func, fit_quanta, fitted_res_func(fit_quanta), p0 = [initial_alpha_guess, initial_p_guess, init_f0_guess],
bounds = bounds)
[fitted_alpha, fitted_p, fitted_f0] = popt
[d_alpha, d_p, d_f0] = [np.sqrt(pcov[0,0]), np.sqrt(pcov[1,1]), np.sqrt(pcov[2,2])]
return fit_func, [fitted_alpha, fitted_p, fitted_f0], [d_alpha, d_p, d_f0]
def frattini_p_to_part(self, fp, alpha):
return lambda flux: 1/(1/fp*c2_func_gen_vectorize(alpha)(flux)+1)
def slider_participation_fitter(self, stored_fits_filepath: str, fluxsweep_filepath: str, ret_sliders = False, start_freq = 7e9):
'''
Parameters
----------
stored_fits_filepath : str
path to a pickled fit file
fluxsweep_filepath : str
path to a fluxsweep stored in plottr's datadict format'
Returns
-------
4x matplotlib.widgets.slider objects, call slider.val to get value
'''
self.p_arr = np.linspace(0.01, 0.3, 50)
self.alpha_arr = np.linspace(0.1, 0.32, 50)
#the below function returns the slider fit, which you then have to call .val on
self.p_slider, self.a_slider, self.f_slider = slider_fit(fluxsweep_filepath,
stored_fits_filepath,
self.quanta_offset,
self.quanta_size,
self.p_arr,
self.alpha_arr,
start_freq = start_freq)
if ret_sliders:
return self.p_arr, self.alpha_arr, self.p_slider, self.a_slider, self.f_slider
else:
pass
def vals_from_sliders(self):
'''
A supporting function to slider_participation_fitter for extracting
the alpha and p values after the sliders have been used to fit
'''
self.alpha_from_FS = self.alpha_arr[self.a_slider.val]
self.p_from_FS = self.p_arr[self.p_slider.val]
return self.alpha_from_FS, self.p_from_FS, self.f_slider.val
def set_linear_inductance(self, L0):
self.L0 = L0
def set_linear_capacitance(self, C0):
self.C0 = C0
def generate_participation_function(self, L0, Lfunc):
return lambda phi: Lfunc(phi)/(L0+Lfunc(phi))
def generate_inductance_function(self, L_large, c2_func):
return lambda phi: L_large/c2_func(phi)
def generate_resonance_function_via_LC(self, L0, C0, Ls_func):
return lambda phi: 1/np.sqrt((L0+Ls_func(phi))*C0)
def generate_resonance_function_via_fit(self, p, f0, c2_func):
return lambda phi: 2*np.pi*f0/(np.sqrt(1+(p/(1-p))/c2_func(phi)))
def generate_gsss_function(self, C0, p_func, res_func, c2_func, c3_func):
'''
source: https://arxiv.org/pdf/1806.06093.pdf
(The frattini paper)
calculates the g3 wrt flux given linear capacitance, participation ratio, and alpha
return value is in Joules
'''
#calculate Ec
Ec = self.e**2/(2*C0)
return lambda phi: 1/6*p_func(phi)**2*c3_func(phi)/c2_func(phi)*np.sqrt(Ec*self.hbar*res_func(phi))
def collect_TACO_data(self, gain_folder, plot = False, tla_pump = 0):
gain_cwd = gain_folder
res = find_all_ddh5(gain_cwd)
info_dict, bias_currents, best_gen_freqs, best_gen_powers, gains = superTACO_Bars(res, angles = [60,20], quanta_size = self.quanta_size, quanta_offset = self.quanta_offset, bardims = [0.001, 0.7], barbase = -24, plot = False)
if plot:
fig2 = plt.figure(2)
ax = fig2.add_subplot(131)
ax.plot(self.conv_func(bias_currents), np.array(best_gen_powers)-tla_pump, 'b.', markersize = 15)
ax.set_title(r'Lowest 20dB Power (dBm) vs. Flux ($\Phi_0$)')
ax.set_xlabel('Flux Quanta ($\Phi/\Phi_0)$')
ax.set_ylabel('Generator Power @20dB Gain (dBm)')
ax.grid()
return bias_currents, best_gen_freqs, best_gen_powers-tla_pump, gains
def g3_from_pump_power(self,
dBgains: np.ndarray,
pump_powers: np.ndarray,
mode_kappas: np.ndarray,
pump_omegas: np.ndarray,
pump_detunings_from_res: np.ndarray
):
'''
Source for calculation: https://arxiv.org/abs/1605.00539
"Introduction to Quantum-limited Parametric Amplification of Quantum Signals with Josephson Circuits"
by <NAME> and <NAME>
Parameters
----------
gains : np.ndarray
gain in dB, whose positions correspond to the powers given in the pump_powers section
pump_powers : np.ndarray
pump power in dBm that the amplifier sees. This must include all attenuation in the entire line
mode_kappas : np.ndarray
mode kappa in 2pi*Hz
pump_omegas : np.ndarray
pump frequency in 2pi*Hz
pump_detunings_from_res : np.ndarray
pump detuning in 2pi(f-f0) where f0 is the resonator frequency in hz
Returns
-------
numPumpPhotons : np.ndarray
The sqrt of the number of pump photons expected in the pumping resonator.
g3_arr : np.ndarray
The third order coupling in Hz for each combination of inputs
'''
lin_pump_powers = np.power(10,pump_powers/10)*0.001 #pump power in watts
#get the expected value of pump photons present in the resonator
npTZC_arr = []
numPumpPhotonsTZC = lin_pump_powers/(pump_omegas*self.hbar)*(np.sqrt(mode_kappas)/(mode_kappas/2-1j*(pump_detunings_from_res)))**2
for val in numPumpPhotonsTZC:
npTZC_arr.append(np.linalg.norm(val))
numPumpPhotons = np.array(npTZC_arr)
numPumpPhotonsDev = np.sqrt(8*mode_kappas*lin_pump_powers/(pump_omegas*self.hbar))/np.absolute(mode_kappas-2j*pump_detunings_from_res)
Lin_Power_gains = np.power(10,dBgains/20)
lpg = Lin_Power_gains
g3_arr = -0.5*(mode_kappas/numPumpPhotons)*np.sqrt((np.sqrt(lpg)-1)/(np.sqrt(lpg)+1))
return numPumpPhotonsDev, g3_arr, numPumpPhotons
def process_ref_HFSS_sweep(self, HFSS_filepath, ref_port_name = 'B', lumped_port_name = 'sl', ind_name = 'Ls', trans_port_name = 'U'):
data = pd.read_csv(HFSS_filepath)
HFSS_dicts = []
for inductance in np.unique(data[f'{ind_name} [pH]'].to_numpy()):
filt = (data[f'{ind_name} [pH]'].to_numpy() == inductance)
HFSS_dicts.append(dict(
SNAIL_inductance = inductance,
freq = data['Freq [GHz]'].to_numpy()[filt]*1e9,
freqrad = data['Freq [GHz]'].to_numpy()[filt]*1e9*2*np.pi, #fitter takes rad*hz
mag = data[f'mag(S({ref_port_name},{ref_port_name})) []'].to_numpy()[filt],
phase = data[f'cang_deg_val(S({ref_port_name},{ref_port_name})) []'].to_numpy()[filt],
phaserad = data[f'cang_deg_val(S({ref_port_name},{ref_port_name})) []'].to_numpy()[filt]*2*np.pi/360,
dBmag = np.power(10, data[f'mag(S({ref_port_name},{ref_port_name})) []'].to_numpy()[filt]/20),
real = data[f'mag(S({ref_port_name},{ref_port_name})) []'].to_numpy()[filt]*np.cos(data[f'cang_deg_val(S({ref_port_name},{ref_port_name})) []'].to_numpy()[filt]*2*np.pi/360),
imag = data[f'mag(S({ref_port_name},{ref_port_name})) []'].to_numpy()[filt]*np.sin(data[f'cang_deg_val(S({ref_port_name},{ref_port_name})) []'].to_numpy()[filt]*2*np.pi/360),
imY = data[f'im(Y({lumped_port_name},{lumped_port_name})) []'].to_numpy()[filt],
leakage = data[f'dB(S({ref_port_name},{trans_port_name})) []'].to_numpy()[filt]
))
return HFSS_dicts
def fit_modes(self, *args, bounds = None, f0Guess_arr = None, Qguess = (1e2, 1e4), window_size = 600e6, plot = False):
QextGuess, QintGuess = Qguess
magBackGuess = 1
HFSS_inductances, HFSS_res_freqs, HFSS_kappas = [], [], []
for i, md in enumerate(args):
# print(type(f0Guess_arr))
if type(f0Guess_arr) == np.ndarray:
f0Guess_arr = np.copy(f0Guess_arr)
filt = (md['freqrad']>f0Guess_arr[i]-window_size/2)*(md['freqrad']<f0Guess_arr[i]+window_size/2)
f0Guess = f0Guess_arr[i]
else:
filt = np.ones(np.size(md['freqrad'])).astype(bool)
# print(np.diff(md['phaserad']))
# plt.plot(md['freq'][:-1]/1e9, np.diff(md['phaserad']))
f0Guess = md['freq'][np.argmin(savgol_filter(np.gradient(md['phaserad']), 15,3))]*2*np.pi
filt = (md['freqrad']>f0Guess-window_size/2)*(md['freqrad']<f0Guess+window_size/2)
if bounds == None:
bounds = ([QextGuess / 10, QintGuess /10, f0Guess-500e6, magBackGuess / 2, 0],
[QextGuess * 10, QintGuess * 10, f0Guess+500e6, magBackGuess * 2, np.pi])
popt, pcov = fit(md['freqrad'][filt], md['real'][filt], md['imag'][filt], md['mag'][filt], md['phaserad'][filt], Qguess = Qguess, f0Guess = f0Guess, phaseGuess = 0)
if plot:
# print("inductance: ", md['SNAIL_inductance'])
plotRes(md['freqrad'][filt], md['real'][filt], md['imag'][filt], md['mag'][filt], md['phaserad'][filt], popt)
Qtot = popt[0] * popt[1] / (popt[0] + popt[1])
kappa = popt[2]/2/np.pi/Qtot
f0 = popt[2]/(2*np.pi)
inductance = md['SNAIL_inductance']
HFSS_inductances.append(inductance)
HFSS_res_freqs.append(f0)
HFSS_kappas.append(kappa)
md['res_freq_rad'] = f0*2*np.pi
md['kappa'] = kappa
return HFSS_inductances, HFSS_res_freqs, HFSS_kappas
def g3_from_admittance(self, Ej_large, c3_val, mds):
phi_zpf_arr = []
g3 = Ej_large*c3_val/6*(2*np.pi/self.phi0)**3
for md in mds:
res_omega = md['res_freq_rad']
# print("res_omega/2pi", res_omega/2/np.pi)
omegas = md['freqrad']
imY = md['imY']
f_res_loc = np.argmin(np.abs(omegas-res_omega))
slope = np.gradient(imY)[f_res_loc]/np.gradient(omegas)[f_res_loc]
Zpeff = 2/(res_omega*slope)
# print("omega/2pi: ", res_omega/2/np.pi)
# print("slope: ", slope)
# print("Impedance: ", Zpeff)
g3 *= np.sqrt(self.hbar/2*Zpeff)
phi_zpf_arr.append(Zpeff)
return g3
def g3_from_admittance_raw(self, Ej_large, c3_val, res_omega):
phi_zpf_arr = []
g3 = Ej_large*c3_val/6*(2*np.pi/self.phi0)**3
for res_omega in res_omegas:
res_omega = md['res_freq_rad']
omegas = md['freqrad']
imY = md['imY']
f_res_loc = np.argmin(np.abs(omegas-res_omega))
slope = np.gradient(imY)[f_res_loc]/np.gradient(omegas)[f_res_loc]
Zpeff = 2/(res_omega*slope)
# print("omega/2pi: ", res_omega/2/np.pi)
# print("slope: ", slope)
# print("Impedance: ", Zpeff)
g3 *= np.sqrt(self.hbar/2*Zpeff)
phi_zpf_arr.append(Zpeff)
return g3
if __name__ == '__main__':
SA = SnailAmp()
HFSS_filepath = r'D:\HFSS_Sims\SA_2X\mode_s.csv'
HFSS_dicts = SA.process_HFSS_sweep(HFSS_filepath)
#fit all of them, try to choose a guess frequency and Q's that cooperate with all of them
HFSS_inductances, HFSS_res_freqs, HFSS_kappas = SA.fit_modes(*HFSS_dicts,
Qguess = (5e1,1e3),
window_size = 100e6,
plot = True,
f0Guess_arr = None)
HFSS_inductances = np.array(HFSS_inductances)
HFSS_kappas = np.array(HFSS_kappas)
| [
"data_processing.ddh5_Plotting.TACO_multiplot_b1.superTACO_Bars",
"data_processing.models.SNAIL_supporting_modules.Participation_and_Alpha_Fitter.slider_fit",
"sympy.series",
"numpy.absolute",
"numpy.abs",
"numpy.argmax",
"pandas.read_csv",
"sympy.cos",
"numpy.argmin",
"matplotlib.pyplot.figure",
... | [((1405, 1449), 'sympy.symbols', 'sp.symbols', (['"""alpha,E_j,phi_s,phi_e, phi_min"""'], {}), "('alpha,E_j,phi_s,phi_e, phi_min')\n", (1415, 1449), True, 'import sympy as sp\n'), ((2372, 2416), 'sympy.symbols', 'sp.symbols', (['"""alpha,E_j,phi_s,phi_e, phi_min"""'], {}), "('alpha,E_j,phi_s,phi_e, phi_min')\n", (2382, 2416), True, 'import sympy as sp\n'), ((2494, 2534), 'sympy.series', 'sp.series', (['U_snail', 'phi_s'], {'x0': 'phi_m', 'n': '(5)'}), '(U_snail, phi_s, x0=phi_m, n=5)\n', (2503, 2534), True, 'import sympy as sp\n'), ((2732, 2751), 'numpy.vectorize', 'np.vectorize', (['c4exp'], {}), '(c4exp)\n', (2744, 2751), True, 'import numpy as np\n'), ((2845, 2889), 'sympy.symbols', 'sp.symbols', (['"""alpha,E_j,phi_s,phi_e, phi_min"""'], {}), "('alpha,E_j,phi_s,phi_e, phi_min')\n", (2855, 2889), True, 'import sympy as sp\n'), ((2967, 3007), 'sympy.series', 'sp.series', (['U_snail', 'phi_s'], {'x0': 'phi_m', 'n': '(4)'}), '(U_snail, phi_s, x0=phi_m, n=4)\n', (2976, 3007), True, 'import sympy as sp\n'), ((3204, 3223), 'numpy.vectorize', 'np.vectorize', (['c3exp'], {}), '(c3exp)\n', (3216, 3223), True, 'import numpy as np\n'), ((3297, 3341), 'sympy.symbols', 'sp.symbols', (['"""alpha,E_j,phi_s,phi_e, phi_min"""'], {}), "('alpha,E_j,phi_s,phi_e, phi_min')\n", (3307, 3341), True, 'import sympy as sp\n'), ((3419, 3459), 'sympy.series', 'sp.series', (['U_snail', 'phi_s'], {'x0': 'phi_m', 'n': '(3)'}), '(U_snail, phi_s, x0=phi_m, n=3)\n', (3428, 3459), True, 'import sympy as sp\n'), ((3656, 3675), 'numpy.vectorize', 'np.vectorize', (['c2exp'], {}), '(c2exp)\n', (3668, 3675), True, 'import numpy as np\n'), ((22953, 22979), 'numpy.array', 'np.array', (['HFSS_inductances'], {}), '(HFSS_inductances)\n', (22961, 22979), True, 'import numpy as np\n'), ((22998, 23019), 'numpy.array', 'np.array', (['HFSS_kappas'], {}), '(HFSS_kappas)\n', (23006, 23019), True, 'import numpy as np\n'), ((766, 808), 'scipy.signal.savgol_filter', 'savgol_filter', (['res_freqs', 
'smooth_window', '(2)'], {}), '(res_freqs, smooth_window, 2)\n', (779, 808), False, 'from scipy.signal import savgol_filter\n'), ((840, 860), 'numpy.argmax', 'np.argmax', (['res_freqs'], {}), '(res_freqs)\n', (849, 860), True, 'import numpy as np\n'), ((893, 913), 'numpy.argmin', 'np.argmin', (['res_freqs'], {}), '(res_freqs)\n', (902, 913), True, 'import numpy as np\n'), ((935, 976), 'numpy.abs', 'np.abs', (['(min_res_current - max_res_current)'], {}), '(min_res_current - max_res_current)\n', (941, 976), True, 'import numpy as np\n'), ((2035, 2049), 'numpy.size', 'np.size', (['funcs'], {}), '(funcs)\n', (2042, 2049), True, 'import numpy as np\n'), ((2110, 2138), 'scipy.optimize.fsolve', 'fsolve', (['func', 'phi_ext_arr[i]'], {}), '(func, phi_ext_arr[i])\n', (2116, 2138), False, 'from scipy.optimize import fsolve\n'), ((2253, 2274), 'scipy.optimize.fsolve', 'fsolve', (['func', 'phi_ext'], {}), '(func, phi_ext)\n', (2259, 2274), False, 'from scipy.optimize import fsolve\n'), ((7277, 7284), 'timeit.default_timer', 'timer', ([], {}), '()\n', (7282, 7284), True, 'from timeit import default_timer as timer\n'), ((7316, 7346), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'res'], {}), '(0, 2 * np.pi, res)\n', (7327, 7346), True, 'import numpy as np\n'), ((7426, 7433), 'timeit.default_timer', 'timer', ([], {}), '()\n', (7431, 7433), True, 'from timeit import default_timer as timer\n'), ((7552, 7594), 'scipy.interpolate.interp1d', 'interp1d', (['phi_ext_arr', 'c4_arr', '"""quadratic"""'], {}), "(phi_ext_arr, c4_arr, 'quadratic')\n", (7560, 7594), False, 'from scipy.interpolate import interp1d\n'), ((7648, 7655), 'timeit.default_timer', 'timer', ([], {}), '()\n', (7653, 7655), True, 'from timeit import default_timer as timer\n'), ((7678, 7708), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'res'], {}), '(0, 2 * np.pi, res)\n', (7689, 7708), True, 'import numpy as np\n'), ((7788, 7795), 'timeit.default_timer', 'timer', ([], {}), '()\n', (7793, 7795), 
True, 'from timeit import default_timer as timer\n'), ((7914, 7956), 'scipy.interpolate.interp1d', 'interp1d', (['phi_ext_arr', 'c3_arr', '"""quadratic"""'], {}), "(phi_ext_arr, c3_arr, 'quadratic')\n", (7922, 7956), False, 'from scipy.interpolate import interp1d\n'), ((8010, 8017), 'timeit.default_timer', 'timer', ([], {}), '()\n', (8015, 8017), True, 'from timeit import default_timer as timer\n'), ((8040, 8070), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'res'], {}), '(0, 2 * np.pi, res)\n', (8051, 8070), True, 'import numpy as np\n'), ((8150, 8157), 'timeit.default_timer', 'timer', ([], {}), '()\n', (8155, 8157), True, 'from timeit import default_timer as timer\n'), ((8276, 8318), 'scipy.interpolate.interp1d', 'interp1d', (['phi_ext_arr', 'c2_arr', '"""quadratic"""'], {}), "(phi_ext_arr, c2_arr, 'quadratic')\n", (8284, 8318), False, 'from scipy.interpolate import interp1d\n'), ((11289, 11315), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.3)', '(50)'], {}), '(0.01, 0.3, 50)\n', (11300, 11315), True, 'import numpy as np\n'), ((11341, 11367), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.32)', '(50)'], {}), '(0.1, 0.32, 50)\n', (11352, 11367), True, 'import numpy as np\n'), ((11510, 11655), 'data_processing.models.SNAIL_supporting_modules.Participation_and_Alpha_Fitter.slider_fit', 'slider_fit', (['fluxsweep_filepath', 'stored_fits_filepath', 'self.quanta_offset', 'self.quanta_size', 'self.p_arr', 'self.alpha_arr'], {'start_freq': 'start_freq'}), '(fluxsweep_filepath, stored_fits_filepath, self.quanta_offset,\n self.quanta_size, self.p_arr, self.alpha_arr, start_freq=start_freq)\n', (11520, 11655), False, 'from data_processing.models.SNAIL_supporting_modules.Participation_and_Alpha_Fitter import slider_fit\n'), ((13875, 13898), 'data_processing.Helper_Functions.find_all_ddh5', 'find_all_ddh5', (['gain_cwd'], {}), '(gain_cwd)\n', (13888, 13898), False, 'from data_processing.Helper_Functions import find_all_ddh5\n'), ((13974, 14129), 
'data_processing.ddh5_Plotting.TACO_multiplot_b1.superTACO_Bars', 'superTACO_Bars', (['res'], {'angles': '[60, 20]', 'quanta_size': 'self.quanta_size', 'quanta_offset': 'self.quanta_offset', 'bardims': '[0.001, 0.7]', 'barbase': '(-24)', 'plot': '(False)'}), '(res, angles=[60, 20], quanta_size=self.quanta_size,\n quanta_offset=self.quanta_offset, bardims=[0.001, 0.7], barbase=-24,\n plot=False)\n', (13988, 14129), False, 'from data_processing.ddh5_Plotting.TACO_multiplot_b1 import superTACO_Bars\n'), ((16489, 16508), 'numpy.array', 'np.array', (['npTZC_arr'], {}), '(npTZC_arr)\n', (16497, 16508), True, 'import numpy as np\n'), ((16678, 16704), 'numpy.power', 'np.power', (['(10)', '(dBgains / 20)'], {}), '(10, dBgains / 20)\n', (16686, 16704), True, 'import numpy as np\n'), ((17070, 17096), 'pandas.read_csv', 'pd.read_csv', (['HFSS_filepath'], {}), '(HFSS_filepath)\n', (17081, 17096), True, 'import pandas as pd\n'), ((1472, 1485), 'sympy.cos', 'sp.cos', (['phi_s'], {}), '(phi_s)\n', (1478, 1485), True, 'import sympy as sp\n'), ((1490, 1517), 'sympy.cos', 'sp.cos', (['((phi_e - phi_s) / 3)'], {}), '((phi_e - phi_s) / 3)\n', (1496, 1517), True, 'import sympy as sp\n'), ((2435, 2448), 'sympy.cos', 'sp.cos', (['phi_s'], {}), '(phi_s)\n', (2441, 2448), True, 'import sympy as sp\n'), ((2453, 2480), 'sympy.cos', 'sp.cos', (['((phi_e - phi_s) / 3)'], {}), '((phi_e - phi_s) / 3)\n', (2459, 2480), True, 'import sympy as sp\n'), ((2577, 2601), 'sympy.Pow', 'sp.Pow', (['(phi_s - phi_m)', '(4)'], {}), '(phi_s - phi_m, 4)\n', (2583, 2601), True, 'import sympy as sp\n'), ((2908, 2921), 'sympy.cos', 'sp.cos', (['phi_s'], {}), '(phi_s)\n', (2914, 2921), True, 'import sympy as sp\n'), ((2926, 2953), 'sympy.cos', 'sp.cos', (['((phi_e - phi_s) / 3)'], {}), '((phi_e - phi_s) / 3)\n', (2932, 2953), True, 'import sympy as sp\n'), ((3050, 3074), 'sympy.Pow', 'sp.Pow', (['(phi_s - phi_m)', '(3)'], {}), '(phi_s - phi_m, 3)\n', (3056, 3074), True, 'import sympy as sp\n'), ((3360, 3373), 
'sympy.cos', 'sp.cos', (['phi_s'], {}), '(phi_s)\n', (3366, 3373), True, 'import sympy as sp\n'), ((3378, 3405), 'sympy.cos', 'sp.cos', (['((phi_e - phi_s) / 3)'], {}), '((phi_e - phi_s) / 3)\n', (3384, 3405), True, 'import sympy as sp\n'), ((3502, 3526), 'sympy.Pow', 'sp.Pow', (['(phi_s - phi_m)', '(2)'], {}), '(phi_s - phi_m, 2)\n', (3508, 3526), True, 'import sympy as sp\n'), ((8596, 8608), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8606, 8608), True, 'import matplotlib.pyplot as plt\n'), ((10511, 10530), 'numpy.sqrt', 'np.sqrt', (['pcov[0, 0]'], {}), '(pcov[0, 0])\n', (10518, 10530), True, 'import numpy as np\n'), ((10531, 10550), 'numpy.sqrt', 'np.sqrt', (['pcov[1, 1]'], {}), '(pcov[1, 1])\n', (10538, 10550), True, 'import numpy as np\n'), ((10551, 10570), 'numpy.sqrt', 'np.sqrt', (['pcov[2, 2]'], {}), '(pcov[2, 2])\n', (10558, 10570), True, 'import numpy as np\n'), ((14179, 14192), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (14189, 14192), True, 'import matplotlib.pyplot as plt\n'), ((16086, 16116), 'numpy.power', 'np.power', (['(10)', '(pump_powers / 10)'], {}), '(10, pump_powers / 10)\n', (16094, 16116), True, 'import numpy as np\n'), ((16537, 16607), 'numpy.sqrt', 'np.sqrt', (['(8 * mode_kappas * lin_pump_powers / (pump_omegas * self.hbar))'], {}), '(8 * mode_kappas * lin_pump_powers / (pump_omegas * self.hbar))\n', (16544, 16607), True, 'import numpy as np\n'), ((16600, 16657), 'numpy.absolute', 'np.absolute', (['(mode_kappas - 2.0j * pump_detunings_from_res)'], {}), '(mode_kappas - 2.0j * pump_detunings_from_res)\n', (16611, 16657), True, 'import numpy as np\n'), ((19879, 20029), 'data_processing.fitting.QFit.fit', 'fit', (["md['freqrad'][filt]", "md['real'][filt]", "md['imag'][filt]", "md['mag'][filt]", "md['phaserad'][filt]"], {'Qguess': 'Qguess', 'f0Guess': 'f0Guess', 'phaseGuess': '(0)'}), "(md['freqrad'][filt], md['real'][filt], md['imag'][filt], md['mag'][filt\n ], md['phaserad'][filt], Qguess=Qguess, 
f0Guess=f0Guess, phaseGuess=0)\n", (19882, 20029), False, 'from data_processing.fitting.QFit import fit, plotRes\n'), ((21419, 21449), 'numpy.sqrt', 'np.sqrt', (['(self.hbar / 2 * Zpeff)'], {}), '(self.hbar / 2 * Zpeff)\n', (21426, 21449), True, 'import numpy as np\n'), ((22155, 22185), 'numpy.sqrt', 'np.sqrt', (['(self.hbar / 2 * Zpeff)'], {}), '(self.hbar / 2 * Zpeff)\n', (22162, 22185), True, 'import numpy as np\n'), ((16443, 16462), 'numpy.linalg.norm', 'np.linalg.norm', (['val'], {}), '(val)\n', (16457, 16462), True, 'import numpy as np\n'), ((19008, 19028), 'numpy.copy', 'np.copy', (['f0Guess_arr'], {}), '(f0Guess_arr)\n', (19015, 19028), True, 'import numpy as np\n'), ((20133, 20247), 'data_processing.fitting.QFit.plotRes', 'plotRes', (["md['freqrad'][filt]", "md['real'][filt]", "md['imag'][filt]", "md['mag'][filt]", "md['phaserad'][filt]", 'popt'], {}), "(md['freqrad'][filt], md['real'][filt], md['imag'][filt], md['mag'][\n filt], md['phaserad'][filt], popt)\n", (20140, 20247), False, 'from data_processing.fitting.QFit import fit, plotRes\n'), ((21117, 21143), 'numpy.abs', 'np.abs', (['(omegas - res_omega)'], {}), '(omegas - res_omega)\n', (21123, 21143), True, 'import numpy as np\n'), ((21853, 21879), 'numpy.abs', 'np.abs', (['(omegas - res_omega)'], {}), '(omegas - res_omega)\n', (21859, 21879), True, 'import numpy as np\n'), ((1523, 1568), 'sympy.series', 'sp.series', (['U_snail_norm', 'phi_s'], {'x0': 'phi_m', 'n': '(2)'}), '(U_snail_norm, phi_s, x0=phi_m, n=2)\n', (1532, 1568), True, 'import sympy as sp\n'), ((14283, 14308), 'numpy.array', 'np.array', (['best_gen_powers'], {}), '(best_gen_powers)\n', (14291, 14308), True, 'import numpy as np\n'), ((16306, 16326), 'numpy.sqrt', 'np.sqrt', (['mode_kappas'], {}), '(mode_kappas)\n', (16313, 16326), True, 'import numpy as np\n'), ((21163, 21179), 'numpy.gradient', 'np.gradient', (['imY'], {}), '(imY)\n', (21174, 21179), True, 'import numpy as np\n'), ((21191, 21210), 'numpy.gradient', 'np.gradient', 
(['omegas'], {}), '(omegas)\n', (21202, 21210), True, 'import numpy as np\n'), ((21899, 21915), 'numpy.gradient', 'np.gradient', (['imY'], {}), '(imY)\n', (21910, 21915), True, 'import numpy as np\n'), ((21927, 21946), 'numpy.gradient', 'np.gradient', (['omegas'], {}), '(omegas)\n', (21938, 21946), True, 'import numpy as np\n'), ((7490, 7524), 'numpy.round', 'np.round', (['(end_time - start_time)', '(2)'], {}), '(end_time - start_time, 2)\n', (7498, 7524), True, 'import numpy as np\n'), ((7852, 7886), 'numpy.round', 'np.round', (['(end_time - start_time)', '(2)'], {}), '(end_time - start_time, 2)\n', (7860, 7886), True, 'import numpy as np\n'), ((8214, 8248), 'numpy.round', 'np.round', (['(end_time - start_time)', '(2)'], {}), '(end_time - start_time, 2)\n', (8222, 8248), True, 'import numpy as np\n'), ((16801, 16813), 'numpy.sqrt', 'np.sqrt', (['lpg'], {}), '(lpg)\n', (16808, 16813), True, 'import numpy as np\n'), ((16818, 16830), 'numpy.sqrt', 'np.sqrt', (['lpg'], {}), '(lpg)\n', (16825, 16830), True, 'import numpy as np\n'), ((19233, 19255), 'numpy.size', 'np.size', (["md['freqrad']"], {}), "(md['freqrad'])\n", (19240, 19255), True, 'import numpy as np\n'), ((19453, 19480), 'numpy.gradient', 'np.gradient', (["md['phaserad']"], {}), "(md['phaserad'])\n", (19464, 19480), True, 'import numpy as np\n')] |
from operator import attrgetter, itemgetter
import numpy
import talib
from pymongo import MongoClient
import line_messageer
from strategy.bull_market import BullMarket
from strategy.force_sell import ForceSell
from strategy.foreign_investor_total import GoldKDJ
from strategy.main_force import MainForce
from strategy.strategy import Strategy
from strategy.value_avg_up import ValueUp
from strategy.value_concentrated import ValueConcentrated
# MongoDB connection and the collections this script reads/writes.
client = MongoClient('localhost', 27017)
db = client['stock']
collect = db['stock']  # per-stock detail records (price/volume history)
collectTWSE = db['twse_list']  # TWSE listing collection (unused in this section)
collectAnalysis = db['analysis']  # analysis output; the insert below is currently commented out
analysisItems = []
def get_change_price(details):
    """Format each record's changePrice with an up/down marker.

    A leading '+' is replaced by the up marker '▲ '; any other value
    is prefixed with the down marker '▼ '.
    """
    marked = []
    for record in details:
        raw = record['changePrice']
        if '+' in raw:
            marked.append(str(raw).replace('+', '▲ '))
        else:
            marked.append('▼ ' + raw)
    return marked
def get_date(details):
    """Render each record's ``date`` field as a 'YYYY/MM/DD' string."""
    return ['{0:%Y/%m/%d}'.format(record['date']) for record in details]
def get_value(details):
    """Collect each record's ``dealValue`` into a float numpy array.

    ``dealValue`` is stored as a string that may contain thousands
    separators (e.g. '1,234'); commas are stripped before conversion,
    matching how the value is parsed elsewhere in this script.
    Unparseable values fall back to 0.0 and the error is printed,
    keeping the script's best-effort behavior.
    """
    value = []
    for detail in details:
        try:
            # strip ',' thousands separators before parsing the float
            v = float(str(detail['dealValue']).replace(',', ''))
        except Exception as e:
            print("except no {0}".format(e))
            v = 0.0
        value.append(v)
    return numpy.array(value)
def get_price(details):
    """Return (numpy array of closePrice floats, last close price).

    Unparseable prices become 0.0 and the error is printed.
    Note: an empty ``details`` raises IndexError on the last-price
    lookup, same as the original behavior.
    """
    closes = []
    for record in details:
        try:
            closes.append(float(record['closePrice']))
        except Exception as err:
            print("except no {0}".format(err))
            closes.append(0.0)
    return numpy.array(closes), closes[-1]
def get_high_price(details):
    """Return a numpy array of each record's ``highPrice`` as float.

    Unparseable values become 0.0 and the error is printed.
    """
    highs = []
    for record in details:
        try:
            highs.append(float(record['highPrice']))
        except Exception as err:
            print("except no {0}".format(err))
            highs.append(0.0)
    return numpy.array(highs)
def get_low_price(details):
    """Return a numpy array of each record's ``lowPrice`` as float.

    Unparseable values become 0.0 and the error is printed.
    """
    lows = []
    for record in details:
        try:
            lows.append(float(record['lowPrice']))
        except Exception as err:
            print("except no {0}".format(err))
            lows.append(0.0)
    return numpy.array(lows)
# Scan every stock: run all strategies and print those that pass all of them.
twse = collect.find()
for item in twse:
    stock = item['stockNo']
    name = item['stockName']
    details = item['details']
    # chronological order (oldest first) for the indicator inputs
    details.sort(key=itemgetter('date'), reverse=False)
    price, close_price = get_price(details)
    # the series below are computed but not consumed by the active
    # strategies; kept for parity with the disabled analysis code
    high = get_high_price(details)
    low = get_low_price(details)
    values = get_value(details)
    date = get_date(details)
    change_price = get_change_price(details)
    strategy = [
        BullMarket(price, 5, 20, 60),
        ForceSell(details),
        GoldKDJ(details),
        ValueConcentrated(details, concentrated=1, percentage=15),
    ]
    # run every strategy (no short-circuit) and count the passes
    count = sum(1 for s in strategy if s.run())
    if count == len(strategy):
        print('{0} - {1}'.format(name, stock))
        # collectAnalysis.insert_one(analysis)
"""
post = collect.aggregate([
{
'$project': {
"stockNo": 1,
"stockName": 1,
'details': {
'$filter': {
'input': "$details",
'as': "item",
'cond': {'$and': [
{"$gt": ["$$item.buySell", 1]},
{"$lt": ["$$item.buyBrokerageCount", 0]},
{"$gt": ["$$item.investment_trust_total", 1]},
]}
}
}
}
}
])
for p in post:
details = p['details']
stock = p['stockNo']
if len(details) > 3:
details.sort(key=itemgetter('date'), reverse=True)
print(stock)
print(len(details))
date = []
price = []
value = []
buySell = []
investment_trust_total = []
text = '投信 主力買'
post = collect.find()
for items in post:
stock = items['stockNo']
try:
details = items['details']
details.sort(key=itemgetter('date'), reverse=False)
for detail in details:
v = str(detail['dealValue']).replace(',', '')
d = detail['date']
date.append(detail['date'])
buySell.append(detail['buySell'])
if 'investment_trust_total' in detail:
investment_trust_total.append(detail['investment_trust_total'])
try:
p = float(detail['closePrice'])
except Exception as e:
print("except no {0}".format(e))
p = 0.0
price.append(p)
nprice = numpy.array(price)
ndate = numpy.array(date)
value = numpy.array(value)
avg5 = talib.SMA(nprice, timeperiod=5)
upper, middle, lower = talib.BBANDS(nprice, timeperiod=20, nbdevup=2.1, nbdevdn=2.1, matype=0)
BBANDS = price[-1] >= upper[-1]
new_price = price[-30:-1]
last_price = price[-1]
if BBANDS and last_price >= max(new_price):
text += '\r\n {0}, price {1} upper {2} date{3}'.format(stock, price[-1], upper[-1], date[-1])
price.clear()
date.clear()
price.clear()
buySell.clear()
investment_trust_total.clear()
except Exception as e:
print(stock)
line_messageer.send_message(text)
print(text)
if 2 < len(d) < 4:
close = []
for v in d:
close.append(v['closePrice'])
diff = float(close[1]) - float(close[0])
if diff > 0:
if diff / float(close[0]) / diff < 1:
print(s)
print(v)
text += '\r\n' + s
"""
| [
"pymongo.MongoClient",
"strategy.value_concentrated.ValueConcentrated",
"strategy.foreign_investor_total.GoldKDJ",
"numpy.array",
"operator.itemgetter",
"strategy.force_sell.ForceSell",
"strategy.bull_market.BullMarket"
] | [((454, 485), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {}), "('localhost', 27017)\n", (465, 485), False, 'from pymongo import MongoClient\n'), ((1315, 1333), 'numpy.array', 'numpy.array', (['value'], {}), '(value)\n', (1326, 1333), False, 'import numpy\n'), ((1610, 1628), 'numpy.array', 'numpy.array', (['price'], {}), '(price)\n', (1621, 1628), False, 'import numpy\n'), ((1920, 1938), 'numpy.array', 'numpy.array', (['price'], {}), '(price)\n', (1931, 1938), False, 'import numpy\n'), ((2217, 2235), 'numpy.array', 'numpy.array', (['price'], {}), '(price)\n', (2228, 2235), False, 'import numpy\n'), ((2700, 2728), 'strategy.bull_market.BullMarket', 'BullMarket', (['price', '(5)', '(20)', '(60)'], {}), '(price, 5, 20, 60)\n', (2710, 2728), False, 'from strategy.bull_market import BullMarket\n'), ((2750, 2768), 'strategy.force_sell.ForceSell', 'ForceSell', (['details'], {}), '(details)\n', (2759, 2768), False, 'from strategy.force_sell import ForceSell\n'), ((2790, 2806), 'strategy.foreign_investor_total.GoldKDJ', 'GoldKDJ', (['details'], {}), '(details)\n', (2797, 2806), False, 'from strategy.foreign_investor_total import GoldKDJ\n'), ((2828, 2885), 'strategy.value_concentrated.ValueConcentrated', 'ValueConcentrated', (['details'], {'concentrated': '(1)', 'percentage': '(15)'}), '(details, concentrated=1, percentage=15)\n', (2845, 2885), False, 'from strategy.value_concentrated import ValueConcentrated\n'), ((2406, 2424), 'operator.itemgetter', 'itemgetter', (['"""date"""'], {}), "('date')\n", (2416, 2424), False, 'from operator import attrgetter, itemgetter\n')] |
from collections import OrderedDict
# compatibility
from six.moves import range
# nose tools
from nose.tools import assert_raises
from nose.plugins.attrib import attr
# modules
import loopy as lp
import numpy as np
# local imports
from pyjac.core import array_creator as arc
from pyjac.tests import TestClass
from pyjac.core.rate_subs import assign_rates
from pyjac.core.enum_types import RateSpecialization
from pyjac.tests import get_test_langs
from pyjac.utils import listify
from pyjac.tests.test_utils import OptionLoopWrapper
def opts_loop(width=[4, None],
              depth=[4, None],
              order=['C', 'F'],
              lang=get_test_langs(),
              is_simd=[True, False]):
    """Yield option states over the product of vector widths/depths,
    data orders, languages and SIMD flags.

    Note: the list defaults are never mutated, and the default ``lang``
    is evaluated once at import time (deliberate here, as the test
    languages are fixed for a test session).
    """
    # fix: the original listed ('order', order) twice; duplicate dict
    # keys are silently collapsed, so the second entry was a no-op
    oploop = OrderedDict(
        [('width', width),
         ('depth', depth),
         ('order', order),
         ('lang', lang),
         ('is_simd', is_simd),
         ('unr', [None]),
         ('ilp', [None])])
    for opts in OptionLoopWrapper.from_dict(oploop):
        yield opts
def _dummy_opts(order='C'):
    """Return the first option state for plain C with the given order.

    Returns None if the option loop is empty, matching the original
    fall-through behavior.
    """
    return next(opts_loop(order=listify(order), lang=['c']), None)
def test_creator_asserts():
    """creator() must reject initializers whose dtype or length
    disagrees with the declared dtype/shape."""
    # initializer dtype (float64) disagrees with declared kint_type
    with assert_raises(AssertionError):
        arc.creator('', arc.kint_type, (10,), 'C',
                    initializer=np.arange(10, dtype=np.float64))
    # initializer length (10) disagrees with declared shape (11,)
    with assert_raises(AssertionError):
        arc.creator('', arc.kint_type, (11,), 'C',
                    initializer=np.arange(10, dtype=np.float32))
def test_non_contiguous_input():
    """A non-contiguous base domain must force generation of an input map."""
    lp_opt = _dummy_opts()
    # test that creation of mapstore with non-contiguous map forces
    # generation of input map
    c = arc.creator('', arc.kint_type, (10,), 'C',
                    initializer=np.array(list(range(4)) + list(range(6, 12)),
                                         dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    mstore.finalize()
    assert len(mstore.transformed_domains) == 1
    # an input-map parent node was created above the tree root
    assert mstore.tree.parent is not None
    assert np.allclose(mstore.tree.parent.domain.initializer, np.arange(10))
def test_contiguous_input():
    """A contiguous 0-based base domain needs no transforms at all."""
    # test that creation of mapstore with contiguous map has no effect
    lp_opt = _dummy_opts()
    c = arc.creator('', arc.kint_type, (10,), 'C',
                    initializer=np.arange(10, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    assert len(mstore.transformed_domains) == 0
def __create_var(name, size=(10,)):
    """Shorthand for a plain C-ordered kint_type creator (no initializer)."""
    made = arc.creator(name, arc.kint_type, size, 'C')
    return made
def test_contiguous_offset_input():
    """A contiguous-but-offset base maps variables with a single affine
    transform ('i + -3') instead of an index array."""
    lp_opt = _dummy_opts()
    c = arc.creator('c', arc.kint_type, (10,), 'C',
                    initializer=np.arange(3, 13, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    # add a creator that can be mapped affinely
    c2 = arc.creator('c2', arc.kint_type, (10,), 'C',
                     initializer=np.arange(10, dtype=arc.kint_type))
    x = __create_var('x')
    mstore.check_and_add_transform(x, c2, 'i')
    mstore.finalize()
    # test affine mapping in there
    assert len(mstore.transformed_domains) == 1
    assert mstore.domain_to_nodes[c2] in mstore.transformed_domains
    assert mstore.domain_to_nodes[x].parent.domain == c2
    assert mstore.domain_to_nodes[x].iname == 'i + -3'
def test_contiguous_offset_input_map():
    """When a non-affine domain forces an input map, previously-affine
    mappings move into the input map instead of a transform insn."""
    # same as the above, but check that a non-affine mappable transform
    # results in an input map
    lp_opt = _dummy_opts()
    c = arc.creator('c', arc.kint_type, (10,), 'C',
                    initializer=np.arange(3, 13, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    # add a creator that can be mapped affinely
    c2 = arc.creator('c2', arc.kint_type, (10,), 'C',
                     initializer=np.arange(10, dtype=arc.kint_type))
    x = __create_var('x')
    mstore.check_and_add_transform(x, c2, 'i')
    # and another creator that can't be affinely mapped
    c3 = arc.creator('c3', arc.kint_type, (10,), 'C',
                     initializer=np.array(list(range(4)) + list(range(6, 12)),
                                          dtype=arc.kint_type))
    x2 = __create_var('x2')
    mstore.check_and_add_transform(x2, c3, 'i')
    mstore.finalize()
    # test affine mapping is not transformed (should be moved to input map)
    assert len(mstore.transformed_domains) == 2
    assert mstore.domain_to_nodes[x] not in mstore.transformed_domains
    # check that non-affine and original indicies in there
    assert mstore.domain_to_nodes[c3] in mstore.transformed_domains
    assert mstore.domain_to_nodes[x2].parent.domain == c3
    # and that the tree has been transformed
    assert mstore.tree in mstore.transformed_domains
def test_input_map_domain_transfer():
    """A domain identical to the generated input map is re-parented onto
    the input map node rather than transformed again."""
    # check that a domain on the tree that matches the input map gets
    # transfered to the input map
    lp_opt = _dummy_opts()
    c = arc.creator('c', arc.kint_type, (10,), 'C',
                    initializer=np.arange(3, 13, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    # add a creator that matches the coming input map
    c2 = arc.creator('c2', arc.kint_type, (10,), 'C',
                     initializer=np.arange(10, dtype=arc.kint_type))
    x = __create_var('x')
    mstore.check_and_add_transform(x, c2, 'i')
    # and another creator that forces the input map
    c3 = arc.creator('c3', arc.kint_type, (10,), 'C',
                     initializer=np.array(list(range(4)) + list(range(6, 12)),
                                          dtype=arc.kint_type))
    x2 = __create_var('x2')
    mstore.check_and_add_transform(x2, c3, 'i')
    mstore.finalize()
    # test that c2 isn't transformed, and resides on new base
    assert len(mstore.transformed_domains) == 2
    assert mstore.domain_to_nodes[c2] not in mstore.transformed_domains
    assert mstore.domain_to_nodes[c2].parent == mstore.tree.parent
    assert mstore.domain_to_nodes[c2].insn is None
    # check that non-affine mapping in there
    assert mstore.domain_to_nodes[c3] in mstore.transformed_domains
    # and the original base
    assert mstore.domain_to_nodes[c] in mstore.transformed_domains
def test_duplicate_iname_detection():
    """Two variables sharing one mapped domain must produce exactly one
    transform instruction (and leaves with initializers are not mapped)."""
    # ensures the same transform isn't picked up multiple times
    lp_opt = _dummy_opts()
    # create dummy map
    c = arc.creator('c', arc.kint_type, (10,), 'C',
                    initializer=np.arange(3, 13, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    # create a mapped domain
    c2 = arc.creator('c', arc.kint_type, (10,), 'C',
                     initializer=np.array(list(range(3)) +
                                          list(range(4, 11)), dtype=arc.kint_type))
    # add two variables to the same domain
    mstore.check_and_add_transform(__create_var('x'), c2)
    mstore.check_and_add_transform(__create_var('x2'), c2)
    mstore.finalize()
    # ensure there's only one transform insn issued
    assert len(mstore.transform_insns) == 1
    assert [x for x in mstore.transform_insns][0] == \
        mstore.domain_to_nodes[c2].insn
    # now repeat with the variables having initializers
    # to test that leaves aren't mapped
    lp_opt = _dummy_opts()
    # create dummy map
    c = arc.creator('c', arc.kint_type, (10,), 'C',
                    initializer=np.arange(3, 13, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    # create a mapped domain
    c2 = arc.creator('c', arc.kint_type, (10,), 'C',
                     initializer=np.array(list(range(3)) +
                                          list(range(4, 11)), dtype=arc.kint_type))
    # add two variables to the same domain
    x = __create_var('x')
    x.initializer = np.arange(10)
    x2 = __create_var('x2')
    x2.initializer = np.arange(10)
    mstore.check_and_add_transform(x, c2)
    mstore.check_and_add_transform(x2, c2)
    mstore.finalize()
    # ensure there's only one transform insn issued
    assert len(mstore.transform_insns) == 1
    assert [y for y in mstore.transform_insns][0] == \
        mstore.domain_to_nodes[c2].insn
def test_map_range_update():
    """Chained domains mix affine inames, real transform insns and
    pass-through nodes correctly down a multi-level tree."""
    lp_opt = _dummy_opts()
    # test a complicated chaining / input map case
    # create dummy map
    c = arc.creator('c', arc.kint_type, (10,), 'C',
                    initializer=np.arange(3, 13, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    # next add a creator that doesn't need a map
    c2 = arc.creator('c2', arc.kint_type, (10,), 'C',
                     initializer=np.arange(10, 0, -1, dtype=arc.kint_type))
    mstore.check_and_add_transform(c2, c, 'i')
    # and a creator that only needs an affine map
    c3 = arc.creator('c3', arc.kint_type, (10,), 'C',
                     initializer=np.arange(4, 14, dtype=arc.kint_type))
    mstore.check_and_add_transform(c3, c2, 'i')
    # and add a creator that will trigger a transform for c3
    c4 = arc.creator('c4', arc.kint_type, (10,), 'C',
                     initializer=np.arange(4, 14, dtype=arc.kint_type))
    mstore.check_and_add_transform(c4, c3, 'i')
    # and another affine
    c5 = arc.creator('c5', arc.kint_type, (10,), 'C',
                     initializer=np.arange(3, 13, dtype=arc.kint_type))
    mstore.check_and_add_transform(c5, c4, 'i')
    # and we need a final variable to test c5
    x = __create_var('x')
    mstore.check_and_add_transform(x, c5, 'i')
    mstore.finalize()
    # there should be an affine input map of + 3
    assert (mstore.domain_to_nodes[c] == mstore.tree and
            mstore.tree.insn is None and mstore.tree.iname == 'i + 3'
            and mstore.tree.parent is not None)
    # c2 should be on the tree
    assert (mstore.domain_to_nodes[c2].parent == mstore.tree and
            mstore.domain_to_nodes[c2].insn == '<> i_1 = c2[i + 3] {id=index_i_1}')
    # c3 should be an regular transform off c2
    assert (mstore.domain_to_nodes[c3].parent == mstore.domain_to_nodes[c2] and
            mstore.domain_to_nodes[c3].insn == '<> i_2 = c3[i_1] {id=index_i_2}')
    # c4 should not have a transform (and thus should take the iname of c3)
    assert (mstore.domain_to_nodes[c4].parent == mstore.domain_to_nodes[c3] and
            mstore.domain_to_nodes[c4].insn is None
            and mstore.domain_to_nodes[c4].iname == 'i_2')
    # and c5 should be an affine of -1 off c4 (using c3's iname)
    assert (mstore.domain_to_nodes[c5].parent == mstore.domain_to_nodes[c4] and
            mstore.domain_to_nodes[c5].insn is None
            and mstore.domain_to_nodes[c5].iname == 'i_2 + -1')
def test_multiple_inputs():
    """Several domains added against one base: identity domains are not
    transformed, each distinct non-identity domain is."""
    lp_opt = _dummy_opts()
    c = arc.creator('', arc.kint_type, (10,), 'C',
                    initializer=np.arange(10, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    # add a variable
    c2 = arc.creator('', arc.kint_type, (10,), 'C',
                     initializer=np.arange(10, dtype=arc.kint_type))
    mstore.check_and_add_transform(__create_var('x2'), c2, 'i')
    # add a mapped variable
    c3 = arc.creator('', arc.kint_type, (10,), 'C',
                     initializer=np.array(list(range(5)) + list(range(6, 11)),
                                          dtype=arc.kint_type))
    mstore.check_and_add_transform(__create_var('x3'), c3, 'i')
    # test different vaiable with same map
    c4 = arc.creator('', arc.kint_type, (10,), 'C',
                     initializer=np.array(list(range(5)) + list(range(6, 11)),
                                          dtype=arc.kint_type))
    mstore.check_and_add_transform(__create_var('x4'), c4, 'i')
    # add another mapped variable
    c5 = arc.creator('', arc.kint_type, (10,), 'C',
                     initializer=np.array(list(range(4)) + list(range(5, 11)),
                                          dtype=arc.kint_type))
    mstore.check_and_add_transform(__create_var('x5'), c5, 'i')
    mstore.finalize()
    assert mstore.domain_to_nodes[c2] not in mstore.transformed_domains
    assert mstore.domain_to_nodes[c3] in mstore.transformed_domains
    assert mstore.domain_to_nodes[c4] in mstore.transformed_domains
    assert mstore.domain_to_nodes[c5] in mstore.transformed_domains
    assert len(mstore.transformed_domains) == 3
    # the map domain itself stays the identity range
    assert np.array_equal(mstore.map_domain.initializer,
                          np.arange(10, dtype=arc.kint_type))
def test_bad_multiple_variable_map():
    """Registering the same variable against two different domains must
    raise an AssertionError."""
    lp_opt = _dummy_opts()
    c = arc.creator('', arc.kint_type, (10,), 'C',
                    initializer=np.arange(10, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    # add a variable
    c2 = arc.creator('', arc.kint_type, (10,), 'C',
                     initializer=np.arange(10, dtype=arc.kint_type))
    x2 = __create_var('x2')
    mstore.check_and_add_transform(x2, c2, 'i')
    c3 = arc.creator('', arc.kint_type, (10,), 'C',
                     initializer=np.arange(3, 13, dtype=arc.kint_type))
    # add the same variable as a different domain, and check error
    with assert_raises(AssertionError):
        mstore.check_and_add_transform(x2, c3, 'i')
def test_offset_base():
    """An offset base plus a non-affine domain produces an identity map
    domain and transforms both the base and the added domain."""
    lp_opt = _dummy_opts()
    c = arc.creator('', arc.kint_type, (10,), 'C',
                    initializer=np.arange(3, 13, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    assert len(mstore.transformed_domains) == 0
    # add a variable
    c2 = arc.creator('', arc.kint_type, (10,), 'C',
                     initializer=np.array(list(range(4)) + list(range(5, 11)),
                                          dtype=arc.kint_type))
    x = __create_var('x')
    mstore.check_and_add_transform(x, c2, 'i')
    mstore.finalize()
    assert len(mstore.transformed_domains) == 2
    assert np.array_equal(mstore.map_domain.initializer,
                          np.arange(10, dtype=arc.kint_type))
    assert mstore.domain_to_nodes[c2] in mstore.transformed_domains
    assert mstore.domain_to_nodes[x].parent == mstore.domain_to_nodes[c2]
def test_map_variable_creator():
    """apply_maps() returns a loopy arg plus an access string using the
    mapped iname, and emits the expected transform instruction."""
    lp_opt = _dummy_opts()
    c = arc.creator('base', arc.kint_type, (10,), 'C',
                    initializer=np.arange(3, 13, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    assert len(mstore.transformed_domains) == 0
    # add a variable
    var = arc.creator('var', arc.kint_type, (10,), 'C')
    domain = arc.creator('domain', arc.kint_type, (10,), 'C',
                         initializer=np.array(list(range(4)) +
                                              list(range(5, 11)),
                                              dtype=arc.kint_type))
    mstore.check_and_add_transform(var, domain, 'i')
    var, var_str = mstore.apply_maps(var, 'i')
    assert isinstance(var, lp.ArrayArg)
    assert var_str == 'var[i_1]'
    assert '<> i_1 = domain[i + 3] {id=index_i_1}' in mstore.transform_insns
def test_map_to_larger():
    """Mapping from a smaller base (5) into a larger domain (10) is
    allowed and produces the expected transform instruction."""
    lp_opt = _dummy_opts()
    c = arc.creator('base', arc.kint_type, (5,), 'C',
                    initializer=np.arange(5, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    assert len(mstore.transformed_domains) == 0
    # add a variable
    var = arc.creator('var', arc.kint_type, (10,), 'C')
    domain = arc.creator('domain', arc.kint_type, (10,), 'C',
                         initializer=np.arange(10, dtype=arc.kint_type))
    # this should work
    mstore.check_and_add_transform(var, domain, 'i')
    var, var_str = mstore.apply_maps(var, 'i')
    assert isinstance(var, lp.ArrayArg)
    assert var_str == 'var[i_0]'
    assert '<> i_0 = domain[i] {id=index_i_0}' in mstore.transform_insns
def test_chained_maps():
    """Chained domain->domain maps reuse inames where no transform is
    needed and only emit instructions for real (non-identity) maps."""
    lp_opt = _dummy_opts()
    c = arc.creator('base', arc.kint_type, (5,), 'C',
                    initializer=np.arange(5, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    assert len(mstore.transformed_domains) == 0

    def __get_iname(domain):
        # iname assigned to a domain's node after mapping
        return mstore.domain_to_nodes[domain].iname

    # add a variable
    var = arc.creator('var', arc.kint_type, (10,), 'C')
    domain = arc.creator('domain', arc.kint_type, (10,), 'C',
                         initializer=np.arange(10, dtype=arc.kint_type))
    # this should work
    mstore.check_and_add_transform(var, domain, 'i')

    # now add a chained map
    var2 = arc.creator('var2', arc.kint_type, (10,), 'C')
    domain2 = arc.creator('domain2', arc.kint_type, (10,), 'C',
                          initializer=np.arange(10, dtype=arc.kint_type))
    mstore.check_and_add_transform(domain2, domain)
    mstore.check_and_add_transform(var2, domain2)

    # and finally put another chained map that does require a transform
    var3 = arc.creator('var3', arc.kint_type, (10,), 'C')
    domain3 = arc.creator('domain3', arc.kint_type, (10,), 'C',
                          initializer=np.array(list(range(3)) +
                                               list(range(4, 11)),
                                               dtype=arc.kint_type))
    mstore.check_and_add_transform(domain3, domain2)
    mstore.check_and_add_transform(var3, domain3)

    # now create variables and test
    var_lp, var_str = mstore.apply_maps(var, 'i')
    # test that the base map is there
    assert '<> {0} = domain[i] {{id=index_{0}}}'.format(__get_iname(domain)) in \
        mstore.transform_insns
    # var 1 should be based off domain's iname i_0
    assert var_str == 'var[{}]'.format(__get_iname(var))
    # var 2's iname should be based off domain2's iname
    # however since there is no need for map between domain and domain 2
    # this should _still_be i_0
    var2_lp, var2_str = mstore.apply_maps(var2, 'i')
    assert var2_str == 'var2[{}]'.format(__get_iname(var2))
    # and var 3 should be based off domain 3's iname, i_3
    var3_lp, var3_str = mstore.apply_maps(var3, 'i')
    assert var3_str == 'var3[{}]'.format(__get_iname(var3))
    assert (
        '<> {0} = domain3[{1}] {{id=index_{0}}}'.format(
            __get_iname(var3), __get_iname(domain2))
        in mstore.transform_insns)
def test_map_iname_domains():
    """get_iname_domain() keeps the offset range for affine-only maps and
    falls back to a 0-based range once any non-affine map is present."""
    lp_opt = _dummy_opts()
    c = arc.creator('base', arc.kint_type, (10,), 'C',
                    initializer=np.arange(3, 13, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    mstore.finalize()
    assert mstore.get_iname_domain() == ('i', '3 <= i <= 12')

    # add an affine map
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    mapv = np.arange(10, dtype=arc.kint_type)
    var = arc.creator('var', arc.kint_type, (10,), 'C')
    domain = arc.creator('domain', arc.kint_type, (10,), 'C',
                         initializer=mapv)
    mstore.check_and_add_transform(var, domain, 'i')
    mstore.finalize()
    assert mstore.get_iname_domain() == ('i', '3 <= i <= 12')

    # add a non-affine map, domain should bounce to 0-based
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    mapv = np.array(list(range(3)) + list(range(4, 11)), dtype=arc.kint_type)
    var = arc.creator('var2', arc.kint_type, (10,), 'C')
    domain = arc.creator('domain', arc.kint_type, (10,), 'C',
                         initializer=mapv)
    mstore.check_and_add_transform(var, domain, 'i')
    mstore.finalize()
    assert mstore.get_iname_domain() == ('i', '0 <= i <= 9')

    # check non-contigous
    c = arc.creator('base', arc.kint_type, (10,), 'C',
                    initializer=np.array(list(range(3)) + list(range(4, 11)),
                                         dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    mstore.finalize()
    assert mstore.get_iname_domain() == ('i', '0 <= i <= 9')
def test_leaf_inames():
    """Each level of a chained map gets its own iname (i -> i_0 -> i_1)
    with one transform instruction per mapped level."""
    lp_opt = _dummy_opts()
    c = arc.creator('base', arc.kint_type, (10,), 'C',
                    initializer=np.arange(10, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    # create one map
    mapv = np.array(list(range(3)) + list(range(4, 11)), dtype=arc.kint_type)
    mapv2 = np.array(list(range(2)) + list(range(3, 11)), dtype=arc.kint_type)
    domain2 = arc.creator('domain2', arc.kint_type, (10,), 'C',
                          initializer=mapv2)
    domain = arc.creator('domain', arc.kint_type, (10,), 'C',
                         initializer=mapv)
    mstore.check_and_add_transform(domain2, domain, 'i')
    # and another
    var = arc.creator('var', arc.kint_type, (10,), 'C')
    mstore.check_and_add_transform(var, domain2, 'i')
    # now create var
    _, d_str = mstore.apply_maps(domain, 'i')
    _, d2_str = mstore.apply_maps(domain2, 'i')
    _, v_str = mstore.apply_maps(var, 'i')
    assert d_str == 'domain[i]'
    assert d2_str == 'domain2[i_0]'
    assert v_str == 'var[i_1]'
    assert '<> i_0 = domain[i] {id=index_i_0}' in mstore.transform_insns
    assert '<> i_1 = domain2[i_0] {id=index_i_1}' in mstore.transform_insns
def test_input_map_pickup():
    """A variable with no explicit transform still picks up the iname
    introduced by a forced input map."""
    lp_opt = _dummy_opts()
    # test that creation of mapstore with non-contiguous map forces
    # non-transformed variables to pick up the right iname
    c = arc.creator('', arc.kint_type, (10,), 'C',
                    initializer=np.array(list(range(4)) + list(range(6, 12)),
                                         dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    # create a variable
    x = __create_var('x')
    _, x_str = mstore.apply_maps(x, 'i')
    assert 'i_0' in x_str
def test_fixed_creator_indices():
    """A fixed index is substituted literally into the access string."""
    base = arc.creator('base', arc.kint_type, ('isize', 'jsize'), 'C',
                       fixed_indicies=[(0, 1)])
    assert base('j')[1] == 'base[1, j]'
def test_force_inline():
    """A purely affine (+1) map is inlined into the access string and
    emits no transform instruction at all."""
    lp_opt = _dummy_opts()
    mapv = np.arange(0, 5, dtype=arc.kint_type)
    c = arc.creator('base', arc.kint_type, mapv.shape, 'C',
                    initializer=mapv)
    mstore = arc.MapStore(lp_opt, c, True, 'i')
    # add an affine map
    mapv = np.array(mapv, copy=True) + 1
    var = arc.creator('var', arc.kint_type, mapv.shape, 'C')
    domain = arc.creator('domain', arc.kint_type, mapv.shape, 'C',
                         initializer=mapv)
    mstore.check_and_add_transform(var, domain, 'i')
    _, var_str = mstore.apply_maps(var, 'i')
    assert var_str == 'var[i + 1]'
    assert len(mstore.transform_insns) == 0
def test_working_buffer_creations():
    """Check reshaping arrays to a working buffer and the resulting access
    strings, with and without the working-buffer index."""
    for lp_opt in opts_loop():
        def __shape_compare(shape1, shape2):
            # shapes may mix ints and symbolic names -> compare stringified dims
            for s1, s2 in zip(*(shape1, shape2)):
                assert str(s1) == str(s2)
            return True
        # make a creator to form the base of the mapstore
        c = arc.creator('', arc.kint_type, (10,), lp_opt.order,
                        initializer=np.arange(10, dtype=arc.kint_type))
        # and the array to test
        arr = arc.creator('a', arc.kint_type, (10, 10), lp_opt.order)
        # and a final "input" array
        inp = arc.creator('b', arc.kint_type, (10, 10), lp_opt.order)
        mstore = arc.MapStore(lp_opt, c, 8192, 'i')
        arr_lp, arr_str = mstore.apply_maps(
            arr, 'j', 'i',
            reshape_to_working_buffer=arc.work_size.name,
            working_buffer_index='k')
        assert isinstance(arr_lp, lp.ArrayArg) and \
            __shape_compare(arr_lp.shape, (arc.work_size.name, 10))
        # BUGFIX: parenthesize the conditional.  Previously this parsed as
        # `(arr_str == 'a[k, i]') if lp_opt.pre_split else 'a[j, i]'`, which
        # asserted a non-empty (always truthy) string whenever pre_split was
        # False, silently skipping the check.
        assert arr_str == ('a[k, i]' if lp_opt.pre_split else 'a[j, i]')
        inp_lp, inp_str = mstore.apply_maps(inp, 'j', 'i',
                                            reshape_to_working_buffer=False,
                                            working_buffer_index=None)
        assert isinstance(inp_lp, lp.ArrayArg) and __shape_compare(
            inp_lp.shape, (10, 10))
        assert inp_str == 'b[j, i]'
        # now test input without the global index
        arr_lp, arr_str = mstore.apply_maps(arr, 'k', 'i')
        assert isinstance(arr_lp, lp.ArrayArg) and __shape_compare(
            arr_lp.shape, (10, 10))
        assert arr_str == 'a[k, i]'
def test_affine_dict_with_input_map():
    """An affine offset must compose with an input map on the base iname."""
    lp_opt = _dummy_opts()
    # non-contiguous initializer forces an input map (iname 'i_0')
    init = np.array(list(range(4)) + list(range(6, 12)), dtype=arc.kint_type)
    c1 = arc.creator('c1', arc.kint_type, (10,), 'C', initializer=init)
    mstore = arc.MapStore(lp_opt, c1, True, 'i')
    x = __create_var('x')
    _, x_str = mstore.apply_maps(x, 'i', affine={'i': 1})
    # the affine +1 applies to the mapped iname, not the raw one
    assert x_str == 'x[i_0 + 1]'
def test_tree_node_children():
    """Check parent/child bookkeeping in the mapstore's domain tree."""
    lp_opt = _dummy_opts()
    root = arc.creator('c', arc.kint_type, (10,), 'C',
                       initializer=np.arange(3, 13, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, root, True, 'i')
    # a variable hanging off a contiguous child domain
    contig = arc.creator('c2', arc.kint_type, (10,), 'C',
                         initializer=np.arange(10, dtype=arc.kint_type))
    x = __create_var('x')
    mstore.check_and_add_transform(x, contig, 'i')
    # and another off a non-contiguous child domain
    sparse = arc.creator('c3', arc.kint_type, (10,), 'C',
                         initializer=np.array(
                             list(range(4)) + list(range(6, 12)),
                             dtype=arc.kint_type))
    x2 = __create_var('x2')
    mstore.check_and_add_transform(x2, sparse, 'i')
    mstore.finalize()
    # neither leaf is a child of the tree root directly
    assert mstore.tree.has_children([x, x2]) == [False, False]
    # each child domain owns exactly its own leaf
    assert mstore.domain_to_nodes[contig].has_children([x, x2]) == [True, False]
    assert mstore.domain_to_nodes[sparse].has_children([x, x2]) == [False, True]
    # tree search maps knowns to their nodes and unknowns to None
    x3 = __create_var('x3')
    assert arc.search_tree(mstore.tree.parent, [x, x2, x3]) == [
        mstore.domain_to_nodes[contig], mstore.domain_to_nodes[sparse], None]
def test_absolute_root():
    """The absolute root stays on the base domain until an input map is
    forced, after which it moves to the generated '<name>_map' node."""
    lp_opt = _dummy_opts()
    root_domain = arc.creator('c', arc.kint_type, (10,), 'C',
                              initializer=np.arange(3, 13, dtype=arc.kint_type))
    mstore = arc.MapStore(lp_opt, root_domain, True, 'i')
    # a contiguous child does not disturb the root
    child = arc.creator('c2', arc.kint_type, (10,), 'C',
                        initializer=np.arange(10, dtype=arc.kint_type))
    v1 = __create_var('x')
    mstore.check_and_add_transform(v1, child, 'i')
    assert mstore.absolute_root == mstore.domain_to_nodes[root_domain] and \
        mstore.absolute_root.name == 'c'
    # a non-contiguous child forces an input map
    sparse = arc.creator('c3', arc.kint_type, (10,), 'C',
                         initializer=np.array(
                             list(range(4)) + list(range(6, 12)),
                             dtype=arc.kint_type))
    v2 = __create_var('x2')
    mstore.check_and_add_transform(v2, sparse, 'i')
    mstore.finalize()
    # the root is now the synthesized map node
    assert mstore.absolute_root != mstore.domain_to_nodes[root_domain] and \
        mstore.absolute_root.name == 'c_map'
class SubTest(TestClass):
    """Tests that need the full reaction/species test store (marked 'long')."""

    @attr('long')
    def test_namestore_init(self):
        """NameStore construction must succeed for fixed rate specialization."""
        lp_opt = _dummy_opts()
        rinfo = assign_rates(self.store.reacs, self.store.specs,
                             RateSpecialization.fixed)
        arc.NameStore(lp_opt, rinfo, True, self.store.test_size)

    @attr('long')
    def test_input_private_memory_creations(self):
        """The jacobian must keep its declared shape and canonical access."""
        lp_opt = _dummy_opts()
        rinfo = assign_rates(self.store.reacs, self.store.specs,
                             RateSpecialization.fixed)
        # create name and mapstores
        names = arc.NameStore(lp_opt, rinfo, True, self.store.test_size)
        maps = arc.MapStore(lp_opt, names.phi_inds, self.store.test_size, 'i')
        # create known input
        jac_lp, jac_str = maps.apply_maps(names.jac, 'j', 'k', 'i')
        assert isinstance(jac_lp, lp.ArrayArg) and jac_lp.shape == names.jac.shape
        assert jac_str == 'jac[j, k, i]'
| [
"pyjac.core.rate_subs.assign_rates",
"six.moves.range",
"pyjac.tests.test_utils.OptionLoopWrapper.from_dict",
"pyjac.core.array_creator.MapStore",
"pyjac.tests.get_test_langs",
"pyjac.utils.listify",
"numpy.arange",
"numpy.array",
"nose.tools.assert_raises",
"pyjac.core.array_creator.creator",
"... | [((651, 667), 'pyjac.tests.get_test_langs', 'get_test_langs', ([], {}), '()\n', (665, 667), False, 'from pyjac.tests import get_test_langs\n'), ((721, 886), 'collections.OrderedDict', 'OrderedDict', (["[('width', width), ('depth', depth), ('order', order), ('lang', lang), (\n 'order', order), ('is_simd', is_simd), ('unr', [None]), ('ilp', [None])]"], {}), "([('width', width), ('depth', depth), ('order', order), ('lang',\n lang), ('order', order), ('is_simd', is_simd), ('unr', [None]), ('ilp',\n [None])])\n", (732, 886), False, 'from collections import OrderedDict\n'), ((967, 1002), 'pyjac.tests.test_utils.OptionLoopWrapper.from_dict', 'OptionLoopWrapper.from_dict', (['oploop'], {}), '(oploop)\n', (994, 1002), False, 'from pyjac.tests.test_utils import OptionLoopWrapper\n'), ((1904, 1938), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (1916, 1938), True, 'from pyjac.core import array_creator as arc\n'), ((2391, 2425), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (2403, 2425), True, 'from pyjac.core import array_creator as arc\n'), ((2523, 2566), 'pyjac.core.array_creator.creator', 'arc.creator', (['name', 'arc.kint_type', 'size', '"""C"""'], {}), "(name, arc.kint_type, size, 'C')\n", (2534, 2566), True, 'from pyjac.core import array_creator as arc\n'), ((2769, 2803), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (2781, 2803), True, 'from pyjac.core import array_creator as arc\n'), ((3644, 3678), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (3656, 3678), True, 'from pyjac.core import array_creator as arc\n'), ((5064, 5098), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", 
(5076, 5098), True, 'from pyjac.core import array_creator as arc\n'), ((6499, 6533), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (6511, 6533), True, 'from pyjac.core import array_creator as arc\n'), ((7421, 7455), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (7433, 7455), True, 'from pyjac.core import array_creator as arc\n'), ((7772, 7785), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (7781, 7785), True, 'import numpy as np\n'), ((7835, 7848), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (7844, 7848), True, 'import numpy as np\n'), ((8419, 8453), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (8431, 8453), True, 'from pyjac.core import array_creator as arc\n'), ((10815, 10849), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (10827, 10849), True, 'from pyjac.core import array_creator as arc\n'), ((12610, 12644), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (12622, 12644), True, 'from pyjac.core import array_creator as arc\n'), ((13337, 13371), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (13349, 13371), True, 'from pyjac.core import array_creator as arc\n'), ((14244, 14278), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (14256, 14278), True, 'from pyjac.core import array_creator as arc\n'), ((14359, 14404), 'pyjac.core.array_creator.creator', 'arc.creator', (['"""var"""', 'arc.kint_type', '(10,)', '"""C"""'], {}), "('var', arc.kint_type, (10,), 'C')\n", (14370, 14404), True, 'from pyjac.core import 
array_creator as arc\n'), ((15105, 15139), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (15117, 15139), True, 'from pyjac.core import array_creator as arc\n'), ((15220, 15265), 'pyjac.core.array_creator.creator', 'arc.creator', (['"""var"""', 'arc.kint_type', '(10,)', '"""C"""'], {}), "('var', arc.kint_type, (10,), 'C')\n", (15231, 15265), True, 'from pyjac.core import array_creator as arc\n'), ((15860, 15894), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (15872, 15894), True, 'from pyjac.core import array_creator as arc\n'), ((16057, 16102), 'pyjac.core.array_creator.creator', 'arc.creator', (['"""var"""', 'arc.kint_type', '(10,)', '"""C"""'], {}), "('var', arc.kint_type, (10,), 'C')\n", (16068, 16102), True, 'from pyjac.core import array_creator as arc\n'), ((16354, 16400), 'pyjac.core.array_creator.creator', 'arc.creator', (['"""var2"""', 'arc.kint_type', '(10,)', '"""C"""'], {}), "('var2', arc.kint_type, (10,), 'C')\n", (16365, 16400), True, 'from pyjac.core import array_creator as arc\n'), ((16726, 16772), 'pyjac.core.array_creator.creator', 'arc.creator', (['"""var3"""', 'arc.kint_type', '(10,)', '"""C"""'], {}), "('var3', arc.kint_type, (10,), 'C')\n", (16737, 16772), True, 'from pyjac.core import array_creator as arc\n'), ((18294, 18328), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (18306, 18328), True, 'from pyjac.core import array_creator as arc\n'), ((18451, 18485), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (18463, 18485), True, 'from pyjac.core import array_creator as arc\n'), ((18497, 18531), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (18506, 18531), True, 'import numpy as 
np\n'), ((18542, 18587), 'pyjac.core.array_creator.creator', 'arc.creator', (['"""var"""', 'arc.kint_type', '(10,)', '"""C"""'], {}), "('var', arc.kint_type, (10,), 'C')\n", (18553, 18587), True, 'from pyjac.core import array_creator as arc\n'), ((18601, 18667), 'pyjac.core.array_creator.creator', 'arc.creator', (['"""domain"""', 'arc.kint_type', '(10,)', '"""C"""'], {'initializer': 'mapv'}), "('domain', arc.kint_type, (10,), 'C', initializer=mapv)\n", (18612, 18667), True, 'from pyjac.core import array_creator as arc\n'), ((18904, 18938), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (18916, 18938), True, 'from pyjac.core import array_creator as arc\n'), ((19027, 19073), 'pyjac.core.array_creator.creator', 'arc.creator', (['"""var2"""', 'arc.kint_type', '(10,)', '"""C"""'], {}), "('var2', arc.kint_type, (10,), 'C')\n", (19038, 19073), True, 'from pyjac.core import array_creator as arc\n'), ((19087, 19153), 'pyjac.core.array_creator.creator', 'arc.creator', (['"""domain"""', 'arc.kint_type', '(10,)', '"""C"""'], {'initializer': 'mapv'}), "('domain', arc.kint_type, (10,), 'C', initializer=mapv)\n", (19098, 19153), True, 'from pyjac.core import array_creator as arc\n'), ((19552, 19586), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (19564, 19586), True, 'from pyjac.core import array_creator as arc\n'), ((19860, 19894), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (19872, 19894), True, 'from pyjac.core import array_creator as arc\n'), ((20088, 20156), 'pyjac.core.array_creator.creator', 'arc.creator', (['"""domain2"""', 'arc.kint_type', '(10,)', '"""C"""'], {'initializer': 'mapv2'}), "('domain2', arc.kint_type, (10,), 'C', initializer=mapv2)\n", (20099, 20156), True, 'from pyjac.core import array_creator as arc\n'), ((20196, 20262), 
'pyjac.core.array_creator.creator', 'arc.creator', (['"""domain"""', 'arc.kint_type', '(10,)', '"""C"""'], {'initializer': 'mapv'}), "('domain', arc.kint_type, (10,), 'C', initializer=mapv)\n", (20207, 20262), True, 'from pyjac.core import array_creator as arc\n'), ((20374, 20419), 'pyjac.core.array_creator.creator', 'arc.creator', (['"""var"""', 'arc.kint_type', '(10,)', '"""C"""'], {}), "('var', arc.kint_type, (10,), 'C')\n", (20385, 20419), True, 'from pyjac.core import array_creator as arc\n'), ((21274, 21308), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (21286, 21308), True, 'from pyjac.core import array_creator as arc\n'), ((21472, 21561), 'pyjac.core.array_creator.creator', 'arc.creator', (['"""base"""', 'arc.kint_type', "('isize', 'jsize')", '"""C"""'], {'fixed_indicies': '[(0, 1)]'}), "('base', arc.kint_type, ('isize', 'jsize'), 'C', fixed_indicies=\n [(0, 1)])\n", (21483, 21561), True, 'from pyjac.core import array_creator as arc\n'), ((21679, 21715), 'numpy.arange', 'np.arange', (['(0)', '(5)'], {'dtype': 'arc.kint_type'}), '(0, 5, dtype=arc.kint_type)\n', (21688, 21715), True, 'import numpy as np\n'), ((21724, 21793), 'pyjac.core.array_creator.creator', 'arc.creator', (['"""base"""', 'arc.kint_type', 'mapv.shape', '"""C"""'], {'initializer': 'mapv'}), "('base', arc.kint_type, mapv.shape, 'C', initializer=mapv)\n", (21735, 21793), True, 'from pyjac.core import array_creator as arc\n'), ((21828, 21862), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (21840, 21862), True, 'from pyjac.core import array_creator as arc\n'), ((21939, 21989), 'pyjac.core.array_creator.creator', 'arc.creator', (['"""var"""', 'arc.kint_type', 'mapv.shape', '"""C"""'], {}), "('var', arc.kint_type, mapv.shape, 'C')\n", (21950, 21989), True, 'from pyjac.core import array_creator as arc\n'), ((22003, 22074), 
'pyjac.core.array_creator.creator', 'arc.creator', (['"""domain"""', 'arc.kint_type', 'mapv.shape', '"""C"""'], {'initializer': 'mapv'}), "('domain', arc.kint_type, mapv.shape, 'C', initializer=mapv)\n", (22014, 22074), True, 'from pyjac.core import array_creator as arc\n'), ((24259, 24294), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c1', '(True)', '"""i"""'], {}), "(lp_opt, c1, True, 'i')\n", (24271, 24294), True, 'from pyjac.core import array_creator as arc\n'), ((24638, 24672), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (24650, 24672), True, 'from pyjac.core import array_creator as arc\n'), ((25840, 25874), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(True)', '"""i"""'], {}), "(lp_opt, c, True, 'i')\n", (25852, 25874), True, 'from pyjac.core import array_creator as arc\n'), ((26663, 26675), 'nose.plugins.attrib.attr', 'attr', (['"""long"""'], {}), "('long')\n", (26667, 26675), False, 'from nose.plugins.attrib import attr\n'), ((26945, 26957), 'nose.plugins.attrib.attr', 'attr', (['"""long"""'], {}), "('long')\n", (26949, 26957), False, 'from nose.plugins.attrib import attr\n'), ((1217, 1246), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError'], {}), '(AssertionError)\n', (1230, 1246), False, 'from nose.tools import assert_raises\n'), ((1390, 1419), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError'], {}), '(AssertionError)\n', (1403, 1419), False, 'from nose.tools import assert_raises\n'), ((2113, 2126), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2122, 2126), True, 'import numpy as np\n'), ((12374, 12408), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (12383, 12408), True, 'import numpy as np\n'), ((13065, 13094), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError'], {}), '(AssertionError)\n', (13078, 13094), False, 'from 
nose.tools import assert_raises\n'), ((13864, 13898), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (13873, 13898), True, 'import numpy as np\n'), ((21899, 21924), 'numpy.array', 'np.array', (['mapv'], {'copy': '(True)'}), '(mapv, copy=True)\n', (21907, 21924), True, 'import numpy as np\n'), ((22750, 22805), 'pyjac.core.array_creator.creator', 'arc.creator', (['"""a"""', 'arc.kint_type', '(10, 10)', 'lp_opt.order'], {}), "('a', arc.kint_type, (10, 10), lp_opt.order)\n", (22761, 22805), True, 'from pyjac.core import array_creator as arc\n'), ((22857, 22912), 'pyjac.core.array_creator.creator', 'arc.creator', (['"""b"""', 'arc.kint_type', '(10, 10)', 'lp_opt.order'], {}), "('b', arc.kint_type, (10, 10), lp_opt.order)\n", (22868, 22912), True, 'from pyjac.core import array_creator as arc\n'), ((22931, 22965), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'c', '(8192)', '"""i"""'], {}), "(lp_opt, c, 8192, 'i')\n", (22943, 22965), True, 'from pyjac.core import array_creator as arc\n'), ((25503, 25551), 'pyjac.core.array_creator.search_tree', 'arc.search_tree', (['mstore.tree.parent', '[x, x2, x3]'], {}), '(mstore.tree.parent, [x, x2, x3])\n', (25518, 25551), True, 'from pyjac.core import array_creator as arc\n'), ((26762, 26836), 'pyjac.core.rate_subs.assign_rates', 'assign_rates', (['self.store.reacs', 'self.store.specs', 'RateSpecialization.fixed'], {}), '(self.store.reacs, self.store.specs, RateSpecialization.fixed)\n', (26774, 26836), False, 'from pyjac.core.rate_subs import assign_rates\n'), ((26878, 26938), 'pyjac.core.array_creator.NameStore', 'arc.NameStore', (['lp_opt', 'rate_info', '(True)', 'self.store.test_size'], {}), '(lp_opt, rate_info, True, self.store.test_size)\n', (26891, 26938), True, 'from pyjac.core import array_creator as arc\n'), ((27060, 27134), 'pyjac.core.rate_subs.assign_rates', 'assign_rates', (['self.store.reacs', 'self.store.specs', 'RateSpecialization.fixed'], {}), 
'(self.store.reacs, self.store.specs, RateSpecialization.fixed)\n', (27072, 27134), False, 'from pyjac.core.rate_subs import assign_rates\n'), ((27221, 27281), 'pyjac.core.array_creator.NameStore', 'arc.NameStore', (['lp_opt', 'rate_info', '(True)', 'self.store.test_size'], {}), '(lp_opt, rate_info, True, self.store.test_size)\n', (27234, 27281), True, 'from pyjac.core import array_creator as arc\n'), ((27299, 27363), 'pyjac.core.array_creator.MapStore', 'arc.MapStore', (['lp_opt', 'nstore.phi_inds', 'self.store.test_size', '"""i"""'], {}), "(lp_opt, nstore.phi_inds, self.store.test_size, 'i')\n", (27311, 27363), True, 'from pyjac.core import array_creator as arc\n'), ((1085, 1099), 'pyjac.utils.listify', 'listify', (['order'], {}), '(order)\n', (1092, 1099), False, 'from pyjac.utils import listify\n'), ((2341, 2375), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (2350, 2375), True, 'import numpy as np\n'), ((2716, 2753), 'numpy.arange', 'np.arange', (['(3)', '(13)'], {'dtype': 'arc.kint_type'}), '(3, 13, dtype=arc.kint_type)\n', (2725, 2753), True, 'import numpy as np\n'), ((2940, 2974), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (2949, 2974), True, 'import numpy as np\n'), ((3591, 3628), 'numpy.arange', 'np.arange', (['(3)', '(13)'], {'dtype': 'arc.kint_type'}), '(3, 13, dtype=arc.kint_type)\n', (3600, 3628), True, 'import numpy as np\n'), ((3815, 3849), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (3824, 3849), True, 'import numpy as np\n'), ((5011, 5048), 'numpy.arange', 'np.arange', (['(3)', '(13)'], {'dtype': 'arc.kint_type'}), '(3, 13, dtype=arc.kint_type)\n', (5020, 5048), True, 'import numpy as np\n'), ((5241, 5275), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (5250, 5275), True, 'import numpy as np\n'), ((6446, 6483), 'numpy.arange', 
'np.arange', (['(3)', '(13)'], {'dtype': 'arc.kint_type'}), '(3, 13, dtype=arc.kint_type)\n', (6455, 6483), True, 'import numpy as np\n'), ((7368, 7405), 'numpy.arange', 'np.arange', (['(3)', '(13)'], {'dtype': 'arc.kint_type'}), '(3, 13, dtype=arc.kint_type)\n', (7377, 7405), True, 'import numpy as np\n'), ((8366, 8403), 'numpy.arange', 'np.arange', (['(3)', '(13)'], {'dtype': 'arc.kint_type'}), '(3, 13, dtype=arc.kint_type)\n', (8375, 8403), True, 'import numpy as np\n'), ((8591, 8632), 'numpy.arange', 'np.arange', (['(10)', '(0)', '(-1)'], {'dtype': 'arc.kint_type'}), '(10, 0, -1, dtype=arc.kint_type)\n', (8600, 8632), True, 'import numpy as np\n'), ((8819, 8856), 'numpy.arange', 'np.arange', (['(4)', '(14)'], {'dtype': 'arc.kint_type'}), '(4, 14, dtype=arc.kint_type)\n', (8828, 8856), True, 'import numpy as np\n'), ((9055, 9092), 'numpy.arange', 'np.arange', (['(4)', '(14)'], {'dtype': 'arc.kint_type'}), '(4, 14, dtype=arc.kint_type)\n', (9064, 9092), True, 'import numpy as np\n'), ((9255, 9292), 'numpy.arange', 'np.arange', (['(3)', '(13)'], {'dtype': 'arc.kint_type'}), '(3, 13, dtype=arc.kint_type)\n', (9264, 9292), True, 'import numpy as np\n'), ((10765, 10799), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (10774, 10799), True, 'import numpy as np\n'), ((10957, 10991), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (10966, 10991), True, 'import numpy as np\n'), ((12560, 12594), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (12569, 12594), True, 'import numpy as np\n'), ((12752, 12786), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (12761, 12786), True, 'import numpy as np\n'), ((12950, 12987), 'numpy.arange', 'np.arange', (['(3)', '(13)'], {'dtype': 'arc.kint_type'}), '(3, 13, dtype=arc.kint_type)\n', (12959, 12987), True, 'import numpy as 
np\n'), ((13284, 13321), 'numpy.arange', 'np.arange', (['(3)', '(13)'], {'dtype': 'arc.kint_type'}), '(3, 13, dtype=arc.kint_type)\n', (13293, 13321), True, 'import numpy as np\n'), ((14191, 14228), 'numpy.arange', 'np.arange', (['(3)', '(13)'], {'dtype': 'arc.kint_type'}), '(3, 13, dtype=arc.kint_type)\n', (14200, 14228), True, 'import numpy as np\n'), ((15056, 15089), 'numpy.arange', 'np.arange', (['(5)'], {'dtype': 'arc.kint_type'}), '(5, dtype=arc.kint_type)\n', (15065, 15089), True, 'import numpy as np\n'), ((15365, 15399), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (15374, 15399), True, 'import numpy as np\n'), ((15811, 15844), 'numpy.arange', 'np.arange', (['(5)'], {'dtype': 'arc.kint_type'}), '(5, dtype=arc.kint_type)\n', (15820, 15844), True, 'import numpy as np\n'), ((16202, 16236), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (16211, 16236), True, 'import numpy as np\n'), ((16503, 16537), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (16512, 16537), True, 'import numpy as np\n'), ((18241, 18278), 'numpy.arange', 'np.arange', (['(3)', '(13)'], {'dtype': 'arc.kint_type'}), '(3, 13, dtype=arc.kint_type)\n', (18250, 18278), True, 'import numpy as np\n'), ((19811, 19845), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (19820, 19845), True, 'import numpy as np\n'), ((24586, 24623), 'numpy.arange', 'np.arange', (['(3)', '(13)'], {'dtype': 'arc.kint_type'}), '(3, 13, dtype=arc.kint_type)\n', (24595, 24623), True, 'import numpy as np\n'), ((24780, 24814), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (24789, 24814), True, 'import numpy as np\n'), ((25788, 25825), 'numpy.arange', 'np.arange', (['(3)', '(13)'], {'dtype': 'arc.kint_type'}), '(3, 13, dtype=arc.kint_type)\n', (25797, 25825), True, 
'import numpy as np\n'), ((25982, 26016), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (25991, 26016), True, 'import numpy as np\n'), ((1331, 1362), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'np.float64'}), '(10, dtype=np.float64)\n', (1340, 1362), True, 'import numpy as np\n'), ((1504, 1535), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'np.float32'}), '(10, dtype=np.float32)\n', (1513, 1535), True, 'import numpy as np\n'), ((18964, 18972), 'six.moves.range', 'range', (['(3)'], {}), '(3)\n', (18969, 18972), False, 'from six.moves import range\n'), ((18981, 18993), 'six.moves.range', 'range', (['(4)', '(11)'], {}), '(4, 11)\n', (18986, 18993), False, 'from six.moves import range\n'), ((19942, 19950), 'six.moves.range', 'range', (['(3)'], {}), '(3)\n', (19947, 19950), False, 'from six.moves import range\n'), ((19959, 19971), 'six.moves.range', 'range', (['(4)', '(11)'], {}), '(4, 11)\n', (19964, 19971), False, 'from six.moves import range\n'), ((20021, 20029), 'six.moves.range', 'range', (['(2)'], {}), '(2)\n', (20026, 20029), False, 'from six.moves import range\n'), ((20038, 20050), 'six.moves.range', 'range', (['(3)', '(11)'], {}), '(3, 11)\n', (20043, 20050), False, 'from six.moves import range\n'), ((22667, 22701), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'arc.kint_type'}), '(10, dtype=arc.kint_type)\n', (22676, 22701), True, 'import numpy as np\n'), ((1795, 1803), 'six.moves.range', 'range', (['(4)'], {}), '(4)\n', (1800, 1803), False, 'from six.moves import range\n'), ((1812, 1824), 'six.moves.range', 'range', (['(6)', '(12)'], {}), '(6, 12)\n', (1817, 1824), False, 'from six.moves import range\n'), ((4082, 4090), 'six.moves.range', 'range', (['(4)'], {}), '(4)\n', (4087, 4090), False, 'from six.moves import range\n'), ((4099, 4111), 'six.moves.range', 'range', (['(6)', '(12)'], {}), '(6, 12)\n', (4104, 4111), False, 'from six.moves import range\n'), ((5504, 5512), 'six.moves.range', 
'range', (['(4)'], {}), '(4)\n', (5509, 5512), False, 'from six.moves import range\n'), ((5521, 5533), 'six.moves.range', 'range', (['(6)', '(12)'], {}), '(6, 12)\n', (5526, 5533), False, 'from six.moves import range\n'), ((6664, 6672), 'six.moves.range', 'range', (['(3)'], {}), '(3)\n', (6669, 6672), False, 'from six.moves import range\n'), ((6723, 6735), 'six.moves.range', 'range', (['(4)', '(11)'], {}), '(4, 11)\n', (6728, 6735), False, 'from six.moves import range\n'), ((7586, 7594), 'six.moves.range', 'range', (['(3)'], {}), '(3)\n', (7591, 7594), False, 'from six.moves import range\n'), ((7645, 7657), 'six.moves.range', 'range', (['(4)', '(11)'], {}), '(4, 11)\n', (7650, 7657), False, 'from six.moves import range\n'), ((11185, 11193), 'six.moves.range', 'range', (['(5)'], {}), '(5)\n', (11190, 11193), False, 'from six.moves import range\n'), ((11202, 11214), 'six.moves.range', 'range', (['(6)', '(11)'], {}), '(6, 11)\n', (11207, 11214), False, 'from six.moves import range\n'), ((11488, 11496), 'six.moves.range', 'range', (['(5)'], {}), '(5)\n', (11493, 11496), False, 'from six.moves import range\n'), ((11505, 11517), 'six.moves.range', 'range', (['(6)', '(11)'], {}), '(6, 11)\n', (11510, 11517), False, 'from six.moves import range\n'), ((11782, 11790), 'six.moves.range', 'range', (['(4)'], {}), '(4)\n', (11787, 11790), False, 'from six.moves import range\n'), ((11799, 11811), 'six.moves.range', 'range', (['(5)', '(11)'], {}), '(5, 11)\n', (11804, 11811), False, 'from six.moves import range\n'), ((13541, 13549), 'six.moves.range', 'range', (['(4)'], {}), '(4)\n', (13546, 13549), False, 'from six.moves import range\n'), ((13558, 13570), 'six.moves.range', 'range', (['(5)', '(11)'], {}), '(5, 11)\n', (13563, 13570), False, 'from six.moves import range\n'), ((14518, 14526), 'six.moves.range', 'range', (['(4)'], {}), '(4)\n', (14523, 14526), False, 'from six.moves import range\n'), ((14581, 14593), 'six.moves.range', 'range', (['(5)', '(11)'], {}), '(5, 11)\n', 
(14586, 14593), False, 'from six.moves import range\n'), ((16889, 16897), 'six.moves.range', 'range', (['(3)'], {}), '(3)\n', (16894, 16897), False, 'from six.moves import range\n'), ((16953, 16965), 'six.moves.range', 'range', (['(4)', '(11)'], {}), '(4, 11)\n', (16958, 16965), False, 'from six.moves import range\n'), ((19443, 19451), 'six.moves.range', 'range', (['(3)'], {}), '(3)\n', (19448, 19451), False, 'from six.moves import range\n'), ((19460, 19472), 'six.moves.range', 'range', (['(4)', '(11)'], {}), '(4, 11)\n', (19465, 19472), False, 'from six.moves import range\n'), ((21165, 21173), 'six.moves.range', 'range', (['(4)'], {}), '(4)\n', (21170, 21173), False, 'from six.moves import range\n'), ((21182, 21194), 'six.moves.range', 'range', (['(6)', '(12)'], {}), '(6, 12)\n', (21187, 21194), False, 'from six.moves import range\n'), ((24149, 24157), 'six.moves.range', 'range', (['(4)'], {}), '(4)\n', (24154, 24157), False, 'from six.moves import range\n'), ((24166, 24178), 'six.moves.range', 'range', (['(6)', '(12)'], {}), '(6, 12)\n', (24171, 24178), False, 'from six.moves import range\n'), ((24990, 24998), 'six.moves.range', 'range', (['(4)'], {}), '(4)\n', (24995, 24998), False, 'from six.moves import range\n'), ((25007, 25019), 'six.moves.range', 'range', (['(6)', '(12)'], {}), '(6, 12)\n', (25012, 25019), False, 'from six.moves import range\n'), ((26324, 26332), 'six.moves.range', 'range', (['(4)'], {}), '(4)\n', (26329, 26332), False, 'from six.moves import range\n'), ((26341, 26353), 'six.moves.range', 'range', (['(6)', '(12)'], {}), '(6, 12)\n', (26346, 26353), False, 'from six.moves import range\n')] |
# Implement and train a neural network from scratch in Python for the MNIST dataset (no PyTorch).
# The neural network should be trained on the Training Set using stochastic gradient descent.
import numpy as np
import h5py
#data file type h5py
import time
import copy
from random import randint
# cd Desktop/CS\ 398/Assignments/A2/
# Load the MNIST dataset: images are rows of 784 floats, labels are ints 0-9.
# The context manager closes the HDF5 file automatically.
with h5py.File('MNISTdata.hdf5', 'r') as mnist_file:
    x_train = np.float32(mnist_file['x_train'][:])
    y_train = np.int32(np.array(mnist_file['y_train'][:, 0]))
    x_test = np.float32(mnist_file['x_test'][:])
    y_test = np.int32(np.array(mnist_file['y_test'][:, 0]))
####################################################################################
#Implementation of stochastic gradient descent algorithm
class NN:
    """A fully connected neural network with one hidden layer.

    Architecture: input -> hidden (ReLU by default) -> softmax output,
    trained with stochastic gradient descent one sample at a time.
    """

    def __init__(self, inputs, hidden, outputs):
        """Initialize parameters with fan-in-scaled Gaussian noise.

        Args:
            inputs:  number of input features per sample.
            hidden:  number of hidden units.
            outputs: number of output classes.
        """
        # Per-instance parameter dicts.  (The original declared these as
        # class-level attributes, so every NN instance silently shared and
        # mutated the same two dictionaries.)
        self.first_layer = {}
        self.second_layer = {}
        # Scale by sqrt(fan-in) to keep initial activations well-conditioned.
        # BUG FIX: the first layer previously divided by the *global*
        # `num_inputs` instead of the `inputs` constructor argument.
        self.first_layer['para'] = np.random.randn(hidden, inputs) / np.sqrt(inputs)
        self.first_layer['bias'] = np.random.randn(hidden, 1) / np.sqrt(hidden)
        self.second_layer['para'] = np.random.randn(outputs, hidden) / np.sqrt(hidden)
        self.second_layer['bias'] = np.random.randn(outputs, 1) / np.sqrt(hidden)
        self.input_size = inputs
        self.hid_size = hidden
        self.output_size = outputs

    def __activfunc(self, Z, type='ReLU', deri=False):
        """Apply the activation function (or its derivative if deri=True).

        Raises:
            TypeError: if `type` is not one of 'ReLU', 'Sigmoid', 'tanh'.
        """
        if type == 'ReLU':
            if deri:
                return np.array([1 if i > 0 else 0 for i in np.squeeze(Z)])
            return np.array([i if i > 0 else 0 for i in np.squeeze(Z)])
        elif type == 'Sigmoid':
            if deri:
                return 1 / (1 + np.exp(-Z)) * (1 - 1 / (1 + np.exp(-Z)))
            return 1 / (1 + np.exp(-Z))
        elif type == 'tanh':
            # BUG FIX: the original returned None for the derivative and the
            # derivative formula for the activation (branches were swapped).
            if deri:
                return 1 - np.tanh(Z) ** 2
            return np.tanh(Z)
        else:
            raise TypeError('Invalid type!')

    def __Softmax(self, z):
        """Numerically stable softmax (shift by the max before exponentiating)."""
        e = np.exp(z - np.max(z))
        return e / np.sum(e)

    def __cross_entropy_error(self, v, y):
        """Cross-entropy loss of predicted distribution v for true label y."""
        return -np.log(v[y])

    def __forward(self, x, y):
        """Forward pass for one sample; returns the intermediates and loss."""
        Z = np.matmul(self.first_layer['para'], x).reshape((self.hid_size, 1)) + self.first_layer['bias']
        H = np.array(self.__activfunc(Z)).reshape((self.hid_size, 1))
        U = np.matmul(self.second_layer['para'], H).reshape((self.output_size, 1)) + self.second_layer['bias']
        predict_list = np.squeeze(self.__Softmax(U))
        error = self.__cross_entropy_error(predict_list, y)
        return {
            'Z': Z,
            'H': H,
            'U': U,
            'f_X': predict_list.reshape((1, self.output_size)),
            'error': error,
        }

    def __back_propagation(self, x, y, f_result):
        """Backward pass: gradients of the loss w.r.t. all parameters."""
        # One-hot encoding of the true label.
        E = np.zeros((1, self.output_size))
        E[0][y] = 1
        dU = (-(E - f_result['f_X'])).reshape((self.output_size, 1))
        db_2 = copy.copy(dU)
        dC = np.matmul(dU, f_result['H'].transpose())
        delta = np.matmul(self.second_layer['para'].transpose(), dU)
        db_1 = delta.reshape(self.hid_size, 1) * self.__activfunc(f_result['Z'], deri=True).reshape(self.hid_size, 1)
        # Generalized: use self.input_size instead of the hard-coded 784.
        dW = np.matmul(db_1.reshape((self.hid_size, 1)), x.reshape((1, self.input_size)))
        return {'dC': dC, 'db_2': db_2, 'db_1': db_1, 'dW': dW}

    def __optimize(self, b_result, learning_rate):
        """Apply one SGD step with the given gradients."""
        self.second_layer['para'] -= learning_rate * b_result['dC']
        self.second_layer['bias'] -= learning_rate * b_result['db_2']
        self.first_layer['bias'] -= learning_rate * b_result['db_1']
        self.first_layer['para'] -= learning_rate * b_result['dW']

    def __loss(self, X_train, Y_train):
        """Total cross-entropy loss over an entire dataset."""
        loss = 0
        for n in range(len(X_train)):
            loss += self.__forward(X_train[n][:], Y_train[n])['error']
        return loss

    def train(self, X_train, Y_train, num_iterations=1000, learning_rate=0.5):
        """Run SGD for num_iterations single-sample updates.

        Progress is printed every 1000 updates; every 5000 updates the full
        training loss and the test accuracy (on the module-level x_test /
        y_test arrays) are recorded.

        Returns:
            (loss_dict, test_dict): metrics keyed by iteration count (str).
        """
        # Sample all training indices up front, with replacement.
        rand_indices = np.random.choice(len(X_train), num_iterations, replace=True)

        def l_rate(base_rate, ite, num_iterations, schedule=False):
            # Optionally decay the rate by 10x five times over training.
            if schedule:
                return base_rate * 10 ** (-np.floor(ite / num_iterations * 5))
            return base_rate

        count = 1
        loss_dict = {}
        test_dict = {}
        for i in rand_indices:
            f_result = self.__forward(X_train[i], Y_train[i])
            b_result = self.__back_propagation(X_train[i], Y_train[i], f_result)
            # BUG FIX: the schedule previously received the random sample
            # index `i` rather than the iteration counter, so the decay
            # effectively never triggered.
            self.__optimize(b_result, l_rate(learning_rate, count, num_iterations, True))
            if count % 1000 == 0:
                if count % 5000 == 0:
                    loss = self.__loss(X_train, Y_train)
                    test = self.testing(x_test, y_test)
                    print('Trained for {} times,'.format(count), 'loss = {}, test = {}'.format(loss, test))
                    loss_dict[str(count)] = loss
                    test_dict[str(count)] = test
                else:
                    print('Trained for {} times,'.format(count))
            count += 1
        print('Training finished!')
        return loss_dict, test_dict

    def testing(self, X_test, Y_test):
        """Return the classification accuracy of the model on (X_test, Y_test)."""
        total_correct = 0
        for n in range(len(X_test)):
            y = Y_test[n]
            x = X_test[n][:]
            prediction = np.argmax(self.__forward(x, y)['f_X'])
            if prediction == y:
                total_correct += 1
        print('Accuracy Test: ', total_correct / len(X_test))
        # BUG FIX: np.float was removed in NumPy 1.24; use the builtin float.
        return total_correct / float(len(X_test))
####################################################################################
# Hyperparameters and top-level training / evaluation / plotting driver.
# set the number of iterations
num_iterations = 200000
# set the base learning rate
learning_rate = 0.01
# number of inputs
num_inputs = 28*28
# number of outputs
num_outputs = 10
# size of hidden layer
hidden_size = 300
# data fitting, training and accuracy evaluation
model = NN(num_inputs,hidden_size,num_outputs)
# train() returns metrics keyed by iteration count (as strings), sampled
# every 5000 updates.
cost_dict, tests_dict = model.train(x_train,y_train,num_iterations=num_iterations,learning_rate=learning_rate)
accu = model.testing(x_test,y_test)
# plotting the loss function and test accuracy corresponding to the number of iterations
import matplotlib.pyplot as plt
plt.plot(cost_dict.keys(),cost_dict.values())
plt.ylabel('Loss function')
plt.xlabel('Number of iterations')
plt.xticks(rotation=60)
plt.title('Loss function w.r.t. number of iterations')
plt.show()
plt.plot(tests_dict.keys(),tests_dict.values())
plt.ylabel('Test Accuracy')
plt.xlabel('Number of iterations')
plt.xticks(rotation=60)
plt.title('Test accuracy w.r.t. number of iterations')
plt.show()
| [
"matplotlib.pyplot.title",
"h5py.File",
"matplotlib.pyplot.show",
"numpy.log",
"numpy.tanh",
"numpy.random.randn",
"numpy.float32",
"numpy.floor",
"copy.copy",
"numpy.array",
"matplotlib.pyplot.xticks",
"numpy.exp",
"numpy.matmul",
"numpy.squeeze",
"matplotlib.pyplot.ylabel",
"matplotl... | [((366, 398), 'h5py.File', 'h5py.File', (['"""MNISTdata.hdf5"""', '"""r"""'], {}), "('MNISTdata.hdf5', 'r')\n", (375, 398), False, 'import h5py\n'), ((409, 445), 'numpy.float32', 'np.float32', (["MNIST_data['x_train'][:]"], {}), "(MNIST_data['x_train'][:])\n", (419, 445), True, 'import numpy as np\n'), ((512, 547), 'numpy.float32', 'np.float32', (["MNIST_data['x_test'][:]"], {}), "(MNIST_data['x_test'][:])\n", (522, 547), True, 'import numpy as np\n'), ((7040, 7067), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss function"""'], {}), "('Loss function')\n", (7050, 7067), True, 'import matplotlib.pyplot as plt\n'), ((7068, 7102), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of iterations"""'], {}), "('Number of iterations')\n", (7078, 7102), True, 'import matplotlib.pyplot as plt\n'), ((7103, 7126), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(60)'}), '(rotation=60)\n', (7113, 7126), True, 'import matplotlib.pyplot as plt\n'), ((7127, 7181), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss function w.r.t. number of iterations"""'], {}), "('Loss function w.r.t. number of iterations')\n", (7136, 7181), True, 'import matplotlib.pyplot as plt\n'), ((7182, 7192), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7190, 7192), True, 'import matplotlib.pyplot as plt\n'), ((7242, 7269), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Test Accuracy"""'], {}), "('Test Accuracy')\n", (7252, 7269), True, 'import matplotlib.pyplot as plt\n'), ((7270, 7304), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of iterations"""'], {}), "('Number of iterations')\n", (7280, 7304), True, 'import matplotlib.pyplot as plt\n'), ((7305, 7328), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(60)'}), '(rotation=60)\n', (7315, 7328), True, 'import matplotlib.pyplot as plt\n'), ((7329, 7383), 'matplotlib.pyplot.title', 'plt.title', (['"""Test accuracy w.r.t. number of iterations"""'], {}), "('Test accuracy w.r.t. 
number of iterations')\n", (7338, 7383), True, 'import matplotlib.pyplot as plt\n'), ((7384, 7394), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7392, 7394), True, 'import matplotlib.pyplot as plt\n'), ((465, 502), 'numpy.array', 'np.array', (["MNIST_data['y_train'][:, 0]"], {}), "(MNIST_data['y_train'][:, 0])\n", (473, 502), True, 'import numpy as np\n'), ((566, 602), 'numpy.array', 'np.array', (["MNIST_data['y_test'][:, 0]"], {}), "(MNIST_data['y_test'][:, 0])\n", (574, 602), True, 'import numpy as np\n'), ((3351, 3364), 'copy.copy', 'copy.copy', (['dU'], {}), '(dU)\n', (3360, 3364), False, 'import copy\n'), ((1017, 1048), 'numpy.random.randn', 'np.random.randn', (['hidden', 'inputs'], {}), '(hidden, inputs)\n', (1032, 1048), True, 'import numpy as np\n'), ((1050, 1069), 'numpy.sqrt', 'np.sqrt', (['num_inputs'], {}), '(num_inputs)\n', (1057, 1069), True, 'import numpy as np\n'), ((1105, 1131), 'numpy.random.randn', 'np.random.randn', (['hidden', '(1)'], {}), '(hidden, 1)\n', (1120, 1131), True, 'import numpy as np\n'), ((1133, 1148), 'numpy.sqrt', 'np.sqrt', (['hidden'], {}), '(hidden)\n', (1140, 1148), True, 'import numpy as np\n'), ((1185, 1217), 'numpy.random.randn', 'np.random.randn', (['outputs', 'hidden'], {}), '(outputs, hidden)\n', (1200, 1217), True, 'import numpy as np\n'), ((1219, 1234), 'numpy.sqrt', 'np.sqrt', (['hidden'], {}), '(hidden)\n', (1226, 1234), True, 'import numpy as np\n'), ((1271, 1298), 'numpy.random.randn', 'np.random.randn', (['outputs', '(1)'], {}), '(outputs, 1)\n', (1286, 1298), True, 'import numpy as np\n'), ((1300, 1315), 'numpy.sqrt', 'np.sqrt', (['hidden'], {}), '(hidden)\n', (1307, 1315), True, 'import numpy as np\n'), ((2221, 2230), 'numpy.exp', 'np.exp', (['z'], {}), '(z)\n', (2227, 2230), True, 'import numpy as np\n'), ((2333, 2345), 'numpy.log', 'np.log', (['v[y]'], {}), '(v[y])\n', (2339, 2345), True, 'import numpy as np\n'), ((3187, 3219), 'numpy.array', 'np.array', (['([0] * self.output_size)'], {}), '([0] * 
self.output_size)\n', (3195, 3219), True, 'import numpy as np\n'), ((2208, 2217), 'numpy.exp', 'np.exp', (['z'], {}), '(z)\n', (2214, 2217), True, 'import numpy as np\n'), ((2474, 2512), 'numpy.matmul', 'np.matmul', (["self.first_layer['para']", 'x'], {}), "(self.first_layer['para'], x)\n", (2483, 2512), True, 'import numpy as np\n'), ((2647, 2686), 'numpy.matmul', 'np.matmul', (["self.second_layer['para']", 'H'], {}), "(self.second_layer['para'], H)\n", (2656, 2686), True, 'import numpy as np\n'), ((1630, 1643), 'numpy.squeeze', 'np.squeeze', (['Z'], {}), '(Z)\n', (1640, 1643), True, 'import numpy as np\n'), ((1722, 1735), 'numpy.squeeze', 'np.squeeze', (['Z'], {}), '(Z)\n', (1732, 1735), True, 'import numpy as np\n'), ((1906, 1916), 'numpy.exp', 'np.exp', (['(-Z)'], {}), '(-Z)\n', (1912, 1916), True, 'import numpy as np\n'), ((4890, 4924), 'numpy.floor', 'np.floor', (['(ite / num_iterations * 5)'], {}), '(ite / num_iterations * 5)\n', (4898, 4924), True, 'import numpy as np\n'), ((1827, 1837), 'numpy.exp', 'np.exp', (['(-Z)'], {}), '(-Z)\n', (1833, 1837), True, 'import numpy as np\n'), ((2044, 2054), 'numpy.tanh', 'np.tanh', (['Z'], {}), '(Z)\n', (2051, 2054), True, 'import numpy as np\n'), ((1847, 1857), 'numpy.exp', 'np.exp', (['(-Z)'], {}), '(-Z)\n', (1853, 1857), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@author: alexyang
@contact: <EMAIL>
@file: inference.py
@time: 2018/4/22 14:32
@desc:
"""
import os
import random
from argparse import ArgumentParser
import numpy as np
import pickle
import pandas as pd
from models import EntDect, RelNet, SubTransE, SubTypeVec
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
def pickle_load(data_path):
    """Deserialize and return the object stored in the pickle file at data_path."""
    # Use a context manager so the file handle is always closed
    # (the original opened the file inline and never closed it).
    with open(data_path, 'rb') as f:
        return pickle.load(f)
def pickle_save(obj, data_path):
    """Serialize obj to data_path with pickle."""
    # Use a context manager so the file is flushed and closed even on error
    # (the original left the handle open).
    with open(data_path, 'wb') as f:
        pickle.dump(obj, f)
def get_all_ngram(text):
    """Return a dict mapping n -> list of distinct n-grams of text.

    Only unigrams, bigrams and trigrams (n = 1, 2, 3) are produced.
    """
    tokens = text.split()
    return {n: find_ngrams(tokens, n) for n in (1, 2, 3)}
def find_ngrams(tokens, n):
    """Return the distinct n-grams of `tokens`, each joined with spaces.

    Output order is unspecified because duplicates are removed via a set.
    """
    shifted = (tokens[i:] for i in range(n))
    distinct = set(zip(*shifted))
    return [' '.join(gram) for gram in distinct]
def read_data(data_path, word2idx, relation2idx, subject2idx, max_sequence_len=60):
    """Load a tab-separated question file and encode it for the models.

    Each row provides the question tokens plus its gold subject/relation.
    Tokens missing from word2idx are dropped; the rest are truncated to
    max_sequence_len and written into a zero-padded id matrix.

    Returns:
        (line ids, filtered question strings, padded word-id matrix,
         sequence lengths, gold subject indices, gold relation indices)
    """
    columns = ['line_id', 'subject', 'entity_name', 'entity_type', 'relation',
               'object', 'tokens', 'labels']
    frame = pd.read_csv(data_path, header=None, index_col=None, sep='\t', names=columns)
    n_rows = frame.shape[0]
    q_lineid, questions = [], []
    q_word_ids = np.zeros(shape=(n_rows, max_sequence_len))
    q_seq_len = np.zeros(shape=n_rows)
    gold_sub_ids, gold_rel_ids = [], []
    for index, row in frame.iterrows():
        # Keep only tokens that have an embedding, truncated to the max length.
        kept = [tok for tok in row['tokens'].split() if tok in word2idx][:max_sequence_len]
        ids = [word2idx[tok] for tok in kept]
        q_lineid.append(row['line_id'])
        questions.append(' '.join(kept))
        q_word_ids[index, :len(ids)] = ids
        q_seq_len[index] = len(ids)
        gold_sub_ids.append(subject2idx[row['subject']])
        gold_rel_ids.append(relation2idx[row['relation']])
    return q_lineid, questions, q_word_ids, q_seq_len, gold_sub_ids, gold_rel_ids
def link_entity(mentions, name2subject, ngram2subject, kb_triple, subject2idx, relation2idx):
    """Link each detected mention to candidate subjects and relations.

    An exact name match is tried first; otherwise trigram, bigram and
    unigram matches are tried in that order (longest first, stopping at the
    first length with any hit).  Every relation a candidate subject
    participates in (per kb_triple) yields a candidate (subject, relation)
    pair.  Match statistics are printed at the end.

    Returns:
        Three parallel lists (one entry per mention): candidate subject
        indices, candidate relation indices, and (subject, relation) index
        pairs.
    """
    # The "ids" suffix means we store indices of subjects/relations rather
    # than the subjects/relations themselves.
    cand_sub_ids = []
    cand_rel_ids = []
    cand_subrel_ids = []
    # match_count[0] counts exact matches; [1]/[2]/[3] count uni/bi/trigram hits.
    match_count = [0, 0, 0, 0]
    for mention in mentions:
        cand_sub = set()
        cand_rel = set()
        cand_subrel = []
        if mention in name2subject:
            match_count[0] += 1
            cand_sub.update(name2subject[mention])
        else:
            ngrams = get_all_ngram(mention)
            for i in [3, 2, 1]:
                for ngram in ngrams[i]:
                    if ngram in ngram2subject:
                        # Cap at 256 candidate subjects per n-gram.
                        cand_sub.update(list(zip(*ngram2subject[ngram]))[0][:256])
                if len(cand_sub) > 0:
                    match_count[i] += 1
                    break
        for sub in list(cand_sub):
            rels = set([rel for rel, _ in kb_triple[sub]])
            cand_rel.update(rels)
            cand_subrel.extend([(sub, rel) for rel in list(rels)])
        cand_sub_ids.append([subject2idx[sub] for sub in list(cand_sub)])
        # BUG FIX: the original indexed the global `relation2dix` (a typo'd
        # name defined only in __main__) instead of the `relation2idx`
        # parameter, making the function unusable outside this script.
        cand_rel_ids.append([relation2idx[rel] for rel in list(cand_rel)])
        cand_subrel_ids.append([(subject2idx[sub], relation2idx[rel]) for sub, rel in cand_subrel])
    print('exact match: {} / {} '.format(match_count[0], len(mentions)))
    print('trigram match: {} / {}'.format(match_count[3], len(mentions)))
    print('bigram match: {} / {}'.format(match_count[2], len(mentions)))
    print('unigram match: {} / {}'.format(match_count[1], len(mentions)))
    return cand_sub_ids, cand_rel_ids, cand_subrel_ids
def inference(gold_sub_ids, gold_rel_ids, cand_subrel_ids, rel_scores, sub_scores):
    """Score candidate (subject, relation) pairs and print accuracies.

    The joint score of a pair is rel_score * (alpha + (1 - alpha) * sub_score);
    the top-scoring pair per question is compared against the gold labels.
    Questions without any candidate pair count as misses.
    """
    data_size = len(gold_sub_ids)
    for alpha in [0.45]:
        sub_hit = rel_hit = subrel_hit = 0
        for i in range(data_size):
            scored = {}
            for sub_id, rel_id in cand_subrel_ids[i]:
                joint = rel_scores[i][rel_id] * (alpha + (1 - alpha) * sub_scores[i][sub_id])
                scored[(sub_id, rel_id)] = joint
            if not scored:
                continue
            # Highest joint score wins (first inserted on ties, matching a
            # stable descending sort of the dict items).
            (top_sub, top_rel), _ = max(scored.items(), key=lambda item: item[1])
            sub_ok = top_sub == gold_sub_ids[i]
            rel_ok = top_rel == gold_rel_ids[i]
            sub_hit += int(sub_ok)
            rel_hit += int(rel_ok)
            subrel_hit += int(sub_ok and rel_ok)
        print('alpha: %f, sub acc: %f, rel acc: %f, (sub, rel): %f' % (alpha, sub_hit / data_size, rel_hit / data_size,
                                                                       subrel_hit / data_size))
if __name__ == '__main__':
    # Command-line interface: data/lookup-table paths are optional with
    # defaults; the model type and checkpoint paths are required.
    parser = ArgumentParser()
    parser.add_argument('--data', type=str, default='../../data/test.csv', help='path to test data')
    parser.add_argument('--word2idx', type=str, default='../../data/fb_word2idx.pkl', help='path to word2idx.pkl')
    parser.add_argument('--rel2idx', type=str, default='../../data/FB5M_relation2idx.pkl',
                        help='path to relation2idx.pkl')
    parser.add_argument('--sub2idx', type=str, default='../../data/FB5M_subject2idx.pkl',
                        help='path to subject2idx.pkl')
    parser.add_argument('--idx2sub', type=str, default='../../data/FB5M_idx2subject.pkl',
                        help='path to idx2subject.pkl')
    parser.add_argument('--sub2type', type=str, default='../../data/trim_subject2type.pkl',
                        help='path to subject2type')
    parser.add_argument('--type2idx', type=str, default='../../data/FB5M_type2idx.pkl', help='path to type2idx.pkl')
    parser.add_argument('--name2sub', type=str, default='../../data/name2subject.pkl',
                        help='path to subject2name.pkl')
    parser.add_argument('--ngram2sub', type=str, default='../../data/ngram2subject.pkl',
                        help='path to subngram2entity.pkl')
    parser.add_argument('--kb', type=str, default='../../data/FB5M_triple.pkl', help='path to knowledge graph')
    parser.add_argument('--entdect_type', type=str, required=True,
                        help='model type of entity detection, options are [lstm | lstm_crf | bilstm | bilstm_crf]')
    parser.add_argument('--subnet_type', type=str, required=True,
                        help='model type of subject network, options are [transe | typevec]')
    parser.add_argument('--entdect', type=str, required=True, help='path to entity detection tensorflow model')
    parser.add_argument('--relnet', type=str, required=True, help='path to relation network tensorflow model')
    parser.add_argument('--subnet', type=str, required=True, help='path to subject network tensorflow model')
    args = parser.parse_args()
    # load needed data
    # NOTE: this variable is (mis)spelled `relation2dix` throughout __main__.
    print('loading word2idx...')
    word2idx = pickle_load(args.word2idx)
    print('loading relation2idx...')
    relation2dix = pickle_load(args.rel2idx)
    print('loading idx2subject...')
    idx2subject = pickle_load(args.idx2sub)
    print('loading subject2idx...')
    subject2idx = pickle_load(args.sub2idx)
    print('loading type2idx...')
    type2idx = pickle_load(args.type2idx)
    print('loading subject2type...')
    subject2type = pickle_load(args.sub2type)
    print('loading name2subject')
    name2subject = pickle_load(args.name2sub)
    print('loading ngram2subject...')
    ngram2subject = pickle_load(args.ngram2sub)
    print('loading knowledge graph...')
    kb_triple = pickle_load(args.kb)
    # load model
    print('load entity detection model...')
    entdect = EntDect(args.entdect_type, args.entdect)
    print('load relation network model...')
    relnet = RelNet(args.relnet)
    # The subject network flavor is chosen at run time from the CLI flag.
    if args.subnet_type == 'typevec':
        subnet = SubTypeVec(args.subnet)
    else:
        subnet = SubTransE(args.subnet)
    # load test data
    print('loading test data...')
    q_lineid, questions, q_word_ids, q_seq_len, gold_sub_ids, gold_rel_ids = read_data(args.data, word2idx,
                                                                                       relation2dix, subject2idx)
    # '''step1: entity detection: find possible subject mention in question'''
    mentions = entdect.infer((questions, q_word_ids, q_seq_len))
    # '''step2: entity linking: find possible subjects responding to subject mention;
    # search space reduction: generate candidate (subject, relation) pair according to possible subjects
    # '''
    cand_sub_ids, cand_rel_ids, cand_subrel_ids = link_entity(mentions, name2subject, ngram2subject,
                                                              kb_triple, subject2idx, relation2dix)
    # '''step3: relation scoring: compute score for each candidate relations'''
    rel_scores = relnet.infer((q_word_ids, q_seq_len, cand_rel_ids))
    # '''step4: subject scoring: compute score for each candidate subjects'''
    if args.subnet_type == 'typevec':
        # The typevec subject network additionally needs each candidate
        # subject's type-id vector, looked up via idx -> subject -> types.
        cand_sub_typevecs = []
        for can_sub in cand_sub_ids:
            type_vecs = []
            for sub_id in can_sub:
                types = subject2type.get(idx2subject[sub_id], [])
                type_ids = [type2idx[type] for type in types]
                type_vecs.append(type_ids)
            cand_sub_typevecs.append(type_vecs)
        sub_scores = subnet.infer((q_word_ids, q_seq_len, cand_sub_ids, cand_sub_typevecs))
    else:
        sub_scores = subnet.infer((q_word_ids, q_seq_len, cand_sub_ids))
    # '''step5: inference'''
    inference(gold_sub_ids, gold_rel_ids, cand_subrel_ids, rel_scores, sub_scores)
| [
"models.EntDect",
"models.SubTypeVec",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.zeros",
"models.RelNet",
"models.SubTransE"
] | [((968, 1140), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'header': 'None', 'index_col': 'None', 'sep': '"""\t"""', 'names': "['line_id', 'subject', 'entity_name', 'entity_type', 'relation', 'object',\n 'tokens', 'labels']"}), "(data_path, header=None, index_col=None, sep='\\t', names=[\n 'line_id', 'subject', 'entity_name', 'entity_type', 'relation',\n 'object', 'tokens', 'labels'])\n", (979, 1140), True, 'import pandas as pd\n'), ((1282, 1327), 'numpy.zeros', 'np.zeros', ([], {'shape': '(data_size, max_sequence_len)'}), '(shape=(data_size, max_sequence_len))\n', (1290, 1327), True, 'import numpy as np\n'), ((1344, 1369), 'numpy.zeros', 'np.zeros', ([], {'shape': 'data_size'}), '(shape=data_size)\n', (1352, 1369), True, 'import numpy as np\n'), ((5285, 5301), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (5299, 5301), False, 'from argparse import ArgumentParser\n'), ((8150, 8190), 'models.EntDect', 'EntDect', (['args.entdect_type', 'args.entdect'], {}), '(args.entdect_type, args.entdect)\n', (8157, 8190), False, 'from models import EntDect, RelNet, SubTransE, SubTypeVec\n'), ((8248, 8267), 'models.RelNet', 'RelNet', (['args.relnet'], {}), '(args.relnet)\n', (8254, 8267), False, 'from models import EntDect, RelNet, SubTransE, SubTypeVec\n'), ((8323, 8346), 'models.SubTypeVec', 'SubTypeVec', (['args.subnet'], {}), '(args.subnet)\n', (8333, 8346), False, 'from models import EntDect, RelNet, SubTransE, SubTypeVec\n'), ((8374, 8396), 'models.SubTransE', 'SubTransE', (['args.subnet'], {}), '(args.subnet)\n', (8383, 8396), False, 'from models import EntDect, RelNet, SubTransE, SubTypeVec\n')] |
import unittest
import pandas as pd
import numpy as np
from model_scripts.surge_inference import SurgePriceClassifier
class TestSurgePriceClassifier(unittest.TestCase):
'''
Tests for checking the surge price classifier model inference
script
params, returns: None
'''
def test_get_rush_hour(self):
'''
Tests if rush hour is returned by the given module
params, returns: None
'''
data = \
pd.DataFrame({'temp': [40.67], 'clouds': [0.94],
'pressure': [1013.76], 'rain': [0.0],
'humidity': [0.92], 'wind': [2.92],
'rush_hr': [0],
'location_latitude': [42.3559219],
'location_longitude': [-71.0549768],
'surge_mult': [0]})
rush_hr = data[['rush_hr']]
message = "ERROR : test_get_rush_hour is failing"
self.assertIsNotNone(rush_hr, message)
def test_unique_values(self):
'''
Tests the unique values predicted column
params, returns: None
'''
data = \
pd.DataFrame({'temp': [40.67], 'clouds': [0.94],
'pressure': [1013.76], 'rain': [0.0],
'humidity': [0.92], 'wind': [2.92],
'rush_hr': [0],
'location_latitude': [42.3559219],
'location_longitude': [-71.0549768],
'surge_mult': [0]})
message = "ERROR : test_unique_values is failing"
self.assertEqual(len(list(np.unique(data['surge_mult']))),
1, message)
def test_null_values(self):
'''
Test for null values. Prevents feedback loop errors
params, returns: None
'''
data = \
pd.DataFrame({'temp': [40.67], 'clouds': [0.94],
'pressure': [1013.76], 'rain': [0.0],
'humidity': [0.92], 'wind': [2.92],
'rush_hr': [0],
'location_latitude': [42.3559219],
'location_longitude': [-71.0549768],
'surge_mult': [0]})
self.assertEqual(len(data), len(data.dropna()))
def test_surge_model(self):
data = \
pd.DataFrame({'id': [0], 'temp': [40.67], 'clouds': [0.94],
'pressure': [1013.76], 'rain': [0.0],
'humidity': [0.92], 'wind': [2.92],
'location_latitude': [42.3559219],
'location_longitude': [-71.0549768],
'surge_mult': [0]})
surge_mult_object = SurgePriceClassifier(data)
surge = \
surge_mult_object.surge_prediction_model()
message = "Predicted surge is not correct"
self.assertIsNotNone(surge, message)
| [
"pandas.DataFrame",
"model_scripts.surge_inference.SurgePriceClassifier",
"numpy.unique"
] | [((466, 708), 'pandas.DataFrame', 'pd.DataFrame', (["{'temp': [40.67], 'clouds': [0.94], 'pressure': [1013.76], 'rain': [0.0],\n 'humidity': [0.92], 'wind': [2.92], 'rush_hr': [0], 'location_latitude':\n [42.3559219], 'location_longitude': [-71.0549768], 'surge_mult': [0]}"], {}), "({'temp': [40.67], 'clouds': [0.94], 'pressure': [1013.76],\n 'rain': [0.0], 'humidity': [0.92], 'wind': [2.92], 'rush_hr': [0],\n 'location_latitude': [42.3559219], 'location_longitude': [-71.0549768],\n 'surge_mult': [0]})\n", (478, 708), True, 'import pandas as pd\n'), ((1161, 1403), 'pandas.DataFrame', 'pd.DataFrame', (["{'temp': [40.67], 'clouds': [0.94], 'pressure': [1013.76], 'rain': [0.0],\n 'humidity': [0.92], 'wind': [2.92], 'rush_hr': [0], 'location_latitude':\n [42.3559219], 'location_longitude': [-71.0549768], 'surge_mult': [0]}"], {}), "({'temp': [40.67], 'clouds': [0.94], 'pressure': [1013.76],\n 'rain': [0.0], 'humidity': [0.92], 'wind': [2.92], 'rush_hr': [0],\n 'location_latitude': [42.3559219], 'location_longitude': [-71.0549768],\n 'surge_mult': [0]})\n", (1173, 1403), True, 'import pandas as pd\n'), ((1886, 2128), 'pandas.DataFrame', 'pd.DataFrame', (["{'temp': [40.67], 'clouds': [0.94], 'pressure': [1013.76], 'rain': [0.0],\n 'humidity': [0.92], 'wind': [2.92], 'rush_hr': [0], 'location_latitude':\n [42.3559219], 'location_longitude': [-71.0549768], 'surge_mult': [0]}"], {}), "({'temp': [40.67], 'clouds': [0.94], 'pressure': [1013.76],\n 'rain': [0.0], 'humidity': [0.92], 'wind': [2.92], 'rush_hr': [0],\n 'location_latitude': [42.3559219], 'location_longitude': [-71.0549768],\n 'surge_mult': [0]})\n", (1898, 2128), True, 'import pandas as pd\n'), ((2391, 2629), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [0], 'temp': [40.67], 'clouds': [0.94], 'pressure': [1013.76],\n 'rain': [0.0], 'humidity': [0.92], 'wind': [2.92], 'location_latitude':\n [42.3559219], 'location_longitude': [-71.0549768], 'surge_mult': [0]}"], {}), "({'id': [0], 'temp': [40.67], 'clouds': 
[0.94], 'pressure': [\n 1013.76], 'rain': [0.0], 'humidity': [0.92], 'wind': [2.92],\n 'location_latitude': [42.3559219], 'location_longitude': [-71.0549768],\n 'surge_mult': [0]})\n", (2403, 2629), True, 'import pandas as pd\n'), ((2776, 2802), 'model_scripts.surge_inference.SurgePriceClassifier', 'SurgePriceClassifier', (['data'], {}), '(data)\n', (2796, 2802), False, 'from model_scripts.surge_inference import SurgePriceClassifier\n'), ((1640, 1669), 'numpy.unique', 'np.unique', (["data['surge_mult']"], {}), "(data['surge_mult'])\n", (1649, 1669), True, 'import numpy as np\n')] |
import numpy as np
from holoviews.element import HeatMap, Points, Image
try:
from bokeh.models import FactorRange, HoverTool
except:
pass
from .testplot import TestBokehPlot, bokeh_renderer
class TestHeatMapPlot(TestBokehPlot):
    """Rendering tests for HeatMap elements under the bokeh backend."""

    def test_heatmap_hover_ensure_kdims_sanitized(self):
        """Hover tooltips reference sanitized key-dimension field names."""
        hm = HeatMap([(1,1,1), (2,2,0)], kdims=['x with space', 'y with $pecial symbol'])
        hm = hm(plot={'tools': ['hover']})
        self._test_hover_info(hm, [('x with space', '@{x_with_space}'),
                                   ('y with $pecial symbol', '@{y_with_pecial_symbol}'),
                                   ('z', '@{z}')])

    def test_heatmap_custom_string_tooltip_hover(self):
        """A user-supplied HoverTool keeps its raw tooltip string and is
        attached only to the heatmap's glyph renderer."""
        tooltips = "<div><h1>Test</h1></div>"
        custom_hover = HoverTool(tooltips=tooltips)
        hm = HeatMap([(1,1,1), (2,2,0)], kdims=['x with space', 'y with $pecial symbol'])
        hm = hm.options(tools=[custom_hover])
        plot = bokeh_renderer.get_plot(hm)
        hover = plot.handles['hover']
        self.assertEqual(hover.tooltips, tooltips)
        self.assertEqual(hover.renderers, [plot.handles['glyph_renderer']])

    def test_heatmap_hover_ensure_vdims_sanitized(self):
        """Hover tooltips reference sanitized value-dimension field names."""
        hm = HeatMap([(1,1,1), (2,2,0)], vdims=['z with $pace'])
        hm = hm(plot={'tools': ['hover']})
        self._test_hover_info(hm, [('x', '@{x}'), ('y', '@{y}'),
                                   ('z with $pace', '@{z_with_pace}')])

    def test_heatmap_colormapping(self):
        """The heatmap sets up a colormapper on its value dimension (index 2)."""
        hm = HeatMap([(1,1,1), (2,2,0)])
        self._test_colormapping(hm, 2)

    def test_heatmap_categorical_axes_string_int(self):
        """Mixed string/int keys produce categorical (factor) axis ranges."""
        hmap = HeatMap([('A',1, 1), ('B', 2, 2)])
        plot = bokeh_renderer.get_plot(hmap)
        x_range = plot.handles['x_range']
        y_range = plot.handles['y_range']
        self.assertIsInstance(x_range, FactorRange)
        self.assertEqual(x_range.factors, ['A', 'B'])
        self.assertIsInstance(y_range, FactorRange)
        self.assertEqual(y_range.factors, ['1', '2'])

    def test_heatmap_categorical_axes_string_int_invert_xyaxis(self):
        """invert_xaxis/invert_yaxis reverse the factor order on both axes."""
        opts = dict(invert_xaxis=True, invert_yaxis=True)
        hmap = HeatMap([('A',1, 1), ('B', 2, 2)]).opts(plot=opts)
        plot = bokeh_renderer.get_plot(hmap)
        x_range = plot.handles['x_range']
        y_range = plot.handles['y_range']
        self.assertIsInstance(x_range, FactorRange)
        self.assertEqual(x_range.factors, ['A', 'B'][::-1])
        self.assertIsInstance(y_range, FactorRange)
        self.assertEqual(y_range.factors, ['1', '2'][::-1])

    def test_heatmap_categorical_axes_string_int_inverted(self):
        """invert_axes swaps which dimension maps to which factor axis."""
        hmap = HeatMap([('A',1, 1), ('B', 2, 2)]).opts(plot=dict(invert_axes=True))
        plot = bokeh_renderer.get_plot(hmap)
        x_range = plot.handles['x_range']
        y_range = plot.handles['y_range']
        self.assertIsInstance(x_range, FactorRange)
        self.assertEqual(x_range.factors, ['1', '2'])
        self.assertIsInstance(y_range, FactorRange)
        self.assertEqual(y_range.factors, ['A', 'B'])

    def test_heatmap_points_categorical_axes_string_int(self):
        """Overlaying Points merges its categories into the factor ranges."""
        hmap = HeatMap([('A',1, 1), ('B', 2, 2)])
        points = Points([('A', 2), ('B', 1), ('C', 3)])
        plot = bokeh_renderer.get_plot(hmap*points)
        x_range = plot.handles['x_range']
        y_range = plot.handles['y_range']
        self.assertIsInstance(x_range, FactorRange)
        self.assertEqual(x_range.factors, ['A', 'B', 'C'])
        self.assertIsInstance(y_range, FactorRange)
        self.assertEqual(y_range.factors, ['1', '2', '3'])

    def test_heatmap_points_categorical_axes_string_int_inverted(self):
        """Overlay category merging also works with inverted axes."""
        hmap = HeatMap([('A',1, 1), ('B', 2, 2)]).opts(plot=dict(invert_axes=True))
        points = Points([('A', 2), ('B', 1), ('C', 3)])
        plot = bokeh_renderer.get_plot(hmap*points)
        x_range = plot.handles['x_range']
        y_range = plot.handles['y_range']
        self.assertIsInstance(x_range, FactorRange)
        self.assertEqual(x_range.factors, ['1', '2', '3'])
        self.assertIsInstance(y_range, FactorRange)
        self.assertEqual(y_range.factors, ['A', 'B', 'C'])

    def test_heatmap_invert_axes(self):
        """With invert_axes the data source holds the transposed z values
        and the pretty-printed coordinates of each dimension."""
        arr = np.array([[0, 1, 2], [3, 4, 5]])
        hm = HeatMap(Image(arr)).opts(plot=dict(invert_axes=True))
        plot = bokeh_renderer.get_plot(hm)
        xdim, ydim = hm.kdims
        source = plot.handles['source']
        self.assertEqual(source.data['zvalues'], hm.dimension_values(2, flat=False).T.flatten())
        self.assertEqual(source.data['x'], [xdim.pprint_value(v) for v in hm.dimension_values(0)])
        self.assertEqual(source.data['y'], [ydim.pprint_value(v) for v in hm.dimension_values(1)])

    def test_heatmap_xmarks_int(self):
        """An integer xmarks option places that many vertical mark lines."""
        hmap = HeatMap([('A',1, 1), ('B', 2, 2)]).options(xmarks=2)
        plot = bokeh_renderer.get_plot(hmap)
        for marker, pos in zip(plot.handles['xmarks'], (0, 1)):
            self.assertEqual(marker.location, pos)
            self.assertEqual(marker.dimension, 'height')

    def test_heatmap_xmarks_tuple(self):
        """A tuple of category labels maps to vertical marks at their slots."""
        hmap = HeatMap([('A',1, 1), ('B', 2, 2)]).options(xmarks=('A', 'B'))
        plot = bokeh_renderer.get_plot(hmap)
        for marker, pos in zip(plot.handles['xmarks'], (0, 1)):
            self.assertEqual(marker.location, pos)
            self.assertEqual(marker.dimension, 'height')

    def test_heatmap_xmarks_list(self):
        """A list of positions maps directly to vertical mark locations."""
        hmap = HeatMap([('A',1, 1), ('B', 2, 2)]).options(xmarks=[0, 1])
        plot = bokeh_renderer.get_plot(hmap)
        for marker, pos in zip(plot.handles['xmarks'], (0, 1)):
            self.assertEqual(marker.location, pos)
            self.assertEqual(marker.dimension, 'height')

    def test_heatmap_ymarks_int(self):
        """An integer ymarks option places that many horizontal mark lines."""
        hmap = HeatMap([('A',1, 1), ('B', 2, 2)]).options(ymarks=2)
        plot = bokeh_renderer.get_plot(hmap)
        for marker, pos in zip(plot.handles['ymarks'], (2, 1)):
            self.assertEqual(marker.location, pos)
            self.assertEqual(marker.dimension, 'width')

    def test_heatmap_ymarks_tuple(self):
        """A tuple of category labels maps to horizontal marks at their slots."""
        hmap = HeatMap([('A',1, 1), ('B', 2, 2)]).options(ymarks=('A', 'B'))
        plot = bokeh_renderer.get_plot(hmap)
        for marker, pos in zip(plot.handles['ymarks'], (0, 1)):
            self.assertEqual(marker.location, pos)
            self.assertEqual(marker.dimension, 'width')

    def test_heatmap_ymarks_list(self):
        """A list of positions maps directly to horizontal mark locations."""
        hmap = HeatMap([('A',1, 1), ('B', 2, 2)]).options(ymarks=[0, 1])
        plot = bokeh_renderer.get_plot(hmap)
        for marker, pos in zip(plot.handles['ymarks'], (2, 1)):
            self.assertEqual(marker.location, pos)
            self.assertEqual(marker.dimension, 'width')

    def test_heatmap_dilate(self):
        """The dilate plot option is forwarded to the bokeh glyph."""
        hmap = HeatMap([('A',1, 1), ('B', 2, 2)]).options(dilate=True)
        plot = bokeh_renderer.get_plot(hmap)
        glyph = plot.handles['glyph']
        self.assertTrue(glyph.dilate)
| [
"holoviews.element.Image",
"holoviews.element.HeatMap",
"holoviews.element.Points",
"numpy.array",
"bokeh.models.HoverTool"
] | [((312, 397), 'holoviews.element.HeatMap', 'HeatMap', (['[(1, 1, 1), (2, 2, 0)]'], {'kdims': "['x with space', 'y with $pecial symbol']"}), "([(1, 1, 1), (2, 2, 0)], kdims=['x with space', 'y with $pecial symbol']\n )\n", (319, 397), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((770, 798), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': 'tooltips'}), '(tooltips=tooltips)\n', (779, 798), False, 'from bokeh.models import FactorRange, HoverTool\n'), ((812, 897), 'holoviews.element.HeatMap', 'HeatMap', (['[(1, 1, 1), (2, 2, 0)]'], {'kdims': "['x with space', 'y with $pecial symbol']"}), "([(1, 1, 1), (2, 2, 0)], kdims=['x with space', 'y with $pecial symbol']\n )\n", (819, 897), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((1214, 1269), 'holoviews.element.HeatMap', 'HeatMap', (['[(1, 1, 1), (2, 2, 0)]'], {'vdims': "['z with $pace']"}), "([(1, 1, 1), (2, 2, 0)], vdims=['z with $pace'])\n", (1221, 1269), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((1501, 1532), 'holoviews.element.HeatMap', 'HeatMap', (['[(1, 1, 1), (2, 2, 0)]'], {}), '([(1, 1, 1), (2, 2, 0)])\n', (1508, 1532), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((1640, 1675), 'holoviews.element.HeatMap', 'HeatMap', (["[('A', 1, 1), ('B', 2, 2)]"], {}), "([('A', 1, 1), ('B', 2, 2)])\n", (1647, 1675), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((3134, 3169), 'holoviews.element.HeatMap', 'HeatMap', (["[('A', 1, 1), ('B', 2, 2)]"], {}), "([('A', 1, 1), ('B', 2, 2)])\n", (3141, 3169), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((3186, 3224), 'holoviews.element.Points', 'Points', (["[('A', 2), ('B', 1), ('C', 3)]"], {}), "([('A', 2), ('B', 1), ('C', 3)])\n", (3192, 3224), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((3758, 3796), 'holoviews.element.Points', 'Points', (["[('A', 2), ('B', 1), ('C', 3)]"], {}), "([('A', 2), ('B', 1), ('C', 3)])\n", 
(3764, 3796), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((4211, 4243), 'numpy.array', 'np.array', (['[[0, 1, 2], [3, 4, 5]]'], {}), '([[0, 1, 2], [3, 4, 5]])\n', (4219, 4243), True, 'import numpy as np\n'), ((2160, 2195), 'holoviews.element.HeatMap', 'HeatMap', (["[('A', 1, 1), ('B', 2, 2)]"], {}), "([('A', 1, 1), ('B', 2, 2)])\n", (2167, 2195), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((2645, 2680), 'holoviews.element.HeatMap', 'HeatMap', (["[('A', 1, 1), ('B', 2, 2)]"], {}), "([('A', 1, 1), ('B', 2, 2)])\n", (2652, 2680), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((3672, 3707), 'holoviews.element.HeatMap', 'HeatMap', (["[('A', 1, 1), ('B', 2, 2)]"], {}), "([('A', 1, 1), ('B', 2, 2)])\n", (3679, 3707), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((4775, 4810), 'holoviews.element.HeatMap', 'HeatMap', (["[('A', 1, 1), ('B', 2, 2)]"], {}), "([('A', 1, 1), ('B', 2, 2)])\n", (4782, 4810), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((5102, 5137), 'holoviews.element.HeatMap', 'HeatMap', (["[('A', 1, 1), ('B', 2, 2)]"], {}), "([('A', 1, 1), ('B', 2, 2)])\n", (5109, 5137), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((5437, 5472), 'holoviews.element.HeatMap', 'HeatMap', (["[('A', 1, 1), ('B', 2, 2)]"], {}), "([('A', 1, 1), ('B', 2, 2)])\n", (5444, 5472), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((5767, 5802), 'holoviews.element.HeatMap', 'HeatMap', (["[('A', 1, 1), ('B', 2, 2)]"], {}), "([('A', 1, 1), ('B', 2, 2)])\n", (5774, 5802), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((6093, 6128), 'holoviews.element.HeatMap', 'HeatMap', (["[('A', 1, 1), ('B', 2, 2)]"], {}), "([('A', 1, 1), ('B', 2, 2)])\n", (6100, 6128), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((6427, 6462), 'holoviews.element.HeatMap', 'HeatMap', (["[('A', 1, 1), ('B', 2, 2)]"], {}), "([('A', 1, 1), 
('B', 2, 2)])\n", (6434, 6462), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((6752, 6787), 'holoviews.element.HeatMap', 'HeatMap', (["[('A', 1, 1), ('B', 2, 2)]"], {}), "([('A', 1, 1), ('B', 2, 2)])\n", (6759, 6787), False, 'from holoviews.element import HeatMap, Points, Image\n'), ((4266, 4276), 'holoviews.element.Image', 'Image', (['arr'], {}), '(arr)\n', (4271, 4276), False, 'from holoviews.element import HeatMap, Points, Image\n')] |
import os
import pandas as pd
import uproot3 as uproot
from tqdm import tqdm
import numpy as np
import json
def generate_files(basedir, period, samples, TreeName="selection", format="pickle", mode="normal"):
    """
    Combine jobs by dataset event process and save files.
    For each entry in ``samples``, concatenates the per-job ROOT trees into one
    pandas DataFrame, rescales ``evtWeight`` by xsec*lumi/sum(genWeights) read
    from each job's ``cutflow.txt``, and persists the result under
    ``<basedir>/datasets/<period>`` (or ``APV_<period>`` for APV-tagged 2016
    samples). In ``mode="syst"`` it additionally accumulates the systematic
    histograms stored in the per-job ``Systematics/*.json`` files.
    Args:
        basedir (str): Path to analysis root folder
        period (str): Jobs period used in anafile
        TreeName (str): Tree name used in ROOTfile
        samples (dict): Dictionary mapping each event flavour to jobs directories
        format (str, optional): Format to save dataset as pandas persistent file. Defaults to "pickle".
        mode (str, optional): "normal" combines trees only; "syst" also merges
            the lateral-systematics histograms declared in
            ``<basedir>/lateral_systematics.json``. Defaults to "normal".
    Raises:
        ValueError: Raise exception if specify an unsupported save format.
    """
    if format not in ["pickle", "parquet"]:
        raise ValueError("Format unsupported. Please use one of the following formats: ['pickle', 'parquet']")
    # Ensure the output directory tree exists before any writes.
    comb_path = os.path.join(basedir, "datasets")
    period_path = os.path.join(comb_path, period)
    if not os.path.exists(comb_path):
        os.makedirs(comb_path)
    if not os.path.exists(period_path):
        os.makedirs(period_path)
    if mode == "syst":
        # systematics maps source name -> [source_index, n_universes, ...]
        with open(os.path.join(basedir, "lateral_systematics.json")) as json_sys_file:
            systematics = json.load(json_sys_file)
    # NOTE(review): has_tag is never reset inside the datasets loop, so once an
    # APV dataset is seen, all later dataset groups are also written to the
    # APV_<period> directory — confirm this is intended.
    has_tag = False # Remove if CMS join 2016 samples again
    for datasets in tqdm(samples.keys()):
        # Initialize source list (object which will store the systematic histograms)
        if mode == "syst":
            source_list = []
            for sys_source in systematics.keys():
                sys_list = systematics[sys_source]
                universe_list = []
                for universe in range(sys_list[1]):
                    universe_list.append(0)
                source_list.append(universe_list)
        first = True
        DATA_LUMI = 0
        PROC_XSEC = 0
        SUM_GEN_WGT = 0
        for dataset in samples[datasets]:
            #print(dataset)
            # Job directory names look like <name>_[APV]<year>_files_<range>.
            dataset_year = dataset.split("_files_")[0]
            dataset_year = dataset_year.split("_")[-1]
            dataset_tag = dataset.split("_"+dataset_year)[0][-3:]
            if (dataset_year == period):
                if( dataset_tag == "APV" ):
                    has_tag = True
                # Parse normalization constants from the job's cutflow file.
                cutflow = os.path.join(basedir, dataset, "cutflow.txt")
                if os.path.isfile(cutflow):
                    with open(cutflow) as f:
                        for line in f:
                            if line[:10] == "Luminosity" :
                                DATA_LUMI = float(line.split()[1])
                            if line[:13] == "Cross section" :
                                PROC_XSEC = float(line.split()[2])
                            if line[:17] == "Sum of genWeights" :
                                SUM_GEN_WGT += float(line.split()[3])
                            if line[:17] == "Lumi. Uncertainty" :
                                DATA_LUMI_UNC = float(line.split()[2])
                    rootfile = os.path.join(basedir, dataset, "Tree.root")
                    f = uproot.open(rootfile)
                    tree = f[TreeName]
                    df = tree.pandas.df(flatten=False)
                    df = df.loc[:, ~df.columns.duplicated()]  # If duplicated columns, keep the first one.
                    if first :
                        df_group = df.copy()
                    else:
                        df_group = pd.concat([df_group, df])
                    del df
                    #----------------------------------------------------
                    # Systematic
                    # Sum per-job systematic histograms; uncertainties are
                    # combined in quadrature.
                    if mode == "syst":
                        for sys_source in systematics.keys():
                            sys_list = systematics[sys_source]
                            # Non-nominal sources (index > 0) are MC-only.
                            if( (sys_list[0] > 0) and (datasets[:4] == "Data") ):
                                continue
                            universe_list = []
                            for universe in range(sys_list[1]):
                                sys_file = str(sys_list[0]) + "_" + str(universe) + ".json"
                                with open(os.path.join(basedir, dataset, "Systematics", sys_file)) as json_file:
                                    sys_dict = json.load(json_file)
                                if first :
                                    source_list[sys_list[0]][universe] = sys_dict.copy()
                                else:
                                    for variable in sys_dict.keys():
                                        zipped_Hist = zip(source_list[sys_list[0]][universe][variable]["Hist"], sys_dict[variable]["Hist"])
                                        New_Hist = [x + y for (x, y) in zipped_Hist]
                                        source_list[sys_list[0]][universe][variable]["Hist"] = New_Hist
                                        zipped_Unc = zip(source_list[sys_list[0]][universe][variable]["Unc"], sys_dict[variable]["Unc"])
                                        New_Unc = [np.sqrt(x**2 + y**2) for (x, y) in zipped_Unc]
                                        source_list[sys_list[0]][universe][variable]["Unc"] = New_Unc
                                del sys_dict
                    #---------------------------------------------------
                    first = False
        # Data has PROC_XSEC == 0 and is not rescaled.
        if PROC_XSEC == 0:
            dataScaleWeight = 1
        else:
            dataScaleWeight = (PROC_XSEC/SUM_GEN_WGT) * DATA_LUMI
        df_group['evtWeight'] = df_group['evtWeight']*dataScaleWeight
        if( has_tag ):
            period_path = os.path.join(comb_path, "APV_"+period)
            if not os.path.exists(period_path):
                os.makedirs(period_path)
            if format == "pickle":
                fpath = os.path.join(period_path, f"{datasets}.p")
                df_group.to_pickle(fpath)
            elif format == "parquet":
                fpath = os.path.join(period_path, f"{datasets}.parquet")
                df_group.to_parquet(fpath, index=False)
        else:
            if format == "pickle":
                fpath = os.path.join(basedir, "datasets", period, f"{datasets}.p")
                df_group.to_pickle(fpath)
            elif format == "parquet":
                fpath = os.path.join(basedir, "datasets", period, f"{datasets}.parquet")
                df_group.to_parquet(fpath, index=False)
        del df_group
        #---SYS--------------------------------------------------------------
        if mode == "syst":
            output_sys_dict = {}
            if( datasets[:4] == "Data" ):
                # Data: only the nominal (source 0, universe 0) histograms.
                for variable in source_list[0][0].keys():
                    output_sys_dict[variable] = source_list[0][0][variable]
            else:
                for isource in range(len(source_list)):
                    for iuniverse in range(len(source_list[isource])):
                        #print(isource, iuniverse)
                        #print(source_list[isource][iuniverse].keys())
                        for variable in source_list[isource][iuniverse].keys():
                            New_Hist = [x*dataScaleWeight for x in source_list[isource][iuniverse][variable]["Hist"]]
                            source_list[isource][iuniverse][variable]["Hist"] = New_Hist
                            New_Unc = [x*dataScaleWeight for x in source_list[isource][iuniverse][variable]["Unc"]]
                            source_list[isource][iuniverse][variable]["Unc"] = New_Unc
                            output_sys_dict[variable] = source_list[isource][iuniverse][variable]
                            output_sys_dict[variable]["LumiUnc"] = DATA_LUMI_UNC
            # NOTE(review): when has_tag is True, period_path is switched to
            # APV_<period> but the json below is still written to the plain
            # <period> directory — looks like it should use period_path;
            # confirm against the reader of these json files.
            if( has_tag ):
                period_path = os.path.join(comb_path, "APV_"+period)
                if not os.path.exists(period_path):
                    os.makedirs(period_path)
                with open(os.path.join(basedir, "datasets", period, f"{datasets}.json"), 'w') as json_file:
                    json.dump(output_sys_dict, json_file)
            else:
                with open(os.path.join(basedir, "datasets", period, f"{datasets}.json"), 'w') as json_file:
                    json.dump(output_sys_dict, json_file)
        #--------------------------------------------------------------------
    #regions = []
    #with open(os.path.join(basedir, list(samples.values())[0][0], "Systematics", "0_0.json")) as json_file:
    #    sys_dict = json.load(json_file)
    #    for variable in sys_dict.keys():
    #        info = variable.split("_")
    #        regions.append(int(info[-3]))
    #regions = np.unique(regions)
    #global_source_list = [] # stores histograms for all regions
    #for region in regions:
    #    global_source_list.append(source_list)
| [
"json.dump",
"json.load",
"os.makedirs",
"os.path.exists",
"os.path.isfile",
"numpy.sqrt",
"os.path.join",
"pandas.concat",
"uproot3.open"
] | [((893, 926), 'os.path.join', 'os.path.join', (['basedir', '"""datasets"""'], {}), "(basedir, 'datasets')\n", (905, 926), False, 'import os\n'), ((945, 976), 'os.path.join', 'os.path.join', (['comb_path', 'period'], {}), '(comb_path, period)\n', (957, 976), False, 'import os\n'), ((989, 1014), 'os.path.exists', 'os.path.exists', (['comb_path'], {}), '(comb_path)\n', (1003, 1014), False, 'import os\n'), ((1024, 1046), 'os.makedirs', 'os.makedirs', (['comb_path'], {}), '(comb_path)\n', (1035, 1046), False, 'import os\n'), ((1058, 1085), 'os.path.exists', 'os.path.exists', (['period_path'], {}), '(period_path)\n', (1072, 1085), False, 'import os\n'), ((1095, 1119), 'os.makedirs', 'os.makedirs', (['period_path'], {}), '(period_path)\n', (1106, 1119), False, 'import os\n'), ((1264, 1288), 'json.load', 'json.load', (['json_sys_file'], {}), '(json_sys_file)\n', (1273, 1288), False, 'import json\n'), ((5776, 5816), 'os.path.join', 'os.path.join', (['comb_path', "('APV_' + period)"], {}), "(comb_path, 'APV_' + period)\n", (5788, 5816), False, 'import os\n'), ((1169, 1218), 'os.path.join', 'os.path.join', (['basedir', '"""lateral_systematics.json"""'], {}), "(basedir, 'lateral_systematics.json')\n", (1181, 1218), False, 'import os\n'), ((2316, 2361), 'os.path.join', 'os.path.join', (['basedir', 'dataset', '"""cutflow.txt"""'], {}), "(basedir, dataset, 'cutflow.txt')\n", (2328, 2361), False, 'import os\n'), ((2381, 2404), 'os.path.isfile', 'os.path.isfile', (['cutflow'], {}), '(cutflow)\n', (2395, 2404), False, 'import os\n'), ((5834, 5861), 'os.path.exists', 'os.path.exists', (['period_path'], {}), '(period_path)\n', (5848, 5861), False, 'import os\n'), ((5879, 5903), 'os.makedirs', 'os.makedirs', (['period_path'], {}), '(period_path)\n', (5890, 5903), False, 'import os\n'), ((5963, 6005), 'os.path.join', 'os.path.join', (['period_path', 'f"""{datasets}.p"""'], {}), "(period_path, f'{datasets}.p')\n", (5975, 6005), False, 'import os\n'), ((6288, 6346), 'os.path.join', 
'os.path.join', (['basedir', '"""datasets"""', 'period', 'f"""{datasets}.p"""'], {}), "(basedir, 'datasets', period, f'{datasets}.p')\n", (6300, 6346), False, 'import os\n'), ((7924, 7964), 'os.path.join', 'os.path.join', (['comb_path', "('APV_' + period)"], {}), "(comb_path, 'APV_' + period)\n", (7936, 7964), False, 'import os\n'), ((3050, 3093), 'os.path.join', 'os.path.join', (['basedir', 'dataset', '"""Tree.root"""'], {}), "(basedir, dataset, 'Tree.root')\n", (3062, 3093), False, 'import os\n'), ((3118, 3139), 'uproot3.open', 'uproot.open', (['rootfile'], {}), '(rootfile)\n', (3129, 3139), True, 'import uproot3 as uproot\n'), ((6110, 6158), 'os.path.join', 'os.path.join', (['period_path', 'f"""{datasets}.parquet"""'], {}), "(period_path, f'{datasets}.parquet')\n", (6122, 6158), False, 'import os\n'), ((6451, 6515), 'os.path.join', 'os.path.join', (['basedir', '"""datasets"""', 'period', 'f"""{datasets}.parquet"""'], {}), "(basedir, 'datasets', period, f'{datasets}.parquet')\n", (6463, 6515), False, 'import os\n'), ((7986, 8013), 'os.path.exists', 'os.path.exists', (['period_path'], {}), '(period_path)\n', (8000, 8013), False, 'import os\n'), ((8035, 8059), 'os.makedirs', 'os.makedirs', (['period_path'], {}), '(period_path)\n', (8046, 8059), False, 'import os\n'), ((8200, 8237), 'json.dump', 'json.dump', (['output_sys_dict', 'json_file'], {}), '(output_sys_dict, json_file)\n', (8209, 8237), False, 'import json\n'), ((8396, 8433), 'json.dump', 'json.dump', (['output_sys_dict', 'json_file'], {}), '(output_sys_dict, json_file)\n', (8405, 8433), False, 'import json\n'), ((3476, 3501), 'pandas.concat', 'pd.concat', (['[df_group, df]'], {}), '([df_group, df])\n', (3485, 3501), True, 'import pandas as pd\n'), ((8086, 8147), 'os.path.join', 'os.path.join', (['basedir', '"""datasets"""', 'period', 'f"""{datasets}.json"""'], {}), "(basedir, 'datasets', period, f'{datasets}.json')\n", (8098, 8147), False, 'import os\n'), ((8282, 8343), 'os.path.join', 'os.path.join', 
(['basedir', '"""datasets"""', 'period', 'f"""{datasets}.json"""'], {}), "(basedir, 'datasets', period, f'{datasets}.json')\n", (8294, 8343), False, 'import os\n'), ((4309, 4329), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (4318, 4329), False, 'import json\n'), ((4191, 4246), 'os.path.join', 'os.path.join', (['basedir', 'dataset', '"""Systematics"""', 'sys_file'], {}), "(basedir, dataset, 'Systematics', sys_file)\n", (4203, 4246), False, 'import os\n'), ((5161, 5185), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (5168, 5185), True, 'import numpy as np\n')] |
"""
.. module:: PMRF
:synopsis: This module is responsible for image segmentation using pmrf algorithm
.. moduleauthor:: <NAME>
"""
__copyright__ = "CAMERA Materials Segmentation & Metrics (MSM) Copyright (c) 2017, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved."
__developers__ = "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
__author__ = "<NAME>"
__maintainer__ = "CAMERA Image Processing"
__email__ = "<EMAIL>"
__license__ = "Modified BSD"
__version__ = "0.1"
import ctypes
from ..srm.pysrm.srm import segment_aux
import numpy as np
from scipy import misc
import subprocess
import os
from ...util.util import create_dir, listdir_fullpath, upscale
from skimage import io, img_as_float
from colorama import init
from termcolor import colored
from ..SegmentationAlgorithm import SegmentationAlgorithm
class PMRF(SegmentationAlgorithm):
    """Segmentation algorithm wrapper that runs the external MPI_PMRF.py
    script on per-slice .dat files, using SRM oversegmentations as input.
    """
    def __init__(self, image, input_settings, preproc_settings,seg_settings,run_number):
        # Delegate all state setup to the SegmentationAlgorithm base class.
        super().__init__(image, input_settings, preproc_settings,seg_settings,run_number)
    def segment(self):
        """ Main segmentation process using PMRF algorithm
        Reads slices either from self.image (in-memory 3D array) or from the
        per-slice files listed in self.image, writes SRM oversegmentations,
        invokes MPI_PMRF.py per slice via subprocess, and stores the resulting
        .tif paths in self.segmented_filenames (and self.segmented if
        in-memory).
        """
        input_type = self.input_settings["InputType"]
        input_dir = self.input_settings["InputDir"]
        if input_type==1:
            # Single-file input: work in the file's parent directory.
            input_dir_split = input_dir.split("/")
            input_dir = "/".join(input_dir_split[:-1])
            input_file = input_dir  # NOTE(review): assigned but never used below
        # Directory layout for this run's intermediate and output files.
        output_dat_dir = os.path.join(input_dir,"msmcam_run_"+str(self.run_number),"res/pmrf/dat")
        output_tif_dir = os.path.join(input_dir,"msmcam_run_"+str(self.run_number), "res/pmrf/tif")
        input_dat_dir = os.path.join(input_dir, "msmcam_run_"+str(self.run_number),"dat")
        overseg_tif_dir = os.path.join(input_dir,"msmcam_run_"+str(self.run_number), "preproc/overseg/tif")
        overseg_dat_dir = os.path.join(input_dir,"msmcam_run_"+str(self.run_number), "preproc/overseg/dat")
        in_memory = self.input_settings["InMemory"]
        first_slice = self.input_settings["FirstSlice"]
        last_slice = self.input_settings["LastSlice"]
        num_slices = last_slice-first_slice
        if in_memory:
            self.segmented = np.zeros(self.image.shape,dtype=np.uint8)
        create_dir(overseg_tif_dir)
        create_dir(overseg_dat_dir)
        # Prepare oversegmentations using srm
        #if in_memory:
        #    self.image = upscale(self.image, self.preproc_settings["DownsampleScale"])
        # j tracks the absolute slice number used in output filenames.
        j = first_slice
        for i in range(num_slices):
            if in_memory:
                img_slice = self.image[i,:,:]*1.0
            else:
                img_slice = io.imread(str(self.image[i]))*1.0
            # SRM oversegmentation; q=40 controls the merging threshold.
            avg_out, lbl_out = segment_aux(img_slice, q=40)
            name = str(overseg_tif_dir)+"/Slice"+"{0:0>4}".format(j)+".tif"
            # NOTE(review): scipy.misc.imsave is deprecated/removed in newer
            # SciPy releases — confirm the pinned SciPy version supports it.
            misc.imsave(name,avg_out)
            name = str(overseg_dat_dir)+"/Slice"+"{0:0>4}".format(j)+".dat"
            avg_out = avg_out.astype(np.uint8)
            with open(name, 'wb') as f:
                avg_out.tofile(f)
            j = j+1
        ###########################
        filenames = sorted(listdir_fullpath(str(input_dat_dir)))
        create_dir(output_dat_dir)
        create_dir(output_tif_dir)
        multiphase = self.seg_settings["Multiphase"]
        invert = self.seg_settings["Invert"]
        if multiphase:
            num_labels = self.seg_settings["NumClustersPMRF"]
        else:
            num_labels = 2
        # Determine volume dimensions for the -b argument of MPI_PMRF.py.
        if in_memory:
            z, y, x = self.image.shape
        else:
            y, x = io.imread(str(self.image[0])).shape
            z = num_slices
        j = first_slice
        for i in range(len(filenames)):
            input_name = filenames[i]
            output_dat_name = str(output_dat_dir)+"/Slice"+"{0:0>4}".format(j)+".dat"
            output_tif_name = str(output_tif_dir)+"/Slice"+"{0:0>4}".format(j)+".tif"
            overseg_name = str(overseg_dat_dir) + "/Slice"+"{0:0>4}".format(j)+".dat"
            print('python '+os.path.dirname(os.path.abspath(__file__))+'/MPI_PMRF.py -i '+input_name+ ' -s '+ overseg_name+ ' -o '+output_dat_name+' -b '+ str(x)+','+str(y)+','+str(z)+ ' -e 10 -m 10 -l '+str(num_labels))
            # SECURITY NOTE: the command is built by string concatenation and
            # run with shell=True; paths containing spaces or shell
            # metacharacters will break or be interpreted by the shell.
            # Prefer subprocess.run([...], shell=False) with an argument list.
            p = subprocess.Popen(['python '+os.path.dirname(os.path.abspath(__file__))+'/MPI_PMRF.py -i '+input_name+ ' -s '+ overseg_name+ ' -o '+output_dat_name+' -b '+ str(x)+','+str(y)+','+str(z)+ ' -e 10 -m 10 -l '+str(num_labels)], shell=True)
            (output, err) = p.communicate()
            p_status = p.wait()
            #Saving .tif from .dat
            with open(output_dat_name, 'rb') as f:
                img_res = np.fromfile(f, dtype=np.uint8)
            img_res.shape = (y,x)
            # Binary case: MPI_PMRF emits labels 30/60; map the majority label
            # to 255 (foreground) unless 'invert' is set.
            if num_labels==2 or multiphase is False:
                index1 = np.where(img_res==30)
                index2 = np.where(img_res==60)
                if len(index1[0])>len(index2[0]):
                    img_res[index1]=255
                    img_res[index2]=0
                    if invert:
                        img_res[index1]=0
                        img_res[index2]=255
                else:
                    img_res[index2]=255
                    img_res[index1]=0
                    if invert:
                        img_res[index2]=0
                        img_res[index1]=255
            misc.imsave(output_tif_name,img_res)
            if in_memory:
                self.segmented[i,:,:] = img_res
            #print('Finished image '+str(i))
            print(colored('Finished image '+str(j),'yellow','on_grey', attrs=['bold']))
            j = j+1
        self.segmented_filenames = sorted(listdir_fullpath(str(output_tif_dir)))
| [
"os.path.abspath",
"numpy.fromfile",
"numpy.zeros",
"numpy.where",
"scipy.misc.imsave"
] | [((2358, 2400), 'numpy.zeros', 'np.zeros', (['self.image.shape'], {'dtype': 'np.uint8'}), '(self.image.shape, dtype=np.uint8)\n', (2366, 2400), True, 'import numpy as np\n'), ((3011, 3037), 'scipy.misc.imsave', 'misc.imsave', (['name', 'avg_out'], {}), '(name, avg_out)\n', (3022, 3037), False, 'from scipy import misc\n'), ((4932, 4962), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.uint8'}), '(f, dtype=np.uint8)\n', (4943, 4962), True, 'import numpy as np\n'), ((5686, 5723), 'scipy.misc.imsave', 'misc.imsave', (['output_tif_name', 'img_res'], {}), '(output_tif_name, img_res)\n', (5697, 5723), False, 'from scipy import misc\n'), ((5087, 5110), 'numpy.where', 'np.where', (['(img_res == 30)'], {}), '(img_res == 30)\n', (5095, 5110), True, 'import numpy as np\n'), ((5138, 5161), 'numpy.where', 'np.where', (['(img_res == 60)'], {}), '(img_res == 60)\n', (5146, 5161), True, 'import numpy as np\n'), ((4283, 4308), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4298, 4308), False, 'import os\n'), ((4533, 4558), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4548, 4558), False, 'import os\n')] |
""" Present an interactive function explorer with slider widgets.
Scrub the sliders to change the properties of the ``sin`` curve, or
type into the title text box to update the title of the plot.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve abcd_sliders.py
at your command prompt. Then navigate to the URL
http://localhost:5006/sliders
in your browser.
For this to run, you will need to install bokeh with conda or pip:
https://github.com/bokeh/bokeh
bokeh is at version 1.2.0 at the writing of this script
"""
import os
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import Slider, TextInput
from bokeh.plotting import figure
from irt_parameter_estimation.util import logistic4PLabcd
# Default 4PL (four-parameter logistic) parameters for the initial curve.
a_default = 0.002
b_default = 1500
c_default = 0
d_default = 1
# Set up data
x = np.arange(0, 3001, 50)
y = logistic4PLabcd(a_default, b_default, c_default, d_default, x)
source = ColumnDataSource(data=dict(x=x, y=y))
# Set up plot
# NOTE(review): the title "my sine wave" is left over from the bokeh sliders
# example this script was adapted from; consider retitling.
plot = figure(
    plot_height=400,
    plot_width=400,
    title="my sine wave",
    tools="crosshair,pan,reset,save,wheel_zoom",
    x_range=[0, 3000],
    y_range=[-0.2, 1.2],
)
plot.line("x", "y", source=source, line_width=3, line_alpha=0.6)
# Set up widgets
text = TextInput(title="title", value="Item Characteristic Curve")
a = Slider(
    title="a: discriminatory power", value=0.002, start=-0.006, end=0.006, step=0.0005
)
b = Slider(title="b: difficulty", value=1500.0, start=0.0, end=3000.0, step=25)
c = Slider(title="c: guessing parameter", value=0.0, start=-0.2, end=1.0, step=0.01)
d = Slider(title="d: expert error rate", value=1.0, start=0.0, end=1.0, step=0.01)
# Set up callbacks
def update_title(attrname, old, new):
    """Bokeh on_change callback: copy the text input into the plot title."""
    plot.title.text = text.value
text.on_change("value", update_title)
def update_data(attrname, old, new):
    """Bokeh on_change callback: recompute the 4PL curve from the sliders.

    The attrname/old/new arguments required by the callback signature are
    ignored; the current slider values are read directly.
    """
    xs = np.arange(0, 3001, 50)
    ys = logistic4PLabcd(a.value, b.value, c.value, d.value, xs)
    source.data = dict(x=xs, y=ys)
# Re-render the curve whenever any slider moves.
for w in [a, b, c, d]:
    w.on_change("value", update_data)
# Set up layouts and add to document
inputs = column(text, a, b, c, d)
curdoc().add_root(row(inputs, plot, width=800))
curdoc().title = "Sliders"
if __name__ == "__main__":
    # Convenience: running the script directly re-launches it under
    # `bokeh serve`, which is how bokeh apps must be executed.
    os.system(f"bokeh serve {__file__}")
| [
"bokeh.models.widgets.TextInput",
"bokeh.plotting.figure",
"os.system",
"irt_parameter_estimation.util.logistic4PLabcd",
"bokeh.io.curdoc",
"numpy.arange",
"bokeh.layouts.column",
"bokeh.models.widgets.Slider",
"bokeh.layouts.row"
] | [((923, 945), 'numpy.arange', 'np.arange', (['(0)', '(3001)', '(50)'], {}), '(0, 3001, 50)\n', (932, 945), True, 'import numpy as np\n'), ((950, 1012), 'irt_parameter_estimation.util.logistic4PLabcd', 'logistic4PLabcd', (['a_default', 'b_default', 'c_default', 'd_default', 'x'], {}), '(a_default, b_default, c_default, d_default, x)\n', (965, 1012), False, 'from irt_parameter_estimation.util import logistic4PLabcd\n'), ((1083, 1238), 'bokeh.plotting.figure', 'figure', ([], {'plot_height': '(400)', 'plot_width': '(400)', 'title': '"""my sine wave"""', 'tools': '"""crosshair,pan,reset,save,wheel_zoom"""', 'x_range': '[0, 3000]', 'y_range': '[-0.2, 1.2]'}), "(plot_height=400, plot_width=400, title='my sine wave', tools=\n 'crosshair,pan,reset,save,wheel_zoom', x_range=[0, 3000], y_range=[-0.2,\n 1.2])\n", (1089, 1238), False, 'from bokeh.plotting import figure\n'), ((1349, 1408), 'bokeh.models.widgets.TextInput', 'TextInput', ([], {'title': '"""title"""', 'value': '"""Item Characteristic Curve"""'}), "(title='title', value='Item Characteristic Curve')\n", (1358, 1408), False, 'from bokeh.models.widgets import Slider, TextInput\n'), ((1414, 1509), 'bokeh.models.widgets.Slider', 'Slider', ([], {'title': '"""a: discriminatory power"""', 'value': '(0.002)', 'start': '(-0.006)', 'end': '(0.006)', 'step': '(0.0005)'}), "(title='a: discriminatory power', value=0.002, start=-0.006, end=\n 0.006, step=0.0005)\n", (1420, 1509), False, 'from bokeh.models.widgets import Slider, TextInput\n'), ((1515, 1590), 'bokeh.models.widgets.Slider', 'Slider', ([], {'title': '"""b: difficulty"""', 'value': '(1500.0)', 'start': '(0.0)', 'end': '(3000.0)', 'step': '(25)'}), "(title='b: difficulty', value=1500.0, start=0.0, end=3000.0, step=25)\n", (1521, 1590), False, 'from bokeh.models.widgets import Slider, TextInput\n'), ((1595, 1680), 'bokeh.models.widgets.Slider', 'Slider', ([], {'title': '"""c: guessing parameter"""', 'value': '(0.0)', 'start': '(-0.2)', 'end': '(1.0)', 'step': 
'(0.01)'}), "(title='c: guessing parameter', value=0.0, start=-0.2, end=1.0, step=0.01\n )\n", (1601, 1680), False, 'from bokeh.models.widgets import Slider, TextInput\n'), ((1680, 1758), 'bokeh.models.widgets.Slider', 'Slider', ([], {'title': '"""d: expert error rate"""', 'value': '(1.0)', 'start': '(0.0)', 'end': '(1.0)', 'step': '(0.01)'}), "(title='d: expert error rate', value=1.0, start=0.0, end=1.0, step=0.01)\n", (1686, 1758), False, 'from bokeh.models.widgets import Slider, TextInput\n'), ((2284, 2308), 'bokeh.layouts.column', 'column', (['text', 'a', 'b', 'c', 'd'], {}), '(text, a, b, c, d)\n', (2290, 2308), False, 'from bokeh.layouts import row, column\n'), ((2073, 2095), 'numpy.arange', 'np.arange', (['(0)', '(3001)', '(50)'], {}), '(0, 3001, 50)\n', (2082, 2095), True, 'import numpy as np\n'), ((2104, 2138), 'irt_parameter_estimation.util.logistic4PLabcd', 'logistic4PLabcd', (['aa', 'bb', 'cc', 'dd', 'x'], {}), '(aa, bb, cc, dd, x)\n', (2119, 2138), False, 'from irt_parameter_estimation.util import logistic4PLabcd\n'), ((2328, 2356), 'bokeh.layouts.row', 'row', (['inputs', 'plot'], {'width': '(800)'}), '(inputs, plot, width=800)\n', (2331, 2356), False, 'from bokeh.layouts import row, column\n'), ((2358, 2366), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (2364, 2366), False, 'from bokeh.io import curdoc\n'), ((2417, 2453), 'os.system', 'os.system', (['f"""bokeh serve {__file__}"""'], {}), "(f'bokeh serve {__file__}')\n", (2426, 2453), False, 'import os\n'), ((2310, 2318), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (2316, 2318), False, 'from bokeh.io import curdoc\n')] |
"""Class that collects experience batches."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import math
from rl_2048.game import play
# Parameters for undersampling
DO_UNDERSAMPLING = True
AVG_KEEP_PROB = 0.04
MIN_KEEP_PROB = 0.01


class ExperienceCollector(object):
  """Collects experiences by playing according to a particular strategy."""

  @staticmethod
  def get_keep_probability(index, length):
    """Return the keep probability for the experience at a given index.

    The index is mapped to a value in [0, 1] (index 0 -> 1, last index -> 0)
    and fed through keep_prob = e^(a * value) + MIN_KEEP_PROB, where a is
    chosen so that the average probability is approximately AVG_KEEP_PROB
    (for small AVG_KEEP_PROB, a = -1 / (AVG_KEEP_PROB - MIN_KEEP_PROB)).

    Args:
      index: zero-based index of the experience.
      length: total number of experiences.
    """
    if not DO_UNDERSAMPLING:
      return 1.0
    recency = 1.0 - index / (length - 1)
    decay_rate = - 1 / (AVG_KEEP_PROB - MIN_KEEP_PROB)
    return math.e ** (decay_rate * recency) + MIN_KEEP_PROB

  def deduplicate(self, experiences):
    """Return the experiences with duplicate states removed, order preserved."""
    seen_states = set()
    unique_experiences = []
    for experience in experiences:
      state_key = tuple(experience.state.flatten())
      if state_key in seen_states:
        continue
      seen_states.add(state_key)
      unique_experiences.append(experience)
    return unique_experiences

  def collect(self, strategy, num_games=1):
    """Play num_games games and return the undersampled, deduplicated experiences."""
    collected = []
    for _ in range(num_games):
      _, game_experiences = play.play(strategy, allow_unavailable_action=False)
      deduped = self.deduplicate(game_experiences)
      total = len(deduped)
      for idx, experience in enumerate(deduped):
        if np.random.rand() < self.get_keep_probability(idx, total):
          collected.append(experience)
    return collected
| [
"numpy.random.rand",
"rl_2048.game.play.play"
] | [((1966, 2017), 'rl_2048.game.play.play', 'play.play', (['strategy'], {'allow_unavailable_action': '(False)'}), '(strategy, allow_unavailable_action=False)\n', (1975, 2017), False, 'from rl_2048.game import play\n'), ((2255, 2271), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2269, 2271), True, 'import numpy as np\n')] |
"""
Classes for point set registration using variants of Iterated-Closest Point
Author: <NAME>
"""
from abc import ABCMeta, abstractmethod
import logging
import numpy as np
from .feature_matcher import PointToPlaneFeatureMatcher
from .points import PointCloud, NormalCloud
from .rigid_transformations import RigidTransform
from .utils import skew
class RegistrationResult(object):
    """Container for the outcome of a point set registration.

    Attributes
    ----------
    T_source_target : :obj:`autolab_core.RigidTransform`
        transformation taking the source frame into the target frame
    cost : float
        value of the registration objective under that transform
    """

    def __init__(self, T_source_target, cost):
        self.T_source_target, self.cost = T_source_target, cost
class IterativeRegistrationSolver(object, metaclass=ABCMeta):
    """Abstract base class for iterative registration solvers.

    NOTE: this previously declared ``__metaclass__ = ABCMeta``, which is the
    Python 2 idiom and is silently ignored by Python 3, so abstractness was
    never enforced. Declaring the metaclass in the class statement restores
    the intended contract: instantiating this class (or a subclass that does
    not implement :meth:`register`) raises ``TypeError``.
    """

    @abstractmethod
    def register(
        self,
        source_point_cloud,
        target_point_cloud,
        source_normal_cloud,
        target_normal_cloud,
        matcher,
        num_iterations=1,
        compute_total_cost=True,
        match_centroids=False,
        vis=False,
    ):
        """Iteratively register objects to one another.

        Parameters
        ----------
        source_point_cloud : :obj:`autolab_core.PointCloud`
            source object points
        target_point_cloud : :obj`autolab_core.PointCloud`
            target object points
        source_normal_cloud : :obj:`autolab_core.NormalCloud`
            source object outward-pointing normals
        target_normal_cloud : :obj:`autolab_core.NormalCloud`
            target object outward-pointing normals
        matcher : :obj:`PointToPlaneFeatureMatcher`
            object to match the point sets
        num_iterations : int
            the number of iterations to run
        compute_total_cost : bool
            whether or not to compute the total cost upon termination.
        match_centroids : bool
            whether or not to match the centroids of the point clouds

        Returns
        -------
        :obj`RegistrationResult`
            results containing source to target transformation and cost
        """
        pass
class PointToPlaneICPSolver(IterativeRegistrationSolver):
"""Performs Iterated Closest Point with an objective weighted between
point-to-point and point-to-plane.
Attributes
----------
sample_size : int
number of randomly sampled points to use per iteration
cost_sample_size : int
number of randomly sampled points to use for cost evaluations
gamma : float
weight of point-to-point objective relative to point-to-plane objective
mu : float
regularizer for matrix inversion in the Gauss-Newton step
"""
def __init__(
self, sample_size=100, cost_sample_size=100, gamma=100.0, mu=1e-2
):
self.sample_size_ = sample_size
self.cost_sample_size_ = cost_sample_size
self.gamma_ = gamma
self.mu_ = mu
IterativeRegistrationSolver.__init__(self)
    def register(
        self,
        source_point_cloud,
        target_point_cloud,
        source_normal_cloud,
        target_normal_cloud,
        matcher,
        num_iterations=1,
        compute_total_cost=True,
        match_centroids=False,
        vis=False,
    ):
        """
        Iteratively register objects to one another using a modified version
        of point to plane ICP. The cost func is PointToPlane_COST +
        gamma * PointToPoint_COST. Uses a `stochastic Gauss-Newton step`
        where on each iteration a smaller number of points is sampled.
        Parameters
        ----------
        source_point_cloud : :obj:`autolab_core.PointCloud`
            source object points
        target_point_cloud : :obj`autolab_core.PointCloud`
            target object points
        source_normal_cloud : :obj:`autolab_core.NormalCloud`
            source object outward-pointing normals
        target_normal_cloud : :obj:`autolab_core.NormalCloud`
            target object outward-pointing normals
        matcher : :obj:`PointToPlaneFeatureMatcher`
            object to match the point sets
        num_iterations : int
            the number of iterations to run
        compute_total_cost : bool
            whether or not to compute the total cost upon termination.
        match_centroids : bool
            whether or not to match the centroids of the point clouds
        Returns
        -------
        :obj`RegistrationResult`
            results containing source to target transformation and cost
        """
        # check valid data
        if not isinstance(source_point_cloud, PointCloud) or not isinstance(
            target_point_cloud, PointCloud
        ):
            raise ValueError(
                "Source and target point clouds must be PointCloud objects"
            )
        if not isinstance(source_normal_cloud, NormalCloud) or not isinstance(
            target_normal_cloud, NormalCloud
        ):
            raise ValueError(
                "Source and target normal clouds must be NormalCloud objects"
            )
        if not isinstance(matcher, PointToPlaneFeatureMatcher):
            raise ValueError(
                "Feature matcher must be a PointToPlaneFeatureMatcher object"
            )
        if (
            source_point_cloud.num_points != source_normal_cloud.num_points
            or target_point_cloud.num_points != target_normal_cloud.num_points
        ):
            raise ValueError(
                "Input point clouds must have the same number of points \
                as corresponding normal cloud"
            )
        # extract source and target point and normal data arrays
        # (transposed to N x 3 row-per-point layout)
        orig_source_points = source_point_cloud.data.T
        orig_target_points = target_point_cloud.data.T
        orig_source_normals = source_normal_cloud.data.T
        orig_target_normals = target_normal_cloud.data.T
        # setup the problem: drop points whose normal has zero norm
        normal_norms = np.linalg.norm(orig_target_normals, axis=1)
        valid_inds = np.nonzero(normal_norms)
        orig_target_points = orig_target_points[valid_inds[0], :]
        orig_target_normals = orig_target_normals[valid_inds[0], :]
        normal_norms = np.linalg.norm(orig_source_normals, axis=1)
        valid_inds = np.nonzero(normal_norms)
        orig_source_points = orig_source_points[valid_inds[0], :]
        orig_source_normals = orig_source_normals[valid_inds[0], :]
        # alloc buffers for solutions
        source_mean_point = np.mean(orig_source_points, axis=0)
        target_mean_point = np.mean(orig_target_points, axis=0)
        R_sol = np.eye(3)
        t_sol = np.zeros([3, 1])  # init with diff between means
        if match_centroids:
            t_sol[:, 0] = target_mean_point - source_mean_point
        # iterate through
        # NOTE(review): the loop index `i` is shadowed by the inner
        # correspondence loop below; harmless here but fragile.
        for i in range(num_iterations):
            logging.info("Point to plane ICP iteration %d" % (i))
            # subsample points (stochastic Gauss-Newton: random subsets drawn
            # independently from source and target each iteration)
            source_subsample_inds = np.random.choice(
                orig_source_points.shape[0], size=self.sample_size_
            )
            source_points = orig_source_points[source_subsample_inds, :]
            source_normals = orig_source_normals[source_subsample_inds, :]
            target_subsample_inds = np.random.choice(
                orig_target_points.shape[0], size=self.sample_size_
            )
            target_points = orig_target_points[target_subsample_inds, :]
            target_normals = orig_target_normals[target_subsample_inds, :]
            # transform source points by the current pose estimate
            source_points = (
                R_sol.dot(source_points.T)
                + np.tile(t_sol, [1, source_points.shape[0]])
            ).T
            source_normals = (R_sol.dot(source_normals.T)).T
            # closest points
            corrs = matcher.match(
                source_points, target_points, source_normals, target_normals
            )
            # solve optimal rotation + translation
            # (index_map == -1 marks source points with no match)
            valid_corrs = np.where(corrs.index_map != -1)[0]
            source_corr_points = corrs.source_points[valid_corrs, :]
            target_corr_points = corrs.target_points[
                corrs.index_map[valid_corrs], :
            ]
            target_corr_normals = corrs.target_normals[
                corrs.index_map[valid_corrs], :
            ]
            num_corrs = valid_corrs.shape[0]
            if num_corrs == 0:
                logging.warning("No correspondences found")
                break
            # create A and b matrices for Gauss-Newton step on joint cost
            # function: A/b accumulate the point-to-plane term, Ap/bp the
            # point-to-point term; G = [skew(s)^T | I] is the pose Jacobian
            # for the small-angle parameterization v = [rotation; translation]
            A = np.zeros([6, 6])
            b = np.zeros([6, 1])
            Ap = np.zeros([6, 6])
            bp = np.zeros([6, 1])
            G = np.zeros([3, 6])
            G[:, 3:] = np.eye(3)
            for i in range(num_corrs):
                s = source_corr_points[i : i + 1, :].T
                t = target_corr_points[i : i + 1, :].T
                n = target_corr_normals[i : i + 1, :].T
                G[:, :3] = skew(s).T
                A += G.T.dot(n).dot(n.T).dot(G)
                b += G.T.dot(n).dot(n.T).dot(t - s)
                Ap += G.T.dot(G)
                bp += G.T.dot(t - s)
            # regularized solve of the combined normal equations
            v = np.linalg.solve(
                A + self.gamma_ * Ap + self.mu_ * np.eye(6),
                b + self.gamma_ * bp,
            )
            # create pose values from the solution, projecting the
            # small-angle update I + skew(v[:3]) back onto SO(3) via SVD
            # NOTE(review): np.float is deprecated (removed in NumPy 1.24) —
            # confirm the pinned NumPy version, or switch to float.
            R = np.eye(3)
            R = R + skew(v[:3])
            U, S, V = np.linalg.svd(R.astype(np.float))
            R = U.dot(V)
            t = v[3:]
            # incrementally update the final transform
            R_sol = R.dot(R_sol)
            t_sol = R.dot(t_sol) + t
        T_source_target = RigidTransform(
            R_sol,
            t_sol,
            from_frame=source_point_cloud.frame,
            to_frame=target_point_cloud.frame,
        )
        total_cost = 0
        # apply the final pose to all (filtered) source points for the cost
        source_points = (
            R_sol.dot(orig_source_points.T)
            + np.tile(t_sol, [1, orig_source_points.shape[0]])
        ).T
        source_normals = (R_sol.dot(orig_source_normals.T)).T
        if compute_total_cost:
            # rematch all points to get the final cost
            corrs = matcher.match(
                source_points,
                orig_target_points,
                source_normals,
                orig_target_normals,
            )
            valid_corrs = np.where(corrs.index_map != -1)[0]
            num_corrs = valid_corrs.shape[0]
            if num_corrs == 0:
                return RegistrationResult(T_source_target, np.inf)
            # get the corresponding points
            source_corr_points = corrs.source_points[valid_corrs, :]
            target_corr_points = corrs.target_points[
                corrs.index_map[valid_corrs], :
            ]
            target_corr_normals = corrs.target_normals[
                corrs.index_map[valid_corrs], :
            ]
            # determine total cost: mean squared point-to-plane distance
            # plus gamma * mean squared point-to-point distance
            source_target_alignment = np.diag(
                (source_corr_points - target_corr_points).dot(
                    target_corr_normals.T
                )
            )
            point_plane_cost = (1.0 / num_corrs) * np.sum(
                source_target_alignment * source_target_alignment
            )
            point_dist_cost = (1.0 / num_corrs) * np.sum(
                np.linalg.norm(source_corr_points - target_corr_points, axis=1)
                ** 2
            )
            total_cost = point_plane_cost + self.gamma_ * point_dist_cost
        return RegistrationResult(T_source_target, total_cost)
def register_2d(
        self,
        source_point_cloud,
        target_point_cloud,
        source_normal_cloud,
        target_normal_cloud,
        matcher,
        num_iterations=1,
        compute_total_cost=True,
        vis=False,
    ):
        """
        Iteratively register objects to one another using a modified version
        of point to plane ICP which only solves for tx and ty (translation
        in the plane) and theta (rotation about the z axis). The cost func
        is actually PointToPlane_COST + gamma * PointToPoint_COST
        Points should be specified in the basis of the planar worksurface.

        Parameters
        ----------
        source_point_cloud : :obj:`autolab_core.PointCloud`
            source object points
        target_point_cloud : :obj`autolab_core.PointCloud`
            target object points
        source_normal_cloud : :obj:`autolab_core.NormalCloud`
            source object outward-pointing normals
        target_normal_cloud : :obj:`autolab_core.NormalCloud`
            target object outward-pointing normals
        matcher : :obj:`PointToPlaneFeatureMatcher`
            object to match the point sets
        num_iterations : int
            the number of iterations to run
        compute_total_cost : bool
            whether or not to compute the total cost upon termination.

        Returns
        -------
        :obj`RegistrationResult`
            results containing source to target transformation and cost
        """
        # --- validate inputs ------------------------------------------------
        if not isinstance(source_point_cloud, PointCloud) or not isinstance(
            target_point_cloud, PointCloud
        ):
            raise ValueError(
                "Source and target point clouds must be PointCloud objects"
            )
        if not isinstance(source_normal_cloud, NormalCloud) or not isinstance(
            target_normal_cloud, NormalCloud
        ):
            raise ValueError(
                "Source and target normal clouds must be NormalCloud objects"
            )
        if not isinstance(matcher, PointToPlaneFeatureMatcher):
            raise ValueError(
                "Feature matcher must be a PointToPlaneFeatureMatcher object"
            )
        if (
            source_point_cloud.num_points != source_normal_cloud.num_points
            or target_point_cloud.num_points != target_normal_cloud.num_points
        ):
            raise ValueError(
                "Input point clouds must have the same number of points as \
                corresponding normal cloud"
            )
        # extract source and target point and normal data arrays (N x 3)
        orig_source_points = source_point_cloud.data.T
        orig_target_points = target_point_cloud.data.T
        orig_source_normals = source_normal_cloud.data.T
        orig_target_normals = target_normal_cloud.data.T
        # setup the problem: discard points whose normal has zero length
        logging.info("Setting up problem")
        normal_norms = np.linalg.norm(orig_target_normals, axis=1)
        valid_inds = np.nonzero(normal_norms)
        orig_target_points = orig_target_points[valid_inds[0], :]
        orig_target_normals = orig_target_normals[valid_inds[0], :]
        normal_norms = np.linalg.norm(orig_source_normals, axis=1)
        valid_inds = np.nonzero(normal_norms)
        orig_source_points = orig_source_points[valid_inds[0], :]
        orig_source_normals = orig_source_normals[valid_inds[0], :]
        # alloc buffers for the running source -> target transform estimate
        R_sol = np.eye(3)
        t_sol = np.zeros([3, 1])
        # iterate through
        for i in range(num_iterations):
            # lazy %-style args avoid formatting when logging is disabled
            logging.info("Point to plane ICP iteration %d", i)
            # subsample points to keep each iteration cheap
            source_subsample_inds = np.random.choice(
                orig_source_points.shape[0], size=self.sample_size_
            )
            source_points = orig_source_points[source_subsample_inds, :]
            source_normals = orig_source_normals[source_subsample_inds, :]
            target_subsample_inds = np.random.choice(
                orig_target_points.shape[0], size=self.sample_size_
            )
            target_points = orig_target_points[target_subsample_inds, :]
            target_normals = orig_target_normals[target_subsample_inds, :]
            # transform source points by the current estimate
            source_points = (
                R_sol.dot(source_points.T)
                + np.tile(t_sol, [1, source_points.shape[0]])
            ).T
            source_normals = (R_sol.dot(source_normals.T)).T
            # closest points
            corrs = matcher.match(
                source_points, target_points, source_normals, target_normals
            )
            # solve optimal rotation + translation
            valid_corrs = np.where(corrs.index_map != -1)[0]
            source_corr_points = corrs.source_points[valid_corrs, :]
            target_corr_points = corrs.target_points[
                corrs.index_map[valid_corrs], :
            ]
            target_corr_normals = corrs.target_normals[
                corrs.index_map[valid_corrs], :
            ]
            num_corrs = valid_corrs.shape[0]
            if num_corrs == 0:
                # no matches this round; keep the current estimate
                break
            # create A and b matrices for Gauss-Newton step on joint cost
            # function
            A = np.zeros([3, 3])  # A and b for point to plane cost
            b = np.zeros([3, 1])
            Ap = np.zeros([3, 3])  # A and b for point to point cost
            bp = np.zeros([3, 1])
            G = np.zeros([3, 3])
            G[:2, 1:] = np.eye(2)
            # use a distinct index (k) so the outer iteration counter `i`
            # is not shadowed (original code reused `i` here)
            for k in range(num_corrs):
                s = source_corr_points[k : k + 1, :].T
                t = target_corr_points[k : k + 1, :].T
                n = target_corr_normals[k : k + 1, :].T
                # index with explicit [row, 0] so a scalar (not a size-1
                # array) is assigned -- NumPy >= 1.25 deprecates the latter
                G[0, 0] = -s[1, 0]
                G[1, 0] = s[0, 0]
                A += G.T.dot(n).dot(n.T).dot(G)
                b += G.T.dot(n).dot(n.T).dot(t - s)
                Ap += G.T.dot(G)
                bp += G.T.dot(t - s)
            # regularized joint system: point-to-plane + gamma * point-to-point
            v = np.linalg.solve(
                A + self.gamma_ * Ap + self.mu_ * np.eye(3),
                b + self.gamma_ * bp,
            )
            # create pose values from the solution
            R = np.eye(3)
            R = R + skew(np.array([[0], [0], [v[0, 0]]]))
            # project the linearized rotation back onto SO(3) via SVD;
            # np.float64 replaces the np.float alias removed in NumPy 1.24
            U, S, V = np.linalg.svd(R.astype(np.float64))
            R = U.dot(V)
            t = np.array([[v[1, 0]], [v[2, 0]], [0]])
            # incrementally update the final transform
            R_sol = R.dot(R_sol)
            t_sol = R.dot(t_sol) + t
        # compute solution transform
        T_source_target = RigidTransform(
            R_sol,
            t_sol,
            from_frame=source_point_cloud.frame,
            to_frame=target_point_cloud.frame,
        )
        total_cost = 0
        if compute_total_cost:
            # subsample points
            source_subsample_inds = np.random.choice(
                orig_source_points.shape[0], size=self.cost_sample_size_
            )
            source_points = orig_source_points[source_subsample_inds, :]
            source_normals = orig_source_normals[source_subsample_inds, :]
            target_subsample_inds = np.random.choice(
                orig_target_points.shape[0], size=self.cost_sample_size_
            )
            target_points = orig_target_points[target_subsample_inds, :]
            target_normals = orig_target_normals[target_subsample_inds, :]
            # transform source points
            source_points = (
                R_sol.dot(source_points.T)
                + np.tile(t_sol, [1, source_points.shape[0]])
            ).T
            source_normals = (R_sol.dot(source_normals.T)).T
            # rematch to get the total cost
            corrs = matcher.match(
                source_points, target_points, source_normals, target_normals
            )
            valid_corrs = np.where(corrs.index_map != -1)[0]
            num_corrs = valid_corrs.shape[0]
            if num_corrs == 0:
                return RegistrationResult(T_source_target, np.inf)
            # get the corresponding points
            source_corr_points = corrs.source_points[valid_corrs, :]
            target_corr_points = corrs.target_points[
                corrs.index_map[valid_corrs], :
            ]
            target_corr_normals = corrs.target_normals[
                corrs.index_map[valid_corrs], :
            ]
            # determine total cost
            source_target_alignment = np.diag(
                (source_corr_points - target_corr_points).dot(
                    target_corr_normals.T
                )
            )
            point_plane_cost = (1.0 / num_corrs) * np.sum(
                source_target_alignment * source_target_alignment
            )
            point_dist_cost = (1.0 / num_corrs) * np.sum(
                np.linalg.norm(source_corr_points - target_corr_points, axis=1)
                ** 2
            )
            total_cost = point_plane_cost + self.gamma_ * point_dist_cost
        return RegistrationResult(T_source_target, total_cost)
| [
"numpy.sum",
"logging.warning",
"numpy.zeros",
"numpy.nonzero",
"logging.info",
"numpy.mean",
"numpy.linalg.norm",
"numpy.array",
"numpy.where",
"numpy.random.choice",
"numpy.tile",
"numpy.eye"
] | [((6100, 6143), 'numpy.linalg.norm', 'np.linalg.norm', (['orig_target_normals'], {'axis': '(1)'}), '(orig_target_normals, axis=1)\n', (6114, 6143), True, 'import numpy as np\n'), ((6165, 6189), 'numpy.nonzero', 'np.nonzero', (['normal_norms'], {}), '(normal_norms)\n', (6175, 6189), True, 'import numpy as np\n'), ((6348, 6391), 'numpy.linalg.norm', 'np.linalg.norm', (['orig_source_normals'], {'axis': '(1)'}), '(orig_source_normals, axis=1)\n', (6362, 6391), True, 'import numpy as np\n'), ((6413, 6437), 'numpy.nonzero', 'np.nonzero', (['normal_norms'], {}), '(normal_norms)\n', (6423, 6437), True, 'import numpy as np\n'), ((6639, 6674), 'numpy.mean', 'np.mean', (['orig_source_points'], {'axis': '(0)'}), '(orig_source_points, axis=0)\n', (6646, 6674), True, 'import numpy as np\n'), ((6703, 6738), 'numpy.mean', 'np.mean', (['orig_target_points'], {'axis': '(0)'}), '(orig_target_points, axis=0)\n', (6710, 6738), True, 'import numpy as np\n'), ((6755, 6764), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6761, 6764), True, 'import numpy as np\n'), ((6781, 6797), 'numpy.zeros', 'np.zeros', (['[3, 1]'], {}), '([3, 1])\n', (6789, 6797), True, 'import numpy as np\n'), ((14619, 14653), 'logging.info', 'logging.info', (['"""Setting up problem"""'], {}), "('Setting up problem')\n", (14631, 14653), False, 'import logging\n'), ((14677, 14720), 'numpy.linalg.norm', 'np.linalg.norm', (['orig_target_normals'], {'axis': '(1)'}), '(orig_target_normals, axis=1)\n', (14691, 14720), True, 'import numpy as np\n'), ((14742, 14766), 'numpy.nonzero', 'np.nonzero', (['normal_norms'], {}), '(normal_norms)\n', (14752, 14766), True, 'import numpy as np\n'), ((14925, 14968), 'numpy.linalg.norm', 'np.linalg.norm', (['orig_source_normals'], {'axis': '(1)'}), '(orig_source_normals, axis=1)\n', (14939, 14968), True, 'import numpy as np\n'), ((14990, 15014), 'numpy.nonzero', 'np.nonzero', (['normal_norms'], {}), '(normal_norms)\n', (15000, 15014), True, 'import numpy as np\n'), ((15204, 15213), 
'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (15210, 15213), True, 'import numpy as np\n'), ((15230, 15246), 'numpy.zeros', 'np.zeros', (['[3, 1]'], {}), '([3, 1])\n', (15238, 15246), True, 'import numpy as np\n'), ((7001, 7052), 'logging.info', 'logging.info', (["('Point to plane ICP iteration %d' % i)"], {}), "('Point to plane ICP iteration %d' % i)\n", (7013, 7052), False, 'import logging\n'), ((7123, 7192), 'numpy.random.choice', 'np.random.choice', (['orig_source_points.shape[0]'], {'size': 'self.sample_size_'}), '(orig_source_points.shape[0], size=self.sample_size_)\n', (7139, 7192), True, 'import numpy as np\n'), ((7407, 7476), 'numpy.random.choice', 'np.random.choice', (['orig_target_points.shape[0]'], {'size': 'self.sample_size_'}), '(orig_target_points.shape[0], size=self.sample_size_)\n', (7423, 7476), True, 'import numpy as np\n'), ((8751, 8767), 'numpy.zeros', 'np.zeros', (['[6, 6]'], {}), '([6, 6])\n', (8759, 8767), True, 'import numpy as np\n'), ((8784, 8800), 'numpy.zeros', 'np.zeros', (['[6, 1]'], {}), '([6, 1])\n', (8792, 8800), True, 'import numpy as np\n'), ((8818, 8834), 'numpy.zeros', 'np.zeros', (['[6, 6]'], {}), '([6, 6])\n', (8826, 8834), True, 'import numpy as np\n'), ((8852, 8868), 'numpy.zeros', 'np.zeros', (['[6, 1]'], {}), '([6, 1])\n', (8860, 8868), True, 'import numpy as np\n'), ((8885, 8901), 'numpy.zeros', 'np.zeros', (['[3, 6]'], {}), '([3, 6])\n', (8893, 8901), True, 'import numpy as np\n'), ((8925, 8934), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (8931, 8934), True, 'import numpy as np\n'), ((9563, 9572), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (9569, 9572), True, 'import numpy as np\n'), ((15326, 15377), 'logging.info', 'logging.info', (["('Point to plane ICP iteration %d' % i)"], {}), "('Point to plane ICP iteration %d' % i)\n", (15338, 15377), False, 'import logging\n'), ((15448, 15517), 'numpy.random.choice', 'np.random.choice', (['orig_source_points.shape[0]'], {'size': 'self.sample_size_'}), 
'(orig_source_points.shape[0], size=self.sample_size_)\n', (15464, 15517), True, 'import numpy as np\n'), ((15732, 15801), 'numpy.random.choice', 'np.random.choice', (['orig_target_points.shape[0]'], {'size': 'self.sample_size_'}), '(orig_target_points.shape[0], size=self.sample_size_)\n', (15748, 15801), True, 'import numpy as np\n'), ((17016, 17032), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (17024, 17032), True, 'import numpy as np\n'), ((17084, 17100), 'numpy.zeros', 'np.zeros', (['[3, 1]'], {}), '([3, 1])\n', (17092, 17100), True, 'import numpy as np\n'), ((17118, 17134), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (17126, 17134), True, 'import numpy as np\n'), ((17187, 17203), 'numpy.zeros', 'np.zeros', (['[3, 1]'], {}), '([3, 1])\n', (17195, 17203), True, 'import numpy as np\n'), ((17220, 17236), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (17228, 17236), True, 'import numpy as np\n'), ((17261, 17270), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (17267, 17270), True, 'import numpy as np\n'), ((17925, 17934), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (17931, 17934), True, 'import numpy as np\n'), ((18090, 18127), 'numpy.array', 'np.array', (['[[v[1, 0]], [v[2, 0]], [0]]'], {}), '([[v[1, 0]], [v[2, 0]], [0]])\n', (18098, 18127), True, 'import numpy as np\n'), ((18600, 18674), 'numpy.random.choice', 'np.random.choice', (['orig_source_points.shape[0]'], {'size': 'self.cost_sample_size_'}), '(orig_source_points.shape[0], size=self.cost_sample_size_)\n', (18616, 18674), True, 'import numpy as np\n'), ((18889, 18963), 'numpy.random.choice', 'np.random.choice', (['orig_target_points.shape[0]'], {'size': 'self.cost_sample_size_'}), '(orig_target_points.shape[0], size=self.cost_sample_size_)\n', (18905, 18963), True, 'import numpy as np\n'), ((8140, 8171), 'numpy.where', 'np.where', (['(corrs.index_map != -1)'], {}), '(corrs.index_map != -1)\n', (8148, 8171), True, 'import numpy as np\n'), ((8571, 8614), 
'logging.warning', 'logging.warning', (['"""No correspondences found"""'], {}), "('No correspondences found')\n", (8586, 8614), False, 'import logging\n'), ((10129, 10177), 'numpy.tile', 'np.tile', (['t_sol', '[1, orig_source_points.shape[0]]'], {}), '(t_sol, [1, orig_source_points.shape[0]])\n', (10136, 10177), True, 'import numpy as np\n'), ((10550, 10581), 'numpy.where', 'np.where', (['(corrs.index_map != -1)'], {}), '(corrs.index_map != -1)\n', (10558, 10581), True, 'import numpy as np\n'), ((11346, 11403), 'numpy.sum', 'np.sum', (['(source_target_alignment * source_target_alignment)'], {}), '(source_target_alignment * source_target_alignment)\n', (11352, 11403), True, 'import numpy as np\n'), ((16465, 16496), 'numpy.where', 'np.where', (['(corrs.index_map != -1)'], {}), '(corrs.index_map != -1)\n', (16473, 16496), True, 'import numpy as np\n'), ((19590, 19621), 'numpy.where', 'np.where', (['(corrs.index_map != -1)'], {}), '(corrs.index_map != -1)\n', (19598, 19621), True, 'import numpy as np\n'), ((20386, 20443), 'numpy.sum', 'np.sum', (['(source_target_alignment * source_target_alignment)'], {}), '(source_target_alignment * source_target_alignment)\n', (20392, 20443), True, 'import numpy as np\n'), ((7785, 7828), 'numpy.tile', 'np.tile', (['t_sol', '[1, source_points.shape[0]]'], {}), '(t_sol, [1, source_points.shape[0]])\n', (7792, 7828), True, 'import numpy as np\n'), ((16110, 16153), 'numpy.tile', 'np.tile', (['t_sol', '[1, source_points.shape[0]]'], {}), '(t_sol, [1, source_points.shape[0]])\n', (16117, 16153), True, 'import numpy as np\n'), ((17960, 17991), 'numpy.array', 'np.array', (['[[0], [0], [v[0, 0]]]'], {}), '([[0], [0], [v[0, 0]]])\n', (17968, 17991), True, 'import numpy as np\n'), ((19272, 19315), 'numpy.tile', 'np.tile', (['t_sol', '[1, source_points.shape[0]]'], {}), '(t_sol, [1, source_points.shape[0]])\n', (19279, 19315), True, 'import numpy as np\n'), ((9432, 9441), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (9438, 9441), True, 'import 
numpy as np\n'), ((11508, 11571), 'numpy.linalg.norm', 'np.linalg.norm', (['(source_corr_points - target_corr_points)'], {'axis': '(1)'}), '(source_corr_points - target_corr_points, axis=1)\n', (11522, 11571), True, 'import numpy as np\n'), ((17794, 17803), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (17800, 17803), True, 'import numpy as np\n'), ((20548, 20611), 'numpy.linalg.norm', 'np.linalg.norm', (['(source_corr_points - target_corr_points)'], {'axis': '(1)'}), '(source_corr_points - target_corr_points, axis=1)\n', (20562, 20611), True, 'import numpy as np\n')] |
import numpy as np
import sympy as sp
import pylbm
import sys
"""
Von Karman vortex street simulated by Navier-Stokes solver D2Q9
Reynolds number = 2500
"""
def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):
    """
    Render a terminal progress bar in place (carriage-return style).

    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        barLength   - Optional  : character length of bar (Int)
    """
    percents = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
    filled = int(round(barLength * iteration / float(total)))
    bar = '*' * filled + '-' * (barLength - filled)
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
    sys.stdout.flush()
    # finish the line once the bar is complete
    if iteration == total:
        sys.stdout.write('\n')
        sys.stdout.flush()
# Output mode: dump HDF5 snapshots (True) or show a live matplotlib view (False)
h5_save = True
# Symbolic space variables and lattice velocity used by the pylbm scheme
X, Y, LA = sp.symbols('X, Y, LA')
# Conserved moments: density and the two momentum components
rho, qx, qy = sp.symbols('rho, qx, qy')
def bc_rect(f, m, x, y, rhoo, uo):
    """Inflow boundary condition: uniform horizontal momentum rhoo*uo."""
    m[rho] = 0.
    m[qy] = 0.
    m[qx] = rhoo * uo
def vorticity(sol):
    """Approximate |curl| of the momentum field on interior grid nodes
    via centered finite differences of the qx / qy moments."""
    u = sol.m[qx]
    v = sol.m[qy]
    curl = np.abs(u[1:-1, 2:] - u[1:-1, :-2] - v[2:, 1:-1] + v[:-2, 1:-1])
    return curl.T
def save(mpi_topo, x, y, m, num):
    """Write density and velocity fields to HDF5 snapshot number `num`."""
    writer = pylbm.H5File(mpi_topo, filename, path, num)
    writer.set_grid(x, y)
    writer.add_scalar('rho', m[rho])
    writer.add_vector('velocity', [m[qx], m[qy]])
    writer.save()
# parameters
xmin, xmax, ymin, ymax = 0., 2., 0., 1.  # computational domain bounds
radius = 0.125  # radius of the circular obstacle
if h5_save:
    dx = 1./512 # spatial step (fine grid for batch HDF5 runs)
else:
    dx = 1./128  # coarser grid for interactive visualization
la = 1. # velocity of the scheme
rhoo = 1.  # mean density
uo = 0.05  # inflow velocity
mu = 5.e-6  # shear viscosity
zeta = 10*mu  # bulk viscosity
# relaxation rates derived from the viscosities (BGK-style relations)
dummy = 3.0/(la*rhoo*dx)
s1 = 1.0/(0.5+zeta*dummy)
s2 = 1.0/(0.5+mu*dummy)
s = [0.,0.,0.,s1,s1,s1,s1,s2,s2]
# symbolic shorthands used in the equilibrium moments below
dummy = 1./(LA**2*rhoo)
qx2 = dummy*qx**2
qy2 = dummy*qy**2
q2 = qx2+qy2
qxy = dummy*qx*qy
# pylbm simulation dictionary: geometry, D2Q9 scheme, and boundary conditions
dico = {
    'box': {
        'x': [xmin, xmax],
        'y': [ymin, ymax],
        'label': [0, 1, 0, 0]},
    # obstacle shifted 2*dx off the centerline to trigger the instability
    'elements': [pylbm.Circle([.3, 0.5*(ymin+ymax)+2*dx], radius, label=2)],
    'space_step': dx,
    'scheme_velocity': LA,
    'schemes': [
        {
            'velocities': list(range(9)),
            'polynomials': [
                1,
                LA*X, LA*Y,
                3*(X**2+Y**2)-4,
                0.5*(9*(X**2+Y**2)**2-21*(X**2+Y**2)+8),
                3*X*(X**2+Y**2)-5*X, 3*Y*(X**2+Y**2)-5*Y,
                X**2-Y**2, X*Y
            ],
            'relaxation_parameters': s,
            'equilibrium': [
                rho,
                qx, qy,
                -2*rho + 3*q2,
                rho - 3*q2,
                -qx/LA, -qy/LA,
                qx2 - qy2, qxy
            ],
            'conserved_moments': [rho, qx, qy],
        },
    ],
    'init': {rho: rhoo,
             qx: rhoo*uo,
             qy: 0.
             },
    'parameters': {LA: la},
    # labels: 0 = inflow (bc_rect), 1 = outflow (Neumann), 2 = obstacle
    'boundary_conditions': {
        0: {'method': {0: pylbm.bc.BouzidiBounceBack}, 'value': (bc_rect, (rhoo, uo))},
        1: {'method': {0: pylbm.bc.NeumannX}},
        2: {'method': {0: pylbm.bc.BouzidiBounceBack}},
    },
    'generator': 'cython',
}
# instantiate the simulation and report the Reynolds number
sol = pylbm.Simulation(dico)
Re = rhoo*uo*2*radius/mu
print("Reynolds number {0:10.3e}".format(Re))
x, y = sol.domain.x, sol.domain.y
if h5_save:
    # batch mode: run until Tf, dumping an HDF5 snapshot every 64 steps
    Tf = 500.
    im = 0  # snapshot counter
    l = Tf / sol.dt / 64  # total number of snapshots (progress-bar length)
    printProgress(im, l, prefix='Progress:', suffix='Complete', barLength=50)
    filename = 'Karman'
    path = './data_' + filename
    save(sol.domain.mpi_topo, x, y, sol.m, im)
    while sol.t < Tf:
        for k in range(64):
            sol.one_time_step()
        im += 1
        printProgress(im, l, prefix='Progress:', suffix='Complete', barLength=50)
        save(sol.domain.mpi_topo, x, y, sol.m, im)
else:
    # interactive mode: animate the vorticity field with matplotlib
    viewer = pylbm.viewer.matplotlib_viewer
    fig = viewer.Fig()
    ax = fig[0]
    # draw the obstacle (coordinates converted to lattice units)
    ax.ellipse([.3/dx, 0.5*(ymin+ymax)/dx+2], [radius/dx, radius/dx], 'r')
    image = ax.image(vorticity(sol), cmap='cubehelix', clim=[0, .05])
    def update(iframe):
        # advance 64 lattice steps per animation frame, then redraw
        nrep = 64
        for i in range(nrep):
            sol.one_time_step()
        image.set_data(vorticity(sol))
        ax.title = "Solution t={0:f}".format(sol.t)
    # run the simulation
    fig.animate(update, interval=1)
    fig.show()
| [
"sys.stdout.write",
"sympy.symbols",
"pylbm.Simulation",
"pylbm.Circle",
"numpy.abs",
"pylbm.H5File",
"sys.stdout.flush"
] | [((1201, 1223), 'sympy.symbols', 'sp.symbols', (['"""X, Y, LA"""'], {}), "('X, Y, LA')\n", (1211, 1223), True, 'import sympy as sp\n'), ((1238, 1263), 'sympy.symbols', 'sp.symbols', (['"""rho, qx, qy"""'], {}), "('rho, qx, qy')\n", (1248, 1263), True, 'import sympy as sp\n'), ((3411, 3433), 'pylbm.Simulation', 'pylbm.Simulation', (['dico'], {}), '(dico)\n', (3427, 3433), False, 'import pylbm\n'), ((1069, 1087), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1085, 1087), False, 'import sys\n'), ((1425, 1500), 'numpy.abs', 'np.abs', (['(qx_n[1:-1, 2:] - qx_n[1:-1, :-2] - qy_n[2:, 1:-1] + qy_n[:-2, 1:-1])'], {}), '(qx_n[1:-1, 2:] - qx_n[1:-1, :-2] - qy_n[2:, 1:-1] + qy_n[:-2, 1:-1])\n', (1431, 1500), True, 'import numpy as np\n'), ((1581, 1624), 'pylbm.H5File', 'pylbm.H5File', (['mpi_topo', 'filename', 'path', 'num'], {}), '(mpi_topo, filename, path, num)\n', (1593, 1624), False, 'import pylbm\n'), ((987, 1063), 'sys.stdout.write', 'sys.stdout.write', (["('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))"], {}), "('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))\n", (1003, 1063), False, 'import sys\n'), ((1123, 1145), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (1139, 1145), False, 'import sys\n'), ((1154, 1172), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1170, 1172), False, 'import sys\n'), ((2278, 2344), 'pylbm.Circle', 'pylbm.Circle', (['[0.3, 0.5 * (ymin + ymax) + 2 * dx]', 'radius'], {'label': '(2)'}), '([0.3, 0.5 * (ymin + ymax) + 2 * dx], radius, label=2)\n', (2290, 2344), False, 'import pylbm\n')] |
#### MK 4 Networks ####
'''
Exploration of convex networks on a simple example.
It includes the ICNN technique (Amos et al.).
'''
### This is a script for the training of the
### Third NN approach
'''
Improvements:
1) accepts u as a N-vector
2) Generalized Loss function
3) Adapted network layout
4) ResNet used as network (TODO)
'''
import csv
import multiprocessing
import pandas as pd
from joblib import Parallel, delayed
### imports
import numpy as np
# in-project imports
import legacyCode.nnUtils as nnUtils
import csv
# Tensorflow
import tensorflow as tf
from tensorflow import Tensor
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.constraints import NonNeg
from tensorflow.keras import initializers
# import tensorflow.keras.backend as K
import matplotlib.pyplot as plt
plt.style.use("kitish")
# ------ Code starts here --------
def main():
    """Load the three pre-trained comparison networks and visualize them."""
    # Training hyperparameters (kept; the training runs themselves are
    # currently disabled in favor of loading saved models).
    batchSize = 5000
    epochCount = 5000
    # Plain fully connected network
    dense_dir = "legacyCode/models/ConvComparison_fcnn"
    model = tf.keras.models.load_model(dense_dir + '/model')
    # Convex network enforced via nonnegative weights
    nonneg_dir = "legacyCode/models/ConvComparison_nonNeg"
    model_nonneg = tf.keras.models.load_model(nonneg_dir + '/model')
    # Input-convex architecture (Amos et al.)
    icnn_dir = "legacyCode/models/ConvComparison_ICNN"
    model_ICNN = tf.keras.models.load_model(icnn_dir + '/model')
    # Plot all three against the target function
    evaluateModel(model, model_nonneg, model_ICNN)
    plt.show()
    return 0
def printDerivative(model):
    """Plot d(model)/dx on [-100, 100) via automatic differentiation and
    return the gradient tensor."""
    samples = np.arange(-100.0, 100.0, 0.001)
    column = np.reshape(samples, (samples.shape[0], 1))
    x_var = tf.Variable(column)
    with tf.GradientTape() as tape:
        # forward pass recorded on the tape (inference mode)
        predictions = model(x_var, training=False)
    gradients = tape.gradient(predictions, x_var)
    # reference values (computed but currently not plotted)
    target = createTrainingData(samples)
    plt.plot(samples, gradients)
    plt.ylabel('function value')
    plt.xlabel('input value')
    plt.legend(['Model', 'Model Derivative', 'Target Fct', 'Target Derivative'])
    plt.show()
    return gradients
def printWeights(model):
    """Print the weight arrays of every layer in *model*."""
    for current_layer in model.layers:
        layer_weights = current_layer.get_weights()  # list of numpy arrays
        print(layer_weights)
    return 0
def evaluateModel(model, model2, model3):
    """Plot the target quadratic against the predictions of the three
    networks (FCNN, naive convex, ICNN) on a common grid."""
    grid = np.arange(-10, 10, 0.001)
    reference = createTrainingData(grid)
    pred_dense = model.predict(grid)
    pred_nonneg = model2.predict(grid)
    pred_icnn = model3.predict(grid)
    # plot order must match the legend order below
    plt.plot(grid, reference)
    plt.plot(grid, pred_dense)
    plt.plot(grid, pred_nonneg)
    plt.plot(grid, pred_icnn)
    plt.ylabel('function value')
    plt.xlabel('input value')
    plt.legend(['quadratic function', 'FCNN', 'naive convex', 'ICNN'])
    plt.show()
    return 0
def trainModel(model, filename, batchSize, epochCount):
    """Fit *model* on the synthetic quadratic data, checkpointing to
    *filename*, and return the trained model."""
    # Legacy data-file names (kept for reference; data is generated below).
    filenameAlpha = "trainingData_M1_alpha.csv"
    filenameU = "trainingData_M1_u.csv"
    ### 1) Generate training data ##############################################
    print("Create Training Data")
    samples = np.arange(-5.0, 5.0, 0.0001)
    targets = createTrainingData(samples)
    ### 2) Setup callbacks #####################################################
    # Early stopping on the training loss plus two checkpoint writers:
    # best-so-far and a periodic quicksave.
    stopper = EarlyStopping(monitor='loss', mode='min', min_delta=0.000000001, patience=500,
                            verbose=10)
    checkpoint_best = ModelCheckpoint(filename + '/best_model.h5', monitor='loss', mode='min', save_best_only=True)
    checkpoint_periodic = ModelCheckpoint(filename + '/model_quicksave.h5', monitor='loss', mode='min', save_best_only=False,
                                          save_freq=500)
    ### 3) Train ###############################################################
    print("Train Model")
    history = model.fit(samples, targets, validation_split=0.01, epochs=epochCount, batch_size=batchSize, verbose=1,
                        callbacks=[stopper, checkpoint_best, checkpoint_periodic])
    ### 4) Save trained model and history ######################################
    print("Save model and history")
    nnUtils.save_training(filename, model, history)
    print("Training successfully saved")
    # reload the history as a consistency check
    history1 = nnUtils.load_trainHistory(filename)
    print("Training Sequence successfully finished")
    return model
### Build the network:
def create_modelMK4():
    """Build the unconstrained fully connected comparison network.

    Three softplus hidden layers of width 3 and a linear output layer,
    compiled with MSE loss and the Adam optimizer.
    """
    # Uniform weight initialization in [-0.5, 0.5]; biases start at one.
    initializer = tf.keras.initializers.RandomUniform(minval=-0.5, maxval=0.5, seed=None)
    inputs = keras.Input(shape=(1,))
    net = layers.Dense(3, activation="softplus",
                       kernel_initializer=initializer,
                       bias_initializer='ones')(inputs)
    net = layers.Dense(3, activation="softplus",
                       kernel_initializer=initializer,
                       bias_initializer='ones')(net)
    net = layers.Dense(3, activation="softplus",
                       kernel_initializer=initializer,
                       bias_initializer='ones')(net)
    # linear output layer
    outputs = layers.Dense(1,
                          kernel_initializer=initializer,
                          bias_initializer='ones')(net)
    model = keras.Model(inputs=[inputs], outputs=[outputs])
    model.summary()
    model.compile(loss="mean_squared_error", optimizer='adam', metrics=['mean_absolute_error'])
    return model
def create_modelMK4_nonneg():
    """Build the naive convex network: every layer after the first carries
    a NonNeg kernel constraint, which (with the convex softplus activation)
    makes the network output convex in its input."""
    width = 3
    # nonnegative uniform initialization matches the NonNeg constraint
    initializer = tf.keras.initializers.RandomUniform(minval=0, maxval=0.5, seed=None)
    inputs = keras.Input(shape=(1,))
    # first hidden layer is unconstrained
    net = layers.Dense(width, activation="softplus",
                       kernel_initializer=initializer,
                       bias_initializer='zeros')(inputs)
    net = layers.Dense(width, kernel_constraint=NonNeg(), activation="softplus",
                       kernel_initializer=initializer,
                       bias_initializer='zeros')(net)
    net = layers.Dense(width, kernel_constraint=NonNeg(), activation="softplus",
                       kernel_initializer=initializer,
                       bias_initializer='zeros')(net)
    outputs = layers.Dense(1, kernel_constraint=NonNeg(),
                          kernel_initializer=initializer,
                          bias_initializer='zeros')(net)
    model = keras.Model(inputs=[inputs], outputs=[outputs])
    model.summary()
    model.compile(loss="mean_squared_error", optimizer='adam', metrics=['mean_absolute_error'])
    return model
def create_modelMK4_ICNN():
    """Build an input convex neural network (ICNN, Amos et al. style):
    the z-path weights are constrained nonnegative and the raw input x is
    fed into every layer through an unconstrained passthrough, which keeps
    the network output convex in its input."""
    # Define the input
    weightIniMean = 0.0
    weightIniStdDev = 0.05
    # Define LayerDimensions
    # inputDim = 1
    layerDim = 3
    # Weight initializers: nonnegative for the convex z-path, symmetric for
    # the x-passthrough
    initializerNonNeg = tf.keras.initializers.RandomUniform(minval=0, maxval=0.5, seed=None)
    initializer = tf.keras.initializers.RandomUniform(minval=-0.5, maxval=0.5, seed=None)
    def convexLayer(layerInput_z: Tensor, netInput_x: Tensor) -> Tensor:
        """Hidden ICNN layer: softplus(W_z z + W_x x + b) with W_z >= 0."""
        # Weighted sum of previous layers output plus bias
        weightedNonNegSum_z = layers.Dense(layerDim, kernel_constraint=NonNeg(), activation=None,
                                           kernel_initializer=initializerNonNeg,
                                           use_bias=True,
                                           bias_initializer='zeros'
                                           # name='in_z_NN_Dense'
                                           )(layerInput_z)
        # Weighted sum of network input
        weightedSum_x = layers.Dense(layerDim, activation=None,
                                     kernel_initializer=initializer,
                                     use_bias=False
                                     # name='in_x_Dense'
                                     )(netInput_x)
        # Wz+Wx+b
        intermediateSum = layers.Add()([weightedSum_x, weightedNonNegSum_z])
        # activation
        out = tf.keras.activations.softplus(intermediateSum)
        # batch normalization
        # out = layers.BatchNormalization()(out)
        return out
    def convexLayerOutput(layerInput_z: Tensor, netInput_x: Tensor) -> Tensor:
        """Output ICNN layer: same structure as convexLayer but width 1."""
        # Weighted sum of previous layers output plus bias
        weightedNonNegSum_z = layers.Dense(1, kernel_constraint=NonNeg(), activation=None,
                                           kernel_initializer=initializerNonNeg,
                                           use_bias=True,
                                           bias_initializer='zeros'
                                           # name='in_z_NN_Dense'
                                           )(layerInput_z)
        # Weighted sum of network input
        weightedSum_x = layers.Dense(1, activation=None,
                                     kernel_initializer=initializer,
                                     use_bias=False
                                     # name='in_x_Dense'
                                     )(netInput_x)
        # Wz+Wx+b
        intermediateSum = layers.Add()([weightedSum_x, weightedNonNegSum_z])
        # activation
        out = tf.keras.activations.softplus(intermediateSum)
        # batch normalization
        # out = layers.BatchNormalization()(out)
        return out
    # Number of basis functions used:
    input_ = keras.Input(shape=(1,))
    ### Hidden layers ###
    # First Layer is a std dense layer (no convexity constraint needed on
    # weights applied directly to the input)
    hidden = layers.Dense(3, activation="softplus",
                          kernel_initializer=initializer,
                          bias_initializer='zeros'
                          )(input_)
    # other layers are convexLayers
    hidden = convexLayer(hidden, input_)
    hidden = convexLayer(hidden, input_)
    output_ = convexLayerOutput(hidden, input_)  # outputlayer
    # Create the model
    model = keras.Model(inputs=[input_], outputs=[output_])
    model.summary()
    # model.compile(loss=cLoss_FONC_varD(quadOrder,BasisDegree), optimizer='adam')#, metrics=[custom_loss1dMB, custom_loss1dMBPrime])
    model.compile(loss="mean_squared_error", optimizer='adam', metrics=['mean_absolute_error'])
    return model
def createTrainingData(x):
    """Target function for the fit: f(x) = -x^2 / 2 (concave quadratic)."""
    return -(x * x) / 2.0
def loadTrainingData():
    """Load training moments u and entropy values h from CSV files.

    Reads ``trainingData_M0_u.csv`` (moments) and ``trainingData_M0_h.csv``
    (entropy values) from the current working directory.

    Returns
    -------
    tuple of np.ndarray
        ``(u, h)`` -- moments first, entropy values second.
    """
    filenameU = "trainingData_M0_u.csv"
    filenameH = "trainingData_M0_h.csv"

    def _readCsvMatrix(path):
        # Parse one CSV file into a list of float rows.
        rows = []
        with open(path, 'r') as f:  # `with` guarantees the handle is closed
            for row in csv.reader(f):
                rows.append([float(word) for word in row])
        return rows

    # NOTE: original comments had the two files swapped; h holds the
    # entropy values and u holds the moments.
    hList = _readCsvMatrix(filenameH)
    uList = _readCsvMatrix(filenameU)
    return (np.asarray(uList), np.asarray(hList))
if __name__ == '__main__':
main()
| [
"csv.reader",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ModelCheckpoint",
"matplotlib.pyplot.style.use",
"tensorflow.Variable",
"numpy.arange",
"tensorflow.keras.activations.softplus",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.Input",
"legacyCode.nnUtils.load... | [((898, 921), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""kitish"""'], {}), "('kitish')\n", (911, 921), True, 'import matplotlib.pyplot as plt\n'), ((1346, 1393), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (["(filename + '/model')"], {}), "(filename + '/model')\n", (1372, 1393), True, 'import tensorflow as tf\n'), ((1772, 1819), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (["(filename + '/model')"], {}), "(filename + '/model')\n", (1798, 1819), True, 'import tensorflow as tf\n'), ((2113, 2160), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (["(filename + '/model')"], {}), "(filename + '/model')\n", (2139, 2160), True, 'import tensorflow as tf\n'), ((2362, 2372), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2370, 2372), True, 'import matplotlib.pyplot as plt\n'), ((2424, 2455), 'numpy.arange', 'np.arange', (['(-100.0)', '(100.0)', '(0.001)'], {}), '(-100.0, 100.0, 0.001)\n', (2433, 2455), True, 'import numpy as np\n'), ((2464, 2494), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0], 1)'], {}), '(x, (x.shape[0], 1))\n', (2474, 2494), True, 'import numpy as np\n'), ((2509, 2523), 'tensorflow.Variable', 'tf.Variable', (['y'], {}), '(y)\n', (2520, 2523), True, 'import tensorflow as tf\n'), ((2983, 3005), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'gradients'], {}), '(x, gradients)\n', (2991, 3005), True, 'import matplotlib.pyplot as plt\n'), ((3052, 3080), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""function value"""'], {}), "('function value')\n", (3062, 3080), True, 'import matplotlib.pyplot as plt\n'), ((3085, 3110), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""input value"""'], {}), "('input value')\n", (3095, 3110), True, 'import matplotlib.pyplot as plt\n'), ((3115, 3191), 'matplotlib.pyplot.legend', 'plt.legend', (["['Model', 'Model Derivative', 'Target Fct', 'Target Derivative']"], {}), "(['Model', 'Model Derivative', 'Target 
Fct', 'Target Derivative'])\n", (3125, 3191), True, 'import matplotlib.pyplot as plt\n'), ((3197, 3207), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3205, 3207), True, 'import matplotlib.pyplot as plt\n'), ((3576, 3601), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(0.001)'], {}), '(-10, 10, 0.001)\n', (3585, 3601), True, 'import numpy as np\n'), ((3748, 3762), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (3756, 3762), True, 'import matplotlib.pyplot as plt\n'), ((3767, 3791), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'predictions'], {}), '(x, predictions)\n', (3775, 3791), True, 'import matplotlib.pyplot as plt\n'), ((3796, 3821), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'predictions2'], {}), '(x, predictions2)\n', (3804, 3821), True, 'import matplotlib.pyplot as plt\n'), ((3826, 3851), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'predictions3'], {}), '(x, predictions3)\n', (3834, 3851), True, 'import matplotlib.pyplot as plt\n'), ((3857, 3885), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""function value"""'], {}), "('function value')\n", (3867, 3885), True, 'import matplotlib.pyplot as plt\n'), ((3890, 3915), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""input value"""'], {}), "('input value')\n", (3900, 3915), True, 'import matplotlib.pyplot as plt\n'), ((3946, 4012), 'matplotlib.pyplot.legend', 'plt.legend', (["['quadratic function', 'FCNN', 'naive convex', 'ICNN']"], {}), "(['quadratic function', 'FCNN', 'naive convex', 'ICNN'])\n", (3956, 4012), True, 'import matplotlib.pyplot as plt\n'), ((4017, 4027), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4025, 4027), True, 'import matplotlib.pyplot as plt\n'), ((4487, 4515), 'numpy.arange', 'np.arange', (['(-5.0)', '(5.0)', '(0.0001)'], {}), '(-5.0, 5.0, 0.0001)\n', (4496, 4515), True, 'import numpy as np\n'), ((4859, 4947), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""loss"""', 'mode': '"""min"""', 'min_delta': 
'(1e-09)', 'patience': '(500)', 'verbose': '(10)'}), "(monitor='loss', mode='min', min_delta=1e-09, patience=500,\n verbose=10)\n", (4872, 4947), False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((5039, 5136), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(filename + '/best_model.h5')"], {'monitor': '"""loss"""', 'mode': '"""min"""', 'save_best_only': '(True)'}), "(filename + '/best_model.h5', monitor='loss', mode='min',\n save_best_only=True)\n", (5054, 5136), False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((5146, 5265), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(filename + '/model_quicksave.h5')"], {'monitor': '"""loss"""', 'mode': '"""min"""', 'save_best_only': '(False)', 'save_freq': '(500)'}), "(filename + '/model_quicksave.h5', monitor='loss', mode=\n 'min', save_best_only=False, save_freq=500)\n", (5161, 5265), False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((5713, 5760), 'legacyCode.nnUtils.save_training', 'nnUtils.save_training', (['filename', 'model', 'history'], {}), '(filename, model, history)\n', (5734, 5760), True, 'import legacyCode.nnUtils as nnUtils\n'), ((5837, 5872), 'legacyCode.nnUtils.load_trainHistory', 'nnUtils.load_trainHistory', (['filename'], {}), '(filename)\n', (5862, 5872), True, 'import legacyCode.nnUtils as nnUtils\n'), ((6217, 6288), 'tensorflow.keras.initializers.RandomUniform', 'tf.keras.initializers.RandomUniform', ([], {'minval': '(-0.5)', 'maxval': '(0.5)', 'seed': 'None'}), '(minval=-0.5, maxval=0.5, seed=None)\n', (6252, 6288), True, 'import tensorflow as tf\n'), ((6328, 6351), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (6339, 6351), False, 'from tensorflow import keras\n'), ((7490, 7537), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[input_]', 'outputs': '[output_]'}), '(inputs=[input_], outputs=[output_])\n', (7501, 
7537), False, 'from tensorflow import keras\n'), ((8003, 8071), 'tensorflow.keras.initializers.RandomUniform', 'tf.keras.initializers.RandomUniform', ([], {'minval': '(0)', 'maxval': '(0.5)', 'seed': 'None'}), '(minval=0, maxval=0.5, seed=None)\n', (8038, 8071), True, 'import tensorflow as tf\n'), ((8086, 8109), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (8097, 8109), False, 'from tensorflow import keras\n'), ((9117, 9164), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[input_]', 'outputs': '[output_]'}), '(inputs=[input_], outputs=[output_])\n', (9128, 9164), False, 'from tensorflow import keras\n'), ((9653, 9721), 'tensorflow.keras.initializers.RandomUniform', 'tf.keras.initializers.RandomUniform', ([], {'minval': '(0)', 'maxval': '(0.5)', 'seed': 'None'}), '(minval=0, maxval=0.5, seed=None)\n', (9688, 9721), True, 'import tensorflow as tf\n'), ((9740, 9811), 'tensorflow.keras.initializers.RandomUniform', 'tf.keras.initializers.RandomUniform', ([], {'minval': '(-0.5)', 'maxval': '(0.5)', 'seed': 'None'}), '(minval=-0.5, maxval=0.5, seed=None)\n', (9775, 9811), True, 'import tensorflow as tf\n'), ((12201, 12224), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (12212, 12224), False, 'from tensorflow import keras\n'), ((12705, 12752), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[input_]', 'outputs': '[output_]'}), '(inputs=[input_], outputs=[output_])\n', (12716, 12752), False, 'from tensorflow import keras\n'), ((2534, 2551), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2549, 2551), True, 'import tensorflow as tf\n'), ((6704, 6803), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(3)'], {'activation': '"""softplus"""', 'kernel_initializer': 'initializer', 'bias_initializer': '"""ones"""'}), "(3, activation='softplus', kernel_initializer=initializer,\n bias_initializer='ones')\n", (6716, 6803), False, 'from tensorflow.keras import 
layers\n'), ((6873, 6972), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(3)'], {'activation': '"""softplus"""', 'kernel_initializer': 'initializer', 'bias_initializer': '"""ones"""'}), "(3, activation='softplus', kernel_initializer=initializer,\n bias_initializer='ones')\n", (6885, 6972), False, 'from tensorflow.keras import layers\n'), ((7069, 7168), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(3)'], {'activation': '"""softplus"""', 'kernel_initializer': 'initializer', 'bias_initializer': '"""ones"""'}), "(3, activation='softplus', kernel_initializer=initializer,\n bias_initializer='ones')\n", (7081, 7168), False, 'from tensorflow.keras import layers\n'), ((7291, 7363), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'kernel_initializer': 'initializer', 'bias_initializer': '"""ones"""'}), "(1, kernel_initializer=initializer, bias_initializer='ones')\n", (7303, 7363), False, 'from tensorflow.keras import layers\n'), ((8196, 8304), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['layerDim'], {'activation': '"""softplus"""', 'kernel_initializer': 'initializer', 'bias_initializer': '"""zeros"""'}), "(layerDim, activation='softplus', kernel_initializer=\n initializer, bias_initializer='zeros')\n", (8208, 8304), False, 'from tensorflow.keras import layers\n'), ((10839, 10885), 'tensorflow.keras.activations.softplus', 'tf.keras.activations.softplus', (['intermediateSum'], {}), '(intermediateSum)\n', (10868, 10885), True, 'import tensorflow as tf\n'), ((12003, 12049), 'tensorflow.keras.activations.softplus', 'tf.keras.activations.softplus', (['intermediateSum'], {}), '(intermediateSum)\n', (12032, 12049), True, 'import tensorflow as tf\n'), ((12304, 12404), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(3)'], {'activation': '"""softplus"""', 'kernel_initializer': 'initializer', 'bias_initializer': '"""zeros"""'}), "(3, activation='softplus', kernel_initializer=initializer,\n bias_initializer='zeros')\n", (12316, 12404), False, 'from 
tensorflow.keras import layers\n'), ((13324, 13337), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (13334, 13337), False, 'import csv\n'), ((13589, 13602), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (13599, 13602), False, 'import csv\n'), ((13773, 13790), 'numpy.asarray', 'np.asarray', (['uList'], {}), '(uList)\n', (13783, 13790), True, 'import numpy as np\n'), ((13792, 13809), 'numpy.asarray', 'np.asarray', (['hList'], {}), '(hList)\n', (13802, 13809), True, 'import numpy as np\n'), ((10439, 10530), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['layerDim'], {'activation': 'None', 'kernel_initializer': 'initializer', 'use_bias': '(False)'}), '(layerDim, activation=None, kernel_initializer=initializer,\n use_bias=False)\n', (10451, 10530), False, 'from tensorflow.keras import layers\n'), ((10752, 10764), 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), '()\n', (10762, 10764), False, 'from tensorflow.keras import layers\n'), ((11610, 11695), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': 'None', 'kernel_initializer': 'initializer', 'use_bias': '(False)'}), '(1, activation=None, kernel_initializer=initializer, use_bias=False\n )\n', (11622, 11695), False, 'from tensorflow.keras import layers\n'), ((11916, 11928), 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), '()\n', (11926, 11928), False, 'from tensorflow.keras import layers\n'), ((8441, 8449), 'tensorflow.keras.constraints.NonNeg', 'NonNeg', ([], {}), '()\n', (8447, 8449), False, 'from tensorflow.keras.constraints import NonNeg\n'), ((8673, 8681), 'tensorflow.keras.constraints.NonNeg', 'NonNeg', ([], {}), '()\n', (8679, 8681), False, 'from tensorflow.keras.constraints import NonNeg\n'), ((8923, 8931), 'tensorflow.keras.constraints.NonNeg', 'NonNeg', ([], {}), '()\n', (8929, 8931), False, 'from tensorflow.keras.constraints import NonNeg\n'), ((10016, 10024), 'tensorflow.keras.constraints.NonNeg', 'NonNeg', ([], {}), '()\n', (10022, 10024), False, 'from 
tensorflow.keras.constraints import NonNeg\n'), ((11187, 11195), 'tensorflow.keras.constraints.NonNeg', 'NonNeg', ([], {}), '()\n', (11193, 11195), False, 'from tensorflow.keras.constraints import NonNeg\n')] |
# Install lungmask from https://github.com/amrane99/lungmask using pip install git+https://github.com/amrane99/lungmask
from lungmask import mask
import SimpleITK as sitk
import os
import numpy as np
from mp.utils.load_restore import pkl_dump
from mp.paths import storage_data_path
import mp.data.datasets.dataset_utils as du
def LungSegmentation(input_path, target_path, gpu=False, cuda=0):
    """Segment the lungs of one CT scan and write the results to disk.

    Reads the scan at ``input_path``, applies the lungmask model (default
    U-net(R231)), and writes both the segmentation
    (``<name>_lung_seg.nii.gz``) and a copy of the scan (``<name>.nii.gz``)
    into ``target_path``.

    Returns:
        The segmentation as a numpy array (one slice per CT slice).
    """
    scan = sitk.ReadImage(input_path)
    segmentation = mask.apply(image=scan, gpu=gpu, cuda=cuda)  # default model is U-net(R231)
    # Alternative models could be loaded like this:
    # model = mask.get_model('unet','LTRCLobes')
    # segmentation = mask.apply(input_image, model)
    base_name = input_path.split('/')[-1].split('.nii')[0]
    seg_image = sitk.GetImageFromArray(segmentation)
    sitk.WriteImage(seg_image, os.path.join(target_path, base_name + "_lung_seg.nii.gz"))
    sitk.WriteImage(scan, os.path.join(target_path, base_name + ".nii.gz"))
    return segmentation
def _first_nonzero_slice(slices):
    """Return the index of the first slice with any nonzero pixel, or None
    if every slice is empty (helper)."""
    for idx, ct_slice in enumerate(slices):
        if np.count_nonzero(ct_slice):
            return idx
    return None


def calculateSegmentationVolume(original_file_path, scan_np):
    """Compute the segmented volume (in ml) of a lung segmentation mask.

    Args:
        original_file_path: path to the original NIfTI scan; its header
            supplies the voxel spacing used for the volume computation.
        scan_np: segmentation mask as a numpy array with shape
            (slices, ...); nonzero pixels count as segmented.

    Returns:
        tuple: ``(discard, segmentation_volume, start_seg_idx, end_seg_idx)``
            - discard: True when the segmentation touches the first or last
              slice, i.e. the lung is probably cut off.
            - segmentation_volume: segmented volume in ml (int).
            - start_seg_idx / end_seg_idx: 0-based first/last segmented
              slice index (None when not determined).
    """
    # Read the voxel geometry from the NIfTI header.
    reader = sitk.ImageFileReader()
    reader.SetFileName(original_file_path)
    reader.LoadPrivateTagsOn()
    reader.ReadImageInformation()
    voxel_x = float(reader.GetMetaData('pixdim[1]'))
    voxel_y = float(reader.GetMetaData('pixdim[2]'))
    voxel_z = float(reader.GetMetaData('pixdim[3]'))
    try:
        # If 10, this indicates mm and seconds
        voxel_unit = int(reader.GetMetaData('xyzt_units'))
    except Exception:
        # Field missing or empty -- assume mm.
        voxel_unit = 10
    scl_slope = float(reader.GetMetaData('scl_slope'))
    scl_inter = float(reader.GetMetaData('scl_inter'))
    # NOTE(review): applying scl_slope/scl_inter to the voxel *dimensions*
    # looks suspicious -- those header fields normally rescale voxel
    # intensities, not spacings. Behavior kept as-is; confirm intent.
    if scl_slope != 0:
        voxel_vol = (scl_slope * voxel_x + scl_inter)\
                    * (scl_slope * voxel_y + scl_inter)\
                    * (scl_slope * voxel_z + scl_inter)
    else:
        voxel_vol = voxel_x * voxel_y * voxel_z
    # Determine the first and last segmented slices (0-based). A
    # segmentation touching the very first or very last slice means the
    # lung is cut off at the scan boundary -> discard.
    discard = False
    start_seg_idx = None
    end_seg_idx = None
    first = _first_nonzero_slice(scan_np)
    if first == 0:
        discard = True
    elif first is not None:
        start_seg_idx = first
    first_from_end = _first_nonzero_slice(scan_np[::-1])
    if first_from_end == 0:
        discard = True
    elif first_from_end is not None:
        end_seg_idx = len(scan_np) - first_from_end - 1  # to get 0-based
    # Accumulate the segmented volume slice by slice using voxel_vol.
    segmentation_volume = 0
    for ct_slice in scan_np:
        segmentation_volume += np.count_nonzero(ct_slice) * voxel_vol
    if voxel_unit == 10:  # segmentation_volume is in mm^3
        segmentation_volume /= 1000  # to ml
    segmentation_volume = int(segmentation_volume)
    print("The segmentation has a volume of {} ml.".format(segmentation_volume))
    return discard, segmentation_volume, start_seg_idx, end_seg_idx
def CheckWholeLungCaptured(input_path, target_path, gpu=False, cuda=0):
    """Segment the lungs of one scan and assess whether the whole lung
    is captured.

    Returns the tuple from ``calculateSegmentationVolume``:
    ``(discard, segmentation_volume, start_seg_idx, end_seg_idx)``.
    """
    segmentation = LungSegmentation(input_path, target_path, gpu, cuda)
    return calculateSegmentationVolume(input_path, segmentation)
"""Grand Challenge Data"""
def GC(source_path, target_path, gpu=True, cuda=7):
    r"""Check lung-capture completeness for the Grand Challenge CT scans.

    Runs the lung segmentation on every ``volume-covid19-A-XXXX_ct.nii``
    file in *source_path*, prints a per-scan report, and pickles a dict
    ``{filename: [discard, tlc, start_seg_idx, end_seg_idx]}`` to
    *target_path* under the name ``'GC'``.

    Args:
        source_path: directory containing the raw CT scans.
        target_path: directory that receives the segmentations and the
            result pickle (created if missing).
        gpu, cuda: forwarded to the lungmask model.
    """
    # Filenames have the form 'volume-covid19-A-XXXX_ct.nii'; skip the
    # ground-truth segmentations ('_seg') and macOS resource forks ('._').
    filenames = [x for x in os.listdir(source_path) if 'covid19' in x
                 and '_seg' not in x and '._' not in x]
    # Create directories if not existing
    os.makedirs(target_path, exist_ok=True)
    result = dict()
    for filename in filenames:
        discard, tlc, start_seg_idx, end_seg_idx = CheckWholeLungCaptured(
            os.path.join(source_path, filename), target_path, gpu, cuda)
        if not discard:
            print("Based on start index of the segmentation {} and the end index of the segmentation {}, the whole lung should be captured.".format(start_seg_idx, end_seg_idx))
            print("Total Lung Capacity: {} ml.".format(tlc))
            # Thresholds might be adapted
            if 4000 < tlc < 4400:
                print("\n Based on the total lung capacity of {} ml, the CT scan might be from a woman, since it fits the average total lung capacity of a women (approx. 4200 ml).".format(tlc))
            if 5800 < tlc < 6200:
                print("\n Based on the total lung capacity of {} ml, the CT scan might be from a man, since it fits the average total lung capacity of a man (approx. 6000 ml).".format(tlc))
        else:
            print("Based on start index of the segmentation {} and the end index of the segmentation {}, the whole lung is not captured.".format(start_seg_idx, end_seg_idx))
        result[filename] = [discard, tlc, start_seg_idx, end_seg_idx]
    # Save dict
    pkl_dump(result, 'GC', path=target_path)
"""Decathlon Lung Data"""
def Decathlon(source_path, target_path, gpu=True, cuda=7):
    r"""Check lung-capture completeness for the Decathlon Lung CT scans.

    Runs the lung segmentation on every ``lung_XXX.nii.gz`` file in
    ``source_path/imagesTr``, prints a per-scan report, and pickles a dict
    ``{filename: [discard, tlc, start_seg_idx, end_seg_idx]}`` to
    *target_path* under the name ``'Decathlon'``.

    Args:
        source_path: dataset root containing the 'imagesTr' directory.
        target_path: directory that receives the segmentations and the
            result pickle (created if missing).
        gpu, cuda: forwarded to the lungmask model.
    """
    images_path = os.path.join(source_path, 'imagesTr')
    # Filenames have the form 'lung_XXX.nii.gz'
    filenames = [x for x in os.listdir(images_path) if x.startswith('lung')]
    # Create directories if not existing
    os.makedirs(target_path, exist_ok=True)
    result = dict()
    for filename in filenames:
        discard, tlc, start_seg_idx, end_seg_idx = CheckWholeLungCaptured(
            os.path.join(images_path, filename), target_path, gpu, cuda)
        if not discard:
            print("Based on start index of the segmentation {} and the end index of the segmentation {}, the whole lung should be captured.".format(start_seg_idx, end_seg_idx))
            print("Total Lung Capacity: {} ml.".format(tlc))
            # Thresholds might be adapted
            if 4000 < tlc < 4400:
                print("\n Based on the total lung capacity of {} ml, the CT scan might be from a woman, since it fits the average total lung capacity of a women (approx. 4200 ml).".format(tlc))
            if 5800 < tlc < 6200:
                print("\n Based on the total lung capacity of {} ml, the CT scan might be from a man, since it fits the average total lung capacity of a man (approx. 6000 ml).".format(tlc))
        else:
            print("Based on start index of the segmentation {} and the end index of the segmentation {}, the whole lung is not captured.".format(start_seg_idx, end_seg_idx))
        result[filename] = [discard, tlc, start_seg_idx, end_seg_idx]
    # Save dict
    pkl_dump(result, 'Decathlon', path=target_path)
if __name__ == '__main__':
    # --- Grand Challenge dataset: train split ---
    gc_name = 'GC_Corona'
    gc_train_out = os.path.join(storage_data_path, gc_name, 'Train')
    gc_train_src = os.path.join(du.get_original_data_path(gc_name), 'Train')
    print("Start with Grand Challenge train dataset.")
    GC(gc_train_src, gc_train_out)
    # --- Grand Challenge dataset: validation split ---
    gc_val_out = os.path.join(storage_data_path, gc_name, 'Validation')
    gc_val_src = os.path.join(du.get_original_data_path(gc_name), 'Validation')
    print("Start with Grand Challenge validation dataset.")
    GC(gc_val_src, gc_val_out)
    # --- Decathlon Lung dataset ---
    dec_name = 'DecathlonLung'
    dec_out = os.path.join(storage_data_path, dec_name)
    dec_src = du.get_original_data_path(dec_name)
    print("Start with Decathlon Lung dataset.")
    Decathlon(dec_src, dec_out)
"lungmask.mask.apply",
"os.makedirs",
"SimpleITK.ImageFileReader",
"mp.utils.load_restore.pkl_dump",
"SimpleITK.ReadImage",
"os.path.isdir",
"mp.data.datasets.dataset_utils.get_original_data_path",
"numpy.nonzero",
"SimpleITK.GetImageFromArray",
"os.path.join",
"os.listdir"
] | [((449, 475), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['input_path'], {}), '(input_path)\n', (463, 475), True, 'import SimpleITK as sitk\n'), ((492, 541), 'lungmask.mask.apply', 'mask.apply', ([], {'image': 'input_image', 'gpu': 'gpu', 'cuda': 'cuda'}), '(image=input_image, gpu=gpu, cuda=cuda)\n', (502, 541), False, 'from lungmask import mask\n'), ((1174, 1196), 'SimpleITK.ImageFileReader', 'sitk.ImageFileReader', ([], {}), '()\n', (1194, 1196), True, 'import SimpleITK as sitk\n'), ((5177, 5217), 'mp.utils.load_restore.pkl_dump', 'pkl_dump', (['result', '"""GC"""'], {'path': 'target_path'}), "(result, 'GC', path=target_path)\n", (5185, 5217), False, 'from mp.utils.load_restore import pkl_dump\n'), ((5379, 5416), 'os.path.join', 'os.path.join', (['source_path', '"""imagesTr"""'], {}), "(source_path, 'imagesTr')\n", (5391, 5416), False, 'import os\n'), ((6781, 6828), 'mp.utils.load_restore.pkl_dump', 'pkl_dump', (['result', '"""Decathlon"""'], {'path': 'target_path'}), "(result, 'Decathlon', path=target_path)\n", (6789, 6828), False, 'from mp.utils.load_restore import pkl_dump\n'), ((6934, 6987), 'os.path.join', 'os.path.join', (['storage_data_path', 'global_name', '"""Train"""'], {}), "(storage_data_path, global_name, 'Train')\n", (6946, 6987), False, 'import os\n'), ((7179, 7237), 'os.path.join', 'os.path.join', (['storage_data_path', 'global_name', '"""Validation"""'], {}), "(storage_data_path, global_name, 'Validation')\n", (7191, 7237), False, 'import os\n'), ((7507, 7551), 'os.path.join', 'os.path.join', (['storage_data_path', 'global_name'], {}), '(storage_data_path, global_name)\n', (7519, 7551), False, 'import os\n'), ((7574, 7612), 'mp.data.datasets.dataset_utils.get_original_data_path', 'du.get_original_data_path', (['global_name'], {}), '(global_name)\n', (7599, 7612), True, 'import mp.data.datasets.dataset_utils as du\n'), ((771, 807), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['segmentation'], {}), '(segmentation)\n', (793, 807), 
True, 'import SimpleITK as sitk\n'), ((809, 866), 'os.path.join', 'os.path.join', (['target_path', "(file_name + '_lung_seg.nii.gz')"], {}), "(target_path, file_name + '_lung_seg.nii.gz')\n", (821, 866), False, 'import os\n'), ((896, 944), 'os.path.join', 'os.path.join', (['target_path', "(file_name + '.nii.gz')"], {}), "(target_path, file_name + '.nii.gz')\n", (908, 944), False, 'import os\n'), ((3976, 4002), 'os.path.isdir', 'os.path.isdir', (['target_path'], {}), '(target_path)\n', (3989, 4002), False, 'import os\n'), ((4006, 4030), 'os.makedirs', 'os.makedirs', (['target_path'], {}), '(target_path)\n', (4017, 4030), False, 'import os\n'), ((5579, 5605), 'os.path.isdir', 'os.path.isdir', (['target_path'], {}), '(target_path)\n', (5592, 5605), False, 'import os\n'), ((5609, 5633), 'os.makedirs', 'os.makedirs', (['target_path'], {}), '(target_path)\n', (5620, 5633), False, 'import os\n'), ((7023, 7061), 'mp.data.datasets.dataset_utils.get_original_data_path', 'du.get_original_data_path', (['global_name'], {}), '(global_name)\n', (7048, 7061), True, 'import mp.data.datasets.dataset_utils as du\n'), ((7273, 7311), 'mp.data.datasets.dataset_utils.get_original_data_path', 'du.get_original_data_path', (['global_name'], {}), '(global_name)\n', (7298, 7311), True, 'import mp.data.datasets.dataset_utils as du\n'), ((2254, 2274), 'numpy.nonzero', 'np.nonzero', (['ct_slice'], {}), '(ct_slice)\n', (2264, 2274), True, 'import numpy as np\n'), ((2549, 2569), 'numpy.nonzero', 'np.nonzero', (['ct_slice'], {}), '(ct_slice)\n', (2559, 2569), True, 'import numpy as np\n'), ((2907, 2927), 'numpy.nonzero', 'np.nonzero', (['ct_slice'], {}), '(ct_slice)\n', (2917, 2927), True, 'import numpy as np\n'), ((3843, 3866), 'os.listdir', 'os.listdir', (['source_path'], {}), '(source_path)\n', (3853, 3866), False, 'import os\n'), ((4161, 4196), 'os.path.join', 'os.path.join', (['source_path', 'filename'], {}), '(source_path, filename)\n', (4173, 4196), False, 'import os\n'), ((5488, 5511), 
'os.listdir', 'os.listdir', (['images_path'], {}), '(images_path)\n', (5498, 5511), False, 'import os\n'), ((5766, 5801), 'os.path.join', 'os.path.join', (['images_path', 'filename'], {}), '(images_path, filename)\n', (5778, 5801), False, 'import os\n')] |
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from globalvars import *
class LineDetect:
    """
    Detect lane lines in a warped (bird's-eye-view) binary image.

    Two search strategies are provided:
      * ``fit_polynomial`` -- blind sliding-window search from a histogram.
      * ``search_around_poly`` -- targeted search in a margin around the
        polynomials found by a previous ``fit_polynomial`` call.
    """
    def __init__(self):
        # x values of the fitted left/right polynomials, evaluated at ploty.
        self.left_fitx = []
        self.right_fitx = []
        self.ploty = []
        # 2nd-order polynomial coefficients for each lane line.
        self.right_fit = []
        self.left_fit = []
        # Last warped binary image processed.
        self.warped = []
    def find_lane_pixels(self, binary_warped):
        """Locate left/right lane pixel coordinates via sliding windows.

        Returns:
            (leftx, lefty, rightx, righty, out_img) -- pixel coordinate
            arrays per lane plus an RGB visualization with the search
            windows drawn in.
        """
        # Histogram of the bottom half: the lane lines show up as two peaks.
        histogram = np.sum(binary_warped[binary_warped.shape[0]//2:, :], axis=0)
        # Create an output image to draw on and visualize the result
        out_img = np.dstack((binary_warped, binary_warped, binary_warped))
        # Peaks left/right of center are the starting columns for the search.
        # (np.int was removed in NumPy >= 1.24; use the builtin int instead.)
        midpoint = int(histogram.shape[0]//2)
        leftx_base = np.argmax(histogram[:midpoint])
        rightx_base = np.argmax(histogram[midpoint:]) + midpoint
        # HYPERPARAMETERS (from globalvars)
        nwindows = NWINDOWS  # number of sliding windows
        margin = MARGIN      # window half-width +/- in pixels
        minpix = MINPIX      # min pixels found to recenter a window
        # Set height of windows - based on nwindows above and image shape
        window_height = int(binary_warped.shape[0]//nwindows)
        # Coordinates of all activated (nonzero) pixels in the image.
        nonzero = binary_warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        # Current window centers, updated as the windows march upward.
        leftx_current = leftx_base
        rightx_current = rightx_base
        left_lane_inds = []
        right_lane_inds = []
        for window in range(nwindows):
            # Window boundaries in x and y (right and left lanes).
            win_y_low = binary_warped.shape[0] - (window + 1) * window_height
            win_y_high = binary_warped.shape[0] - window * window_height
            win_xleft_low = leftx_current - margin
            win_xleft_high = leftx_current + margin
            win_xright_low = rightx_current - margin
            win_xright_high = rightx_current + margin
            # Draw the windows on the visualization image.
            cv2.rectangle(out_img, (win_xleft_low, win_y_low),
                          (win_xleft_high, win_y_high), (0, 255, 0), 2)
            cv2.rectangle(out_img, (win_xright_low, win_y_low),
                          (win_xright_high, win_y_high), (0, 255, 0), 2)
            # Activated pixels falling inside each window.
            good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                              (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
            good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                               (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
            left_lane_inds.append(good_left_inds)
            right_lane_inds.append(good_right_inds)
            # Re-center the next window on the mean x of the pixels found.
            if len(good_left_inds) > minpix:
                leftx_current = int(np.mean(nonzerox[good_left_inds]))
            if len(good_right_inds) > minpix:
                rightx_current = int(np.mean(nonzerox[good_right_inds]))
        # Concatenate the arrays of indices (previously a list of arrays).
        try:
            left_lane_inds = np.concatenate(left_lane_inds)
            right_lane_inds = np.concatenate(right_lane_inds)
        except ValueError:
            # Empty lists (e.g. nwindows == 0); leave indices as-is.
            pass
        # Extract left and right line pixel positions
        leftx = nonzerox[left_lane_inds]
        lefty = nonzeroy[left_lane_inds]
        rightx = nonzerox[right_lane_inds]
        righty = nonzeroy[right_lane_inds]
        return leftx, lefty, rightx, righty, out_img
    def fit_polynomial(self, binary_warped):
        """Blind search: sliding windows + 2nd-order polynomial fit.

        Returns:
            (left_fitx, right_fitx, ploty, left_fit, right_fit), or five
            Nones when no polynomial could be fitted.
        """
        self.warped = binary_warped
        # Find our lane pixels first
        leftx, lefty, rightx, righty, out_img = self.find_lane_pixels(binary_warped)
        try:
            left_fit = np.polyfit(lefty, leftx, 2)
            right_fit = np.polyfit(righty, rightx, 2)
        except TypeError:
            # Not enough pixels found to fit a polynomial.
            return None, None, None, None, None
        # Generate x and y values for plotting
        ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
        try:
            left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
            right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
        except TypeError:
            # Avoids an error if `left` and `right_fit` are still none or incorrect
            print('The function failed to fit a line!')
            left_fitx = 1*ploty**2 + 1*ploty
            right_fitx = 1*ploty**2 + 1*ploty
        # Remember the fits so search_around_poly() can reuse them.
        self.left_fit = left_fit
        self.right_fit = right_fit
        return left_fitx, right_fitx, ploty, left_fit, right_fit
    def fit_poly(self, img_shape, leftx, lefty, rightx, righty):
        """Fit 2nd-order polynomials to the given pixel coordinates and
        evaluate them over every image row.

        Returns:
            (left_fitx, right_fitx, ploty); the x arrays are None when the
            fit evaluation fails.
        """
        left_fit = np.polyfit(lefty, leftx, 2)
        right_fit = np.polyfit(righty, rightx, 2)
        # Generate x and y values for plotting
        ploty = np.linspace(0, img_shape[0] - 1, img_shape[0])
        try:
            left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
            right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
        except TypeError:
            left_fitx = None
            right_fitx = None
            print("Failed to fit a line")
        self.left_fitx = left_fitx
        self.right_fitx = right_fitx
        self.ploty = ploty
        return left_fitx, right_fitx, ploty
    def search_around_poly(self, binary_warped):
        """Targeted search in a margin around the previously stored fits.

        Requires ``self.left_fit`` / ``self.right_fit`` to be populated
        (e.g. by a prior ``fit_polynomial`` call).

        Returns:
            (left_fitx, right_fitx, ploty) for the refreshed fits.
        """
        # HYPERPARAMETER
        margin = MARGIN_SEARCH_POLY
        self.warped = binary_warped
        # Grab activated pixels
        nonzero = binary_warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        # Fix: the original referenced undefined globals `left_fit`/
        # `right_fit`; use the fits stored on the instance instead.
        left_fit = self.left_fit
        right_fit = self.right_fit
        left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
                          left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
                          left_fit[1]*nonzeroy + left_fit[2] + margin)))
        right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
                           right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
                           right_fit[1]*nonzeroy + right_fit[2] + margin)))
        # Again, extract left and right line pixel positions
        leftx = nonzerox[left_lane_inds]
        lefty = nonzeroy[left_lane_inds]
        rightx = nonzerox[right_lane_inds]
        righty = nonzeroy[right_lane_inds]
        # Fit new polynomials
        left_fitx, right_fitx, ploty = self.fit_poly(binary_warped.shape, leftx, lefty,
                                                     rightx, righty)
        return left_fitx, right_fitx, ploty
    def measure_curvature_real(self, left_fitx, right_fitx, ploty):
        '''
        Calculates the curvature of polynomial functions in meters.

        Refits both lanes in world space (meters) and evaluates the radius
        of curvature at the bottom of the image.

        Returns:
            (left_curverad, right_curverad) in meters.
        '''
        # Conversions from pixel space to meters (from globalvars).
        ym_per_pix = YM_PER_PIX  # meters per pixel in y dimension
        xm_per_pix = XM_PER_PIX  # meters per pixel in x dimension
        # Radius of curvature is evaluated at the maximum y-value,
        # corresponding to the bottom of the image.
        y_eval = np.max(ploty)
        left_fit_cr = np.polyfit(ploty * ym_per_pix, left_fitx*xm_per_pix, 2)
        right_fit_cr = np.polyfit(ploty * ym_per_pix, right_fitx* xm_per_pix, 2)
        left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) \
                        / np.absolute(2*left_fit_cr[0])
        right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) \
                         / np.absolute(2*right_fit_cr[0])
        return left_curverad, right_curverad
    def offset_center(self, left_fitx, right_fitx):
        """Return the absolute offset of the car from the lane center, in
        meters, assuming the camera is mounted at the image center."""
        xm_per_pix = XM_PER_PIX
        # Lane center at the bottom row of the last processed image.
        center_lane = (right_fitx[self.warped.shape[0]-1] + left_fitx[self.warped.shape[0]-1]) / 2
        center_car = self.warped.shape[1] // 2
        center_offset_pixels = abs(center_car - center_lane)
        center_offset_meters = xm_per_pix * center_offset_pixels
        return center_offset_meters
| [
"numpy.dstack",
"numpy.absolute",
"numpy.sum",
"numpy.argmax",
"numpy.polyfit",
"numpy.max",
"numpy.mean",
"numpy.array",
"numpy.int",
"numpy.linspace",
"cv2.rectangle",
"numpy.concatenate"
] | [((526, 588), 'numpy.sum', 'np.sum', (['binary_warped[binary_warped.shape[0] // 2:, :]'], {'axis': '(0)'}), '(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)\n', (532, 588), True, 'import numpy as np\n'), ((673, 729), 'numpy.dstack', 'np.dstack', (['(binary_warped, binary_warped, binary_warped)'], {}), '((binary_warped, binary_warped, binary_warped))\n', (682, 729), True, 'import numpy as np\n'), ((891, 922), 'numpy.int', 'np.int', (['(histogram.shape[0] // 2)'], {}), '(histogram.shape[0] // 2)\n', (897, 922), True, 'import numpy as np\n'), ((942, 973), 'numpy.argmax', 'np.argmax', (['histogram[:midpoint]'], {}), '(histogram[:midpoint])\n', (951, 973), True, 'import numpy as np\n'), ((1402, 1444), 'numpy.int', 'np.int', (['(binary_warped.shape[0] // nwindows)'], {}), '(binary_warped.shape[0] // nwindows)\n', (1408, 1444), True, 'import numpy as np\n'), ((1580, 1600), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (1588, 1600), True, 'import numpy as np\n'), ((1620, 1640), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (1628, 1640), True, 'import numpy as np\n'), ((4593, 4659), 'numpy.linspace', 'np.linspace', (['(0)', '(binary_warped.shape[0] - 1)', 'binary_warped.shape[0]'], {}), '(0, binary_warped.shape[0] - 1, binary_warped.shape[0])\n', (4604, 4659), True, 'import numpy as np\n'), ((5246, 5273), 'numpy.polyfit', 'np.polyfit', (['lefty', 'leftx', '(2)'], {}), '(lefty, leftx, 2)\n', (5256, 5273), True, 'import numpy as np\n'), ((5294, 5323), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (5304, 5323), True, 'import numpy as np\n'), ((5387, 5433), 'numpy.linspace', 'np.linspace', (['(0)', '(img_shape[0] - 1)', 'img_shape[0]'], {}), '(0, img_shape[0] - 1, img_shape[0])\n', (5398, 5433), True, 'import numpy as np\n'), ((6143, 6163), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (6151, 6163), True, 'import numpy as np\n'), ((6183, 6203), 'numpy.array', 
'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (6191, 6203), True, 'import numpy as np\n'), ((7905, 7918), 'numpy.max', 'np.max', (['ploty'], {}), '(ploty)\n', (7911, 7918), True, 'import numpy as np\n'), ((7951, 8008), 'numpy.polyfit', 'np.polyfit', (['(ploty * ym_per_pix)', '(left_fitx * xm_per_pix)', '(2)'], {}), '(ploty * ym_per_pix, left_fitx * xm_per_pix, 2)\n', (7961, 8008), True, 'import numpy as np\n'), ((8030, 8088), 'numpy.polyfit', 'np.polyfit', (['(ploty * ym_per_pix)', '(right_fitx * xm_per_pix)', '(2)'], {}), '(ploty * ym_per_pix, right_fitx * xm_per_pix, 2)\n', (8040, 8088), True, 'import numpy as np\n'), ((996, 1027), 'numpy.argmax', 'np.argmax', (['histogram[midpoint:]'], {}), '(histogram[midpoint:])\n', (1005, 1027), True, 'import numpy as np\n'), ((2515, 2615), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_xleft_low, win_y_low)', '(win_xleft_high, win_y_high)', '(0, 255, 0)', '(2)'], {}), '(out_img, (win_xleft_low, win_y_low), (win_xleft_high,\n win_y_high), (0, 255, 0), 2)\n', (2528, 2615), False, 'import cv2\n'), ((2633, 2735), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_xright_low, win_y_low)', '(win_xright_high, win_y_high)', '(0, 255, 0)', '(2)'], {}), '(out_img, (win_xright_low, win_y_low), (win_xright_high,\n win_y_high), (0, 255, 0), 2)\n', (2646, 2735), False, 'import cv2\n'), ((3648, 3678), 'numpy.concatenate', 'np.concatenate', (['left_lane_inds'], {}), '(left_lane_inds)\n', (3662, 3678), True, 'import numpy as np\n'), ((3709, 3740), 'numpy.concatenate', 'np.concatenate', (['right_lane_inds'], {}), '(right_lane_inds)\n', (3723, 3740), True, 'import numpy as np\n'), ((4374, 4401), 'numpy.polyfit', 'np.polyfit', (['lefty', 'leftx', '(2)'], {}), '(lefty, leftx, 2)\n', (4384, 4401), True, 'import numpy as np\n'), ((4426, 4455), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (4436, 4455), True, 'import numpy as np\n'), ((8217, 8248), 'numpy.absolute', 'np.absolute', (['(2 * 
left_fit_cr[0])'], {}), '(2 * left_fit_cr[0])\n', (8228, 8248), True, 'import numpy as np\n'), ((8373, 8405), 'numpy.absolute', 'np.absolute', (['(2 * right_fit_cr[0])'], {}), '(2 * right_fit_cr[0])\n', (8384, 8405), True, 'import numpy as np\n'), ((3353, 3386), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (3360, 3386), True, 'import numpy as np\n'), ((3482, 3516), 'numpy.mean', 'np.mean', (['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (3489, 3516), True, 'import numpy as np\n')] |
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import random
import numpy as np
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
import scipy
import scipy.io as sio
from scipy.io import wavfile
from scipy import spatial
import evolvetools as et
from playsound import playsound
import difflib
def RepresentsInt(s):
    """Return True if *s* can be converted to an int, else False.

    Also catches TypeError so non-string inputs such as None return
    False instead of crashing (original only caught ValueError).
    """
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        return False
def mutate_arr(arr):
    """Return a mutated copy of *arr*.

    Each element is passed through ``et.mutation`` with a mutation
    probability of 0.5; the input array is left untouched.
    """
    # empty_like avoids the needless zero-fill; every slot is overwritten below.
    mutant = np.empty_like(arr)
    for i, s in enumerate(arr):
        mutant[i] = et.mutation(s, 0.5)
    return mutant
def initIndividual(icls, data):
    """Build one individual of class *icls* from a mutated copy of *data*."""
    return icls(mutate_arr(data))
def initPopulation(pcls, ind_init, data, n):
    """Build a population container of *n* individuals, each freshly
    initialised from *data* via *ind_init*."""
    individuals = (ind_init(data) for _ in range(n))
    return pcls(individuals)
def evalSound(individual):
    """Interactive DEAP fitness: play *individual* as audio and ask the
    user for a 0-100 rating. Returns a 1-tuple as DEAP requires."""
    rate = 44100
    sio.wavfile.write("indiv.wav", rate, individual)
    playsound("indiv.wav")
    print("How did that sound?")
    score = input("Enter integer between 0-100)")
    if RepresentsInt(score):
        return int(score),
    print("score not number, setting to 0 lmao")
    return 0,
def evalData2Data(individual, target):
    """DEAP fitness: cosine similarity between *individual* and *target*.

    ``1 - cosine distance`` lies in [-1, 1]; values outside [0, 1]
    (including NaN from a zero-norm input, which fails both comparisons)
    are clamped to 0. Returns a 1-tuple as DEAP requires.
    """
    result = 1 - spatial.distance.cosine(individual, target)  # similarity
    if not 0 <= result <= 1:
        result = 0
    return result,
def cxTwoPointCopy(ind1, ind2):
"""Execute a two points crossover with copy on the input individuals. The
copy is required because the slicing in numpy returns a view of the data,
which leads to a self overwritting in the swap operation. It prevents
::
>>> import numpy
>>> a = numpy.array((1,2,3,4))
>>> b = numpy.array((5,6,7,8))
>>> a[1:3], b[1:3] = b[1:3], a[1:3]
>>> print(a)
[1 6 7 4]
>>> print(b)
[5 6 7 8]
"""
size = len(ind1)
cxpoint1 = random.randint(1, size)
cxpoint2 = random.randint(1, size - 1)
if cxpoint2 >= cxpoint1:
cxpoint2 += 1
else: # Swap the two cx points
cxpoint1, cxpoint2 = cxpoint2, cxpoint1
ind1[cxpoint1:cxpoint2], ind2[cxpoint1:cxpoint2] \
= ind2[cxpoint1:cxpoint2].copy(), ind1[cxpoint1:cxpoint2].copy()
return ind1, ind2
# --- Module-level setup: load seed/target audio and configure the DEAP toolbox ---
seed_audio_name = 'bruh2.wav'
rate, data = sio.wavfile.read(seed_audio_name)
target_audio_name = 'minecraft_oof.wav'
rate_t, data_t = sio.wavfile.read(target_audio_name)
# Truncate both signals to the shorter length so they can be compared sample-wise.
smaller = min(len(data), len(data_t)) - 1
data = data[:smaller, :]
data_t = data_t[:smaller, :]
# Keep only the first audio channel of each file.
data_ch1 = data[:, 0]
target_ch1 = data_t[:, 0]
print(len(data_ch1))
#sio.wavfile.write("test1.wav", 44100, data_ch1)
#sio.wavfile.write("test2.wav", 44100, target_ch1)
# Single-objective maximisation: fitness is the similarity to the target audio.
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", np.ndarray, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
# Individuals are mutated copies of the seed channel; a population is a list of them.
toolbox.register("individual", initIndividual, creator.Individual)
toolbox.register("population", initPopulation, list, toolbox.individual, data_ch1)
toolbox.register("evaluate", evalData2Data, target=target_ch1)
toolbox.register("mate", cxTwoPointCopy)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)
def main():
    """Run the (mu + lambda) evolutionary loop and write the best
    individual found to ``hof.wav``.

    Returns:
        tuple: (final population, statistics object, hall of fame)
    """
    random.seed(64)
    pop = toolbox.population(n=1000)
    # Numpy equality function (operators.eq) between two arrays returns the
    # equality element wise, which raises an exception in the if similar()
    # check of the hall of fame. Using a different equality function like
    # numpy.array_equal or numpy.allclose solve this issue.
    hof = tools.HallOfFame(1, similar=np.array_equal)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    # mu=10 survivors, lambda=1000 offspring per generation, for 50 generations.
    algorithms.eaMuPlusLambda(pop, toolbox, 10, 1000, cxpb=0.5, mutpb=0.5, ngen=50, stats=stats,
                              halloffame=hof)
    # Persist the best-ever individual as a 44.1 kHz wav file.
    sio.wavfile.write("hof.wav", 44100, hof[0])
    return pop, stats, hof
if __name__ == "__main__":
    pop, stats, hof = main()
| [
"playsound.playsound",
"scipy.spatial.distance.cosine",
"deap.base.Toolbox",
"random.randint",
"evolvetools.mutation",
"numpy.zeros",
"scipy.io.wavfile.write",
"scipy.io.wavfile.read",
"deap.tools.Statistics",
"deap.creator.create",
"random.seed",
"deap.algorithms.eaMuPlusLambda",
"deap.tool... | [((3230, 3263), 'scipy.io.wavfile.read', 'sio.wavfile.read', (['seed_audio_name'], {}), '(seed_audio_name)\n', (3246, 3263), True, 'import scipy.io as sio\n'), ((3322, 3357), 'scipy.io.wavfile.read', 'sio.wavfile.read', (['target_audio_name'], {}), '(target_audio_name)\n', (3338, 3357), True, 'import scipy.io as sio\n'), ((3628, 3686), 'deap.creator.create', 'creator.create', (['"""FitnessMax"""', 'base.Fitness'], {'weights': '(1.0,)'}), "('FitnessMax', base.Fitness, weights=(1.0,))\n", (3642, 3686), False, 'from deap import creator\n'), ((3687, 3755), 'deap.creator.create', 'creator.create', (['"""Individual"""', 'np.ndarray'], {'fitness': 'creator.FitnessMax'}), "('Individual', np.ndarray, fitness=creator.FitnessMax)\n", (3701, 3755), False, 'from deap import creator\n'), ((3767, 3781), 'deap.base.Toolbox', 'base.Toolbox', ([], {}), '()\n', (3779, 3781), False, 'from deap import base\n'), ((1203, 1245), 'numpy.zeros', 'np.zeros', ([], {'shape': 'arr.shape', 'dtype': 'arr.dtype'}), '(shape=arr.shape, dtype=arr.dtype)\n', (1211, 1245), True, 'import numpy as np\n'), ((1568, 1616), 'scipy.io.wavfile.write', 'sio.wavfile.write', (['"""indiv.wav"""', 'rate', 'individual'], {}), "('indiv.wav', rate, individual)\n", (1585, 1616), True, 'import scipy.io as sio\n'), ((1621, 1643), 'playsound.playsound', 'playsound', (['"""indiv.wav"""'], {}), "('indiv.wav')\n", (1630, 1643), False, 'from playsound import playsound\n'), ((2825, 2848), 'random.randint', 'random.randint', (['(1)', 'size'], {}), '(1, size)\n', (2839, 2848), False, 'import random\n'), ((2864, 2891), 'random.randint', 'random.randint', (['(1)', '(size - 1)'], {}), '(1, size - 1)\n', (2878, 2891), False, 'import random\n'), ((4177, 4192), 'random.seed', 'random.seed', (['(64)'], {}), '(64)\n', (4188, 4192), False, 'import random\n'), ((4535, 4578), 'deap.tools.HallOfFame', 'tools.HallOfFame', (['(1)'], {'similar': 'np.array_equal'}), '(1, similar=np.array_equal)\n', (4551, 4578), False, 'from 
deap import tools\n'), ((4596, 4644), 'deap.tools.Statistics', 'tools.Statistics', (['(lambda ind: ind.fitness.values)'], {}), '(lambda ind: ind.fitness.values)\n', (4612, 4644), False, 'from deap import tools\n'), ((4791, 4904), 'deap.algorithms.eaMuPlusLambda', 'algorithms.eaMuPlusLambda', (['pop', 'toolbox', '(10)', '(1000)'], {'cxpb': '(0.5)', 'mutpb': '(0.5)', 'ngen': '(50)', 'stats': 'stats', 'halloffame': 'hof'}), '(pop, toolbox, 10, 1000, cxpb=0.5, mutpb=0.5, ngen\n =50, stats=stats, halloffame=hof)\n', (4816, 4904), False, 'from deap import algorithms\n'), ((4933, 4976), 'scipy.io.wavfile.write', 'sio.wavfile.write', (['"""hof.wav"""', '(44100)', 'hof[0]'], {}), "('hof.wav', 44100, hof[0])\n", (4950, 4976), True, 'import scipy.io as sio\n'), ((1297, 1316), 'evolvetools.mutation', 'et.mutation', (['s', '(0.5)'], {}), '(s, 0.5)\n', (1308, 1316), True, 'import evolvetools as et\n'), ((1947, 1990), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['individual', 'target'], {}), '(individual, target)\n', (1970, 1990), False, 'from scipy import spatial\n')] |
# Written by: <NAME>, @dataoutsider
# Viz: "On the Move", enjoy!
import numpy as np
from mpl_toolkits import mplot3d
import numpy as np
import matplotlib.pyplot as plt
from math import cos, sin, pi
plt.rcParams["figure.figsize"] = 12.8, 9.6
def tube(x, y):
    """Paraboloid height: squared distance of (x, y) from the origin.

    Works element-wise on numpy arrays as well as on scalars.
    """
    return x ** 2 + y ** 2
# Build an n x n grid over [-N, N] x [-N, N] and evaluate the paraboloid on it.
N = 10
n = 1000
x = np.linspace(-N,N,n)
y = x
Xgrid, Ygrid = np.meshgrid(x, y)
Zgrid = tube(Xgrid, Ygrid)
# Flatten the grids to 1-D columns for CSV export.
Xout = np.reshape(Xgrid, -1)
Yout = np.reshape(Ygrid, -1)
Zout = np.reshape(Zgrid, -1)
angle = 45  # rotation angle in degrees; only used by the commented-out variant below
length = len(Xout)
import csv
import os
# with open(os.path.dirname(__file__) + '/background.csv', 'w',) as csvfile:
#     writer = csv.writer(csvfile, lineterminator = '\n')
#     writer.writerow(['x', 'y', 'z'])
#     for i in range(length):
#         writer.writerow([Xout[i]*cos(angle*pi/180) + Yout[i]*sin(angle*pi/180), -Xout[i]*sin(angle*pi/180) + Yout[i]*cos(angle*pi/180), Zout[i]])
# Write the unrotated (x, y, z) points next to this script as a CSV file.
with open(os.path.dirname(__file__) + '/background_square.csv', 'w',) as csvfile:
    writer = csv.writer(csvfile, lineterminator = '\n')
    writer.writerow(['x', 'y', 'z'])
    for i in range(length):
        writer.writerow([Xout[i], Yout[i], Zout[i]])
print('finished')
"numpy.meshgrid",
"csv.writer",
"os.path.dirname",
"numpy.reshape",
"numpy.linspace"
] | [((303, 324), 'numpy.linspace', 'np.linspace', (['(-N)', 'N', 'n'], {}), '(-N, N, n)\n', (314, 324), True, 'import numpy as np\n'), ((344, 361), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (355, 361), True, 'import numpy as np\n'), ((397, 418), 'numpy.reshape', 'np.reshape', (['Xgrid', '(-1)'], {}), '(Xgrid, -1)\n', (407, 418), True, 'import numpy as np\n'), ((426, 447), 'numpy.reshape', 'np.reshape', (['Ygrid', '(-1)'], {}), '(Ygrid, -1)\n', (436, 447), True, 'import numpy as np\n'), ((455, 476), 'numpy.reshape', 'np.reshape', (['Zgrid', '(-1)'], {}), '(Zgrid, -1)\n', (465, 476), True, 'import numpy as np\n'), ((978, 1018), 'csv.writer', 'csv.writer', (['csvfile'], {'lineterminator': '"""\n"""'}), "(csvfile, lineterminator='\\n')\n", (988, 1018), False, 'import csv\n'), ((893, 918), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (908, 918), False, 'import os\n')] |
import tensorflow as tf
# Enable on-demand GPU memory growth so TensorFlow does not grab all VRAM up front.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        tf.config.experimental.set_memory_growth(gpus[0], True)
        print(gpus)
    except RuntimeError as e:
        # Memory growth must be configured before program startup.
        print(e)
from source.modals import modals_cov as modals
from source.utils import train
import numpy as np
def make_cov_model(input_dim, output_dim, flag=True):
    """Build a small convolutional network named 'cifar10'.

    Args:
        input_dim: input shape (H, W, C) passed to tf.keras.Input.
        output_dim: number of output units.
        flag: if True the head uses softmax (classifier), otherwise relu.

    Returns:
        tf.keras.Model mapping images to the output layer.
    """
    layers = tf.keras.layers
    inputs = tf.keras.Input(shape=input_dim)
    h = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(inputs)
    h = layers.MaxPooling2D(pool_size=(2, 2))(h)
    h = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(h)
    h = layers.MaxPooling2D(pool_size=(2, 2))(h)
    h = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(h)
    h = layers.GlobalAveragePooling2D()(h)
    h = layers.Flatten()(h)
    activation = 'softmax' if flag else 'relu'
    outputs = layers.Dense(output_dim, activation=activation)(h)
    return tf.keras.Model(inputs, outputs, name='cifar10')
if __name__ == '__main__':
    # Load CIFAR-10 and train the project-specific "modals" model.
    reduce_cifar10 = tf.keras.datasets.cifar10
    (X_train, y_train), (X_test, y_test) = reduce_cifar10.load_data()
    # 76% -> 86%
    # need conv net
    # One-hot encode labels: (N, 1) int labels -> (N, 10) via identity-matrix rows.
    y_train = np.squeeze(np.eye(10)[y_train])
    y_test = np.squeeze(np.eye(10)[y_test])
    print(y_train.shape, X_train.shape)
    #normalization
    # Scale pixel values from [0, 255] to [0, 1].
    X_train = X_train/255
    X_test = X_test/255
    # base model : 80.6%
    # model = make_cov_model(X_train.shape[1:], y_train.shape[-1])
    # model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
    # model.fit(X_train, y_train, epochs=100, batch_size=128, validation_data=(X_test, y_test))
    model = modals(batch_size=128)
    model.set_model(1, X_train.shape[1:], y_train.shape[-1], 32)
    # Separate optimizers — presumably one for the classifier and one for the
    # discriminator ("dis" below); TODO confirm against the modals implementation.
    model.compile(optimizer = [tf.keras.optimizers.Adam(1e-3), tf.keras.optimizers.Adam(5e-4)])
    dir_path = 'result_run_cifar10/'
    train(model, X_train[:], y_train[:], X_test[:], y_test[:], 0, path=dir_path, metric='acc')
    # Save the two sub-networks the modals wrapper exposes.
    model.classifier.save(dir_path + 'cls.h5')
    model.dis.save(dir_path + 'dis.h5')
"source.modals.modals_cov",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"numpy.eye",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Input",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.keras.Model",
"tensorflow.keras.optimizers.Adam",
"source.ut... | [((31, 82), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (75, 82), True, 'import tensorflow as tf\n'), ((436, 467), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'input_dim'}), '(shape=input_dim)\n', (450, 467), True, 'import tensorflow as tf\n'), ((1114, 1161), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {'name': '"""cifar10"""'}), "(inputs, outputs, name='cifar10')\n", (1128, 1161), True, 'import tensorflow as tf\n'), ((1857, 1879), 'source.modals.modals_cov', 'modals', ([], {'batch_size': '(128)'}), '(batch_size=128)\n', (1863, 1879), True, 'from source.modals import modals_cov as modals\n'), ((2096, 2190), 'source.utils.train', 'train', (['model', 'X_train[:]', 'y_train[:]', 'X_test[:]', 'y_test[:]', '(0)'], {'path': 'dir_path', 'metric': '"""acc"""'}), "(model, X_train[:], y_train[:], X_test[:], y_test[:], 0, path=dir_path,\n metric='acc')\n", (2101, 2190), False, 'from source.utils import train\n'), ((109, 164), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpus[0]', '(True)'], {}), '(gpus[0], True)\n', (149, 164), True, 'import tensorflow as tf\n'), ((478, 547), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64, (3, 3), activation='relu', padding='same')\n", (500, 547), True, 'import tensorflow as tf\n'), ((564, 610), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (592, 610), True, 'import tensorflow as tf\n'), ((622, 692), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(128, (3, 3), activation='relu', padding='same')\n", (644, 692), True, 'import tensorflow as tf\n'), ((704, 750), 
'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (732, 750), True, 'import tensorflow as tf\n'), ((762, 832), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(256, (3, 3), activation='relu', padding='same')\n", (784, 832), True, 'import tensorflow as tf\n'), ((844, 884), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'tf.keras.layers.GlobalAveragePooling2D', ([], {}), '()\n', (882, 884), True, 'import tensorflow as tf\n'), ((896, 921), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (919, 921), True, 'import tensorflow as tf\n'), ((960, 1015), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['output_dim'], {'activation': '"""softmax"""'}), "(output_dim, activation='softmax')\n", (981, 1015), True, 'import tensorflow as tf\n'), ((1047, 1099), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['output_dim'], {'activation': '"""relu"""'}), "(output_dim, activation='relu')\n", (1068, 1099), True, 'import tensorflow as tf\n'), ((1379, 1389), 'numpy.eye', 'np.eye', (['(10)'], {}), '(10)\n', (1385, 1389), True, 'import numpy as np\n'), ((1424, 1434), 'numpy.eye', 'np.eye', (['(10)'], {}), '(10)\n', (1430, 1434), True, 'import numpy as np\n'), ((1985, 2016), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.001)'], {}), '(0.001)\n', (2009, 2016), True, 'import tensorflow as tf\n'), ((2017, 2049), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.0005)'], {}), '(0.0005)\n', (2041, 2049), True, 'import tensorflow as tf\n')] |
import numpy as np
import numpy.testing as npt
import os.path as op
import nibabel as nib
import nibabel.tmpdirs as nbtmp
import dipy.data.fetcher as fetcher
import AFQ.bundles as bdl
hardi_dir = op.join(fetcher.dipy_home, "stanford_hardi")
hardi_fdata = op.join(hardi_dir, "HARDI150.nii.gz")
def test_bundles_class():
    """Round-trip test of AFQ's Bundles class: save to disk, reload,
    compare, compute tract profiles, and clean.

    NOTE(review): the tract-profile step reads the Stanford HARDI data at
    ``hardi_fdata`` — presumably it must have been fetched beforehand;
    verify against the test setup.
    """
    # Example Segmentation results
    img = nib.Nifti1Image(np.zeros((2, 2, 2)), np.eye(4))
    # Two bundles, each with two identical 3-point streamlines plus the
    # per-streamline indices stored under 'idx'.
    bundles = {'CST_L': {'sl': [[[-80.5, -120.5, -60.5],
                                 [-80.5, -94.5, -36.5],
                                 [-78.5, -68.7, -12.6]],
                                [[-80.5, -120.5, -60.5],
                                 [-80.5, -94.5, -36.5],
                                 [-78.5, -68.7, -12.6]]],
                         'idx': [0, 1]},
               'CST_R': {'sl': [[[-80.5, -120.5, -60.5],
                                 [-80.5, -94.5, -36.5],
                                 [-78.5, -68.7, -12.6]],
                                [[-80.5, -120.5, -60.5],
                                 [-80.5, -94.5, -36.5],
                                 [-78.5, -68.7, -12.6]]],
                         'idx': [0, 1]}}
    with nbtmp.InTemporaryDirectory() as tmpdir:
        # save in bundles class for bundles class tests
        bundles_og = bdl.Bundles(reference=img,
                                  bundles_dict=bundles,
                                  using_idx=True)
        bundles_og.save_bundles(file_path=tmpdir)
        # load bundles again
        bundles = bdl.Bundles(reference=img)
        bundle_names = ['CST_L', 'CST_R']
        bundles.load_bundles(bundle_names, file_path=tmpdir)
        # check loaded bundles are same
        npt.assert_equal(len(bundles.bundles), len(bundles_og.bundles))
        npt.assert_equal(len(bundles.bundles['CST_L'].streamlines),
                         len(bundles_og.bundles['CST_L'].streamlines))
        npt.assert_equal(len(bundles.bundles['CST_R'].streamlines),
                         len(bundles_og.bundles['CST_R'].streamlines))
        # Spatial metadata must survive the save/load round trip.
        npt.assert_equal(bundles.space, bundles_og.space)
        npt.assert_equal(bundles.bundles['CST_L'].space_attributes,
                         bundles_og.bundles['CST_L'].space_attributes)
        npt.assert_equal(bundles.origin, bundles_og.origin)
        npt.assert_array_equal(
            bundles.bundles['CST_L'].data_per_streamline['idx'],
            bundles_og.bundles['CST_L'].data_per_streamline['idx'])
        # test tract profiles
        profiles = bundles.tract_profiles(
            np.ones(nib.load(hardi_fdata).shape[:3]),
            'test_subject', n_points=1)
        npt.assert_almost_equal(profiles.Value, np.zeros(2))
        # test clean bundles
        bundles.clean_bundles()
        npt.assert_equal(len(bundles.bundles), len(bundles_og.bundles))
| [
"nibabel.load",
"numpy.testing.assert_array_equal",
"numpy.zeros",
"numpy.testing.assert_equal",
"numpy.eye",
"nibabel.tmpdirs.InTemporaryDirectory",
"os.path.join",
"AFQ.bundles.Bundles"
] | [((199, 243), 'os.path.join', 'op.join', (['fetcher.dipy_home', '"""stanford_hardi"""'], {}), "(fetcher.dipy_home, 'stanford_hardi')\n", (206, 243), True, 'import os.path as op\n'), ((258, 295), 'os.path.join', 'op.join', (['hardi_dir', '"""HARDI150.nii.gz"""'], {}), "(hardi_dir, 'HARDI150.nii.gz')\n", (265, 295), True, 'import os.path as op\n'), ((2038, 2087), 'numpy.testing.assert_equal', 'npt.assert_equal', (['bundles.space', 'bundles_og.space'], {}), '(bundles.space, bundles_og.space)\n', (2054, 2087), True, 'import numpy.testing as npt\n'), ((2092, 2202), 'numpy.testing.assert_equal', 'npt.assert_equal', (["bundles.bundles['CST_L'].space_attributes", "bundles_og.bundles['CST_L'].space_attributes"], {}), "(bundles.bundles['CST_L'].space_attributes, bundles_og.\n bundles['CST_L'].space_attributes)\n", (2108, 2202), True, 'import numpy.testing as npt\n'), ((2223, 2274), 'numpy.testing.assert_equal', 'npt.assert_equal', (['bundles.origin', 'bundles_og.origin'], {}), '(bundles.origin, bundles_og.origin)\n', (2239, 2274), True, 'import numpy.testing as npt\n'), ((2279, 2414), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (["bundles.bundles['CST_L'].data_per_streamline['idx']", "bundles_og.bundles['CST_L'].data_per_streamline['idx']"], {}), "(bundles.bundles['CST_L'].data_per_streamline['idx'],\n bundles_og.bundles['CST_L'].data_per_streamline['idx'])\n", (2301, 2414), True, 'import numpy.testing as npt\n'), ((385, 404), 'numpy.zeros', 'np.zeros', (['(2, 2, 2)'], {}), '((2, 2, 2))\n', (393, 404), True, 'import numpy as np\n'), ((406, 415), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (412, 415), True, 'import numpy as np\n'), ((1191, 1219), 'nibabel.tmpdirs.InTemporaryDirectory', 'nbtmp.InTemporaryDirectory', ([], {}), '()\n', (1217, 1219), True, 'import nibabel.tmpdirs as nbtmp\n'), ((1308, 1372), 'AFQ.bundles.Bundles', 'bdl.Bundles', ([], {'reference': 'img', 'bundles_dict': 'bundles', 'using_idx': '(True)'}), '(reference=img, 
bundles_dict=bundles, using_idx=True)\n', (1319, 1372), True, 'import AFQ.bundles as bdl\n'), ((1537, 1563), 'AFQ.bundles.Bundles', 'bdl.Bundles', ([], {'reference': 'img'}), '(reference=img)\n', (1548, 1563), True, 'import AFQ.bundles as bdl\n'), ((2624, 2635), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2632, 2635), True, 'import numpy as np\n'), ((2510, 2531), 'nibabel.load', 'nib.load', (['hardi_fdata'], {}), '(hardi_fdata)\n', (2518, 2531), True, 'import nibabel as nib\n')] |
# import sys
# sys.path.extend(['/home/ubuntu/workspace/scrabble-gan'])
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
def main():
    """Generate handwriting images for a fixed sample string with a
    pre-trained ScrabbleGAN generator and display them with matplotlib."""
    latent_dim = 128
    # Character vocabulary; a character's index in this string is its label id.
    char_vec = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    path_to_saved_model = '/home/ubuntu/workspace/scrabble-gan/res/out/big_ac_gan/model/generator_15'
    # number of samples to generate
    batch_size = 10
    # sample string
    sample_string = 'machinelearning'
    # load trained model
    imported_model = tf.saved_model.load(path_to_saved_model)
    # inference loop
    for idx in range(1):
        fake_labels = []
        words = [sample_string] * 10
        noise = tf.random.normal([batch_size, latent_dim])
        # encode words
        for word in words:
            fake_labels.append([char_vec.index(char) for char in word])
        fake_labels = np.array(fake_labels, np.int32)
        # run inference process
        predictions = imported_model([noise, fake_labels], training=False)
        # transform values into range [0, 1]
        predictions = (predictions + 1) / 2.0
        # plot results: one row per generated sample, first channel as grayscale
        for i in range(predictions.shape[0]):
            plt.subplot(10, 1, i + 1)
            plt.imshow(predictions[i, :, :, 0], cmap='gray')
            # plt.text(0, -1, "".join([char_vec[label] for label in fake_labels[i]]))
            plt.axis('off')
        plt.show()
if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"tensorflow.random.normal",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.axis",
"numpy.array",
"tensorflow.saved_model.load"
] | [((571, 611), 'tensorflow.saved_model.load', 'tf.saved_model.load', (['path_to_saved_model'], {}), '(path_to_saved_model)\n', (590, 611), True, 'import tensorflow as tf\n'), ((737, 779), 'tensorflow.random.normal', 'tf.random.normal', (['[batch_size, latent_dim]'], {}), '([batch_size, latent_dim])\n', (753, 779), True, 'import tensorflow as tf\n'), ((924, 955), 'numpy.array', 'np.array', (['fake_labels', 'np.int32'], {}), '(fake_labels, np.int32)\n', (932, 955), True, 'import numpy as np\n'), ((1446, 1456), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1454, 1456), True, 'import matplotlib.pyplot as plt\n'), ((1237, 1262), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(10)', '(1)', '(i + 1)'], {}), '(10, 1, i + 1)\n', (1248, 1262), True, 'import matplotlib.pyplot as plt\n'), ((1275, 1323), 'matplotlib.pyplot.imshow', 'plt.imshow', (['predictions[i, :, :, 0]'], {'cmap': '"""gray"""'}), "(predictions[i, :, :, 0], cmap='gray')\n", (1285, 1323), True, 'import matplotlib.pyplot as plt\n'), ((1422, 1437), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1430, 1437), True, 'import matplotlib.pyplot as plt\n')] |
"""Specify the jobs to run via config file.
Product assortment exeperiment (Figure 7.2).
"""
import collections
import functools
import numpy as np
from base.config_lib import Config
from base.experiment import ExperimentNoAction
from assortment.agent_assortment import TSAssortment, GreedyAssortment, EpsilonGreedyAssortment,AnnealingEpsilonGreedyAssortment
from assortment.env_assortment import ProductAssortment
def get_config():
  """Generates the config for the experiment."""
  name = 'product_assortment'
  num_products = 6
  prior_mean = 0
  prior_var_diagonal = 1
  prior_var_off_diagonal = 0.2
  noise_var = 0.04
  profits = np.array([1/6]*6)
  epsilon = 0.07
  k = 9

  # All agents share the same constructor arguments.
  agent_args = (num_products, prior_mean, prior_var_diagonal,
                prior_var_off_diagonal, noise_var, profits, epsilon, k)
  agents = collections.OrderedDict([
      ('TS',
       functools.partial(TSAssortment, *agent_args)),
      ('greedy',
       functools.partial(GreedyAssortment, *agent_args)),
      (str(epsilon) + '-greedy',
       functools.partial(EpsilonGreedyAssortment, *agent_args)),
      (str(k) + '/(' + str(k) + '+t)-greedy',
       functools.partial(AnnealingEpsilonGreedyAssortment, *agent_args)),
  ])

  environments = collections.OrderedDict([
      ('env',
       functools.partial(ProductAssortment, num_products, prior_mean,
                         prior_var_diagonal, prior_var_off_diagonal,
                         noise_var, profits)),
  ])
  experiments = collections.OrderedDict([(name, ExperimentNoAction)])
  n_steps = 500
  n_seeds = 20000
  return Config(name, agents, environments, experiments, n_steps, n_seeds)
| [
"collections.OrderedDict",
"functools.partial",
"base.config_lib.Config",
"numpy.array"
] | [((639, 660), 'numpy.array', 'np.array', (['([1 / 6] * 6)'], {}), '([1 / 6] * 6)\n', (647, 660), True, 'import numpy as np\n'), ((1778, 1831), 'collections.OrderedDict', 'collections.OrderedDict', (['[(name, ExperimentNoAction)]'], {}), '([(name, ExperimentNoAction)])\n', (1801, 1831), False, 'import collections\n'), ((1887, 1952), 'base.config_lib.Config', 'Config', (['name', 'agents', 'environments', 'experiments', 'n_steps', 'n_seeds'], {}), '(name, agents, environments, experiments, n_steps, n_seeds)\n', (1893, 1952), False, 'from base.config_lib import Config\n'), ((743, 880), 'functools.partial', 'functools.partial', (['TSAssortment', 'num_products', 'prior_mean', 'prior_var_diagonal', 'prior_var_off_diagonal', 'noise_var', 'profits', 'epsilon', 'k'], {}), '(TSAssortment, num_products, prior_mean,\n prior_var_diagonal, prior_var_off_diagonal, noise_var, profits, epsilon, k)\n', (760, 880), False, 'import functools\n'), ((928, 1069), 'functools.partial', 'functools.partial', (['GreedyAssortment', 'num_products', 'prior_mean', 'prior_var_diagonal', 'prior_var_off_diagonal', 'noise_var', 'profits', 'epsilon', 'k'], {}), '(GreedyAssortment, num_products, prior_mean,\n prior_var_diagonal, prior_var_off_diagonal, noise_var, profits, epsilon, k)\n', (945, 1069), False, 'import functools\n'), ((1134, 1282), 'functools.partial', 'functools.partial', (['EpsilonGreedyAssortment', 'num_products', 'prior_mean', 'prior_var_diagonal', 'prior_var_off_diagonal', 'noise_var', 'profits', 'epsilon', 'k'], {}), '(EpsilonGreedyAssortment, num_products, prior_mean,\n prior_var_diagonal, prior_var_off_diagonal, noise_var, profits, epsilon, k)\n', (1151, 1282), False, 'import functools\n'), ((1355, 1516), 'functools.partial', 'functools.partial', (['AnnealingEpsilonGreedyAssortment', 'num_products', 'prior_mean', 'prior_var_diagonal', 'prior_var_off_diagonal', 'noise_var', 'profits', 'epsilon', 'k'], {}), '(AnnealingEpsilonGreedyAssortment, num_products,\n prior_mean, 
prior_var_diagonal, prior_var_off_diagonal, noise_var,\n profits, epsilon, k)\n', (1372, 1516), False, 'import functools\n'), ((1604, 1734), 'functools.partial', 'functools.partial', (['ProductAssortment', 'num_products', 'prior_mean', 'prior_var_diagonal', 'prior_var_off_diagonal', 'noise_var', 'profits'], {}), '(ProductAssortment, num_products, prior_mean,\n prior_var_diagonal, prior_var_off_diagonal, noise_var, profits)\n', (1621, 1734), False, 'import functools\n')] |
import logging
import time
from pathlib import Path
from collections import deque, defaultdict
import h5py
import zarr
import torch
import tracemalloc
import numpy as np
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader, IterableDataset
class DataReader:
    """Abstract reader that loads a dataset group into memory.

    Subclasses implement ``read`` as a generator over per-subject arrays
    (see DataReaderHDF5 elsewhere in this file for an HDF5 backend).
    """

    def read(self, group_key, subj_keys, dtype=True, preload=True):
        """Generator over subject arrays; implemented by subclasses."""
        pass

    def read_data_to_memory(self, subject_keys, group, dtype=np.float16, preload=True):
        """Reads data from source to memory.

        The dataset should be stored using the following structure:
        <data_path>/<group>/<key>...

        A generator function (data_generator) can be defined to read data respecting this
        structure (implementations for hdf5/zarr/nifti directory are available).

        Args:
            subject_keys (list): identifying keys
            group (str): data group name
            dtype (type, optional): store dtype (default np.float16/np.uint8). Defaults to np.float16.
            preload (bool, optional): if False, data will be loaded on the fly. Defaults to True.

        Returns
            object: collections.deque list containing the dataset
        """
        logger = logging.getLogger(__name__)
        logger.info(f'loading group [{group}]...')

        # check timing and memory allocation
        t = time.perf_counter()
        tracemalloc.start()
        data = deque(self.read(subject_keys, group, dtype, preload))
        current, peak = tracemalloc.get_traced_memory()
        # BUGFIX: stop tracing once the measurement is taken; otherwise
        # tracemalloc stays active and slows down every later allocation
        # in the process.
        tracemalloc.stop()
        logger.debug(f'finished: {time.perf_counter() - t :.3f} s, current memory usage {current / 10**9: .2f}GB, peak memory usage {peak / 10**9:.2f}GB')
        return data

    def get_data_shape(self, subject_keys, group):
        pass

    def get_data_attribute(self, subject_keys, group, attribute):
        pass

    def close(self):
        pass
class DataReaderHDF5(DataReader):
    """DataReader backed by a single HDF5 file laid out as <group>/<subject_key>."""

    def __init__(self, path_data):
        self.path_data = path_data
        self.hf = h5py.File(str(path_data), 'r')
        self.logger = logging.getLogger(__name__)

    def read(self, subject_keys, group, dtype=np.float16, preload=True):
        """Yield one array per subject key with a leading channel axis added.

        With preload=True the dataset is materialised and cast to *dtype*;
        otherwise the lazy h5py dataset is yielded as-is.
        """
        for key in tqdm(subject_keys):
            item = self.hf[f'{group}/{key}']
            if preload:
                item = item[:].astype(dtype)
            yield item[np.newaxis, ...]

    def get_data_shape(self, subject_keys, group):
        """Return {key: shape array} with a channel dimension of 1 prepended."""
        return {
            key: np.insert(np.array(self.hf[f'{group}/{key}'].shape), 0, 1)
            for key in subject_keys
        }

    def get_data_attribute(self, subject_keys, group, attribute):
        """Return {key: value} of an HDF5 attribute stored on each dataset."""
        return {key: self.hf[f'{group}/{key}'].attrs[attribute]
                for key in subject_keys}

    def close(self):
        self.hf.close()
def grid_patch_generator(img, patch_size, patch_overlap, **kwargs):
    """Generates grid of overlapping patches.
    All patches are overlapping (2*patch_overlap per axis).
    Cropping the original image by patch_overlap.
    The resulting patches can be re-assembled to the
    original image shape.
    Additional np.pad argument can be passed via **kwargs.
    Args:
        img (np.array): CxHxWxD
        patch_size (list/np.array): patch shape [H,W,D]
        patch_overlap (list/np.array): overlap (per axis) [H,W,D]
    Yields:
        np.array, np.array, int: patch data CxHxWxD,
                                 patch position [H,W,D],
                                 patch number
    """
    n_dims = 3
    patch_size = np.array(patch_size)
    patch_overlap = np.array(patch_overlap)
    image_shape = np.array(img.shape[1:])
    # stride between patch origins: the non-overlapping "core" of a patch
    core_size = patch_size - 2 * patch_overlap
    n_patches = np.ceil(image_shape / core_size).astype(int)
    # pad so that an integer number of cores covers every axis
    pad_tail = core_size - image_shape % core_size
    pad_width = [[0, 0]] + [[patch_overlap[ax], patch_overlap[ax] + pad_tail[ax]]
                            for ax in range(n_dims)]
    padded = np.pad(img, pad_width, **kwargs)
    starts = [np.arange(0, n_patches[ax]) * core_size[ax] for ax in range(n_dims)]
    counter = -1
    for s0 in starts[0]:
        for s1 in starts[1]:
            for s2 in starts[2]:
                begin = np.array([s0, s1, s2])
                end = begin + patch_size
                counter += 1
                chunk = padded[:, begin[0]:end[0], begin[1]:end[1], begin[2]:end[2]]
                yield chunk, begin, counter
class GridPatchSampler(IterableDataset):
    """Iterable dataset that tiles whole volumes into overlapping patches
    and re-assembles processed patches back to full-size images."""
    def __init__(self,
                 data_path,
                 subject_keys,
                 patch_size, patch_overlap,
                 out_channels=1,
                 out_dtype=np.uint8,
                 image_group='images',
                 ReaderClass=DataReaderHDF5,
                 pad_args={'mode': 'symmetric'}):
        """GridPatchSampler for patch based inference.
        Creates IterableDataset of overlapping patches (overlap between neighboring
        patches: 2*patch_overlapping).
        To assemble the original image shape use add_processed_batch(). The
        number of channels for the assembled images (corresponding to the
        channels of the processed patches) has to be defined by num_channels:
        <num_channels>xHxWxD.
        Args:
            data_path (Path/str): data path (e.g. zarr/hdf5 file)
            subject_keys (list): subject keys
            patch_size (list/np.array): [H,W,D] patch shape
            patch_overlap (list/np.array): [H,W,D] patch boundary
            out_channels (int, optional): number of channels for the processed patches. Defaults to 1.
            out_dtype (dtype, optional): data type of processed patches. Defaults to np.uint8.
            image_group (str, optional): image group tag . Defaults to 'images'.
            ReaderClass (function, optional): data reader class. Defaults to DataReaderHDF5.
            pad_args (dict, optional): additional np.pad parameters. Defaults to {'mode': 'symmetric'}.
        """
        # NOTE(review): `pad_args` is a mutable default argument; it is only
        # read here, but a caller mutating it would affect later instances.
        self.data_path = str(data_path)
        self.subject_keys = subject_keys
        self.patch_size = np.array(patch_size)
        self.patch_overlap = patch_overlap
        self.image_group = image_group
        self.ReaderClass = ReaderClass
        self.out_channels = out_channels
        self.out_dtype = out_dtype
        # in-memory zarr group collecting the assembled output images
        self.results = zarr.group()
        self.originals = {}
        self.pad_args = pad_args
        # read image data for each subject in subject_keys
        reader = self.ReaderClass(self.data_path)
        self.data_shape = reader.get_data_shape(self.subject_keys, self.image_group)
        self.data_affine = reader.get_data_attribute(self.subject_keys, self.image_group, 'affine')
        self.data_generator = reader.read_data_to_memory(self.subject_keys, self.image_group, dtype=np.float16)
        reader.close()
    def add_processed_batch(self, sample):
        """Assembles the processed patches to the original array shape.
        Args:
            sample (dict): 'key', 'position', 'data' (C,H,W,D) for each patch
        """
        for i, key in enumerate(sample['key']):
            # crop patch overlap
            # NOTE(review): the first axis is cropped with
            # patch_overlap[0]:-patch_overlap[1]; this looks like it should be
            # patch_overlap[0]:-patch_overlap[0] -- only correct as written
            # when all overlap components are equal. TODO confirm.
            cropped_patch = np.array(sample['data'][i, :,
                                     self.patch_overlap[0]:-self.patch_overlap[1],
                                     self.patch_overlap[1]:-self.patch_overlap[1],
                                     self.patch_overlap[2]:-self.patch_overlap[2]])
            # start and end position
            pos = np.array(sample['position'][i])
            pos_end = np.array(pos + np.array(cropped_patch.shape[1:]))
            # check if end position is outside the original array (due to padding)
            # -> crop again (overhead)
            img_size = np.array(self.data_shape[key][1:])
            crop_pos_end = np.minimum(pos_end, img_size)
            overhead = np.maximum(pos_end - crop_pos_end, [0, 0, 0])
            new_patch_size = np.array(cropped_patch.shape[1:]) - overhead
            # add the patch to the corresponing entry in the result container
            ds_shape = np.array(self.data_shape[key])
            ds_shape[0] = self.out_channels
            ds = self.results.require_dataset(key, shape=ds_shape, chunks=False, dtype=self.out_dtype)
            ds.attrs['affine'] = np.array(self.data_affine[key]).tolist()
            ds[:, pos[0]:pos_end[0],
               pos[1]:pos_end[1],
               pos[2]:pos_end[2]] = cropped_patch[:, :new_patch_size[0],
                                                  :new_patch_size[1],
                                                  :new_patch_size[2]].astype(self.out_dtype)
    def get_assembled_data(self):
        """Gets the dictionary with assembled/processed images.
        Returns:
            dict: Dictionary containing the processed and assembled images (key=subject_key)
        """
        return self.results
    def grid_patch_sampler(self):
        """Data reading and patch generation.
        Yields:
            dict: patch dictionary (subject_key, position, count and data)
        """
        # create a patch iterator
        for subj_idx, sample in enumerate(tqdm(self.data_generator)):
            subject_key = self.subject_keys[subj_idx]
            # create patches
            result_shape = np.array(sample.shape)
            result_shape[0] = self.out_channels
            patch_generator = grid_patch_generator(
                sample, self.patch_size, self.patch_overlap, **self.pad_args)
            for patch, idx, count in patch_generator:
                patch_dict = {'data': patch[: , :, :, :],
                              'key': subject_key,
                              'position': idx,
                              'count': count}
                yield patch_dict
    def __iter__(self):
        # PyTorch IterableDataset entry point.
        return iter(self.grid_patch_sampler())
def __len__(self):
return 1 | [
"numpy.pad",
"tracemalloc.start",
"tqdm.tqdm",
"numpy.minimum",
"numpy.maximum",
"numpy.ceil",
"time.perf_counter",
"zarr.group",
"numpy.insert",
"numpy.array",
"numpy.arange",
"tracemalloc.get_traced_memory",
"logging.getLogger"
] | [((3587, 3607), 'numpy.array', 'np.array', (['patch_size'], {}), '(patch_size)\n', (3595, 3607), True, 'import numpy as np\n'), ((3623, 3646), 'numpy.array', 'np.array', (['img.shape[1:]'], {}), '(img.shape[1:])\n', (3631, 3646), True, 'import numpy as np\n'), ((3667, 3690), 'numpy.array', 'np.array', (['patch_overlap'], {}), '(patch_overlap)\n', (3675, 3690), True, 'import numpy as np\n'), ((3893, 4090), 'numpy.pad', 'np.pad', (['img', '[[0, 0], [patch_overlap[0], patch_overlap[0] + overhead[0]], [patch_overlap\n [1], patch_overlap[1] + overhead[1]], [patch_overlap[2], patch_overlap[\n 2] + overhead[2]]]'], {}), '(img, [[0, 0], [patch_overlap[0], patch_overlap[0] + overhead[0]], [\n patch_overlap[1], patch_overlap[1] + overhead[1]], [patch_overlap[2], \n patch_overlap[2] + overhead[2]]], **kwargs)\n', (3899, 4090), True, 'import numpy as np\n'), ((1216, 1243), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1233, 1243), False, 'import logging\n'), ((1352, 1371), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1369, 1371), False, 'import time\n'), ((1380, 1399), 'tracemalloc.start', 'tracemalloc.start', ([], {}), '()\n', (1397, 1399), False, 'import tracemalloc\n'), ((1493, 1524), 'tracemalloc.get_traced_memory', 'tracemalloc.get_traced_memory', ([], {}), '()\n', (1522, 1524), False, 'import tracemalloc\n'), ((2062, 2089), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2079, 2089), False, 'import logging\n'), ((2181, 2199), 'tqdm.tqdm', 'tqdm', (['subject_keys'], {}), '(subject_keys)\n', (2185, 2199), False, 'from tqdm import tqdm\n'), ((6266, 6286), 'numpy.array', 'np.array', (['patch_size'], {}), '(patch_size)\n', (6274, 6286), True, 'import numpy as np\n'), ((6507, 6519), 'zarr.group', 'zarr.group', ([], {}), '()\n', (6517, 6519), False, 'import zarr\n'), ((2480, 2519), 'numpy.array', 'np.array', (["self.hf[f'{group}/{k}'].shape"], {}), "(self.hf[f'{group}/{k}'].shape)\n", (2488, 
2519), True, 'import numpy as np\n'), ((2544, 2570), 'numpy.insert', 'np.insert', (['shapes[k]', '(0)', '(1)'], {}), '(shapes[k], 0, 1)\n', (2553, 2570), True, 'import numpy as np\n'), ((3761, 3799), 'numpy.ceil', 'np.ceil', (['(img_size / cropped_patch_size)'], {}), '(img_size / cropped_patch_size)\n', (3768, 3799), True, 'import numpy as np\n'), ((4181, 4207), 'numpy.arange', 'np.arange', (['(0)', 'n_patches[k]'], {}), '(0, n_patches[k])\n', (4190, 4207), True, 'import numpy as np\n'), ((7355, 7532), 'numpy.array', 'np.array', (["sample['data'][i, :, self.patch_overlap[0]:-self.patch_overlap[1], self.\n patch_overlap[1]:-self.patch_overlap[1], self.patch_overlap[2]:-self.\n patch_overlap[2]]"], {}), "(sample['data'][i, :, self.patch_overlap[0]:-self.patch_overlap[1],\n self.patch_overlap[1]:-self.patch_overlap[1], self.patch_overlap[2]:-\n self.patch_overlap[2]])\n", (7363, 7532), True, 'import numpy as np\n'), ((7735, 7766), 'numpy.array', 'np.array', (["sample['position'][i]"], {}), "(sample['position'][i])\n", (7743, 7766), True, 'import numpy as np\n'), ((7984, 8018), 'numpy.array', 'np.array', (['self.data_shape[key][1:]'], {}), '(self.data_shape[key][1:])\n', (7992, 8018), True, 'import numpy as np\n'), ((8046, 8075), 'numpy.minimum', 'np.minimum', (['pos_end', 'img_size'], {}), '(pos_end, img_size)\n', (8056, 8075), True, 'import numpy as np\n'), ((8099, 8144), 'numpy.maximum', 'np.maximum', (['(pos_end - crop_pos_end)', '[0, 0, 0]'], {}), '(pos_end - crop_pos_end, [0, 0, 0])\n', (8109, 8144), True, 'import numpy as np\n'), ((8320, 8350), 'numpy.array', 'np.array', (['self.data_shape[key]'], {}), '(self.data_shape[key])\n', (8328, 8350), True, 'import numpy as np\n'), ((9434, 9459), 'tqdm.tqdm', 'tqdm', (['self.data_generator'], {}), '(self.data_generator)\n', (9438, 9459), False, 'from tqdm import tqdm\n'), ((9572, 9594), 'numpy.array', 'np.array', (['sample.shape'], {}), '(sample.shape)\n', (9580, 9594), True, 'import numpy as np\n'), ((4366, 4388), 
'numpy.array', 'np.array', (['[p0, p1, p2]'], {}), '([p0, p1, p2])\n', (4374, 4388), True, 'import numpy as np\n'), ((8174, 8207), 'numpy.array', 'np.array', (['cropped_patch.shape[1:]'], {}), '(cropped_patch.shape[1:])\n', (8182, 8207), True, 'import numpy as np\n'), ((7804, 7837), 'numpy.array', 'np.array', (['cropped_patch.shape[1:]'], {}), '(cropped_patch.shape[1:])\n', (7812, 7837), True, 'import numpy as np\n'), ((8531, 8562), 'numpy.array', 'np.array', (['self.data_affine[key]'], {}), '(self.data_affine[key])\n', (8539, 8562), True, 'import numpy as np\n'), ((1559, 1578), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1576, 1578), False, 'import time\n')] |
import numpy as np
import pickle
import matplotlib.pyplot as plt
import random
from encoder import get_encoder_layer
from decoder import decoding_layer,process_decoder_input
import tensorflow as tf
import os
device_name = "/gpu:0"  # TF device string used below; switch to "/cpu:0" when no GPU is available
#def corrupt_noise(traj, rate_noise, factor):
# new_traj={}
# for count, key in enumerate(traj):
# if count%500==0:
# print('count:',count)
# new_traj[key] = traj[key]
# for i in range(len(traj[key])):
# seed = random.random()
# if seed < rate_noise:
# #adding gauss noise
# for col in range(46):
# new_traj[key][i][col] = traj[key][i][col] + factor * random.gauss(0,1)
# return new_traj
#
#def corrupt_drop(traj, rate_drop):
# new_traj={}
# for count, key in enumerate(traj):
# if count%500==0:
# print('count:',count)
# new_traj[key] = traj[key]
# droprow = []
# for i in range(len(traj[key])):
# seed = random.random()
# if seed < rate_drop:
# #dropping
# droprow.append(i)
# new_traj[key] = np.delete(new_traj[key], droprow, axis = 0)
#
# return new_traj
def get_inputs():
    """Create the TF1 placeholders and length tensors for the seq2seq model.

    Returns (in order): embedded source batch [batch, time, 20], source ids,
    target ids, learning rate, per-sample target lengths, max target length,
    and per-sample source lengths.
    """
    embedded_source = tf.placeholder(tf.float32, [None, None, 20], name='embed_seq')
    source_ids = tf.placeholder(tf.int32, [None, None], name='inputs')
    target_ids = tf.placeholder(tf.int32, [None, None], name='targets')
    lr = tf.placeholder(tf.float32, name='learning_rate')
    # per-sequence lengths are fed at run time; the max drives decoder unrolling
    tgt_lengths = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
    max_tgt_length = tf.reduce_max(tgt_lengths, name='max_target_len')
    src_lengths = tf.placeholder(tf.int32, (None,), name='source_sequence_length')
    return (embedded_source, source_ids, target_ids, lr,
            tgt_lengths, max_tgt_length, src_lengths)
def seq2seq_model(embed_seq,
                  input_data,
                  targets,
                  lr,
                  target_sequence_length,
                  max_target_sequence_length,
                  source_sequence_length,
                  source_vocab_size,
                  target_vocab_size,
                  encoder_embedding_size,
                  decoder_embedding_size,
                  rnn_size,
                  num_layers):
    """Build the full encoder/decoder training graph.

    Returns:
        (training_decoder_output, predicting_decoder_output)

    NOTE: still reads the module-level globals `target_letter_to_int` and
    `batch_size` (defined in __main__); it must be called after those exist.
    """
    # BUGFIX: use the encoder/decoder embedding-size *parameters* instead of
    # the module-level globals `encoding_embedding_size` /
    # `decoding_embedding_size` -- the parameters were previously ignored
    # (the call site passes those same globals, so behavior is unchanged).
    _, encoder_state = get_encoder_layer(embed_seq,
                                         input_data,
                                         rnn_size,
                                         num_layers,
                                         source_sequence_length,
                                         source_vocab_size,
                                         encoder_embedding_size)
    print(encoder_state)
    print('Done encoder state')
    # decoder input after Data_Preprocessing (prepend <GO>, drop last token)
    decoder_input = process_decoder_input(targets, target_letter_to_int, batch_size)
    # state vector and input to decoder
    training_decoder_output, predicting_decoder_output = decoding_layer(target_letter_to_int,
                                                                       decoder_embedding_size,
                                                                       num_layers,
                                                                       rnn_size,
                                                                       target_sequence_length,
                                                                       max_target_sequence_length,
                                                                       encoder_state,
                                                                       decoder_input,
                                                                       batch_size)
    return training_decoder_output, predicting_decoder_output
def pad_sentence_batch(sentence_batch, pad_int):
    """Pad every sentence in the batch to the batch's maximum length.

    Args:
        sentence_batch: list of token-id lists
        pad_int: id of the '<PAD>' token
    """
    target_len = max(len(sentence) for sentence in sentence_batch)
    padded = []
    for sentence in sentence_batch:
        padded.append(sentence + [pad_int] * (target_len - len(sentence)))
    return padded
def get_batches(targets, sources, batch_size, source_pad_int, target_pad_int, embed_mat):
    """Yield padded mini-batches with per-sequence lengths and embeddings.

    Yields:
        (embedded sources, padded targets, padded sources,
         target lengths, source lengths) per full batch; a trailing
        partial batch is dropped.
    """
    n_batches = len(sources) // batch_size
    for batch_i in range(n_batches):
        lo = batch_i * batch_size
        src_batch = sources[lo:lo + batch_size]
        tgt_batch = targets[lo:lo + batch_size]
        # pad each batch to its own maximum sequence length
        max_src = max(len(s) for s in src_batch)
        pad_src = np.array([s + [source_pad_int] * (max_src - len(s)) for s in src_batch])
        max_tgt = max(len(t) for t in tgt_batch)
        pad_tgt = np.array([t + [target_pad_int] * (max_tgt - len(t)) for t in tgt_batch])
        # record the true (unpadded) lengths
        tgt_lens = [len(t) for t in tgt_batch]
        src_lens = [len(s) for s in src_batch]
        # look up one embedding vector per padded token id
        embed = np.array([[embed_mat[tok] for tok in row] for row in pad_src])
        yield embed, pad_tgt, pad_src, tgt_lens, src_lens
def extract_character_vocab(data, UNIQUE_WORDS):
    """Build segment <-> integer mappings from a pre-built corpus.

    Args:
        data: nested list, data[play][segment]; each segment is keyed as frozenset(segment).
        UNIQUE_WORDS: dict mapping frozenset(segment) -> integer id.

    Returns:
        (int_to_vocab, vocab_to_int) including the four special tokens
        '<PAD>', '<UNK>', '<GO>', '<EOS>' appended after the largest corpus id.
    """
    vocab_to_int = {}
    int_to_vocab = {}
    for plays in data:
        for segs in plays:
            key = frozenset(segs)
            # BUGFIX: the original test `key in UNIQUE_WORDS == False` is a
            # chained comparison, i.e. `(key in UNIQUE_WORDS) and
            # (UNIQUE_WORDS == False)`, which is always False -- so unknown
            # segments fell into the else branch and raised KeyError instead
            # of printing the warning.
            if key not in UNIQUE_WORDS:
                print('No this segment! please build it.')
            else:
                vocab_to_int[key] = UNIQUE_WORDS[key]
                int_to_vocab[UNIQUE_WORDS[key]] = key
    # special tokens get the four ids following the largest corpus id
    base = max(UNIQUE_WORDS.values())
    special_words = ['<PAD>', '<UNK>', '<GO>', '<EOS>']
    for offset, token in enumerate(special_words, start=1):
        vocab_to_int[token] = base + offset
        int_to_vocab[base + offset] = token
    return int_to_vocab, vocab_to_int
def mapping_source_int(cor_ogm_train_data, UNIQUE_WORDS):
    """Map every segment of every play to its corpus integer id."""
    return [[UNIQUE_WORDS[frozenset(seg)] for seg in plays]
            for plays in cor_ogm_train_data]
def mapping_target_int(ogm_train_data, UNIQUE_WORDS):
    """Map plays to integer ids and append the '<EOS>' id to each play.

    NOTE: reads the module-level global `target_letter_to_int` for the
    '<EOS>' id, so it must be called after that mapping has been built.
    """
    target_int = []
    for plays in ogm_train_data:
        mapped = [UNIQUE_WORDS[frozenset(seg)] for seg in plays]
        mapped.append(target_letter_to_int['<EOS>'])
        target_int.append(mapped)
    return target_int
if __name__ == '__main__':
    # --- session setup and data loading ------------------------------------
    # NOTE(review): this session is created but never used or closed -- the
    # training below opens its own session; consider removing these lines.
    TF_CONFIG_ = tf.ConfigProto()
    TF_CONFIG_.gpu_options.allow_growth = True
    sess = tf.Session(config=TF_CONFIG_)
    print('autoencoder')
    path1=r'TrainedData/'
    #cor_ogm_train_data = ogm_train_data=pickle.load(open(path1+'drop_ogm_train_data', 'rb'), encoding='bytes') #for drop version
    cor_ogm_train_data = ogm_train_data=pickle.load(open(path1+'noise_ogm_train_data', 'rb'), encoding='bytes') #for noise version
    ogm_train_data=pickle.load(open(path1+'ogm_train_data', 'rb'), encoding='bytes')
    embed_mat=pickle.load(open(path1+'embed_mat', 'rb'), encoding='bytes')
    # append four random embedding rows for the special tokens
    # <PAD>/<UNK>/<GO>/<EOS> added by extract_character_vocab
    embed_mat = np.r_[embed_mat,np.random.rand(len(embed_mat[0])).reshape(1,-1)]
    embed_mat = np.r_[embed_mat,np.random.rand(len(embed_mat[0])).reshape(1,-1)]
    embed_mat = np.r_[embed_mat,np.random.rand(len(embed_mat[0])).reshape(1,-1)]
    embed_mat = np.r_[embed_mat,np.random.rand(len(embed_mat[0])).reshape(1,-1)]
    UNIQUE_WORDS=pickle.load(open(path1+'corpus', 'rb'), encoding='bytes')
    # build token<->int mappings and convert both corpora to id sequences
    source_int_to_letter, source_letter_to_int = extract_character_vocab(cor_ogm_train_data, UNIQUE_WORDS)
    target_int_to_letter, target_letter_to_int = extract_character_vocab(ogm_train_data, UNIQUE_WORDS)
    source_int = mapping_source_int(cor_ogm_train_data, UNIQUE_WORDS)
    target_int = mapping_target_int(ogm_train_data, UNIQUE_WORDS)
    #look transform
    print('source', source_int[:5])
    print('target', target_int[:5])
    # --- hyper-parameters (also read as globals by seq2seq_model) ----------
    # Number of Epochs
    epochs = 10
    # Batch Size
    batch_size = 10
    # RNN Size
    rnn_size = 50
    # Number of Layers
    num_layers = 2
    # Embedding Size
    encoding_embedding_size = 50
    decoding_embedding_size = 50
    # Learning Rate
    learning_rate = 0.01
    with tf.device(device_name):
        #building graph
        train_graph = tf.Graph()
        with train_graph.as_default():
            embed_seq, input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length = get_inputs()
            training_decoder_output, predicting_decoder_output = seq2seq_model(embed_seq,
                                                                               input_data,
                                                                               targets,
                                                                               lr,
                                                                               target_sequence_length,
                                                                               max_target_sequence_length,
                                                                               source_sequence_length,
                                                                               len(source_letter_to_int),
                                                                               len(target_letter_to_int),
                                                                               encoding_embedding_size,
                                                                               decoding_embedding_size,
                                                                               rnn_size,
                                                                               num_layers)
            training_logits = tf.identity(training_decoder_output.rnn_output, 'logits')
            predicting_logits = tf.identity(predicting_decoder_output.sample_id, name='predictions')
            # mask padded positions so they do not contribute to the loss
            masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
            with tf.name_scope("optimization"):
                # Loss function
                cost = tf.contrib.seq2seq.sequence_loss(
                    training_logits,
                    targets,
                    masks)
                # Optimizer
                optimizer = tf.train.AdamOptimizer(lr)
                # Gradient Clipping
                gradients = optimizer.compute_gradients(cost)
                capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]
                train_op = optimizer.apply_gradients(capped_gradients)
    print('done building graph')
    # train and validation
    train_source = source_int[batch_size:]
    train_target = target_int[batch_size:]
    # leave one batch for validation
    valid_source = source_int[:batch_size]
    valid_target = target_int[:batch_size]
    (valid_embed_batch, valid_targets_batch, valid_sources_batch, valid_targets_lengths, valid_sources_lengths) = next(get_batches(valid_target, valid_source, batch_size,
                                                           source_letter_to_int['<PAD>'],
                                                           target_letter_to_int['<PAD>'],
                                                           embed_mat))
    display_step = 5
    checkpoint = path1 + "model_1/trained_model.ckpt"
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
    with tf.Session(graph=train_graph, config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(tf.global_variables_initializer())
        for epoch_i in range(1, epochs+1):
            for batch_i, (embed_batch, targets_batch, sources_batch, targets_lengths, sources_lengths) in enumerate(
                    get_batches(train_target, train_source, batch_size,
                               source_letter_to_int['<PAD>'],
                               target_letter_to_int['<PAD>'],
                               embed_mat)):
                _ , loss = sess.run(
                    [train_op, cost],
                    {embed_seq: embed_batch,
                     input_data: sources_batch,
                     targets: targets_batch,
                     lr: learning_rate,
                     target_sequence_length: targets_lengths,
                     source_sequence_length: sources_lengths})
                if batch_i % display_step == 0:
                    # validation loss
                    validation_loss = sess.run(
                        [cost],
                        {embed_seq:valid_embed_batch,
                         input_data: valid_sources_batch,
                         targets: valid_targets_batch,
                         lr: learning_rate,
                         target_sequence_length: valid_targets_lengths,
                         source_sequence_length: valid_sources_lengths})
                    print('Epoch {:>3}/{} Batch {:>4}/{} - Training Loss: {:>6.3f} - Validation loss: {:>6.3f}'
                          .format(epoch_i,
                                  epochs,
                                  batch_i,
                                  len(train_source) // batch_size,
                                  loss,
                                  validation_loss[0]))
        # save model
        saver = tf.train.Saver()
        saver.save(sess, checkpoint)
print('Model Trained and Saved') | [
"decoder.decoding_layer",
"tensorflow.clip_by_value",
"tensorflow.identity",
"decoder.process_decoder_input",
"tensorflow.ConfigProto",
"tensorflow.reduce_max",
"tensorflow.GPUOptions",
"encoder.get_encoder_layer",
"tensorflow.placeholder",
"tensorflow.name_scope",
"tensorflow.train.Saver",
"t... | [((1300, 1362), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, 20]'], {'name': '"""embed_seq"""'}), "(tf.float32, [None, None, 20], name='embed_seq')\n", (1314, 1362), True, 'import tensorflow as tf\n'), ((1376, 1429), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {'name': '"""inputs"""'}), "(tf.int32, [None, None], name='inputs')\n", (1390, 1429), True, 'import tensorflow as tf\n'), ((1444, 1498), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {'name': '"""targets"""'}), "(tf.int32, [None, None], name='targets')\n", (1458, 1498), True, 'import tensorflow as tf\n'), ((1519, 1567), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""learning_rate"""'}), "(tf.float32, name='learning_rate')\n", (1533, 1567), True, 'import tensorflow as tf\n'), ((1722, 1786), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '(None,)'], {'name': '"""target_sequence_length"""'}), "(tf.int32, (None,), name='target_sequence_length')\n", (1736, 1786), True, 'import tensorflow as tf\n'), ((1820, 1880), 'tensorflow.reduce_max', 'tf.reduce_max', (['target_sequence_length'], {'name': '"""max_target_len"""'}), "(target_sequence_length, name='max_target_len')\n", (1833, 1880), True, 'import tensorflow as tf\n'), ((1910, 1974), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '(None,)'], {'name': '"""source_sequence_length"""'}), "(tf.int32, (None,), name='source_sequence_length')\n", (1924, 1974), True, 'import tensorflow as tf\n'), ((2624, 2758), 'encoder.get_encoder_layer', 'get_encoder_layer', (['embed_seq', 'input_data', 'rnn_size', 'num_layers', 'source_sequence_length', 'source_vocab_size', 'encoding_embedding_size'], {}), '(embed_seq, input_data, rnn_size, num_layers,\n source_sequence_length, source_vocab_size, encoding_embedding_size)\n', (2641, 2758), False, 'from encoder import get_encoder_layer\n'), ((3195, 3259), 'decoder.process_decoder_input', 
'process_decoder_input', (['targets', 'target_letter_to_int', 'batch_size'], {}), '(targets, target_letter_to_int, batch_size)\n', (3216, 3259), False, 'from decoder import decoding_layer, process_decoder_input\n'), ((3362, 3547), 'decoder.decoding_layer', 'decoding_layer', (['target_letter_to_int', 'decoding_embedding_size', 'num_layers', 'rnn_size', 'target_sequence_length', 'max_target_sequence_length', 'encoder_state', 'decoder_input', 'batch_size'], {}), '(target_letter_to_int, decoding_embedding_size, num_layers,\n rnn_size, target_sequence_length, max_target_sequence_length,\n encoder_state, decoder_input, batch_size)\n', (3376, 3547), False, 'from decoder import decoding_layer, process_decoder_input\n'), ((7240, 7256), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (7254, 7256), True, 'import tensorflow as tf\n'), ((7315, 7344), 'tensorflow.Session', 'tf.Session', ([], {'config': 'TF_CONFIG_'}), '(config=TF_CONFIG_)\n', (7325, 7344), True, 'import tensorflow as tf\n'), ((12176, 12226), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.7)'}), '(per_process_gpu_memory_fraction=0.7)\n', (12189, 12226), True, 'import tensorflow as tf\n'), ((5368, 5436), 'numpy.array', 'np.array', (['[[embed_mat[i] for i in psb] for psb in pad_sources_batch]'], {}), '([[embed_mat[i] for i in psb] for psb in pad_sources_batch])\n', (5376, 5436), True, 'import numpy as np\n'), ((8969, 8991), 'tensorflow.device', 'tf.device', (['device_name'], {}), '(device_name)\n', (8978, 8991), True, 'import tensorflow as tf\n'), ((9039, 9049), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (9047, 9049), True, 'import tensorflow as tf\n'), ((14173, 14189), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (14187, 14189), True, 'import tensorflow as tf\n'), ((10522, 10579), 'tensorflow.identity', 'tf.identity', (['training_decoder_output.rnn_output', '"""logits"""'], {}), "(training_decoder_output.rnn_output, 'logits')\n", (10533, 
10579), True, 'import tensorflow as tf\n'), ((10612, 10680), 'tensorflow.identity', 'tf.identity', (['predicting_decoder_output.sample_id'], {'name': '"""predictions"""'}), "(predicting_decoder_output.sample_id, name='predictions')\n", (10623, 10680), True, 'import tensorflow as tf\n'), ((10714, 10819), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['target_sequence_length', 'max_target_sequence_length'], {'dtype': 'tf.float32', 'name': '"""masks"""'}), "(target_sequence_length, max_target_sequence_length, dtype=\n tf.float32, name='masks')\n", (10730, 10819), True, 'import tensorflow as tf\n'), ((12341, 12374), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (12372, 12374), True, 'import tensorflow as tf\n'), ((10841, 10870), 'tensorflow.name_scope', 'tf.name_scope', (['"""optimization"""'], {}), "('optimization')\n", (10854, 10870), True, 'import tensorflow as tf\n'), ((10927, 10992), 'tensorflow.contrib.seq2seq.sequence_loss', 'tf.contrib.seq2seq.sequence_loss', (['training_logits', 'targets', 'masks'], {}), '(training_logits, targets, masks)\n', (10959, 10992), True, 'import tensorflow as tf\n'), ((11110, 11136), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr'], {}), '(lr)\n', (11132, 11136), True, 'import tensorflow as tf\n'), ((12274, 12313), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (12288, 12313), True, 'import tensorflow as tf\n'), ((11281, 11314), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['grad', '(-5.0)', '(5.0)'], {}), '(grad, -5.0, 5.0)\n', (11297, 11314), True, 'import tensorflow as tf\n')] |
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest, f_classif, chi2, f_regression
import numpy as np
import matplotlib.pyplot as plt
interval = 15  # step between tested SVC max_iter budgets
max_iter = 150  # largest SVC iteration budget evaluated
def pca(x):
    """Project x onto its first 9 principal components.

    Prints the total explained variance ratio as a side effect.
    """
    reducer = PCA(n_components=9)
    transformed = reducer.fit_transform(x)
    print(reducer.explained_variance_ratio_.sum())
    return transformed
def select(x, y):
    """Keep the 10 features of x with the highest ANOVA F-scores w.r.t. y."""
    chooser = SelectKBest(score_func=f_classif, k=10)
    return chooser.fit_transform(x, y)
################################### SVM Part #####################################
def svm(data_name):
    """Train RBF-kernel SVMs under increasing iteration budgets and print
    the rounded train/test accuracies.

    Args:
        data_name (str): dataset prefix under data/ (e.g. 'madelon', 'splice').
    """
    # load data (train/test features and targets from .npy files)
    x_tr = np.load('data/' + data_name + '_feature.npy')
    y_tr = np.load('data/' + data_name + '_target.npy')
    x_t = np.load('data/' + data_name + '.t_feature.npy')
    y_t = np.load('data/' + data_name + '.t_target.npy')
    if data_name == 'madelon':
        # BUGFIX: fit the scaler on the training split only and reuse it for
        # the test split; the original re-fit the scaler on the test set,
        # which leaks test statistics and scales the two splits inconsistently.
        scaler = StandardScaler()
        x_tr = scaler.fit_transform(x_tr)
        x_t = scaler.transform(x_t)
    res_tr = []
    res_t = []
    # training stage: one SVM per iteration budget
    for i in range(interval, max_iter + 1, interval):
        model = SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
                    decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf',
                    max_iter=i, probability=False, shrinking=True,
                    tol=0.001, verbose=False, random_state=666)
        model.fit(x_tr, y_tr)
        res_tr.append(round(model.score(x_tr, y_tr), 3))
        res_t.append(round(model.score(x_t, y_t), 3))
    print('train: ', res_tr)
    print('test: ', res_t)
def plot_s_kernel_splice():
    """Plot SVM kernel comparison on the splice dataset: one figure for the
    training accuracies and one for the test accuracies, saved under
    report/img/. Accuracy values were recorded from svm() runs."""
    x = list(range(interval, max_iter + 1, interval))
    y_tr_linear = [0.612, 0.598, 0.633, 0.547, 0.577, 0.599, 0.645, 0.558, 0.589, 0.607]
    y_tr_poly3 = [0.579, 0.67, 0.718, 0.725, 0.739, 0.767, 0.798, 0.834, 0.864, 0.848]
    y_tr_rbf = [0.659, 0.719, 0.821, 0.865, 0.894, 0.937, 0.952, 0.981, 0.989, 0.998]
    y_tr_sigmoid = [0.498, 0.489, 0.488, 0.488, 0.491, 0.491, 0.492, 0.492, 0.492, 0.491]
    y_t_linear = [0.63, 0.583, 0.65, 0.556, 0.575, 0.589, 0.656, 0.558, 0.62, 0.594]
    y_t_poly3 = [0.559, 0.694, 0.69, 0.695, 0.733, 0.73, 0.732, 0.781, 0.798, 0.793]
    y_t_rbf = [0.64, 0.7, 0.774, 0.783, 0.813, 0.844, 0.834, 0.869, 0.874, 0.887]
    y_t_sigmoid = [0.475, 0.456, 0.453, 0.455, 0.456, 0.456, 0.457, 0.457, 0.457, 0.459]

    def _draw(series, out_path):
        # One figure: all line plots first, then all scatter markers (same
        # call order as the original, so the rendering is identical).
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        for ys, color, label in series:
            ax.plot(x, ys, color=color, linewidth=1.7, label=label)
        for ys, color, _ in series:
            ax.scatter(x, ys, s=13, c=color)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(out_path)
        plt.show()

    _draw([(y_tr_linear, '#90EE90', 'no kernel'),
           (y_tr_poly3, '#ffa07a', '3-polynomial'),
           (y_tr_rbf, '#9999ff', 'rbf'),
           (y_tr_sigmoid, '#F0E68C', 'sigmoid')],
          'report/img/svm_kernel_splice_tr')
    _draw([(y_t_linear, '#90EE90', 'no kernel'),
           (y_t_poly3, '#ffa07a', '3-polynomial'),
           (y_t_rbf, '#9999ff', 'rbf'),
           (y_t_sigmoid, '#F0E68C', 'sigmoid')],
          'report/img/svm_kernel_splice_t')
def plot_s_kernel_sat():
    """Redraw SVM accuracy-vs-iterations curves for four kernels on the sat dataset.

    Accuracies were recorded offline from runs of
    SVC(C=1.0, degree=3, gamma='auto', kernel=<k>, max_iter=i, random_state=666);
    this function only plots them.  Saves two figures:
    report/img/svm_kernel_sat_tr (training set) and
    report/img/svm_kernel_sat_t (testing set).
    """
    iters = list(range(interval, max_iter + 1, interval))
    # One (colour, legend label) pair per kernel, in plotting order.
    styles = (('#90EE90', 'no kernel'), ('#ffa07a', '3-polynomial'),
              ('#9999ff', 'rbf'), ('#F0E68C', 'sigmoid'))
    train_series = [
        [0.633, 0.726, 0.801, 0.655, 0.665, 0.808, 0.833, 0.826, 0.853, 0.857],
        [0.642, 0.664, 0.658, 0.638, 0.651, 0.661, 0.664, 0.693, 0.68, 0.644],
        [0.688, 0.685, 0.676, 0.715, 0.582, 0.624, 0.783, 0.823, 0.826, 0.828],
        [0.657, 0.706, 0.647, 0.681, 0.679, 0.648, 0.683, 0.776, 0.78, 0.798],
    ]
    test_series = [
        [0.56, 0.673, 0.776, 0.662, 0.684, 0.78, 0.796, 0.796, 0.806, 0.825],
        [0.632, 0.656, 0.652, 0.644, 0.652, 0.66, 0.666, 0.688, 0.674, 0.657],
        [0.664, 0.682, 0.656, 0.707, 0.608, 0.63, 0.764, 0.795, 0.798, 0.804],
        [0.659, 0.696, 0.646, 0.674, 0.688, 0.666, 0.688, 0.749, 0.748, 0.764],
    ]
    for series, fname in ((train_series, 'report/img/svm_kernel_sat_tr'),
                          (test_series, 'report/img/svm_kernel_sat_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        # All lines first, then all markers, matching the original draw order.
        for ys, (colour, label) in zip(series, styles):
            ax.plot(iters, ys, color=colour, linewidth=1.7, label=label)
        for ys, (colour, _) in zip(series, styles):
            ax.scatter(iters, ys, s=13, c=colour)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(fname)
        plt.show()
# poly kernel results: kernel_num (2-10)
# splice: tr:[0.852, 0.947, 0.983, 0.99, 0.99, 0.993, 0.99, 0.989, 0.983]
# t:[0.786, 0.857, 0.865, 0.871, 0.88, 0.874, 0.864, 0.867, 0.851]
# sat: tr:[0.651, 0.672, 0.492, 0.553, 0.475, 0.511, 0.471, 0.524, 0.373]
# t:[0.635, 0.666, 0.505, 0.554, 0.488, 0.512, 0.477, 0.528, 0.403]
def plot_s_dim_splice():
    """Redraw SVM (rbf kernel) accuracy curves on splice for four feature fractions.

    Accuracies were recorded offline from SVC(kernel='rbf', max_iter=i,
    random_state=666) runs on 25/50/75/100% feature subsets.  Saves
    report/img/svm_dim_splice_tr and report/img/svm_dim_splice_t.
    """
    iters = list(range(interval, max_iter + 1, interval))
    # One (colour, legend label) pair per feature fraction.
    styles = (('#90EE90', '25% features'), ('#ffa07a', '50% features'),
              ('#9999ff', '75% features'), ('#F0E68C', '100% features'))
    train_series = [
        [0.615, 0.75, 0.749, 0.831, 0.853, 0.855, 0.891, 0.942, 0.965, 0.97],
        [0.754, 0.762, 0.798, 0.812, 0.867, 0.889, 0.921, 0.966, 0.969, 0.997],
        [0.67, 0.7, 0.816, 0.846, 0.874, 0.922, 0.941, 0.966, 0.989, 0.997],
        [0.659, 0.719, 0.821, 0.865, 0.894, 0.937, 0.952, 0.981, 0.989, 0.998],
    ]
    test_series = [
        [0.635, 0.571, 0.567, 0.556, 0.585, 0.569, 0.569, 0.573, 0.603, 0.601],
        [0.591, 0.547, 0.56, 0.585, 0.586, 0.587, 0.587, 0.598, 0.61, 0.585],
        [0.51, 0.55, 0.584, 0.55, 0.581, 0.645, 0.583, 0.612, 0.637, 0.626],
        [0.64, 0.7, 0.774, 0.783, 0.813, 0.844, 0.834, 0.869, 0.874, 0.887],
    ]
    for series, fname in ((train_series, 'report/img/svm_dim_splice_tr'),
                          (test_series, 'report/img/svm_dim_splice_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        for ys, (colour, label) in zip(series, styles):
            ax.plot(iters, ys, color=colour, linewidth=1.7, label=label)
        for ys, (colour, _) in zip(series, styles):
            ax.scatter(iters, ys, s=13, c=colour)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(fname)
        plt.show()
def plot_s_dim_sat():
    """Redraw SVM (rbf kernel) accuracy curves on the sat dataset for four feature fractions.

    Accuracies were recorded offline from SVC(kernel='rbf', max_iter=i,
    random_state=666) runs on 25/50/75/100% feature subsets.  Saves
    report/img/svm_dim_sat_tr and report/img/svm_dim_sat_t.
    """
    iters = list(range(interval, max_iter + 1, interval))
    styles = (('#90EE90', '25% features'), ('#ffa07a', '50% features'),
              ('#9999ff', '75% features'), ('#F0E68C', '100% features'))
    train_series = [
        [0.617, 0.534, 0.558, 0.732, 0.674, 0.766, 0.764, 0.773, 0.797, 0.806],
        [0.661, 0.75, 0.749, 0.704, 0.736, 0.819, 0.802, 0.725, 0.809, 0.846],
        [0.709, 0.576, 0.651, 0.803, 0.733, 0.825, 0.811, 0.84, 0.859, 0.835],
        [0.633, 0.726, 0.801, 0.655, 0.665, 0.808, 0.833, 0.826, 0.853, 0.857],
    ]
    test_series = [
        [0.489, 0.502, 0.453, 0.496, 0.554, 0.56, 0.536, 0.652, 0.668, 0.663],
        [0.585, 0.616, 0.515, 0.571, 0.558, 0.648, 0.622, 0.59, 0.632, 0.658],
        [0.564, 0.426, 0.442, 0.619, 0.487, 0.648, 0.62, 0.648, 0.659, 0.655],
        [0.56, 0.673, 0.776, 0.662, 0.684, 0.78, 0.796, 0.796, 0.806, 0.825],
    ]
    for series, fname in ((train_series, 'report/img/svm_dim_sat_tr'),
                          (test_series, 'report/img/svm_dim_sat_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        for ys, (colour, label) in zip(series, styles):
            ax.plot(iters, ys, color=colour, linewidth=1.7, label=label)
        for ys, (colour, _) in zip(series, styles):
            ax.scatter(iters, ys, s=13, c=colour)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(fname)
        plt.show()
def plot_s_penalty_splice():
    """Grouped bar chart of SVM train/test accuracy on splice across penalty values C.

    Accuracies were recorded offline from SVC(kernel='rbf', C=<c>,
    random_state=666) runs.  Saves report/img/svm_penalty_splice.
    """
    c_values = (0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1.0, 3.0, 10.0)
    acc_tr = [0.853, 0.903, 0.961, 0.962, 0.962, 0.969, 0.99, 1.0, 1.0]
    acc_t = [0.736, 0.805, 0.874, 0.888, 0.893, 0.889, 0.897, 0.895, 0.897]
    total_width, n_groups = 0.75, 2
    width = total_width / n_groups
    # One 0.9-wide slot per C value, shifted left so the bar pair is centred.
    lefts = np.arange(9) * 0.9 - (total_width - width) / 2
    plt.bar(lefts, acc_tr, width=width, facecolor='#9999ff', edgecolor='white', label='Training set')
    plt.bar(lefts + width, acc_t, width=width, facecolor='#ffa07a', edgecolor='white', label='Testing set')
    # Annotate every bar with its accuracy, two decimals.
    for left, a_tr, a_t in zip(lefts, acc_tr, acc_t):
        plt.text(left - 0.02, a_tr, '%.2f' % a_tr, ha='center', va='bottom')
        plt.text(left + width + 0.075, a_t, '%.2f' % a_t, ha='center', va='bottom')
    ax = plt.gca()
    ax.set_xticks(lefts + width / 2)
    ax.set_xticklabels(c_values)
    plt.xlabel('Penalty parameter')
    plt.ylabel('Accuracy')
    plt.ylim(0, 1.245)  # headroom for the text labels above the tallest bars
    plt.legend()
    plt.savefig('report/img/svm_penalty_splice')
    plt.show()
def plot_s_penalty_sat():
    """Grouped bar chart of SVM train/test accuracy on the sat dataset across penalty values C.

    Accuracies were recorded offline from SVC(kernel='rbf', C=<c>,
    random_state=666) runs.  Saves report/img/svm_penalty_sat.
    """
    c_values = (0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1.0, 3.0, 10.0)
    acc_tr = [0.807, 0.829, 0.845, 0.854, 0.867, 0.87, 0.875, 0.88, 0.77]
    acc_t = [0.786, 0.808, 0.822, 0.826, 0.832, 0.832, 0.835, 0.829, 0.728]
    total_width, n_groups = 0.75, 2
    width = total_width / n_groups
    # One 0.9-wide slot per C value, shifted left so the bar pair is centred.
    lefts = np.arange(9) * 0.9 - (total_width - width) / 2
    plt.bar(lefts, acc_tr, width=width, facecolor='#9999ff', edgecolor='white', label='Training set')
    plt.bar(lefts + width, acc_t, width=width, facecolor='#ffa07a', edgecolor='white', label='Testing set')
    # Annotate every bar with its accuracy, two decimals.
    for left, a_tr, a_t in zip(lefts, acc_tr, acc_t):
        plt.text(left - 0.04, a_tr, '%.2f' % a_tr, ha='center', va='bottom')
        plt.text(left + width + 0.075, a_t, '%.2f' % a_t, ha='center', va='bottom')
    ax = plt.gca()
    ax.set_xticks(lefts + width / 2)
    ax.set_xticklabels(c_values)
    plt.xlabel('Penalty parameter')
    plt.ylabel('Accuracy')
    plt.ylim(0, 1.1)  # headroom for the text labels above the tallest bars
    plt.legend()
    plt.savefig('report/img/svm_penalty_sat')
    plt.show()
def plot_s_baseline():
    """Plot baseline SVM train/test accuracy-vs-iterations curves for splice and sat.

    Saves report/img/svm_baseline_splice and report/img/svm_baseline_sat.
    """
    iters = list(range(interval, max_iter + 1, interval))
    # (training accuracies, testing accuracies, output path) per dataset.
    datasets = (
        ([0.659, 0.719, 0.821, 0.865, 0.894, 0.937, 0.952, 0.981, 0.989, 0.998],
         [0.64, 0.7, 0.774, 0.783, 0.813, 0.844, 0.834, 0.869, 0.874, 0.887],
         'report/img/svm_baseline_splice'),
        ([0.688, 0.685, 0.676, 0.715, 0.582, 0.624, 0.783, 0.823, 0.826, 0.828],
         [0.664, 0.682, 0.656, 0.707, 0.608, 0.63, 0.764, 0.795, 0.798, 0.804],
         'report/img/svm_baseline_sat'),
    )
    for acc_tr, acc_t, fname in datasets:
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        ax.plot(iters, acc_tr, color='#9999ff', linewidth=1.7, label='Training set')
        ax.plot(iters, acc_t, color='#ffa07a', linewidth=1.7, label='Testing set')
        ax.scatter(iters, acc_tr, s=13, c='#9999ff')
        ax.scatter(iters, acc_t, s=13, c='#ffa07a')
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(fname)
        plt.show()
################################### SVM Part #####################################
################################### MLP Part #####################################
def mlp(data_name):
    """Train MLP classifiers at increasing iteration caps and print accuracies.

    Loads pre-saved feature/target arrays from data/<data_name>*.npy (training
    split) and data/<data_name>.t*.npy (testing split), fits a fresh
    MLPClassifier for each max_iter cap in range(interval, max_iter + 1,
    interval), and prints the rounded train/test accuracy lists.

    Args:
        data_name: dataset stem, e.g. 'splice' or 'satimage.scale'.
    """
    features_tr = np.load(f'data/{data_name}_feature.npy')
    targets_tr = np.load(f'data/{data_name}_target.npy')
    features_t = np.load(f'data/{data_name}.t_feature.npy')
    targets_t = np.load(f'data/{data_name}.t_target.npy')
    acc_tr, acc_t = [], []
    for cap in range(interval, max_iter + 1, interval):
        # Fresh model per cap; fixed seed keeps runs comparable across caps.
        clf = MLPClassifier(solver='adam', alpha=1e-3,
                            learning_rate_init=0.001, max_iter=cap,
                            activation='relu',
                            hidden_layer_sizes=(100,), random_state=666)
        clf.fit(features_tr, targets_tr)
        acc_tr.append(round(clf.score(features_tr, targets_tr), 3))
        acc_t.append(round(clf.score(features_t, targets_t), 3))
    print('train: ', acc_tr)
    print('test: ', acc_t)
def plot_activation_splice():
    """Redraw MLP accuracy-vs-iterations curves on splice for three activations.

    Accuracies were recorded offline from MLPClassifier(solver='adam',
    activation=<a>, max_iter=i, random_state=666) runs.  Saves
    report/img/mlp_activation_splice_tr and report/img/mlp_activation_splice_t.
    """
    iters = list(range(interval, max_iter + 1, interval))
    # One (colour, legend label) pair per activation function.
    styles = (('#90EE90', 'relu'), ('#ffa07a', 'sigmoid'), ('#9999ff', 'tanh'))
    train_series = [
        [0.698, 0.764, 0.813, 0.845, 0.865, 0.884, 0.906, 0.921, 0.921, 0.921],
        [0.517, 0.517, 0.721, 0.799, 0.819, 0.834, 0.845, 0.852, 0.857, 0.861],
        [0.65, 0.732, 0.793, 0.832, 0.863, 0.881, 0.905, 0.915, 0.929, 0.944],
    ]
    test_series = [
        [0.68, 0.759, 0.822, 0.85, 0.857, 0.867, 0.874, 0.883, 0.883, 0.883],
        [0.52, 0.52, 0.717, 0.835, 0.832, 0.827, 0.83, 0.834, 0.836, 0.839],
        [0.646, 0.721, 0.777, 0.805, 0.817, 0.822, 0.832, 0.836, 0.84, 0.84],
    ]
    for series, fname in ((train_series, 'report/img/mlp_activation_splice_tr'),
                          (test_series, 'report/img/mlp_activation_splice_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        for ys, (colour, label) in zip(series, styles):
            ax.plot(iters, ys, color=colour, linewidth=1.7, label=label)
        for ys, (colour, _) in zip(series, styles):
            ax.scatter(iters, ys, s=13, c=colour)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(fname)
        plt.show()
def plot_activation_sat():
    """Redraw MLP accuracy-vs-iterations curves on the sat dataset for three activations.

    Accuracies were recorded offline from MLPClassifier(solver='adam',
    activation=<a>, max_iter=i, random_state=666) runs.  Saves
    report/img/mlp_activation_sat_tr and report/img/mlp_activation_sat_t.
    """
    iters = list(range(interval, max_iter + 1, interval))
    styles = (('#90EE90', 'relu'), ('#ffa07a', 'sigmoid'), ('#9999ff', 'tanh'))
    train_series = [
        [0.15, 0.455, 0.816, 0.834, 0.845, 0.847, 0.854, 0.86, 0.862, 0.866],
        [0.099, 0.257, 0.419, 0.43, 0.412, 0.396, 0.394, 0.392, 0.39, 0.428],
        [0.476, 0.59, 0.588, 0.624, 0.614, 0.621, 0.629, 0.653, 0.776, 0.803],
    ]
    test_series = [
        [0.17, 0.445, 0.777, 0.788, 0.796, 0.795, 0.806, 0.812, 0.814, 0.814],
        [0.106, 0.198, 0.411, 0.421, 0.411, 0.406, 0.403, 0.401, 0.4, 0.452],
        [0.436, 0.566, 0.574, 0.618, 0.605, 0.605, 0.608, 0.622, 0.718, 0.745],
    ]
    for series, fname in ((train_series, 'report/img/mlp_activation_sat_tr'),
                          (test_series, 'report/img/mlp_activation_sat_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        for ys, (colour, label) in zip(series, styles):
            ax.plot(iters, ys, color=colour, linewidth=1.7, label=label)
        for ys, (colour, _) in zip(series, styles):
            ax.scatter(iters, ys, s=13, c=colour)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(fname)
        plt.show()
def plot_optimizer_splice():
    """Redraw MLP accuracy-vs-iterations curves on splice for three solvers.

    Accuracies were recorded offline from MLPClassifier(solver=<s>,
    max_iter=i, random_state=666) runs.  Saves
    report/img/mlp_optimizer_splice_tr and report/img/mlp_optimizer_splice_t.
    """
    iters = list(range(interval, max_iter + 1, interval))
    # One (colour, legend label) pair per solver.
    styles = (('#90EE90', 'lbfgs'), ('#ffa07a', 'sgd'), ('#9999ff', 'adam'))
    train_series = [
        [0.779, 0.847, 0.88, 0.913, 0.925, 0.943, 0.952, 0.963, 0.964, 0.97],
        [0.57, 0.63, 0.643, 0.673, 0.698, 0.723, 0.752, 0.761, 0.781, 0.796],
        [0.698, 0.764, 0.813, 0.845, 0.865, 0.884, 0.906, 0.921, 0.921, 0.921],
    ]
    test_series = [
        [0.794, 0.849, 0.848, 0.874, 0.878, 0.875, 0.873, 0.866, 0.857, 0.855],
        [0.595, 0.637, 0.651, 0.671, 0.703, 0.724, 0.746, 0.76, 0.768, 0.788],
        [0.68, 0.759, 0.822, 0.85, 0.857, 0.867, 0.874, 0.883, 0.883, 0.883],
    ]
    for series, fname in ((train_series, 'report/img/mlp_optimizer_splice_tr'),
                          (test_series, 'report/img/mlp_optimizer_splice_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        for ys, (colour, label) in zip(series, styles):
            ax.plot(iters, ys, color=colour, linewidth=1.7, label=label)
        for ys, (colour, _) in zip(series, styles):
            ax.scatter(iters, ys, s=13, c=colour)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(fname)
        plt.show()
def plot_optimizer_sat():
    """Redraw MLP accuracy-vs-iterations curves on the sat dataset for three solvers.

    Accuracies were recorded offline from MLPClassifier(solver=<s>,
    max_iter=i, random_state=666) runs.  Saves
    report/img/mlp_optimizer_sat_tr and report/img/mlp_optimizer_sat_t.
    """
    iters = list(range(interval, max_iter + 1, interval))
    styles = (('#90EE90', 'lbfgs'), ('#ffa07a', 'sgd'), ('#9999ff', 'adam'))
    train_series = [
        [0.257] * 10,  # lbfgs never moved off its starting accuracy here
        [0.099, 0.257, 0.257, 0.407, 0.434, 0.446, 0.458, 0.471, 0.485, 0.518],
        [0.15, 0.455, 0.816, 0.834, 0.845, 0.847, 0.854, 0.86, 0.862, 0.866],
    ]
    test_series = [
        [0.198] * 10,  # lbfgs, flat on the test split as well
        [0.106, 0.198, 0.198, 0.397, 0.43, 0.444, 0.45, 0.463, 0.477, 0.508],
        [0.17, 0.445, 0.777, 0.788, 0.796, 0.795, 0.806, 0.812, 0.814, 0.814],
    ]
    for series, fname in ((train_series, 'report/img/mlp_optimizer_sat_tr'),
                          (test_series, 'report/img/mlp_optimizer_sat_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        for ys, (colour, label) in zip(series, styles):
            ax.plot(iters, ys, color=colour, linewidth=1.7, label=label)
        for ys, (colour, _) in zip(series, styles):
            ax.scatter(iters, ys, s=13, c=colour)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(fname)
        plt.show()
def plot_lr_splice():
    """Grouped bar chart of MLP train/test accuracy on splice across learning rates.

    Accuracies were recorded offline from MLPClassifier(solver='adam',
    learning_rate_init=<lr>, max_iter=300, random_state=666) runs.
    Saves report/img/mlp_lr_splice.
    """
    lr_values = (0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1)
    acc_tr = [0.921, 0.937, 0.911, 0.832, 0.517, 0.517, 0.517]
    acc_t = [0.883, 0.888, 0.876, 0.833, 0.52, 0.52, 0.52]
    total_width, n_groups = 0.75, 2
    width = total_width / n_groups
    # One unit slot per learning rate, shifted left so the bar pair is centred.
    lefts = np.arange(7) - (total_width - width) / 2
    plt.bar(lefts, acc_tr, width=width, facecolor='#9999ff', edgecolor='white', label='Training set')
    plt.bar(lefts + width, acc_t, width=width, facecolor='#ffa07a', edgecolor='white', label='Testing set')
    # Annotate every bar with its accuracy, two decimals.
    for left, a_tr, a_t in zip(lefts, acc_tr, acc_t):
        plt.text(left - 0.02, a_tr, '%.2f' % a_tr, ha='center', va='bottom')
        plt.text(left + width + 0.05, a_t, '%.2f' % a_t, ha='center', va='bottom')
    ax = plt.gca()
    ax.set_xticks(lefts + width / 2)
    ax.set_xticklabels(lr_values)
    plt.xlabel('Learning rate')
    plt.ylabel('Accuracy')
    plt.legend()
    plt.savefig('report/img/mlp_lr_splice')
    plt.show()
def plot_lr_sat():
    """Grouped bar chart of MLP train/test accuracy on the sat dataset across learning rates.

    Accuracies were recorded offline from MLPClassifier(solver='adam',
    learning_rate_init=<lr>, max_iter=300, random_state=666) runs.
    Saves report/img/mlp_lr_sat.
    """
    lr_values = (0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1)
    acc_tr = [0.877, 0.875, 0.87, 0.832, 0.87, 0.862, 0.715]
    acc_t = [0.832, 0.818, 0.812, 0.833, 0.826, 0.817, 0.661]
    total_width, n_groups = 0.75, 2
    width = total_width / n_groups
    # One unit slot per learning rate, shifted left so the bar pair is centred.
    lefts = np.arange(7) - (total_width - width) / 2
    plt.bar(lefts, acc_tr, width=width, facecolor='#9999ff', edgecolor='white', label='Training set')
    plt.bar(lefts + width, acc_t, width=width, facecolor='#ffa07a', edgecolor='white', label='Testing set')
    # Annotate every bar with its accuracy, two decimals.
    for left, a_tr, a_t in zip(lefts, acc_tr, acc_t):
        plt.text(left - 0.02, a_tr, '%.2f' % a_tr, ha='center', va='bottom')
        plt.text(left + width + 0.05, a_t, '%.2f' % a_t, ha='center', va='bottom')
    ax = plt.gca()
    ax.set_xticks(lefts + width / 2)
    ax.set_xticklabels(lr_values)
    plt.xlabel('Learning rate')
    plt.ylabel('Accuracy')
    plt.ylim(0, 1.06)  # headroom for the text labels above the tallest bars
    plt.legend()
    plt.savefig('report/img/mlp_lr_sat')
    plt.show()
def plot_dim_splice():
    """Redraw MLP accuracy curves on splice for four feature fractions.

    Accuracies were recorded offline from MLPClassifier(solver='adam',
    max_iter=i, random_state=666) runs on 25/50/75/100% feature subsets.
    Saves report/img/mlp_dim_splice_tr and report/img/mlp_dim_splice_t.
    """
    iters = list(range(interval, max_iter + 1, interval))
    styles = (('#90EE90', '25% features'), ('#ffa07a', '50% features'),
              ('#9999ff', '75% features'), ('#F0E68C', '100% features'))
    train_series = [
        [0.665, 0.727, 0.769, 0.792, 0.815, 0.832, 0.841, 0.853, 0.865, 0.876],
        [0.541, 0.727, 0.814, 0.833, 0.854, 0.864, 0.876, 0.887, 0.899, 0.909],
        [0.524, 0.712, 0.811, 0.858, 0.891, 0.914, 0.932, 0.956, 0.964, 0.974],
        [0.698, 0.764, 0.813, 0.845, 0.865, 0.884, 0.906, 0.921, 0.921, 0.921],
    ]
    test_series = [
        [0.392, 0.443, 0.486, 0.525, 0.55, 0.562, 0.573, 0.574, 0.575, 0.58],
        # NOTE(review): this 50%-features test list is identical to its training
        # list above — looks like a copy/paste in the recorded data; confirm.
        [0.541, 0.727, 0.814, 0.833, 0.854, 0.864, 0.876, 0.887, 0.899, 0.909],
        [0.522, 0.564, 0.56, 0.569, 0.571, 0.571, 0.576, 0.586, 0.586, 0.582],
        [0.68, 0.759, 0.822, 0.85, 0.857, 0.867, 0.874, 0.883, 0.883, 0.883],
    ]
    for series, fname in ((train_series, 'report/img/mlp_dim_splice_tr'),
                          (test_series, 'report/img/mlp_dim_splice_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        for ys, (colour, label) in zip(series, styles):
            ax.plot(iters, ys, color=colour, linewidth=1.7, label=label)
        for ys, (colour, _) in zip(series, styles):
            ax.scatter(iters, ys, s=13, c=colour)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(fname)
        plt.show()
def plot_dim_sat():
    """Redraw MLP accuracy curves on the sat dataset for four feature fractions.

    Accuracies were recorded offline from MLPClassifier(solver='adam',
    max_iter=i, random_state=666) runs on 25/50/75/100% feature subsets.
    Saves report/img/mlp_dim_sat_tr and report/img/mlp_dim_sat_t.
    """
    iters = list(range(interval, max_iter + 1, interval))
    styles = (('#90EE90', '25% features'), ('#ffa07a', '50% features'),
              ('#9999ff', '75% features'), ('#F0E68C', '100% features'))
    train_series = [
        [0.413, 0.44, 0.489, 0.681, 0.79, 0.8, 0.809, 0.815, 0.823, 0.826],
        [0.174, 0.396, 0.477, 0.649, 0.769, 0.776, 0.792, 0.806, 0.823, 0.832],
        [0.342, 0.406, 0.412, 0.416, 0.453, 0.504, 0.742, 0.754, 0.763, 0.798],
        [0.15, 0.455, 0.816, 0.834, 0.845, 0.847, 0.854, 0.86, 0.862, 0.866],
    ]
    test_series = [
        [0.384, 0.424, 0.482, 0.604, 0.683, 0.68, 0.669, 0.664, 0.664, 0.662],
        [0.235, 0.3, 0.354, 0.536, 0.604, 0.574, 0.57, 0.568, 0.575, 0.583],
        [0.322, 0.372, 0.382, 0.432, 0.452, 0.464, 0.462, 0.469, 0.487, 0.522],
        [0.17, 0.445, 0.777, 0.788, 0.796, 0.795, 0.806, 0.812, 0.814, 0.814],
    ]
    for series, fname in ((train_series, 'report/img/mlp_dim_sat_tr'),
                          (test_series, 'report/img/mlp_dim_sat_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        for ys, (colour, label) in zip(series, styles):
            ax.plot(iters, ys, color=colour, linewidth=1.7, label=label)
        for ys, (colour, _) in zip(series, styles):
            ax.scatter(iters, ys, s=13, c=colour)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(fname)
        plt.show()
def plot_archi_sat():
    """Redraw MLP accuracy curves on the sat dataset for four hidden-layer layouts.

    Accuracies were recorded offline from MLPClassifier(solver='adam',
    hidden_layer_sizes=<h>, max_iter=i, random_state=666) runs with layouts
    (10, 10), (10, 10, 10, 10), (50, 50) and (50, 50, 50, 50).
    Saves report/img/mlp_archi_sat_tr and report/img/mlp_archi_sat_t.
    """
    iters = list(range(interval, max_iter + 1, interval))
    styles = (('#90EE90', '(10, 10)'), ('#ffa07a', '(10, 10, 10, 10)'),
              ('#9999ff', '(50, 50)'), ('#F0E68C', '(50, 50, 50, 50)'))
    train_series = [
        [0.15, 0.455, 0.816, 0.834, 0.845, 0.847, 0.854, 0.86, 0.862, 0.866],
        [0.257] * 10,  # the deep narrow net never left its starting accuracy
        [0.679, 0.729, 0.743, 0.756, 0.762, 0.769, 0.847, 0.858, 0.871, 0.901],
        [0.337, 0.558, 0.558, 0.611, 0.652, 0.652, 0.652, 0.652, 0.652, 0.652],
    ]
    test_series = [
        [0.17, 0.445, 0.777, 0.788, 0.796, 0.795, 0.806, 0.812, 0.814, 0.814],
        [0.198] * 10,  # flat on the test split as well
        [0.652, 0.714, 0.719, 0.723, 0.726, 0.727, 0.779, 0.79, 0.814, 0.83],
        [0.279, 0.484, 0.476, 0.552, 0.606, 0.606, 0.606, 0.606, 0.606, 0.606],
    ]
    for series, fname, y_top in (
            (train_series, 'report/img/mlp_archi_sat_tr', 0.97),
            (test_series, 'report/img/mlp_archi_sat_t', 0.92)):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        for ys, (colour, label) in zip(series, styles):
            ax.plot(iters, ys, color=colour, linewidth=1.7, label=label)
        for ys, (colour, _) in zip(series, styles):
            ax.scatter(iters, ys, s=13, c=colour)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.ylim(0, y_top)
        plt.legend()
        plt.savefig(fname)
        plt.show()
def plot_archi_splice():
    """Redraw MLP accuracy curves on splice for four hidden-layer layouts.

    Accuracies were recorded offline from MLPClassifier(solver='adam',
    hidden_layer_sizes=<h>, max_iter=i, random_state=666) runs with layouts
    (10, 10), (10, 10, 10, 10), (50, 50) and (50, 50, 50, 50).
    Saves report/img/mlp_archi_splice_tr and report/img/mlp_archi_splice_t.
    """
    iters = list(range(interval, max_iter + 1, interval))
    styles = (('#90EE90', '(10, 10)'), ('#ffa07a', '(10, 10, 10, 10)'),
              ('#9999ff', '(50, 50)'), ('#F0E68C', '(50, 50, 50, 50)'))
    train_series = [
        [0.698, 0.764, 0.813, 0.845, 0.865, 0.884, 0.906, 0.921, 0.921, 0.921],
        [0.616, 0.691, 0.779, 0.827, 0.838, 0.858, 0.86, 0.86, 0.86, 0.86],
        [0.483] * 10,  # the (50, 50) net never left its starting accuracy
        [0.785, 0.867, 0.914, 0.969, 0.992, 1.0, 1.0, 1.0, 1.0, 1.0],
    ]
    test_series = [
        [0.68, 0.759, 0.822, 0.85, 0.857, 0.867, 0.874, 0.883, 0.883, 0.883],
        [0.608, 0.687, 0.775, 0.816, 0.835, 0.843, 0.842, 0.842, 0.842, 0.842],
        [0.48] * 10,  # flat on the test split as well
        [0.76, 0.83, 0.844, 0.844, 0.849, 0.842, 0.841, 0.844, 0.844, 0.844],
    ]
    # y_top is None for the training figure (default limits), 0.92 for testing.
    for series, fname, y_top in (
            (train_series, 'report/img/mlp_archi_splice_tr', None),
            (test_series, 'report/img/mlp_archi_splice_t', 0.92)):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        for ys, (colour, label) in zip(series, styles):
            ax.plot(iters, ys, color=colour, linewidth=1.7, label=label)
        for ys, (colour, _) in zip(series, styles):
            ax.scatter(iters, ys, s=13, c=colour)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        if y_top is not None:
            plt.ylim(0, y_top)
        plt.legend()
        plt.savefig(fname)
        plt.show()
def plot_baseline():
    """Plot baseline MLP train/test accuracy-vs-iterations curves for splice and sat.

    Saves report/img/mlp_baseline_splice and report/img/mlp_baseline_sat.
    """
    iters = list(range(interval, max_iter + 1, interval))
    # (training accuracies, testing accuracies, output path) per dataset.
    datasets = (
        ([0.837, 0.861, 0.888, 0.9, 0.935, 0.947, 0.965, 0.981, 0.988, 0.995],
         [0.828, 0.849, 0.857, 0.851, 0.867, 0.874, 0.883, 0.886, 0.887, 0.891],
         'report/img/mlp_baseline_splice'),
        ([0.834, 0.862, 0.879, 0.892, 0.899, 0.905, 0.913, 0.919, 0.924, 0.93],
         [0.816, 0.831, 0.838, 0.854, 0.862, 0.865, 0.868, 0.872, 0.872, 0.875],
         'report/img/mlp_baseline_sat'),
    )
    for acc_tr, acc_t, fname in datasets:
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        ax.plot(iters, acc_tr, color='#9999ff', linewidth=1.7, label='Training set')
        ax.plot(iters, acc_t, color='#ffa07a', linewidth=1.7, label='Testing set')
        ax.scatter(iters, acc_tr, s=13, c='#9999ff')
        ax.scatter(iters, acc_t, s=13, c='#ffa07a')
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(fname)
        plt.show()
################################### MLP Part #####################################
# Script entry point.  With the current toggles only the MLP baseline plot is
# generated; uncomment the other calls below to re-run the full experiments.
if __name__ == '__main__':
    # Available dataset names: 'splice', 'satimage.scale'
    ## MLP ##
    #mlp('satimage.scale')
    #mlp('splice')
    plot_baseline()
    ## SVM ##
    #svm('satimage.scale')
    #svm('splice')
    #plot_s_baseline()
"numpy.load",
"matplotlib.pyplot.show",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.ylim",
"sklearn.svm.SVC",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure",
"sklearn.decomposition.PCA",
"numpy.arange",
"sklearn.neural_n... | [((366, 385), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(9)'}), '(n_components=9)\n', (369, 385), False, 'from sklearn.decomposition import PCA\n'), ((533, 572), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'score_func': 'f_classif', 'k': '(10)'}), '(score_func=f_classif, k=10)\n', (544, 572), False, 'from sklearn.feature_selection import SelectKBest, f_classif, chi2, f_regression\n'), ((809, 854), 'numpy.load', 'np.load', (["('data/' + data_name + '_feature.npy')"], {}), "('data/' + data_name + '_feature.npy')\n", (816, 854), True, 'import numpy as np\n'), ((866, 910), 'numpy.load', 'np.load', (["('data/' + data_name + '_target.npy')"], {}), "('data/' + data_name + '_target.npy')\n", (873, 910), True, 'import numpy as np\n'), ((921, 968), 'numpy.load', 'np.load', (["('data/' + data_name + '.t_feature.npy')"], {}), "('data/' + data_name + '.t_feature.npy')\n", (928, 968), True, 'import numpy as np\n'), ((979, 1025), 'numpy.load', 'np.load', (["('data/' + data_name + '.t_target.npy')"], {}), "('data/' + data_name + '.t_target.npy')\n", (986, 1025), True, 'import numpy as np\n'), ((2987, 3013), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (2997, 3013), True, 'import matplotlib.pyplot as plt\n'), ((3023, 3032), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3030, 3032), True, 'import matplotlib.pyplot as plt\n'), ((3611, 3635), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (3621, 3635), True, 'import matplotlib.pyplot as plt\n'), ((3640, 3662), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (3650, 3662), True, 'import matplotlib.pyplot as plt\n'), ((3667, 3679), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3677, 3679), True, 'import matplotlib.pyplot as plt\n'), ((3684, 3730), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""report/img/svm_kernel_splice_tr"""'], {}), "('report/img/svm_kernel_splice_tr')\n", (3695, 3730), True, 'import matplotlib.pyplot as plt\n'), ((3735, 3745), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3743, 3745), True, 'import matplotlib.pyplot as plt\n'), ((3751, 3777), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (3761, 3777), True, 'import matplotlib.pyplot as plt\n'), ((3787, 3796), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3794, 3796), True, 'import matplotlib.pyplot as plt\n'), ((4367, 4391), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (4377, 4391), True, 'import matplotlib.pyplot as plt\n'), ((4396, 4418), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (4406, 4418), True, 'import matplotlib.pyplot as plt\n'), ((4423, 4435), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4433, 4435), True, 'import matplotlib.pyplot as plt\n'), ((4440, 4485), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/svm_kernel_splice_t"""'], {}), "('report/img/svm_kernel_splice_t')\n", (4451, 4485), True, 'import matplotlib.pyplot as plt\n'), ((4490, 4500), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4498, 4500), True, 'import matplotlib.pyplot as plt\n'), ((5570, 5596), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (5580, 5596), True, 'import matplotlib.pyplot as plt\n'), ((5606, 5615), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5613, 5615), True, 'import matplotlib.pyplot as plt\n'), ((6194, 6218), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (6204, 6218), True, 'import matplotlib.pyplot as plt\n'), ((6223, 6245), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (6233, 6245), True, 'import matplotlib.pyplot as plt\n'), ((6250, 6262), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6260, 6262), True, 'import matplotlib.pyplot as plt\n'), ((6267, 6310), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/svm_kernel_sat_tr"""'], {}), "('report/img/svm_kernel_sat_tr')\n", (6278, 6310), True, 'import matplotlib.pyplot as plt\n'), ((6315, 6325), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6323, 6325), True, 'import matplotlib.pyplot as plt\n'), ((6331, 6357), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (6341, 6357), True, 'import matplotlib.pyplot as plt\n'), ((6367, 6376), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6374, 6376), True, 'import matplotlib.pyplot as plt\n'), ((6947, 6971), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (6957, 6971), True, 'import matplotlib.pyplot as plt\n'), ((6976, 6998), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (6986, 6998), True, 'import matplotlib.pyplot as plt\n'), ((7003, 7015), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7013, 7015), True, 'import matplotlib.pyplot as plt\n'), ((7020, 7062), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/svm_kernel_sat_t"""'], {}), "('report/img/svm_kernel_sat_t')\n", (7031, 7062), True, 'import matplotlib.pyplot as plt\n'), ((7067, 7077), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7075, 7077), True, 'import matplotlib.pyplot as plt\n'), ((8457, 8483), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (8467, 8483), True, 'import matplotlib.pyplot as plt\n'), ((8493, 8502), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8500, 8502), True, 'import matplotlib.pyplot as plt\n'), ((9065, 9089), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (9075, 9089), True, 'import matplotlib.pyplot as plt\n'), ((9094, 9116), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (9104, 9116), True, 'import matplotlib.pyplot as plt\n'), ((9121, 9133), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9131, 9133), True, 'import matplotlib.pyplot as plt\n'), ((9138, 9181), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/svm_dim_splice_tr"""'], {}), "('report/img/svm_dim_splice_tr')\n", (9149, 9181), True, 'import matplotlib.pyplot as plt\n'), ((9186, 9196), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9194, 9196), True, 'import matplotlib.pyplot as plt\n'), ((9202, 9228), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (9212, 9228), True, 'import matplotlib.pyplot as plt\n'), ((9238, 9247), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9245, 9247), True, 'import matplotlib.pyplot as plt\n'), ((9802, 9826), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (9812, 9826), True, 'import matplotlib.pyplot as plt\n'), ((9831, 9853), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (9841, 9853), True, 'import matplotlib.pyplot as plt\n'), ((9858, 9870), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9868, 9870), True, 'import matplotlib.pyplot as plt\n'), ((9875, 9917), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/svm_dim_splice_t"""'], {}), "('report/img/svm_dim_splice_t')\n", (9886, 9917), True, 'import matplotlib.pyplot as plt\n'), ((9922, 9932), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9930, 9932), True, 'import matplotlib.pyplot as plt\n'), ((10961, 10987), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (10971, 10987), True, 'import matplotlib.pyplot as plt\n'), ((10997, 11006), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11004, 11006), True, 'import matplotlib.pyplot as plt\n'), ((11569, 
11593), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (11579, 11593), True, 'import matplotlib.pyplot as plt\n'), ((11598, 11620), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (11608, 11620), True, 'import matplotlib.pyplot as plt\n'), ((11625, 11637), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11635, 11637), True, 'import matplotlib.pyplot as plt\n'), ((11642, 11682), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/svm_dim_sat_tr"""'], {}), "('report/img/svm_dim_sat_tr')\n", (11653, 11682), True, 'import matplotlib.pyplot as plt\n'), ((11687, 11697), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11695, 11697), True, 'import matplotlib.pyplot as plt\n'), ((11703, 11729), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (11713, 11729), True, 'import matplotlib.pyplot as plt\n'), ((11739, 11748), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11746, 11748), True, 'import matplotlib.pyplot as plt\n'), ((12303, 12327), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (12313, 12327), True, 'import matplotlib.pyplot as plt\n'), ((12332, 12354), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (12342, 12354), True, 'import matplotlib.pyplot as plt\n'), ((12359, 12371), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12369, 12371), True, 'import matplotlib.pyplot as plt\n'), ((12376, 12415), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/svm_dim_sat_t"""'], {}), "('report/img/svm_dim_sat_t')\n", (12387, 12415), True, 'import matplotlib.pyplot as plt\n'), ((12420, 12430), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12428, 12430), True, 'import matplotlib.pyplot as plt\n'), ((13085, 13183), 'matplotlib.pyplot.bar', 'plt.bar', (['x_ax', 'y_tr'], {'width': 'width', 'facecolor': 
'"""#9999ff"""', 'edgecolor': '"""white"""', 'label': '"""Training set"""'}), "(x_ax, y_tr, width=width, facecolor='#9999ff', edgecolor='white',\n label='Training set')\n", (13092, 13183), True, 'import matplotlib.pyplot as plt\n'), ((13184, 13289), 'matplotlib.pyplot.bar', 'plt.bar', (['(x_ax + width)', 'y_t'], {'width': 'width', 'facecolor': '"""#ffa07a"""', 'edgecolor': '"""white"""', 'label': '"""Testing set"""'}), "(x_ax + width, y_t, width=width, facecolor='#ffa07a', edgecolor=\n 'white', label='Testing set')\n", (13191, 13289), True, 'import matplotlib.pyplot as plt\n'), ((13487, 13496), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13494, 13496), True, 'import matplotlib.pyplot as plt\n'), ((13611, 13642), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Penalty parameter"""'], {}), "('Penalty parameter')\n", (13621, 13642), True, 'import matplotlib.pyplot as plt\n'), ((13647, 13669), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (13657, 13669), True, 'import matplotlib.pyplot as plt\n'), ((13674, 13692), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1.245)'], {}), '(0, 1.245)\n', (13682, 13692), True, 'import matplotlib.pyplot as plt\n'), ((13697, 13709), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13707, 13709), True, 'import matplotlib.pyplot as plt\n'), ((13714, 13758), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/svm_penalty_splice"""'], {}), "('report/img/svm_penalty_splice')\n", (13725, 13758), True, 'import matplotlib.pyplot as plt\n'), ((13763, 13773), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13771, 13773), True, 'import matplotlib.pyplot as plt\n'), ((14427, 14525), 'matplotlib.pyplot.bar', 'plt.bar', (['x_ax', 'y_tr'], {'width': 'width', 'facecolor': '"""#9999ff"""', 'edgecolor': '"""white"""', 'label': '"""Training set"""'}), "(x_ax, y_tr, width=width, facecolor='#9999ff', edgecolor='white',\n label='Training set')\n", (14434, 14525), True, 
'import matplotlib.pyplot as plt\n'), ((14526, 14631), 'matplotlib.pyplot.bar', 'plt.bar', (['(x_ax + width)', 'y_t'], {'width': 'width', 'facecolor': '"""#ffa07a"""', 'edgecolor': '"""white"""', 'label': '"""Testing set"""'}), "(x_ax + width, y_t, width=width, facecolor='#ffa07a', edgecolor=\n 'white', label='Testing set')\n", (14533, 14631), True, 'import matplotlib.pyplot as plt\n'), ((14829, 14838), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (14836, 14838), True, 'import matplotlib.pyplot as plt\n'), ((14953, 14984), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Penalty parameter"""'], {}), "('Penalty parameter')\n", (14963, 14984), True, 'import matplotlib.pyplot as plt\n'), ((14989, 15011), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (14999, 15011), True, 'import matplotlib.pyplot as plt\n'), ((15016, 15032), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1.1)'], {}), '(0, 1.1)\n', (15024, 15032), True, 'import matplotlib.pyplot as plt\n'), ((15037, 15049), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15047, 15049), True, 'import matplotlib.pyplot as plt\n'), ((15054, 15095), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/svm_penalty_sat"""'], {}), "('report/img/svm_penalty_sat')\n", (15065, 15095), True, 'import matplotlib.pyplot as plt\n'), ((15100, 15110), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15108, 15110), True, 'import matplotlib.pyplot as plt\n'), ((15538, 15564), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (15548, 15564), True, 'import matplotlib.pyplot as plt\n'), ((15574, 15583), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (15581, 15583), True, 'import matplotlib.pyplot as plt\n'), ((15918, 15942), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (15928, 15942), True, 'import matplotlib.pyplot as plt\n'), ((15947, 15969), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (15957, 15969), True, 'import matplotlib.pyplot as plt\n'), ((15974, 15986), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15984, 15986), True, 'import matplotlib.pyplot as plt\n'), ((15991, 16036), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/svm_baseline_splice"""'], {}), "('report/img/svm_baseline_splice')\n", (16002, 16036), True, 'import matplotlib.pyplot as plt\n'), ((16041, 16051), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16049, 16051), True, 'import matplotlib.pyplot as plt\n'), ((16057, 16083), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (16067, 16083), True, 'import matplotlib.pyplot as plt\n'), ((16093, 16102), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (16100, 16102), True, 'import matplotlib.pyplot as plt\n'), ((16425, 16449), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (16435, 16449), True, 'import matplotlib.pyplot as plt\n'), ((16454, 16476), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (16464, 16476), True, 'import matplotlib.pyplot as plt\n'), ((16481, 16493), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16491, 16493), True, 'import matplotlib.pyplot as plt\n'), ((16498, 16540), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/svm_baseline_sat"""'], {}), "('report/img/svm_baseline_sat')\n", (16509, 16540), True, 'import matplotlib.pyplot as plt\n'), ((16545, 16555), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16553, 16555), True, 'import matplotlib.pyplot as plt\n'), ((16774, 16819), 'numpy.load', 'np.load', (["('data/' + data_name + '_feature.npy')"], {}), "('data/' + data_name + '_feature.npy')\n", (16781, 16819), True, 'import numpy as np\n'), ((16831, 16875), 'numpy.load', 'np.load', (["('data/' + data_name + '_target.npy')"], 
{}), "('data/' + data_name + '_target.npy')\n", (16838, 16875), True, 'import numpy as np\n'), ((16886, 16933), 'numpy.load', 'np.load', (["('data/' + data_name + '.t_feature.npy')"], {}), "('data/' + data_name + '.t_feature.npy')\n", (16893, 16933), True, 'import numpy as np\n'), ((16944, 16990), 'numpy.load', 'np.load', (["('data/' + data_name + '.t_target.npy')"], {}), "('data/' + data_name + '.t_target.npy')\n", (16951, 16990), True, 'import numpy as np\n'), ((18371, 18397), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (18381, 18397), True, 'import matplotlib.pyplot as plt\n'), ((18407, 18416), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (18414, 18416), True, 'import matplotlib.pyplot as plt\n'), ((18853, 18877), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (18863, 18877), True, 'import matplotlib.pyplot as plt\n'), ((18882, 18904), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (18892, 18904), True, 'import matplotlib.pyplot as plt\n'), ((18909, 18921), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18919, 18921), True, 'import matplotlib.pyplot as plt\n'), ((18926, 18976), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_activation_splice_tr"""'], {}), "('report/img/mlp_activation_splice_tr')\n", (18937, 18976), True, 'import matplotlib.pyplot as plt\n'), ((18981, 18991), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18989, 18991), True, 'import matplotlib.pyplot as plt\n'), ((18997, 19023), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (19007, 19023), True, 'import matplotlib.pyplot as plt\n'), ((19033, 19042), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (19040, 19042), True, 'import matplotlib.pyplot as plt\n'), ((19473, 19497), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", 
(19483, 19497), True, 'import matplotlib.pyplot as plt\n'), ((19502, 19524), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (19512, 19524), True, 'import matplotlib.pyplot as plt\n'), ((19529, 19541), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (19539, 19541), True, 'import matplotlib.pyplot as plt\n'), ((19546, 19595), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_activation_splice_t"""'], {}), "('report/img/mlp_activation_splice_t')\n", (19557, 19595), True, 'import matplotlib.pyplot as plt\n'), ((19600, 19610), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19608, 19610), True, 'import matplotlib.pyplot as plt\n'), ((20455, 20481), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (20465, 20481), True, 'import matplotlib.pyplot as plt\n'), ((20491, 20500), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (20498, 20500), True, 'import matplotlib.pyplot as plt\n'), ((20937, 20961), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (20947, 20961), True, 'import matplotlib.pyplot as plt\n'), ((20966, 20988), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (20976, 20988), True, 'import matplotlib.pyplot as plt\n'), ((20993, 21005), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (21003, 21005), True, 'import matplotlib.pyplot as plt\n'), ((21010, 21057), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_activation_sat_tr"""'], {}), "('report/img/mlp_activation_sat_tr')\n", (21021, 21057), True, 'import matplotlib.pyplot as plt\n'), ((21062, 21072), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21070, 21072), True, 'import matplotlib.pyplot as plt\n'), ((21078, 21104), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (21088, 21104), True, 'import matplotlib.pyplot as plt\n'), 
((21114, 21123), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (21121, 21123), True, 'import matplotlib.pyplot as plt\n'), ((21554, 21578), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (21564, 21578), True, 'import matplotlib.pyplot as plt\n'), ((21583, 21605), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (21593, 21605), True, 'import matplotlib.pyplot as plt\n'), ((21610, 21622), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (21620, 21622), True, 'import matplotlib.pyplot as plt\n'), ((21627, 21673), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_activation_sat_t"""'], {}), "('report/img/mlp_activation_sat_t')\n", (21638, 21673), True, 'import matplotlib.pyplot as plt\n'), ((21678, 21688), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21686, 21688), True, 'import matplotlib.pyplot as plt\n'), ((22536, 22562), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (22546, 22562), True, 'import matplotlib.pyplot as plt\n'), ((22572, 22581), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (22579, 22581), True, 'import matplotlib.pyplot as plt\n'), ((23015, 23039), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (23025, 23039), True, 'import matplotlib.pyplot as plt\n'), ((23044, 23066), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (23054, 23066), True, 'import matplotlib.pyplot as plt\n'), ((23071, 23083), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (23081, 23083), True, 'import matplotlib.pyplot as plt\n'), ((23088, 23137), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_optimizer_splice_tr"""'], {}), "('report/img/mlp_optimizer_splice_tr')\n", (23099, 23137), True, 'import matplotlib.pyplot as plt\n'), ((23142, 23152), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(23150, 23152), True, 'import matplotlib.pyplot as plt\n'), ((23158, 23184), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (23168, 23184), True, 'import matplotlib.pyplot as plt\n'), ((23194, 23203), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (23201, 23203), True, 'import matplotlib.pyplot as plt\n'), ((23631, 23655), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (23641, 23655), True, 'import matplotlib.pyplot as plt\n'), ((23660, 23682), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (23670, 23682), True, 'import matplotlib.pyplot as plt\n'), ((23687, 23699), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (23697, 23699), True, 'import matplotlib.pyplot as plt\n'), ((23704, 23752), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_optimizer_splice_t"""'], {}), "('report/img/mlp_optimizer_splice_t')\n", (23715, 23752), True, 'import matplotlib.pyplot as plt\n'), ((23757, 23767), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23765, 23767), True, 'import matplotlib.pyplot as plt\n'), ((24614, 24640), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (24624, 24640), True, 'import matplotlib.pyplot as plt\n'), ((24650, 24659), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (24657, 24659), True, 'import matplotlib.pyplot as plt\n'), ((25093, 25117), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (25103, 25117), True, 'import matplotlib.pyplot as plt\n'), ((25122, 25144), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (25132, 25144), True, 'import matplotlib.pyplot as plt\n'), ((25149, 25161), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (25159, 25161), True, 'import matplotlib.pyplot as plt\n'), ((25166, 25212), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['"""report/img/mlp_optimizer_sat_tr"""'], {}), "('report/img/mlp_optimizer_sat_tr')\n", (25177, 25212), True, 'import matplotlib.pyplot as plt\n'), ((25217, 25227), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25225, 25227), True, 'import matplotlib.pyplot as plt\n'), ((25233, 25259), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (25243, 25259), True, 'import matplotlib.pyplot as plt\n'), ((25269, 25278), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (25276, 25278), True, 'import matplotlib.pyplot as plt\n'), ((25706, 25730), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (25716, 25730), True, 'import matplotlib.pyplot as plt\n'), ((25735, 25757), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (25745, 25757), True, 'import matplotlib.pyplot as plt\n'), ((25762, 25774), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (25772, 25774), True, 'import matplotlib.pyplot as plt\n'), ((25779, 25824), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_optimizer_sat_t"""'], {}), "('report/img/mlp_optimizer_sat_t')\n", (25790, 25824), True, 'import matplotlib.pyplot as plt\n'), ((25829, 25839), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25837, 25839), True, 'import matplotlib.pyplot as plt\n'), ((26294, 26306), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (26303, 26306), True, 'import numpy as np\n'), ((26413, 26511), 'matplotlib.pyplot.bar', 'plt.bar', (['x_ax', 'y_tr'], {'width': 'width', 'facecolor': '"""#9999ff"""', 'edgecolor': '"""white"""', 'label': '"""Training set"""'}), "(x_ax, y_tr, width=width, facecolor='#9999ff', edgecolor='white',\n label='Training set')\n", (26420, 26511), True, 'import matplotlib.pyplot as plt\n'), ((26512, 26617), 'matplotlib.pyplot.bar', 'plt.bar', (['(x_ax + width)', 'y_t'], {'width': 'width', 'facecolor': '"""#ffa07a"""', 
'edgecolor': '"""white"""', 'label': '"""Testing set"""'}), "(x_ax + width, y_t, width=width, facecolor='#ffa07a', edgecolor=\n 'white', label='Testing set')\n", (26519, 26617), True, 'import matplotlib.pyplot as plt\n'), ((26814, 26823), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (26821, 26823), True, 'import matplotlib.pyplot as plt\n'), ((26933, 26960), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Learning rate"""'], {}), "('Learning rate')\n", (26943, 26960), True, 'import matplotlib.pyplot as plt\n'), ((26965, 26987), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (26975, 26987), True, 'import matplotlib.pyplot as plt\n'), ((26992, 27004), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (27002, 27004), True, 'import matplotlib.pyplot as plt\n'), ((27009, 27048), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_lr_splice"""'], {}), "('report/img/mlp_lr_splice')\n", (27020, 27048), True, 'import matplotlib.pyplot as plt\n'), ((27053, 27063), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27061, 27063), True, 'import matplotlib.pyplot as plt\n'), ((27516, 27528), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (27525, 27528), True, 'import numpy as np\n'), ((27635, 27733), 'matplotlib.pyplot.bar', 'plt.bar', (['x_ax', 'y_tr'], {'width': 'width', 'facecolor': '"""#9999ff"""', 'edgecolor': '"""white"""', 'label': '"""Training set"""'}), "(x_ax, y_tr, width=width, facecolor='#9999ff', edgecolor='white',\n label='Training set')\n", (27642, 27733), True, 'import matplotlib.pyplot as plt\n'), ((27734, 27839), 'matplotlib.pyplot.bar', 'plt.bar', (['(x_ax + width)', 'y_t'], {'width': 'width', 'facecolor': '"""#ffa07a"""', 'edgecolor': '"""white"""', 'label': '"""Testing set"""'}), "(x_ax + width, y_t, width=width, facecolor='#ffa07a', edgecolor=\n 'white', label='Testing set')\n", (27741, 27839), True, 'import matplotlib.pyplot as plt\n'), ((28036, 28045), 
'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (28043, 28045), True, 'import matplotlib.pyplot as plt\n'), ((28155, 28182), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Learning rate"""'], {}), "('Learning rate')\n", (28165, 28182), True, 'import matplotlib.pyplot as plt\n'), ((28187, 28209), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (28197, 28209), True, 'import matplotlib.pyplot as plt\n'), ((28214, 28231), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1.06)'], {}), '(0, 1.06)\n', (28222, 28231), True, 'import matplotlib.pyplot as plt\n'), ((28236, 28248), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (28246, 28248), True, 'import matplotlib.pyplot as plt\n'), ((28253, 28289), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_lr_sat"""'], {}), "('report/img/mlp_lr_sat')\n", (28264, 28289), True, 'import matplotlib.pyplot as plt\n'), ((28294, 28304), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (28302, 28304), True, 'import matplotlib.pyplot as plt\n'), ((29297, 29323), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (29307, 29323), True, 'import matplotlib.pyplot as plt\n'), ((29333, 29342), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (29340, 29342), True, 'import matplotlib.pyplot as plt\n'), ((29905, 29929), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (29915, 29929), True, 'import matplotlib.pyplot as plt\n'), ((29934, 29956), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (29944, 29956), True, 'import matplotlib.pyplot as plt\n'), ((29961, 29973), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (29971, 29973), True, 'import matplotlib.pyplot as plt\n'), ((29978, 30021), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_dim_splice_tr"""'], {}), "('report/img/mlp_dim_splice_tr')\n", (29989, 30021), 
True, 'import matplotlib.pyplot as plt\n'), ((30026, 30036), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30034, 30036), True, 'import matplotlib.pyplot as plt\n'), ((30042, 30068), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (30052, 30068), True, 'import matplotlib.pyplot as plt\n'), ((30078, 30087), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (30085, 30087), True, 'import matplotlib.pyplot as plt\n'), ((30642, 30666), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (30652, 30666), True, 'import matplotlib.pyplot as plt\n'), ((30671, 30693), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (30681, 30693), True, 'import matplotlib.pyplot as plt\n'), ((30698, 30710), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (30708, 30710), True, 'import matplotlib.pyplot as plt\n'), ((30715, 30757), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_dim_splice_t"""'], {}), "('report/img/mlp_dim_splice_t')\n", (30726, 30757), True, 'import matplotlib.pyplot as plt\n'), ((30762, 30772), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30770, 30772), True, 'import matplotlib.pyplot as plt\n'), ((31756, 31782), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (31766, 31782), True, 'import matplotlib.pyplot as plt\n'), ((31792, 31801), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (31799, 31801), True, 'import matplotlib.pyplot as plt\n'), ((32364, 32388), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (32374, 32388), True, 'import matplotlib.pyplot as plt\n'), ((32393, 32415), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (32403, 32415), True, 'import matplotlib.pyplot as plt\n'), ((32420, 32432), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (32430, 
32432), True, 'import matplotlib.pyplot as plt\n'), ((32437, 32477), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_dim_sat_tr"""'], {}), "('report/img/mlp_dim_sat_tr')\n", (32448, 32477), True, 'import matplotlib.pyplot as plt\n'), ((32482, 32492), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32490, 32492), True, 'import matplotlib.pyplot as plt\n'), ((32498, 32524), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (32508, 32524), True, 'import matplotlib.pyplot as plt\n'), ((32534, 32543), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (32541, 32543), True, 'import matplotlib.pyplot as plt\n'), ((33098, 33122), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (33108, 33122), True, 'import matplotlib.pyplot as plt\n'), ((33127, 33149), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (33137, 33149), True, 'import matplotlib.pyplot as plt\n'), ((33154, 33166), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (33164, 33166), True, 'import matplotlib.pyplot as plt\n'), ((33171, 33210), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_dim_sat_t"""'], {}), "('report/img/mlp_dim_sat_t')\n", (33182, 33210), True, 'import matplotlib.pyplot as plt\n'), ((33215, 33225), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (33223, 33225), True, 'import matplotlib.pyplot as plt\n'), ((34307, 34333), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (34317, 34333), True, 'import matplotlib.pyplot as plt\n'), ((34343, 34352), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (34350, 34352), True, 'import matplotlib.pyplot as plt\n'), ((34914, 34938), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (34924, 34938), True, 'import matplotlib.pyplot as plt\n'), ((34943, 34965), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (34953, 34965), True, 'import matplotlib.pyplot as plt\n'), ((34970, 34987), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(0.97)'], {}), '(0, 0.97)\n', (34978, 34987), True, 'import matplotlib.pyplot as plt\n'), ((34992, 35004), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (35002, 35004), True, 'import matplotlib.pyplot as plt\n'), ((35009, 35051), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_archi_sat_tr"""'], {}), "('report/img/mlp_archi_sat_tr')\n", (35020, 35051), True, 'import matplotlib.pyplot as plt\n'), ((35056, 35066), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (35064, 35066), True, 'import matplotlib.pyplot as plt\n'), ((35072, 35098), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (35082, 35098), True, 'import matplotlib.pyplot as plt\n'), ((35108, 35117), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (35115, 35117), True, 'import matplotlib.pyplot as plt\n'), ((35671, 35695), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (35681, 35695), True, 'import matplotlib.pyplot as plt\n'), ((35700, 35722), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (35710, 35722), True, 'import matplotlib.pyplot as plt\n'), ((35727, 35744), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(0.92)'], {}), '(0, 0.92)\n', (35735, 35744), True, 'import matplotlib.pyplot as plt\n'), ((35749, 35761), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (35759, 35761), True, 'import matplotlib.pyplot as plt\n'), ((35766, 35807), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_archi_sat_t"""'], {}), "('report/img/mlp_archi_sat_t')\n", (35777, 35807), True, 'import matplotlib.pyplot as plt\n'), ((35812, 35822), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (35820, 35822), True, 'import matplotlib.pyplot as 
plt\n'), ((36884, 36910), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (36894, 36910), True, 'import matplotlib.pyplot as plt\n'), ((36920, 36929), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (36927, 36929), True, 'import matplotlib.pyplot as plt\n'), ((37491, 37515), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (37501, 37515), True, 'import matplotlib.pyplot as plt\n'), ((37520, 37542), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (37530, 37542), True, 'import matplotlib.pyplot as plt\n'), ((37547, 37559), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (37557, 37559), True, 'import matplotlib.pyplot as plt\n'), ((37564, 37609), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_archi_splice_tr"""'], {}), "('report/img/mlp_archi_splice_tr')\n", (37575, 37609), True, 'import matplotlib.pyplot as plt\n'), ((37614, 37624), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (37622, 37624), True, 'import matplotlib.pyplot as plt\n'), ((37630, 37656), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (37640, 37656), True, 'import matplotlib.pyplot as plt\n'), ((37666, 37675), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (37673, 37675), True, 'import matplotlib.pyplot as plt\n'), ((38229, 38253), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (38239, 38253), True, 'import matplotlib.pyplot as plt\n'), ((38258, 38280), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (38268, 38280), True, 'import matplotlib.pyplot as plt\n'), ((38285, 38302), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(0.92)'], {}), '(0, 0.92)\n', (38293, 38302), True, 'import matplotlib.pyplot as plt\n'), ((38307, 38319), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (38317, 38319), 
True, 'import matplotlib.pyplot as plt\n'), ((38324, 38368), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_archi_splice_t"""'], {}), "('report/img/mlp_archi_splice_t')\n", (38335, 38368), True, 'import matplotlib.pyplot as plt\n'), ((38373, 38383), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (38381, 38383), True, 'import matplotlib.pyplot as plt\n'), ((38810, 38836), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (38820, 38836), True, 'import matplotlib.pyplot as plt\n'), ((38846, 38855), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (38853, 38855), True, 'import matplotlib.pyplot as plt\n'), ((39190, 39214), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (39200, 39214), True, 'import matplotlib.pyplot as plt\n'), ((39219, 39241), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (39229, 39241), True, 'import matplotlib.pyplot as plt\n'), ((39246, 39258), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (39256, 39258), True, 'import matplotlib.pyplot as plt\n'), ((39263, 39308), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_baseline_splice"""'], {}), "('report/img/mlp_baseline_splice')\n", (39274, 39308), True, 'import matplotlib.pyplot as plt\n'), ((39313, 39323), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (39321, 39323), True, 'import matplotlib.pyplot as plt\n'), ((39329, 39355), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (39339, 39355), True, 'import matplotlib.pyplot as plt\n'), ((39365, 39374), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (39372, 39374), True, 'import matplotlib.pyplot as plt\n'), ((39697, 39721), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (39707, 39721), True, 'import matplotlib.pyplot as plt\n'), ((39726, 39748), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (39736, 39748), True, 'import matplotlib.pyplot as plt\n'), ((39753, 39765), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (39763, 39765), True, 'import matplotlib.pyplot as plt\n'), ((39770, 39812), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report/img/mlp_baseline_sat"""'], {}), "('report/img/mlp_baseline_sat')\n", (39781, 39812), True, 'import matplotlib.pyplot as plt\n'), ((39817, 39827), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (39825, 39827), True, 'import matplotlib.pyplot as plt\n'), ((1074, 1090), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1088, 1090), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler\n'), ((1338, 1566), 'sklearn.svm.SVC', 'SVC', ([], {'C': '(1.0)', 'cache_size': '(200)', 'class_weight': 'None', 'coef0': '(0.0)', 'decision_function_shape': '"""ovr"""', 'degree': '(3)', 'gamma': '"""auto"""', 'kernel': '"""rbf"""', 'max_iter': 'i', 'probability': '(False)', 'shrinking': '(True)', 'tol': '(0.001)', 'verbose': '(False)', 'random_state': '(666)'}), "(C=1.0, cache_size=200, class_weight=None, coef0=0.0,\n decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf',\n max_iter=i, probability=False, shrinking=True, tol=0.001, verbose=False,\n random_state=666)\n", (1341, 1566), False, 'from sklearn.svm import SVC\n'), ((12960, 12972), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (12969, 12972), True, 'import numpy as np\n'), ((13336, 13397), 'matplotlib.pyplot.text', 'plt.text', (['(x - 0.02)', 'y1', "('%.2f' % y1)"], {'ha': '"""center"""', 'va': '"""bottom"""'}), "(x - 0.02, y1, '%.2f' % y1, ha='center', va='bottom')\n", (13344, 13397), True, 'import matplotlib.pyplot as plt\n'), ((13406, 13476), 'matplotlib.pyplot.text', 'plt.text', (['(x + width + 0.075)', 'y2', "('%.2f' % y2)"], {'ha': '"""center"""', 'va': '"""bottom"""'}), "(x + width + 0.075, y2, 
'%.2f' % y2, ha='center', va='bottom')\n", (13414, 13476), True, 'import matplotlib.pyplot as plt\n'), ((14302, 14314), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (14311, 14314), True, 'import numpy as np\n'), ((14678, 14739), 'matplotlib.pyplot.text', 'plt.text', (['(x - 0.04)', 'y1', "('%.2f' % y1)"], {'ha': '"""center"""', 'va': '"""bottom"""'}), "(x - 0.04, y1, '%.2f' % y1, ha='center', va='bottom')\n", (14686, 14739), True, 'import matplotlib.pyplot as plt\n'), ((14748, 14818), 'matplotlib.pyplot.text', 'plt.text', (['(x + width + 0.075)', 'y2', "('%.2f' % y2)"], {'ha': '"""center"""', 'va': '"""bottom"""'}), "(x + width + 0.075, y2, '%.2f' % y2, ha='center', va='bottom')\n", (14756, 14818), True, 'import matplotlib.pyplot as plt\n'), ((17114, 17261), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': '"""adam"""', 'alpha': '(0.001)', 'learning_rate_init': '(0.001)', 'max_iter': 'i', 'activation': '"""relu"""', 'hidden_layer_sizes': '(100,)', 'random_state': '(666)'}), "(solver='adam', alpha=0.001, learning_rate_init=0.001,\n max_iter=i, activation='relu', hidden_layer_sizes=(100,), random_state=666)\n", (17127, 17261), False, 'from sklearn.neural_network import MLPClassifier\n'), ((26664, 26725), 'matplotlib.pyplot.text', 'plt.text', (['(x - 0.02)', 'y1', "('%.2f' % y1)"], {'ha': '"""center"""', 'va': '"""bottom"""'}), "(x - 0.02, y1, '%.2f' % y1, ha='center', va='bottom')\n", (26672, 26725), True, 'import matplotlib.pyplot as plt\n'), ((26734, 26803), 'matplotlib.pyplot.text', 'plt.text', (['(x + width + 0.05)', 'y2', "('%.2f' % y2)"], {'ha': '"""center"""', 'va': '"""bottom"""'}), "(x + width + 0.05, y2, '%.2f' % y2, ha='center', va='bottom')\n", (26742, 26803), True, 'import matplotlib.pyplot as plt\n'), ((27886, 27947), 'matplotlib.pyplot.text', 'plt.text', (['(x - 0.02)', 'y1', "('%.2f' % y1)"], {'ha': '"""center"""', 'va': '"""bottom"""'}), "(x - 0.02, y1, '%.2f' % y1, ha='center', va='bottom')\n", (27894, 27947), True, 
'import matplotlib.pyplot as plt\n'), ((27956, 28025), 'matplotlib.pyplot.text', 'plt.text', (['(x + width + 0.05)', 'y2', "('%.2f' % y2)"], {'ha': '"""center"""', 'va': '"""bottom"""'}), "(x + width + 0.05, y2, '%.2f' % y2, ha='center', va='bottom')\n", (27964, 28025), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import matplotlib.pyplot as plt
# ---------------------------------------------------------------------------
# UKF parameters (scaled unscented transform, state dimension n = 2).
# lambda = alpha^2 * (n + kappa) - n, hence alpha^2 = (n + lambda) / (n + kappa).
# ---------------------------------------------------------------------------
UKF_N = 2                                                  # state dimension n
ukf_lambda = 10.0                                          # spread parameter lambda
ukf_kappa = 0.1                                            # secondary scaling kappa
ukf_alpha2 = (UKF_N + ukf_lambda) / (UKF_N + ukf_kappa)    # alpha^2
ukf_w0_m = ukf_lambda / (UKF_N + ukf_lambda)               # mean weight of the center point
ukf_w0_c = ukf_w0_m + (1.0 - ukf_alpha2 + 2.0)             # covariance weight (beta = 2)
ukf_wi = 1.0 / (2.0 * (UKF_N + ukf_lambda))                # weight of each of the other 2n points
ukf_wm = np.zeros([2 * UKF_N + 1, 1])                      # mean weights w_m
ukf_wc = np.zeros([2 * UKF_N + 1, 1])                      # covariance weights w_c
# BUG FIX: gamma must be sqrt(n + lambda) with n = 2; the original used
# sqrt(3.0 + lambda), which is inconsistent with the n = 2 weights above.
ukf_gamma = np.sqrt(UKF_N + ukf_lambda)                    # gamma = sqrt(n + lambda)
# Fill in the weight vectors: index 0 is the center point, 1..2n the spread points.
ukf_wm[0] = ukf_w0_m
ukf_wc[0] = ukf_w0_c
for i in range(1, 2 * UKF_N + 1):
    ukf_wm[i] = ukf_wi
    ukf_wc[i] = ukf_wi
def gen_sigma_point(mu, sigma):
    """
    Build the sigma-point matrix chi for the unscented transform.
    arguments :
        mu : Column vector of mean values (n x 1).
        sigma : Covariance matrix (n x n).
    return :
        chi : n x (2n + 1) matrix whose columns are the sigma points.
    """
    dim = len(mu)
    # gamma-scaled matrix square root of sigma (Cholesky factor).
    spread = ukf_gamma * np.linalg.cholesky(sigma)
    chi = np.zeros([dim, 2 * dim + 1])
    chi[:, 0] = mu[:, 0]                       # center point
    chi[:, 1:dim + 1] = mu + spread            # points pushed out along +columns
    chi[:, dim + 1:2 * dim + 1] = mu - spread  # mirrored points
    return chi
def gen_correlation_mat(mu, chi, R):
    """
    Weighted sample covariance of the sigma points around *mu*.
    arguments :
        mu : Column vector of mean values.
        chi : Matrix whose columns are the (transformed) sigma points.
        R : Additive noise covariance seeding the sum (process or
            observation noise, depending on the caller).
    return :
        Covariance matrix after the unscented transform.
    """
    cov = R
    for idx in range(chi.shape[1]):
        dev = chi[:, [idx]] - mu                 # deviation as a column vector
        cov = cov + ukf_wc[idx] * (dev @ dev.T)  # rank-1 weighted update
    return cov
def UKF(mu, sigma, u, z, state_func, obs_func, R, Q, dt):
    """
    Unscented Kalman Filter Program.
    arguments :
        mu : Vector which is the set of state values.
        sigma : Correlation Matrix of the state values.
        u : Control inputs.Vector/Float.
        z : Observed values.
        state_func : Function which describe how the state changes.
        obs_func : Function to get observed values from state values.
        R : Correlation Matrix of the noise of the state change.
        Q : Correlation Matrix of the observation noise.
        dt : Time span of the integration.
    returns :
        mu : Vector which is the set of state values.
        sigma : Correlation Matrix of the state values.
    ---------------
    """
    # Generate sigma points from the current posterior estimate.
    chi = gen_sigma_point(mu, sigma)  # chi matrix
    # Propagate each sigma point through the state-transition model.
    chi_star = state_func(u, chi, dt)
    # Prior (predicted) distribution of the state.
    mu_bar = np.dot(chi_star, ukf_wm)  # predicted mean
    sigma_bar = gen_correlation_mat(mu_bar, chi_star, R)  # predicted covariance
    # Re-draw sigma points from the prior distribution.
    chi_bar = gen_sigma_point(mu_bar, sigma_bar)
    # Predicted distribution of the observation.
    z_bar = obs_func(chi_bar)
    # Predicted observation (weighted mean of the observed sigma points).
    z_hat = np.dot(z_bar, ukf_wm)
    # Predicted observation covariance (innovation covariance).
    S = gen_correlation_mat(z_hat, z_bar, Q)
    # Cross covariance Sigma_xz between state and observation.
    sigma_xz = np.zeros([mu.shape[0], z.shape[0]])
    for i in range(chi_bar.shape[1]):
        x_ = chi_bar[:, i] - mu_bar[:, 0]
        z_ = z_bar[:, i] - z_hat[:, 0]
        x_ = x_.reshape((x_.shape[0], 1))
        z_ = z_.reshape((z_.shape[0], 1))
        sigma_xz = sigma_xz + ukf_wc[i] * (np.dot(x_, z_.T))
    # Kalman gain.
    K = np.dot(sigma_xz, np.linalg.inv(S))
    # Posterior mean and covariance (measurement update).
    mu = mu_bar + np.dot(K, (z - z_hat))
    sigma = sigma_bar - np.dot(np.dot(K, S), K.T)
    return mu, sigma
def state_func(u, chi, dt):
    """
    State-transition model for the sigma points: every column of *chi* is
    shifted by the same deterministic displacement of a point moving on a
    circle of radius 100 with angular velocity pi/10.  *u* plays the role
    of time here; *dt* is accepted but unused.
    """
    radius = 100
    ang_vel = np.pi / 10
    shift = np.array([[-radius * ang_vel * np.sin(ang_vel * u)],
                      [radius * ang_vel * np.cos(ang_vel * u)]])
    return chi + shift
def obs_func(chi_bar):
    """
    Observation model applied column-wise to the sigma points:
    row 0 gets the range sqrt(x1^2 + x2^2), row 1 the bearing arctan(x2/x1).
    Any extra rows of the output stay zero (shape mirrors *chi_bar*).
    """
    obs = np.zeros_like(chi_bar)
    xs, ys = chi_bar[0], chi_bar[1]
    obs[0] = np.sqrt(xs ** 2 + ys ** 2)
    obs[1] = np.arctan(ys / xs)
    return obs
# 関数
def state_eq(x, L, omega, t):
    """Deterministic state transition: shift *x* (2x1) by the velocity of a
    point moving on a circle of radius *L* with angular velocity *omega*."""
    step = np.array([-L * omega * np.sin(omega * t),
                     L * omega * np.cos(omega * t)]).reshape([2, 1])
    return x + step
def obs_eq(x, obs_noise_y1, obs_noise_y2):
    """
    Noisy range/bearing observation of the planar state x = (x1, x2)^T.
    obs_noise_y1 / obs_noise_y2 are forwarded as the scale argument of
    np.random.normal for the range and bearing components respectively.
    """
    x1, x2 = x[0], x[1]
    clean = np.array([np.sqrt(x1 ** 2 + x2 ** 2),
                      np.arctan(x2 / x1)]).reshape([2, 1])
    noise = np.array([np.random.normal(0, obs_noise_y1),
                      np.random.normal(0, obs_noise_y2)]).reshape([2, 1])
    return clean + noise
def obs_eq_noiseless(x):
    """Range/bearing observation of x = (x1, x2)^T without noise."""
    rng = np.sqrt(x[0] ** 2 + x[1] ** 2)
    bearing = np.arctan(x[1] / x[0])
    return np.array([rng, bearing]).reshape([2, 1])
def true(x, input_noise):
    """Corrupt the state vector *x* (2x1) with zero-mean Gaussian process
    noise; *input_noise* is the scale passed to np.random.normal."""
    return x + np.random.normal(0, input_noise, (2, 1))
def state_jacobian():
    """Jacobian of state_eq w.r.t. the state: the transition adds a
    state-independent shift, so the Jacobian is the 2x2 identity."""
    return np.eye(2)
def obs_jacobian(x):
    """Jacobian of the range/bearing observation evaluated at x = (x1, x2)^T:
    row 0 differentiates sqrt(x1^2 + x2^2), row 1 differentiates arctan(x2/x1)."""
    jac = np.empty((2, 2))
    r_sq = x[0] ** 2 + x[1] ** 2   # squared range, shared by all entries
    r = np.sqrt(r_sq)
    jac[0, 0] = x[0] / r
    jac[0, 1] = x[1] / r
    jac[1, 0] = -x[1] / r_sq
    jac[1, 1] = x[0] / r_sq
    return jac
def system(x, L, omega, t, input_noise, obs_noise_y1, obs_noise_y2):
    """One simulation step: propagate the true state with process noise,
    then produce a noisy range/bearing observation of it."""
    next_state = true(state_eq(x, L, omega, t), input_noise)
    measurement = obs_eq(next_state, obs_noise_y1, obs_noise_y2)
    return next_state, measurement
def EKF(m, V, y, Q, R, L, omega, t):
    """
    One predict/update cycle of the Extended Kalman Filter.
    arguments :
        m, V : posterior mean / covariance from the previous step.
        y : current observation (range, bearing) column vector.
        Q, R : process / observation noise covariance matrices.
        L, omega, t : parameters forwarded to state_eq.
    returns :
        Updated mean and covariance.
    """
    # --- prediction step ---
    m_pred = state_eq(m, L, omega, t)
    A = state_jacobian()
    V_pred = A @ V @ A.T + Q
    # --- measurement update ---
    C = obs_jacobian(m_pred)
    S = C @ V_pred @ C.T + R              # innovation covariance
    K = V_pred @ C.T @ np.linalg.inv(S)   # Kalman gain
    m_new = m_pred + K @ (y - obs_eq_noiseless(m_pred))
    V_new = (np.eye(V_pred.shape[0]) - K @ C) @ V_pred
    return m_new, V_new
if __name__ == "__main__":
    # Demo: a point moves on a circle of radius L (angular velocity omega)
    # and is tracked from noisy range/bearing observations.
    # NOTE(review): only the EKF is exercised here; the UKF defined above
    # is not called by this script.
    x = np.array([100, 0]).reshape([2, 1])   # true state (column vector)
    L = 100                                   # circle radius
    omega = np.pi / 10                        # angular velocity [rad/step]
    input_noise = 1.0 ** 2                    # process noise scale
    obs_noise_y1 = 10.0 ** 2                  # range noise scale
    obs_noise_y2 = (5.0 * np.pi / 180) ** 2   # bearing noise scale
    # NOTE(review): the noise values above are squared as if they were
    # variances, but true()/obs_eq() pass them to np.random.normal as the
    # standard deviation — confirm the intended noise magnitude.
    m = np.array([100, 0]).reshape([2, 1])    # EKF mean estimate
    t = 0.0                                   # simulation time
    dt = 1.0                                  # time step
    V = np.identity(2) * 1.0 ** 2             # EKF covariance estimate
    Q = np.identity(2) * input_noise          # process noise covariance
    R = np.array([[obs_noise_y1, 0],
                  [0, obs_noise_y2]])         # observation noise covariance
    # Recording buffer (rows: true x1, true x2, estimated x1, estimated x2).
    rec = np.empty([4, 21])
    for i in range(21):
        rec[0, i] = x[0]
        rec[1, i] = x[1]
        rec[2, i] = m[0]
        rec[3, i] = m[1]
        # Advance the truth, observe it, then update the filter.
        x, y = system(x, L, omega, t, input_noise, obs_noise_y1, obs_noise_y2)
        m, V = EKF(m, V, y, Q, R, L, omega, t)
        t += dt
    plt.plot(rec[0, :], rec[1, :], color="blue", marker="o", label="true")
    plt.plot(rec[2, :], rec[3, :], color="red", marker="^", label="estimated")
    plt.legend()
    plt.show()
| [
"numpy.zeros_like",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.empty",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.identity",
"numpy.sin",
"numpy.array",
"numpy.linalg.inv",
"numpy.random.normal",
"numpy.cos",
"numpy.dot",
"numpy.sqrt",
"numpy.arctan",
"numpy.lina... | [((388, 404), 'numpy.zeros', 'np.zeros', (['[5, 1]'], {}), '([5, 1])\n', (396, 404), True, 'import numpy as np\n'), ((432, 448), 'numpy.zeros', 'np.zeros', (['[5, 1]'], {}), '([5, 1])\n', (440, 448), True, 'import numpy as np\n'), ((479, 504), 'numpy.sqrt', 'np.sqrt', (['(3.0 + ukf_lambda)'], {}), '(3.0 + ukf_lambda)\n', (486, 504), True, 'import numpy as np\n'), ((937, 961), 'numpy.zeros', 'np.zeros', (['[n, 2 * n + 1]'], {}), '([n, 2 * n + 1])\n', (945, 961), True, 'import numpy as np\n'), ((986, 1011), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['sigma'], {}), '(sigma)\n', (1004, 1011), True, 'import numpy as np\n'), ((2611, 2635), 'numpy.dot', 'np.dot', (['chi_star', 'ukf_wm'], {}), '(chi_star, ukf_wm)\n', (2617, 2635), True, 'import numpy as np\n'), ((2853, 2874), 'numpy.dot', 'np.dot', (['z_bar', 'ukf_wm'], {}), '(z_bar, ukf_wm)\n', (2859, 2874), True, 'import numpy as np\n'), ((2968, 3003), 'numpy.zeros', 'np.zeros', (['[mu.shape[0], z.shape[0]]'], {}), '([mu.shape[0], z.shape[0]])\n', (2976, 3003), True, 'import numpy as np\n'), ((3678, 3700), 'numpy.zeros_like', 'np.zeros_like', (['chi_bar'], {}), '(chi_bar)\n', (3691, 3700), True, 'import numpy as np\n'), ((4608, 4648), 'numpy.random.normal', 'np.random.normal', (['(0)', 'input_noise', '(2, 1)'], {}), '(0, input_noise, (2, 1))\n', (4624, 4648), True, 'import numpy as np\n'), ((4709, 4723), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (4720, 4723), True, 'import numpy as np\n'), ((4782, 4798), 'numpy.empty', 'np.empty', (['(2, 2)'], {}), '((2, 2))\n', (4790, 4798), True, 'import numpy as np\n'), ((6104, 6152), 'numpy.array', 'np.array', (['[[obs_noise_y1, 0], [0, obs_noise_y2]]'], {}), '([[obs_noise_y1, 0], [0, obs_noise_y2]])\n', (6112, 6152), True, 'import numpy as np\n'), ((6192, 6209), 'numpy.empty', 'np.empty', (['[4, 21]'], {}), '([4, 21])\n', (6200, 6209), True, 'import numpy as np\n'), ((6484, 6554), 'matplotlib.pyplot.plot', 'plt.plot', (['rec[0, :]', 'rec[1, 
:]'], {'color': '"""blue"""', 'marker': '"""o"""', 'label': '"""true"""'}), "(rec[0, :], rec[1, :], color='blue', marker='o', label='true')\n", (6492, 6554), True, 'import matplotlib.pyplot as plt\n'), ((6559, 6633), 'matplotlib.pyplot.plot', 'plt.plot', (['rec[2, :]', 'rec[3, :]'], {'color': '"""red"""', 'marker': '"""^"""', 'label': '"""estimated"""'}), "(rec[2, :], rec[3, :], color='red', marker='^', label='estimated')\n", (6567, 6633), True, 'import matplotlib.pyplot as plt\n'), ((6638, 6650), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6648, 6650), True, 'import matplotlib.pyplot as plt\n'), ((6655, 6665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6663, 6665), True, 'import matplotlib.pyplot as plt\n'), ((3311, 3327), 'numpy.linalg.inv', 'np.linalg.inv', (['S'], {}), '(S)\n', (3324, 3327), True, 'import numpy as np\n'), ((3361, 3381), 'numpy.dot', 'np.dot', (['K', '(z - z_hat)'], {}), '(K, z - z_hat)\n', (3367, 3381), True, 'import numpy as np\n'), ((3759, 3807), 'numpy.sqrt', 'np.sqrt', (['(chi_bar[0, i] ** 2 + chi_bar[1, i] ** 2)'], {}), '(chi_bar[0, i] ** 2 + chi_bar[1, i] ** 2)\n', (3766, 3807), True, 'import numpy as np\n'), ((3828, 3868), 'numpy.arctan', 'np.arctan', (['(chi_bar[1, i] / chi_bar[0, i])'], {}), '(chi_bar[1, i] / chi_bar[0, i])\n', (3837, 3868), True, 'import numpy as np\n'), ((4827, 4857), 'numpy.sqrt', 'np.sqrt', (['(x[0] ** 2 + x[1] ** 2)'], {}), '(x[0] ** 2 + x[1] ** 2)\n', (4834, 4857), True, 'import numpy as np\n'), ((4886, 4916), 'numpy.sqrt', 'np.sqrt', (['(x[0] ** 2 + x[1] ** 2)'], {}), '(x[0] ** 2 + x[1] ** 2)\n', (4893, 4916), True, 'import numpy as np\n'), ((5569, 5588), 'numpy.linalg.inv', 'np.linalg.inv', (['temp'], {}), '(temp)\n', (5582, 5588), True, 'import numpy as np\n'), ((6033, 6047), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (6044, 6047), True, 'import numpy as np\n'), ((6067, 6081), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (6078, 6081), True, 'import numpy 
as np\n'), ((3415, 3427), 'numpy.dot', 'np.dot', (['K', 'S'], {}), '(K, S)\n', (3421, 3427), True, 'import numpy as np\n'), ((5392, 5404), 'numpy.dot', 'np.dot', (['A', 'V'], {}), '(A, V)\n', (5398, 5404), True, 'import numpy as np\n'), ((5487, 5503), 'numpy.dot', 'np.dot', (['C', 'V_est'], {}), '(C, V_est)\n', (5493, 5503), True, 'import numpy as np\n'), ((5672, 5699), 'numpy.identity', 'np.identity', (['V_est.shape[0]'], {}), '(V_est.shape[0])\n', (5683, 5699), True, 'import numpy as np\n'), ((5702, 5714), 'numpy.dot', 'np.dot', (['K', 'C'], {}), '(K, C)\n', (5708, 5714), True, 'import numpy as np\n'), ((5787, 5805), 'numpy.array', 'np.array', (['[100, 0]'], {}), '([100, 0])\n', (5795, 5805), True, 'import numpy as np\n'), ((5965, 5983), 'numpy.array', 'np.array', (['[100, 0]'], {}), '([100, 0])\n', (5973, 5983), True, 'import numpy as np\n'), ((1666, 1680), 'numpy.dot', 'np.dot', (['x', 'x.T'], {}), '(x, x.T)\n', (1672, 1680), True, 'import numpy as np\n'), ((3250, 3266), 'numpy.dot', 'np.dot', (['x_', 'z_.T'], {}), '(x_, z_.T)\n', (3256, 3266), True, 'import numpy as np\n'), ((4164, 4190), 'numpy.sqrt', 'np.sqrt', (['(x1 ** 2 + x2 ** 2)'], {}), '(x1 ** 2 + x2 ** 2)\n', (4171, 4190), True, 'import numpy as np\n'), ((4210, 4228), 'numpy.arctan', 'np.arctan', (['(x2 / x1)'], {}), '(x2 / x1)\n', (4219, 4228), True, 'import numpy as np\n'), ((4269, 4302), 'numpy.random.normal', 'np.random.normal', (['(0)', 'obs_noise_y1'], {}), '(0, obs_noise_y1)\n', (4285, 4302), True, 'import numpy as np\n'), ((4326, 4359), 'numpy.random.normal', 'np.random.normal', (['(0)', 'obs_noise_y2'], {}), '(0, obs_noise_y2)\n', (4342, 4359), True, 'import numpy as np\n'), ((4472, 4498), 'numpy.sqrt', 'np.sqrt', (['(x1 ** 2 + x2 ** 2)'], {}), '(x1 ** 2 + x2 ** 2)\n', (4479, 4498), True, 'import numpy as np\n'), ((4518, 4536), 'numpy.arctan', 'np.arctan', (['(x2 / x1)'], {}), '(x2 / x1)\n', (4527, 4536), True, 'import numpy as np\n'), ((3562, 3579), 'numpy.sin', 'np.sin', (['(omega * u)'], 
{}), '(omega * u)\n', (3568, 3579), True, 'import numpy as np\n'), ((3622, 3639), 'numpy.cos', 'np.cos', (['(omega * u)'], {}), '(omega * u)\n', (3628, 3639), True, 'import numpy as np\n'), ((3963, 3980), 'numpy.sin', 'np.sin', (['(omega * t)'], {}), '(omega * t)\n', (3969, 3980), True, 'import numpy as np\n'), ((4020, 4037), 'numpy.cos', 'np.cos', (['(omega * t)'], {}), '(omega * t)\n', (4026, 4037), True, 'import numpy as np\n')] |
from kivy.config import Config
# Fix the window size at 640x640 pixels.  Config.set is called before the
# remaining kivy imports — presumably required for the graphics settings to
# take effect; TODO confirm against kivy's Config documentation.
Config.set('graphics', 'width', '640')
Config.set('graphics', 'height', '640')
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.floatlayout import FloatLayout
from kivy.clock import Clock
from kivy.graphics import Color, Rectangle
import particle_system_ext  # compiled extension wrapping the particle simulation
import random
import numpy as np
# Half-extent of the simulation grid: the drawing code maps a particle at
# coordinate c to pixel (c + N//2) * 10, centering the grid in the 640 px window.
N = 64
class ParticleScene(FloatLayout):
    """
    Widget that owns a master and a slave particle system (implemented in the
    compiled ``particle_system_ext`` extension), steps them on a timer, and
    redraws every particle as a colored rectangle.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Constructor arguments are opaque extension parameters —
        # TODO confirm their meaning against particle_system_ext.
        self.particle_system = particle_system_ext.PyMasterParticleSystem(2,N,0.01,0.01,0.01)
        self.slave_particle_system = particle_system_ext.PySlaveParticleSystem(10,self.particle_system)
        # Advance and redraw the simulation at ~30 frames per second.
        Clock.schedule_interval(lambda dt: self.update_system(),1/30)
    def update_system(self):
        """Spawn new particles, step both systems, and redraw the canvas."""
        # spawn_particles argument meanings are defined by the extension —
        # TODO confirm; random.randint(-100,30) is mostly negative, which
        # presumably makes spawning probabilistic.
        self.particle_system.spawn_particles(random.randint(-100,30),
                            0,0.5,2,np.array([0,0],dtype=np.float64),
                            3.14,0.5,0.1,np.array([0,0],dtype=np.float64),
                            ord('u'[0]))  # ord('u'[0]) is just ord('u')
        self.particle_system.iterate()
        self.slave_particle_system.spawn_particles(random.randint(-100,30),
                            0,0.5,1,np.array([0,0],dtype=np.float64),
                            3.14,0.5,2,np.array([10,10],dtype=np.float64),
                            ord('u'[0]))
        self.slave_particle_system.iterate()
        # Redraw from scratch: clear the canvas, then emit one Color +
        # Rectangle instruction per particle in each system.
        self.canvas.clear()
        with self.canvas:
            for i in range(self.particle_system.get_number_of_particles()):
                particle_coords = self.particle_system.get_particle_coords(i)
                particle_size = self.particle_system.get_particle_size(i)
                particle_age = self.particle_system.get_particle_age(i)
                # Color channels scale with (42 - age) / 42, so particles
                # fade as their age approaches 42.
                Color(random.random(),(42-particle_age)/42,1,(42-particle_age)/42)
                # Map simulation coordinates to pixels: (coord + N//2) * 10
                # centers the grid in the 640 px window.
                Rectangle(pos=((particle_coords[0]+N//2)*10,(particle_coords[1]+N//2)*10),size=(particle_size,particle_size))
            for i in range(self.slave_particle_system.get_number_of_particles()):
                particle_coords = self.slave_particle_system.get_particle_coords(i)
                particle_size = self.slave_particle_system.get_particle_size(i)
                particle_age = self.slave_particle_system.get_particle_age(i)
                Color(0.9,random.random()/2,0.3,(42-particle_age)/42)
                Rectangle(pos=((particle_coords[0]+N//2)*10,(particle_coords[1]+N//2)*10),size=(particle_size,particle_size))
    def on_touch_down(self,touch):
        """Remember where the drag started."""
        self.touch_down_pos = touch.pos
    def on_touch_up(self,touch):
        """Inject velocity into the master system based on the drag vector."""
        x_diff = touch.pos[0] - self.touch_down_pos[0]
        y_diff = touch.pos[1] - self.touch_down_pos[1]
        # Convert the press position from pixels to grid cells (10 px/cell).
        x_pos = int(self.touch_down_pos[0]/10)
        y_pos = int(self.touch_down_pos[1]/10)
        drag_multiplier = 0.5
        self.particle_system.add_velocity(x_pos,y_pos,x_diff*drag_multiplier,y_diff*drag_multiplier)
class Scene(Widget):
    """Empty Widget subclass; presumably styled/populated from the kv
    file — TODO confirm against particles_demo.kv."""
    pass
class ParticlesDemoApp(App):
    """Kivy application wrapper for the particles demo."""
    def build(self):
        # Load the widget tree from the kv file.
        # NOTE(review): build() implicitly returns None, so the root widget is
        # presumably supplied by particles_demo.kv via load_kv — confirm that
        # load_kv sets self.root as expected.
        self.load_kv("particles_demo.kv")
def main():
    """Entry point: create the demo app and start the kivy event loop."""
    app = ParticlesDemoApp()
    app.run()

if __name__ == '__main__':
    main()
"random.randint",
"particle_system_ext.PyMasterParticleSystem",
"kivy.config.Config.set",
"kivy.graphics.Rectangle",
"particle_system_ext.PySlaveParticleSystem",
"random.random",
"numpy.array"
] | [((31, 69), 'kivy.config.Config.set', 'Config.set', (['"""graphics"""', '"""width"""', '"""640"""'], {}), "('graphics', 'width', '640')\n", (41, 69), False, 'from kivy.config import Config\n'), ((70, 109), 'kivy.config.Config.set', 'Config.set', (['"""graphics"""', '"""height"""', '"""640"""'], {}), "('graphics', 'height', '640')\n", (80, 109), False, 'from kivy.config import Config\n'), ((506, 572), 'particle_system_ext.PyMasterParticleSystem', 'particle_system_ext.PyMasterParticleSystem', (['(2)', 'N', '(0.01)', '(0.01)', '(0.01)'], {}), '(2, N, 0.01, 0.01, 0.01)\n', (548, 572), False, 'import particle_system_ext\n'), ((606, 673), 'particle_system_ext.PySlaveParticleSystem', 'particle_system_ext.PySlaveParticleSystem', (['(10)', 'self.particle_system'], {}), '(10, self.particle_system)\n', (647, 673), False, 'import particle_system_ext\n'), ((826, 850), 'random.randint', 'random.randint', (['(-100)', '(30)'], {}), '(-100, 30)\n', (840, 850), False, 'import random\n'), ((904, 938), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.float64'}), '([0, 0], dtype=np.float64)\n', (912, 938), True, 'import numpy as np\n'), ((996, 1030), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.float64'}), '([0, 0], dtype=np.float64)\n', (1004, 1030), True, 'import numpy as np\n'), ((1179, 1203), 'random.randint', 'random.randint', (['(-100)', '(30)'], {}), '(-100, 30)\n', (1193, 1203), False, 'import random\n'), ((1263, 1297), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.float64'}), '([0, 0], dtype=np.float64)\n', (1271, 1297), True, 'import numpy as np\n'), ((1359, 1395), 'numpy.array', 'np.array', (['[10, 10]'], {'dtype': 'np.float64'}), '([10, 10], dtype=np.float64)\n', (1367, 1395), True, 'import numpy as np\n'), ((1976, 2104), 'kivy.graphics.Rectangle', 'Rectangle', ([], {'pos': '((particle_coords[0] + N // 2) * 10, (particle_coords[1] + N // 2) * 10)', 'size': '(particle_size, particle_size)'}), '(pos=((particle_coords[0] + N // 2) * 10, 
(particle_coords[1] + N //\n 2) * 10), size=(particle_size, particle_size))\n', (1985, 2104), False, 'from kivy.graphics import Color, Rectangle\n'), ((2531, 2659), 'kivy.graphics.Rectangle', 'Rectangle', ([], {'pos': '((particle_coords[0] + N // 2) * 10, (particle_coords[1] + N // 2) * 10)', 'size': '(particle_size, particle_size)'}), '(pos=((particle_coords[0] + N // 2) * 10, (particle_coords[1] + N //\n 2) * 10), size=(particle_size, particle_size))\n', (2540, 2659), False, 'from kivy.graphics import Color, Rectangle\n'), ((1899, 1914), 'random.random', 'random.random', ([], {}), '()\n', (1912, 1914), False, 'import random\n'), ((2471, 2486), 'random.random', 'random.random', ([], {}), '()\n', (2484, 2486), False, 'import random\n')] |
# import tensorflow as tf
# import numpy as np
# class Augumentation():
# def __init__(self,size = 512):
# self.seed = 42
# self.size = size
# self.transform_functions = [self.crop,
# self.rotate]
# def transform(self,xx,yy):
# """ choose a random transfrom to be applied to both X and y"""
# func = np.random.choice(self.transform_functions)
# return func(xx,yy)
# def rotate(self,xx,yy):
# factor = 3.1416/8.0
# seed = np.random.randint(10000)
# rotate = tf.keras.layers.experimental.preprocessing.RandomRotation(factor, fill_mode='nearest', interpolation='bilinear', seed=seed)
# return rotate(xx),rotate(yy)
# def crop(self,xx,yy):
# seed = np.random.randint(10000)
# height_factor = 0.9
# width_factor = 0.9
# crop = tf.keras.layers.experimental.preprocessing.RandomZoom(height_factor, width_factor, fill_mode='reflect',interpolation='bilinear', seed=seed)
# return crop(xx),crop(yy)
from skimage.filters import gaussian
from skimage.util import random_noise
from skimage.exposure import adjust_gamma
from skimage.transform import resize,rotate
import numpy as np
class Augumentation():
    """
    Random paired augmentation for an image ``xx`` and its label map ``yy``.

    NOTE(review): the class name is presumably a typo of "Augmentation";
    renaming would break existing callers, so it is documented as-is.
    """
    def __init__(self,size = 512, seed = 42):
        # NOTE(review): this seeds numpy's *global* RNG, which affects every
        # other user of np.random in the process — confirm that is intended.
        self.seed = seed
        np.random.seed(self.seed)
        # Target side length; only used by crop() when resizing back.
        self.size = size
        # Pool of transforms sampled uniformly by transform();
        # crop and invert are currently disabled (commented out).
        self.transform_functions = [#self.crop,
                                    self.identity,
                                    self.blur,
                                    #self.invert,
                                    self.additive_noise,
                                    self.flip_horizontal,
                                    self.flip_vertical,
                                    self.blur_and_noise,
                                    self.contrast]
    def transform(self,xx,yy):
        """Apply one randomly chosen transform to the (image, label) pair."""
        func = np.random.choice(self.transform_functions)
        xx,yy = func(xx,yy)
        # whitening step (disabled)
        #xx = (xx - np.mean(xx))/np.std(xx)
        return xx,yy
    def identity(self,xx,yy):
        """No-op transform."""
        return xx,yy
    def invert(self,xx,yy):
        """Invert image intensities around their maximum; label unchanged."""
        return np.max(xx) - xx,yy
    def flip_horizontal(self,xx,yy):
        """Mirror both image and label left-right."""
        return np.fliplr(xx),np.fliplr(yy)
    def flip_vertical(self,xx,yy):
        """Mirror both image and label up-down."""
        return np.flipud(xx),np.flipud(yy)
    def crop(self,xx,yy):
        """Crop a random margin (0-15 px at the leading edge, 1-16 px at the
        trailing edge) from each axis, then resize back to (size, size).
        Assumes xx is 2-D and yy is 3-D (H, W, C) — TODO confirm."""
        r0,r1 = np.random.randint(16),-1-np.random.randint(16)
        c0,c1 = np.random.randint(16),-1-np.random.randint(16)
        xx= resize(xx[r0:r1,c0:c1],(self.size,self.size),preserve_range = True)
        yy = resize(yy[r0:r1,c0:c1,:],(self.size,self.size),preserve_range = True)
        return xx,yy
    def blur(self,xx,yy):
        """Gaussian-blur the image with a random sigma in [0, 1.5)."""
        return gaussian(xx,1.5*np.random.rand()),yy
    def blur_and_noise(self,xx,yy):
        """Blur, then add Gaussian noise."""
        xx,yy = self.blur(xx,yy)
        return self.additive_noise(xx,yy)
    def additive_noise(self,xx,yy):
        """Add zero-mean Gaussian noise with a random variance in [0, 0.02)."""
        return random_noise(xx,mode = 'gaussian',var = 0.02*np.random.rand()),yy
    def poisson_noise(self,xx,yy):
        """Poisson (shot) noise; not included in the sampled transform pool."""
        return random_noise(xx,mode = 'poisson'),yy
    def contrast(self,xx,yy):
        """Gamma correction with a random gamma in [0.25, 1.5)."""
        return adjust_gamma(xx,0.25 + 1.25*np.random.rand()),yy
    # def rotate(self,xx,yy):
    #     ang = np.random.randint(2*20)-20
    #     xx= rotate(xx[r0:r1,c0:c1],ang,preserve_range = True)
    #     yy = resize(yy[r0:r1,c0:c1,:],ang,preserve_range = True)
    #     return xx,yy
| [
"numpy.random.seed",
"skimage.util.random_noise",
"numpy.flipud",
"numpy.fliplr",
"numpy.max",
"numpy.random.randint",
"skimage.transform.resize",
"numpy.random.choice",
"numpy.random.rand"
] | [((1367, 1392), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (1381, 1392), True, 'import numpy as np\n'), ((1941, 1983), 'numpy.random.choice', 'np.random.choice', (['self.transform_functions'], {}), '(self.transform_functions)\n', (1957, 1983), True, 'import numpy as np\n'), ((2554, 2623), 'skimage.transform.resize', 'resize', (['xx[r0:r1, c0:c1]', '(self.size, self.size)'], {'preserve_range': '(True)'}), '(xx[r0:r1, c0:c1], (self.size, self.size), preserve_range=True)\n', (2560, 2623), False, 'from skimage.transform import resize, rotate\n'), ((2635, 2707), 'skimage.transform.resize', 'resize', (['yy[r0:r1, c0:c1, :]', '(self.size, self.size)'], {'preserve_range': '(True)'}), '(yy[r0:r1, c0:c1, :], (self.size, self.size), preserve_range=True)\n', (2641, 2707), False, 'from skimage.transform import resize, rotate\n'), ((2274, 2287), 'numpy.fliplr', 'np.fliplr', (['xx'], {}), '(xx)\n', (2283, 2287), True, 'import numpy as np\n'), ((2288, 2301), 'numpy.fliplr', 'np.fliplr', (['yy'], {}), '(yy)\n', (2297, 2301), True, 'import numpy as np\n'), ((2357, 2370), 'numpy.flipud', 'np.flipud', (['xx'], {}), '(xx)\n', (2366, 2370), True, 'import numpy as np\n'), ((2371, 2384), 'numpy.flipud', 'np.flipud', (['yy'], {}), '(yy)\n', (2380, 2384), True, 'import numpy as np\n'), ((2432, 2453), 'numpy.random.randint', 'np.random.randint', (['(16)'], {}), '(16)\n', (2449, 2453), True, 'import numpy as np\n'), ((2495, 2516), 'numpy.random.randint', 'np.random.randint', (['(16)'], {}), '(16)\n', (2512, 2516), True, 'import numpy as np\n'), ((3104, 3136), 'skimage.util.random_noise', 'random_noise', (['xx'], {'mode': '"""poisson"""'}), "(xx, mode='poisson')\n", (3116, 3136), False, 'from skimage.util import random_noise\n'), ((2198, 2208), 'numpy.max', 'np.max', (['xx'], {}), '(xx)\n', (2204, 2208), True, 'import numpy as np\n'), ((2457, 2478), 'numpy.random.randint', 'np.random.randint', (['(16)'], {}), '(16)\n', (2474, 2478), True, 'import numpy as 
np\n'), ((2520, 2541), 'numpy.random.randint', 'np.random.randint', (['(16)'], {}), '(16)\n', (2537, 2541), True, 'import numpy as np\n'), ((2790, 2806), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2804, 2806), True, 'import numpy as np\n'), ((3028, 3044), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3042, 3044), True, 'import numpy as np\n'), ((3219, 3235), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3233, 3235), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
tools for expression and count based tasks
"""
import os,sys,csv,gc,re
import numpy as np
def read_RSEM_counts_files(geneFilePath,isoformFilePath):
    """
    Read RSEM gene- and isoform-level counts files into dictionaries.

    Parameters
    ----------
    geneFilePath : str
        Path to the RSEM genes results file (tab-delimited with a header row).
    isoformFilePath : str
        Path to the RSEM isoforms results file (tab-delimited with a header row).

    Returns
    -------
    (dict, dict)
        Gene results keyed by gene id and isoform results keyed by isoform id.
        Each value holds length/eff_length/exp_count/TPM/FPKM fields (plus
        'transcript' for genes and 'gene' for isoforms).

    Raises
    ------
    Exception
        If either file is missing or the first column is not unique.
    """
    if not os.path.exists(geneFilePath):
        raise Exception("Cannot find gene file\n%s"%(geneFilePath))
    if not os.path.exists(isoformFilePath):
        raise Exception("Cannot find isoform file\n%s"%(isoformFilePath))

    gc.disable()  # pausing the GC speeds up the many small dict allocations
    try:
        ## load the gene counts ('rU' open mode was removed in Python 3.11)
        results1 = {}
        check = 0
        with open(geneFilePath, 'r', newline='') as fid1:
            reader1 = csv.reader(fid1, delimiter="\t")
            next(reader1)  # skip the header row
            for linja in reader1:
                check += 1
                results1[linja[0]] = {'transcript':linja[1],'length':float(linja[2]),'eff_length':float(linja[3]),\
                                      'exp_count':int(round(float(linja[4]))),'TPM':float(linja[5]),'FPKM':float(linja[6])}
        if check != len(results1.keys()):
            raise Exception("Rows in gene count file are not first columns unique")

        ## load the isoform results
        results2 = {}
        check = 0
        with open(isoformFilePath, 'r', newline='') as fid2:
            reader2 = csv.reader(fid2, delimiter="\t")
            next(reader2)
            for linja in reader2:
                check += 1
                results2[linja[0]] = {'gene':linja[1],'length':float(linja[2]),'eff_length':float(linja[3]),\
                                      'exp_count':float(linja[4]),'TPM':float(linja[5]),'FPKM':float(linja[6])}
        if check != len(results2.keys()):
            # bug fix: this message previously said "gene count file"
            raise Exception("Rows in isoform count file are not first columns unique")
    finally:
        gc.enable()  # always restore the GC, even if a check above raises

    return results1, results2
def read_matrix(matFilePath,delimiter=",",mtype='float'):
    """
    Load a delimited counts matrix.

    Assumes row one holds the sample ids and column one the transcript ids.

    Parameters
    ----------
    matFilePath : str
        Path to the matrix file.
    delimiter : str
        Field delimiter (default ",").
    mtype : str
        Element type of the returned matrix; 'int' or 'float'.

    Returns
    -------
    (np.ndarray, np.ndarray, np.ndarray)
        Transcript ids (n,), sample ids (m,), and the n x m data matrix.

    Raises
    ------
    Exception
        If mtype is invalid or the file does not exist.
    """
    print('reading', matFilePath)
    if mtype not in ['int','float']:
        raise Exception("mtype must be 'int' or 'float'")
    if not os.path.exists(matFilePath):
        raise Exception("Cannot find matFilePath\n%s"%matFilePath)

    transcriptIds = []
    rows = []
    gc.disable()  # speeds up the many small list appends
    with open(matFilePath, 'r', newline='') as fid:
        reader = csv.reader(fid, delimiter=delimiter)
        header = next(reader)
        sampleIds = np.array(header[1:])
        # single pass (the original read and parsed the file twice)
        for linja in reader:
            transcriptIds.append(linja[0])
            if mtype == 'int':
                rows.append([int(float(i)) for i in linja[1:]])
            else:
                rows.append([float(i) for i in linja[1:]])
    gc.enable()
    transcriptIds = np.array(transcriptIds)
    # an empty file still yields a correctly-shaped (0, m) matrix
    mat = np.array(rows, dtype=mtype) if rows else np.zeros((0, sampleIds.shape[0]), dtype=mtype)
    return transcriptIds,sampleIds,mat
def read_de_results(filePath,delimiter=",",tool="edgeR"):
    """
    Read differential-expression output produced by DESeq or edgeR.

    Parameters
    ----------
    filePath : str
        Path to the DE results file (first row header, first column ids).
    delimiter : str
        Field delimiter (default ",").
    tool : str
        Producing tool; must be 'edgeR' or 'DESeq'.

    Returns
    -------
    (np.ndarray, np.ndarray, np.ndarray)
        Transcript ids, result column names, and the numeric matrix
        ('NA' entries become NaN).

    Raises
    ------
    Exception
        If the file does not exist or tool is not recognized.
    """
    print('reading', filePath)
    if not os.path.exists(filePath):
        raise Exception("Cannot find matFilePath\n%s"%filePath)
    if tool not in ["edgeR","DESeq"]:
        raise Exception("invalid tool specified use 'edgeR' or 'DESeq'")

    transcriptIds = []
    rows = []
    gc.disable()  # speeds up the many small list appends
    with open(filePath, 'r', newline='') as fid:
        reader = csv.reader(fid, delimiter=delimiter)
        header = next(reader)
        columnIds = np.array(header[1:])
        for linja in reader:
            transcriptIds.append(linja[0])
            # map the tools' 'NA' placeholder to NaN explicitly; the previous
            # re.sub("NA","NaN") would corrupt any value merely containing "NA"
            rows.append([float('nan') if i == 'NA' else float(i) for i in linja[1:]])
    gc.enable()
    transcriptIds = np.array(transcriptIds)
    mat = np.array(rows) if rows else np.zeros((0, columnIds.shape[0]))
    return transcriptIds,columnIds,mat
def create_count_matrix(results,label,sampleList):
    """
    Write a counts matrix CSV ("<label>-counts.csv") from per-sample RSEM results.

    Parameters
    ----------
    results : sequence of dict
        One dict per sample (parallel to sampleList), each mapping a feature
        id to a record with an 'exp_count' field.
    label : str
        Output file prefix; a 'gene' substring selects the "gene" header for
        the id column, otherwise "isoform" is used.
    sampleList : list of str
        Sample names, written as the header row.
    """
    ## use first sample to get rows
    # bug fix: np.array(dict.keys()) creates a 0-d object array in Python 3,
    # which sorted() cannot iterate; sort the keys view directly instead
    keys = sorted(results[0].keys())
    mat = np.zeros((len(keys),len(sampleList)))
    for j,sample in enumerate(sampleList):
        for i,key in enumerate(keys):
            mat[i,j] = results[j][key]['exp_count']
    ## write to file (newline='' keeps csv from emitting blank lines on Windows)
    with open("%s-counts.csv"%label, 'w', newline='') as fid:
        writer = csv.writer(fid)
        if re.search("gene",label):
            writer.writerow(["gene"]+sampleList)
        else:
            writer.writerow(["isoform"]+sampleList)
        for r in range(mat.shape[0]):
            row = [keys[r]] + [int(i) for i in mat[r,:].tolist()]
            writer.writerow(row)
| [
"gc.disable",
"csv.reader",
"csv.writer",
"numpy.zeros",
"os.path.exists",
"numpy.array",
"re.search",
"gc.enable",
"re.sub"
] | [((545, 577), 'csv.reader', 'csv.reader', (['fid1'], {'delimiter': '"""\t"""'}), "(fid1, delimiter='\\t')\n", (555, 577), False, 'import os, sys, csv, gc, re\n'), ((641, 653), 'gc.disable', 'gc.disable', ([], {}), '()\n', (651, 653), False, 'import os, sys, csv, gc, re\n'), ((1148, 1180), 'csv.reader', 'csv.reader', (['fid2'], {'delimiter': '"""\t"""'}), "(fid2, delimiter='\\t')\n", (1158, 1180), False, 'import os, sys, csv, gc, re\n'), ((1653, 1664), 'gc.enable', 'gc.enable', ([], {}), '()\n', (1662, 1664), False, 'import os, sys, csv, gc, re\n'), ((2178, 2214), 'csv.reader', 'csv.reader', (['fid'], {'delimiter': 'delimiter'}), '(fid, delimiter=delimiter)\n', (2188, 2214), False, 'import os, sys, csv, gc, re\n'), ((2315, 2335), 'numpy.array', 'np.array', (['header[1:]'], {}), '(header[1:])\n', (2323, 2335), True, 'import numpy as np\n'), ((2340, 2352), 'gc.disable', 'gc.disable', ([], {}), '()\n', (2350, 2352), False, 'import os, sys, csv, gc, re\n'), ((2421, 2432), 'gc.enable', 'gc.enable', ([], {}), '()\n', (2430, 2432), False, 'import os, sys, csv, gc, re\n'), ((2453, 2476), 'numpy.array', 'np.array', (['transcriptIds'], {}), '(transcriptIds)\n', (2461, 2476), True, 'import numpy as np\n'), ((2530, 2597), 'numpy.zeros', 'np.zeros', (['(transcriptIds.shape[0], sampleIds.shape[0])'], {'dtype': 'mtype'}), '((transcriptIds.shape[0], sampleIds.shape[0]), dtype=mtype)\n', (2538, 2597), True, 'import numpy as np\n'), ((2641, 2677), 'csv.reader', 'csv.reader', (['fid'], {'delimiter': 'delimiter'}), '(fid, delimiter=delimiter)\n', (2651, 2677), False, 'import os, sys, csv, gc, re\n'), ((3398, 3434), 'csv.reader', 'csv.reader', (['fid'], {'delimiter': 'delimiter'}), '(fid, delimiter=delimiter)\n', (3408, 3434), False, 'import os, sys, csv, gc, re\n'), ((3502, 3522), 'numpy.array', 'np.array', (['header[1:]'], {}), '(header[1:])\n', (3510, 3522), True, 'import numpy as np\n'), ((3587, 3599), 'gc.disable', 'gc.disable', ([], {}), '()\n', (3597, 3599), False, 'import 
os, sys, csv, gc, re\n'), ((3668, 3679), 'gc.enable', 'gc.enable', ([], {}), '()\n', (3677, 3679), False, 'import os, sys, csv, gc, re\n'), ((3700, 3723), 'numpy.array', 'np.array', (['transcriptIds'], {}), '(transcriptIds)\n', (3708, 3723), True, 'import numpy as np\n'), ((3777, 3831), 'numpy.zeros', 'np.zeros', (['(transcriptIds.shape[0], columnIds.shape[0])'], {}), '((transcriptIds.shape[0], columnIds.shape[0]))\n', (3785, 3831), True, 'import numpy as np\n'), ((3873, 3909), 'csv.reader', 'csv.reader', (['fid'], {'delimiter': 'delimiter'}), '(fid, delimiter=delimiter)\n', (3883, 3909), False, 'import os, sys, csv, gc, re\n'), ((4674, 4689), 'csv.writer', 'csv.writer', (['fid'], {}), '(fid)\n', (4684, 4689), False, 'import os, sys, csv, gc, re\n'), ((4697, 4721), 're.search', 're.search', (['"""gene"""', 'label'], {}), "('gene', label)\n", (4706, 4721), False, 'import os, sys, csv, gc, re\n'), ((251, 279), 'os.path.exists', 'os.path.exists', (['geneFilePath'], {}), '(geneFilePath)\n', (265, 279), False, 'import os, sys, csv, gc, re\n'), ((360, 391), 'os.path.exists', 'os.path.exists', (['isoformFilePath'], {}), '(isoformFilePath)\n', (374, 391), False, 'import os, sys, csv, gc, re\n'), ((2036, 2063), 'os.path.exists', 'os.path.exists', (['matFilePath'], {}), '(matFilePath)\n', (2050, 2063), False, 'import os, sys, csv, gc, re\n'), ((3153, 3177), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (3167, 3177), False, 'import os, sys, csv, gc, re\n'), ((3989, 4011), 're.sub', 're.sub', (['"""NA"""', '"""NaN"""', 'i'], {}), "('NA', 'NaN', i)\n", (3995, 4011), False, 'import os, sys, csv, gc, re\n')] |
import numpy as np
import pandas as pd
import pytest
from abagen import datasets
# File kinds checked against the dict returned by datasets.fetch_microarray
# (one entry of each kind is expected per fetched donor)
KEYS = [
    'microarray', 'annotation', 'pacall', 'probes', 'ontology'
]
def test_fetch_datasets(testdir):
    """Fetching one donor yields one file per data kind; bad donor ids raise."""
    data_dir = str(testdir)
    # a single-donor fetch should return exactly one file of every kind
    files = datasets.fetch_microarray(data_dir=data_dir, donors=['12876'])
    assert isinstance(files, dict)
    assert all(len(files.get(key)) == 1 for key in KEYS)
    # an unknown donor id must be rejected
    with pytest.raises(ValueError):
        datasets.fetch_microarray(donors=['notadonor'])
    # donors=None fetches the default donor set
    files = datasets.fetch_microarray(data_dir=data_dir, donors=None)
def test_fetch_alleninf_coords():
    """Well coordinates load as a (3702, 3) MNI DataFrame indexed by well_id."""
    coords = datasets._fetch_alleninf_coords()
    assert isinstance(coords, pd.DataFrame)
    assert coords.shape == (3702, 3)
    assert coords.index.name == 'well_id'
    assert np.all(coords.columns == ['mni_x', 'mni_y', 'mni_z'])
def test_fetch_mri():
    """fetch_mri is not implemented yet and should raise NotImplementedError."""
    with pytest.raises(NotImplementedError):
        datasets.fetch_mri()
| [
"abagen.datasets.fetch_microarray",
"abagen.datasets._fetch_alleninf_coords",
"abagen.datasets.fetch_mri",
"pytest.raises",
"numpy.all"
] | [((743, 776), 'abagen.datasets._fetch_alleninf_coords', 'datasets._fetch_alleninf_coords', ([], {}), '()\n', (774, 776), False, 'from abagen import datasets\n'), ((874, 927), 'numpy.all', 'np.all', (["(coords.columns == ['mni_x', 'mni_y', 'mni_z'])"], {}), "(coords.columns == ['mni_x', 'mni_y', 'mni_z'])\n", (880, 927), True, 'import numpy as np\n'), ((498, 523), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (511, 523), False, 'import pytest\n'), ((533, 580), 'abagen.datasets.fetch_microarray', 'datasets.fetch_microarray', ([], {'donors': "['notadonor']"}), "(donors=['notadonor'])\n", (558, 580), False, 'from abagen import datasets\n'), ((998, 1032), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (1011, 1032), False, 'import pytest\n'), ((1042, 1062), 'abagen.datasets.fetch_mri', 'datasets.fetch_mri', ([], {}), '()\n', (1060, 1062), False, 'from abagen import datasets\n')] |
import torch
import torch.nn as nn
import numpy as np
from math import ceil
class ConvPoint(nn.Module):
    """ConvPoint convolution layer.

    Provide the convolution layer as defined in ConvPoint paper
    (https://github.com/aboulch/ConvPoint).
    To be used with a `lightconvpoint.nn.Conv` instance.

    # Arguments
        in_channels: int.
            The number of input channels.
        out_channels: int.
            The number of output channels.
        kernel_size: int.
            The size of the kernel (number of kernel points).
        bias: Boolean.
            Defaults to `True`. Add an optimizable bias.
        dim: int.
            Defaults to `3`. Spatial dimension.
        kernel_separation: Boolean.
            Defaults to `False`. Factor the convolution into two smaller
            convolutions (a grouped one followed by a pointwise one).
        normalize_pts: Boolean.
            Defaults to `True`. Rescale each neighborhood's local
            coordinates to unit maximum norm before projection.

    # Forward arguments
        input: 3-D torch tensor.
            The input features. Dimensions are (B, I, N) with B the batch size, I the
            number of input channels and N the number of input points.
        points: 3-D torch tensor.
            The input points. Dimensions are (B, D, N) with B the batch size, D the
            dimension of the spatial space and N the number of input points.
        support_points: 3-D torch tensor.
            The support points to project features on. Dimensions are (B, O, N) with B
            the batch size, O the number of output channels and N the number of input
            points.

    # Returns
        features: 3-D torch tensor.
            The computed features. Dimensions are (B, O, N) with B the batch size,
            O the number of output channels and N the number of input points.
        support_points: 3-D torch tensor.
            The support points. If they were provided as an input, return the same tensor.
    """

    def __init__(self, in_channels, out_channels, kernel_size, bias=True, dim=3, kernel_separation=False,
                 normalize_pts=True, **kwargs):
        super().__init__()
        self.normalize_pts = normalize_pts

        # parameters
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.has_bias = bias
        self.dim = dim

        # convolution kernel
        if kernel_separation:
            # equivalent to two kernels K1 * K2
            dm = int(ceil(self.out_channels / self.in_channels))
            self.cv = nn.Sequential(
                nn.Conv2d(in_channels, dm*in_channels, (1, kernel_size), bias=bias, groups=self.in_channels),
                nn.Conv2d(in_channels*dm, out_channels, (1, 1), bias=bias)
            )
        else:
            self.cv = nn.Conv2d(in_channels, out_channels, (1, kernel_size), bias=bias)

        # centers: kernel points drawn uniformly inside the unit ball via
        # rejection sampling (redraw until the point lies within radius 1)
        center_data = np.zeros((self.dim, self.kernel_size))
        for i in range(self.kernel_size):
            coord = np.random.rand(self.dim) * 2 - 1
            while (coord ** 2).sum() > 1:
                coord = np.random.rand(self.dim) * 2 - 1
            center_data[:, i] = coord
        self.centers = nn.Parameter(
            torch.from_numpy(center_data).float(), requires_grad=True
        )

        # MLP: 3 Linear+ReLU layers mapping each neighbor's (dim * kernel_size)
        # offsets-to-centers to kernel_size projection weights
        modules = []
        proj_dim = self.dim * self.kernel_size
        for i in range(3):
            modules.append(nn.Linear(proj_dim, self.kernel_size))
            modules.append(nn.ReLU())
            proj_dim = self.kernel_size
        self.projector = nn.Sequential(*modules)

    def normalize_points(self, pts, radius=None):
        """Scale each neighborhood so its farthest point has norm 1.

        `radius` is currently unused. The `maxi + (maxi == 0)` term guards
        against division by zero for degenerate (all-zero) neighborhoods.
        """
        maxi = torch.sqrt((pts.detach() ** 2).sum(1).max(2)[0])
        maxi = maxi + (maxi == 0)
        return pts / maxi.view(maxi.size(0), 1, maxi.size(1), 1)

    def forward(self, input, points, support_points):
        """Computes the features associated with the support points."""

        # center the neighborhoods (local coordinates)
        pts = points - support_points.unsqueeze(3)

        # normalize points
        if self.normalize_pts:
            pts = self.normalize_points(pts)

        # project features on kernel points: offsets of each neighbor to each
        # kernel center, flattened, then turned into weights by the MLP
        pts = (pts.permute(0, 2, 3, 1).unsqueeze(4) - self.centers).contiguous()
        pts = pts.view(pts.size(0), pts.size(1), pts.size(2), -1)
        mat = self.projector(pts)

        # compute features: combine features with the learned weights via
        # matmul, then apply the (1, kernel_size) convolution
        features = input.transpose(1, 2)
        features = torch.matmul(features, mat).transpose(1,2)
        features = self.cv(features).squeeze(3)

        return features, support_points
| [
"torch.nn.ReLU",
"math.ceil",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"numpy.zeros",
"torch.nn.Linear",
"numpy.random.rand",
"torch.matmul",
"torch.from_numpy"
] | [((2639, 2677), 'numpy.zeros', 'np.zeros', (['(self.dim, self.kernel_size)'], {}), '((self.dim, self.kernel_size))\n', (2647, 2677), True, 'import numpy as np\n'), ((3306, 3329), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (3319, 3329), True, 'import torch.nn as nn\n'), ((2532, 2597), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '(1, kernel_size)'], {'bias': 'bias'}), '(in_channels, out_channels, (1, kernel_size), bias=bias)\n', (2541, 2597), True, 'import torch.nn as nn\n'), ((2216, 2258), 'math.ceil', 'ceil', (['(self.out_channels / self.in_channels)'], {}), '(self.out_channels / self.in_channels)\n', (2220, 2258), False, 'from math import ceil\n'), ((2313, 2411), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(dm * in_channels)', '(1, kernel_size)'], {'bias': 'bias', 'groups': 'self.in_channels'}), '(in_channels, dm * in_channels, (1, kernel_size), bias=bias,\n groups=self.in_channels)\n', (2322, 2411), True, 'import torch.nn as nn\n'), ((2423, 2483), 'torch.nn.Conv2d', 'nn.Conv2d', (['(in_channels * dm)', 'out_channels', '(1, 1)'], {'bias': 'bias'}), '(in_channels * dm, out_channels, (1, 1), bias=bias)\n', (2432, 2483), True, 'import torch.nn as nn\n'), ((3164, 3201), 'torch.nn.Linear', 'nn.Linear', (['proj_dim', 'self.kernel_size'], {}), '(proj_dim, self.kernel_size)\n', (3173, 3201), True, 'import torch.nn as nn\n'), ((3230, 3239), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3237, 3239), True, 'import torch.nn as nn\n'), ((4196, 4223), 'torch.matmul', 'torch.matmul', (['features', 'mat'], {}), '(features, mat)\n', (4208, 4223), False, 'import torch\n'), ((2740, 2764), 'numpy.random.rand', 'np.random.rand', (['self.dim'], {}), '(self.dim)\n', (2754, 2764), True, 'import numpy as np\n'), ((2959, 2988), 'torch.from_numpy', 'torch.from_numpy', (['center_data'], {}), '(center_data)\n', (2975, 2988), False, 'import torch\n'), ((2839, 2863), 'numpy.random.rand', 'np.random.rand', (['self.dim'], {}), 
'(self.dim)\n', (2853, 2863), True, 'import numpy as np\n')] |
import os
import sys
from config import cfg
import argparse
import torch
from torch.backends import cudnn
import torchvision.transforms as T
from PIL import Image
sys.path.append('.')
from utils.logger import setup_logger
from model import make_model
import numpy as np
import cv2
from utils.metrics import cosine_similarity
def visualizer(test_img, camid, top_k=10, img_size=(128, 128)):
    """
    Save a strip image of the query followed by its top-k gallery matches.

    Relies on module-level globals set by the main script: ``query_img``
    (PIL image of the query), ``indices`` (ranked gallery indices),
    ``img_path`` (gallery image paths) and ``cfg`` (for OUTPUT_DIR).

    Parameters
    ----------
    test_img : str
        Query image filename; used only to name the output PNG.
    camid : str or int
        Camera id tag embedded in the output filename.
    top_k : int
        Number of ranked gallery images appended to the strip.
    img_size : sequence of two ints
        (height, width) each tile is resized to. Changed from a mutable
        list default to a tuple to avoid the shared-default pitfall.
    """
    figure = np.asarray(query_img.resize((img_size[1], img_size[0])))
    for k in range(top_k):
        # dead locals `name`/`title` from the original loop were removed
        img = np.asarray(Image.open(img_path[indices[0][k]]).resize((img_size[1], img_size[0])))
        figure = np.hstack((figure, img))
    figure = cv2.cvtColor(figure, cv2.COLOR_BGR2RGB)
    if not os.path.exists(cfg.OUTPUT_DIR + "/results/"):
        print('create a new folder named results in {}'.format(cfg.OUTPUT_DIR))
        os.makedirs(cfg.OUTPUT_DIR + "/results")
    cv2.imwrite(cfg.OUTPUT_DIR + "/results/{}-cam{}.png".format(test_img, camid), figure)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="ReID Baseline Training")
parser.add_argument(
"--config_file", default="./configs/Market1501.yaml", help="path to config file", type=str
)
args = parser.parse_args()
if args.config_file != "":
cfg.merge_from_file(args.config_file)
cfg.freeze()
os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
cudnn.benchmark = True
model = make_model(cfg, 255)
model.load_param(cfg.TEST.TEST_WEIGHT)
device = 'cuda'
model = model.to(device)
transform = T.Compose([
T.Resize(cfg.DATA.INPUT_SIZE),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
logger = setup_logger('{}.test'.format(cfg.PROJECT_NAME), cfg.OUTPUT_DIR, if_train=False)
model.eval()
for test_img in os.listdir(cfg.TEST.QUERY_DIR):
logger.info('Finding ID {} ...'.format(test_img))
gallery_feats = torch.load(cfg.OUTPUT_DIR + '/gfeats.pth')
img_path = np.load(cfg.OUTPUT_DIR +'/imgpath.npy')
print(gallery_feats.shape, len(img_path))
query_img = Image.open(cfg.TEST.QUERY_DIR + test_img)
input = torch.unsqueeze(transform(query_img), 0)
input = input.to(device)
with torch.no_grad():
query_feat = model(input)
dist_mat = cosine_similarity(query_feat, gallery_feats)
indices = np.argsort(dist_mat, axis=1)
visualizer(test_img, camid='mixed', top_k=10, img_size=cfg.DATA.INPUT_SIZE) | [
"numpy.load",
"argparse.ArgumentParser",
"utils.metrics.cosine_similarity",
"numpy.argsort",
"torchvision.transforms.Normalize",
"torch.no_grad",
"sys.path.append",
"cv2.cvtColor",
"torch.load",
"os.path.exists",
"config.cfg.merge_from_file",
"numpy.hstack",
"model.make_model",
"os.listdir... | [((163, 183), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (178, 183), False, 'import sys\n'), ((702, 741), 'cv2.cvtColor', 'cv2.cvtColor', (['figure', 'cv2.COLOR_BGR2RGB'], {}), '(figure, cv2.COLOR_BGR2RGB)\n', (714, 741), False, 'import cv2\n'), ((1053, 1114), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""ReID Baseline Training"""'}), "(description='ReID Baseline Training')\n", (1076, 1114), False, 'import argparse\n'), ((1358, 1370), 'config.cfg.freeze', 'cfg.freeze', ([], {}), '()\n', (1368, 1370), False, 'from config import cfg\n'), ((1473, 1493), 'model.make_model', 'make_model', (['cfg', '(255)'], {}), '(cfg, 255)\n', (1483, 1493), False, 'from model import make_model\n'), ((1892, 1922), 'os.listdir', 'os.listdir', (['cfg.TEST.QUERY_DIR'], {}), '(cfg.TEST.QUERY_DIR)\n', (1902, 1922), False, 'import os\n'), ((645, 669), 'numpy.hstack', 'np.hstack', (['(figure, img)'], {}), '((figure, img))\n', (654, 669), True, 'import numpy as np\n'), ((752, 796), 'os.path.exists', 'os.path.exists', (["(cfg.OUTPUT_DIR + '/results/')"], {}), "(cfg.OUTPUT_DIR + '/results/')\n", (766, 796), False, 'import os\n'), ((885, 925), 'os.makedirs', 'os.makedirs', (["(cfg.OUTPUT_DIR + '/results')"], {}), "(cfg.OUTPUT_DIR + '/results')\n", (896, 925), False, 'import os\n'), ((1316, 1353), 'config.cfg.merge_from_file', 'cfg.merge_from_file', (['args.config_file'], {}), '(args.config_file)\n', (1335, 1353), False, 'from config import cfg\n'), ((2007, 2049), 'torch.load', 'torch.load', (["(cfg.OUTPUT_DIR + '/gfeats.pth')"], {}), "(cfg.OUTPUT_DIR + '/gfeats.pth')\n", (2017, 2049), False, 'import torch\n'), ((2069, 2109), 'numpy.load', 'np.load', (["(cfg.OUTPUT_DIR + '/imgpath.npy')"], {}), "(cfg.OUTPUT_DIR + '/imgpath.npy')\n", (2076, 2109), True, 'import numpy as np\n'), ((2179, 2220), 'PIL.Image.open', 'Image.open', (['(cfg.TEST.QUERY_DIR + test_img)'], {}), '(cfg.TEST.QUERY_DIR + test_img)\n', (2189, 2220), False, 
'from PIL import Image\n'), ((2399, 2443), 'utils.metrics.cosine_similarity', 'cosine_similarity', (['query_feat', 'gallery_feats'], {}), '(query_feat, gallery_feats)\n', (2416, 2443), False, 'from utils.metrics import cosine_similarity\n'), ((2462, 2490), 'numpy.argsort', 'np.argsort', (['dist_mat'], {'axis': '(1)'}), '(dist_mat, axis=1)\n', (2472, 2490), True, 'import numpy as np\n'), ((1623, 1652), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.DATA.INPUT_SIZE'], {}), '(cfg.DATA.INPUT_SIZE)\n', (1631, 1652), True, 'import torchvision.transforms as T\n'), ((1662, 1674), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1672, 1674), True, 'import torchvision.transforms as T\n'), ((1684, 1750), 'torchvision.transforms.Normalize', 'T.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1695, 1750), True, 'import torchvision.transforms as T\n'), ((2324, 2339), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2337, 2339), False, 'import torch\n'), ((557, 592), 'PIL.Image.open', 'Image.open', (['img_path[indices[0][k]]'], {}), '(img_path[indices[0][k]])\n', (567, 592), False, 'from PIL import Image\n')] |
from __future__ import annotations
from threading import Lock
from typing import ClassVar
from hypothesis import given, settings, strategies as st
import hypothesis.extra.numpy as st_np
import numpy as np
from rasterio import windows
import dask.core
import dask.threaded
from dask.array.utils import assert_eq
from stackstac.raster_spec import Bbox, RasterSpec
from stackstac.prepare import ASSET_TABLE_DT
from stackstac.to_dask import (
ChunksParam,
items_to_dask,
normalize_chunks,
window_from_bounds,
)
from stackstac.testing import strategies as st_stc
@st.composite
def asset_tables(
    draw: st.DrawFn,
    max_side: int | None = None,
) -> np.ndarray:
    """
    Strategy producing 2-D asset tables with random bounds and random gaps.

    Every present entry gets a URL of the form ``"fake://{i}/{j}"`` encoding
    its (time, band) position, so a Reader can recover its coordinates.
    Bounds may be degenerate (min equal to max). Absent entries keep the
    null ``ASSET_TABLE_DT`` record.
    """
    table_shape = draw(
        st_np.array_shapes(min_dims=2, max_dims=2, max_side=max_side), label="shape"
    )
    raw_bounds = draw(
        st_np.arrays(
            object,
            table_shape,
            elements=st_stc.simple_bboxes(),
            fill=st.none(),
        ),
        label="bounds_arr",
    )
    table = np.empty_like(raw_bounds, ASSET_TABLE_DT)
    n_rows, n_cols = raw_bounds.shape
    for i in range(n_rows):
        for j in range(n_cols):
            bbox = raw_bounds[i, j]
            if not bbox:
                continue
            # the URL encodes the table position for the fake reader
            table[i, j] = (f"fake://{i}/{j}", bbox)
    return table
@given(
    st.data(),
    asset_tables(max_side=5),
    st_stc.simple_bboxes(-4, -4, 4, 4, zero_size=False),
    st_stc.raster_dtypes,
    st_stc.chunksizes(
        4,
        max_side=10,
        auto=False,
        bytes=False,
        none=False,
        minus_one=False,
        dicts=False,
        singleton=False,
    ),
)
@settings(max_examples=500, print_blob=True)
def test_items_to_dask(
    data: st.DataObject,
    asset_table: np.ndarray,
    bounds: Bbox,
    dtype_: np.dtype,
    chunksize: tuple[int, int, int, int],
):
    """
    End-to-end property test of `items_to_dask`.

    Builds the expected output array up front (nodata everywhere, then random
    data written through each asset's window) and checks that the dask graph,
    served by a fake `TestReader`, reproduces it exactly.
    """
    spec_ = RasterSpec(4326, bounds, (0.5, 0.5))
    fill_value_ = data.draw(st_np.from_dtype(dtype_), label="fill_value")

    # Build expected array of the final stack.
    # Start with all nodata, then write data in for each asset.
    # The `TestReader` will then just read from this final array, sliced to the appropriate window.
    # (This is much easier than calculating where to put nodata values ourselves.)
    asset_windows: dict[str, windows.Window] = {}
    results = np.full(asset_table.shape + spec_.shape, fill_value_, dtype_)
    for i, item in enumerate(asset_table):
        for j, asset in enumerate(item):
            url = asset["url"]
            if url is None:
                continue
            assert url == f"fake://{i}/{j}"
            window = window_from_bounds(asset["bounds"], spec_.transform)
            asset_windows[url] = window
            chunk = results[(i, j) + windows.window_index(window)]
            if chunk.size:
                # Asset falls within final bounds
                chunk[:] = np.random.default_rng().uniform(0, 128, chunk.shape)

    # Fake reader serving slices of the precomputed `results` array, while
    # asserting on how it is constructed and called.
    class TestReader:
        opened: ClassVar[set[str]] = set()
        lock: ClassVar[Lock] = Lock()

        def __init__(
            self,
            *,
            url: str,
            spec: RasterSpec,
            dtype: np.dtype,
            fill_value: int | float,
            **kwargs,
        ) -> None:
            with self.lock:
                # Each URL should only be opened once.
                # The `dask.annotate` on the `asset_table_to_reader_and_window` step is necessary for this,
                # otherwise blockwise fusion would merge the Reader creation into every `fetch_raster_window`!
                assert url not in self.opened
                self.opened.add(url)

            # recover this asset's (time, band) position from the fake URL
            i, j = map(int, url[7:].split("/"))
            self.full_data = results[i, j]
            self.window = asset_windows[url]

            assert spec == spec_
            assert dtype == dtype_
            np.testing.assert_equal(fill_value, fill_value_)

        def read(self, window: windows.Window) -> np.ndarray:
            # requested windows must never exceed the spatial chunksize
            assert 0 < window.height <= chunksize[2]
            assert 0 < window.width <= chunksize[3]
            # Read should be bypassed entirely if windows don't intersect
            assert windows.intersect(window, self.window)
            return self.full_data[window.toslices()]

        def close(self) -> None:
            pass

        def __getstate__(self) -> dict:
            return self.__dict__

        def __setstate__(self, state):
            self.__init__(**state)

    arr = items_to_dask(
        asset_table,
        spec_,
        chunksize,
        dtype=dtype_,
        fill_value=fill_value_,
        reader=TestReader,
    )
    assert arr.chunksize == tuple(
        min(x, y) for x, y in zip(asset_table.shape + spec_.shape, chunksize)
    )
    assert arr.dtype == dtype_

    assert_eq(arr, results, equal_nan=True)

    # Check that entirely-empty chunks are broadcast-tricked into being tiny.
    # NOTE: unfortunately, this computes the array again, which slows down tests.
    # But passing a computed array into `assert_eq` would skip handy checks for chunks, meta, etc.
    TestReader.opened.clear()
    chunks = dask.threaded.get(arr.dask, list(dask.core.flatten(arr.__dask_keys__())))
    for chunk in chunks:
        if (
            np.isnan(chunk) if np.isnan(fill_value_) else np.equal(chunk, fill_value_)
        ).all():
            assert chunk.strides == (0, 0, 0, 0)
@given(
    st_stc.chunksizes(4, max_side=1000),
    st_np.array_shapes(min_dims=4, max_dims=4),
    st_stc.raster_dtypes,
)
def test_normalize_chunks(
    chunksize: ChunksParam, shape: tuple[int, int, int, int], dtype: np.dtype
):
    """Normalized chunks are 4-D with positive sizes in every dimension."""
    normalized = normalize_chunks(chunksize, shape, dtype)
    block_counts = tuple(len(dim_chunks) for dim_chunks in normalized)
    assert len(block_counts) == 4
    assert all(size >= 1 for dim_chunks in normalized for size in dim_chunks)
    # 2-D-style inputs must leave the time and band dimensions unchunked
    if isinstance(chunksize, int) or (isinstance(normalized, tuple) and len(normalized) == 2):
        assert block_counts[:2] == shape[:2]
| [
"stackstac.raster_spec.RasterSpec",
"numpy.isnan",
"numpy.random.default_rng",
"hypothesis.settings",
"rasterio.windows.window_index",
"numpy.full",
"stackstac.testing.strategies.chunksizes",
"dask.array.utils.assert_eq",
"numpy.empty_like",
"numpy.equal",
"threading.Lock",
"stackstac.to_dask.... | [((2385, 2428), 'hypothesis.settings', 'settings', ([], {'max_examples': '(500)', 'print_blob': '(True)'}), '(max_examples=500, print_blob=True)\n', (2393, 2428), False, 'from hypothesis import given, settings, strategies as st\n'), ((1789, 1830), 'numpy.empty_like', 'np.empty_like', (['bounds_arr', 'ASSET_TABLE_DT'], {}), '(bounds_arr, ASSET_TABLE_DT)\n', (1802, 1830), True, 'import numpy as np\n'), ((1857, 1883), 'numpy.ndenumerate', 'np.ndenumerate', (['bounds_arr'], {}), '(bounds_arr)\n', (1871, 1883), True, 'import numpy as np\n'), ((2604, 2640), 'stackstac.raster_spec.RasterSpec', 'RasterSpec', (['(4326)', 'bounds', '(0.5, 0.5)'], {}), '(4326, bounds, (0.5, 0.5))\n', (2614, 2640), False, 'from stackstac.raster_spec import Bbox, RasterSpec\n'), ((3075, 3136), 'numpy.full', 'np.full', (['(asset_table.shape + spec_.shape)', 'fill_value_', 'dtype_'], {}), '(asset_table.shape + spec_.shape, fill_value_, dtype_)\n', (3082, 3136), True, 'import numpy as np\n'), ((5223, 5329), 'stackstac.to_dask.items_to_dask', 'items_to_dask', (['asset_table', 'spec_', 'chunksize'], {'dtype': 'dtype_', 'fill_value': 'fill_value_', 'reader': 'TestReader'}), '(asset_table, spec_, chunksize, dtype=dtype_, fill_value=\n fill_value_, reader=TestReader)\n', (5236, 5329), False, 'from stackstac.to_dask import ChunksParam, items_to_dask, normalize_chunks, window_from_bounds\n'), ((5535, 5574), 'dask.array.utils.assert_eq', 'assert_eq', (['arr', 'results'], {'equal_nan': '(True)'}), '(arr, results, equal_nan=True)\n', (5544, 5574), False, 'from dask.array.utils import assert_eq\n'), ((2064, 2073), 'hypothesis.strategies.data', 'st.data', ([], {}), '()\n', (2071, 2073), True, 'from hypothesis import given, settings, strategies as st\n'), ((2109, 2160), 'stackstac.testing.strategies.simple_bboxes', 'st_stc.simple_bboxes', (['(-4)', '(-4)', '(4)', '(4)'], {'zero_size': '(False)'}), '(-4, -4, 4, 4, zero_size=False)\n', (2129, 2160), True, 'from stackstac.testing import 
strategies as st_stc\n'), ((2192, 2313), 'stackstac.testing.strategies.chunksizes', 'st_stc.chunksizes', (['(4)'], {'max_side': '(10)', 'auto': '(False)', 'bytes': '(False)', 'none': '(False)', 'minus_one': '(False)', 'dicts': '(False)', 'singleton': '(False)'}), '(4, max_side=10, auto=False, bytes=False, none=False,\n minus_one=False, dicts=False, singleton=False)\n', (2209, 2313), True, 'from stackstac.testing import strategies as st_stc\n'), ((6391, 6432), 'stackstac.to_dask.normalize_chunks', 'normalize_chunks', (['chunksize', 'shape', 'dtype'], {}), '(chunksize, shape, dtype)\n', (6407, 6432), False, 'from stackstac.to_dask import ChunksParam, items_to_dask, normalize_chunks, window_from_bounds\n'), ((6157, 6192), 'stackstac.testing.strategies.chunksizes', 'st_stc.chunksizes', (['(4)'], {'max_side': '(1000)'}), '(4, max_side=1000)\n', (6174, 6192), True, 'from stackstac.testing import strategies as st_stc\n'), ((6198, 6240), 'hypothesis.extra.numpy.array_shapes', 'st_np.array_shapes', ([], {'min_dims': '(4)', 'max_dims': '(4)'}), '(min_dims=4, max_dims=4)\n', (6216, 6240), True, 'import hypothesis.extra.numpy as st_np\n'), ((1485, 1546), 'hypothesis.extra.numpy.array_shapes', 'st_np.array_shapes', ([], {'min_dims': '(2)', 'max_dims': '(2)', 'max_side': 'max_side'}), '(min_dims=2, max_dims=2, max_side=max_side)\n', (1503, 1546), True, 'import hypothesis.extra.numpy as st_np\n'), ((2669, 2693), 'hypothesis.extra.numpy.from_dtype', 'st_np.from_dtype', (['dtype_'], {}), '(dtype_)\n', (2685, 2693), True, 'import hypothesis.extra.numpy as st_np\n'), ((3786, 3792), 'threading.Lock', 'Lock', ([], {}), '()\n', (3790, 3792), False, 'from threading import Lock\n'), ((3371, 3423), 'stackstac.to_dask.window_from_bounds', 'window_from_bounds', (["asset['bounds']", 'spec_.transform'], {}), "(asset['bounds'], spec_.transform)\n", (3389, 3423), False, 'from stackstac.to_dask import ChunksParam, items_to_dask, normalize_chunks, window_from_bounds\n'), ((4610, 4658), 
'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fill_value', 'fill_value_'], {}), '(fill_value, fill_value_)\n', (4633, 4658), True, 'import numpy as np\n'), ((4920, 4958), 'rasterio.windows.intersect', 'windows.intersect', (['window', 'self.window'], {}), '(window, self.window)\n', (4937, 4958), False, 'from rasterio import windows\n'), ((1673, 1695), 'stackstac.testing.strategies.simple_bboxes', 'st_stc.simple_bboxes', ([], {}), '()\n', (1693, 1695), True, 'from stackstac.testing import strategies as st_stc\n'), ((1714, 1723), 'hypothesis.strategies.none', 'st.none', ([], {}), '()\n', (1721, 1723), True, 'from hypothesis import given, settings, strategies as st\n'), ((3502, 3530), 'rasterio.windows.window_index', 'windows.window_index', (['window'], {}), '(window)\n', (3522, 3530), False, 'from rasterio import windows\n'), ((6021, 6042), 'numpy.isnan', 'np.isnan', (['fill_value_'], {}), '(fill_value_)\n', (6029, 6042), True, 'import numpy as np\n'), ((6002, 6017), 'numpy.isnan', 'np.isnan', (['chunk'], {}), '(chunk)\n', (6010, 6017), True, 'import numpy as np\n'), ((6048, 6076), 'numpy.equal', 'np.equal', (['chunk', 'fill_value_'], {}), '(chunk, fill_value_)\n', (6056, 6076), True, 'import numpy as np\n'), ((3636, 3659), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (3657, 3659), True, 'import numpy as np\n')] |
import cv2
# import keyboard
import numpy as np
import open3d as o3d
import pygame
import os
import os.path as osp
import json
import time
from transforms3d.axangles import axangle2mat
import config
from capture import OpenCVCapture
from hand_mesh import HandMesh
from kinematics import mpii_to_mano
from utils import OneEuroFilter, imresize
from wrappers import ModelPipeline
from copy import deepcopy
from utils import *
class MyCapture:
    """
    Capture-like reader that replays frames stored in a pickle file.
    """

    def __init__(self, fp, side='left'):
        """
        Load the pickled frame dictionary and prepare sequential playback.

        Parameters
        ----------
        fp : str
            Path to a pickle file mapping frame index -> {'left': frame, 'right': frame}.
        side : str
            Which hand crop to replay ('left' or 'right').
        """
        with open(fp, 'rb') as handle:
            self.d = pickle.load(handle)
        self.i = -1
        self.frame_indexes = sorted(self.d.keys())
        self.n = len(self.frame_indexes)
        self.side = side
        self.flip_side = 'right'

    def read(self):
        """
        Return the next stored frame, or None once every frame was consumed.

        Frames for the flip side ('right') are mirrored horizontally before
        being returned.
        """
        self.i += 1
        if self.i == self.n:
            return None
        key = self.frame_indexes[self.i % self.n]
        frame = self.d[key][self.side]
        if self.side != self.flip_side:
            return frame
        return np.flip(frame, axis=1)
def _benchmark_inference(model, frame, n_runs=1000, warmup=10):
    """Benchmark the model on a single frame.

    Times detection alone (t1) and detection + inverse kinematics (t2) over
    `n_runs` repetitions, discarding the first `warmup` runs, and prints the
    mean latency and throughput for both.
    """
    det_times = []
    full_times = []
    for _ in range(n_runs):
        start = time.time()
        _, theta_mpii = model.process(frame)
        det_times.append(time.time() - start)
        mpii_to_mano(theta_mpii)
        full_times.append(time.time() - start)
    t1 = np.mean(det_times[warmup:])
    t2 = np.mean(full_times[warmup:])
    print(f't1: {t1 * 1000:.2f}ms, {1 / t1:.2f}hz')
    print(f't2: {t2 * 1000:.2f}ms, {1 / t2:.2f}hz')


def live_application(capture, output_dirpath, measure_time=True):
    """
    Run the hand-pose pipeline over every frame delivered by `capture`.

    Parameters
    ----------
    capture : object
        Frame source with a `read()` method (returns None when exhausted)
        and a `side` attribute used to name the output file.
    output_dirpath : str
        Directory where the per-side MANO parameter file is written.
    measure_time : bool
        If True (the default, preserving the original hard-coded behaviour),
        only benchmark the model on the first frame and return without
        writing any output. Set False to collect and save MANO parameters.
    """
    model = ModelPipeline()
    frame_index = 0
    mano_params = []
    while True:
        frame_large = capture.read()
        if frame_large is None:
            print(f'none frame {frame_index}')
            break
        frame = imresize(frame_large, (128, 128))
        if measure_time:
            # Benchmark-only mode: time the model on this frame and stop.
            _benchmark_inference(model, frame)
            return
        _, theta_mpii = model.process(frame)
        theta_mano = mpii_to_mano(theta_mpii)
        mano_params.append(deepcopy(theta_mano.tolist()))
        # Fixed: the original built osp.join(output_dirpath, "%06d.jpg" % frame_index)
        # and discarded the result — a no-op, removed here.
        frame_index += 1
    # json.dump writes text, so the file is opened in text mode; the
    # `.pickle` extension is kept for compatibility with downstream readers.
    with open(osp.join(output_dirpath, f'{capture.side}.pickle'), 'w') as f:
        json.dump(mano_params, f)
if __name__ == '__main__':
    # Hard-coded input/output locations for a single recording.
    fn_no_ext = '000012'
    input_fp = f'/home/renat/workdir/data/kinect_pose_fitting/kinect_hands/hand_crops_vis/{fn_no_ext}.pickle'
    output_dirpath = f'/home/renat/workdir/data/kinect_pose_fitting/kinect_hands/test_hand_models/minimal_hand/output_mano_params/{fn_no_ext}'
    os.makedirs(output_dirpath, exist_ok=True)
    # Process both hands from the same recording.
    for hand_side in ('left', 'right'):
        live_application(MyCapture(input_fp, side=hand_side), output_dirpath)
| [
"json.dump",
"numpy.flip",
"os.makedirs",
"wrappers.ModelPipeline",
"utils.imresize",
"kinematics.mpii_to_mano",
"time.time",
"numpy.mean",
"os.path.join"
] | [((1291, 1306), 'wrappers.ModelPipeline', 'ModelPipeline', ([], {}), '()\n', (1304, 1306), False, 'from wrappers import ModelPipeline\n'), ((3050, 3092), 'os.makedirs', 'os.makedirs', (['output_dirpath'], {'exist_ok': '(True)'}), '(output_dirpath, exist_ok=True)\n', (3061, 3092), False, 'import os\n'), ((1869, 1902), 'utils.imresize', 'imresize', (['frame_large', '(128, 128)'], {}), '(frame_large, (128, 128))\n', (1877, 1902), False, 'from utils import OneEuroFilter, imresize\n'), ((2570, 2620), 'os.path.join', 'osp.join', (['output_dirpath', "('%06d.jpg' % frame_index)"], {}), "(output_dirpath, '%06d.jpg' % frame_index)\n", (2578, 2620), True, 'import os.path as osp\n'), ((2721, 2746), 'json.dump', 'json.dump', (['mano_params', 'f'], {}), '(mano_params, f)\n', (2730, 2746), False, 'import json\n'), ((1191, 1213), 'numpy.flip', 'np.flip', (['frame'], {'axis': '(1)'}), '(frame, axis=1)\n', (1198, 1213), True, 'import numpy as np\n'), ((2241, 2260), 'numpy.mean', 'np.mean', (['ends1[10:]'], {}), '(ends1[10:])\n', (2248, 2260), True, 'import numpy as np\n'), ((2272, 2291), 'numpy.mean', 'np.mean', (['ends2[10:]'], {}), '(ends2[10:])\n', (2279, 2291), True, 'import numpy as np\n'), ((2485, 2509), 'kinematics.mpii_to_mano', 'mpii_to_mano', (['theta_mpii'], {}), '(theta_mpii)\n', (2497, 2509), False, 'from kinematics import mpii_to_mano\n'), ((2654, 2704), 'os.path.join', 'osp.join', (['output_dirpath', 'f"""{capture.side}.pickle"""'], {}), "(output_dirpath, f'{capture.side}.pickle')\n", (2662, 2704), True, 'import os.path as osp\n'), ((2003, 2014), 'time.time', 'time.time', ([], {}), '()\n', (2012, 2014), False, 'import time\n'), ((2075, 2086), 'time.time', 'time.time', ([], {}), '()\n', (2084, 2086), False, 'import time\n'), ((2108, 2132), 'kinematics.mpii_to_mano', 'mpii_to_mano', (['theta_mpii'], {}), '(theta_mpii)\n', (2120, 2132), False, 'from kinematics import mpii_to_mano\n'), ((2148, 2159), 'time.time', 'time.time', ([], {}), '()\n', (2157, 2159), False, 
'import time\n')] |
# -*- coding: utf-8 -*-
# @Time : 2018-04-06 16:29
# @Author : Dingzh.tobest
# 文件描述 : AROON指标测试
import talib
import numpy as np
def init(context):
    """Store strategy-wide state on the context object."""
    # Instrument under trade: the CSI 300 index.
    context.s1 = "000300.XSHG"
# before_trading is called once per day, before the trading session opens.
def before_trading(context):
    """Daily pre-market hook; this strategy needs no preparation here."""
    pass
# Triggered whenever the subscribed instrument receives a new data slice
# (daily/minute history or a real-time update).
def handle_bar(context, bar_dict):
    """Rebalance on the 24-period Aroon indicator.

    Go fully invested (95%) while Aroon-up exceeds Aroon-down over the last
    60 daily bars, otherwise move to cash.
    """
    symbol = context.s1
    high_series = np.array(history_bars(symbol, 60, '1d', 'high'))
    low_series = np.array(history_bars(symbol, 60, '1d', 'low'))
    aroon_down, aroon_up = talib.AROON(high_series, low_series, timeperiod=24)
    target = 0.95 if aroon_up[-1] > aroon_down[-1] else 0.0
    order_target_percent(symbol, target)
# after_trading is called once per day, after the trading session closes.
def after_trading(context):
    """Daily post-market hook; nothing to do for this strategy."""
    pass
"numpy.array"
] | [((511, 526), 'numpy.array', 'np.array', (['highs'], {}), '(highs)\n', (519, 526), True, 'import numpy as np\n'), ((528, 542), 'numpy.array', 'np.array', (['lows'], {}), '(lows)\n', (536, 542), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: wensong
import os
import sys
sys.path.append(os.getcwd() + "/../../")
from utils.tf_utils import TFUtils
import tensorflow as tf
from utils.tf_vocab_processor import TFVocabProcessor
import numpy as np
import logging
class PreProcessor(object):
    '''Pre-processing step for the NLP classifier pipeline.

    Loads raw text, builds a vocabulary, maps text to id sequences and
    splits the result into train/dev sets.
    '''
    def __init__(self):
        '''Initialize the pre-processing step.
        '''
        # Step identifier used by the surrounding pipeline.
        self.name = "PRE"

    def execute(self, params):
        '''Run pre-processing.

        params["INIT"] holds the parsed command-line flags; returns
        (train_tuple, vocab_processor, test_tuple) from _pre_text_to_ids.
        '''
        # Script flags.
        flags = params["INIT"]
        # Choose the pre-processing path based on the classification task.
        ret = self._pre_text_to_ids(flags)
        # Log all flag values for reproducibility.
        self._print_flags(flags)
        return ret

    def _print_flags(self, flags):
        '''Log every flag name and its value.
        '''
        for key in flags.flag_values_dict():
            logging.info("FLAGS " + key + " : " + str(flags[key].value))

    def _pre_text_to_ids(self, flags):
        # Load raw data: [titles, contents, labels].
        vocab_set, titles, conts, labels = TFUtils.load_multitype_text(
            flags.data_type, flags.data_file, flags.cls_num,
            flags.doc_separators)
        # Vocabulary processor.
        vocab_processor = TFVocabProcessor(
            max_document_length=flags.max_seq_len,
            min_frequency=flags.min_frequency)
        # Build the vocabulary from titles and all content sentences.
        vocab_processor.feed(titles)
        for sents in conts:
            vocab_processor.feed(sents)
        vocab_processor.build()
        # Convert text to id sequences.
        tids = None
        if flags.data_type == "shorttext" or flags.data_type == "longtext_with_title":
            tids = vocab_processor.transform(titles)
        cids = []
        if flags.data_type == "longtext" or flags.data_type == "longtext_with_title":
            for sents in conts:
                sids = vocab_processor.transform(sents)
                # Cut & pad long documents to a fixed sentence/word grid.
                sids = TFUtils.cut_and_padding_2D(matrix=sids,
                                                  row_lens=flags.max_doc_len,
                                                  col_lens=flags.max_seq_len)
                sids = np.array(sids)
                cids.append(sids)
        # Convert to np.array.
        labels = np.array(labels)
        cids = np.array(cids)
        # Record vocabulary size on the flags for downstream model building.
        flags.vocab_size = vocab_processor.length
        # Shuffle the data.
        # NOTE(review): np.random.seed() without an argument reseeds from OS
        # entropy, so the train/dev split differs on every run — confirm that
        # a non-reproducible split is intended.
        np.random.seed()
        shuffle_indices = np.random.permutation(np.arange(len(labels)))
        labels_shuffled = labels[shuffle_indices]
        # Split into train and dev sets (last dev_sample_percentage fraction).
        dev_sample_index = -1 * int(
            flags.dev_sample_percentage * float(len(labels)))
        l_train, l_dev = labels_shuffled[:dev_sample_index], labels_shuffled[
            dev_sample_index:]
        t_train, t_dev = None, None
        c_train, c_dev = None, None
        if flags.data_type == "shorttext" or flags.data_type == "longtext_with_title":
            tids_shuffled = tids[shuffle_indices]
            t_train, t_dev = tids_shuffled[:dev_sample_index], tids_shuffled[
                dev_sample_index:]
        if flags.data_type == "longtext" or flags.data_type == "longtext_with_title":
            cids_shuffled = cids[shuffle_indices]
            c_train, c_dev = cids_shuffled[:dev_sample_index], cids_shuffled[
                dev_sample_index:]
        logging.info("Train/Dev split: {:d}/{:d}".format(
            len(l_train), len(l_dev)))
        # Pack results as (title_ids, content_ids, labels) tuples.
        train_tuple = (t_train, c_train, l_train)
        test_tuple = (t_dev, c_dev, l_dev)
        return train_tuple, vocab_processor, test_tuple
| [
"utils.tf_utils.TFUtils.cut_and_padding_2D",
"numpy.random.seed",
"os.getcwd",
"utils.tf_vocab_processor.TFVocabProcessor",
"numpy.array",
"utils.tf_utils.TFUtils.load_multitype_text"
] | [((103, 114), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (112, 114), False, 'import os\n'), ((959, 1061), 'utils.tf_utils.TFUtils.load_multitype_text', 'TFUtils.load_multitype_text', (['flags.data_type', 'flags.data_file', 'flags.cls_num', 'flags.doc_separators'], {}), '(flags.data_type, flags.data_file, flags.cls_num,\n flags.doc_separators)\n', (986, 1061), False, 'from utils.tf_utils import TFUtils\n'), ((1125, 1220), 'utils.tf_vocab_processor.TFVocabProcessor', 'TFVocabProcessor', ([], {'max_document_length': 'flags.max_seq_len', 'min_frequency': 'flags.min_frequency'}), '(max_document_length=flags.max_seq_len, min_frequency=flags\n .min_frequency)\n', (1141, 1220), False, 'from utils.tf_vocab_processor import TFVocabProcessor\n'), ((2126, 2142), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (2134, 2142), True, 'import numpy as np\n'), ((2158, 2172), 'numpy.array', 'np.array', (['cids'], {}), '(cids)\n', (2166, 2172), True, 'import numpy as np\n'), ((2264, 2280), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (2278, 2280), True, 'import numpy as np\n'), ((1818, 1917), 'utils.tf_utils.TFUtils.cut_and_padding_2D', 'TFUtils.cut_and_padding_2D', ([], {'matrix': 'sids', 'row_lens': 'flags.max_doc_len', 'col_lens': 'flags.max_seq_len'}), '(matrix=sids, row_lens=flags.max_doc_len,\n col_lens=flags.max_seq_len)\n', (1844, 1917), False, 'from utils.tf_utils import TFUtils\n'), ((2037, 2051), 'numpy.array', 'np.array', (['sids'], {}), '(sids)\n', (2045, 2051), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from dsbox.ml.neural_networks.processing import Text2Sequence
from nltk.stem.snowball import EnglishStemmer
import logging
# Silence TensorFlow's chatty INFO/DEBUG output during the tests.
_tf_logger = logging.getLogger("tensorflow")
_tf_logger.setLevel(logging.WARNING)

# Fix the RNG so test runs are reproducible.
np.random.seed(42)
class TestText2Sequence(unittest.TestCase):
    """Unit tests for the Text2Sequence preprocessing helper."""

    def test_TestText2Sequence_fit_and_transform_should_return_correct_sequences(self):
        """Sentences differing in one word map to sequences differing at one slot."""
        # given: two sentences identical except for the fourth token
        corpus = np.array(['this is really awesome !',
                           'this is really crap !'])

        # when
        transformer = Text2Sequence(stemmer=EnglishStemmer())
        encoded = transformer.fit_transform(corpus)

        # then: positions 0-2 agree, position 3 differs, position 4 agrees
        for pos in (0, 1, 2):
            self.assertEqual(encoded[0][pos], encoded[1][pos])
        self.assertNotEqual(encoded[0][3], encoded[1][3])
        self.assertEqual(encoded[0][4], encoded[1][4])
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
| [
"unittest.main",
"numpy.random.seed",
"nltk.stem.snowball.EnglishStemmer",
"numpy.array",
"logging.getLogger"
] | [((220, 238), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (234, 238), True, 'import numpy as np\n'), ((955, 970), 'unittest.main', 'unittest.main', ([], {}), '()\n', (968, 970), False, 'import unittest\n'), ((161, 192), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (178, 192), False, 'import logging\n'), ((401, 464), 'numpy.array', 'np.array', (["['this is really awesome !', 'this is really crap !']"], {}), "(['this is really awesome !', 'this is really crap !'])\n", (409, 464), True, 'import numpy as np\n'), ((544, 560), 'nltk.stem.snowball.EnglishStemmer', 'EnglishStemmer', ([], {}), '()\n', (558, 560), False, 'from nltk.stem.snowball import EnglishStemmer\n')] |
#
# Copyright 2019-2020 <NAME>
# 2019 <EMAIL>
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Anisotropic continuum surface Green's function
#
import numpy as np
from scipy.linalg import null_space
class AnisotropicGreensFunction(object):
    """Surface Green's function of an anisotropic linear elastic substrate."""

    def __init__(self, C11, C12, C44, thickness=None, R=np.eye(3)):
        """
        Compute the surface Green's function for a linear elastic half-space
        with anisotropic elastic constants. The class supports generic
        elastic tensors but presently only cubic elastic constants can be
        passed to the constructor.

        Note that this class fails for an isotropic substrate. Use
        `IsotropicGreensFunction` instead for isotropic materials.

        Parameters
        ----------
        C11 : float
            C11 elastic constant
        C12 : float
            C12 elastic constant
        C44 : float
            C44 elastic constant (shear modulus)
        thickness : float
            Thickness of the elastic substrate. If None (default) then a
            substrate of infinite thickness will be computed.
        R : np.ndarray
            3x3 rotation matrix for rotation of the elastic constants.
        """
        self._C11 = C11
        self._C12 = C12
        self._C44 = C44
        # Cubic stiffness matrix in Voigt notation (xx, yy, zz, yz, xz, xy).
        self._C = np.array([[C11, C12, C12, 0, 0, 0],  # xx
                            [C12, C11, C12, 0, 0, 0],  # yy
                            [C12, C12, C11, 0, 0, 0],  # zz
                            [0, 0, 0, C44, 0, 0],  # yz
                            [0, 0, 0, 0, C44, 0],  # xz
                            [0, 0, 0, 0, 0, C44]])  # xy
        self._thickness = thickness
        det_R = np.linalg.det(R)
        if not np.isclose(det_R, 1.0):
            raise ValueError(
                "R is not a proper rotation matrix, det(R)={}".format(det_R))
        self._R = R
        # Expand the Voigt matrix to the full rank-4 tensor, rotate it with
        # R (one R per tensor index), then collapse back to Voigt form.
        C_tensor = np.zeros((3, 3, 3, 3))
        for i, j, k, l in np.ndindex(3, 3, 3, 3):
            C_tensor[i, j, k, l] = self.elasticity_tensor(i, j, k, l)
        C_tensor = np.einsum(
            'ig,jh,ghmn,km,ln', self._R, self._R, C_tensor, self._R, self._R
        )
        for i, j in np.ndindex(6, 6):
            self._C[i, j] = self.voigt_from_tensor(C_tensor, i, j)

    def elasticity_tensor(self, i, j, k, L):
        """Return the tensor component C_ijkL from the Voigt matrix.

        All indices are 0-based; off-diagonal index pairs map to Voigt
        indices 3..5 via 6 - i - j.
        """
        Voigt_ij = i
        if i != j:
            Voigt_ij = 6 - i - j
        Voigt_kl = k
        if k != L:
            Voigt_kl = 6 - k - L
        return self._C[Voigt_ij, Voigt_kl]

    def voigt_from_tensor(self, C_tensor, Voigt_ij, Voigt_kl):
        """Return the Voigt-matrix entry read from the full rank-4 tensor."""
        # Representative tensor index pair for each Voigt index.
        tensor_ij_for_voigt_ij = {
            0: (0, 0),
            1: (1, 1),
            2: (2, 2),
            3: (1, 2),
            4: (0, 2),
            5: (0, 1),
        }
        i, j = tensor_ij_for_voigt_ij[Voigt_ij]
        k, L = tensor_ij_for_voigt_ij[Voigt_kl]
        return C_tensor[i, j, k, L]

    def bulkop(self, qx, qy, qz):
        """
        Return the linear operator M_il = -C_ijkl q_j q_k

        Arguments
        ---------
        q : 3-vector
            Components of the wavevector

        Returns
        -------
        M : 3x3-matrix
            Linear operator
        """
        q = (qx, qy, qz)
        M = np.zeros((3, 3), dtype=complex)
        for i, j, k, l in np.ndindex(3, 3, 3, 3):
            M[i, l] += -self.elasticity_tensor(i, j, k, l) * q[j] * q[k]
        return M

    def find_eigenvalues(self, qx, qy):
        """Return the three decay constants Q for in-plane wavevector (qx, qy),
        i.e. the roots of det(M(qx, qy, i*Q)) = 0."""
        # We know that det(M) has the form c0 + c2*Q^2 + c4*Q^4 + c6*Q^6, but
        # we don't have the prefactors explicitly. We first need to construct
        # them here by evaluating:
        #     Q = 0: det(M) = c0
        #     Q = 1: det(M) = c0 + c2 + c4 + c6
        #     Q = 2: det(M) = c0 + 4*c2 + 16*c4 + 64*c6
        #     Q = 3: det(M) = c0 + 9*c2 + 81*c4 + 729*c6
        fac1 = 1
        fac2 = 2
        fac3 = 3
        A = np.array([[0, 0, 0, 1],
                      [fac1 ** 6, fac1 ** 4, fac1 ** 2, 1],
                      [fac2 ** 6, fac2 ** 4, fac2 ** 2, 1],
                      [fac3 ** 6, fac3 ** 4, fac3 ** 2, 1]])
        b = np.array([np.linalg.det(self.bulkop(qx, qy, 0)),
                      np.linalg.det(self.bulkop(qx, qy, 1j * fac1)),
                      np.linalg.det(self.bulkop(qx, qy, 1j * fac2)),
                      np.linalg.det(self.bulkop(qx, qy, 1j * fac3))])
        p = np.linalg.solve(A, b)
        r = np.roots(p)
        # We need to take the sqrt because we have the roots of the equation
        # c0 + c2*Q + c4*Q^2 + C6*Q^3
        return np.sqrt(r)

    def find_eigenvectors(self, qx, qy, qz, rcond=1e-6):
        """Return the null-space eigenvector of M for each qz in `qz`.

        Raises RuntimeError unless each null space is one-dimensional.
        """
        eta = []
        for _qz in qz:
            M = self.bulkop(qx, qy, _qz)
            _eta = null_space(M, rcond=rcond)
            if _eta.shape[1] != 1:
                raise RuntimeError(
                    'Null space for wavevector {},{},{} spanned by {} '
                    'vectors, but should be spanned by a single one.'
                    .format(qx, qy, _qz, _eta.shape[1]))
            eta += [_eta[:, 0]]
        return eta

    def _make_U(self, qz, eta, z=None):
        """Assemble the 3 x len(qz) displacement matrix from the eigenvectors.

        If `z` is given, each column carries the extra phase exp(i*qz*z)
        evaluated at depth z.
        """
        U = np.zeros((3, len(qz)), dtype=complex)
        if z is None:
            for k, alpha in np.ndindex(3, len(qz)):
                U[k, alpha] = eta[alpha][k]
        else:
            for k, alpha in np.ndindex(3, len(qz)):
                U[k, alpha] = eta[alpha][k] * np.exp(1j * qz[alpha] * z)
        return U

    def _make_F(self, qx, qy, qz, eta, thickness):
        """Assemble the boundary-condition matrix for the given eigenmodes."""
        q = [(qx, qy, _qz) for _qz in qz]
        F = np.zeros((len(qz), len(qz)), dtype=complex)
        # Traction boundary conditions on top
        for i, k, alpha, l in np.ndindex(3, 3, len(qz), 3):
            F[i, alpha] += 1j * self.elasticity_tensor(i, 2, k, l) * \
                q[alpha][k] * eta[alpha][l]
        # Displacement boundary conditions on bottom
        if len(qz) > 3:
            for i, alpha in np.ndindex(3, len(qz)):
                F[i + 3, alpha] = np.exp(-1j * q[alpha][2] * thickness) * \
                    eta[alpha][i]
        return F

    def _make_U_and_F(self, qx, qy, thickness, exp_tol=100):
        """Return (U, F) matrices for one normalized in-plane wavevector."""
        _qz = self.find_eigenvalues(qx, qy)
        # If thickness*qz > some threshold, then we need to solve for the
        # problem of infinite thickness, otherwise we get floating point
        # issues when evaluating exp(-thickness*qz) in _make_F
        if thickness is None or np.max(np.real(thickness * _qz)) > exp_tol:
            qz = -1j * _qz
        else:
            # Finite thickness: keep decaying and growing modes.
            qz = np.append(-1j * _qz, 1j * _qz)
        eta = self.find_eigenvectors(qx, qy, qz)
        return self._make_U(qz, eta), self._make_F(qx, qy, qz, eta, thickness)

    def _gamma_stiffness(self):
        """Returns the 3x3 stiffness matrix at the Gamma point (q=0)"""
        # Voigt components: xz = 4, yz = 3, zz = 2
        return np.array([[self._C[4, 4], self._C[3, 4], self._C[2, 4]],
                         [self._C[3, 4], self._C[3, 3], self._C[2, 3]],
                         [self._C[2, 4], self._C[2, 3],
                          self._C[2, 2]]]) / self._thickness

    def _greens_function(self, qx, qy, thickness, zero_tol=1e-6):
        """Green's function for a single normalized wavevector (qx, qy)."""
        if thickness is not None and abs(qx) < zero_tol and abs(qy) < zero_tol:
            # This is zero wavevector. We use the analytical solution in this
            # case.
            return np.linalg.inv(self._gamma_stiffness())
        U, F = self._make_U_and_F(qx, qy, thickness)
        return np.linalg.solve(F.T, U.T)[:3, :]

    def _stiffness(self, qx, qy, thickness, zero_tol=1e-6):
        """Stiffness (inverse Green's function) for one normalized wavevector."""
        if abs(qx) < zero_tol and abs(qy) < zero_tol:
            if thickness is None:
                # Infinite substrate at q=0 has no restoring stiffness.
                return np.zeros((3, 3))
            else:
                return self._gamma_stiffness()
        if thickness is None:
            U, F = self._make_U_and_F(qx, qy, thickness)
            return np.linalg.solve(U.T, F.T)
        else:
            return np.linalg.inv(
                self._greens_function(qx, qy, thickness, zero_tol=zero_tol))

    def greens_function(self, qx, qy, zero_tol=1e-6):
        """Green's function for arrays of in-plane wavevectors (qx, qy)."""
        # Note: Normalization of the q vectors is required for numerical
        # stability
        abs_q = np.sqrt(qx ** 2 + qy ** 2)
        # NOTE(review): this in-place masking requires array input; for plain
        # Python scalars the assignment raises, so the np.isscalar branch
        # below appears unreachable — confirm the intended input types.
        abs_q[abs_q == 0] = 1
        thickness = [None] * len(
            abs_q) if self._thickness is None else self._thickness * abs_q
        if np.isscalar(qx) and np.isscalar(qy):
            return self._greens_function(qx / abs_q, qy / abs_q, thickness,
                                         zero_tol=zero_tol) / abs_q
        gf = []
        for _qx, _qy, _abs_q, _thickness in zip(qx, qy, abs_q, thickness):
            gf += [
                self._greens_function(_qx / _abs_q, _qy / _abs_q, _thickness,
                                      zero_tol=zero_tol) / _abs_q]
        return np.array(gf)

    def stiffness(self, qx, qy, zero_tol=1e-6):
        """Stiffness for arrays of in-plane wavevectors (qx, qy)."""
        # Note: Normalization of the q vectors is required for numerical
        # stability
        abs_q = np.sqrt(qx ** 2 + qy ** 2)
        # NOTE(review): same array-only masking caveat as in greens_function.
        abs_q[abs_q == 0] = 1
        thickness = [None] * len(
            abs_q) if self._thickness is None else self._thickness * abs_q
        if np.isscalar(qx) and np.isscalar(qy):
            return self._stiffness(qx / abs_q, qy / abs_q, thickness,
                                   zero_tol=zero_tol) * abs_q
        gf = []
        for _qx, _qy, _abs_q, _thickness in zip(qx, qy, abs_q, thickness):
            gf += [self._stiffness(_qx / _abs_q, _qy / _abs_q, _thickness,
                                   zero_tol=zero_tol) * _abs_q]
        return np.array(gf)

    # Calling the object evaluates the stiffness.
    __call__ = stiffness
| [
"scipy.linalg.null_space",
"numpy.roots",
"numpy.ndindex",
"numpy.isscalar",
"numpy.zeros",
"numpy.einsum",
"numpy.append",
"numpy.isclose",
"numpy.linalg.det",
"numpy.array",
"numpy.exp",
"numpy.real",
"numpy.eye",
"numpy.linalg.solve",
"numpy.sqrt"
] | [((1344, 1353), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1350, 1353), True, 'import numpy as np\n'), ((2331, 2493), 'numpy.array', 'np.array', (['[[C11, C12, C12, 0, 0, 0], [C12, C11, C12, 0, 0, 0], [C12, C12, C11, 0, 0, \n 0], [0, 0, 0, C44, 0, 0], [0, 0, 0, 0, C44, 0], [0, 0, 0, 0, 0, C44]]'], {}), '([[C11, C12, C12, 0, 0, 0], [C12, C11, C12, 0, 0, 0], [C12, C12,\n C11, 0, 0, 0], [0, 0, 0, C44, 0, 0], [0, 0, 0, 0, C44, 0], [0, 0, 0, 0,\n 0, C44]])\n', (2339, 2493), True, 'import numpy as np\n'), ((2714, 2730), 'numpy.linalg.det', 'np.linalg.det', (['R'], {}), '(R)\n', (2727, 2730), True, 'import numpy as np\n'), ((2917, 2939), 'numpy.zeros', 'np.zeros', (['(3, 3, 3, 3)'], {}), '((3, 3, 3, 3))\n', (2925, 2939), True, 'import numpy as np\n'), ((2966, 2988), 'numpy.ndindex', 'np.ndindex', (['(3)', '(3)', '(3)', '(3)'], {}), '(3, 3, 3, 3)\n', (2976, 2988), True, 'import numpy as np\n'), ((3079, 3154), 'numpy.einsum', 'np.einsum', (['"""ig,jh,ghmn,km,ln"""', 'self._R', 'self._R', 'C_tensor', 'self._R', 'self._R'], {}), "('ig,jh,ghmn,km,ln', self._R, self._R, C_tensor, self._R, self._R)\n", (3088, 3154), True, 'import numpy as np\n'), ((3197, 3213), 'numpy.ndindex', 'np.ndindex', (['(6)', '(6)'], {}), '(6, 6)\n', (3207, 3213), True, 'import numpy as np\n'), ((4233, 4264), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'complex'}), '((3, 3), dtype=complex)\n', (4241, 4264), True, 'import numpy as np\n'), ((4291, 4313), 'numpy.ndindex', 'np.ndindex', (['(3)', '(3)', '(3)', '(3)'], {}), '(3, 3, 3, 3)\n', (4301, 4313), True, 'import numpy as np\n'), ((4879, 5022), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [fac1 ** 6, fac1 ** 4, fac1 ** 2, 1], [fac2 ** 6, fac2 ** 4,\n fac2 ** 2, 1], [fac3 ** 6, fac3 ** 4, fac3 ** 2, 1]]'], {}), '([[0, 0, 0, 1], [fac1 ** 6, fac1 ** 4, fac1 ** 2, 1], [fac2 ** 6, \n fac2 ** 4, fac2 ** 2, 1], [fac3 ** 6, fac3 ** 4, fac3 ** 2, 1]])\n', (4887, 5022), True, 'import numpy as np\n'), ((5365, 5386), 'numpy.linalg.solve', 
'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (5380, 5386), True, 'import numpy as np\n'), ((5399, 5410), 'numpy.roots', 'np.roots', (['p'], {}), '(p)\n', (5407, 5410), True, 'import numpy as np\n'), ((5542, 5552), 'numpy.sqrt', 'np.sqrt', (['r'], {}), '(r)\n', (5549, 5552), True, 'import numpy as np\n'), ((9181, 9207), 'numpy.sqrt', 'np.sqrt', (['(qx ** 2 + qy ** 2)'], {}), '(qx ** 2 + qy ** 2)\n', (9188, 9207), True, 'import numpy as np\n'), ((9812, 9824), 'numpy.array', 'np.array', (['gf'], {}), '(gf)\n', (9820, 9824), True, 'import numpy as np\n'), ((9983, 10009), 'numpy.sqrt', 'np.sqrt', (['(qx ** 2 + qy ** 2)'], {}), '(qx ** 2 + qy ** 2)\n', (9990, 10009), True, 'import numpy as np\n'), ((10576, 10588), 'numpy.array', 'np.array', (['gf'], {}), '(gf)\n', (10584, 10588), True, 'import numpy as np\n'), ((2746, 2768), 'numpy.isclose', 'np.isclose', (['det_R', '(1.0)'], {}), '(det_R, 1.0)\n', (2756, 2768), True, 'import numpy as np\n'), ((5711, 5737), 'scipy.linalg.null_space', 'null_space', (['M'], {'rcond': 'rcond'}), '(M, rcond=rcond)\n', (5721, 5737), False, 'from scipy.linalg import null_space\n'), ((7526, 7560), 'numpy.append', 'np.append', (['(-1.0j * _qz)', '(1.0j * _qz)'], {}), '(-1.0j * _qz, 1.0j * _qz)\n', (7535, 7560), True, 'import numpy as np\n'), ((7856, 8015), 'numpy.array', 'np.array', (['[[self._C[4, 4], self._C[3, 4], self._C[2, 4]], [self._C[3, 4], self._C[3, \n 3], self._C[2, 3]], [self._C[2, 4], self._C[2, 3], self._C[2, 2]]]'], {}), '([[self._C[4, 4], self._C[3, 4], self._C[2, 4]], [self._C[3, 4],\n self._C[3, 3], self._C[2, 3]], [self._C[2, 4], self._C[2, 3], self._C[2,\n 2]]])\n', (7864, 8015), True, 'import numpy as np\n'), ((8473, 8498), 'numpy.linalg.solve', 'np.linalg.solve', (['F.T', 'U.T'], {}), '(F.T, U.T)\n', (8488, 8498), True, 'import numpy as np\n'), ((8866, 8891), 'numpy.linalg.solve', 'np.linalg.solve', (['U.T', 'F.T'], {}), '(U.T, F.T)\n', (8881, 8891), True, 'import numpy as np\n'), ((9359, 9374), 'numpy.isscalar', 
'np.isscalar', (['qx'], {}), '(qx)\n', (9370, 9374), True, 'import numpy as np\n'), ((9379, 9394), 'numpy.isscalar', 'np.isscalar', (['qy'], {}), '(qy)\n', (9390, 9394), True, 'import numpy as np\n'), ((10161, 10176), 'numpy.isscalar', 'np.isscalar', (['qx'], {}), '(qx)\n', (10172, 10176), True, 'import numpy as np\n'), ((10181, 10196), 'numpy.isscalar', 'np.isscalar', (['qy'], {}), '(qy)\n', (10192, 10196), True, 'import numpy as np\n'), ((8678, 8694), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (8686, 8694), True, 'import numpy as np\n'), ((6380, 6408), 'numpy.exp', 'np.exp', (['(1.0j * qz[alpha] * z)'], {}), '(1.0j * qz[alpha] * z)\n', (6386, 6408), True, 'import numpy as np\n'), ((6969, 7008), 'numpy.exp', 'np.exp', (['(-1.0j * q[alpha][2] * thickness)'], {}), '(-1.0j * q[alpha][2] * thickness)\n', (6975, 7008), True, 'import numpy as np\n'), ((7431, 7455), 'numpy.real', 'np.real', (['(thickness * _qz)'], {}), '(thickness * _qz)\n', (7438, 7455), True, 'import numpy as np\n')] |
# coding: utf-8
# ## Prediction BigMart dataset from AWS Notebook Cloud Instance
# In[60]:
# Import Libraries
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import pandas as pd
import seaborn as sns
from statsmodels.nonparametric.kde import KDEUnivariate
from statsmodels.nonparametric import smoothers_lowess
from pandas import Series, DataFrame
from patsy import dmatrices
from sklearn import datasets, svm
from sklearn import grid_search
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier, GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
# # Read data from BigMart datasets for Train and Test
# In[61]:
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
# # Dimension of dataset and combine the Test and Train data
# ### Summary of the overall dataset - The data set has total of 14204 rows with 13 attributes.
# ### Train has 8523 and 5681 in test dataset
# In[62]:
train['source']='train'
test['source']='test'
data = pd.concat([train, test],ignore_index=True)
print (train.shape, test.shape, data.shape)
# # First Ten Records of Train
# In[63]:
train.head(10)
# # First 10 Records for Test Set
# In[64]:
test.head(10)
# In[65]:
#Describe the Train data
print(train.describe())
# In[66]:
#Describe the Test data
print(test.describe())
# # Describe the Combinded data set
# In[67]:
#Describe the Full data (Train + Test)
print(data.describe())
# # Data Exploration and Visualization
# In[68]:
# We can see the columns with Null instances
data.isnull().sum()
# In[69]:
# Unique values with in Attributes -
data.apply(lambda x: len(x.unique()))
# # Explore the Categorical Attributes from Combined dataset
# In[70]:
#Filter categorical variables
categorical_attributes = [x for x in data.dtypes.index if data.dtypes[x]=='object']
#Exclude ID cols and source:
categorical_attributes = [x for x in categorical_attributes if x not in ['Item_Identifier','Outlet_Identifier','source']]
#Print frequency of categories
for i in categorical_attributes:
print ('\nFrequency of Categories for attributes %s'%i)
print (data[i].value_counts())
# In[71]:
# Distribution of Weight Attributes
data.Item_Weight.plot(kind='hist', color='blue', edgecolor='black', figsize=(10,6),
                   title='Histogram of Item_Weight')
# In[72]:
#Check the mean sales by type:
data.pivot_table(values='Item_Outlet_Sales',index='Outlet_Type')
# In[73]:
# Distribution of Target Variable - Item_Outlet_Sales
# Probability plot of the target against a uniform distribution.
import pylab
import scipy.stats as stats
stats.probplot(data.Item_Outlet_Sales, dist="uniform", plot=pylab)
pylab.show()
# # Plotting the histogram on Combined dataset
# In[74]:
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
# Histogram of every numeric column at once.
data.hist(bins=50, figsize=(20,15))
plt.show()
# # Correlation Plot
# In[75]:
import seaborn as sns
f, ax = plt.subplots(figsize=[8,6])
# Heatmap of pairwise correlations between the numeric attributes.
sns.heatmap((data).corr(),
            annot=True)
ax.set_title("Correlation of Attributes")
plt.show()
# # Replace Null values - Numerical Attributes
# In[76]:
# Item_Weight is numeric: impute missing values with the column mean.
print (data['Item_Weight'].isnull().sum())
data["Item_Weight"] = data["Item_Weight"].fillna(data["Item_Weight"].mean())
print(data['Item_Weight'].isnull().sum())
# Outlet_Size is categorical: impute missing values with the mode.
print (data['Outlet_Size'].isnull().sum())
data['Outlet_Size'] = data['Outlet_Size'].fillna(data['Outlet_Size'].mode().iloc[0])
print (data['Outlet_Size'].isnull().sum())
# In[77]:
#Impute for attribute with 0 value for Visibility
# A visibility of exactly 0 makes no sense for a product on a shelf, so treat
# zeros as missing and replace them with the column mean.
print ('Number of Records with Visibility = 0 is ', (data['Item_Visibility'] == 0).sum())
data['Item_Visibility'] = data['Item_Visibility'].mask(data['Item_Visibility'] == 0,data['Item_Visibility'].mean(skipna=True))
# Fix: re-count zeros to verify the imputation; the original check used
# isnull().sum(), which is always 0 here and verified nothing.
print ('Number of Records with Visibility = 0 is ', (data['Item_Visibility'] == 0).sum())
# In[78]:
# Head 10 records from Combined data
data.head(10)
# # Handling Categorical Values
# In[79]:
#Item type combine:
# The first two characters of Item_Identifier encode a broad product group.
data['Item_Identifier'].value_counts()
data['Item_Type_Combined'] = data['Item_Identifier'].apply(lambda x: x[0:2])
data['Item_Type_Combined'] = data['Item_Type_Combined'].map({'FD':'Food',
                                                             'NC':'Non-Consumable',
                                                             'DR':'Drinks'})
data['Item_Type_Combined'].value_counts()
# In[80]:
#Years: age of the outlet relative to 2018.
data['Outlet_Years'] = 2018 - data['Outlet_Establishment_Year']
data['Outlet_Years'].describe()
# In[81]:
#Change categories of low fat:
# Normalize the inconsistent spellings ('LF', 'low fat', 'reg') of fat content.
print ('Original Categories:')
print (data['Item_Fat_Content'].value_counts())
data['Item_Fat_Content'] = data['Item_Fat_Content'].replace({'LF':'Low Fat',
                                                             'reg':'Regular',
                                                             'low fat':'Low Fat'})
print (data['Item_Fat_Content'].value_counts())
# In[82]:
# Create Non Edible category:
# Non-consumable products cannot have a fat content.
data.loc[data['Item_Type_Combined']=="Non-Consumable",'Item_Fat_Content'] = "Non-Edible"
data['Item_Fat_Content'].value_counts()
# # Encoding Categorical Attributes
# In[83]:
#Import library:
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
#New variable for outlet: integer-code the outlet identifier.
data['Outlet'] = le.fit_transform(data['Outlet_Identifier'])
var_mod = ['Item_Fat_Content','Outlet_Location_Type','Outlet_Size','Item_Type_Combined','Outlet_Type','Outlet']
le = LabelEncoder()
# Label-encode each categorical column in place (encoder is refit per column).
for i in var_mod:
    data[i] = le.fit_transform(data[i])
# In[84]:
#One_Hot_Coding on the different categories of dataset
data = pd.get_dummies(data, columns=['Item_Fat_Content','Outlet_Location_Type','Outlet_Size','Outlet_Type',
                              'Item_Type_Combined','Outlet_Identifier'])
# In[85]:
# Display the combined dataset after encoding-
name_of_attribs = list(data)
data.apply(lambda x: len(x.unique()))
# # Implementation of Pipeline -
# In[86]:
from sklearn.base import BaseEstimator, TransformerMixin
# Create a class to select numerical or categorical columns
# since Scikit-Learn doesn't handle DataFrames yet
class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Pipeline step that pulls a subset of columns out of a DataFrame.

    scikit-learn transformers operate on arrays, so this adapter selects
    the requested columns and hands back the underlying numpy values.
    """

    def __init__(self, attribute_names):
        self.attribute_names = attribute_names

    def fit(self, X, y=None):
        # Stateless transformer: there is nothing to learn from the data.
        return self

    def transform(self, X):
        # Column-subset the frame, then expose the raw ndarray.
        selected = X[self.attribute_names]
        return selected.values
# In[87]:
# Fix: DataFrameSelector expects a list of column *names* (its transform does
# X[attribute_names].values); passing the sliced DataFrame itself would make
# the selector index a frame with a frame.
num_attribs = ['Item_Weight','Item_Visibility']
# In[88]:
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
# (The unused `from sklearn.preprocessing import Imputer` was dropped:
# Imputer was removed in scikit-learn 0.22 and crashes at import time.)
num_pipeline = Pipeline([
        ('selector', DataFrameSelector(num_attribs)),
        ('std_scaler', StandardScaler()),
    ])
# # Completing the Combined data Imputation and drop attributes
# In[89]:
# Item_Type is superseded by Item_Type_Combined; Outlet_Establishment_Year
# by Outlet_Years.
data.drop(['Item_Type','Outlet_Establishment_Year'],axis=1,inplace=True)
# In[90]:
data.head()
# # Create Training and Test dataset from Combined dataset
# In[91]:
#Divide into test and train:
# Fix: .copy() gives each split its own frame, so the in-place drops below do
# not operate on a view of `data` (avoids pandas SettingWithCopyWarning and
# undefined view/copy behaviour).
trainr = data.loc[data['source']=="train"].copy()
testr = data.loc[data['source']=="test"].copy()
# In[92]:
# Display the record count in each dataset
print (trainr.shape, testr.shape, data.shape)
# In[93]:
#Drop Target from Test and manual identifier column:
testr.drop(['Item_Outlet_Sales','source'],axis=1,inplace=True)
trainr.drop(['source'],axis=1,inplace=True)
# In[94]:
trainr.head()
# In[95]:
trainr.describe()
# In[96]:
trainr.info()
# In[97]:
testr.describe()
# In[98]:
# Create the train and test dataset
# Separate the predictors from the Item_Outlet_Sales target.
Xtrain = trainr.drop(["Item_Outlet_Sales"], axis=1)
ytrain = trainr["Item_Outlet_Sales"]
from sklearn.model_selection import train_test_split
# Hold out a validation split (train_test_split default: 25%) for evaluation.
X_train, X_test, y_train, y_test = train_test_split(Xtrain, ytrain)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# In[99]:
# Create a dataset without Item_Identifier
# NOTE(review): accuracy_score is a classification metric and is never used
# in this regression script — candidate for removal.
from sklearn.metrics import accuracy_score
predictors = [x for x in Xtrain.columns if x not in ['Item_Identifier']]
print(predictors)
# # Linear Regression
# In[100]:
# Fix: LinearRegression was used without being imported (NameError).
from sklearn.linear_model import LinearRegression
# Baseline model: scale the features, then fit ordinary least squares.
r_pipeline = Pipeline([
        ('std_scaler', StandardScaler()),
        ('linear', LinearRegression())
    ])
r_pipeline.fit(X_train[predictors], y_train)
preds = r_pipeline.predict(X_test[predictors])
# In[101]:
# Fix: sklearn.cross_validation was removed in scikit-learn 0.20; use
# sklearn.model_selection.  MSE scorers were renamed and negated
# ('neg_mean_squared_error'), so np.abs() below still yields the RMSE.
from sklearn.model_selection import cross_val_score
cv_score = cross_val_score(r_pipeline, X_train[predictors], y_train, cv=20,
                        scoring='neg_mean_squared_error')
cv_score = np.sqrt(np.abs(cv_score))
RMSE = cv_score.mean()
print('RMSE is ', RMSE)
# In[102]:
# Holdout RMSE on the validation split.
from sklearn.metrics import mean_squared_error
RMSEd = mean_squared_error(preds, y_test)
RMSEsd=np.sqrt(RMSEd)
print('RMSE is ', RMSEsd)
# ## GradientBoostingRegressor Tree Implementation
# In[103]:
from sklearn.ensemble import GradientBoostingRegressor
# NOTE(review): loss='ls' was renamed to 'squared_error' in scikit-learn 1.0
# and removed in 1.2; kept as-is for the sklearn version this script targets.
pipedesc = Pipeline([('std_scaler', StandardScaler()),
                ('grboostregmodel', GradientBoostingRegressor(n_estimators=100, learning_rate=0.1,
                max_depth=1, random_state=0, loss='ls'))])
# In[104]:
dscrmol = pipedesc.fit(X_train[predictors], y_train)
#print(dscrmol.get_params())
preddesctree = dscrmol.predict(X_test[predictors])
# In[105]:
# Fix: sklearn.cross_validation was removed in scikit-learn 0.20, and the
# 'mean_squared_error' scorer was renamed to 'neg_mean_squared_error'.
from sklearn.model_selection import cross_val_score
cv_scoredesc = cross_val_score(pipedesc, X_train[predictors], y_train, cv=20,
                        scoring='neg_mean_squared_error')
cv_scoredesct = np.sqrt(np.abs(cv_scoredesc))
RMSEdesc = cv_scoredesct.mean()
print('RMSE is ', RMSEdesc)
# ## HyperTune Gradient Boosting Regressor
# In[106]:
# Fix: sklearn.grid_search was removed in scikit-learn 0.20 — GridSearchCV
# now lives in sklearn.model_selection, so the cell imports it explicitly
# instead of referencing the undefined `grid_search` module.
get_ipython().run_cell_magic('time', '', "from sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.model_selection import GridSearchCV\n\ngb_grid_params = {'learning_rate': [0.1, 0.05]\n                  #'max_depth': [4, 6, 8]\n                  #'min_samples_leaf': [20, 50,100,150],\n                  #'max_features': [1.0, 0.3, 0.1] \n                  }\n\ngb_gs = GradientBoostingRegressor(n_estimators = 60)\nclfgrd = GridSearchCV(gb_gs,\n                     gb_grid_params,\n                     cv=20, \n                     n_jobs=10)\nclfgrdmof=clfgrd.fit(X_train[predictors], y_train)")
# In[107]:
get_ipython().run_cell_magic('time', '', 'clfpred = clfgrdmof.predict(X_test[predictors])')
# In[108]:
# Fix: sklearn.cross_validation was removed in scikit-learn 0.20, and the
# 'mean_squared_error' scorer was renamed to 'neg_mean_squared_error'.
from sklearn.model_selection import cross_val_score
cvgd_scoredesc = cross_val_score(clfgrd, X_train[predictors], y_train, cv=20,
                        scoring='neg_mean_squared_error')
cvgd_scoredesct = np.sqrt(np.abs(cvgd_scoredesc))
RMSEdescgd = cvgd_scoredesct.mean()
print('RMSE is ', RMSEdescgd)
# In[109]:
# Collect the cross-validated RMSE of every experiment for comparison.
results = pd.DataFrame(columns=["Description", "RMSE"])
results.loc[len(results)] = ["LinearModel", RMSE]
results.loc[len(results)] = ["GradientBoost", RMSEdesc]
results.loc[len(results)] = ["HypertunedGradientBoost", RMSEdescgd]
results
# # Predict on original Test Set using Random forest model with Hypertune
# In[110]:
get_ipython().run_cell_magic('time', '', 'overallprediction=clfgrdmof.predict(testr[predictors])')
# In[111]:
print(overallprediction)
# In[112]:
import pickle
filename = 'finalized_model.pkl'
# Fix: the original open() calls were never closed — use context managers so
# the file handles are released deterministically.
with open(filename, 'wb') as model_file:
    pickle.dump(clfgrdmof, model_file)
# load the model from disk
# (pickle.load is safe here only because we wrote the file ourselves; never
# unpickle untrusted data.)
with open(filename, 'rb') as model_file:
    loaded_model = pickle.load(model_file)
Test1 = loaded_model.predict(testr[predictors])
# In[113]:
get_ipython().run_cell_magic('time', '', 'print(Test1)')
# # Overall Summary -
# Overall dataset - Initially, the BigMart dataset has a total of (14204, 13) records and was separately provided
# as a train (8523, 13) and a test (5681, 12) dataset. It has 13 attributes with numerical and categorical values.
#
# Below are the details on how we have processed and cleaned the data provided - data cleaning and preprocessing
# activities is performed on combined dataset with addition column added as "Source" to differentiate the data later
# for splitting the data.
#
# * **Data Exploration** – Analysed and plotted the categorical and continuous feature summaries to see which feature
# is closely related with the target variable. This helped us decide which features are influencing the prediction.
#
# * **Data Cleaning and Feature engineering** – Encoding and imputing missing values in the data and checking for
# outliers with
# replacing with mean values and relabeling the values in categorical columns as to bring consistencies.
# Also, added additional columns for effective feature engineering.
#
# * **Model Experiment** – Experiment has started with Linear Regression as Base model, with implementation of Gradient Boost Regressor and Hypertuned Gradient Boost Regressor.
#
# * **Model tuning** - GridSearchCV has been used for tuning the model and calculating the root mean
# square error.
#
# * **Model Evaluation** - After all the experiments and results captured in Table, it is clear the results are better
# with Hypertuned Gradient Boost Regressor.
#
# Below are the outcome of each model -
# - LinearModel (RMSE - 1128.050398)
# - GradientBoost (RMSE - 1134.443937)
# - Hypertuned Gradient Boost (RMSE - 1078.128816)
#
#
# # Team Members -
# ### - <NAME>
# ### - <NAME>
| [
"pandas.DataFrame",
"pylab.show",
"matplotlib.pyplot.show",
"sklearn.cross_validation.cross_val_score",
"numpy.abs",
"sklearn.preprocessing.StandardScaler",
"pandas.read_csv",
"pandas.get_dummies",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.GradientBoostingRegressor",
"sklearn... | [((983, 1007), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {}), "('train.csv')\n", (994, 1007), True, 'import pandas as pd\n'), ((1016, 1039), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (1027, 1039), True, 'import pandas as pd\n'), ((1313, 1356), 'pandas.concat', 'pd.concat', (['[train, test]'], {'ignore_index': '(True)'}), '([train, test], ignore_index=True)\n', (1322, 1356), True, 'import pandas as pd\n'), ((2881, 2947), 'scipy.stats.probplot', 'stats.probplot', (['data.Item_Outlet_Sales'], {'dist': '"""uniform"""', 'plot': 'pylab'}), "(data.Item_Outlet_Sales, dist='uniform', plot=pylab)\n", (2895, 2947), True, 'import scipy.stats as stats\n'), ((2948, 2960), 'pylab.show', 'pylab.show', ([], {}), '()\n', (2958, 2960), False, 'import pylab\n'), ((3144, 3154), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3152, 3154), True, 'import matplotlib.pyplot as plt\n'), ((3221, 3249), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '[8, 6]'}), '(figsize=[8, 6])\n', (3233, 3249), True, 'import matplotlib.pyplot as plt\n'), ((3342, 3352), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3350, 3352), True, 'import matplotlib.pyplot as plt\n'), ((5470, 5484), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (5482, 5484), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((5688, 5702), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (5700, 5702), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((5838, 5989), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['Item_Fat_Content', 'Outlet_Location_Type', 'Outlet_Size', 'Outlet_Type',\n 'Item_Type_Combined', 'Outlet_Identifier']"}), "(data, columns=['Item_Fat_Content', 'Outlet_Location_Type',\n 'Outlet_Size', 'Outlet_Type', 'Item_Type_Combined', 'Outlet_Identifier'])\n", (5852, 5989), True, 'import pandas as pd\n'), ((7959, 7991), 
'sklearn.model_selection.train_test_split', 'train_test_split', (['Xtrain', 'ytrain'], {}), '(Xtrain, ytrain)\n', (7975, 7991), False, 'from sklearn.model_selection import train_test_split\n'), ((8562, 8677), 'sklearn.cross_validation.cross_val_score', 'cross_validation.cross_val_score', (['r_pipeline', 'X_train[predictors]', 'y_train'], {'cv': '(20)', 'scoring': '"""mean_squared_error"""'}), "(r_pipeline, X_train[predictors], y_train,\n cv=20, scoring='mean_squared_error')\n", (8594, 8677), False, 'from sklearn import cross_validation, metrics\n'), ((8873, 8906), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['preds', 'y_test'], {}), '(preds, y_test)\n', (8891, 8906), False, 'from sklearn.metrics import mean_squared_error\n'), ((8914, 8928), 'numpy.sqrt', 'np.sqrt', (['RMSEd'], {}), '(RMSEd)\n', (8921, 8928), True, 'import numpy as np\n'), ((9571, 9685), 'sklearn.cross_validation.cross_val_score', 'cross_validation.cross_val_score', (['pipedesc', 'X_train[predictors]', 'y_train'], {'cv': '(20)', 'scoring': '"""mean_squared_error"""'}), "(pipedesc, X_train[predictors], y_train, cv\n =20, scoring='mean_squared_error')\n", (9603, 9685), False, 'from sklearn import cross_validation, metrics\n'), ((10665, 10777), 'sklearn.cross_validation.cross_val_score', 'cross_validation.cross_val_score', (['clfgrd', 'X_train[predictors]', 'y_train'], {'cv': '(20)', 'scoring': '"""mean_squared_error"""'}), "(clfgrd, X_train[predictors], y_train, cv=\n 20, scoring='mean_squared_error')\n", (10697, 10777), False, 'from sklearn import cross_validation, metrics\n'), ((10959, 11004), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Description', 'RMSE']"}), "(columns=['Description', 'RMSE'])\n", (10971, 11004), True, 'import pandas as pd\n'), ((8738, 8754), 'numpy.abs', 'np.abs', (['cv_score'], {}), '(cv_score)\n', (8744, 8754), True, 'import numpy as np\n'), ((9750, 9770), 'numpy.abs', 'np.abs', (['cv_scoredesc'], {}), '(cv_scoredesc)\n', (9756, 9770), True, 
'import numpy as np\n'), ((10844, 10866), 'numpy.abs', 'np.abs', (['cvgd_scoredesc'], {}), '(cvgd_scoredesc)\n', (10850, 10866), True, 'import numpy as np\n'), ((6943, 6959), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (6957, 6959), False, 'from sklearn.preprocessing import StandardScaler\n'), ((8332, 8348), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (8346, 8348), False, 'from sklearn.preprocessing import StandardScaler\n'), ((8370, 8388), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (8386, 8388), False, 'from sklearn.linear_model import LinearRegression\n'), ((9114, 9130), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (9128, 9130), False, 'from sklearn.preprocessing import StandardScaler\n'), ((9174, 9280), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'n_estimators': '(100)', 'learning_rate': '(0.1)', 'max_depth': '(1)', 'random_state': '(0)', 'loss': '"""ls"""'}), "(n_estimators=100, learning_rate=0.1, max_depth=1,\n random_state=0, loss='ls')\n", (9199, 9280), False, 'from sklearn.ensemble import GradientBoostingRegressor\n')] |
# -*- coding: utf-8 -*-
# file: memnet.py
# author: songyouwei <<EMAIL>>
# Copyright (C) 2018. All Rights Reserved.
import numpy as np
from layers.attention import Attention
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from layers.squeeze_embedding import SqueezeEmbedding
import torch.nn.functional as F
import re
class LifelongABSA(nn.Module):
    """Lifelong-learning memory-network model for aspect-based sentiment analysis.

    The forward pass attends over the embedded context words using the averaged
    aspect embedding as the query, projects through a shared matrix ``W`` to
    polarity logits, and feeds the result to one linear head per task
    (``opt.taskcla`` heads).  When ``self.L2MN`` is enabled via
    :meth:`insertKnowBase`, attention scores retained from previously seen
    domains (the ASA/CSE knowledge bases) are folded into the prediction.
    """

    def locationed_memory(self, memory, memory_len):
        """Weight each memory slot by its position in the sentence.

        Words inside the real sentence get weight ``1 - (idx + 1) / len``
        (earlier words weigh more, the last real word gets 0); padding slots
        keep weight 1.

        Args:
            memory: (batch, seq_len, embed_dim) embedded context.
            memory_len: (batch,) true sentence lengths.

        Returns:
            Position-weighted memory with the same shape as ``memory``.
        """
        # here we just simply calculate the location vector in Model2's manner
        batch_size = memory.shape[0]
        seq_len = memory.shape[1]
        memory_len = memory_len.cpu().numpy()
        weight = [[] for i in range(batch_size)]
        for i in range(batch_size):
            for idx in range(memory_len[i]):
                weight[i].append(1-float(idx+1)/memory_len[i])
            for idx in range(memory_len[i], seq_len):
                weight[i].append(1)
        weight = torch.tensor(weight).to(self.opt.device)
        memory = weight.unsqueeze(2)*memory
        return memory

    def __init__(self, embedding_matrix, opt):
        """Build embeddings, attention, per-task heads and the shared projection.

        Args:
            embedding_matrix: pre-trained word-embedding weights (array-like).
            opt: options namespace; reads embed_dim, polarities_dim, taskcla,
                initializer, device.
        """
        super(LifelongABSA, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
        self.attention = Attention(opt.embed_dim, score_function='mlp')
        self.polarities_dim = opt.polarities_dim
        # One linear classification head per task.
        self.last = torch.nn.ModuleList()
        self.hat = False
        # Running set of vocabulary indices seen in the current domain.
        self.word_in_domain = torch.zeros(1, dtype=torch.int64)
        # Per-task caches filled during forward() and consumed by buildASA():
        # context_attention[t][sentence] -> zip of (word index, attention score),
        # aspect_context_attention[t][sentence] -> (context indices, aspect indices).
        self.context_attention = dict()
        self.aspect_context_attention = dict()
        self.currenttask = -1
        self.currentSentence = 0
        for t in range(self.opt.taskcla):
            self.last.append(nn.Linear(opt.polarities_dim, opt.polarities_dim))
            self.context_attention[t] = dict()
            self.aspect_context_attention[t] = dict()
        # Shared projection from embedding space to polarity logits.
        self.W = torch.nn.Parameter(torch.randn(opt.embed_dim, opt.polarities_dim))
        self.opt.initializer(self.W)
        # where the negative, neutral, and positive classes are denoted
        # as [1, 0, 0], [0, 1, 0] and [0, 0, 1] respectively
        self.An = torch.tensor(np.array([[1, -1, -1]]), dtype=torch.float32, requires_grad=False)
        self.Bn = torch.tensor(np.array([[1, 0, 0]]), dtype=torch.float32, requires_grad=False)
        self.Ap = torch.tensor(np.array([[-1, -1, 1]]), dtype=torch.float32, requires_grad=False)
        self.Bp = torch.tensor(np.array([[0, 0, 1]]), dtype=torch.float32, requires_grad=False)
        self.L2MN = False
        # Fix: initialise these so get_Optimizer() / getFAvector() /
        # getHqVectorMatrix() cannot raise AttributeError when called before
        # set_Optimizer() / insertKnowBase().
        self.optimizer = None
        self.currentASAt = None
        self.currentCSEt = None

    def forward(self, t, inputs, s):
        """Run one task-conditioned forward pass.

        Args:
            t: task id (0-dim tensor; .item() is used for the caches). A task
                switch resets the per-task sentence counter.
            inputs: (context_indices_without_aspect, aspect_indices).
            s: unused here; kept for interface compatibility with callers.

        Returns:
            List with one (batch, polarities_dim) logit tensor per task.
        """
        if self.currenttask != t:
            self.currenttask = t
            self.currentSentence = 0
        text_raw_without_aspect_indices, aspect_indices = inputs[0], inputs[1]
        memory_len = torch.sum(text_raw_without_aspect_indices != 0, dim=-1)
        aspect_len = torch.sum(aspect_indices != 0, dim=-1)
        # Fix: convert the existing tensor directly instead of
        # torch.tensor(tensor), which copies and triggers a UserWarning.
        nonzeros_aspect = aspect_len.to(self.opt.device, dtype=torch.float)
        memory = self.embed(text_raw_without_aspect_indices)
        memory = self.squeeze_embedding(memory, memory_len)
        # memory = self.locationed_memory(memory, memory_len)
        # Average the aspect-word embeddings into a single query vector.
        aspect = self.embed(aspect_indices)
        aspect = torch.sum(aspect, dim=1)
        aspect = torch.div(aspect, nonzeros_aspect.view(nonzeros_aspect.size(0), 1))
        x = aspect.unsqueeze(dim=1)
        out_at, score_sentence = self.attention(memory, x)
        # Attention-weighted context representation, projected to logits.
        o_lifelog = torch.matmul(score_sentence, memory)
        s_output = torch.matmul((x + o_lifelog), self.W)
        if self.L2MN:
            # Fold in knowledge retained from earlier domains.
            faPosVector = self.getFaPositiveVector(text_raw_without_aspect_indices, aspect_indices)
            faPosVectorTensor = torch.tensor(faPosVector, dtype=torch.float32, requires_grad=False)
            # Fix: this previously called getFaPositiveVector() again, making
            # the negative branch a duplicate of the positive one.
            faNegVector = self.getFaNegativeVector(text_raw_without_aspect_indices, aspect_indices)
            faNegVectorTensor = torch.tensor(faNegVector, dtype=torch.float32, requires_grad=False)
            hQMatrix = self.getHqVectorMatrix(text_raw_without_aspect_indices, aspect_indices)
            hQMatrixTensor = torch.tensor(hQMatrix, dtype=torch.float32, requires_grad=False)
            o_lifelogPositive = torch.matmul(faPosVectorTensor, memory)
            o_lifelogNegative = torch.matmul(faNegVectorTensor, memory)
            score_sentence_Hq = (score_sentence + faNegVectorTensor + faPosVectorTensor)
            multScoreSenteceHq = torch.matmul(score_sentence_Hq, hQMatrixTensor)
            # Positive actions
            parcialPositive = torch.matmul(self.Ap, torch.transpose(self.Bp, 0, 1))
            s_output_positive = torch.matmul(o_lifelogPositive, self.W)
            parcialPositive = torch.matmul(parcialPositive, s_output_positive)
            # Negative actions
            parcialNegative = torch.matmul(self.An, torch.transpose(self.Bn, 0, 1))
            s_output_negative = torch.matmul(o_lifelogNegative, self.W)
            parcialNegative = torch.matmul(parcialNegative, s_output_negative)
            # NOTE(review): sjoin is computed but never returned or stored —
            # the L2MN combination looks unfinished; preserved for parity.
            sjoin = s_output + parcialPositive + parcialNegative + multScoreSenteceHq
        y = []
        for i, _ in enumerate(range(self.opt.taskcla)):
            y.append(self.last[i](s_output).view(-1, self.opt.polarities_dim))
        # Cache attention scores and (context, aspect) indices for buildASA().
        for ielement in range(text_raw_without_aspect_indices.shape[0]):
            self.context_attention[t.item()][self.currentSentence] = zip(text_raw_without_aspect_indices[ielement],
                                                                         score_sentence[ielement][0])
            self.aspect_context_attention[t.item()][self.currentSentence] = (text_raw_without_aspect_indices[ielement]
                                                                             , aspect_indices[ielement])
            self.currentSentence += 1
        # Update the running set of word indices seen in this domain.
        self.word_in_domain = torch.unique(torch.cat((self.word_in_domain, text_raw_without_aspect_indices.view(-1))))
        return y

    def getEmbeddingMatrixEx(self, vocabulary):
        """Project the embeddings of the unique indices in *vocabulary* through W.

        Returns None when the embedding layer or the vocabulary is missing.
        """
        if self.embed is None or vocabulary is None:
            return None
        t = torch.unique(vocabulary)
        memory = self.embed(t)
        # Where M in R t.q V * K ;  M = WC
        return torch.matmul(memory, self.W)

    def buildASA(self, task, dataset, word_contex_domain, aspect_domain):
        """Build the Aspect-Sentiment-Attention (ASA) knowledge base for *task*.

        Aggregates the attention scores cached during forward() into a nested
        dict  word_index -> aspect_index -> polarity -> mean attention score,
        with polarity following the 0=negative, 1=neutral, 2=positive
        convention.
        """
        exdomain_context_sentiment = dict()
        domain_context_sentiment = dict()
        # Build data structure.  Fix: key the dicts by the plain python value
        # (.item()) — tensors hash by identity, so the .item()-based membership
        # tests and the probability loop below could never find tensor keys.
        for ivalue, iword_index in enumerate(word_contex_domain):
            exdomain_context_sentiment[iword_index.item()] = dict()
            domain_context_sentiment[iword_index.item()] = dict()
            for ivalue, iaspect in enumerate(aspect_domain):
                exdomain_context_sentiment[iword_index.item()][iaspect.item()] = {0: (0, 0), 1: (0, 0), 2: (0, 0)}
                domain_context_sentiment[iword_index.item()][iaspect.item()] = {0: 0, 1: 0, 2: 0}
        # Accumulate (score sum, count) per (context word, aspect, polarity).
        for iValue, isentences in enumerate(dataset):
            contexIndexAttention = self.context_attention[task][iValue]
            text_raw_without_aspect_indices, aspect_indices = self.aspect_context_attention[task][iValue]
            targets = isentences['polarity']
            ### Polarity convention: 0 negative, 1 neutral, 2 positive
            for idex, (index, score) in enumerate(contexIndexAttention):
                if index.item() in domain_context_sentiment:
                    for iaspect in aspect_indices:
                        if iaspect.item() in domain_context_sentiment[index.item()]:
                            word_numerator, word_denominator = exdomain_context_sentiment[index.item()][iaspect.item()][targets]
                            word_denominator += 1
                            word_numerator += 1*score.item()
                            exdomain_context_sentiment[index.item()][iaspect.item()][targets] = (word_numerator, word_denominator)
        # Convert the accumulated (sum, count) pairs into mean attention scores.
        for ivalue, iword_index in enumerate(word_contex_domain):
            for ivalue, iaspect in enumerate(aspect_domain):
                for iopinion in range(3):
                    word_numerator, word_denominator = exdomain_context_sentiment[iword_index.item()][iaspect.item()][iopinion]
                    if word_denominator != 0:
                        domain_context_sentiment[iword_index.item()][iaspect.item()][iopinion] = word_numerator/word_denominator
        return domain_context_sentiment

    def insertKnowBase(self, ASAt, CSEt):
        """Install the ASA / CSE knowledge bases and enable the L2MN path."""
        self.currentASAt = ASAt
        self.currentCSEt = CSEt
        self.L2MN = True

    def getFAvector(self, type, sent_index_word, list_aspect):
        """Look up retained attention scores for each context word.

        Args:
            type: "positive" selects one polarity slot, anything else the
                other.  (Shadows the builtin ``type``; the name is kept for
                interface compatibility.)
            sent_index_word: (batch, seq) context word indices (0 = padding).
            list_aspect: (batch, aspect_len) aspect word indices (0 = padding).

        Returns:
            np.ndarray of shape (batch, 1, squeezed_seq_len) of scores, or
            None when no ASA knowledge base is installed.
        """
        if self.currentASAt is None:
            return None
        # NOTE(review): the stored polarity convention is 0=negative,
        # 1=neutral, 2=positive, yet "positive" maps to index 1 here and the
        # default is 0.  Preserved as-is — confirm whether index 2 was meant.
        index = 0
        fAList = []
        if type == "positive":
            index = 1
        rowSize = sent_index_word.shape[0]
        asaWordKeys = self.currentASAt.keys()
        memory_len = torch.sum(list_aspect != 0, dim=-1)
        index_len = torch.sum(sent_index_word != 0, dim=-1)
        sent_index_word = self.squeeze_embedding(sent_index_word, index_len)
        nlist_aspect = self.squeeze_embedding(list_aspect, memory_len)
        for iRow in range(rowSize):
            fA = []
            list_index_word = sent_index_word[iRow]
            for word in list_index_word:
                if not (word.item() in asaWordKeys):
                    fA.append(0)
                else:  # Word exists in the knowledge base
                    aspectDict = self.currentASAt[word.item()]
                    aspectDictKeys = aspectDict.keys()
                    list_aspect_row = nlist_aspect[iRow]
                    aspectToCompare = set([ival.item() for ival in list_aspect_row if ival != 0])
                    aspectInterset = set(aspectDictKeys)
                    aspectInterset = aspectInterset & aspectToCompare
                    if len(aspectInterset) == 0:
                        fA.append(0)
                    else:
                        # Use the score of the first matching aspect only.
                        for iaspect in aspectInterset:
                            scoreattention = aspectDict[iaspect][index]
                            fA.append(scoreattention)
                            break
            fAList.append([fA])
        return np.array(fAList)

    def getFaPositiveVector(self, sent_index_word, list_aspect):
        """Retained-attention scores under the positive polarity slot."""
        return self.getFAvector("positive", sent_index_word, list_aspect)

    def getFaNegativeVector(self, sent_index_word, list_aspect):
        """Retained-attention scores under the negative polarity slot."""
        return self.getFAvector("negative", sent_index_word, list_aspect)

    def getHqVectorMatrix(self, sent_index_word, list_aspect):
        """Per-word 3-dim polarity vectors from the CSE knowledge base.

        Unknown/padding words map to [0, 0, 0].  Returns an np.ndarray of
        shape (batch, squeezed_seq_len, 3), or None when no CSE base is set.
        """
        if self.currentCSEt is None:
            return None
        resultHqList = list()
        rowSize = sent_index_word.shape[0]
        index_len = torch.sum(sent_index_word != 0, dim=-1)
        sent_index_word = self.squeeze_embedding(sent_index_word, index_len)
        currentCSEtKeys = self.currentCSEt.keys()
        for iRow in range(rowSize):
            resultHq = list()
            list_index_word = sent_index_word[iRow]
            for word in list_index_word:
                if not (word.item() in currentCSEtKeys):
                    resultHq.append([0, 0, 0])
                else:
                    aspectDict = self.currentCSEt[word.item()]
                    resultHq.append(aspectDict.numpy())
            resultHqList.append(resultHq)
        return np.array(resultHqList)

    def get_Optimizer(self):
        """Return the optimizer attached via set_Optimizer() (None if unset)."""
        return self.optimizer

    def set_Optimizer(self, newoptimizer):
        """Attach the optimizer used to train this model."""
        self.optimizer = newoptimizer
| [
"torch.unique",
"torch.nn.ModuleList",
"layers.attention.Attention",
"layers.squeeze_embedding.SqueezeEmbedding",
"torch.randn",
"numpy.array",
"torch.nn.Linear",
"torch.zeros",
"torch.matmul",
"torch.sum",
"torch.tensor",
"torch.transpose"
] | [((1290, 1324), 'layers.squeeze_embedding.SqueezeEmbedding', 'SqueezeEmbedding', ([], {'batch_first': '(True)'}), '(batch_first=True)\n', (1306, 1324), False, 'from layers.squeeze_embedding import SqueezeEmbedding\n'), ((1350, 1396), 'layers.attention.Attention', 'Attention', (['opt.embed_dim'], {'score_function': '"""mlp"""'}), "(opt.embed_dim, score_function='mlp')\n", (1359, 1396), False, 'from layers.attention import Attention\n'), ((1599, 1620), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ([], {}), '()\n', (1618, 1620), False, 'import torch\n'), ((1676, 1709), 'torch.zeros', 'torch.zeros', (['(1)'], {'dtype': 'torch.int64'}), '(1, dtype=torch.int64)\n', (1687, 1709), False, 'import torch\n'), ((3064, 3119), 'torch.sum', 'torch.sum', (['(text_raw_without_aspect_indices != 0)'], {'dim': '(-1)'}), '(text_raw_without_aspect_indices != 0, dim=-1)\n', (3073, 3119), False, 'import torch\n'), ((3141, 3179), 'torch.sum', 'torch.sum', (['(aspect_indices != 0)'], {'dim': '(-1)'}), '(aspect_indices != 0, dim=-1)\n', (3150, 3179), False, 'import torch\n'), ((3517, 3541), 'torch.sum', 'torch.sum', (['aspect'], {'dim': '(1)'}), '(aspect, dim=1)\n', (3526, 3541), False, 'import torch\n'), ((3840, 3876), 'torch.matmul', 'torch.matmul', (['score_sentence', 'memory'], {}), '(score_sentence, memory)\n', (3852, 3876), False, 'import torch\n'), ((3897, 3932), 'torch.matmul', 'torch.matmul', (['(x + o_lifelog)', 'self.W'], {}), '(x + o_lifelog, self.W)\n', (3909, 3932), False, 'import torch\n'), ((7291, 7315), 'torch.unique', 'torch.unique', (['vocabulary'], {}), '(vocabulary)\n', (7303, 7315), False, 'import torch\n'), ((7412, 7440), 'torch.matmul', 'torch.matmul', (['memory', 'self.W'], {}), '(memory, self.W)\n', (7424, 7440), False, 'import torch\n'), ((10378, 10413), 'torch.sum', 'torch.sum', (['(list_aspect != 0)'], {'dim': '(-1)'}), '(list_aspect != 0, dim=-1)\n', (10387, 10413), False, 'import torch\n'), ((10434, 10473), 'torch.sum', 'torch.sum', (['(sent_index_word != 
0)'], {'dim': '(-1)'}), '(sent_index_word != 0, dim=-1)\n', (10443, 10473), False, 'import torch\n'), ((11673, 11689), 'numpy.array', 'np.array', (['fAList'], {}), '(fAList)\n', (11681, 11689), True, 'import numpy as np\n'), ((12183, 12222), 'torch.sum', 'torch.sum', (['(sent_index_word != 0)'], {'dim': '(-1)'}), '(sent_index_word != 0, dim=-1)\n', (12192, 12222), False, 'import torch\n'), ((12816, 12838), 'numpy.array', 'np.array', (['resultHqList'], {}), '(resultHqList)\n', (12824, 12838), True, 'import numpy as np\n'), ((1206, 1255), 'torch.tensor', 'torch.tensor', (['embedding_matrix'], {'dtype': 'torch.float'}), '(embedding_matrix, dtype=torch.float)\n', (1218, 1255), False, 'import torch\n'), ((2184, 2230), 'torch.randn', 'torch.randn', (['opt.embed_dim', 'opt.polarities_dim'], {}), '(opt.embed_dim, opt.polarities_dim)\n', (2195, 2230), False, 'import torch\n'), ((2437, 2460), 'numpy.array', 'np.array', (['[[1, -1, -1]]'], {}), '([[1, -1, -1]])\n', (2445, 2460), True, 'import numpy as np\n'), ((2534, 2555), 'numpy.array', 'np.array', (['[[1, 0, 0]]'], {}), '([[1, 0, 0]])\n', (2542, 2555), True, 'import numpy as np\n'), ((2631, 2654), 'numpy.array', 'np.array', (['[[-1, -1, 1]]'], {}), '([[-1, -1, 1]])\n', (2639, 2654), True, 'import numpy as np\n'), ((2729, 2750), 'numpy.array', 'np.array', (['[[0, 0, 1]]'], {}), '([[0, 0, 1]])\n', (2737, 2750), True, 'import numpy as np\n'), ((4191, 4258), 'torch.tensor', 'torch.tensor', (['faPosVector'], {'dtype': 'torch.float32', 'requires_grad': '(False)'}), '(faPosVector, dtype=torch.float32, requires_grad=False)\n', (4203, 4258), False, 'import torch\n'), ((4391, 4458), 'torch.tensor', 'torch.tensor', (['faNegVector'], {'dtype': 'torch.float32', 'requires_grad': '(False)'}), '(faNegVector, dtype=torch.float32, requires_grad=False)\n', (4403, 4458), False, 'import torch\n'), ((4582, 4646), 'torch.tensor', 'torch.tensor', (['hQMatrix'], {'dtype': 'torch.float32', 'requires_grad': '(False)'}), '(hQMatrix, 
dtype=torch.float32, requires_grad=False)\n', (4594, 4646), False, 'import torch\n'), ((4679, 4718), 'torch.matmul', 'torch.matmul', (['faPosVectorTensor', 'memory'], {}), '(faPosVectorTensor, memory)\n', (4691, 4718), False, 'import torch\n'), ((4751, 4790), 'torch.matmul', 'torch.matmul', (['faNegVectorTensor', 'memory'], {}), '(faNegVectorTensor, memory)\n', (4763, 4790), False, 'import torch\n'), ((4913, 4960), 'torch.matmul', 'torch.matmul', (['score_sentence_Hq', 'hQMatrixTensor'], {}), '(score_sentence_Hq, hQMatrixTensor)\n', (4925, 4960), False, 'import torch\n'), ((5106, 5145), 'torch.matmul', 'torch.matmul', (['o_lifelogPositive', 'self.W'], {}), '(o_lifelogPositive, self.W)\n', (5118, 5145), False, 'import torch\n'), ((5178, 5226), 'torch.matmul', 'torch.matmul', (['parcialPositive', 's_output_positive'], {}), '(parcialPositive, s_output_positive)\n', (5190, 5226), False, 'import torch\n'), ((5372, 5411), 'torch.matmul', 'torch.matmul', (['o_lifelogNegative', 'self.W'], {}), '(o_lifelogNegative, self.W)\n', (5384, 5411), False, 'import torch\n'), ((5441, 5489), 'torch.matmul', 'torch.matmul', (['parcialNegative', 's_output_negative'], {}), '(parcialNegative, s_output_negative)\n', (5453, 5489), False, 'import torch\n'), ((933, 953), 'torch.tensor', 'torch.tensor', (['weight'], {}), '(weight)\n', (945, 953), False, 'import torch\n'), ((1933, 1982), 'torch.nn.Linear', 'nn.Linear', (['opt.polarities_dim', 'opt.polarities_dim'], {}), '(opt.polarities_dim, opt.polarities_dim)\n', (1942, 1982), True, 'import torch.nn as nn\n'), ((3206, 3249), 'torch.tensor', 'torch.tensor', (['aspect_len'], {'dtype': 'torch.float'}), '(aspect_len, dtype=torch.float)\n', (3218, 3249), False, 'import torch\n'), ((5043, 5073), 'torch.transpose', 'torch.transpose', (['self.Bp', '(0)', '(1)'], {}), '(self.Bp, 0, 1)\n', (5058, 5073), False, 'import torch\n'), ((5309, 5339), 'torch.transpose', 'torch.transpose', (['self.Bn', '(0)', '(1)'], {}), '(self.Bn, 0, 1)\n', (5324, 5339), 
False, 'import torch\n')] |
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from future import standard_library
standard_library.install_aliases() # triggers E402, hence noqa below
import h5py # noqa
from collections import defaultdict # noqa
import numpy as np # noqa
import os # noqa
from neon import logger as neon_logger # noqa
from neon.data.text_preprocessing import clean_string # noqa
from neon.util.compat import pickle # noqa
def build_data_train(path='.', filepath='labeledTrainData.tsv', vocab_file=None,
vocab=None, skip_headers=True, train_ratio=0.8):
"""
Loads the data file and spits out a h5 file with record of
{y, review_text, review_int}
Typically two passes over the data.
1st pass is for vocab and pre-processing. (WARNING: to get phrases, we need to go
though multiple passes). 2nd pass is converting text into integers. We will deal with integers
from thereafter.
WARNING: we use h5 just as proof of concept for handling large datasets
Datasets may fit entirely in memory as numpy as array
"""
fname_h5 = filepath + '.h5'
if vocab_file is None:
fname_vocab = filepath + '.vocab'
else:
fname_vocab = vocab_file
if not os.path.exists(fname_h5) or not os.path.exists(fname_vocab):
# create the h5 store - NOTE: hdf5 is row-oriented store and we slice rows
# reviews_text holds the metadata and processed text file
# reviews_int holds the ratings, ints
h5f = h5py.File(fname_h5, 'w')
shape, maxshape = (2 ** 16,), (None, )
dt = np.dtype([('y', np.uint8),
('split', np.bool),
('num_words', np.uint16),
# WARNING: vlen=bytes in python 3
('text', h5py.special_dtype(vlen=str))
])
reviews_text = h5f.create_dataset('reviews', shape=shape, maxshape=maxshape,
dtype=dt, compression='gzip')
reviews_train = h5f.create_dataset(
'train', shape=shape, maxshape=maxshape,
dtype=h5py.special_dtype(vlen=np.int32), compression='gzip')
reviews_valid = h5f.create_dataset(
'valid', shape=shape, maxshape=maxshape,
dtype=h5py.special_dtype(vlen=np.int32), compression='gzip')
wdata = np.zeros((1, ), dtype=dt)
# init vocab only for train data
build_vocab = False
if vocab is None:
vocab = defaultdict(int)
build_vocab = True
nsamples = 0
# open the file, skip the headers if needed
f = open(filepath, 'r')
if skip_headers:
f.readline()
for i, line in enumerate(f):
_, rating, review = line.strip().split('\t')
# clean the review
review = clean_string(review)
review_words = review.strip().split()
num_words = len(review_words)
split = int(np.random.rand() < train_ratio)
# create record
wdata['y'] = int(float(rating))
wdata['text'] = review
wdata['num_words'] = num_words
wdata['split'] = split
reviews_text[i] = wdata
# update the vocab if needed
if build_vocab:
for word in review_words:
vocab[word] += 1
nsamples += 1
# histogram of class labels, sentence length
ratings, counts = np.unique(
reviews_text['y'][:nsamples], return_counts=True)
sen_len, sen_len_counts = np.unique(
reviews_text['num_words'][:nsamples], return_counts=True)
vocab_size = len(vocab)
nclass = len(ratings)
reviews_text.attrs['vocab_size'] = vocab_size
reviews_text.attrs['nrows'] = nsamples
reviews_text.attrs['nclass'] = nclass
reviews_text.attrs['class_distribution'] = counts
neon_logger.display("vocabulary size - {}".format(vocab_size))
neon_logger.display("# of samples - {}".format(nsamples))
neon_logger.display("# of classes {}".format(nclass))
neon_logger.display("class distribution - {} {}".format(ratings, counts))
sen_counts = list(zip(sen_len, sen_len_counts))
sen_counts = sorted(sen_counts, key=lambda kv: kv[1], reverse=True)
neon_logger.display("sentence length - {} {} {}".format(len(sen_len),
sen_len, sen_len_counts))
# WARNING: assume vocab is of order ~4-5 million words.
# sort the vocab , re-assign ids by its frequency. Useful for downstream tasks
# only done for train data
if build_vocab:
vocab_sorted = sorted(
list(vocab.items()), key=lambda kv: kv[1], reverse=True)
vocab = {}
for i, t in enumerate(list(zip(*vocab_sorted))[0]):
vocab[t] = i
# map text to integers
ntrain = 0
nvalid = 0
for i in range(nsamples):
text = reviews_text[i]['text']
y = int(reviews_text[i]['y'])
split = reviews_text[i]['split']
text_int = [y] + [vocab[t] for t in text.strip().split()]
if split:
reviews_train[ntrain] = text_int
ntrain += 1
else:
reviews_valid[nvalid] = text_int
nvalid += 1
reviews_text.attrs['ntrain'] = ntrain
reviews_text.attrs['nvalid'] = nvalid
neon_logger.display(
"# of train - {0}, # of valid - {1}".format(reviews_text.attrs['ntrain'],
reviews_text.attrs['nvalid']))
# close open files
h5f.close()
f.close()
if not os.path.exists(fname_vocab):
rev_vocab = {}
for wrd, wrd_id in vocab.items():
rev_vocab[wrd_id] = wrd
neon_logger.display("vocabulary from IMDB dataset is saved into {}".format(fname_vocab))
pickle.dump((vocab, rev_vocab), open(fname_vocab, 'wb'), 2)
return fname_h5, fname_vocab
| [
"h5py.File",
"h5py.special_dtype",
"future.standard_library.install_aliases",
"os.path.exists",
"numpy.zeros",
"collections.defaultdict",
"neon.data.text_preprocessing.clean_string",
"numpy.random.rand",
"numpy.unique"
] | [((784, 818), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (816, 818), False, 'from future import standard_library\n'), ((2190, 2214), 'h5py.File', 'h5py.File', (['fname_h5', '"""w"""'], {}), "(fname_h5, 'w')\n", (2199, 2214), False, 'import h5py\n'), ((3054, 3078), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'dt'}), '((1,), dtype=dt)\n', (3062, 3078), True, 'import numpy as np\n'), ((4195, 4254), 'numpy.unique', 'np.unique', (["reviews_text['y'][:nsamples]"], {'return_counts': '(True)'}), "(reviews_text['y'][:nsamples], return_counts=True)\n", (4204, 4254), True, 'import numpy as np\n'), ((4302, 4369), 'numpy.unique', 'np.unique', (["reviews_text['num_words'][:nsamples]"], {'return_counts': '(True)'}), "(reviews_text['num_words'][:nsamples], return_counts=True)\n", (4311, 4369), True, 'import numpy as np\n'), ((6535, 6562), 'os.path.exists', 'os.path.exists', (['fname_vocab'], {}), '(fname_vocab)\n', (6549, 6562), False, 'import os\n'), ((1920, 1944), 'os.path.exists', 'os.path.exists', (['fname_h5'], {}), '(fname_h5)\n', (1934, 1944), False, 'import os\n'), ((1952, 1979), 'os.path.exists', 'os.path.exists', (['fname_vocab'], {}), '(fname_vocab)\n', (1966, 1979), False, 'import os\n'), ((3196, 3212), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (3207, 3212), False, 'from collections import defaultdict\n'), ((3548, 3568), 'neon.data.text_preprocessing.clean_string', 'clean_string', (['review'], {}), '(review)\n', (3560, 3568), False, 'from neon.data.text_preprocessing import clean_string\n'), ((2811, 2844), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'np.int32'}), '(vlen=np.int32)\n', (2829, 2844), False, 'import h5py\n'), ((2982, 3015), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'np.int32'}), '(vlen=np.int32)\n', (3000, 3015), False, 'import h5py\n'), ((2483, 2511), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'str'}), '(vlen=str)\n', (2501, 
2511), False, 'import h5py\n'), ((3685, 3701), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3699, 3701), True, 'import numpy as np\n')] |
#Metrics
import torch
import numpy as np
import pandas as pd
from torch.nn import functional as F
from src import data
def site_confusion(y_true, y_pred, site_lists):
"""What proportion of misidentified species come from the same site?
Args:
y_true: string values of true labels
y_pred: string values or predicted labels
site_lists: list of site labels for each string label taxonID -> sites
Returns:
Within site confusion score
"""
within_site = 0
cross_site = 0
for index, value in enumerate(y_pred):
#If not correctly predicted
if not value == y_true[index]:
correct_sites = site_lists[y_true[index]]
incorrect_site = site_lists[y_pred[index]]
#Do they co-occur?
site_overlap = any([site in incorrect_site for site in correct_sites])
if site_overlap:
within_site +=1
else:
cross_site +=1
else:
pass
#don't divide by zero
if within_site + cross_site == 0:
return 0
#Get proportion of within site error
proportion_within = within_site/(within_site + cross_site)
return proportion_within
def genus_confusion(y_true, y_pred, scientific_dict):
"""What proportion of misidentified species come from the same genus?
Args:
y_true: taxonID of true labels
y_pred: taxonID of predicted labels
scientific_dict: a dict of taxonID -> scientific name
Returns:
Within site confusion score
"""
within_genus = 0
cross_genus = 0
for index, value in enumerate(y_pred):
#If not correctly predicted
if not value == y_true[index]:
true_genus = scientific_dict[y_true[index]][0].split()[0]
pred_genus = scientific_dict[y_pred[index]][0].split()[0]
if true_genus == pred_genus:
within_genus +=1
else:
cross_genus +=1
#don't divide by zero
if within_genus + cross_genus == 0:
return 0
#Get proportion of within site error
proportion_within = within_genus/(within_genus + cross_genus)
return proportion_within
def novel_prediction(model, csv_file, config):
"""Predict a dataset of species not included in the dataset and get the final activation score before/after softmax"""
novel_ds = data.TreeDataset(csv_file, image_size=config["image_size"], config=config)
data_loader = torch.utils.data.DataLoader(
novel_ds,
batch_size=config["batch_size"],
num_workers=config["workers"])
model.eval()
top_scores = []
softmax_scores = []
individuals = []
for batch in data_loader:
individual, inputs, targets = batch
with torch.no_grad():
pred = model(inputs["HSI"])
top_score = pred[np.arange(len(pred)), np.argmax(pred, 1)]
softmax_layer = F.softmax(pred, dim=1)
softmax_score = softmax_layer[np.arange(len(softmax_layer)), np.argmax(softmax_layer, 1)]
individuals.append(individual)
top_scores.append(top_score)
softmax_scores.append(softmax_score)
top_scores = np.concatenate(top_scores)
individuals = np.concatenate(individuals)
softmax_scores = np.concatenate(softmax_scores)
features = pd.DataFrame({"individualID":individuals, "top_score": top_scores,"softmax_score":softmax_scores})
original = pd.read_csv(csv_file)
mergeddf = features.merge(original)
return mergeddf | [
"pandas.DataFrame",
"torch.utils.data.DataLoader",
"src.data.TreeDataset",
"pandas.read_csv",
"numpy.argmax",
"torch.nn.functional.softmax",
"torch.no_grad",
"numpy.concatenate"
] | [((2451, 2525), 'src.data.TreeDataset', 'data.TreeDataset', (['csv_file'], {'image_size': "config['image_size']", 'config': 'config'}), "(csv_file, image_size=config['image_size'], config=config)\n", (2467, 2525), False, 'from src import data\n'), ((2549, 2654), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['novel_ds'], {'batch_size': "config['batch_size']", 'num_workers': "config['workers']"}), "(novel_ds, batch_size=config['batch_size'],\n num_workers=config['workers'])\n", (2576, 2654), False, 'import torch\n'), ((3295, 3321), 'numpy.concatenate', 'np.concatenate', (['top_scores'], {}), '(top_scores)\n', (3309, 3321), True, 'import numpy as np\n'), ((3342, 3369), 'numpy.concatenate', 'np.concatenate', (['individuals'], {}), '(individuals)\n', (3356, 3369), True, 'import numpy as np\n'), ((3405, 3435), 'numpy.concatenate', 'np.concatenate', (['softmax_scores'], {}), '(softmax_scores)\n', (3419, 3435), True, 'import numpy as np\n'), ((3453, 3558), 'pandas.DataFrame', 'pd.DataFrame', (["{'individualID': individuals, 'top_score': top_scores, 'softmax_score':\n softmax_scores}"], {}), "({'individualID': individuals, 'top_score': top_scores,\n 'softmax_score': softmax_scores})\n", (3465, 3558), True, 'import pandas as pd\n'), ((3572, 3593), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (3583, 3593), True, 'import pandas as pd\n'), ((2850, 2865), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2863, 2865), False, 'import torch\n'), ((3006, 3028), 'torch.nn.functional.softmax', 'F.softmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (3015, 3028), True, 'from torch.nn import functional as F\n'), ((2958, 2976), 'numpy.argmax', 'np.argmax', (['pred', '(1)'], {}), '(pred, 1)\n', (2967, 2976), True, 'import numpy as np\n'), ((3102, 3129), 'numpy.argmax', 'np.argmax', (['softmax_layer', '(1)'], {}), '(softmax_layer, 1)\n', (3111, 3129), True, 'import numpy as np\n')] |
from __future__ import print_function
# Copyright (c) 2013, <NAME>
# All rights reserved.
import unittest
import numpy as np
from numpy.testing import assert_array_almost_equal
import matplotlib.pyplot as plt
from undaqTools.misc.cdf import CDF, percentile
testdata = './data/normaltestdata' # space delimited ASCII of 10,000 random digits
# from normal distribution with mean of -270 and
# sd of 270
class Test_percentile(unittest.TestCase):
def setUp(self):
global testdata
with open(testdata,'rb') as f:
self.data = map(float, f.read().split())
def test0(self):
r_bins = \
np.array([ -1050.231 , -1043.33599147, -1036.44098295, -1029.54597442,
-1022.6509659 , -1015.75595737, -1008.86094885, -1001.96594033,
-995.0709318 , -988.17592327, -981.28091475, -974.38590622,
-967.4908977 , -960.59588918, -953.70088065, -946.80587212,
-939.9108636 , -933.01585507, -926.12084655, -919.22583803,
-912.3308295 , -905.43582097, -898.54081245, -891.64580392,
-884.7507954 , -877.85578687, -870.96077835, -864.06576982,
-857.1707613 , -850.27575278, -843.38074425, -836.48573573,
-829.5907272 , -822.69571867, -815.80071015, -808.90570163,
-802.0106931 , -795.11568457, -788.22067605, -781.32566752,
-774.430659 , -767.53565048, -760.64064195, -753.74563343,
-746.8506249 , -739.95561637, -733.06060785, -726.16559932,
-719.2705908 , -712.37558227, -705.48057375, -698.58556522,
-691.6905567 , -684.79554818, -677.90053965, -671.00553112,
-664.1105226 , -657.21551407, -650.32050555, -643.42549702,
-636.5304885 , -629.63547997, -622.74047145, -615.84546292,
-608.9504544 , -602.05544588, -595.16043735, -588.26542882,
-581.3704203 , -574.47541177, -567.58040325, -560.68539472,
-553.7903862 , -546.89537767, -540.00036915, -533.10536063,
-526.2103521 , -519.31534357, -512.42033505, -505.52532652,
-498.630318 , -491.73530947, -484.84030095, -477.94529242,
-471.0502839 , -464.15527537, -457.26026685, -450.36525832,
-443.4702498 , -436.57524127, -429.68023275, -422.78522422,
-415.8902157 , -408.99520717, -402.10019865, -395.20519012,
-388.3101816 , -381.41517307, -374.52016455, -367.62515602,
-360.7301475 , -353.83513897, -346.94013045, -340.04512192,
-333.1501134 , -326.25510487, -319.36009635, -312.46508782,
-305.5700793 , -298.67507077, -291.78006225, -284.88505372,
-277.9900452 , -271.09503667, -264.20002815, -257.30501962,
-250.4100111 , -243.51500257, -236.61999405, -229.72498552,
-222.829977 , -215.93496847, -209.03995995, -202.14495142,
-195.2499429 , -188.35493437, -181.45992585, -174.56491732,
-167.6699088 , -160.77490027, -153.87989175, -146.98488322,
-140.0898747 , -133.19486617, -126.29985765, -119.40484912,
-112.5098406 , -105.61483207, -98.71982355, -91.82481502,
-84.9298065 , -78.03479797, -71.13978945, -64.24478092,
-57.3497724 , -50.45476387, -43.55975535, -36.66474682,
-29.7697383 , -22.87472977, -15.97972125, -9.08471272,
-2.1897042 , 4.70530433, 11.60031285, 18.49532138,
25.3903299 , 32.28533843, 39.18034695, 46.07535548,
52.970364 , 59.86537253, 66.76038105, 73.65538958,
80.5503981 , 87.44540663, 94.34041515, 101.23542368,
108.1304322 , 115.02544073, 121.92044925, 128.81545778,
135.7104663 , 142.60547483, 149.50048335, 156.39549188,
163.2905004 , 170.18550893, 177.08051745, 183.97552598,
190.8705345 , 197.76554303, 204.66055155, 211.55556008,
218.4505686 , 225.34557713, 232.24058565, 239.13559418,
246.0306027 , 252.92561123, 259.82061975, 266.71562828,
273.6106368 , 280.50564533, 287.40065385, 294.29566238,
301.1906709 , 308.08567943, 314.98068795, 321.87569648,
328.770705 ])
r_percentiles = \
np.array([ 0.00788452, 0.00860721, 0.00932989, 0.01005257, 0.01077526,
0.01149794, 0.01222062, 0.01294331, 0.01366599, 0.01438868,
0.01511136, 0.01949853, 0.02447029, 0.02685264, 0.029235 ,
0.03816535, 0.03870205, 0.03923875, 0.03977545, 0.04031214,
0.04084884, 0.04138554, 0.04192224, 0.04245893, 0.04299563,
0.04353233, 0.04406903, 0.04460572, 0.04514242, 0.04587917,
0.04666457, 0.04744996, 0.04823536, 0.04902075, 0.04980615,
0.05059154, 0.05137694, 0.05216234, 0.05515276, 0.05817619,
0.06144373, 0.06480917, 0.07465794, 0.08016102, 0.0870895 ,
0.08841823, 0.08974696, 0.09107569, 0.09240443, 0.09606674,
0.10644608, 0.11426144, 0.11800995, 0.12511504, 0.13906042,
0.14134499, 0.14362955, 0.1500741 , 0.15956579, 0.17179116,
0.18368956, 0.18592559, 0.18816161, 0.19211337, 0.19841317,
0.2029537 , 0.20541953, 0.20788537, 0.21266664, 0.22193011,
0.22927148, 0.23452336, 0.23801757, 0.24402634, 0.24829478,
0.2593486 , 0.26444222, 0.26710829, 0.2786814 , 0.28588673,
0.29055915, 0.30362949, 0.30988025, 0.31149286, 0.31310547,
0.31486499, 0.31998583, 0.33378933, 0.34527317, 0.36329378,
0.36957257, 0.37964864, 0.38529406, 0.38825826, 0.39021331,
0.39216836, 0.41376866, 0.42376066, 0.43533998, 0.460219 ,
0.47645825, 0.49932141, 0.51131487, 0.5139218 , 0.52057234,
0.52577312, 0.53170762, 0.54289616, 0.5514161 , 0.55749038,
0.56657762, 0.57084495, 0.5756384 , 0.58497631, 0.59406697,
0.60124967, 0.61191264, 0.61861008, 0.63526535, 0.64147876,
0.64424147, 0.64812518, 0.65628559, 0.6611236 , 0.66513402,
0.67247715, 0.6865401 , 0.69855775, 0.70405424, 0.70993554,
0.71712121, 0.74659026, 0.75933937, 0.76285267, 0.77698097,
0.77843327, 0.77988556, 0.78203854, 0.79001124, 0.79668959,
0.80160361, 0.81324827, 0.81889569, 0.83359027, 0.8376273 ,
0.84045596, 0.84676012, 0.85137567, 0.86174448, 0.86334239,
0.86940656, 0.88236389, 0.88443058, 0.88731063, 0.89183958,
0.89734229, 0.90254643, 0.90947058, 0.91505731, 0.92255361,
0.93161672, 0.93531119, 0.93928433, 0.94184878, 0.94358849,
0.9453282 , 0.94657215, 0.94779349, 0.94905589, 0.95096511,
0.95565876, 0.9591155 , 0.96179936, 0.96485932, 0.96873016,
0.97047112, 0.9715957 , 0.97272028, 0.97393729, 0.97516837,
0.97661306, 0.97872525, 0.97977577, 0.98043513, 0.98109449,
0.98175384, 0.98267924, 0.9861103 , 0.9879409 , 0.99216572,
0.99465729, 0.99529709, 0.99593689, 0.99657669, 0.99721649,
0.99777873, 0.99833404, 0.99888936, 0.99944468, 1. ])
u = self.data
cdf = percentile(u[:200])
np.testing.assert_array_almost_equal(r_bins, cdf.bin_edges)
np.testing.assert_array_almost_equal(r_percentiles, cdf.percentiles)
def test1_spec_numbins(self):
r_bins = \
np.array([ -1265.052 , -1244.1328598, -1223.2137196, -1202.2945794,
-1181.3754392, -1160.456299 , -1139.5371588, -1118.6180186,
-1097.6988784, -1076.7797382, -1055.860598 , -1034.9414578,
-1014.0223176, -993.1031774, -972.1840372, -951.264897 ,
-930.3457568, -909.4266166, -888.5074764, -867.5883362,
-846.669196 , -825.7500558, -804.8309156, -783.9117754,
-762.9926352, -742.073495 , -721.1543548, -700.2352146,
-679.3160744, -658.3969342, -637.477794 , -616.5586538,
-595.6395136, -574.7203734, -553.8012332, -532.882093 ,
-511.9629528, -491.0438126, -470.1246724, -449.2055322,
-428.286392 , -407.3672518, -386.4481116, -365.5289714,
-344.6098312, -323.690691 , -302.7715508, -281.8524106,
-260.9332704, -240.0141302, -219.09499 , -198.1758498,
-177.2567096, -156.3375694, -135.4184292, -114.499289 ,
-93.5801488, -72.6610086, -51.7418684, -30.8227282,
-9.903588 , 11.0155522, 31.9346924, 52.8538326,
73.7729728, 94.692113 , 115.6112532, 136.5303934,
157.4495336, 178.3686738, 199.287814 , 220.2069542,
241.1260944, 262.0452346, 282.9643748, 303.883515 ,
324.8026552, 345.7217954, 366.6409356, 387.5600758,
408.479216 , 429.3983562, 450.3174964, 471.2366366,
492.1557768, 513.074917 , 533.9940572, 554.9131974,
575.8323376, 596.7514778, 617.670618 , 638.5897582,
659.5088984, 680.4280386, 701.3471788, 722.266319 ,
743.1854592, 764.1045994, 785.0237396, 805.9428798,
826.86202 ])
r_percentiles = \
np.array([ 1.52682325e-04, 2.94558006e-04, 6.16047975e-04,
7.02918092e-04, 7.73474112e-04, 8.21793025e-04,
8.70111939e-04, 1.10401110e-03, 1.34481409e-03,
1.83295542e-03, 2.47881609e-03, 3.68607187e-03,
5.15865394e-03, 6.73055980e-03, 8.61060936e-03,
1.04788857e-02, 1.23147839e-02, 1.56306574e-02,
1.87118249e-02, 2.12461678e-02, 2.56479572e-02,
3.05641053e-02, 3.62309399e-02, 4.08826236e-02,
4.93068431e-02, 5.73786087e-02, 6.70094435e-02,
7.80975453e-02, 8.85867316e-02, 9.97965662e-02,
1.13123710e-01, 1.25876664e-01, 1.42376196e-01,
1.62244613e-01, 1.83208974e-01, 2.05366050e-01,
2.29556330e-01, 2.53282959e-01, 2.77540635e-01,
3.05427136e-01, 3.36100492e-01, 3.63831154e-01,
3.94043118e-01, 4.25583501e-01, 4.55163960e-01,
4.86837208e-01, 5.20599620e-01, 5.48129193e-01,
5.78151849e-01, 6.08547876e-01, 6.40669188e-01,
6.70746937e-01, 7.01026286e-01, 7.25971707e-01,
7.51021605e-01, 7.77398045e-01, 8.00059380e-01,
8.22450308e-01, 8.40847440e-01, 8.57498407e-01,
8.75729509e-01, 8.91310252e-01, 9.05933655e-01,
9.18565943e-01, 9.29274795e-01, 9.40657581e-01,
9.48881056e-01, 9.57538817e-01, 9.63601434e-01,
9.70164957e-01, 9.75166523e-01, 9.79733128e-01,
9.83692163e-01, 9.86929642e-01, 9.89432271e-01,
9.91544580e-01, 9.93344271e-01, 9.94115203e-01,
9.95269177e-01, 9.96206415e-01, 9.97047263e-01,
9.97575959e-01, 9.98237416e-01, 9.98678170e-01,
9.98903131e-01, 9.99062383e-01, 9.99389219e-01,
9.99710548e-01, 9.99739859e-01, 9.99802006e-01,
9.99820441e-01, 9.99861218e-01, 9.99912599e-01,
9.99946445e-01, 9.99963921e-01, 9.99975528e-01,
9.99981646e-01, 9.99987764e-01, 9.99993882e-01,
1.00000000e+00])
u = self.data
cdf = percentile(u, numbins=100)
np.testing.assert_array_almost_equal(r_bins, cdf.bin_edges)
np.testing.assert_array_almost_equal(r_percentiles, cdf.percentiles)
class Test_percentile_bounds(unittest.TestCase):
def setUp(self):
global testdata
with open(testdata,'rb') as f:
self.data = map(float, f.read().split())
def test_find_at_lower_bound(self):
u = self.data
cdf = percentile(u)
self.assertAlmostEqual(cdf.find(0.0), -1265.052)
def test_find_at_upper_bound(self):
u = self.data
cdf = percentile(u)
self.assertAlmostEqual(cdf.find(1.0), 806.14998224278111)
def test_find_out_of_bounds(self):
u = self.data
cdf = percentile(u)
with self.assertRaises(ValueError):
cdf.find(-0.1)
def test_find_out_of_bounds2(self):
u = self.data
cdf = percentile(u)
with self.assertRaises(ValueError):
cdf.find(1.1)
class Test_cdf_find(unittest.TestCase):
def setUp(self):
global testdata
with open(testdata,'rb') as f:
self.data = map(float, f.read().split())
def test0(self):
u = self.data
cdf = percentile(u)
self.assertAlmostEqual(cdf.find(.5), -315.27959365994514)
class Test_cdf_plot(unittest.TestCase):
def setUp(self):
global testdata
with open(testdata,'rb') as f:
self.data = map(float, f.read().split())
def test0(self):
u = self.data
cdf = percentile(u)
import matplotlib.pyplot as plt
fig = cdf.plot()
fig.savefig('./output/cdf.png')
plt.close('all')
class Test_cdf_repr(unittest.TestCase):
def setUp(self):
global testdata
with open(testdata,'rb') as f:
self.data = map(float, f.read().split())
def test0(self):
u = self.data
cdf = percentile(u, numbins=100)
cdf2 = eval(repr(cdf))
assert_array_almost_equal(cdf.percentiles, cdf2.percentiles)
assert_array_almost_equal(cdf.bin_edges, cdf2.bin_edges)
class Test_cdf_str(unittest.TestCase):
def setUp(self):
global testdata
with open(testdata,'rb') as f:
self.data = map(float, f.read().split())
def test0(self):
u = self.data
cdf = percentile(u, numbins=100)
#open('./data/cdftest0','wb').write(str(cdf))
r = open('./data/cdftest0').read()
self.assertEqual(str(cdf), r)
def test1(self):
"""truncated bins"""
u = self.data
cdf = percentile(u)
#open('./data/cdftest1','wb').write(str(cdf))
r = open('./data/cdftest1').read()
self.assertEqual(str(cdf), r)
def suite():
return unittest.TestSuite((
unittest.makeSuite(Test_percentile),
unittest.makeSuite(Test_percentile_bounds),
unittest.makeSuite(Test_cdf_find),
unittest.makeSuite(Test_cdf_plot),
unittest.makeSuite(Test_cdf_repr),
unittest.makeSuite(Test_cdf_str),
))
if __name__ == "__main__":
## # build test data
## u = np.random.normal(-270, 270, size=(3*60*60,))
## with open('./normaltestdata','wb') as f:
## f.write(' '.join(['%.3f'%v for v in u]))
# run tests
runner = unittest.TextTestRunner()
runner.run(suite())
| [
"unittest.TextTestRunner",
"matplotlib.pyplot.close",
"unittest.makeSuite",
"undaqTools.misc.cdf.percentile",
"numpy.array",
"numpy.testing.assert_array_almost_equal"
] | [((17117, 17142), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (17140, 17142), False, 'import unittest\n'), ((751, 3837), 'numpy.array', 'np.array', (['[-1050.231, -1043.33599147, -1036.44098295, -1029.54597442, -1022.6509659, \n -1015.75595737, -1008.86094885, -1001.96594033, -995.0709318, -\n 988.17592327, -981.28091475, -974.38590622, -967.4908977, -960.59588918,\n -953.70088065, -946.80587212, -939.9108636, -933.01585507, -\n 926.12084655, -919.22583803, -912.3308295, -905.43582097, -898.54081245,\n -891.64580392, -884.7507954, -877.85578687, -870.96077835, -\n 864.06576982, -857.1707613, -850.27575278, -843.38074425, -836.48573573,\n -829.5907272, -822.69571867, -815.80071015, -808.90570163, -802.0106931,\n -795.11568457, -788.22067605, -781.32566752, -774.430659, -767.53565048,\n -760.64064195, -753.74563343, -746.8506249, -739.95561637, -\n 733.06060785, -726.16559932, -719.2705908, -712.37558227, -705.48057375,\n -698.58556522, -691.6905567, -684.79554818, -677.90053965, -\n 671.00553112, -664.1105226, -657.21551407, -650.32050555, -643.42549702,\n -636.5304885, -629.63547997, -622.74047145, -615.84546292, -608.9504544,\n -602.05544588, -595.16043735, -588.26542882, -581.3704203, -\n 574.47541177, -567.58040325, -560.68539472, -553.7903862, -546.89537767,\n -540.00036915, -533.10536063, -526.2103521, -519.31534357, -\n 512.42033505, -505.52532652, -498.630318, -491.73530947, -484.84030095,\n -477.94529242, -471.0502839, -464.15527537, -457.26026685, -\n 450.36525832, -443.4702498, -436.57524127, -429.68023275, -422.78522422,\n -415.8902157, -408.99520717, -402.10019865, -395.20519012, -388.3101816,\n -381.41517307, -374.52016455, -367.62515602, -360.7301475, -\n 353.83513897, -346.94013045, -340.04512192, -333.1501134, -326.25510487,\n -319.36009635, -312.46508782, -305.5700793, -298.67507077, -\n 291.78006225, -284.88505372, -277.9900452, -271.09503667, -264.20002815,\n -257.30501962, -250.4100111, -243.51500257, 
-236.61999405, -\n 229.72498552, -222.829977, -215.93496847, -209.03995995, -202.14495142,\n -195.2499429, -188.35493437, -181.45992585, -174.56491732, -167.6699088,\n -160.77490027, -153.87989175, -146.98488322, -140.0898747, -\n 133.19486617, -126.29985765, -119.40484912, -112.5098406, -105.61483207,\n -98.71982355, -91.82481502, -84.9298065, -78.03479797, -71.13978945, -\n 64.24478092, -57.3497724, -50.45476387, -43.55975535, -36.66474682, -\n 29.7697383, -22.87472977, -15.97972125, -9.08471272, -2.1897042, \n 4.70530433, 11.60031285, 18.49532138, 25.3903299, 32.28533843, \n 39.18034695, 46.07535548, 52.970364, 59.86537253, 66.76038105, \n 73.65538958, 80.5503981, 87.44540663, 94.34041515, 101.23542368, \n 108.1304322, 115.02544073, 121.92044925, 128.81545778, 135.7104663, \n 142.60547483, 149.50048335, 156.39549188, 163.2905004, 170.18550893, \n 177.08051745, 183.97552598, 190.8705345, 197.76554303, 204.66055155, \n 211.55556008, 218.4505686, 225.34557713, 232.24058565, 239.13559418, \n 246.0306027, 252.92561123, 259.82061975, 266.71562828, 273.6106368, \n 280.50564533, 287.40065385, 294.29566238, 301.1906709, 308.08567943, \n 314.98068795, 321.87569648, 328.770705]'], {}), '([-1050.231, -1043.33599147, -1036.44098295, -1029.54597442, -\n 1022.6509659, -1015.75595737, -1008.86094885, -1001.96594033, -\n 995.0709318, -988.17592327, -981.28091475, -974.38590622, -967.4908977,\n -960.59588918, -953.70088065, -946.80587212, -939.9108636, -\n 933.01585507, -926.12084655, -919.22583803, -912.3308295, -905.43582097,\n -898.54081245, -891.64580392, -884.7507954, -877.85578687, -\n 870.96077835, -864.06576982, -857.1707613, -850.27575278, -843.38074425,\n -836.48573573, -829.5907272, -822.69571867, -815.80071015, -\n 808.90570163, -802.0106931, -795.11568457, -788.22067605, -781.32566752,\n -774.430659, -767.53565048, -760.64064195, -753.74563343, -746.8506249,\n -739.95561637, -733.06060785, -726.16559932, -719.2705908, -\n 712.37558227, -705.48057375, -698.58556522, 
-691.6905567, -684.79554818,\n -677.90053965, -671.00553112, -664.1105226, -657.21551407, -\n 650.32050555, -643.42549702, -636.5304885, -629.63547997, -622.74047145,\n -615.84546292, -608.9504544, -602.05544588, -595.16043735, -\n 588.26542882, -581.3704203, -574.47541177, -567.58040325, -560.68539472,\n -553.7903862, -546.89537767, -540.00036915, -533.10536063, -526.2103521,\n -519.31534357, -512.42033505, -505.52532652, -498.630318, -491.73530947,\n -484.84030095, -477.94529242, -471.0502839, -464.15527537, -\n 457.26026685, -450.36525832, -443.4702498, -436.57524127, -429.68023275,\n -422.78522422, -415.8902157, -408.99520717, -402.10019865, -\n 395.20519012, -388.3101816, -381.41517307, -374.52016455, -367.62515602,\n -360.7301475, -353.83513897, -346.94013045, -340.04512192, -333.1501134,\n -326.25510487, -319.36009635, -312.46508782, -305.5700793, -\n 298.67507077, -291.78006225, -284.88505372, -277.9900452, -271.09503667,\n -264.20002815, -257.30501962, -250.4100111, -243.51500257, -\n 236.61999405, -229.72498552, -222.829977, -215.93496847, -209.03995995,\n -202.14495142, -195.2499429, -188.35493437, -181.45992585, -\n 174.56491732, -167.6699088, -160.77490027, -153.87989175, -146.98488322,\n -140.0898747, -133.19486617, -126.29985765, -119.40484912, -112.5098406,\n -105.61483207, -98.71982355, -91.82481502, -84.9298065, -78.03479797, -\n 71.13978945, -64.24478092, -57.3497724, -50.45476387, -43.55975535, -\n 36.66474682, -29.7697383, -22.87472977, -15.97972125, -9.08471272, -\n 2.1897042, 4.70530433, 11.60031285, 18.49532138, 25.3903299, \n 32.28533843, 39.18034695, 46.07535548, 52.970364, 59.86537253, \n 66.76038105, 73.65538958, 80.5503981, 87.44540663, 94.34041515, \n 101.23542368, 108.1304322, 115.02544073, 121.92044925, 128.81545778, \n 135.7104663, 142.60547483, 149.50048335, 156.39549188, 163.2905004, \n 170.18550893, 177.08051745, 183.97552598, 190.8705345, 197.76554303, \n 204.66055155, 211.55556008, 218.4505686, 225.34557713, 232.24058565, \n 
239.13559418, 246.0306027, 252.92561123, 259.82061975, 266.71562828, \n 273.6106368, 280.50564533, 287.40065385, 294.29566238, 301.1906709, \n 308.08567943, 314.98068795, 321.87569648, 328.770705])\n', (759, 3837), True, 'import numpy as np\n'), ((5168, 7700), 'numpy.array', 'np.array', (['[0.00788452, 0.00860721, 0.00932989, 0.01005257, 0.01077526, 0.01149794, \n 0.01222062, 0.01294331, 0.01366599, 0.01438868, 0.01511136, 0.01949853,\n 0.02447029, 0.02685264, 0.029235, 0.03816535, 0.03870205, 0.03923875, \n 0.03977545, 0.04031214, 0.04084884, 0.04138554, 0.04192224, 0.04245893,\n 0.04299563, 0.04353233, 0.04406903, 0.04460572, 0.04514242, 0.04587917,\n 0.04666457, 0.04744996, 0.04823536, 0.04902075, 0.04980615, 0.05059154,\n 0.05137694, 0.05216234, 0.05515276, 0.05817619, 0.06144373, 0.06480917,\n 0.07465794, 0.08016102, 0.0870895, 0.08841823, 0.08974696, 0.09107569, \n 0.09240443, 0.09606674, 0.10644608, 0.11426144, 0.11800995, 0.12511504,\n 0.13906042, 0.14134499, 0.14362955, 0.1500741, 0.15956579, 0.17179116, \n 0.18368956, 0.18592559, 0.18816161, 0.19211337, 0.19841317, 0.2029537, \n 0.20541953, 0.20788537, 0.21266664, 0.22193011, 0.22927148, 0.23452336,\n 0.23801757, 0.24402634, 0.24829478, 0.2593486, 0.26444222, 0.26710829, \n 0.2786814, 0.28588673, 0.29055915, 0.30362949, 0.30988025, 0.31149286, \n 0.31310547, 0.31486499, 0.31998583, 0.33378933, 0.34527317, 0.36329378,\n 0.36957257, 0.37964864, 0.38529406, 0.38825826, 0.39021331, 0.39216836,\n 0.41376866, 0.42376066, 0.43533998, 0.460219, 0.47645825, 0.49932141, \n 0.51131487, 0.5139218, 0.52057234, 0.52577312, 0.53170762, 0.54289616, \n 0.5514161, 0.55749038, 0.56657762, 0.57084495, 0.5756384, 0.58497631, \n 0.59406697, 0.60124967, 0.61191264, 0.61861008, 0.63526535, 0.64147876,\n 0.64424147, 0.64812518, 0.65628559, 0.6611236, 0.66513402, 0.67247715, \n 0.6865401, 0.69855775, 0.70405424, 0.70993554, 0.71712121, 0.74659026, \n 0.75933937, 0.76285267, 0.77698097, 0.77843327, 0.77988556, 0.78203854,\n 
0.79001124, 0.79668959, 0.80160361, 0.81324827, 0.81889569, 0.83359027,\n 0.8376273, 0.84045596, 0.84676012, 0.85137567, 0.86174448, 0.86334239, \n 0.86940656, 0.88236389, 0.88443058, 0.88731063, 0.89183958, 0.89734229,\n 0.90254643, 0.90947058, 0.91505731, 0.92255361, 0.93161672, 0.93531119,\n 0.93928433, 0.94184878, 0.94358849, 0.9453282, 0.94657215, 0.94779349, \n 0.94905589, 0.95096511, 0.95565876, 0.9591155, 0.96179936, 0.96485932, \n 0.96873016, 0.97047112, 0.9715957, 0.97272028, 0.97393729, 0.97516837, \n 0.97661306, 0.97872525, 0.97977577, 0.98043513, 0.98109449, 0.98175384,\n 0.98267924, 0.9861103, 0.9879409, 0.99216572, 0.99465729, 0.99529709, \n 0.99593689, 0.99657669, 0.99721649, 0.99777873, 0.99833404, 0.99888936,\n 0.99944468, 1.0]'], {}), '([0.00788452, 0.00860721, 0.00932989, 0.01005257, 0.01077526, \n 0.01149794, 0.01222062, 0.01294331, 0.01366599, 0.01438868, 0.01511136,\n 0.01949853, 0.02447029, 0.02685264, 0.029235, 0.03816535, 0.03870205, \n 0.03923875, 0.03977545, 0.04031214, 0.04084884, 0.04138554, 0.04192224,\n 0.04245893, 0.04299563, 0.04353233, 0.04406903, 0.04460572, 0.04514242,\n 0.04587917, 0.04666457, 0.04744996, 0.04823536, 0.04902075, 0.04980615,\n 0.05059154, 0.05137694, 0.05216234, 0.05515276, 0.05817619, 0.06144373,\n 0.06480917, 0.07465794, 0.08016102, 0.0870895, 0.08841823, 0.08974696, \n 0.09107569, 0.09240443, 0.09606674, 0.10644608, 0.11426144, 0.11800995,\n 0.12511504, 0.13906042, 0.14134499, 0.14362955, 0.1500741, 0.15956579, \n 0.17179116, 0.18368956, 0.18592559, 0.18816161, 0.19211337, 0.19841317,\n 0.2029537, 0.20541953, 0.20788537, 0.21266664, 0.22193011, 0.22927148, \n 0.23452336, 0.23801757, 0.24402634, 0.24829478, 0.2593486, 0.26444222, \n 0.26710829, 0.2786814, 0.28588673, 0.29055915, 0.30362949, 0.30988025, \n 0.31149286, 0.31310547, 0.31486499, 0.31998583, 0.33378933, 0.34527317,\n 0.36329378, 0.36957257, 0.37964864, 0.38529406, 0.38825826, 0.39021331,\n 0.39216836, 0.41376866, 0.42376066, 0.43533998, 0.460219, 
0.47645825, \n 0.49932141, 0.51131487, 0.5139218, 0.52057234, 0.52577312, 0.53170762, \n 0.54289616, 0.5514161, 0.55749038, 0.56657762, 0.57084495, 0.5756384, \n 0.58497631, 0.59406697, 0.60124967, 0.61191264, 0.61861008, 0.63526535,\n 0.64147876, 0.64424147, 0.64812518, 0.65628559, 0.6611236, 0.66513402, \n 0.67247715, 0.6865401, 0.69855775, 0.70405424, 0.70993554, 0.71712121, \n 0.74659026, 0.75933937, 0.76285267, 0.77698097, 0.77843327, 0.77988556,\n 0.78203854, 0.79001124, 0.79668959, 0.80160361, 0.81324827, 0.81889569,\n 0.83359027, 0.8376273, 0.84045596, 0.84676012, 0.85137567, 0.86174448, \n 0.86334239, 0.86940656, 0.88236389, 0.88443058, 0.88731063, 0.89183958,\n 0.89734229, 0.90254643, 0.90947058, 0.91505731, 0.92255361, 0.93161672,\n 0.93531119, 0.93928433, 0.94184878, 0.94358849, 0.9453282, 0.94657215, \n 0.94779349, 0.94905589, 0.95096511, 0.95565876, 0.9591155, 0.96179936, \n 0.96485932, 0.96873016, 0.97047112, 0.9715957, 0.97272028, 0.97393729, \n 0.97516837, 0.97661306, 0.97872525, 0.97977577, 0.98043513, 0.98109449,\n 0.98175384, 0.98267924, 0.9861103, 0.9879409, 0.99216572, 0.99465729, \n 0.99529709, 0.99593689, 0.99657669, 0.99721649, 0.99777873, 0.99833404,\n 0.99888936, 0.99944468, 1.0])\n', (5176, 7700), True, 'import numpy as np\n'), ((8713, 8732), 'undaqTools.misc.cdf.percentile', 'percentile', (['u[:200]'], {}), '(u[:200])\n', (8723, 8732), False, 'from undaqTools.misc.cdf import CDF, percentile\n'), ((8750, 8809), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['r_bins', 'cdf.bin_edges'], {}), '(r_bins, cdf.bin_edges)\n', (8786, 8809), True, 'import numpy as np\n'), ((8818, 8886), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['r_percentiles', 'cdf.percentiles'], {}), '(r_percentiles, cdf.percentiles)\n', (8854, 8886), True, 'import numpy as np\n'), ((8953, 10406), 'numpy.array', 'np.array', (['[-1265.052, -1244.1328598, -1223.2137196, -1202.2945794, -1181.3754392, 
-\n 1160.456299, -1139.5371588, -1118.6180186, -1097.6988784, -1076.7797382,\n -1055.860598, -1034.9414578, -1014.0223176, -993.1031774, -972.1840372,\n -951.264897, -930.3457568, -909.4266166, -888.5074764, -867.5883362, -\n 846.669196, -825.7500558, -804.8309156, -783.9117754, -762.9926352, -\n 742.073495, -721.1543548, -700.2352146, -679.3160744, -658.3969342, -\n 637.477794, -616.5586538, -595.6395136, -574.7203734, -553.8012332, -\n 532.882093, -511.9629528, -491.0438126, -470.1246724, -449.2055322, -\n 428.286392, -407.3672518, -386.4481116, -365.5289714, -344.6098312, -\n 323.690691, -302.7715508, -281.8524106, -260.9332704, -240.0141302, -\n 219.09499, -198.1758498, -177.2567096, -156.3375694, -135.4184292, -\n 114.499289, -93.5801488, -72.6610086, -51.7418684, -30.8227282, -\n 9.903588, 11.0155522, 31.9346924, 52.8538326, 73.7729728, 94.692113, \n 115.6112532, 136.5303934, 157.4495336, 178.3686738, 199.287814, \n 220.2069542, 241.1260944, 262.0452346, 282.9643748, 303.883515, \n 324.8026552, 345.7217954, 366.6409356, 387.5600758, 408.479216, \n 429.3983562, 450.3174964, 471.2366366, 492.1557768, 513.074917, \n 533.9940572, 554.9131974, 575.8323376, 596.7514778, 617.670618, \n 638.5897582, 659.5088984, 680.4280386, 701.3471788, 722.266319, \n 743.1854592, 764.1045994, 785.0237396, 805.9428798, 826.86202]'], {}), '([-1265.052, -1244.1328598, -1223.2137196, -1202.2945794, -\n 1181.3754392, -1160.456299, -1139.5371588, -1118.6180186, -1097.6988784,\n -1076.7797382, -1055.860598, -1034.9414578, -1014.0223176, -993.1031774,\n -972.1840372, -951.264897, -930.3457568, -909.4266166, -888.5074764, -\n 867.5883362, -846.669196, -825.7500558, -804.8309156, -783.9117754, -\n 762.9926352, -742.073495, -721.1543548, -700.2352146, -679.3160744, -\n 658.3969342, -637.477794, -616.5586538, -595.6395136, -574.7203734, -\n 553.8012332, -532.882093, -511.9629528, -491.0438126, -470.1246724, -\n 449.2055322, -428.286392, -407.3672518, -386.4481116, -365.5289714, -\n 
344.6098312, -323.690691, -302.7715508, -281.8524106, -260.9332704, -\n 240.0141302, -219.09499, -198.1758498, -177.2567096, -156.3375694, -\n 135.4184292, -114.499289, -93.5801488, -72.6610086, -51.7418684, -\n 30.8227282, -9.903588, 11.0155522, 31.9346924, 52.8538326, 73.7729728, \n 94.692113, 115.6112532, 136.5303934, 157.4495336, 178.3686738, \n 199.287814, 220.2069542, 241.1260944, 262.0452346, 282.9643748, \n 303.883515, 324.8026552, 345.7217954, 366.6409356, 387.5600758, \n 408.479216, 429.3983562, 450.3174964, 471.2366366, 492.1557768, \n 513.074917, 533.9940572, 554.9131974, 575.8323376, 596.7514778, \n 617.670618, 638.5897582, 659.5088984, 680.4280386, 701.3471788, \n 722.266319, 743.1854592, 764.1045994, 785.0237396, 805.9428798, 826.86202])\n', (8961, 10406), True, 'import numpy as np\n'), ((11094, 12536), 'numpy.array', 'np.array', (['[0.000152682325, 0.000294558006, 0.000616047975, 0.000702918092, \n 0.000773474112, 0.000821793025, 0.000870111939, 0.0011040111, \n 0.00134481409, 0.00183295542, 0.00247881609, 0.00368607187, \n 0.00515865394, 0.0067305598, 0.00861060936, 0.0104788857, 0.0123147839,\n 0.0156306574, 0.0187118249, 0.0212461678, 0.0256479572, 0.0305641053, \n 0.0362309399, 0.0408826236, 0.0493068431, 0.0573786087, 0.0670094435, \n 0.0780975453, 0.0885867316, 0.0997965662, 0.11312371, 0.125876664, \n 0.142376196, 0.162244613, 0.183208974, 0.20536605, 0.22955633, \n 0.253282959, 0.277540635, 0.305427136, 0.336100492, 0.363831154, \n 0.394043118, 0.425583501, 0.45516396, 0.486837208, 0.52059962, \n 0.548129193, 0.578151849, 0.608547876, 0.640669188, 0.670746937, \n 0.701026286, 0.725971707, 0.751021605, 0.777398045, 0.80005938, \n 0.822450308, 0.84084744, 0.857498407, 0.875729509, 0.891310252, \n 0.905933655, 0.918565943, 0.929274795, 0.940657581, 0.948881056, \n 0.957538817, 0.963601434, 0.970164957, 0.975166523, 0.979733128, \n 0.983692163, 0.986929642, 0.989432271, 0.99154458, 0.993344271, \n 0.994115203, 0.995269177, 0.996206415, 
0.997047263, 0.997575959, \n 0.998237416, 0.99867817, 0.998903131, 0.999062383, 0.999389219, \n 0.999710548, 0.999739859, 0.999802006, 0.999820441, 0.999861218, \n 0.999912599, 0.999946445, 0.999963921, 0.999975528, 0.999981646, \n 0.999987764, 0.999993882, 1.0]'], {}), '([0.000152682325, 0.000294558006, 0.000616047975, 0.000702918092, \n 0.000773474112, 0.000821793025, 0.000870111939, 0.0011040111, \n 0.00134481409, 0.00183295542, 0.00247881609, 0.00368607187, \n 0.00515865394, 0.0067305598, 0.00861060936, 0.0104788857, 0.0123147839,\n 0.0156306574, 0.0187118249, 0.0212461678, 0.0256479572, 0.0305641053, \n 0.0362309399, 0.0408826236, 0.0493068431, 0.0573786087, 0.0670094435, \n 0.0780975453, 0.0885867316, 0.0997965662, 0.11312371, 0.125876664, \n 0.142376196, 0.162244613, 0.183208974, 0.20536605, 0.22955633, \n 0.253282959, 0.277540635, 0.305427136, 0.336100492, 0.363831154, \n 0.394043118, 0.425583501, 0.45516396, 0.486837208, 0.52059962, \n 0.548129193, 0.578151849, 0.608547876, 0.640669188, 0.670746937, \n 0.701026286, 0.725971707, 0.751021605, 0.777398045, 0.80005938, \n 0.822450308, 0.84084744, 0.857498407, 0.875729509, 0.891310252, \n 0.905933655, 0.918565943, 0.929274795, 0.940657581, 0.948881056, \n 0.957538817, 0.963601434, 0.970164957, 0.975166523, 0.979733128, \n 0.983692163, 0.986929642, 0.989432271, 0.99154458, 0.993344271, \n 0.994115203, 0.995269177, 0.996206415, 0.997047263, 0.997575959, \n 0.998237416, 0.99867817, 0.998903131, 0.999062383, 0.999389219, \n 0.999710548, 0.999739859, 0.999802006, 0.999820441, 0.999861218, \n 0.999912599, 0.999946445, 0.999963921, 0.999975528, 0.999981646, \n 0.999987764, 0.999993882, 1.0])\n', (11102, 12536), True, 'import numpy as np\n'), ((13710, 13736), 'undaqTools.misc.cdf.percentile', 'percentile', (['u'], {'numbins': '(100)'}), '(u, numbins=100)\n', (13720, 13736), False, 'from undaqTools.misc.cdf import CDF, percentile\n'), ((13746, 13805), 'numpy.testing.assert_array_almost_equal', 
'np.testing.assert_array_almost_equal', (['r_bins', 'cdf.bin_edges'], {}), '(r_bins, cdf.bin_edges)\n', (13782, 13805), True, 'import numpy as np\n'), ((13814, 13882), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['r_percentiles', 'cdf.percentiles'], {}), '(r_percentiles, cdf.percentiles)\n', (13850, 13882), True, 'import numpy as np\n'), ((14159, 14172), 'undaqTools.misc.cdf.percentile', 'percentile', (['u'], {}), '(u)\n', (14169, 14172), False, 'from undaqTools.misc.cdf import CDF, percentile\n'), ((14307, 14320), 'undaqTools.misc.cdf.percentile', 'percentile', (['u'], {}), '(u)\n', (14317, 14320), False, 'from undaqTools.misc.cdf import CDF, percentile\n'), ((14471, 14484), 'undaqTools.misc.cdf.percentile', 'percentile', (['u'], {}), '(u)\n', (14481, 14484), False, 'from undaqTools.misc.cdf import CDF, percentile\n'), ((14634, 14647), 'undaqTools.misc.cdf.percentile', 'percentile', (['u'], {}), '(u)\n', (14644, 14647), False, 'from undaqTools.misc.cdf import CDF, percentile\n'), ((14975, 14988), 'undaqTools.misc.cdf.percentile', 'percentile', (['u'], {}), '(u)\n', (14985, 14988), False, 'from undaqTools.misc.cdf import CDF, percentile\n'), ((15291, 15304), 'undaqTools.misc.cdf.percentile', 'percentile', (['u'], {}), '(u)\n', (15301, 15304), False, 'from undaqTools.misc.cdf import CDF, percentile\n'), ((15419, 15435), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (15428, 15435), True, 'import matplotlib.pyplot as plt\n'), ((15672, 15698), 'undaqTools.misc.cdf.percentile', 'percentile', (['u'], {'numbins': '(100)'}), '(u, numbins=100)\n', (15682, 15698), False, 'from undaqTools.misc.cdf import CDF, percentile\n'), ((15739, 15799), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['cdf.percentiles', 'cdf2.percentiles'], {}), '(cdf.percentiles, cdf2.percentiles)\n', (15764, 15799), False, 'from numpy.testing import assert_array_almost_equal\n'), ((15808, 15864), 
'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['cdf.bin_edges', 'cdf2.bin_edges'], {}), '(cdf.bin_edges, cdf2.bin_edges)\n', (15833, 15864), False, 'from numpy.testing import assert_array_almost_equal\n'), ((16100, 16126), 'undaqTools.misc.cdf.percentile', 'percentile', (['u'], {'numbins': '(100)'}), '(u, numbins=100)\n', (16110, 16126), False, 'from undaqTools.misc.cdf import CDF, percentile\n'), ((16349, 16362), 'undaqTools.misc.cdf.percentile', 'percentile', (['u'], {}), '(u)\n', (16359, 16362), False, 'from undaqTools.misc.cdf import CDF, percentile\n'), ((16564, 16599), 'unittest.makeSuite', 'unittest.makeSuite', (['Test_percentile'], {}), '(Test_percentile)\n', (16582, 16599), False, 'import unittest\n'), ((16613, 16655), 'unittest.makeSuite', 'unittest.makeSuite', (['Test_percentile_bounds'], {}), '(Test_percentile_bounds)\n', (16631, 16655), False, 'import unittest\n'), ((16669, 16702), 'unittest.makeSuite', 'unittest.makeSuite', (['Test_cdf_find'], {}), '(Test_cdf_find)\n', (16687, 16702), False, 'import unittest\n'), ((16716, 16749), 'unittest.makeSuite', 'unittest.makeSuite', (['Test_cdf_plot'], {}), '(Test_cdf_plot)\n', (16734, 16749), False, 'import unittest\n'), ((16763, 16796), 'unittest.makeSuite', 'unittest.makeSuite', (['Test_cdf_repr'], {}), '(Test_cdf_repr)\n', (16781, 16796), False, 'import unittest\n'), ((16810, 16842), 'unittest.makeSuite', 'unittest.makeSuite', (['Test_cdf_str'], {}), '(Test_cdf_str)\n', (16828, 16842), False, 'import unittest\n')] |
from __future__ import print_function
import os
import numpy as np
import cv2
from keras.utils import Sequence
from cocoaugmenter.datagen import CocoDataGen
class CocoSeq(Sequence):
    """Keras ``Sequence`` producing (image, segmentation) training batches.

    Each batch is drawn independently from a ``CocoDataGen`` sampler, so an
    "epoch" is simply ``batches_per_epoch`` random batches rather than one
    pass over a fixed index set.
    """

    def __init__(self,
                 batch_size,
                 batches_per_epoch,
                 data_dir,
                 class_grps,
                 grp_probs,
                 training = True,
                 target_width = 128,
                 min_src_width = 64,
                 height_shift_range = 0.2,
                 width_shift_range = 0.2,
                 zoom_range = (0.5, 1.0),
                 horizontal_flip = True,
                 cache_mask_imgs = False):
        # Batch geometry / schedule.
        self.batch_size = batch_size
        self.batches_per_epoch = batches_per_epoch
        # Sampler configuration (forwarded to CocoDataGen and its sample()).
        self.data_dir = data_dir
        self.class_grps = class_grps
        self.grp_probs = grp_probs
        self.training = training
        self.target_width = target_width
        self.min_src_width = min_src_width
        # Augmentation parameters.
        self.height_shift_range = height_shift_range
        self.width_shift_range = width_shift_range
        self.zoom_range = zoom_range
        self.horizontal_flip = horizontal_flip
        self.cache_mask_imgs = cache_mask_imgs
        self.data_gen = CocoDataGen(dataDir = self.data_dir,
                                    classGrps = self.class_grps,
                                    grpProbs = self.grp_probs,
                                    cacheMaskImgs = self.cache_mask_imgs)

    def __len__(self):
        """Number of batches Keras should request per epoch."""
        return self.batches_per_epoch

    def __getitem__(self, idx):
        """Return one randomly sampled (batch_x, batch_y) pair of arrays.

        ``idx`` is required by the ``Sequence`` interface but is ignored:
        every call draws a fresh random batch from the sampler.
        """
        sample_args = {'training': self.training,
                       'targetWidth': self.target_width,
                       'minSrcWidth': self.min_src_width,
                       'heightShiftRange': self.height_shift_range,
                       'widthShiftRange': self.width_shift_range,
                       'zoomRange': self.zoom_range,
                       'horizontalFlip': self.horizontal_flip}
        batch_x, batch_y = [], []
        # FIX: the loop variable was previously named `idx`, silently
        # shadowing the batch-index parameter above; use a throwaway name.
        for _ in range(self.batch_size):
            # The metadata returned by sample() is not needed for training.
            img_tensor, seg_tensor, _metadata = self.data_gen.sample(**sample_args)
            batch_x.append(img_tensor)
            batch_y.append(seg_tensor)
        return np.array(batch_x), np.array(batch_y)
| [
"numpy.array",
"cocoaugmenter.datagen.CocoDataGen"
] | [((1358, 1485), 'cocoaugmenter.datagen.CocoDataGen', 'CocoDataGen', ([], {'dataDir': 'self.data_dir', 'classGrps': 'self.class_grps', 'grpProbs': 'self.grp_probs', 'cacheMaskImgs': 'self.cache_mask_imgs'}), '(dataDir=self.data_dir, classGrps=self.class_grps, grpProbs=self\n .grp_probs, cacheMaskImgs=self.cache_mask_imgs)\n', (1369, 1485), False, 'from cocoaugmenter.datagen import CocoDataGen\n'), ((2391, 2408), 'numpy.array', 'np.array', (['batch_x'], {}), '(batch_x)\n', (2399, 2408), True, 'import numpy as np\n'), ((2410, 2427), 'numpy.array', 'np.array', (['batch_y'], {}), '(batch_y)\n', (2418, 2427), True, 'import numpy as np\n')] |
import numpy as np
from numpy import pi,sin,cos,arctan
import subprocess
#########################################
# neural_style.lua command-line options, listed in the exact order they will
# be emitted by build_args().  Commented-out keys are intentionally unset.
args = {
    # --- Commonly changed for art ---
    'output_image': "out/out.jpg",          # [out.png]
    'style_image': "styles/elephant.jpg",   # Style target image [examples/inputs/seated-nude.jpg]
    'content_image': "content/carlos.jpg",  # Content target image [examples/inputs/tubingen.jpg]
    'style_scale': 1,                       # [1]
    'style_weight': 100,                    # [100]
    'content_weight': 5,                    # [5]
    'init': "random",                       # random|image [random]
    # 'init_image': "",                     # []
    'original_colors': 0,                   # [0]
    'image_size': 512,                      # Maximum height / width of generated image [512]
    'num_iterations': 601,                  # [1000]
    'save_iter': 100,                       # [100]
    'print_iter': 50,                       # [50]
    # --- I havent touched ---
    'tv_weight': 0.001,                     # [0.001]
    'pooling': "max",                       # max|avg [max]
    'seed': -1,                             # [-1]
    'proto_file': "models/VGG_ILSVRC_19_layers_deploy.prototxt",  # [models/VGG_ILSVRC_19_layers_deploy.prototxt]
    'model_file': "models/VGG_ILSVRC_19_layers.caffemodel",       # [models/VGG_ILSVRC_19_layers.caffemodel]
    'content_layers': "relu4_2",            # layers for content [relu4_2]
    'style_layers': "relu1_1,relu2_1,relu3_1,relu4_1,relu5_1",    # layers for style
    'lbfgs_num_correction': 0,              # [0]
    # 'style_blend_weights': "",            # [nil]
    # --- Seem to be well tuned ---
    'optimizer': "lbfgs",                   # lbfgs|adam [lbfgs]
    # 'normalize_gradients': "",            # []
    'learning_rate': 10,                    # [10]
    'gpu': 0,                               # Zero-indexed GPU id; -1 for CPU mode [0]
    'cudnn_autotune': "",                   # empty string => bare flag [false]
    'backend': "cudnn",                     # nn|cudnn|clnn [nn]
}
def build_args(args):
    """Turn an option dict into a `th neural_style.lua -key value ...` argv list.

    Each key/value pair is formatted as "-key value" and split on spaces;
    empty tokens are dropped, so an option whose value is the empty string
    becomes a bare flag.
    """
    tokens = ["th", "neural_style.lua"]
    for key, value in args.items():
        tokens.extend("-{} {}".format(key, value).split(" "))
    return [tok for tok in tokens if tok]
def get_name(path):
    """Return the file name of *path* up to its first dot (no directory, no extension)."""
    basename = path.rsplit("/", 1)[-1]
    return basename.partition(".")[0]
#########################################
# Calibration sweep: for a style image, render the content image at several
# style/content weight ratios, then montage the results into a single sheet.
max_sw=args['style_weight']    # maximum style weight (scaled by sin(theta) below)
max_cw=args['content_weight']  # maximum content weight (scaled by cos(theta) below)
style=args['style_image'].split("/")[-1].split(".")[0]
content=args['content_image'].split("/")[-1].split(".")[0]
rowlist=[]  # per-ratio montage rows, stacked into the final sheet at the end
# Iterate over every file in styles/ — but note the override and the trailing
# `break`: as written only the hard-coded "bubblewrap.jpg" style is processed.
for style in filter(None, subprocess.run("ls styles",shell=True,capture_output=True).stdout.decode('utf-8').split("\n") ):
    style="bubblewrap.jpg"  # debug override: ignores the file listed by `ls`
    # Debug print spam (left as-is).
    print(style)
    print(style)
    print(style)
    print(style)
    print(style)
    print(style)
    print(style)
    print(style)
    print(style)
    print(style)
    print(style)
    print(style)
    print(style)
    print(style)
    args['style_image']="styles/"+style #Style target image
    # NOTE(review): rstrip(".jpg") strips *characters* from the set {., j, p, g},
    # not the literal suffix — "bubblewrap.jpg".rstrip(".jpg") == "bubblewra".
    # Probably intended style[:-4]; filenames are merely mangled, not broken.
    suff=style.rstrip(".jpg")+"-"+content+"-calibration.jpg"
    # Sweep the style:content ratio: sw/cw == tan(theta) * (max_sw/max_cw).
    for tan_theta in np.linspace(0.1,0.9,6):
        # break
        # More debug print spam (left as-is).
        print(tan_theta)
        print(tan_theta)
        print(tan_theta)
        print(tan_theta)
        print(tan_theta)
        print(tan_theta)
        print(tan_theta)
        print(tan_theta)
        print(tan_theta)
        theta=arctan(tan_theta)
        args['style_weight'],args['content_weight']=max_sw*sin(theta), max_cw*cos(theta)
        #content_ratio="{0:1.2f}".format(cos(theta))
        tan_theta_str="{0:1.2f}".format(tan_theta)
        # build output filename
        args['output_image']="output/"+tan_theta_str+"-"+suff
        print(build_args(args))
        # NOTE(review): same rstrip(".jpg") character-set issue as above; here it
        # happens to work because the name ends in "...calibration.jpg".
        rowname="{}-row.jpg".format(args['output_image'].rstrip(".jpg") )
        # Run the style transfer, then montage its saved iterations into a row.
        subprocess.run(build_args(args) )
        subprocess.run(\
            ["convert", "+append", "{0}_*.jpg".format( args['output_image'].rstrip(".jpg") ), rowname]\
        )
        rowlist.append(rowname)
    # Stack all ratio rows vertically into the full calibration sheet.
    subprocess.run(\
        ["convert", "-append"]+rowlist+["output/full-{}".format(suff)]\
    )
    subprocess.run("notify-send finished another calibration!",shell=True)
    break  # only the first (overridden) style is processed
| [
"subprocess.run",
"numpy.sin",
"numpy.cos",
"numpy.linspace",
"numpy.arctan"
] | [((2661, 2685), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.9)', '(6)'], {}), '(0.1, 0.9, 6)\n', (2672, 2685), True, 'import numpy as np\n'), ((3681, 3752), 'subprocess.run', 'subprocess.run', (['"""notify-send finished another calibration!"""'], {'shell': '(True)'}), "('notify-send finished another calibration!', shell=True)\n", (3695, 3752), False, 'import subprocess\n'), ((2939, 2956), 'numpy.arctan', 'arctan', (['tan_theta'], {}), '(tan_theta)\n', (2945, 2956), False, 'from numpy import pi, sin, cos, arctan\n'), ((3016, 3026), 'numpy.sin', 'sin', (['theta'], {}), '(theta)\n', (3019, 3026), False, 'from numpy import pi, sin, cos, arctan\n'), ((3035, 3045), 'numpy.cos', 'cos', (['theta'], {}), '(theta)\n', (3038, 3045), False, 'from numpy import pi, sin, cos, arctan\n'), ((2121, 2181), 'subprocess.run', 'subprocess.run', (['"""ls styles"""'], {'shell': '(True)', 'capture_output': '(True)'}), "('ls styles', shell=True, capture_output=True)\n", (2135, 2181), False, 'import subprocess\n')] |
import numpy as np
import pickle
import multiprocessing as mp
import tqdm
from models.vaccination.create_model_vaccination import create_model_splines
from functions.tools import get_model_output
# Build (or load) the compiled vaccination model once at module import time,
# so the model/solver are available to the pool workers.
create_model = False  # False: reuse the previously compiled model on disk
model_name = "vaccination_multi_test"
path_sbml = f"stored_models/{model_name}/" + model_name
model_directory = "stored_models/" + model_name + "/vaccination_dir"
max_T = 140  # length of the decision period (time units — presumably days; confirm)
created_model = create_model_splines(
    create_model=create_model,
    number_areas=2,
    number_vaccines=2,
    vaccinated_compartments=["susceptible", "infectious"],
    number_viruses=2,
    length_decision_period=max_T,
    number_yy=int(max_T / 14 + 1),  # int(140/14 + 1) == 11 spline nodes over the period
    model_name=model_name,
    path_sbml=path_sbml,
    model_directory=model_directory,
    number_xx_R=3,
)
# Compiled model and ODE solver (AMICI-style objects, judging by the API used
# below — confirm).
model = created_model["model"]
solver = created_model["solver"]
# Scenario switches: only the "Unequal" branches are exercised in this script.
vac = "Unequal" #only
initial_states = "Unequal"
def run_n_vacc(n_vacc):
    """Optimize vaccine allocation for a total per-period supply of `n_vacc`.

    Builds the full parameter set for the two-area / two-vaccine / two-virus
    model, runs the optimization via `get_model_output`, pickles the result
    to disk and returns it.  Intended to be mapped over a process pool.
    """
    # type (used for name while saving pickle object)
    # Re-read model/solver from the module-level `created_model` (shadows the
    # module globals of the same name; identical objects).
    model = created_model["model"]
    solver = created_model["solver"]
    vac = "Unequal" #only
    initial_states = "Unequal"
    specification = f"inital{initial_states}_vac{vac}"
    # NOTE(review): hard-coded absolute output path; "inital" is a typo kept
    # consistently in this script's file names.
    path = (
        f"/home/manuel/Documents/VaccinationDistribution/code/objects/{specification}_nvacc_{n_vacc}.pkl"
    )
    if vac == "Equal":
        # Only vaccine 1 is distributed: optimize its parameters and give
        # vaccine 2 zero supply at every spline node.
        par_to_optimize = [x for x in model.getParameterNames() if "vac1" in x]
        vaccine_supply_parameter_values = np.concatenate(
            (
                np.repeat(  # change here
                    float(n_vacc), (created_model["information"]["number_yy"] - 1)
                ),
                np.repeat(float(0), (created_model["information"]["number_yy"] - 1)),
            )
        )
        # delta/omega: per-vaccine, per-virus effect parameters — presumably
        # vaccine efficacies; confirm against the model definition.
        delta = {
            "delta_vac1_virus1": 0.95,
            "delta_vac1_virus2": 0.6,
            "delta_vac2_virus1": 0,
            "delta_vac2_virus2": 0,
        }
        omega = {
            "omega_vac1_virus1": 0.9,
            "omega_vac1_virus2": 0.9,
            "omega_vac2_virus1": 0,
            "omega_vac2_virus2": 0,
        }
    elif vac == "Unequal":
        # Both vaccines distributed: optimize all parameters, same supply for
        # each vaccine; vaccine 1 is better against virus 1 and vice versa.
        par_to_optimize = model.getParameterNames()
        vaccine_supply_parameter_values = np.repeat(
            float(n_vacc), 2 * (created_model["information"]["number_yy"] - 1)
        )
        delta = {
            "delta_vac1_virus1": 0.95,
            "delta_vac1_virus2": 0.6,
            "delta_vac2_virus1": 0.6,
            "delta_vac2_virus2": 0.95,
        }
        omega = {
            "omega_vac1_virus1": 0.9,
            "omega_vac1_virus2": 0.9,
            "omega_vac2_virus1": 0.9,
            "omega_vac2_virus2": 0.9,
        }
    # Initial infectious counts per area/virus at t0.
    if initial_states == "Equal":
        infectious_t0 = {
            "infectious_countryA_vac0_virus1_t0": 5,
            "infectious_countryA_vac0_virus2_t0": 5,
            "infectious_countryB_vac0_virus1_t0": 5,
            "infectious_countryB_vac0_virus2_t0": 5,
        }
    elif initial_states == "Unequal":
        # Virus 1 starts only in country A, virus 2 only in country B.
        infectious_t0 = {
            "infectious_countryA_vac0_virus1_t0": 10,
            "infectious_countryA_vac0_virus2_t0": 0,
            "infectious_countryB_vac0_virus1_t0": 0,
            "infectious_countryB_vac0_virus2_t0": 10,
        }
    # Keep only parameters whose trailing numeric index is <= 8 — presumably
    # restricting optimization to the first spline nodes; confirm.
    par_to_optimize = [x for x in par_to_optimize if float(x.rsplit("_", 1)[-1]) <= 8]
    # areas
    areas = created_model["information"]["areas"]
    # set Parameters
    timepoints = np.linspace(0, max_T, 6000)
    model.setTimepoints(timepoints)
    # Pair the model's fixed vaccine-supply parameter names with the values
    # built above (relies on matching order).
    vaccine_supply_parameter_strings = [
        x for x in model.getFixedParameterNames() if "vaccine_supply" in x
    ]
    vaccine_supply_parameter = dict(
        zip(vaccine_supply_parameter_strings, vaccine_supply_parameter_values)
    )
    # Epidemiological constants (rates/probabilities — names suggest SIR-style
    # parameters; confirm against the model).
    general_parameters = {
        "lambda1": 0.1,
        "prob_deceasing": 0.02,
        "gamma": 0.5,
        "beta": 0.24,
    }
    # Spline values for the time-varying reproduction-number factor, per area.
    R0s = {
        "yR0_countryA_0": 1,
        "yR0_countryA_1": 1,
        "yR0_countryA_2": 1,
        "yR0_countryB_0": 1,
        "yR0_countryB_1": 1,
        "yR0_countryB_2": 1,
    }
    eta = {"eta_virus2": 2}  # virus-2 modifier (e.g. transmissibility — confirm)
    susceptible_t0 = {
        "susceptible_countryA_vac0_t0": 10 ** 7,
        "susceptible_countryB_vac0_t0": 10 ** 7,
    }
    # Cross-area coupling: weak mixing between countries, full within.
    distances = {
        "distance_countryA_countryB": 1 / 1000,
        "distance_countryB_countryA": 1 / 1000,
        "distance_countryA_countryA": 1,
        "distance_countryB_countryB": 1,
    }
    # Merge all parameter groups into one flat dict for the optimizer.
    parameters = {
        **general_parameters,
        **omega,
        **delta,
        **eta,
        **susceptible_t0,
        **infectious_t0,
        **distances,
        **vaccine_supply_parameter,
        **R0s,
    }
    # Very loose integration tolerances: speed over accuracy.
    solver.setAbsoluteTolerance(1e-01)
    solver.setRelativeTolerance(1e-01)
    dict_out = get_model_output(
        model,
        solver,
        parameters,
        areas,
        par_to_optimize,
        n_starts_pb=50,
        n_starts_pareto=500,
        number_generations_pareto=50,
    )
    # Persist the result so each pool worker's output survives the run.
    with open(
        path,
        "wb",
    ) as output:
        out = dict_out
        pickle.dump(out, output, pickle.HIGHEST_PROTOCOL)
    return dict_out
# Sweep a range of total vaccine supplies, evaluating each in a worker process.
# FIX: `specification` was only defined *inside* run_n_vacc, so the
# module-level f-string below raised NameError; rebuild it here the same way.
# (The "inital" spelling is kept so the path matches the files run_n_vacc wrote.)
specification = f"inital{initial_states}_vac{vac}"

with mp.Pool() as p:
    results = list(
        tqdm.tqdm(
            p.imap_unordered(
                run_n_vacc,
                np.array([10, 15, 20, 25, 35, 45, 55, 65, 75, 85, 95, 105, 110]) * 10 ** 3,
            ),
            total=13,  # must match the number of supply values above
        )
    )

# Re-load one stored result for inspection.
# NOTE(review): 30000 is not among the supplies computed above (those are the
# array values * 1000) — confirm this pickle exists from an earlier run.
path = (
    f"/home/manuel/Documents/VaccinationDistribution/code/objects/{specification}_nvacc_30000.pkl"
)
with open(path, "rb") as fh:  # renamed from `input` to avoid shadowing the builtin
    loaded_object = pickle.load(fh)
| [
"functions.tools.get_model_output",
"pickle.dump",
"pickle.load",
"numpy.array",
"numpy.linspace",
"multiprocessing.Pool"
] | [((3459, 3486), 'numpy.linspace', 'np.linspace', (['(0)', 'max_T', '(6000)'], {}), '(0, max_T, 6000)\n', (3470, 3486), True, 'import numpy as np\n'), ((4824, 4962), 'functions.tools.get_model_output', 'get_model_output', (['model', 'solver', 'parameters', 'areas', 'par_to_optimize'], {'n_starts_pb': '(50)', 'n_starts_pareto': '(500)', 'number_generations_pareto': '(50)'}), '(model, solver, parameters, areas, par_to_optimize,\n n_starts_pb=50, n_starts_pareto=500, number_generations_pareto=50)\n', (4840, 4962), False, 'from functions.tools import get_model_output\n'), ((5208, 5217), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (5215, 5217), True, 'import multiprocessing as mp\n'), ((5638, 5656), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (5649, 5656), False, 'import pickle\n'), ((5127, 5176), 'pickle.dump', 'pickle.dump', (['out', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(out, output, pickle.HIGHEST_PROTOCOL)\n', (5138, 5176), False, 'import pickle\n'), ((5321, 5385), 'numpy.array', 'np.array', (['[10, 15, 20, 25, 35, 45, 55, 65, 75, 85, 95, 105, 110]'], {}), '([10, 15, 20, 25, 35, 45, 55, 65, 75, 85, 95, 105, 110])\n', (5329, 5385), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from model.config import cfg
from model.train_val import filter_roidb, SolverWrapper
from utils.timer import Timer
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
import os
import sys
import glob
import time
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
class MemorySolverWrapper(SolverWrapper):
    """
    A wrapper class for the training process of spatial memory.

    Overrides graph construction and the training loop of SolverWrapper so
    that gradient summaries are recorded only for the memory ('SMN'/'GMN')
    variables.
    """

    def construct_graph(self, sess):
        """Build the training graph inside `sess`'s default graph.

        Creates the network, the momentum optimizer, gradient histograms for
        the SMN/GMN variables, the saver, and the summary writers.

        Returns:
            (lr, train_op): the learning-rate variable and the training op.
        """
        with sess.graph.as_default():
            # Set the random seed for tensorflow
            tf.set_random_seed(cfg.RNG_SEED)
            # Build the main computation graph
            layers = self.net.create_architecture('TRAIN', self.imdb.num_classes, tag='default')
            # Define the loss
            loss = layers['total_loss']
            # Set learning rate and momentum
            lr = tf.Variable(cfg.TRAIN.RATE, trainable=False)
            self.optimizer = tf.train.MomentumOptimizer(lr, cfg.TRAIN.MOMENTUM)
            # Compute the gradients with regard to the loss
            gvs = self.optimizer.compute_gradients(loss)
            # Record histograms only for memory-network variables.
            grad_summaries = []
            for grad, var in gvs:
                if 'SMN' not in var.name and 'GMN' not in var.name:
                    continue
                grad_summaries.append(tf.summary.histogram('TRAIN/' + var.name, var))
                if grad is not None:
                    grad_summaries.append(tf.summary.histogram('GRAD/' + var.name, grad))
            # Double the gradient of the bias if set
            if cfg.TRAIN.DOUBLE_BIAS:
                final_gvs = []
                with tf.variable_scope('Gradient_Mult') as scope:
                    for grad, var in gvs:
                        scale = 1.
                        if cfg.TRAIN.DOUBLE_BIAS and '/biases:' in var.name:
                            scale *= 2.
                        # Only rewrap the gradient when the scale actually changed.
                        if not np.allclose(scale, 1.0):
                            grad = tf.multiply(grad, scale)
                        final_gvs.append((grad, var))
                train_op = self.optimizer.apply_gradients(final_gvs)
            else:
                train_op = self.optimizer.apply_gradients(gvs)
            self.summary_grads = tf.summary.merge(grad_summaries)
            # We will handle the snapshots ourselves
            self.saver = tf.train.Saver(max_to_keep=100000)
            # Write the train and validation information to tensorboard
            self.writer = tf.summary.FileWriter(self.tbdir, sess.graph)
            self.valwriter = tf.summary.FileWriter(self.tbvaldir)
        return lr, train_op

    def train_model(self, sess, max_iters):
        """Run the optimization loop for `max_iters` iterations.

        Restores from the latest snapshot when one exists, decays the
        learning rate at the configured step sizes, writes train/validation
        summaries periodically, and snapshots the model at regular intervals.
        """
        # Build data layers for both training and validation set
        self.data_layer = self.imdb.data_layer(self.roidb, self.imdb.num_classes)
        self.data_layer_val = self.imdb.data_layer(self.valroidb, self.imdb.num_classes, random=True)
        # Construct the computation graph
        lr, train_op = self.construct_graph(sess)
        # Find previous snapshots if there is any to restore from
        lsf, nfiles, sfiles = self.find_previous()
        # Initialize the variables or restore them from the last snapshot
        if lsf == 0:
            rate, last_snapshot_iter, stepsizes, np_paths, ss_paths = self.initialize(sess)
        else:
            rate, last_snapshot_iter, stepsizes, np_paths, ss_paths = self.restore(sess,
                                                                                   str(sfiles[-1]),
                                                                                   str(nfiles[-1]))
        timer = Timer()
        # NOTE(review): `iter` shadows the builtin throughout this loop.
        iter = last_snapshot_iter + 1
        last_summary_iter = iter
        last_summary_time = time.time()
        # Make sure the lists are not empty
        stepsizes.append(max_iters)
        stepsizes.reverse()
        next_stepsize = stepsizes.pop()
        while iter < max_iters + 1:
            # Learning rate
            if iter == next_stepsize + 1:
                # Add snapshot here before reducing the learning rate
                self.snapshot(sess, iter)
                rate *= cfg.TRAIN.GAMMA
                sess.run(tf.assign(lr, rate))
                next_stepsize = stepsizes.pop()
            timer.tic()
            # Get training data, one batch at a time
            blobs = self.data_layer.forward()
            now = time.time()
            # Write summaries on the first iteration and then at most once per
            # SUMMARY_INTERVAL seconds / SUMMARY_ITERS iterations.
            if iter == 1 or \
               (now - last_summary_time > cfg.TRAIN.SUMMARY_INTERVAL and \
                iter - last_summary_iter > cfg.TRAIN.SUMMARY_ITERS):
                # Compute the graph with summary
                loss_cls, total_loss, summary, gsummary = \
                  self.net.train_step_with_summary(sess, blobs, train_op, self.summary_grads)
                self.writer.add_summary(summary, float(iter))
                self.writer.add_summary(gsummary, float(iter+1))
                # Also check the summary on the validation set
                blobs_val = self.data_layer_val.forward()
                summary_val = self.net.get_summary(sess, blobs_val)
                self.valwriter.add_summary(summary_val, float(iter))
                last_summary_iter = iter
                last_summary_time = now
            else:
                # Compute the graph without summary
                loss_cls, total_loss = self.net.train_step(sess, blobs, train_op)
            timer.toc()
            # Display training information
            if iter % (cfg.TRAIN.DISPLAY) == 0:
                print('iter: %d / %d, total loss: %.6f\n >>> loss_cls: %.6f\n >>> lr: %f' % \
                      (iter, max_iters, total_loss, loss_cls, lr.eval()))
                print('speed: {:.3f}s / iter'.format(timer.average_time))
            # Snapshotting
            if iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = iter
                ss_path, np_path = self.snapshot(sess, iter)
                np_paths.append(np_path)
                ss_paths.append(ss_path)
                # Remove the old snapshots if there are too many
                if len(np_paths) > cfg.TRAIN.SNAPSHOT_KEPT:
                    self.remove_snapshot(np_paths, ss_paths)
            iter += 1
        # Final snapshot if the last iteration was not already snapshotted.
        if last_snapshot_iter != iter - 1:
            self.snapshot(sess, iter - 1)
        self.writer.close()
        self.valwriter.close()
def train_net(network, imdb, roidb, valroidb, output_dir, tb_dir,
              pretrained_model=None,
              max_iters=40000):
    """Train a Faster R-CNN network with memory.

    Filters the training/validation roidbs, opens a TF session configured
    for soft placement and on-demand GPU memory growth, and delegates the
    optimization loop to MemorySolverWrapper.
    """
    roidb = filter_roidb(roidb)
    valroidb = filter_roidb(valroidb)

    session_config = tf.ConfigProto(allow_soft_placement=True)
    session_config.gpu_options.allow_growth = True

    with tf.Session(config=session_config) as sess:
        wrapper = MemorySolverWrapper(sess, network, imdb, roidb, valroidb,
                                      output_dir, tb_dir,
                                      pretrained_model=pretrained_model)
        print('Solving...')
        wrapper.train_model(sess, max_iters)
        print('done solving')
| [
"tensorflow.train.Saver",
"numpy.allclose",
"tensorflow.Session",
"tensorflow.variable_scope",
"time.time",
"tensorflow.set_random_seed",
"tensorflow.ConfigProto",
"tensorflow.assign",
"model.train_val.filter_roidb",
"tensorflow.Variable",
"tensorflow.train.MomentumOptimizer",
"tensorflow.summ... | [((5949, 5968), 'model.train_val.filter_roidb', 'filter_roidb', (['roidb'], {}), '(roidb)\n', (5961, 5968), False, 'from model.train_val import filter_roidb, SolverWrapper\n'), ((5982, 6004), 'model.train_val.filter_roidb', 'filter_roidb', (['valroidb'], {}), '(valroidb)\n', (5994, 6004), False, 'from model.train_val import filter_roidb, SolverWrapper\n'), ((6019, 6060), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (6033, 6060), True, 'import tensorflow as tf\n'), ((3374, 3381), 'utils.timer.Timer', 'Timer', ([], {}), '()\n', (3379, 3381), False, 'from utils.timer import Timer\n'), ((3469, 3480), 'time.time', 'time.time', ([], {}), '()\n', (3478, 3480), False, 'import time\n'), ((6112, 6139), 'tensorflow.Session', 'tf.Session', ([], {'config': 'tfconfig'}), '(config=tfconfig)\n', (6122, 6139), True, 'import tensorflow as tf\n'), ((668, 700), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['cfg.RNG_SEED'], {}), '(cfg.RNG_SEED)\n', (686, 700), True, 'import tensorflow as tf\n'), ((941, 985), 'tensorflow.Variable', 'tf.Variable', (['cfg.TRAIN.RATE'], {'trainable': '(False)'}), '(cfg.TRAIN.RATE, trainable=False)\n', (952, 985), True, 'import tensorflow as tf\n'), ((1009, 1059), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['lr', 'cfg.TRAIN.MOMENTUM'], {}), '(lr, cfg.TRAIN.MOMENTUM)\n', (1035, 1059), True, 'import tensorflow as tf\n'), ((2080, 2112), 'tensorflow.summary.merge', 'tf.summary.merge', (['grad_summaries'], {}), '(grad_summaries)\n', (2096, 2112), True, 'import tensorflow as tf\n'), ((2180, 2214), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(100000)'}), '(max_to_keep=100000)\n', (2194, 2214), True, 'import tensorflow as tf\n'), ((2301, 2346), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.tbdir', 'sess.graph'], {}), '(self.tbdir, sess.graph)\n', (2322, 2346), True, 'import tensorflow as tf\n'), 
((2370, 2406), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.tbvaldir'], {}), '(self.tbvaldir)\n', (2391, 2406), True, 'import tensorflow as tf\n'), ((4028, 4039), 'time.time', 'time.time', ([], {}), '()\n', (4037, 4039), False, 'import time\n'), ((1329, 1375), 'tensorflow.summary.histogram', 'tf.summary.histogram', (["('TRAIN/' + var.name)", 'var'], {}), "('TRAIN/' + var.name, var)\n", (1349, 1375), True, 'import tensorflow as tf\n'), ((1602, 1636), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Gradient_Mult"""'], {}), "('Gradient_Mult')\n", (1619, 1636), True, 'import tensorflow as tf\n'), ((3848, 3867), 'tensorflow.assign', 'tf.assign', (['lr', 'rate'], {}), '(lr, rate)\n', (3857, 3867), True, 'import tensorflow as tf\n'), ((1438, 1484), 'tensorflow.summary.histogram', 'tf.summary.histogram', (["('GRAD/' + var.name)", 'grad'], {}), "('GRAD/' + var.name, grad)\n", (1458, 1484), True, 'import tensorflow as tf\n'), ((1812, 1835), 'numpy.allclose', 'np.allclose', (['scale', '(1.0)'], {}), '(scale, 1.0)\n', (1823, 1835), True, 'import numpy as np\n'), ((1858, 1882), 'tensorflow.multiply', 'tf.multiply', (['grad', 'scale'], {}), '(grad, scale)\n', (1869, 1882), True, 'import tensorflow as tf\n')] |
import numpy as np
class A2CRunner:
    """Drives an agent through a (vectorized) environment in fixed-size
    batches of steps, optionally training the agent after every batch."""

    def __init__(self, agent, env, n_updates=10000, n_steps=16, train=True):
        self.agent = agent
        self.env = env
        self.n_updates = n_updates
        self.n_steps = n_steps
        # Bug fix: `train` was accepted but never stored, so begin() raised
        # AttributeError the first time it read self.train.
        self.train = train
        # reset() fetches the initial observation; the original also called
        # env.reset() directly here, resetting the environment twice.
        self.reset()

    def reset(self):
        """Reset the environment and cache its initial observation."""
        self.observation = self.env.reset()

    def begin(self):
        """Run n_updates batches, training on each batch when enabled."""
        for _ in range(self.n_updates):
            train_args = self.run_batch()
            if self.train:
                self.agent.train(*train_args)

    def run_batch(self):
        """Roll the agent forward n_steps and collect per-step statistics.

        Returns
            (observations, actions, rewards, dones, values, next_value):
            rewards/dones/values are float32 arrays of shape
            (n_steps, env.n_envs); observations and actions are per-step
            lists; next_value is one extra agent evaluation used to
            bootstrap the returns calculation.
        """
        shape = (self.n_steps, self.env.n_envs)
        rewards = np.zeros(shape, dtype=np.float32)
        values = np.zeros(shape, dtype=np.float32)
        dones = np.zeros(shape, dtype=np.float32)
        # Observations/actions may be arrays of n_envs entries themselves,
        # so they are kept as plain per-step lists.
        observations = [None] * self.n_steps
        actions = [None] * self.n_steps
        for step in range(self.n_steps):
            action, value = self.agent.act(self.observation)
            actions[step], values[step] = action, value
            observations[step] = self.observation
            self.observation, rewards[step], dones[step] = self.env.step(
                self.agent.convert_actions(action))
            # Let the agent modifier reshape the raw environment reward.
            rewards[step] = self.agent.agent_modifier.modify_reward(
                self.observation, rewards[step], observations[step])
        next_value = self.agent.act(self.observation)
        return observations, actions, rewards, dones, values, next_value
| [
"numpy.zeros"
] | [((865, 899), 'numpy.zeros', 'np.zeros', (['shapes'], {'dtype': 'np.float32'}), '(shapes, dtype=np.float32)\n', (873, 899), True, 'import numpy as np\n'), ((917, 951), 'numpy.zeros', 'np.zeros', (['shapes'], {'dtype': 'np.float32'}), '(shapes, dtype=np.float32)\n', (925, 951), True, 'import numpy as np\n'), ((968, 1002), 'numpy.zeros', 'np.zeros', (['shapes'], {'dtype': 'np.float32'}), '(shapes, dtype=np.float32)\n', (976, 1002), True, 'import numpy as np\n')] |
'''
convenience functions for raster of points
'''
import numpy
import math
def createRaster(shape, spacing, angle, indices=False, limit=None):
    '''
    raster across entire image

    shape: (rows, cols) extent of the image.
    spacing: distance between raster points.
    angle: raster rotation in radians.
    indices: when True, return the integer (i, j) grid indices instead
    of the mapped image coordinates.
    '''
    cos_term = spacing * numpy.cos(angle)
    sin_term = spacing * numpy.sin(angle)
    # Basis mapping integer raster indices into image coordinates, and
    # its inverse for mapping image corners back into index space.
    E = numpy.array(((cos_term, sin_term), (-sin_term, cos_term)), numpy.float32)
    Einv = numpy.linalg.inv(E)
    ## define a range for the raster
    # Seed with 0 so the origin is always inside the index bounds.
    corner_rows = [0]
    corner_cols = [0]
    for point in ((0, shape[1]), (shape[0], 0), shape):
        row, col = numpy.dot(Einv, numpy.array(point, numpy.float32))
        corner_rows.append(int(row))
        corner_cols.append(int(col))
    mini, maxi = min(corner_rows), max(corner_rows)
    minj, maxj = min(corner_cols), max(corner_cols)
    # create full raster over whole image, keeping only points that land
    # inside the image bounds
    rasterpoints = []
    ind = []
    for i in range(mini, maxi + 1):
        for j in range(minj, maxj + 1):
            p = numpy.dot(E, numpy.array((i, j), numpy.float32))
            if (0 <= p[0] < shape[0]) and (0 <= p[1] < shape[1]):
                rasterpoints.append(tuple(p))
                ind.append((i, j))
    return ind if indices else rasterpoints
def createIndices(shape):
    '''
    square indices, centered on the middle of an array of the given shape

    Returns a list of (row, col) pairs, one per array element, shifted so
    the pattern is symmetric around the origin.
    '''
    ind = numpy.indices(shape, numpy.float32)
    center0 = shape[0] / 2.0 - 0.5
    center1 = shape[1] / 2.0 - 0.5
    ind[0] = ind[0] - center0
    ind[1] = ind[1] - center1
    # Materialize the pairs: under Python 3 zip() is a one-shot iterator,
    # so callers that iterate the result more than once (or take len())
    # would silently see an empty sequence the second time.
    indices = list(zip(ind[0].flat, ind[1].flat))
    return indices
def createIndices2(a,b,angle,limiting_shape='ellipse',offset=False,odd=False,tiltoffset=(0,0)):
    '''
    indices enclosed by an ellipse or rotated rectangle

    a, b: semi-axis lengths (ellipse) or half-widths (rectangle).
    angle: rotation of the limiting shape in radians.
    limiting_shape: 'ellipse' or 'rectangle'.
    offset: stagger every other row by half a column spacing.
    odd: shift the whole grid by (0.5, 0.25) before centering.
    tiltoffset: extra (row, col) shift applied to every index.
    Returns a list of (row, col) float pairs inside the shape.
    '''
    offsetvalues = (0.5,0.25)
    cos = math.cos(angle)
    sin = math.sin(angle)
    # Grid must be large enough to contain the rotated shape.
    maxab = math.ceil(max(a,b))
    if offset and not maxab % 2:
        # keep center offset pattern consistent
        maxab = maxab + 1
    maxind = 2 + 2 * maxab
    shape = maxind,maxind
    ind = numpy.indices(shape, numpy.float32)
    if offset:
        # Shift every odd-numbered row by half a column to stagger rows.
        adds = numpy.ma.where(ind[0] % 2 == 0, numpy.zeros(shape),numpy.ones(shape)*0.5)
        ind = numpy.array((ind[0],ind[1]+adds.data))
    if odd:
        ind[0] = ind[0] + offsetvalues[0]
        ind[1] = ind[1] + offsetvalues[1]
    ind[0] = ind[0] + tiltoffset[0]
    ind[1] = ind[1] + tiltoffset[1]
    # Re-center the grid on the origin.
    center0 = shape[0] / 2.0
    center1 = shape[1] / 2.0
    ind[0] = ind[0] - center0
    ind[1] = ind[1] - center1
    indices = zip(ind[0].flat, ind[1].flat)
    goodindices = []
    for index in indices:
        good = False
        # Rotate the index into the shape's own frame before testing.
        row = abs(index[0]*cos-index[1]*sin)
        col = abs(index[0]*sin+index[1]*cos)
        # Try small inward nudges of the test point: a point is accepted
        # if any nudged position falls inside the shape, which slightly
        # thickens the boundary that is considered "inside".
        testrange = (0.0,0.1,0.2,0.3,0.4,0.5)
        for deltarow in testrange:
            for deltacol in testrange:
                if row > deltarow:
                    testrow = row - deltarow
                else:
                    testrow = row
                if col > deltacol:
                    testcol = col - deltacol
                else:
                    testcol = col
                if limiting_shape == 'ellipse':
                    # ellipse shape
                    if (testcol/a)**2+(testrow/b)**2 <= 1:
                        goodindices.append(index)
                        good = True
                        break
                elif limiting_shape == 'rectangle':
                    # rectangular shape
                    if abs(testcol) <= a and abs(testrow) <= b:
                        goodindices.append(index)
                        good = True
                        break
            if good:
                break
    return goodindices
def createRaster2(spacing, angle, limit):
    '''
    raster across image, limited by square defined by limit

    spacing: distance between raster points.
    angle: raster rotation in radians.
    limit: side length of the square index grid to map.
    Returns a list of (row, col) tuples in image coordinates.
    '''
    co = spacing * numpy.cos(angle)
    si = spacing * numpy.sin(angle)
    # Basis that maps integer raster indices to image coordinates.
    # (The original also computed numpy.linalg.inv(E), which was never
    # used; the dead computation has been removed.)
    E = numpy.array(((co,si),(-si,co)), numpy.float32)
    # create full raster over whole image
    rasterpoints = []
    shape = limit,limit
    ind = createIndices(shape)
    for i in ind:
        p = numpy.dot(E, numpy.array(i, numpy.float32))
        rasterpoints.append(tuple(p))
    return rasterpoints
def createRaster3(spacing, angle, limitindices):
    '''
    raster across entire image, limited by index list

    spacing: distance between raster points.
    angle: raster rotation in radians.
    limitindices: iterable of (row, col) index pairs to map.
    Returns a list of (row, col) tuples in image coordinates.
    '''
    co = spacing * numpy.cos(angle)
    si = spacing * numpy.sin(angle)
    # Basis that maps integer raster indices to image coordinates.
    # (The original also computed numpy.linalg.inv(E), which was never
    # used; the dead computation has been removed.)
    E = numpy.array(((co,si),(-si,co)), numpy.float32)
    # create full raster over whole image
    rasterpoints = []
    for i in limitindices:
        p = numpy.dot(E, numpy.array(i, numpy.float32))
        rasterpoints.append(tuple(p))
    return rasterpoints
| [
"numpy.zeros",
"numpy.ones",
"math.sin",
"numpy.indices",
"numpy.sin",
"numpy.linalg.inv",
"numpy.array",
"math.cos",
"numpy.cos"
] | [((254, 303), 'numpy.array', 'numpy.array', (['((co, si), (-si, co))', 'numpy.float32'], {}), '(((co, si), (-si, co)), numpy.float32)\n', (265, 303), False, 'import numpy\n'), ((309, 328), 'numpy.linalg.inv', 'numpy.linalg.inv', (['E'], {}), '(E)\n', (325, 328), False, 'import numpy\n'), ((1171, 1206), 'numpy.indices', 'numpy.indices', (['shape', 'numpy.float32'], {}), '(shape, numpy.float32)\n', (1184, 1206), False, 'import numpy\n'), ((1577, 1592), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (1585, 1592), False, 'import math\n'), ((1600, 1615), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (1608, 1615), False, 'import math\n'), ((1791, 1826), 'numpy.indices', 'numpy.indices', (['shape', 'numpy.float32'], {}), '(shape, numpy.float32)\n', (1804, 1826), False, 'import numpy\n'), ((3228, 3277), 'numpy.array', 'numpy.array', (['((co, si), (-si, co))', 'numpy.float32'], {}), '(((co, si), (-si, co)), numpy.float32)\n', (3239, 3277), False, 'import numpy\n'), ((3283, 3302), 'numpy.linalg.inv', 'numpy.linalg.inv', (['E'], {}), '(E)\n', (3299, 3302), False, 'import numpy\n'), ((3716, 3765), 'numpy.array', 'numpy.array', (['((co, si), (-si, co))', 'numpy.float32'], {}), '(((co, si), (-si, co)), numpy.float32)\n', (3727, 3765), False, 'import numpy\n'), ((3771, 3790), 'numpy.linalg.inv', 'numpy.linalg.inv', (['E'], {}), '(E)\n', (3787, 3790), False, 'import numpy\n'), ((199, 215), 'numpy.cos', 'numpy.cos', (['angle'], {}), '(angle)\n', (208, 215), False, 'import numpy\n'), ((232, 248), 'numpy.sin', 'numpy.sin', (['angle'], {}), '(angle)\n', (241, 248), False, 'import numpy\n'), ((1930, 1971), 'numpy.array', 'numpy.array', (['(ind[0], ind[1] + adds.data)'], {}), '((ind[0], ind[1] + adds.data))\n', (1941, 1971), False, 'import numpy\n'), ((3173, 3189), 'numpy.cos', 'numpy.cos', (['angle'], {}), '(angle)\n', (3182, 3189), False, 'import numpy\n'), ((3206, 3222), 'numpy.sin', 'numpy.sin', (['angle'], {}), '(angle)\n', (3215, 3222), False, 'import numpy\n'), 
((3661, 3677), 'numpy.cos', 'numpy.cos', (['angle'], {}), '(angle)\n', (3670, 3677), False, 'import numpy\n'), ((3694, 3710), 'numpy.sin', 'numpy.sin', (['angle'], {}), '(angle)\n', (3703, 3710), False, 'import numpy\n'), ((447, 476), 'numpy.array', 'numpy.array', (['p', 'numpy.float32'], {}), '(p, numpy.float32)\n', (458, 476), False, 'import numpy\n'), ((1880, 1898), 'numpy.zeros', 'numpy.zeros', (['shape'], {}), '(shape)\n', (1891, 1898), False, 'import numpy\n'), ((3448, 3477), 'numpy.array', 'numpy.array', (['i', 'numpy.float32'], {}), '(i, numpy.float32)\n', (3459, 3477), False, 'import numpy\n'), ((3894, 3923), 'numpy.array', 'numpy.array', (['i', 'numpy.float32'], {}), '(i, numpy.float32)\n', (3905, 3923), False, 'import numpy\n'), ((906, 940), 'numpy.array', 'numpy.array', (['(i, j)', 'numpy.float32'], {}), '((i, j), numpy.float32)\n', (917, 940), False, 'import numpy\n'), ((1899, 1916), 'numpy.ones', 'numpy.ones', (['shape'], {}), '(shape)\n', (1909, 1916), False, 'import numpy\n')] |
# I know it's not much but it's honest work :')
import numpy as np
import cv2 # REMOVE THIS IMPORT
def rotation_vector_to_rotation_matrix(rotation_vector):
    """Transforms rotation vector (axis-angle) form to rotation matrix.

    Implemented with the Rodrigues rotation formula in pure numpy, so the
    cv2 dependency (flagged above with "REMOVE THIS IMPORT") is no longer
    needed by this function.

    # Arguments
        rotation_vector: Array (3). Rotation vector in axis-angle form;
            its norm is the rotation angle and its direction the axis.

    # Returns
        Array (3, 3) rotation matrix.
    """
    rotation_vector = np.asarray(rotation_vector, dtype=np.float64).reshape(3)
    theta = np.linalg.norm(rotation_vector)
    if theta < 1e-12:
        # Near-zero rotation: the matrix is numerically the identity.
        return np.eye(3)
    axis = rotation_vector / theta
    # Skew-symmetric cross-product matrix of the unit axis.
    K = np.array([[0.0, -axis[2], axis[1]],
                  [axis[2], 0.0, -axis[0]],
                  [-axis[1], axis[0], 0.0]])
    # Rodrigues formula: R = I + sin(t) K + (1 - cos(t)) K^2
    return np.eye(3) + np.sin(theta) * K + (1.0 - np.cos(theta)) * np.dot(K, K)
def build_rotation_matrix_z(angle):
    """Builds rotation matrix in Z axis.

    # Arguments
        angle: Float. Angle in radians.

    # Return
        Array (3, 3) rotation matrix in Z axis.
    """
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[c, -s, 0.0],
                     [s, c, 0.0],
                     [0.0, 0.0, 1.0]])
def build_rotation_matrix_x(angle):
    """Builds rotation matrix in X axis.

    # Arguments
        angle: Float. Angle in radians.

    # Return
        Array (3, 3) rotation matrix in X axis.
    """
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[1.0, 0.0, 0.0],
                     [0.0, c, -s],
                     [0.0, s, c]])
def build_rotation_matrix_y(angle):
    """Builds rotation matrix in Y axis.

    # Arguments
        angle: Float. Angle in radians.

    # Return
        Array (3, 3) rotation matrix in Y axis.
    """
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[c, 0.0, s],
                     [0.0, 1.0, 0.0],
                     [-s, 0.0, c]])
def compute_norm_SO3(rotation_mesh, rotation):
    """Computes norm between SO3 elements.

    # Arguments
        rotation_mesh: Array (3, 3), rotation matrix.
        rotation: Array (3, 3), rotation matrix.

    # Returns
        Scalar representing the distance between both rotation matrices.
    """
    # Frobenius norm of (R^-1 Rm - I): zero iff the rotations coincide.
    relative = np.dot(np.linalg.inv(rotation), rotation_mesh)
    return np.linalg.norm(relative - np.eye(3), ord='fro')
def calculate_canonical_rotation(rotation_mesh, rotations):
    """Returns the rotation matrix closest to rotation mesh.

    # Arguments
        rotation_mesh: Array (3, 3), rotation matrix.
        rotations: List of array of (3, 3), rotation matrices.

    # Returns
        Inverse of the list element closest to rotation mesh.
    """
    # Pick the candidate with the smallest SO(3) distance (first wins on
    # ties, matching argmin) and return its inverse.
    closest_rotation = min(
        rotations, key=lambda rotation: compute_norm_SO3(rotation_mesh, rotation))
    return np.linalg.inv(closest_rotation)
| [
"numpy.argmin",
"cv2.Rodrigues",
"numpy.sin",
"numpy.array",
"numpy.linalg.norm",
"numpy.cos",
"numpy.linalg.inv",
"numpy.eye"
] | [((404, 413), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (410, 413), True, 'import numpy as np\n'), ((418, 465), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rotation_vector', 'rotation_matrix'], {}), '(rotation_vector, rotation_matrix)\n', (431, 465), False, 'import cv2\n'), ((715, 728), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (721, 728), True, 'import numpy as np\n'), ((745, 758), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (751, 758), True, 'import numpy as np\n'), ((783, 877), 'numpy.array', 'np.array', (['[[+cos_angle, -sin_angle, 0.0], [+sin_angle, +cos_angle, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[+cos_angle, -sin_angle, 0.0], [+sin_angle, +cos_angle, 0.0], [\n 0.0, 0.0, 1.0]])\n', (791, 877), True, 'import numpy as np\n'), ((1192, 1205), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1198, 1205), True, 'import numpy as np\n'), ((1222, 1235), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1228, 1235), True, 'import numpy as np\n'), ((1260, 1353), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, +cos_angle, -sin_angle], [0.0, +sin_angle, +cos_angle]]'], {}), '([[1.0, 0.0, 0.0], [0.0, +cos_angle, -sin_angle], [0.0, +sin_angle,\n +cos_angle]])\n', (1268, 1353), True, 'import numpy as np\n'), ((1669, 1682), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1675, 1682), True, 'import numpy as np\n'), ((1699, 1712), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1705, 1712), True, 'import numpy as np\n'), ((1737, 1830), 'numpy.array', 'np.array', (['[[+cos_angle, 0.0, +sin_angle], [0.0, 1.0, 0.0], [-sin_angle, 0.0, +cos_angle]]'], {}), '([[+cos_angle, 0.0, +sin_angle], [0.0, 1.0, 0.0], [-sin_angle, 0.0,\n +cos_angle]])\n', (1745, 1830), True, 'import numpy as np\n'), ((2323, 2360), 'numpy.linalg.norm', 'np.linalg.norm', (['difference'], {'ord': '"""fro"""'}), "(difference, ord='fro')\n", (2337, 2360), True, 'import numpy as np\n'), ((2806, 2822), 'numpy.argmin', 'np.argmin', (['norms'], {}), '(norms)\n', (2815, 
2822), True, 'import numpy as np\n'), ((2903, 2934), 'numpy.linalg.inv', 'np.linalg.inv', (['closest_rotation'], {}), '(closest_rotation)\n', (2916, 2934), True, 'import numpy as np\n'), ((2298, 2307), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2304, 2307), True, 'import numpy as np\n'), ((2256, 2279), 'numpy.linalg.inv', 'np.linalg.inv', (['rotation'], {}), '(rotation)\n', (2269, 2279), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# See top-level LICENSE.rst file for Copyright information
#
# -*- coding: utf-8 -*-
"""
Generate S/N plots as a function of object type for the
current production
"""
import argparse
from desisim.spec_qa import __qa_version__
def parse(options=None):
    """Build the command-line parser for the S/N QA script and parse.

    options: optional Namespace to populate; when None a fresh one is
    created from sys.argv.
    """
    description = "Generate S/N QA for a production [v{:s}]".format(__qa_version__)
    parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--qaprod_dir', type=str, default=None,
                        help='Path to where QA figure files are generated. Default is qaprod_dir')
    if options is None:
        return parser.parse_args()
    return parser.parse_args(namespace=options)
def main(args):
    """Generate the S/N QA figures for the current production.

    args: Namespace from parse(); only args.qaprod_dir is read here.
    Writes one S/N-vs-flux PNG per (objtype, channel) combination, plus
    an S/N-vs-redshift PNG for ELGs in the z channel.
    """
    import os.path
    import numpy as np
    import desispec.io
    from desiutil.log import get_logger
    from desisim.spec_qa.s2n import load_s2n_values, obj_s2n_wave, obj_s2n_z
    from desisim.spec_qa.s2n import load_all_s2n_values
    from desisim.spec_qa.s2n import parse_s2n_values
    # Initialize: fall back to the standard QA production root when no
    # explicit output directory was given on the command line.
    if args.qaprod_dir is not None:
        qaprod_dir = args.qaprod_dir
    else:
        qaprod_dir = desispec.io.meta.qaprod_root()
    # Generate the path
    # Grab nights
    nights = desispec.io.get_nights()
    # Load all s2n (once), one entry per spectrograph channel.
    all_s2n_values = []
    channels = ['b', 'r', 'z']
    for channel in channels:
        print("Loading S/N for channel {}".format(channel))
        all_s2n_values.append(load_all_s2n_values(nights, channel))
    # Loop on channel; each channel gets its own wavelength binning.
    # NOTE(review): bin edges appear to be Angstroms spanning each
    # camera's coverage -- confirm against the instrument definition.
    for ss, channel in enumerate(channels):
        if channel == 'b':
            wv_bins = np.arange(3570., 5700., 20.)
        elif channel == 'r':
            wv_bins = np.arange(5750., 7600., 20.)
        elif channel == 'z':
            wv_bins = np.arange(7500., 9800., 20.)
            z_bins = np.linspace(1.0, 1.6, 100) # z camera
        else:
            raise IOError("Bad channel value: {}".format(channel))
        # Loop on OBJTYPE; flux bins differ per target class, and
        # oii_bins exists only for ELGs (used only in the ELG/z branch).
        for objtype in ['ELG', 'LRG', 'QSO']:
            if objtype == 'ELG':
                flux_bins = np.linspace(19., 24., 6)
                oii_bins = np.array([1., 6., 10., 30., 100., 1000.])
            elif objtype == 'LRG':
                flux_bins = np.linspace(16., 22., 6)
            elif objtype == 'QSO':
                flux_bins = np.linspace(15., 24., 6)
            # Parse the per-channel S/N values for this object type.
            fdict = all_s2n_values[ss]
            s2n_dict = parse_s2n_values(objtype, fdict)
            # Plot S/N versus wavelength/flux.
            outfile = qaprod_dir+'/QA_s2n_{:s}_{:s}.png'.format(objtype, channel)
            desispec.io.util.makepath(outfile)
            obj_s2n_wave(s2n_dict, wv_bins, flux_bins, objtype, outfile=outfile)
            # S/N vs. z for ELG, only meaningful in the z channel where
            # z_bins and oii_bins are both defined.
            if (channel == 'z') & (objtype=='ELG'):
                outfile = qaprod_dir+'/QA_s2n_{:s}_{:s}_redshift.png'.format(objtype,channel)
                desispec.io.util.makepath(outfile)
                obj_s2n_z(s2n_dict, z_bins, oii_bins, objtype, outfile=outfile)
| [
"desisim.spec_qa.s2n.parse_s2n_values",
"desisim.spec_qa.s2n.obj_s2n_z",
"desisim.spec_qa.s2n.obj_s2n_wave",
"numpy.arange",
"numpy.array",
"numpy.linspace",
"desisim.spec_qa.s2n.load_all_s2n_values"
] | [((1656, 1692), 'desisim.spec_qa.s2n.load_all_s2n_values', 'load_all_s2n_values', (['nights', 'channel'], {}), '(nights, channel)\n', (1675, 1692), False, 'from desisim.spec_qa.s2n import load_all_s2n_values\n'), ((1810, 1841), 'numpy.arange', 'np.arange', (['(3570.0)', '(5700.0)', '(20.0)'], {}), '(3570.0, 5700.0, 20.0)\n', (1819, 1841), True, 'import numpy as np\n'), ((2624, 2656), 'desisim.spec_qa.s2n.parse_s2n_values', 'parse_s2n_values', (['objtype', 'fdict'], {}), '(objtype, fdict)\n', (2640, 2656), False, 'from desisim.spec_qa.s2n import parse_s2n_values\n'), ((2817, 2885), 'desisim.spec_qa.s2n.obj_s2n_wave', 'obj_s2n_wave', (['s2n_dict', 'wv_bins', 'flux_bins', 'objtype'], {'outfile': 'outfile'}), '(s2n_dict, wv_bins, flux_bins, objtype, outfile=outfile)\n', (2829, 2885), False, 'from desisim.spec_qa.s2n import load_s2n_values, obj_s2n_wave, obj_s2n_z\n'), ((1890, 1921), 'numpy.arange', 'np.arange', (['(5750.0)', '(7600.0)', '(20.0)'], {}), '(5750.0, 7600.0, 20.0)\n', (1899, 1921), True, 'import numpy as np\n'), ((2272, 2298), 'numpy.linspace', 'np.linspace', (['(19.0)', '(24.0)', '(6)'], {}), '(19.0, 24.0, 6)\n', (2283, 2298), True, 'import numpy as np\n'), ((2324, 2371), 'numpy.array', 'np.array', (['[1.0, 6.0, 10.0, 30.0, 100.0, 1000.0]'], {}), '([1.0, 6.0, 10.0, 30.0, 100.0, 1000.0])\n', (2332, 2371), True, 'import numpy as np\n'), ((3131, 3194), 'desisim.spec_qa.s2n.obj_s2n_z', 'obj_s2n_z', (['s2n_dict', 'z_bins', 'oii_bins', 'objtype'], {'outfile': 'outfile'}), '(s2n_dict, z_bins, oii_bins, objtype, outfile=outfile)\n', (3140, 3194), False, 'from desisim.spec_qa.s2n import load_s2n_values, obj_s2n_wave, obj_s2n_z\n'), ((1970, 2001), 'numpy.arange', 'np.arange', (['(7500.0)', '(9800.0)', '(20.0)'], {}), '(7500.0, 9800.0, 20.0)\n', (1979, 2001), True, 'import numpy as np\n'), ((2020, 2046), 'numpy.linspace', 'np.linspace', (['(1.0)', '(1.6)', '(100)'], {}), '(1.0, 1.6, 100)\n', (2031, 2046), True, 'import numpy as np\n'), ((2429, 2455), 
'numpy.linspace', 'np.linspace', (['(16.0)', '(22.0)', '(6)'], {}), '(16.0, 22.0, 6)\n', (2440, 2455), True, 'import numpy as np\n'), ((2517, 2543), 'numpy.linspace', 'np.linspace', (['(15.0)', '(24.0)', '(6)'], {}), '(15.0, 24.0, 6)\n', (2528, 2543), True, 'import numpy as np\n')] |
# !/usr/bin/env python
# encoding: utf-8
__author__ = '<NAME>'
from sklearn.naive_bayes import GaussianNB,BernoulliNB
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn import model_selection
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
def iris_type(s):
    # Map each iris species label (a byte string, as produced by
    # numpy.loadtxt converters) to its integer class id; raises KeyError
    # for unknown labels, same as a direct dict lookup.
    species = (b'Iris-setosa', b'Iris-versicolor', b'Iris-virginica')
    return {name: label for label, name in enumerate(species)}[s]
filepath='E:/datas/iris.csv' # path to the data file
data=np.loadtxt(filepath,dtype=float,delimiter=',',converters={4:iris_type})
# print(data)
# Sample of the loaded array:
# [[ 5.1 3.5 1.4 0.2 0. ]
# [ 4.9 3. 1.4 0.2 0. ]
# [ 4.7 3.2 1.3 0.2 0. ]
# [ 4.6 3.1 1.5 0.2 0. ]
# [ 5. 3.6 1.4 0.2 0. ]]
# Split into features (first 4 columns) and label (last column), then
# keep only the first two features so the decision regions can be drawn.
X ,y=np.split(data,(4,),axis=1)
x=X[:,0:2]
x_train,x_test,y_train,y_test=model_selection.train_test_split(x,y,random_state=1,test_size=0.3)
# Build the GaussianNaiveBayes classifier
classifier = GaussianNB()
# classifier=BernoulliNB()
# Fit on the training split
classifier.fit(x_train,y_train.ravel())
print("GaussianNB在训练集上的准确率为:",classifier.score(x_train,y_train))
y_hat=classifier.predict(x_train)
print("GaussianNB在测试集上的准确率为:",classifier.score(x_test,y_test))
y_hat=classifier.predict(x_test)
# GaussianNB accuracy on the training set: 0.809523809524
# GaussianNB accuracy on the test set: 0.755555555556
# # The decision function can be inspected via decision_function();
# # each column holds the distance to one class.
# print('decision_function:\n', classifier.decision_function(x_train))
# print('\npredict:\n', classifier.predict(x_train))
# Plotting
# 1. Determine the axis ranges; the x and y axes are the two features
x1_min, x1_max = x[:, 0].min(), x[:, 0].max() # range of column 0
x2_min, x2_max = x[:, 1].min(), x[:, 1].max() # range of column 1
x1, x2 = np.mgrid[x1_min:x1_max:200j, x2_min:x2_max:200j] # grid sampling points
grid_test = np.stack((x1.flat, x2.flat), axis=1) # test points
grid_hat = classifier.predict(grid_test) # predicted class for each grid point
grid_hat = grid_hat.reshape(x1.shape) # reshape to match the grid
# 2. Set the default font (SimHei renders the Chinese axis labels)
mpl.rcParams['font.sans-serif'] = [u'SimHei']
mpl.rcParams['axes.unicode_minus'] = False
# 3. Draw
cm_light = mpl.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])
cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
alpha=0.5
plt.pcolormesh(x1, x2, grid_hat, cmap=cm_light) # show the predicted regions
# plt.scatter(x[:, 0], x[:, 1], c=y, edgecolors='k', s=50, cmap=cm_dark) # samples
plt.plot(x[:, 0], x[:, 1], 'o', alpha=alpha, color='blue', markeredgecolor='k')
plt.scatter(x_test[:, 0], x_test[:, 1], s=120, facecolors='none', zorder=10) # circle the test-set samples
plt.xlabel(u'花萼长度', fontsize=13)
plt.ylabel(u'花萼宽度', fontsize=13)
plt.xlim(x1_min, x1_max)
plt.ylim(x2_min, x2_max)
plt.title(u'鸢尾花GaussianNB分类结果', fontsize=15)
plt.grid() # show grid lines
plt.show()
| [
"numpy.stack",
"matplotlib.pyplot.xlim",
"sklearn.naive_bayes.GaussianNB",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.scatter",
"numpy.split",
"numpy.loadtxt",
"matplotl... | [((567, 644), 'numpy.loadtxt', 'np.loadtxt', (['filepath'], {'dtype': 'float', 'delimiter': '""","""', 'converters': '{(4): iris_type}'}), "(filepath, dtype=float, delimiter=',', converters={(4): iris_type})\n", (577, 644), True, 'import numpy as np\n'), ((820, 848), 'numpy.split', 'np.split', (['data', '(4,)'], {'axis': '(1)'}), '(data, (4,), axis=1)\n', (828, 848), True, 'import numpy as np\n'), ((888, 957), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['x', 'y'], {'random_state': '(1)', 'test_size': '(0.3)'}), '(x, y, random_state=1, test_size=0.3)\n', (920, 957), False, 'from sklearn import model_selection\n'), ((1000, 1012), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (1010, 1012), False, 'from sklearn.naive_bayes import GaussianNB, BernoulliNB\n'), ((1786, 1822), 'numpy.stack', 'np.stack', (['(x1.flat, x2.flat)'], {'axis': '(1)'}), '((x1.flat, x2.flat), axis=1)\n', (1794, 1822), True, 'import numpy as np\n'), ((2057, 2117), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (["['#A0FFA0', '#FFA0A0', '#A0A0FF']"], {}), "(['#A0FFA0', '#FFA0A0', '#A0A0FF'])\n", (2082, 2117), True, 'import matplotlib as mpl\n'), ((2128, 2170), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (["['g', 'r', 'b']"], {}), "(['g', 'r', 'b'])\n", (2153, 2170), True, 'import matplotlib as mpl\n'), ((2183, 2230), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['x1', 'x2', 'grid_hat'], {'cmap': 'cm_light'}), '(x1, x2, grid_hat, cmap=cm_light)\n', (2197, 2230), True, 'import matplotlib.pyplot as plt\n'), ((2319, 2398), 'matplotlib.pyplot.plot', 'plt.plot', (['x[:, 0]', 'x[:, 1]', '"""o"""'], {'alpha': 'alpha', 'color': '"""blue"""', 'markeredgecolor': '"""k"""'}), "(x[:, 0], x[:, 1], 'o', alpha=alpha, color='blue', markeredgecolor='k')\n", (2327, 2398), True, 'import matplotlib.pyplot as plt\n'), ((2399, 2475), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_test[:, 0]', 'x_test[:, 
1]'], {'s': '(120)', 'facecolors': '"""none"""', 'zorder': '(10)'}), "(x_test[:, 0], x_test[:, 1], s=120, facecolors='none', zorder=10)\n", (2410, 2475), True, 'import matplotlib.pyplot as plt\n'), ((2487, 2519), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['u"""花萼长度"""'], {'fontsize': '(13)'}), "(u'花萼长度', fontsize=13)\n", (2497, 2519), True, 'import matplotlib.pyplot as plt\n'), ((2520, 2552), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['u"""花萼宽度"""'], {'fontsize': '(13)'}), "(u'花萼宽度', fontsize=13)\n", (2530, 2552), True, 'import matplotlib.pyplot as plt\n'), ((2553, 2577), 'matplotlib.pyplot.xlim', 'plt.xlim', (['x1_min', 'x1_max'], {}), '(x1_min, x1_max)\n', (2561, 2577), True, 'import matplotlib.pyplot as plt\n'), ((2578, 2602), 'matplotlib.pyplot.ylim', 'plt.ylim', (['x2_min', 'x2_max'], {}), '(x2_min, x2_max)\n', (2586, 2602), True, 'import matplotlib.pyplot as plt\n'), ((2603, 2647), 'matplotlib.pyplot.title', 'plt.title', (['u"""鸢尾花GaussianNB分类结果"""'], {'fontsize': '(15)'}), "(u'鸢尾花GaussianNB分类结果', fontsize=15)\n", (2612, 2647), True, 'import matplotlib.pyplot as plt\n'), ((2648, 2658), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2656, 2658), True, 'import matplotlib.pyplot as plt\n'), ((2665, 2675), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2673, 2675), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from keras import backend as K
from keras.engine.topology import Layer, InputSpec
from keras.layers.core import Dropout, Reshape
from keras.layers.convolutional import ZeroPadding2D
from keras.models import Sequential
import numpy as np
# test harness
# creates a Sequential model out of a single layer and passes the
# input through it to produce output
def test_layer(layer, x):
    """Run a single input through a single-layer Sequential model.

    A fresh copy of `layer` is rebuilt from its config with the input
    shape attached, wrapped in a compiled Sequential model, and applied
    to `x` (with a batch dimension added and stripped again).
    """
    config = layer.get_config()
    config["input_shape"] = x.shape
    rebuilt = layer.__class__.from_config(config)
    model = Sequential()
    model.add(rebuilt)
    model.compile("rmsprop", "mse")
    batch = np.expand_dims(x, axis=0)
    return model.predict(batch)[0]
# custom layer
class LocalResponseNormalization(Layer):
    """Local Response Normalization layer.

    Divides each activation by a factor derived from the average of the
    squared activations in a local spatial window, summed over channels.

    # Arguments
        n: Int. Size of the local pooling window.
        alpha: Float. Scaling coefficient.
        beta: Float. Exponent applied to the normalization denominator.
        k: Float. Additive bias inside the denominator.
    """

    def __init__(self, n=5, alpha=0.0005, beta=0.75, k=2, **kwargs):
        self.n = n
        self.alpha = alpha
        self.beta = beta
        self.k = k
        super(LocalResponseNormalization, self).__init__(**kwargs)

    def build(self, input_shape):
        # Remember the static input shape so call() can unpack it.
        self.shape = input_shape
        super(LocalResponseNormalization, self).build(input_shape)

    def call(self, x, mask=None):
        # Bug fix: K.image_dim_ordering is a function; the original code
        # compared the function object itself to "th", which is always
        # False, so the channels-first ("th") branch could never run.
        if K.image_dim_ordering() == "th":
            _, f, r, c = self.shape
        else:
            _, r, c, f = self.shape
        half_n = self.n // 2
        squared = K.square(x)
        # Average the squared activations over a local spatial window.
        pooled = K.pool2d(squared, (half_n, half_n), strides=(1, 1),
            padding="same", pool_mode="avg")
        if K.image_dim_ordering() == "th":
            summed = K.sum(pooled, axis=1, keepdims=True)
            averaged = (self.alpha / self.n) * K.repeat_elements(summed, f, axis=1)
        else:
            summed = K.sum(pooled, axis=3, keepdims=True)
            averaged = (self.alpha / self.n) * K.repeat_elements(summed, f, axis=3)
        denom = K.pow(self.k + averaged, self.beta)
        return x / denom

    def compute_output_shape(self, input_shape):
        # Elementwise normalization: output shape equals input shape.
        return input_shape
# test the test harness
# Dropout leaves the tensor shape unchanged.
x = np.random.randn(10, 10)
layer = Dropout(0.5)
y = test_layer(layer, x)
assert(x.shape == y.shape)
# ZeroPadding2D adds one row/column of zeros on every side.
x = np.random.randn(10, 10, 3)
layer = ZeroPadding2D(padding=(1,1))
y = test_layer(layer, x)
assert(x.shape[0] + 2 == y.shape[0])
assert(x.shape[1] + 2 == y.shape[1])
# Reshape turns (10, 10) into (5, 20).
x = np.random.randn(10, 10)
layer = Reshape((5, 20))
y = test_layer(layer, x)
assert(y.shape == (5, 20))
# test custom layer
# The normalization is elementwise, so the shape must be preserved.
x = np.random.randn(225, 225, 3)
layer = LocalResponseNormalization()
y = test_layer(layer, x)
assert(x.shape == y.shape)
| [
"keras.backend.pool2d",
"keras.layers.core.Reshape",
"numpy.random.randn",
"keras.backend.sum",
"numpy.expand_dims",
"keras.backend.pow",
"keras.layers.core.Dropout",
"keras.layers.convolutional.ZeroPadding2D",
"keras.models.Sequential",
"keras.backend.square",
"keras.backend.repeat_elements"
] | [((2040, 2063), 'numpy.random.randn', 'np.random.randn', (['(10)', '(10)'], {}), '(10, 10)\n', (2055, 2063), True, 'import numpy as np\n'), ((2072, 2084), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2079, 2084), False, 'from keras.layers.core import Dropout, Reshape\n'), ((2142, 2168), 'numpy.random.randn', 'np.random.randn', (['(10)', '(10)', '(3)'], {}), '(10, 10, 3)\n', (2157, 2168), True, 'import numpy as np\n'), ((2177, 2206), 'keras.layers.convolutional.ZeroPadding2D', 'ZeroPadding2D', ([], {'padding': '(1, 1)'}), '(padding=(1, 1))\n', (2190, 2206), False, 'from keras.layers.convolutional import ZeroPadding2D\n'), ((2310, 2333), 'numpy.random.randn', 'np.random.randn', (['(10)', '(10)'], {}), '(10, 10)\n', (2325, 2333), True, 'import numpy as np\n'), ((2342, 2358), 'keras.layers.core.Reshape', 'Reshape', (['(5, 20)'], {}), '((5, 20))\n', (2349, 2358), False, 'from keras.layers.core import Dropout, Reshape\n'), ((2436, 2464), 'numpy.random.randn', 'np.random.randn', (['(225)', '(225)', '(3)'], {}), '(225, 225, 3)\n', (2451, 2464), True, 'import numpy as np\n'), ((601, 613), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (611, 613), False, 'from keras.models import Sequential\n'), ((680, 705), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (694, 705), True, 'import numpy as np\n'), ((1374, 1385), 'keras.backend.square', 'K.square', (['x'], {}), '(x)\n', (1382, 1385), True, 'from keras import backend as K\n'), ((1403, 1491), 'keras.backend.pool2d', 'K.pool2d', (['squared', '(half_n, half_n)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'pool_mode': '"""avg"""'}), "(squared, (half_n, half_n), strides=(1, 1), padding='same',\n pool_mode='avg')\n", (1411, 1491), True, 'from keras import backend as K\n'), ((1868, 1903), 'keras.backend.pow', 'K.pow', (['(self.k + averaged)', 'self.beta'], {}), '(self.k + averaged, self.beta)\n', (1873, 1903), True, 'from keras import backend as K\n'), 
((1575, 1611), 'keras.backend.sum', 'K.sum', (['pooled'], {'axis': '(1)', 'keepdims': '(True)'}), '(pooled, axis=1, keepdims=True)\n', (1580, 1611), True, 'from keras import backend as K\n'), ((1731, 1767), 'keras.backend.sum', 'K.sum', (['pooled'], {'axis': '(3)', 'keepdims': '(True)'}), '(pooled, axis=3, keepdims=True)\n', (1736, 1767), True, 'from keras import backend as K\n'), ((1659, 1695), 'keras.backend.repeat_elements', 'K.repeat_elements', (['summed', 'f'], {'axis': '(1)'}), '(summed, f, axis=1)\n', (1676, 1695), True, 'from keras import backend as K\n'), ((1815, 1851), 'keras.backend.repeat_elements', 'K.repeat_elements', (['summed', 'f'], {'axis': '(3)'}), '(summed, f, axis=3)\n', (1832, 1851), True, 'from keras import backend as K\n')] |
from skimage.io import imread
from image import convolution
import numpy as np
from matplotlib import pyplot as plt
def to_image(array):
    """Linearly rescale `array` to the [0, 255] display range.

    Bug fix: the original computed the maximum with np.min, so the scale
    denominator (a_max - a_min) was always zero and every non-constant
    input produced inf/nan values.
    """
    a_min = np.min(array)
    a_max = np.max(array)
    if a_max == a_min:
        # Constant input: avoid division by zero; map everything to 0.
        return np.zeros_like(array, dtype=float)
    return ((array - a_min) / float(a_max - a_min)) * 255
# Load the image as a single grayscale channel.
image = imread('./civetta.jpg', as_grey=True)
# NOTE(review): the 20x20 all-ones kernel is unnormalized (it sums to
# 400), so the convolution output is scaled up by the kernel sum; divide
# by box_blur_kernel.size for a true mean/box-blur -- confirm intent.
box_blur_kernel = np.ones((20,20))
result = convolution(image,box_blur_kernel)
plt.imshow(result)
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.ones",
"numpy.min",
"image.convolution",
"skimage.io.imread"
] | [((251, 288), 'skimage.io.imread', 'imread', (['"""./civetta.jpg"""'], {'as_grey': '(True)'}), "('./civetta.jpg', as_grey=True)\n", (257, 288), False, 'from skimage.io import imread\n'), ((307, 324), 'numpy.ones', 'np.ones', (['(20, 20)'], {}), '((20, 20))\n', (314, 324), True, 'import numpy as np\n'), ((333, 368), 'image.convolution', 'convolution', (['image', 'box_blur_kernel'], {}), '(image, box_blur_kernel)\n', (344, 368), False, 'from image import convolution\n'), ((368, 386), 'matplotlib.pyplot.imshow', 'plt.imshow', (['result'], {}), '(result)\n', (378, 386), True, 'from matplotlib import pyplot as plt\n'), ((387, 397), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (395, 397), True, 'from matplotlib import pyplot as plt\n'), ((150, 163), 'numpy.min', 'np.min', (['array'], {}), '(array)\n', (156, 163), True, 'import numpy as np\n'), ((176, 189), 'numpy.min', 'np.min', (['array'], {}), '(array)\n', (182, 189), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
# data
df = pd.read_csv("test.csv")
print(df)
print()
# separate the output column
y_name = df.columns[-1]  # assumes the target is the last column — TODO confirm
y_df = df[y_name]
X_df = df.drop(y_name, axis=1)
# numpy arrays
X_ar = np.array(X_df, dtype=np.float32)
y_ar = np.array(y_df, dtype=np.float32)
# torch tensors
X_tensor = torch.from_numpy(X_ar)
y_tensor = torch.from_numpy(y_ar)
# https://stackoverflow.com/questions/65219569/pytorch-gives-incorrect-results-due-to-broadcasting
# Reshape targets to a column vector so MSELoss doesn't broadcast (N,) vs (N,1).
print(y_tensor.shape)
new_shape = (26, 1)  # NOTE(review): hard-coded to 26 rows of test.csv — confirm
y_tensor = y_tensor.view(new_shape)
print(y_tensor.shape)
# hyperparameters
in_features = X_ar.shape[1]
hidden_size = 100
out_features = 1
epochs = 500
# model
class Net(nn.Module):
    """Fully connected regression net: input -> 3 hidden layers -> 1 output.

    Args:
        hidden_size: width of every hidden layer.
        input_dim: number of input features. Defaults to the module-level
            ``in_features`` global (original behavior); pass it explicitly to
            decouple the model from the script's data-loading section.
    """
    def __init__(self, hidden_size, input_dim=None):
        super(Net, self).__init__()
        if input_dim is None:
            # Backward-compatible fallback to the script-level global.
            input_dim = in_features
        self.L0 = nn.Linear(input_dim, hidden_size)
        self.N0 = nn.ReLU()
        self.L1 = nn.Linear(hidden_size, hidden_size)
        self.N1 = nn.Tanh()
        self.L2 = nn.Linear(hidden_size, hidden_size)
        self.N2 = nn.ReLU()
        self.L3 = nn.Linear(hidden_size, 1)

    def forward(self, x):
        """Forward pass; x has shape (batch, input_dim), returns (batch, 1)."""
        x = self.L0(x)
        x = self.N0(x)
        x = self.L1(x)
        x = self.N1(x)
        x = self.L2(x)
        x = self.N2(x)
        x = self.L3(x)
        return x
model = Net(hidden_size)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
# train (full-batch gradient descent over the whole dataset each epoch)
print("training")
for epoch in range(1, epochs + 1):
    # forward
    output = model(X_tensor)
    cost = criterion(output, y_tensor)
    # backward
    optimizer.zero_grad()
    cost.backward()
    optimizer.step()
    # print progress (10 times over the run)
    if epoch % (epochs // 10) == 0:
        print(f"{epoch:6d} {cost.item():10f}")
print()
# final evaluation on the training data
output = model(X_tensor)
cost = criterion(output, y_tensor)
print("mean squared error:", cost.item())
| [
"torch.nn.MSELoss",
"torch.nn.ReLU",
"pandas.read_csv",
"torch.nn.Tanh",
"numpy.array",
"torch.nn.Linear",
"torch.from_numpy"
] | [((87, 110), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (98, 110), True, 'import pandas as pd\n'), ((255, 287), 'numpy.array', 'np.array', (['X_df'], {'dtype': 'np.float32'}), '(X_df, dtype=np.float32)\n', (263, 287), True, 'import numpy as np\n'), ((295, 327), 'numpy.array', 'np.array', (['y_df'], {'dtype': 'np.float32'}), '(y_df, dtype=np.float32)\n', (303, 327), True, 'import numpy as np\n'), ((356, 378), 'torch.from_numpy', 'torch.from_numpy', (['X_ar'], {}), '(X_ar)\n', (372, 378), False, 'import torch\n'), ((390, 412), 'torch.from_numpy', 'torch.from_numpy', (['y_ar'], {}), '(y_ar)\n', (406, 412), False, 'import torch\n'), ((1346, 1358), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1356, 1358), True, 'import torch.nn as nn\n'), ((830, 865), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'hidden_size'], {}), '(in_features, hidden_size)\n', (839, 865), True, 'import torch.nn as nn\n'), ((884, 893), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (891, 893), True, 'import torch.nn as nn\n'), ((912, 947), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (921, 947), True, 'import torch.nn as nn\n'), ((966, 975), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (973, 975), True, 'import torch.nn as nn\n'), ((994, 1029), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (1003, 1029), True, 'import torch.nn as nn\n'), ((1048, 1057), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1055, 1057), True, 'import torch.nn as nn\n'), ((1076, 1101), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (1085, 1101), True, 'import torch.nn as nn\n')] |
import numpy as np
from .datahandler import DataHandler as DH
from tqdm import tqdm
class GeoUtils:
    """Static helpers for building geolocation datasets and class masks."""
    @staticmethod
    def zerodata_augmentation(data, x_range=(-175, -64), y_range=(18, 71),
                              fineness=(20, 20), numdata_sqrt_oneclass=10):
        """Append label-less dummy points for every grid block with no data.

        Args:
            data: list of dicts each holding a 'locate' [x, y] pair.
            x_range, y_range: geographic extent covered by the grid.
            fineness: (nx, ny) number of grid blocks per axis.
            numdata_sqrt_oneclass: points generated per axis per empty block.
        Returns:
            The same list with negative examples appended in place.
        """
        labels = set([i for i in range(fineness[0] * fineness[1])])
        # Find the grid blocks that contain no data.
        x_min, x_max = sorted(x_range)
        y_min, y_max = sorted(y_range)
        xl, yl = (x_max - x_min) / fineness[0], (y_max - y_min) / fineness[1]
        for item in tqdm(data):
            x, y = item['locate']
            xlabel = (x - x_min) // xl
            ylabel = (y - y_min) // yl
            labels.discard(int(ylabel * fineness[0] + xlabel))
        # Add data acting as negative examples for every (empty) class.
        for zerolabel in tqdm(labels):
            ylabel, xlabel = divmod(zerolabel, fineness[0])
            xgrid = np.linspace(
                x_min + xl * xlabel, x_min + xl * (xlabel + 1),
                numdata_sqrt_oneclass, endpoint=False
            )
            ygrid = np.linspace(
                y_min + yl * ylabel, y_min + yl * (ylabel + 1),
                numdata_sqrt_oneclass, endpoint=False
            )
            for x in xgrid:
                for y in ygrid:
                    data.append({'labels': [], 'locate': [x, y]})
        return data
    @staticmethod
    def base_dataset(x_range=(-175, -64), y_range=(18, 71), fineness=(20, 20),
                     numdata_sqrt_oneclass=32):
        """Build a synthetic grid dataset: every point is labelled by its block.

        Returns:
            (locate_tags_dictlist, (mean, std)) where mean/std are per-axis
            statistics over all generated coordinates.
        """
        print('preparing dataset for basenet ...')
        x_min, x_max = sorted(x_range)
        y_min, y_max = sorted(y_range)
        xgrid = np.arange(
            x_min, x_max,
            (x_max - x_min) / (fineness[0] * numdata_sqrt_oneclass)
        )
        ygrid = np.arange(
            y_min, y_max,
            (y_max - y_min) / (fineness[1] * numdata_sqrt_oneclass)
        )
        locate_tags_dictlist = []
        temp = []
        xl, yl = (x_max - x_min) / fineness[0], (y_max - y_min) / fineness[1]
        for x in tqdm(xgrid):
            xlabel = (x - x_min) // xl
            for y in ygrid:
                ylabel = (y - y_min) // yl
                locate_tags_dictlist.append({
                    'labels': [int(ylabel * fineness[0] + xlabel)],
                    'locate': [x, y]
                })
                temp.append([x, y])
        mean, std = np.mean(temp, axis=0), np.std(temp, axis=0)
        return locate_tags_dictlist, (mean, std)
    @staticmethod
    def rep_dataset(category, phase='train', base_path='../datas/bases/'):
        """Build (coordinate -> representative tag indices) training samples.

        Args:
            category: dict mapping tag name -> class index.
            phase: 'train' reads the 'geo' column, anything else 'geo_val'.
            base_path: directory holding the pickled source dataframe.
        Returns:
            (locate_tags_dictlist, (mean, std)) of the source coordinates.
        """
        print('preparing dataset: {0} ...'.format(phase))
        lda = DH.loadPickle('local_df_area16_wocoth_new.pickle', base_path)
        locates = 'geo' if phase == 'train' else 'geo_val'
        locates = list(lda[locates])
        temp = [item for gl in locates for item in gl]
        mean = np.mean(temp, axis=0)
        std = np.std(temp, axis=0)
        locates = [item if len(item) >= 1 else [] for item in locates]
        tags = list(lda.index)
        # Invert the mapping: coordinate -> sorted list of tags seen there.
        temp_dict = {key: [] for item in locates for key in item}
        for item, tag in zip(locates, tags):
            for locate in item:
                temp_dict[locate].append(tag)
        for key, val in temp_dict.items():
            temp_dict[key] = sorted(list(set(val)))
        locate_tags_dictlist = []
        for key, val in tqdm(temp_dict.items()):
            temp = [category[label] for label in val if label in category]
            if temp:
                locate_tags_dictlist.append({
                    'labels': temp,
                    'locate': list(key)
                })
        return locate_tags_dictlist, (mean, std)
    @staticmethod
    def rep_mask(category, sim_thr=5, reverse=False, saved=True,
                 save_path='../datas/geo_rep/inputs/',
                 base_path='../datas/bases/'):
        """Mask tag pairs whose similarity is at or below sim_thr.

        Returns:
            (n, n) int matrix with 1 where the column class is masked for the
            row class; optionally pickled to save_path when saved is True.
        """
        print('calculating mask ...')
        repsnum = len(category)
        _mask = np.zeros((repsnum, repsnum), int)
        sim_dict = DH.loadJson('geo_rep_simdict', base_path)
        for tag1 in category:
            for tag2 in category:
                if tag1 == tag2:
                    continue
                # `reverse` flips the direction of the similarity lookup.
                sim = sim_dict[tag2][tag1] if reverse else sim_dict[tag1][tag2]
                if sim <= sim_thr:
                    _mask[category[tag1]][category[tag2]] = 1
        if saved:
            DH.savePickle(_mask, 'mask_{0}'.format(sim_thr), save_path)
        return _mask
    @staticmethod
    def down_dataset(rep_category, local_category, phase='train',
                     base_path='../datas/bases/'):
        """Build samples labelled only with non-representative ('down') tags.

        Args:
            rep_category: representative tag names (excluded from labels).
            local_category: ordered tag names; position defines the class id.
            phase: 'train' reads the 'geo' column, anything else 'geo_val'.
            base_path: directory holding the pickled source dataframe.
        """
        print('preparing dataset: {0} ...'.format(phase))
        gda = DH.loadPickle('geospatial_df_area16_wocoth.pickle', base_path)
        down_category = sorted(list(set(local_category) - set(rep_category)))
        locates = 'geo' if phase == 'train' else 'geo_val'
        locates = list(gda[locates])
        locates = [item if len(item) >= 1 else [] for item in locates]
        tags = list(gda.index)
        # Invert the mapping: coordinate -> sorted list of tags seen there.
        temp_dict = {key: [] for item in locates for key in item}
        for item, tag in zip(locates, tags):
            for locate in item:
                temp_dict[locate].append(tag)
        for key, val in temp_dict.items():
            temp_dict[key] = sorted(list(set(val)))
        tags_dict = {key: val for val, key in enumerate(local_category)}
        locate_tags_dictlist = []
        for key, val in tqdm(temp_dict.items()):
            temp = [tags_dict[label] for label in val if label in down_category]
            if temp:
                locate_tags_dictlist.append({
                    'labels': temp,
                    'locate': list(key)
                })
        return locate_tags_dictlist
    @staticmethod
    def _down_mask(rep_category, local_category, sim_thr=5, reverse=False,
                   saved=True, save_path='../datas/geo_down/inputs/',
                   base_path='../datas/bases/'):
        """Flat similarity mask over down tags only.

        NOTE(review): appears to be an older variant of down_mask — confirm
        before removing.
        """
        print('calculating mask ...')
        geo_category = {key: idx for idx, key in enumerate(local_category)}
        down_category = sorted(list(set(local_category) - set(rep_category)))
        num_classes = len(local_category)
        _mask = np.zeros((num_classes, num_classes), int)
        sim_dict = DH.loadJson('geo_all_sim', base_path)
        for tag1 in tqdm(down_category):
            for tag2 in down_category:
                if tag1 == tag2:
                    continue
                sim = sim_dict[tag2][tag1] if reverse else sim_dict[tag1][tag2]
                if sim <= sim_thr:
                    _mask[geo_category[tag1]][geo_category[tag2]] = 1
        if saved:
            DH.savePickle(_mask, 'mask_{0}'.format(sim_thr), save_path)
        return _mask
    @staticmethod
    def down_mask(rep_category, local_category, sim_thr=5, reverse=False,
                  saved=True, save_path='../datas/geo_down/inputs/',
                  base_path='../datas/bases/'):
        """Hierarchy-aware mask for down (non-representative) tags.

        For each down tag, masks its representative ancestors, tags similar
        to them, and their other descendants.
        NOTE(review): the `reverse` parameter is accepted but never used in
        this method — confirm whether that is intentional.
        """
        print('calculating mask ...')
        all_sim = DH.loadJson('geo_all_sim.json', base_path)
        gsp_dict = DH.loadPickle('geospatial_df_area16_wocoth', base_path)
        gsp_dict = gsp_dict.to_dict('index')
        rep_dict = DH.loadPickle('geo_rep_df_area16_kl5', base_path)
        rep_dict = rep_dict.to_dict('index')
        # comb_dict[t]: all tags whose similarity to t is <= sim_thr.
        comb_dict = {key: [] for key in local_category}
        for tag1 in local_category:
            for tag2 in local_category:
                if tag1 == tag2:
                    continue
                if all_sim[tag1][tag2] <= sim_thr:
                    comb_dict[tag1].append(tag2)
        # ---------------------------------------------------------------------
        lc = local_category
        down = sorted(list(set(lc) - set(rep_category)))
        tagsnum = len(lc)
        gspkeys = set(list(gsp_dict.keys()))
        lcset = set(lc)
        repset = set(rep_category)
        mask = np.zeros((tagsnum, tagsnum), int)
        for tag in tqdm(down):
            # Walk the 'geo_representative' links transitively to collect every
            # ancestor of `tag` that is also a local category.
            flgs = {tag}
            prev = set()
            checked = set()
            while flgs:
                for item in flgs:
                    checked.add(item)
                    prev = prev | set(gsp_dict[item]['geo_representative'])
                prev = prev & lcset
                flgs = (prev - checked) & gspkeys
            # Extend the ancestor set with representatives similar to any ancestor.
            exprev = set()
            for ptag in prev:
                if ptag in comb_dict:
                    exprev = exprev | (set(comb_dict[ptag]) & repset)
            prev = prev | exprev
            for ptag in prev:
                # Mask the ancestor, its similar tags, and its other descendants.
                mask[lc[tag]][lc[ptag]] = 1
                for ctag in comb_dict[ptag]:
                    mask[lc[tag]][lc[ctag]] = 1
                temp = set(rep_dict[ptag]['down']) & lcset
                for ttag in temp:
                    if ttag != tag:
                        mask[lc[tag]][lc[ttag]] = 1
                        if ttag in rep_dict:
                            ttagdown = set(rep_dict[ttag]['down']) & lcset
                            for tdtag in ttagdown:
                                if tdtag != tag:
                                    mask[lc[tag]][lc[tdtag]] = 1
            # Mask `tag`'s own descendants and similar tags.
            if tag in rep_dict:
                for rtag in rep_dict[tag]['down']:
                    if rtag in lcset and rtag != tag:
                        mask[lc[tag]][lc[rtag]] = 1
            for ctag in comb_dict[tag]:
                mask[lc[tag]][lc[ctag]] = 1
        np.fill_diagonal(mask, 0)
        if saved:
            DH.savePickle(mask, 'mask_{0}'.format(sim_thr), save_path)
        return mask
| [
"numpy.fill_diagonal",
"tqdm.tqdm",
"numpy.std",
"numpy.zeros",
"numpy.mean",
"numpy.arange",
"numpy.linspace"
] | [((540, 550), 'tqdm.tqdm', 'tqdm', (['data'], {}), '(data)\n', (544, 550), False, 'from tqdm import tqdm\n'), ((785, 797), 'tqdm.tqdm', 'tqdm', (['labels'], {}), '(labels)\n', (789, 797), False, 'from tqdm import tqdm\n'), ((1628, 1713), 'numpy.arange', 'np.arange', (['x_min', 'x_max', '((x_max - x_min) / (fineness[0] * numdata_sqrt_oneclass))'], {}), '(x_min, x_max, (x_max - x_min) / (fineness[0] * numdata_sqrt_oneclass)\n )\n', (1637, 1713), True, 'import numpy as np\n'), ((1759, 1844), 'numpy.arange', 'np.arange', (['y_min', 'y_max', '((y_max - y_min) / (fineness[1] * numdata_sqrt_oneclass))'], {}), '(y_min, y_max, (y_max - y_min) / (fineness[1] * numdata_sqrt_oneclass)\n )\n', (1768, 1844), True, 'import numpy as np\n'), ((2022, 2033), 'tqdm.tqdm', 'tqdm', (['xgrid'], {}), '(xgrid)\n', (2026, 2033), False, 'from tqdm import tqdm\n'), ((2861, 2882), 'numpy.mean', 'np.mean', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (2868, 2882), True, 'import numpy as np\n'), ((2897, 2917), 'numpy.std', 'np.std', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (2903, 2917), True, 'import numpy as np\n'), ((3949, 3982), 'numpy.zeros', 'np.zeros', (['(repsnum, repsnum)', 'int'], {}), '((repsnum, repsnum), int)\n', (3957, 3982), True, 'import numpy as np\n'), ((6197, 6238), 'numpy.zeros', 'np.zeros', (['(num_classes, num_classes)', 'int'], {}), '((num_classes, num_classes), int)\n', (6205, 6238), True, 'import numpy as np\n'), ((6317, 6336), 'tqdm.tqdm', 'tqdm', (['down_category'], {}), '(down_category)\n', (6321, 6336), False, 'from tqdm import tqdm\n'), ((7889, 7922), 'numpy.zeros', 'np.zeros', (['(tagsnum, tagsnum)', 'int'], {}), '((tagsnum, tagsnum), int)\n', (7897, 7922), True, 'import numpy as np\n'), ((7942, 7952), 'tqdm.tqdm', 'tqdm', (['down'], {}), '(down)\n', (7946, 7952), False, 'from tqdm import tqdm\n'), ((9391, 9416), 'numpy.fill_diagonal', 'np.fill_diagonal', (['mask', '(0)'], {}), '(mask, 0)\n', (9407, 9416), True, 'import numpy as np\n'), ((879, 981), 
'numpy.linspace', 'np.linspace', (['(x_min + xl * xlabel)', '(x_min + xl * (xlabel + 1))', 'numdata_sqrt_oneclass'], {'endpoint': '(False)'}), '(x_min + xl * xlabel, x_min + xl * (xlabel + 1),\n numdata_sqrt_oneclass, endpoint=False)\n', (890, 981), True, 'import numpy as np\n'), ((1044, 1146), 'numpy.linspace', 'np.linspace', (['(y_min + yl * ylabel)', '(y_min + yl * (ylabel + 1))', 'numdata_sqrt_oneclass'], {'endpoint': '(False)'}), '(y_min + yl * ylabel, y_min + yl * (ylabel + 1),\n numdata_sqrt_oneclass, endpoint=False)\n', (1055, 1146), True, 'import numpy as np\n'), ((2373, 2394), 'numpy.mean', 'np.mean', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (2380, 2394), True, 'import numpy as np\n'), ((2396, 2416), 'numpy.std', 'np.std', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (2402, 2416), True, 'import numpy as np\n')] |
import numpy as np
class NN:
    """
    Fully-connected neural network trained with full-batch gradient descent.

    Arguments:
        layers: List (of lists) of net layer sizes and activation functions, e.g.
            [[8,"relu"],
             [5,"relu"],
             [3,"relu"],
             [2, "sigmoid"]]
            Currently supported functions: "relu", "tanh", "sigmoid"
        data: input data, shape (num_features, num_examples)
        labels: labels, reshapeable to the output layer's shape
        learning_rate: learning rate
    Notes:
        - Need to pass data or array-like of similar shape on initialization for creation of first layer
        - Currently only works with sigmoid activation in the last layer due to
          the cost function partial derivative
    Uses heuristic initialization similar to Xavier (initial weights multiplied by np.sqrt(2/layer_sizes[i-1]))
    Uses cross-entropy cost
    """
    def __init__(self,
                 layers,
                 data,
                 labels,
                 learning_rate):
        self.layers = layers
        self.data = data
        self.labels = labels
        self.learning_rate = learning_rate
        self.params = self.init_params()
    def sigmoid(self, Z):
        """Elementwise logistic sigmoid; also returns Z to help with backprop."""
        return 1/(1+np.exp(-Z)), Z
    def d_sigmoid(self, dA, cache):
        """Backward pass through sigmoid: dZ = dA * s * (1 - s)."""
        s, _ = self.sigmoid(cache)
        dZ = dA * s * (1-s)
        assert (dZ.shape == cache.shape)
        return dZ
    def relu(self, Z):
        """Elementwise ReLU; also returns Z to help with backprop."""
        return Z.clip(min=0), Z
    def d_relu(self, dA, cache):
        """Backward pass through ReLU: zero the gradient where input was <= 0."""
        dZ = np.array(dA, copy=True)
        dZ[cache <= 0] = 0
        assert (dZ.shape == cache.shape)
        return dZ
    def tanh(self, Z):
        """Elementwise tanh via tanh(z) = 2*sigmoid(2z) - 1; also returns Z.

        BUG FIX: the original applied `* 2 - 1` to the (value, cache) tuple
        returned by sigmoid(), which raised a TypeError. Unpack the activation
        value first, then rescale.
        """
        s, _ = self.sigmoid(Z * 2)
        return s * 2 - 1, Z
    def d_tanh(self, dA, cache):
        """Backward pass through tanh: dZ = dA * (1 - tanh(z)^2)."""
        t, _ = self.tanh(cache)
        dZ = dA * (1 - t**2)
        assert (dZ.shape == cache.shape)
        return dZ
    def init_params(self):
        """He-style weight initialization; biases start at zero.

        BUG FIX: the original scaled weights by np.sqrt(2/self.data.shape[1])
        (the number of examples), contradicting the documented
        np.sqrt(2/layer_sizes[i-1]) fan-in heuristic; use the fan-in.
        """
        layer_sizes = [item[0] for item in self.layers]
        layer_sizes.insert(0, self.data.shape[0])
        params = {}
        for l in range(1,len(layer_sizes)):
            params['W' + str(l)] = np.random.randn(layer_sizes[l], layer_sizes[l-1]) * np.sqrt(2/layer_sizes[l-1])
            params['b' + str(l)] = np.zeros((layer_sizes[l], 1))
        return params
    def forward_linear_step(self, A, W, b):
        """Linear part of one layer: Z = W·A + b; returns Z and cache (A, W, b)."""
        Z = np.dot(W, A) + b
        return Z, (A, W, b)
    def forward_activation_step(self, A_prev, W, b, function):
        """One full layer: linear step followed by the named activation."""
        Z, lin_cache = self.forward_linear_step(A_prev, W, b)
        assert (function in ["relu", "sigmoid", "tanh"])
        A, act_cache = getattr(self, function)(Z)
        return A, (lin_cache, act_cache)
    def model_forward(self, X):
        """Forward pass through all layers; returns output AL and per-layer caches."""
        caches = []
        A = X
        funcs = [item[1] for item in self.layers]
        L = len(self.params) // 2
        assert (len(funcs) == L)
        for l in range(L):
            A_prev = A
            A, cache = self.forward_activation_step(A_prev, self.params['W' + str(l+1)], self.params['b' + str(l+1)], funcs[l])
            caches.append(cache)
        return A, caches
    def cross_entropy_cost(self, AL, Y):
        """Binary cross-entropy cost averaged over the batch."""
        cost = -np.mean(Y*np.log(AL) + (1-Y)*np.log(1-AL))
        cost = np.squeeze(cost)
        assert (cost.shape == ())
        return cost
    def backward_linear_step(self, dZ, cache):
        """Gradients of the linear step w.r.t. A_prev, W and b."""
        A_prev, W, b = cache
        m = A_prev.shape[1]
        dW = (1/m) * np.dot(dZ, A_prev.T)
        db = (1/m) * np.sum(dZ, axis=1, keepdims=True)
        dA_prev = np.dot(W.T, dZ)
        assert (dA_prev.shape == A_prev.shape)
        assert (dW.shape == W.shape)
        assert (db.shape == b.shape)
        return dA_prev, dW, db
    def backward_activation_step(self, dA, cache, function):
        """Backward pass through one layer (activation then linear)."""
        lin_cache, act_cache = cache
        assert (function in ["relu", "sigmoid", "tanh"])
        function = str("d_" + function)
        dZ = getattr(self, function)(dA, act_cache)
        dA_prev, dW, db = self.backward_linear_step(dZ, lin_cache)
        return dA_prev, dW, db
    def model_backward(self, AL, Y, caches):
        """Full backward pass; returns gradients for every layer's W and b."""
        grads = {}
        L = len(caches)
        Y = Y.reshape(AL.shape)
        # Derivative of the cross-entropy cost w.r.t. the sigmoid output AL.
        grads["dA" + str(L)] = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
        funcs = [item[1] for item in self.layers]
        assert (len(funcs) == L)
        for l in reversed(range(L)):
            current_cache = caches[l]
            dA_prev_temp, dW_temp, db_temp = self.backward_activation_step(grads["dA" + str(l+1)], current_cache, funcs[l])
            grads["dA" + str(l)] = dA_prev_temp
            grads["dW" + str(l + 1)] = dW_temp
            grads["db" + str(l + 1)] = db_temp
        return grads
    def gradient_descent_update(self, grads):
        """Plain gradient-descent parameter update."""
        L = len(self.params) // 2
        for l in range(L):
            self.params["W" + str(l+1)] = self.params["W" + str(l+1)] - grads["dW" + str(l+1)] * self.learning_rate
            self.params["b" + str(l+1)] = self.params["b" + str(l+1)] - grads["db" + str(l+1)] * self.learning_rate
    def train(self, iterations, verbose=False):
        """Run full-batch training; returns (costs every 100 iters, last grads)."""
        costs = []
        for i in range(0, iterations):
            AL, caches = self.model_forward(self.data)
            cost = self.cross_entropy_cost(AL, self.labels)
            grads = self.model_backward(AL, self.labels, caches)
            self.gradient_descent_update(grads)
            if i % 100 == 0:
                if verbose:
                    print("Cost after iteration %i: %f" % (i, cost), end='\r')
                costs.append(cost)
        return costs, grads
def minibatch_gen_from_pddf(data, target_label, batch_size, shuffle=True):
    """
    Args:
        data: data as pandas df
        target_label: target label column name in df
        batch_size: batch size
        shuffle: whether to shuffle the data.
    Yields:
        (features, targets) ndarray pairs in equal batches with the last one
        (possibly) shorter.
    Notes:
        BUG FIX: the original called ``data.pop(target_label)``, which removed
        the target column from the caller's DataFrame as a side effect. The
        input DataFrame is now left untouched.
    """
    target = np.array(data[target_label])
    features = np.array(data.drop(columns=[target_label]))
    if shuffle:
        perm = np.random.permutation(len(target))
        target, features = target[perm], features[perm]
    num_batches = int(np.ceil(len(target) / batch_size))
    for i in range(1, num_batches + 1):
        yield (features[(i-1)*batch_size:i*batch_size, :],
               target[(i-1)*batch_size:i*batch_size])
"numpy.divide",
"numpy.sum",
"numpy.log",
"numpy.random.randn",
"numpy.zeros",
"numpy.array",
"numpy.exp",
"numpy.squeeze",
"numpy.dot",
"numpy.sqrt"
] | [((6157, 6171), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (6165, 6171), True, 'import numpy as np\n'), ((1503, 1526), 'numpy.array', 'np.array', (['dA'], {'copy': '(True)'}), '(dA, copy=True)\n', (1511, 1526), True, 'import numpy as np\n'), ((3288, 3304), 'numpy.squeeze', 'np.squeeze', (['cost'], {}), '(cost)\n', (3298, 3304), True, 'import numpy as np\n'), ((3583, 3598), 'numpy.dot', 'np.dot', (['W.T', 'dZ'], {}), '(W.T, dZ)\n', (3589, 3598), True, 'import numpy as np\n'), ((2284, 2313), 'numpy.zeros', 'np.zeros', (['(layer_sizes[l], 1)'], {}), '((layer_sizes[l], 1))\n', (2292, 2313), True, 'import numpy as np\n'), ((2393, 2405), 'numpy.dot', 'np.dot', (['W', 'A'], {}), '(W, A)\n', (2399, 2405), True, 'import numpy as np\n'), ((3489, 3509), 'numpy.dot', 'np.dot', (['dZ', 'A_prev.T'], {}), '(dZ, A_prev.T)\n', (3495, 3509), True, 'import numpy as np\n'), ((3531, 3564), 'numpy.sum', 'np.sum', (['dZ'], {'axis': '(1)', 'keepdims': '(True)'}), '(dZ, axis=1, keepdims=True)\n', (3537, 3564), True, 'import numpy as np\n'), ((2167, 2218), 'numpy.random.randn', 'np.random.randn', (['layer_sizes[l]', 'layer_sizes[l - 1]'], {}), '(layer_sizes[l], layer_sizes[l - 1])\n', (2182, 2218), True, 'import numpy as np\n'), ((2219, 2250), 'numpy.sqrt', 'np.sqrt', (['(2 / self.data.shape[1])'], {}), '(2 / self.data.shape[1])\n', (2226, 2250), True, 'import numpy as np\n'), ((4325, 4341), 'numpy.divide', 'np.divide', (['Y', 'AL'], {}), '(Y, AL)\n', (4334, 4341), True, 'import numpy as np\n'), ((4344, 4368), 'numpy.divide', 'np.divide', (['(1 - Y)', '(1 - AL)'], {}), '(1 - Y, 1 - AL)\n', (4353, 4368), True, 'import numpy as np\n'), ((1165, 1175), 'numpy.exp', 'np.exp', (['(-Z)'], {}), '(-Z)\n', (1171, 1175), True, 'import numpy as np\n'), ((3240, 3250), 'numpy.log', 'np.log', (['AL'], {}), '(AL)\n', (3246, 3250), True, 'import numpy as np\n'), ((3259, 3273), 'numpy.log', 'np.log', (['(1 - AL)'], {}), '(1 - AL)\n', (3265, 3273), True, 'import numpy as np\n')] |
import sys
import gym
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import DQN.sparsemountaincar
from util.network import QNetworkBuilder
from util.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
# Command-line configuration for DQN training (TensorFlow flags).
flags.DEFINE_string('env_name', 'CartPole-v0', 'Environment name')
flags.DEFINE_string('save_path', './checkpoints', 'Save location for the model')
flags.DEFINE_string('log_path', './logs', 'Location to log training data')
flags.DEFINE_float('learning_rate', 0.001, 'network learning rate')
flags.DEFINE_float('gamma', 0.99, 'discount factor')
flags.DEFINE_integer('target_update', 10000, 'Steps before we update target network')
flags.DEFINE_integer('batch_size', 32, 'Number of training examples in the batch')
flags.DEFINE_integer('buffer_capacity', 50, 'Number of transitions to keep in replay buffer')
flags.DEFINE_integer('max_steps', 10, 'Maximum number of training steps')
flags.DEFINE_integer('max_episode_len', 10, 'Maximum length of each episode')
flags.DEFINE_integer('print_freq', 1, 'Episodes between displaying log info')
flags.DEFINE_integer('action_repeat', 1, 'Number of times to repeat action')
flags.DEFINE_string('load_path', None, 'Load location for the model')
flags.DEFINE_float('min_eps', 0.1, 'minimum for epsilon greedy exploration')
flags.DEFINE_float('max_eps', 1.0, 'maximum for epsilon greedy exploration')
flags.DEFINE_float('eps_decay', -1e-4, 'decay schedule for epsilon')  # negative: eps decays toward min_eps
flags.DEFINE_boolean('render', False, 'Render the environment during training')
flags.DEFINE_integer('seed', 1234, 'Random seed for reproducible results')
class TrainDQN:
    def __init__(self,
                 env,
                 sess,
                 learning_rate=1e-3,
                 seed=1234,
                 gamma=0.99,
                 max_eps=1.0,
                 min_eps=0.1,
                 render=False,
                 print_freq=20,
                 load_path=None,
                 save_path=None,
                 batch_size=32,
                 log_dir='logs/train',
                 max_steps=100000,
                 buffer_capacity=None,
                 max_episode_len=2000,
                 eps_decay_rate=-0.0001,
                 target_update_freq=1000,
                 ):
        """Trains an openai gym-like environment with deep q learning.
        Args:
            env: gym.Env where our agent resides
            sess: TensorFlow session used for all graph operations
            learning_rate: learning rate for the Q network
            seed: Random seed for reproducibility
            gamma: Discount factor
            max_eps: Starting exploration factor
            min_eps: Exploration factor to decay towards
            render: True to render the environment, else False
            print_freq: Displays logging information every 'print_freq' episodes
            load_path: (str) Path to load existing model from
            save_path: (str) Path to save model during training
            batch_size: Number of transitions sampled per learning step
            log_dir: Directory for TensorBoard summaries
            max_steps: maximum number of times to sample the environment
            buffer_capacity: How many state, action, next state, reward tuples the replay buffer should store
            max_episode_len: Maximum number of timesteps in an episode
            eps_decay_rate: lambda parameter in exponential decay for epsilon
            target_update_freq: Steps between target-network syncs
        """
        np.random.seed(seed)
        self.sess = sess
        self.env = env
        self.env_name = env.spec.id
        self.input_dim = env.observation_space.shape[0]
        self.output_dim = env.action_space.n
        self.max_steps = max_steps
        self.max_eps = max_eps
        self.min_eps = min_eps
        self.eps_decay_rate = eps_decay_rate
        self.max_episode_len = max_episode_len
        self.render = render
        self.print_freq = print_freq
        self.rewards = []
        self.metrics = []
        self.save_path = save_path
        self.load_path = load_path
        self.batch_size = batch_size
        self.num_updates = 0
        self.gamma = gamma
        # Default buffer size is half the total step budget.
        self.buffer = PrioritizedReplayBuffer(capacity=max_steps // 2 if buffer_capacity is None else buffer_capacity)
        self.target_update_freq = target_update_freq
        self.learning_rate = learning_rate
        # Two identical networks: an online Q network and a periodically
        # synced target network.
        with tf.variable_scope('q_network'):
            self.q_network = QNetworkBuilder(self.input_dim, self.output_dim, (64,))
        with tf.variable_scope('target_network'):
            self.target_network = QNetworkBuilder(self.input_dim, self.output_dim, (64,))
        # Ops that copy online-network weights into the target network.
        self.update_target_network = [old.assign(new) for (new, old) in
                                      zip(tf.trainable_variables('q_network'),
                                          tf.trainable_variables('target_network'))]
        self._add_summaries(log_dir)
    def _add_summaries(self, log_dir):
        """Create TensorBoard summaries for loss and mean predicted Q-value."""
        tf.summary.scalar('Loss', self.q_network.loss, )
        tf.summary.scalar('Mean Estimated Value', tf.reduce_mean(self.q_network.output_pred))
        # Merge all the summaries and write them out to log_dir
        self.merged = tf.summary.merge_all()
        self.train_writer = tf.summary.FileWriter(log_dir, self.sess.graph)
    def learn(self):
        """Learns via Deep-Q-Networks (DQN)"""
        obs = self.env.reset()
        mean_reward = None
        total_reward = 0
        ep = 0
        ep_len = 0
        rand_actions = 0
        for t in range(self.max_steps):
            # weight decay from https://jaromiru.com/2016/10/03/lets-make-a-dqn-implementation/
            eps = self.min_eps + (self.max_eps - self.min_eps) * np.exp(
                self.eps_decay_rate * t)
            if self.render:
                self.env.render()
            # Take exploratory action with probability epsilon
            if np.random.uniform() < eps:
                action = self.env.action_space.sample()
                rand_actions += 1
            else:
                action = self.act(obs)
            # Execute action in emulator and observe reward and next state
            new_obs, reward, done, info = self.env.step(action)
            # if reward > 0:
            #     print('Got the reward')
            total_reward += reward
            # Store transition s_t, a_t, r_t, s_t+1 in replay buffer
            self.buffer.add((obs, action, reward, new_obs, done))
            # Perform learning step
            self._update()
            obs = new_obs
            ep_len += 1
            if done or ep_len >= self.max_episode_len:
                # print("Episode Length:", ep_len)
                # print(f"Episode {ep} Reward:{total_reward}")
                # print(f"Random Action Percent: {rand_actions/ep_len}")
                ep += 1
                ep_len = 0
                rand_actions = 0
                self.rewards.append(total_reward)
                total_reward = 0
                obs = self.env.reset()
                if ep % self.print_freq == 0 and ep > 0:
                    new_mean_reward = np.mean(self.rewards[-self.print_freq - 1:])
                    print(f"-------------------------------------------------------")
                    print(f"Mean {self.print_freq} Episode Reward: {new_mean_reward}")
                    print(f"Exploration fraction: {eps}")
                    print(f"Total Episodes: {ep}")
                    print(f"Total timesteps: {t}")
                    print(f"-------------------------------------------------------")
                    # Add reward summary
                    summary = tf.Summary()
                    summary.value.add(tag=f'Mean {self.print_freq} Episode Reward',
                                      simple_value=new_mean_reward)
                    summary.value.add(tag=f'Epsilon', simple_value=eps)
                    self.train_writer.add_summary(summary, self.num_updates)
                    # Model saving inspired by Open AI Baseline implementation
                    if (mean_reward is None or new_mean_reward >= mean_reward) and self.save_path is not None:
                        print(f"Saving model due to mean reward increase:{mean_reward} -> {new_mean_reward}")
                        self.save()
                        mean_reward = new_mean_reward
    def act(self, observation):
        """Takes an action given the observation.
        Args:
            observation: observation from the environment
        Returns:
            integer index of the selected action
        """
        pred = self.sess.run([self.q_network.output_pred],
                             feed_dict={self.q_network.input_ph: np.reshape(observation, (1, self.input_dim))})
        return np.argmax(pred)
    def _update(self):
        """Applies gradients to the Q network computed from a minibatch of self.batch_size."""
        if self.batch_size <= len(self.buffer):
            self.num_updates += 1
            # Update the Q network with model parameters from the target network
            if self.num_updates % self.target_update_freq == 0:
                self.sess.run(self.update_target_network)
            # Sample random minibatch of transitions from the replay buffer
            sample = self.buffer.sample(self.batch_size)
            (tds, num, states, action, reward, next_states, done), inds = sample
            # Calculate discounted predictions for the subsequent states using target network
            next_state_pred = self.gamma * self.sess.run(self.target_network.output_pred,
                                                         feed_dict={self.target_network.input_ph: next_states}, )
            state_pred = self.sess.run(self.q_network.output_pred,
                                       feed_dict={self.q_network.input_ph: states}, )
            # Adjust the targets for non-terminal states
            reward = reward.reshape(len(reward), 1)
            targets = reward
            # Terminal transitions keep target = reward only.
            loc = np.argwhere(done != True).flatten()
            if len(loc) > 0:
                max_q = np.amax(next_state_pred, axis=1)
                targets[loc] = np.add(
                    targets[loc],
                    max_q[loc].reshape(max_q[loc].shape[0], 1),
                    casting='unsafe')
            # Compute TD Error for updating the prioritized replay buffer
            rang = np.arange(len(action))
            curr_q = state_pred[rang, action]
            td_error = np.abs(targets.flatten() - curr_q)
            self.buffer.update_priorities(indices=inds, tds=td_error)
            # Update discount factor and train model on batch
            _, loss = self.sess.run([self.q_network.opt, self.q_network.loss],
                                    feed_dict={self.q_network.input_ph: states,
                                               self.q_network.target_ph: targets.flatten(),
                                               self.q_network.action_indices_ph: action})
    def save(self):
        """Saves the Q network."""
        loc = f'{self.save_path}/{self.env_name}/{self.env_name}.ckpt'
        self.q_network.saver.save(self.sess, loc)
        print(f'Successfully saved model to: {loc}')
    def load(self):
        """Loads the Q network."""
        loc = f'{self.load_path}/{self.env_name}/{self.env_name}.ckpt'
        self.q_network.saver.restore(self.sess, loc)
        print(f'Successfully loaded model from {loc}')
    def plot_rewards(self, path=None):
        """Plots rewards per episode.
        Args:
            path: Location to save the rewards plot. If None, image will be displayed with plt.show()
        """
        plt.plot(self.rewards)
        plt.xlabel('Episode')
        plt.ylabel('Reward')
        if path is None:
            plt.show()
        else:
            plt.savefig(path)
            plt.close('all')
def main():
    """Build the environment and trainer from FLAGS, train, and plot rewards."""
    with tf.Session() as sess:
        env_name = FLAGS.env_name
        env = gym.make(env_name)
        dqn = TrainDQN(env=env,
                       sess=sess,
                       learning_rate=FLAGS.learning_rate,
                       gamma=FLAGS.gamma,
                       print_freq=FLAGS.print_freq,
                       target_update_freq=FLAGS.target_update,
                       batch_size=FLAGS.batch_size,
                       seed=FLAGS.seed,
                       buffer_capacity=FLAGS.buffer_capacity,
                       render=FLAGS.render,
                       max_steps=FLAGS.max_steps,
                       min_eps=FLAGS.min_eps,
                       max_eps=FLAGS.max_eps,
                       eps_decay_rate=FLAGS.eps_decay,
                       max_episode_len=FLAGS.max_episode_len,
                       log_dir=FLAGS.log_path,
                       save_path=FLAGS.save_path,
                       load_path=FLAGS.load_path)
        # save_path=f'checkpoints/{env_name}.ckpt')
        # NOTE(review): tf.initialize_all_variables is deprecated in TF1;
        # tf.global_variables_initializer is the modern equivalent.
        sess.run(tf.initialize_all_variables())
        dqn.learn()
        dqn.plot_rewards()
if __name__ == '__main__':
    main()
| [
"tensorflow.python.platform.flags.DEFINE_string",
"numpy.random.seed",
"tensorflow.trainable_variables",
"numpy.argmax",
"numpy.mean",
"tensorflow.python.platform.flags.DEFINE_float",
"numpy.exp",
"util.replay_buffer.PrioritizedReplayBuffer",
"tensorflow.Summary",
"matplotlib.pyplot.close",
"ten... | [((302, 368), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""env_name"""', '"""CartPole-v0"""', '"""Environment name"""'], {}), "('env_name', 'CartPole-v0', 'Environment name')\n", (321, 368), False, 'from tensorflow.python.platform import flags\n'), ((369, 454), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""save_path"""', '"""./checkpoints"""', '"""Save location for the model"""'], {}), "('save_path', './checkpoints',\n 'Save location for the model')\n", (388, 454), False, 'from tensorflow.python.platform import flags\n'), ((451, 525), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""log_path"""', '"""./logs"""', '"""Location to log training data"""'], {}), "('log_path', './logs', 'Location to log training data')\n", (470, 525), False, 'from tensorflow.python.platform import flags\n'), ((526, 593), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""learning_rate"""', '(0.001)', '"""network learning rate"""'], {}), "('learning_rate', 0.001, 'network learning rate')\n", (544, 593), False, 'from tensorflow.python.platform import flags\n'), ((594, 646), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""gamma"""', '(0.99)', '"""discount factor"""'], {}), "('gamma', 0.99, 'discount factor')\n", (612, 646), False, 'from tensorflow.python.platform import flags\n'), ((647, 736), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""target_update"""', '(10000)', '"""Steps before we update target network"""'], {}), "('target_update', 10000,\n 'Steps before we update target network')\n", (667, 736), False, 'from tensorflow.python.platform import flags\n'), ((733, 819), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""batch_size"""', '(32)', '"""Number of training examples in the batch"""'], {}), "('batch_size', 32,\n 'Number of training examples in the 
batch')\n", (753, 819), False, 'from tensorflow.python.platform import flags\n'), ((816, 913), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""buffer_capacity"""', '(50)', '"""Number of transitions to keep in replay buffer"""'], {}), "('buffer_capacity', 50,\n 'Number of transitions to keep in replay buffer')\n", (836, 913), False, 'from tensorflow.python.platform import flags\n'), ((910, 983), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_steps"""', '(10)', '"""Maximum number of training steps"""'], {}), "('max_steps', 10, 'Maximum number of training steps')\n", (930, 983), False, 'from tensorflow.python.platform import flags\n'), ((984, 1061), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_episode_len"""', '(10)', '"""Maximum length of each episode"""'], {}), "('max_episode_len', 10, 'Maximum length of each episode')\n", (1004, 1061), False, 'from tensorflow.python.platform import flags\n'), ((1062, 1139), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""print_freq"""', '(1)', '"""Episodes between displaying log info"""'], {}), "('print_freq', 1, 'Episodes between displaying log info')\n", (1082, 1139), False, 'from tensorflow.python.platform import flags\n'), ((1140, 1216), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""action_repeat"""', '(1)', '"""Number of times to repeat action"""'], {}), "('action_repeat', 1, 'Number of times to repeat action')\n", (1160, 1216), False, 'from tensorflow.python.platform import flags\n'), ((1217, 1286), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""load_path"""', 'None', '"""Load location for the model"""'], {}), "('load_path', None, 'Load location for the model')\n", (1236, 1286), False, 'from tensorflow.python.platform import flags\n'), ((1287, 1363), 'tensorflow.python.platform.flags.DEFINE_float', 
'flags.DEFINE_float', (['"""min_eps"""', '(0.1)', '"""minimum for epsilon greedy exploration"""'], {}), "('min_eps', 0.1, 'minimum for epsilon greedy exploration')\n", (1305, 1363), False, 'from tensorflow.python.platform import flags\n'), ((1364, 1440), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""max_eps"""', '(1.0)', '"""maximum for epsilon greedy exploration"""'], {}), "('max_eps', 1.0, 'maximum for epsilon greedy exploration')\n", (1382, 1440), False, 'from tensorflow.python.platform import flags\n'), ((1441, 1511), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""eps_decay"""', '(-0.0001)', '"""decay schedule for epsilon"""'], {}), "('eps_decay', -0.0001, 'decay schedule for epsilon')\n", (1459, 1511), False, 'from tensorflow.python.platform import flags\n'), ((1510, 1589), 'tensorflow.python.platform.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""render"""', '(False)', '"""Render the environment during training"""'], {}), "('render', False, 'Render the environment during training')\n", (1530, 1589), False, 'from tensorflow.python.platform import flags\n'), ((1590, 1664), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""seed"""', '(1234)', '"""Random seed for reproducible results"""'], {}), "('seed', 1234, 'Random seed for reproducible results')\n", (1610, 1664), False, 'from tensorflow.python.platform import flags\n'), ((3421, 3441), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3435, 3441), True, 'import numpy as np\n'), ((4119, 4219), 'util.replay_buffer.PrioritizedReplayBuffer', 'PrioritizedReplayBuffer', ([], {'capacity': '(max_steps // 2 if buffer_capacity is None else buffer_capacity)'}), '(capacity=max_steps // 2 if buffer_capacity is None else\n buffer_capacity)\n', (4142, 4219), False, 'from util.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer\n'), ((4904, 4950), 'tensorflow.summary.scalar', 'tf.summary.scalar', 
(['"""Loss"""', 'self.q_network.loss'], {}), "('Loss', self.q_network.loss)\n", (4921, 4950), True, 'import tensorflow as tf\n'), ((5133, 5155), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (5153, 5155), True, 'import tensorflow as tf\n'), ((5184, 5231), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['log_dir', 'self.sess.graph'], {}), '(log_dir, self.sess.graph)\n', (5205, 5231), True, 'import tensorflow as tf\n'), ((8734, 8749), 'numpy.argmax', 'np.argmax', (['pred'], {}), '(pred)\n', (8743, 8749), True, 'import numpy as np\n'), ((11649, 11671), 'matplotlib.pyplot.plot', 'plt.plot', (['self.rewards'], {}), '(self.rewards)\n', (11657, 11671), True, 'import matplotlib.pyplot as plt\n'), ((11680, 11701), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (11690, 11701), True, 'import matplotlib.pyplot as plt\n'), ((11710, 11730), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reward"""'], {}), "('Reward')\n", (11720, 11730), True, 'import matplotlib.pyplot as plt\n'), ((11875, 11887), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (11885, 11887), True, 'import tensorflow as tf\n'), ((11945, 11963), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (11953, 11963), False, 'import gym\n'), ((4326, 4356), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""q_network"""'], {}), "('q_network')\n", (4343, 4356), True, 'import tensorflow as tf\n'), ((4387, 4442), 'util.network.QNetworkBuilder', 'QNetworkBuilder', (['self.input_dim', 'self.output_dim', '(64,)'], {}), '(self.input_dim, self.output_dim, (64,))\n', (4402, 4442), False, 'from util.network import QNetworkBuilder\n'), ((4456, 4491), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""target_network"""'], {}), "('target_network')\n", (4473, 4491), True, 'import tensorflow as tf\n'), ((4527, 4582), 'util.network.QNetworkBuilder', 'QNetworkBuilder', (['self.input_dim', 'self.output_dim', '(64,)'], {}), 
'(self.input_dim, self.output_dim, (64,))\n', (4542, 4582), False, 'from util.network import QNetworkBuilder\n'), ((5003, 5045), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.q_network.output_pred'], {}), '(self.q_network.output_pred)\n', (5017, 5045), True, 'import tensorflow as tf\n'), ((11768, 11778), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11776, 11778), True, 'import matplotlib.pyplot as plt\n'), ((11805, 11822), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (11816, 11822), True, 'import matplotlib.pyplot as plt\n'), ((11835, 11851), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (11844, 11851), True, 'import matplotlib.pyplot as plt\n'), ((12919, 12948), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (12946, 12948), True, 'import tensorflow as tf\n'), ((5834, 5853), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5851, 5853), True, 'import numpy as np\n'), ((10067, 10099), 'numpy.amax', 'np.amax', (['next_state_pred'], {'axis': '(1)'}), '(next_state_pred, axis=1)\n', (10074, 10099), True, 'import numpy as np\n'), ((4697, 4732), 'tensorflow.trainable_variables', 'tf.trainable_variables', (['"""q_network"""'], {}), "('q_network')\n", (4719, 4732), True, 'import tensorflow as tf\n'), ((4776, 4816), 'tensorflow.trainable_variables', 'tf.trainable_variables', (['"""target_network"""'], {}), "('target_network')\n", (4798, 4816), True, 'import tensorflow as tf\n'), ((5644, 5675), 'numpy.exp', 'np.exp', (['(self.eps_decay_rate * t)'], {}), '(self.eps_decay_rate * t)\n', (5650, 5675), True, 'import numpy as np\n'), ((7073, 7117), 'numpy.mean', 'np.mean', (['self.rewards[-self.print_freq - 1:]'], {}), '(self.rewards[-self.print_freq - 1:])\n', (7080, 7117), True, 'import numpy as np\n'), ((7610, 7622), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (7620, 7622), True, 'import tensorflow as tf\n'), ((8672, 8716), 'numpy.reshape', 
'np.reshape', (['observation', '(1, self.input_dim)'], {}), '(observation, (1, self.input_dim))\n', (8682, 8716), True, 'import numpy as np\n'), ((9978, 10003), 'numpy.argwhere', 'np.argwhere', (['(done != True)'], {}), '(done != True)\n', (9989, 10003), True, 'import numpy as np\n')] |
import cv2
import numpy as np
# Load the reference image in color (flag 1 == cv2.IMREAD_COLOR, BGR channel order).
im = cv2.imread("./example.png", 1)
# ch = im[:, :, 0]
# n_bins = 256.
# hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
#
def equalize_func(img):
    '''
    Histogram-equalize each channel of ``img``.

    Same output as PIL.ImageOps.equalize; PIL's implementation differs from
    cv2.equalizeHist in that it builds the cumulative lookup table with a
    half-step offset, reproduced below.

    Args:
        img: H x W x C uint8 image.
    Returns:
        uint8 image of the same shape with each channel equalized.
    '''
    n_bins = 256

    def tune_channel(ch):
        # 256-bin histogram of the single channel.
        hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
        non_zero_hist = hist[hist != 0].reshape(-1)
        # PIL's step size: total count excluding the last non-zero bin,
        # spread over the 255 output levels.
        step = np.sum(non_zero_hist[:-1]) // (n_bins - 1)
        if step == 0:
            # Degenerate histogram (nearly constant channel): leave unchanged.
            return ch
        # Cumulative lookup table with a half-step offset, exactly as PIL does.
        n = np.empty_like(hist)
        n[0] = step // 2
        n[1:] = hist[:-1]
        table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8)
        return table[ch]

    return cv2.merge([tune_channel(ch) for ch in cv2.split(img)])
def autocontrast_func(img, cutoff=0):
    '''
    Maximize (normalize) image contrast per channel.

    Same output as PIL.ImageOps.autocontrast: the darkest and lightest
    ``cutoff`` percent of pixels are saturated and the remaining intensity
    range is linearly stretched to span [0, 255].

    Args:
        img: H x W x C uint8 image.
        cutoff: percent of pixels to clip at each end of the histogram.
    Returns:
        uint8 image of the same shape with stretched contrast.
    '''
    n_bins = 256

    def tune_channel(ch):
        n = ch.size
        cut = cutoff * n // 100
        if cut == 0:
            # No clipping requested: stretch between the actual min and max.
            high, low = ch.max().astype(np.int64), ch.min().astype(np.int64)
        else:
            # Find the intensity levels at which `cut` pixels are discarded
            # from each end of the histogram.
            hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
            low = np.argwhere(np.cumsum(hist) > cut)
            low = 0 if low.shape[0] == 0 else low[0]
            high = np.argwhere(np.cumsum(hist[::-1]) > cut)
            high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0]
        if high <= low:
            # Empty or inverted range: identity mapping.
            table = np.arange(n_bins)
        else:
            scale = (n_bins - 1) / (high - low)
            offset = (-low) * scale
            table = np.arange(n_bins) * scale + offset
            table[table < 0] = 0
            table[table > n_bins - 1] = n_bins - 1
        table = table.clip(0, 255).astype(np.uint8)
        return table[ch]

    return cv2.merge([tune_channel(ch) for ch in cv2.split(img)])
# Reference output produced by the C++ implementation, stored as a flat uint8
# buffer — presumably in CHW order, matching the transpose below; TODO confirm.
imcpp = np.fromfile('./build/res_cpp.bin', dtype=np.uint8)
# Re-layout the OpenCV image from HWC to CHW and flatten to match the C++ buffer.
imcv = im.transpose((2, 0, 1)).ravel()
print(np.sum(imcv.ravel() - imcpp))
for i in range(10):
    # hist = equalize_func(im)
    hist = autocontrast_func(im, cutoff=0)
    # NOTE(review): uint8 subtraction wraps modulo 256, so a printed sum of 0
    # is only a weak byte-for-byte equality check against the C++ result.
    print(np.sum(imcpp - hist.ravel()))
# print(hist.astype(np.int64).ravel())
# print(hist.astype(np.int64).ravel().dtype)
# print(np.bincount(ch.ravel(), minlength=256))
| [
"numpy.sum",
"numpy.fromfile",
"cv2.calcHist",
"numpy.empty_like",
"cv2.imread",
"numpy.cumsum",
"cv2.split",
"numpy.arange",
"cv2.merge"
] | [((37, 67), 'cv2.imread', 'cv2.imread', (['"""./example.png"""', '(1)'], {}), "('./example.png', 1)\n", (47, 67), False, 'import cv2\n'), ((2905, 2955), 'numpy.fromfile', 'np.fromfile', (['"""./build/res_cpp.bin"""'], {'dtype': 'np.uint8'}), "('./build/res_cpp.bin', dtype=np.uint8)\n", (2916, 2955), True, 'import numpy as np\n'), ((1131, 1150), 'cv2.merge', 'cv2.merge', (['channels'], {}), '(channels)\n', (1140, 1150), False, 'import cv2\n'), ((2860, 2879), 'cv2.merge', 'cv2.merge', (['channels'], {}), '(channels)\n', (2869, 2879), False, 'import cv2\n'), ((389, 441), 'cv2.calcHist', 'cv2.calcHist', (['[ch]', '[0]', 'None', '[n_bins]', '[0, n_bins]'], {}), '([ch], [0], None, [n_bins], [0, n_bins])\n', (401, 441), False, 'import cv2\n'), ((596, 615), 'numpy.empty_like', 'np.empty_like', (['hist'], {}), '(hist)\n', (609, 615), True, 'import numpy as np\n'), ((509, 535), 'numpy.sum', 'np.sum', (['non_zero_hist[:-1]'], {}), '(non_zero_hist[:-1])\n', (515, 535), True, 'import numpy as np\n'), ((1512, 1564), 'cv2.calcHist', 'cv2.calcHist', (['[ch]', '[0]', 'None', '[n_bins]', '[0, n_bins]'], {}), '([ch], [0], None, [n_bins], [0, n_bins])\n', (1524, 1564), False, 'import cv2\n'), ((1853, 1870), 'numpy.arange', 'np.arange', (['n_bins'], {}), '(n_bins)\n', (1862, 1870), True, 'import numpy as np\n'), ((2615, 2629), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (2624, 2629), False, 'import cv2\n'), ((884, 898), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (893, 898), False, 'import cv2\n'), ((1595, 1610), 'numpy.cumsum', 'np.cumsum', (['hist'], {}), '(hist)\n', (1604, 1610), True, 'import numpy as np\n'), ((1702, 1723), 'numpy.cumsum', 'np.cumsum', (['hist[::-1]'], {}), '(hist[::-1])\n', (1711, 1723), True, 'import numpy as np\n'), ((1989, 2006), 'numpy.arange', 'np.arange', (['n_bins'], {}), '(n_bins)\n', (1998, 2006), True, 'import numpy as np\n'), ((684, 696), 'numpy.cumsum', 'np.cumsum', (['n'], {}), '(n)\n', (693, 696), True, 'import numpy as np\n')] |
from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node
from nbdt.utils import (
DEFAULT_CIFAR10_TREE, DEFAULT_CIFAR10_WNIDS, DEFAULT_CIFAR100_TREE,
DEFAULT_CIFAR100_WNIDS, DEFAULT_TINYIMAGENET200_TREE,
DEFAULT_TINYIMAGENET200_WNIDS, DEFAULT_IMAGENET1000_TREE,
DEFAULT_IMAGENET1000_WNIDS, set_np_printoptions
)
from nbdt.loss import HardTreeSupLoss, SoftTreeSupLoss
from nbdt.data.custom import Node
from generate_vis import generate_vis, build_tree
from networkx.readwrite.json_graph import node_link_data, node_link_graph
import torch
import torch.nn as nn
import numpy as np
import csv
import networkx as nx
import os
import json
import wandb
import pandas as pd
from saliency.RISE.explanations import RISE
from saliency.RISE.utils import get_cam
from saliency.Grad_CAM.gcam import GradCAM
from PIL import Image
import cv2
# Public analyzer classes exposed by this module; `names` aliases __all__ so
# callers can resolve an analyzer from its string name.
__all__ = names = (
    'Noop', 'ConfusionMatrix', 'HardEmbeddedDecisionRules', 'SoftEmbeddedDecisionRules',
    'SingleInference', 'HardFullTreePrior', 'HardTrackNodes', 'SoftFullTreePrior', 'SoftTrackDepth', 'SoftFullTreeOODPrior',
    'SingleRISE', 'SingleGradCAM')

# Constructor-argument names analyzers may accept (cf. the `accepts_*` class
# attributes below) — presumably used by the caller to decide which parsed CLI
# values to forward into each analyzer; verify against the training script.
keys = ('path_graph', 'path_graph_analysis', 'path_wnids', 'weighted_average',
        'trainset', 'testset', 'json_save_path', 'experiment_name', 'csv_save_path', 'ignore_labels',
        'oodset', 'ood_path_wnids')
def add_arguments(parser):
    """Register the analysis-specific command-line flags on *parser*."""
    # Plain string-valued options share the same shape; register them in bulk.
    string_options = (
        ('--json-save-path', 'Directory to save jsons under for full tree analysis'),
        ('--csv-save-path', 'Directory to save jsons under for full tree analysis'),
        ('--path-graph-analysis', 'path for graph for analysis'),
    )
    for flag, description in string_options:
        parser.add_argument(flag, default=None, type=str, help=description)
    # Multi-valued options.
    parser.add_argument('--track-nodes', default=None, type=str, nargs='*',
                        help='node wnids to track')
    parser.add_argument('--ignore-labels', nargs='*', type=int,
                        help='node label indices to ignore for zeroshot')
class Noop:
    """Base analyzer: stores the datasets and tracks the current epoch while
    every training/testing hook is a no-op for subclasses to override.
    """
    accepts_trainset = lambda trainset, **kwargs: trainset
    accepts_testset = lambda testset, **kwargs: testset

    def __init__(self, trainset, testset, experiment_name,
                 use_wandb=False, run_name="Noop"):
        set_np_printoptions()
        self.trainset = trainset
        self.testset = testset
        self.use_wandb = use_wandb
        if self.use_wandb:
            # One wandb run per analyzer instance.
            wandb.init(project=experiment_name, name=run_name, reinit=True, entity='lisadunlap')
        # No current epoch until start_epoch() is called.
        self.epoch = None

    def _assert_current_epoch(self, epoch):
        # Guard: every hook must be invoked with the epoch set via start_epoch().
        assert epoch == self.epoch

    def start_epoch(self, epoch):
        self.epoch = epoch

    def start_train(self, epoch):
        self._assert_current_epoch(epoch)

    def update_batch(self, outputs, predicted, targets):
        pass

    def end_train(self, epoch):
        self._assert_current_epoch(epoch)

    def start_test(self, epoch):
        self._assert_current_epoch(epoch)

    def end_test(self, epoch):
        self._assert_current_epoch(epoch)

    def end_epoch(self, epoch):
        self._assert_current_epoch(epoch)

    def write_to_csv(self, path):
        pass
class ConfusionMatrix(Noop):
    """Accumulates a k x k confusion matrix over the test set and prints
    per-class recall when testing ends.
    """

    def __init__(self, trainset, testset, experiment_name, use_wandb=False):
        super().__init__(trainset, testset, experiment_name, use_wandb)
        self.k = len(trainset.classes)  # number of classes
        self.m = None                   # confusion matrix, allocated in start_test

    def start_train(self, epoch):
        super().start_train(epoch)
        raise NotImplementedError()

    def start_test(self, epoch):
        super().start_test(epoch)
        # Rows are ground-truth labels, columns are predictions.
        self.m = np.zeros((self.k, self.k))

    def update_batch(self, outputs, predicted, targets):
        super().update_batch(outputs, predicted, targets)
        if len(predicted.shape) == 1:
            predicted = predicted.numpy().ravel()
            targets = targets.numpy().ravel()
        ConfusionMatrix.update(self.m, predicted, targets)

    def end_test(self, epoch):
        super().end_test(epoch)
        recall = self.recall()
        for row, cls in zip(recall, self.trainset.classes):
            print(row, cls)
        print(recall.diagonal(), '(diagonal)')

    @staticmethod
    def update(confusion_matrix, preds, labels):
        """Increment confusion_matrix[label, pred] for each (pred, label) pair."""
        preds = tuple(preds)
        labels = tuple(labels)
        for pred, label in zip(preds, labels):
            confusion_matrix[label, pred] += 1

    @staticmethod
    def normalize(confusion_matrix, axis):
        """Normalize counts along `axis` (1 -> recall rows, 0 -> precision cols).

        BUGFIX: the deprecated alias `np.float` was removed in NumPy 1.24;
        cast with the builtin float instead.
        """
        total = confusion_matrix.astype(float).sum(axis=axis)
        total = total[:, None] if axis == 1 else total[None]
        return confusion_matrix / total

    def recall(self):
        return ConfusionMatrix.normalize(self.m, 1)

    def precision(self):
        return ConfusionMatrix.normalize(self.m, 0)
class HardEmbeddedDecisionRules(Noop):
    """Evaluation is hard: each sample is routed from the tree root by taking
    the argmax child at every visited node, and counts as correct only if the
    traversal terminates at the ground-truth class's leaf.
    """
    accepts_path_graph = True
    accepts_path_wnids = True
    accepts_weighted_average = True
    accepts_ignore_labels = True

    def __init__(self, trainset, testset, experiment_name, path_graph, path_wnids,
                 ignore_labels=None, weighted_average=False, use_wandb=False,
                 run_name="HardEmbeddedDecisionRules"):
        super().__init__(trainset, testset, experiment_name, use_wandb,
                         run_name=run_name)
        self.nodes = Node.get_nodes(path_graph, path_wnids, trainset.classes)
        self.G = self.nodes[0].G
        self.wnid_to_node = {node.wnid: node for node in self.nodes}
        # BUGFIX: the default used to be a shared mutable list (`ignore_labels=[]`),
        # aliased across every instance constructed without the argument.
        self.ignore_labels = [] if ignore_labels is None else ignore_labels
        self.wnids = get_wnids(path_wnids)
        self.classes = trainset.classes
        self.wnid_to_class = {wnid: cls for wnid, cls in zip(self.wnids, self.classes)}
        self.weighted_average = weighted_average
        self.correct = 0
        self.total = 0
        self.class_accuracies = {c: 0 for c in self.classes}
        self.class_totals = {c: 0 for c in self.classes}

    def update_batch(self, outputs, predicted, targets):
        """Compute per-node argmax decisions for the batch, traverse the tree,
        and update running counters; returns #correct samples in the batch."""
        super().update_batch(outputs, predicted, targets)
        wnid_to_pred_selector = {}
        for node in self.nodes:
            # `selector` marks which batch samples reach this node under hard routing.
            selector, outputs_sub, targets_sub = HardTreeSupLoss.inference(
                node, outputs, targets, self.weighted_average)
            if not any(selector):
                continue
            _, preds_sub = torch.max(outputs_sub, dim=1)
            preds_sub = list(map(int, preds_sub.cpu()))
            wnid_to_pred_selector[node.wnid] = (preds_sub, selector)
        n_samples = outputs.size(0)
        predicted = self.traverse_tree(
            predicted, wnid_to_pred_selector, n_samples).to(targets.device)
        self.total += n_samples
        self.correct += (predicted == targets).sum().item()
        for i in range(len(predicted)):
            self.class_accuracies[self.classes[predicted[i]]] += int(predicted[i] == targets[i])
            self.class_totals[self.classes[targets[i]]] += 1
        return (predicted == targets).sum().item()

    def traverse_tree(self, _, wnid_to_pred_selector, n_samples):
        """Route each sample root-to-leaf; returns a LongTensor of predicted
        class indices, with -1 for samples whose traversal dead-ends."""
        wnid_root = get_root(self.G)
        node_root = self.wnid_to_node[wnid_root]
        preds = []
        for index in range(n_samples):
            wnid, node = wnid_root, node_root
            while node is not None:
                if node.wnid not in wnid_to_pred_selector:
                    wnid = node = None
                    break
                pred_sub, selector = wnid_to_pred_selector[node.wnid]
                if not selector[index]:  # we took a wrong turn. wrong.
                    wnid = node = None
                    break
                # Position of this sample among those that reached the node.
                index_new = sum(selector[:index + 1]) - 1
                index_child = pred_sub[index_new]
                wnid = node.children[index_child]
                node = self.wnid_to_node.get(wnid, None)
            cls = self.wnid_to_class.get(wnid, None)
            pred = -1 if cls is None else self.classes.index(cls)
            preds.append(pred)
        return torch.Tensor(preds).long()

    def end_test(self, epoch):
        """Print the final NBDT-hard accuracy."""
        super().end_test(epoch)
        accuracy = round(self.correct / self.total * 100., 2)
        print(f'NBDT-Hard Accuracy: {accuracy}%, {self.correct}/{self.total}')
class SoftEmbeddedDecisionRules(HardEmbeddedDecisionRules):
    """Evaluation is soft: per-class probabilities are aggregated over the full
    tree (Bayesian path products) and the prediction is the argmax of the
    aggregated distribution.
    """

    def __init__(self, trainset, testset, experiment_name, path_graph, path_wnids,
                 weighted_average=False, use_wandb=False,
                 run_name="SoftEmbeddedDecisionRules"):
        # BUGFIX: the original passed `use_wandb` positionally, where it landed
        # in the parent's `ignore_labels` slot, and silently dropped
        # `weighted_average`. Forward everything by keyword instead.
        super().__init__(trainset, testset, experiment_name, path_graph, path_wnids,
                         weighted_average=weighted_average, use_wandb=use_wandb,
                         run_name=run_name)
        self.num_classes = len(trainset.classes)

    def update_batch(self, outputs, predicted, targets):
        """Aggregate leaf probabilities via soft inference and update running
        counters; returns #correct samples in the batch."""
        bayesian_outputs = SoftTreeSupLoss.inference(
            self.nodes, outputs, self.num_classes, self.weighted_average)
        n_samples = outputs.size(0)
        predicted = bayesian_outputs.max(1)[1].to(targets.device)
        self.total += n_samples
        self.correct += (predicted == targets).sum().item()
        for i in range(len(predicted)):
            self.class_accuracies[self.classes[predicted[i]]] += int(predicted[i] == targets[i])
            self.class_totals[self.classes[targets[i]]] += 1
        return (predicted == targets).sum().item()

    def end_test(self, epoch):
        """Print the final NBDT-soft accuracy and log per-class accuracy to wandb."""
        accuracy = round(self.correct / self.total * 100., 2)
        print(f'NBDT-Soft Accuracy: {accuracy}%, {self.correct}/{self.total}')
        if self.use_wandb:
            data = [[(self.class_accuracies[k] / self.class_totals[k]) * 100 for k in self.class_accuracies.keys()]]
            wandb.log({"class accuracies": wandb.Table(data=data, columns=self.classes)})
class SingleInference(HardEmbeddedDecisionRules):
    """Run hard tree inference on a single image and log the root-to-leaf path."""

    def __init__(self, trainset, testset, experiment_name, path_graph, path_wnids,
                 weighted_average=False, use_wandb=False, run_name="SingleInference"):
        # BUGFIX: forward keyword arguments explicitly — the original passed
        # `use_wandb` positionally, where it landed in the parent's
        # `ignore_labels` slot, and `weighted_average` was silently dropped.
        super().__init__(trainset, testset, experiment_name, path_graph, path_wnids,
                         weighted_average=weighted_average, use_wandb=use_wandb,
                         run_name=run_name)
        # Precompute the root-to-leaf wnid path for every class.
        get_path = lambda wnid: nx.shortest_path(self.G, source=get_root(self.G), target=wnid)
        self.paths = {self.wnid_to_class[wnid]: get_path(wnid) for wnid in self.wnids}
        self.num_classes = len(trainset.classes)

    def single_traversal(self, _, wnid_to_pred_selector):
        """Walk the tree from the root following argmax child predictions;
        returns the list of visited wnids (root first)."""
        wnid_root = get_root(self.G)
        node_root = self.wnid_to_node[wnid_root]
        wnid, node = wnid_root, node_root
        path = [wnid]
        while node is not None:
            if node.wnid not in wnid_to_pred_selector:
                wnid = node = None
                break
            pred_sub, selector = wnid_to_pred_selector[node.wnid]
            index_new = sum(selector[:0 + 1]) - 1
            index_child = pred_sub[index_new]
            wnid = node.children[index_child]
            path.append(wnid)
            node = self.wnid_to_node.get(wnid, None)
        return path

    def inf(self, img, outputs):
        """Classify one image: compute per-node argmax predictions, traverse the
        tree, log the image and its path to wandb, and print the predicted class."""
        wnid_to_pred_selector = {}
        for node in self.nodes:
            outputs_sub = HardTreeSupLoss.get_output_sub(
                outputs, node, self.weighted_average)
            # A single image reaches every node, so the selector is all ones.
            selector = [1 for c in range(node.num_classes)]
            if not any(selector):
                continue
            _, preds_sub = torch.max(outputs_sub, dim=1)
            preds_sub = list(map(int, preds_sub.cpu()))
            wnid_to_pred_selector[node.wnid] = (preds_sub, selector)
        predicted = self.single_traversal(
            [], wnid_to_pred_selector)
        wandb.log({"examples": [wandb.Image(torch.squeeze(img).cpu().numpy().transpose((1, 2, 0)), caption=str(predicted))]})
        cls = self.wnid_to_class.get(predicted[-1], None)
        pred = -1 if cls is None else self.classes.index(cls)
        print("class: ", pred)
        print("inference: ", predicted)
class HardFullTreePrior(Noop):
    """Run the hard tree prior over the FULL tree for every test sample,
    recording the leaf each sample lands in and per-class node visit counts,
    then export the results as a CSV and per-class weighted-tree JSON/HTML.
    """
    accepts_path_graph = True
    accepts_path_wnids = True
    accepts_json_save_path = True
    accepts_weighted_average = True
    accepts_ignore_labels = True
    """Evaluates model on a decision tree prior. Evaluation is deterministic."""
    """Evaluates on entire tree, tracks all paths."""
    def __init__(self, trainset, testset, experiment_name, path_graph, path_wnids, ignore_labels=[],
                 json_save_path='./out/full_tree_analysis/', csv_save_path='./out/cifar100.csv',
                 weighted_average=False, use_wandb=False, run_name="HardFullTreePrior"):
        # NOTE(review): `ignore_labels=[]` is a shared mutable default — safe
        # only as long as no caller mutates the stored list; confirm.
        super().__init__(trainset, testset, experiment_name, use_wandb, run_name=run_name)
        # weird, sometimes self.classes are wnids, and sometimes they are direct classes.
        # just gotta do a check. Its basically CIFAR vs wordnet
        self.nodes = Node.get_nodes(path_graph, path_wnids, trainset.classes)
        self.G = self.nodes[0].G
        self.wnid_to_node = {node.wnid: node for node in self.nodes}
        self.ignore_labels = ignore_labels
        self.wnids = get_wnids(path_wnids)
        self.classes = trainset.classes
        self.wnid_to_class = {wnid: cls for wnid, cls in zip(self.wnids, self.classes)}
        self.weighted_average = weighted_average
        self.correct = 0
        self.total = 0
        # Human-readable synset name for every leaf wnid.
        self.wnid_to_name = {wnid: synset_to_name(wnid_to_synset(wnid)) for wnid in self.wnids}
        # leaf_counts[class][leaf_wnid]: samples of `class` that ended at that leaf.
        self.leaf_counts = {cls:{node:0 for node in get_leaves(self.G)} for cls in self.classes}
        # node_counts[class][wnid]: samples of `class` that passed through `wnid`.
        self.node_counts = {cls:{node.wnid:0 for node in self.nodes} for cls in self.classes}
        self.class_counts = {cls:0 for cls in self.classes} # count how many samples weve seen for each class
        for cls in self.classes:
            self.node_counts[cls].update({wnid:0 for wnid in self.wnids})
        self.csv_save_path = csv_save_path
        self.json_save_path = json_save_path
        if not os.path.exists(self.json_save_path):
            os.mkdir(self.json_save_path)
        self.class_to_wnid = {self.wnid_to_class[wnid]:wnid for wnid in self.wnids}
        self.class_accuracies = {c: 0 for c in self.classes}
        self.class_totals = {c: 0 for c in self.classes}
        self.ignored_classes = ()
    def update_batch(self, outputs, predicted, targets):
        """Traverse the full tree for every batch sample and update leaf/node
        visit counters plus per-class accuracy tallies."""
        wnid_to_pred_selector = {}
        n_samples = outputs.size(0)
        for node in self.nodes:
            # Drop ignored labels before computing this node's child argmax.
            ignore_classes_pruned = node.prune_ignore_labels(self.ignore_labels)
            outputs_sub = HardTreeSupLoss.get_output_sub(outputs, node, self.weighted_average, ignore_classes_pruned)
            _, preds_sub = torch.max(outputs_sub, dim=1)
            preds_sub = list(map(int, preds_sub.cpu()))
            wnid_to_pred_selector[node.wnid] = preds_sub
        paths = self.traverse_tree(wnid_to_pred_selector, n_samples, targets)
        for cls, leaf in zip(targets.numpy(), paths):
            self.leaf_counts[self.classes[cls]][leaf] += 1
            self.class_counts[self.classes[cls]] += 1
        for i in range(len(predicted)):
            self.class_accuracies[self.classes[predicted[i]]] += int(predicted[i] == targets[i])
            self.class_totals[self.classes[targets[i]]] += 1
        # Re-derive predictions from the leaf each sample reached.
        predicted = [self.classes.index(self.wnid_to_class[wnid]) for wnid in paths]
        self.correct += np.sum((predicted == targets.numpy()))
        self.total += len(paths)
        accuracy = round(self.correct / self.total, 4) * 100
        return f'TreePrior: {accuracy}%'
    # return leaf node wnids corresponding to each output
    def traverse_tree(self, wnid_to_pred_selector, nsamples, targets):
        """Follow each sample's argmax decisions root-to-leaf, incrementing
        node_counts along the way; returns one leaf wnid per sample."""
        leaf_wnids = []
        wnid_root = get_root(self.G)
        node_root = self.wnid_to_node[wnid_root]
        target_classes = targets.numpy()
        for index in range(nsamples):
            wnid, node = wnid_root, node_root
            while node is not None:
                pred_sub = wnid_to_pred_selector[node.wnid]
                index_child = pred_sub[index]
                wnid = node.children[index_child]
                node = self.wnid_to_node.get(wnid, None)
                # node_counts may be keyed by wnids or by plain class names
                # depending on the dataset; try the wnid mapping first and
                # fall back to the class name on any lookup failure.
                try:
                    self.node_counts[self.class_to_wnid[self.classes[target_classes[index]]]][wnid] += 1
                except:
                    self.node_counts[self.classes[target_classes[index]]][wnid] += 1
            leaf_wnids.append(wnid)
        return leaf_wnids
    def end_test(self, epoch):
        """Flush CSV/JSON outputs and report per-class accuracy tallies."""
        if self.csv_save_path is not None or self.use_wandb:
            self.write_to_csv(self.csv_save_path)
        self.write_to_json(self.json_save_path)
        if self.use_wandb:
            for cls in self.class_accuracies:
                label = cls+"-acc"
                wandb.run.summary[label] = self.class_accuracies[cls]
        print(self.class_accuracies)
    def write_to_csv(self, path):
        """Write a table of leaf-visit counts: one row per class, one column per leaf."""
        columns = {node:[] for node in get_leaves(self.G)}
        classes_to_count = self.classes
        for cls in self.classes:
            for node in get_leaves(self.G):
                if node in self.leaf_counts[cls]:
                    columns[node].append(self.leaf_counts[cls][node])
                else:
                    columns[node].append(0)
        new_columns = {}
        for node in get_leaves(self.G):
            # Column header combines the readable synset name with the wnid.
            new_columns["%s %s" % (synset_to_name(wnid_to_synset(node)), node)] = columns[node]
        # Heuristic wnid check: 'nXXXXXXXX'[1:] parses as an int only for
        # wnid-style class labels; plain names fall through to the except.
        try:
            int(classes_to_count[1:])
            index = [self.wnid_to_name[cls] for cls in classes_to_count]
        except:
            index = [cls for cls in classes_to_count]
        df = pd.DataFrame(data=new_columns, index=index)
        df.to_csv(path)
        print("CSV saved to %s" % path)
    def write_to_json(self, path):
        """For each class, dump a node-link JSON of the tree with per-node visit
        frequencies as weights, and render the weighted-tree HTML visualization."""
        # create separate graph for each node
        if not os.path.exists(path):
            os.makedirs(path)
        for cls in self.classes:
            # Wnid-style labels ('nXXXXXXXX') are mapped through class_to_wnid;
            # plain class names raise in int() and are left unchanged.
            try:
                int(cls[1:])
                cls = self.class_to_wnid[cls]
            except:
                pass
            G = nx.DiGraph(self.G)
            for node in self.G.nodes():
                # `ignore` flags classes with zero samples; the +1e-4 keeps the
                # division safe for exactly those classes.
                ignore=self.class_counts[cls] == 0
                G.nodes[node]['weight'] = self.node_counts[cls][node] / (self.class_counts[cls] + 1e-4)
            G.nodes[get_root(self.G)]['weight'] = 1
            json_data = node_link_data(G)
            # Map wnid-style labels back to readable names for the file name.
            try:
                int(cls[1:])
                cls = self.wnid_to_name[cls]
            except:
                pass
            if not ignore:
                cls_path = path + cls + '.json'
                with open(cls_path, 'w') as f:
                    json.dump(json_data, f)
                print("Json saved to %s" % cls_path)
                root = next(get_roots(G))
                tree = build_tree(G, root)
                generate_vis(os.getcwd() + '/vis/tree-weighted-template.html', tree, 'tree', cls, out_dir=path)
                if self.use_wandb:
                    wandb.log({cls + "-path": wandb.Html(open(cls_path.replace('.json', '') + '-tree.html'), inject=False)})
                print("Json saved to %s" % cls_path)
class HardTrackNodes(HardFullTreePrior):
    """Full-tree hard evaluation that additionally records, for a chosen set of
    node wnids, the running dataset index of every image routed through each
    tracked node, then dumps those indices to JSON.

    Note: only works if the dataloader used for evaluation is NOT shuffled,
    since samples are identified by their running index.
    """
    accepts_path_wnids = True
    accepts_weighted_average = True
    accepts_track_nodes = True

    def __init__(self, trainset, testset, experiment_name, path_graph, path_wnids, track_nodes,
                 json_save_path='./out/hard_track_nodes_analysis/',
                 csv_save_path='./out/hard_track_nodes_analysis.csv', weighted_average=False,
                 use_wandb=False, run_name="HardTrackNodes"):
        # BUGFIX: the parent __init__ takes `ignore_labels` between `path_wnids`
        # and `json_save_path`, so the original positional call shifted every
        # later argument into the wrong slot (json_save_path -> ignore_labels,
        # ..., run_name -> use_wandb). Forward by keyword so each value reaches
        # its intended parameter.
        super().__init__(trainset, testset, experiment_name, path_graph, path_wnids,
                         json_save_path=json_save_path, csv_save_path=csv_save_path,
                         weighted_average=weighted_average, use_wandb=use_wandb,
                         run_name=run_name)
        # wnid -> list of running sample indices routed through that node.
        self.track_nodes = {wnid: [] for wnid in track_nodes}

    # return leaf node wnids corresponding to each output
    def traverse_tree(self, wnid_to_pred_selector, nsamples, targets):
        """Same traversal as the parent, but also appends the global sample index
        (self.total + batch index) whenever a tracked node is visited."""
        leaf_wnids = []
        wnid_root = get_root(self.G)
        node_root = self.wnid_to_node[wnid_root]
        target_classes = targets.numpy()
        for index in range(nsamples):
            wnid, node = wnid_root, node_root
            while node is not None:
                pred_sub = wnid_to_pred_selector[node.wnid]
                index_child = pred_sub[index]
                wnid = node.children[index_child]
                if wnid in self.track_nodes:
                    self.track_nodes[wnid].append(self.total + index)
                node = self.wnid_to_node.get(wnid, None)
                # node_counts may be keyed by wnids or plain class names; try
                # the wnid mapping first, fall back to the class name.
                try:
                    self.node_counts[self.class_to_wnid[self.classes[target_classes[index]]]][wnid] += 1
                except:
                    self.node_counts[self.classes[target_classes[index]]][wnid] += 1
            leaf_wnids.append(wnid)
        return leaf_wnids

    def write_to_json(self, path):
        """Dump tracked-node sample indices and a weighted tree visualization per class."""
        # create separate graph for each node
        if not os.path.exists(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))
        for cls in self.classes:
            cls_path = path + cls + '.json'
            with open(cls_path, 'w') as f:
                json.dump(self.track_nodes, f)
            G = nx.DiGraph(self.G)
            for node in self.G.nodes():
                if self.class_counts[cls] == 0:
                    print(cls)
                # NOTE(review): divides by class_counts without smoothing —
                # raises ZeroDivisionError for a class with zero samples.
                G.nodes[node]['weight'] = self.node_counts[cls][node] / self.class_counts[cls]
            G.nodes[get_root(self.G)]['weight'] = 1
            root = next(get_roots(G))
            tree = build_tree(G, root)
            generate_vis(os.getcwd() + '/vis/tree-weighted-template.html', tree, 'tree', cls, out_dir=path)
            if self.use_wandb:
                wandb.log({cls + "-path": wandb.Html(open(cls_path.replace('.json', '') + '-tree.html'), inject=False)})
            print("Json saved to %s" % cls_path)
class SoftFullTreePrior(HardFullTreePrior):
    """Evaluates model on a decision tree prior. Evaluation is soft.

    Predictions come from the Bayesian aggregation over the whole tree; each
    predicted class's precomputed root-to-leaf path is used to bump node counts.
    """
    def __init__(self, trainset, testset, experiment_name, path_graph, path_wnids, ignore_labels=[],
                 json_save_path='./out/full_tree_analysis/', csv_save_path='./out/cifar100.csv',
                 weighted_average=False, use_wandb=False, run_name="SoftFullTreePrior"):
        # Arguments are forwarded positionally in the parent's declared order.
        super().__init__(trainset, testset, experiment_name, path_graph, path_wnids, ignore_labels,
                        json_save_path, csv_save_path, weighted_average, use_wandb, run_name)
        self.num_classes = len(trainset.classes)
        # Precompute the root-to-leaf wnid path for every class (used by traverse_tree).
        get_path = lambda wnid: nx.shortest_path(self.G, source=get_root(self.G), target=wnid)
        self.paths = {self.wnid_to_class[wnid]: get_path(wnid) for wnid in self.wnids}
    def update_batch(self, outputs, predicted, targets):
        """Aggregate leaf probabilities over the tree (soft inference), then
        update leaf/node counters and per-class accuracy tallies for the batch."""
        bayesian_outputs = SoftTreeSupLoss.inference(
            self.nodes, outputs, self.num_classes, self.weighted_average)
        n_samples = outputs.size(0)
        predicted = bayesian_outputs.max(1)[1].to(targets.device)
        paths = self.traverse_tree(predicted.cpu().numpy(), n_samples, targets)
        for cls, leaf in zip(targets.numpy(), paths):
            self.leaf_counts[self.classes[cls]][leaf] += 1
            self.class_counts[self.classes[cls]] += 1
        for i in range(len(predicted)):
            self.class_accuracies[self.classes[predicted[i]]] += int(predicted[i] == targets[i])
            self.class_totals[self.classes[targets[i]]] += 1
        # Re-derive class indices from the leaf wnid each sample mapped to.
        predicted = [self.classes.index(self.wnid_to_class[wnid]) for wnid in paths]
        self.correct += np.sum((predicted == targets.numpy()))
        self.total += len(paths)
        accuracy = round(self.correct / self.total, 4) * 100
        return f'TreePrior: {accuracy}%'
    # return leaf node wnids corresponding to each output
    def traverse_tree(self, wnid_to_pred_selector, nsamples, targets):
        """Walk each predicted class's precomputed root-to-leaf path, bumping
        node_counts; returns the predicted leaf wnid per sample.

        Here `wnid_to_pred_selector` is an array of predicted class indices
        (see update_batch) — the name is kept for signature parity with the parent.
        """
        target_classes = targets.numpy()
        for index in range(nsamples):
            path = self.paths[self.classes[wnid_to_pred_selector[index]]]
            for wnid in path:
                # node_counts may be keyed by wnids or plain class names; try
                # the wnid mapping first, fall back to the class name.
                try:
                    self.node_counts[self.class_to_wnid[self.classes[target_classes[index]]]][wnid] += 1
                except:
                    self.node_counts[self.classes[target_classes[index]]][wnid] += 1
        return [self.wnids[i] for i in wnid_to_pred_selector]
class SoftTrackDepth(SoftFullTreePrior):
    """ Track depth metric with SoftFullTreePrior
    """
    def __init__(self, trainset, testset, experiment_name, path_graph, path_wnids, ignore_labels=[],
                 json_save_path='./out/soft_track_depth/', csv_save_path='./out/soft_track_depth/cifar10.csv',
                 weighted_average=False, use_wandb=False, run_name="SoftTrackDepth"):
        super().__init__(trainset, testset, experiment_name, path_graph, path_wnids, ignore_labels,
                         json_save_path, csv_save_path, weighted_average, use_wandb, run_name)
    def calculate_depth_metrics(self):
        """For each class, compare visit counts on the true root-to-leaf
        path ("depth") against the maximum possible visits ("total") and
        store their ratio.  Populates and returns ``self.depth_counts``."""
        self.depth_counts = {cls: {"depth": 0, "total": 0} for cls in self.classes}
        for cls in self.classes:
            cls_counts = self.node_counts[cls]
            cls_wnid = self.class_to_wnid[cls]  # NOTE(review): unused local
            cls_node = self.G.nodes[self.class_to_wnid[cls]]  # NOTE(review): unused local
            true_path_wnids = get_path_to_node(self.G, self.class_to_wnid[cls])
            cls_depth_count, cls_total_count = 0, 0
            for node in true_path_wnids:
                cls_depth_count += cls_counts.get(node, 0)
                # total grows by the class sample count once per path node,
                # i.e. total = len(path) * class_counts[cls]
                cls_total_count += self.class_counts[cls]
            self.depth_counts[cls] = {
                "depth": cls_depth_count,
                "total": cls_total_count,
                # raises ZeroDivisionError when no samples/path nodes exist
                "ratio": cls_depth_count / cls_total_count,
            }
        return self.depth_counts
    def end_test(self, epoch):
        """Print per-class and aggregate depth metrics at the end of a
        test epoch (``epoch`` itself is unused here)."""
        self.calculate_depth_metrics()
        print("===> Depth metrics:")
        for cls, depth_dict in self.depth_counts.items():
            print(f"{cls}: {depth_dict['ratio']} ({depth_dict['depth']} / {depth_dict['total']})")
        total_depth_counts = sum(d["depth"] for d in self.depth_counts.values())
        total_counts = sum(d["total"] for d in self.depth_counts.values())
        print(f"Total: {total_depth_counts / total_counts} ({total_depth_counts} / {total_counts})")
class SoftFullTreeOODPrior(SoftFullTreePrior):
    """Evaluates model on a decision tree prior. Evaluation is soft.

    Out-of-distribution variant: counters are keyed by the OOD dataset's
    classes, and accuracy is not computed (OOD samples have no in-tree
    ground truth).
    """
    accepts_path_graph = True
    accepts_path_wnids = True
    accepts_json_save_path = True
    accepts_weighted_average = True
    accepts_csv_save_path = True
    accepts_ignore_labels = True
    accepts_oodset = True
    accepts_ood_path_wnids = True
    def __init__(self, trainset, testset, experiment_name, path_graph, path_wnids,
                 oodset, ood_path_wnids, ignore_labels=[],
                 json_save_path='./out/soft_full_tree_analysis/', csv_save_path='./out/cifar100.csv',
                 weighted_average=False, use_wandb=False, run_name="SoftFullTreeOODPrior"):
        # NOTE(review): deliberately does NOT call super().__init__ -- state
        # is rebuilt here so counters can be keyed by the OOD classes.
        self.weighted_average = weighted_average
        self.use_wandb = use_wandb
        self.csv_save_path = csv_save_path
        self.json_save_path = json_save_path
        if not os.path.exists(self.json_save_path):
            os.mkdir(self.json_save_path)
        # Build the tree nodes from both in-distribution and OOD wnid lists.
        self.nodes = Node.get_nodes(path_graph, path_wnids, trainset.classes, ood_path_wnids)
        self.G = self.nodes[0].G
        self.wnid_to_node = {node.wnid: node for node in self.nodes}
        self.wnids = get_wnids(path_wnids, ood_path_wnids)
        self.classes = trainset.classes
        self.wnid_to_class = {wnid: cls for wnid, cls in zip(self.wnids, self.classes)}
        self.wnid_to_name = {wnid: synset_to_name(wnid_to_synset(wnid)) for wnid in self.wnids}
        self.ood_classes = oodset.classes
        self.ood_wnids = get_wnids(ood_path_wnids)
        # Extend wnid->class with the OOD mapping, then invert it.
        self.wnid_to_class.update({wnid: cls for wnid, cls in zip(self.ood_wnids, self.ood_classes)})
        self.class_to_wnid = {self.wnid_to_class[wnid]:wnid for wnid in self.wnid_to_class.keys()}
        # All counters below are keyed by the OOD classes.
        self.leaf_counts = {cls:{node:0 for node in get_leaves(self.G)} for cls in self.ood_classes}
        self.class_counts = {cls:0 for cls in self.ood_classes}
        self.node_counts = {} # count how many samples we've seen for each class
        for cls in self.ood_classes:
            curr_counts = {w: 0 for w in self.wnid_to_class.keys()}
            curr_counts.update({n.wnid: 0 for n in self.nodes})
            self.node_counts[cls] = curr_counts
        self.num_classes = len(trainset.classes)
        # Precompute the root-to-leaf wnid path for every known wnid.
        get_path = lambda wnid: nx.shortest_path(self.G, source=get_root(self.G), target=wnid)
        self.paths = {self.wnid_to_class[wnid]: get_path(wnid) for wnid in self.wnids}
    def update_batch(self, outputs, predicted, targets):
        """Run soft tree inference on one OOD batch and update leaf/class
        counters; accuracy is reported as -1 (not measurable for OOD)."""
        bayesian_outputs = SoftTreeSupLoss.inference(
            self.nodes, outputs, self.num_classes, self.weighted_average)
        n_samples = outputs.size(0)
        predicted = bayesian_outputs.max(1)[1].to(targets.device)
        paths = self.traverse_tree(predicted.cpu().numpy(), n_samples, targets)
        for cls, leaf in zip(targets.numpy(), paths):
            self.leaf_counts[self.ood_classes[cls]][leaf] += 1
            self.class_counts[self.ood_classes[cls]] += 1
        accuracy = -1 # cannot evaluate accuracy for OOD samples
        return f'TreePrior: {accuracy}%'
    # return leaf node wnids corresponding to each output
    def traverse_tree(self, wnid_to_pred_selector, nsamples, targets):
        """Count node visits along each predicted leaf's path; counters are
        keyed by the OOD class of each target sample."""
        target_classes = targets.numpy()
        for index in range(nsamples):
            path = self.paths[self.classes[wnid_to_pred_selector[index]]]
            for wnid in path:
                # Counter dict may be keyed by wnid or by OOD class name;
                # fall back on a lookup failure.
                try:
                    self.node_counts[self.class_to_wnid[self.ood_classes[target_classes[index]]]][wnid] += 1
                except:
                    self.node_counts[self.ood_classes[target_classes[index]]][wnid] += 1
        return [self.wnids[i] for i in wnid_to_pred_selector]
    def write_to_csv(self, path):
        """Write a leaf-count table (rows = OOD classes, columns = leaf
        nodes named "<synset> <wnid>") to a CSV file at *path*."""
        columns = {node:[] for node in get_leaves(self.G)}
        for cls in self.ood_classes:
            for node in get_leaves(self.G):
                if node in self.leaf_counts[cls]:
                    columns[node].append(self.leaf_counts[cls][node])
                else:
                    columns[node].append(0)
        new_columns = {}
        for node in get_leaves(self.G):
            new_columns["%s %s" % (synset_to_name(wnid_to_synset(node)), node)] = columns[node]
        # If class names look like wnids ("nXXXXXXXX"), use readable names.
        # NOTE(review): int(self.ood_classes[1:]) slices the *list*, so
        # int() always raises and the except branch runs -- confirm intent.
        try:
            int(self.ood_classes[1:])
            index = [self.wnid_to_name[cls] for cls in self.ood_classes]
        except:
            index = [cls for cls in self.ood_classes]
        df = pd.DataFrame(data=new_columns, index=index)
        df.to_csv(path)
        print("CSV saved to %s" % path)
    def write_to_json(self, path):
        """Write one node-link JSON graph per OOD class with per-node
        'weight' = fraction of that class's samples visiting the node."""
        # create separate graph for each node
        if not os.path.exists(path):
            os.makedirs(path)
        for cls in self.ood_classes:
            # If the class name is a wnid-like string, key counters by wnid.
            try:
                int(cls[1:])
                cls = self.class_to_wnid[cls]
            except:
                pass
            G = nx.DiGraph(self.G)
            for node in self.G.nodes():
                if self.class_counts[cls] == 0:
                    # zero samples: the division below raises after this print
                    print(cls)
                G.nodes[node]['weight'] = self.node_counts[cls][node] / self.class_counts[cls]
            G.nodes[get_root(self.G)]['weight'] = 1
            json_data = node_link_data(G)
            # Convert wnid-like names back to readable names for the filename.
            try:
                int(cls[1:])
                cls = self.wnid_to_name[cls]
            except:
                pass
            cls_path = path + cls + '.json'
            with open(cls_path, 'w') as f:
                json.dump(json_data, f)
            print("Json saved to %s" % cls_path)
class SingleRISE(SingleInference):
    """Generate RISE saliency map for a single image ."""
    def __init__(self, trainset, testset, experiment_name, path_graph, path_wnids, net,
                 weighted_average=False, use_wandb=True, run_name="SingleRISE"):
        # NOTE(review): the use_wandb argument is effectively ignored --
        # self.use_wandb is hard-coded to True below; confirm intent.
        super().__init__(trainset, testset, experiment_name, path_graph, path_wnids, use_wandb=use_wandb,
                         run_name=run_name)
        # Infer the input spatial size from the first training sample; the
        # fallback handles datasets that nest the image one level deeper.
        try:
            H,L,W=trainset[0][0].shape
        except:
            H, L, W = trainset[0][0][0].shape
        print("INPUT SIZE: ", (L,W))
        self.net = net
        self.rise = RISE(net, input_size=(L,W))
        self.use_wandb = True
    def inf(self, img, outputs):
        """Run hard tree inference on one image and generate a RISE
        saliency overlay for every node on the predicted root-to-leaf
        path; overlays are written to ./out/RISE/ and logged to wandb."""
        print("=====> starting RISE", img.shape)
        wnid_to_pred_selector = {}
        wnid_to_rise = {}
        examples, sals = [], []
        # Collect each inner node's hard child prediction for this sample.
        for node in self.nodes:
            outputs_sub = HardTreeSupLoss.get_output_sub(
                outputs, node, self.weighted_average)
            outputs_sub = nn.functional.softmax(outputs_sub, dim=1)
            # selector is all-ones, so the `continue` below never fires;
            # kept for parity with the class-filtering code path.
            selector = [1 for c in range(node.num_classes)]
            if not any(selector):
                continue
            _, preds_sub = torch.max(outputs_sub, dim=1)
            preds_sub = list(map(int, preds_sub.cpu()))
            wnid_to_pred_selector[node.wnid] = (preds_sub, selector)
        predicted = self.single_traversal(
            [], wnid_to_pred_selector)
        # Explain only the nodes actually visited along the predicted path.
        for node in self.nodes:
            if node.wnid in predicted:
                print("Generating Rise for ", node.wnid)
                rise_saliency = self.rise.explain_instance(img, node, self.weighted_average)
                wnid_to_rise[node.wnid] = rise_saliency
        if self.use_wandb:
            print("log image")
            # CHW tensor -> HWC numpy for wandb.Image
            wandb.log({"examples": [wandb.Image(torch.squeeze(img).cpu().numpy().transpose((1, 2, 0)),
                                                caption=str(predicted))]})
        cls = self.wnid_to_class.get(predicted[-1], None)
        pred = -1 if cls is None else self.classes.index(cls)
        print("class: ", pred)
        print("inference: ", predicted)
        for wnid, rise_output in wnid_to_rise.items():
            overlay = get_cam(torch.squeeze(img), rise_output.cpu().detach().numpy())
            sals.append(wandb.Image(overlay, caption=f"RISE (wnid={wnid}, idx={predicted.index(wnid)})"))
            if not os.path.exists("./out/RISE/"):
                os.makedirs("./out/RISE/")
            if not cv2.imwrite(f"./out/RISE/RISE_{wnid}.jpg", overlay):
                print("ERROR writing image to file")
        if self.use_wandb:
            print("logging")
            wandb.log({"rise examples": sals})
class SingleGradCAM(SingleInference):
    """Generate Grad-CAM saliency map for a single image."""
    def __init__(self, trainset, testset, experiment_name, path_graph, path_wnids, net,
                 weighted_average=False, use_wandb=True, run_name="SingleGradCAM"):
        # NOTE(review): the use_wandb argument is effectively ignored --
        # self.use_wandb is hard-coded to True below; confirm intent.
        super().__init__(trainset, testset, experiment_name, path_graph, path_wnids, use_wandb=use_wandb,
                         run_name=run_name)
        # Infer the input spatial size from the first training sample; the
        # fallback handles datasets that nest the image one level deeper.
        try:
            H,L,W=trainset[0][0].shape
        except:
            H, L, W = trainset[0][0][0].shape
        print("INPUT SIZE: ", (L,W))
        self.net = net
        self.gcam = GradCAM(model=net)
        self.use_wandb = True
    def inf(self, img, outputs):
        """Run hard tree inference on one image and generate a Grad-CAM
        overlay for every node on the predicted root-to-leaf path;
        overlays are written to ./out/GradCAM/ and logged to wandb."""
        wnid_to_pred_selector = {}
        wnid_to_rise = {}
        examples, sals = [], []
        # Collect each inner node's hard child prediction for this sample.
        for node in self.nodes:
            outputs_sub = HardTreeSupLoss.get_output_sub(
                outputs, node, self.weighted_average)
            outputs_sub = nn.functional.softmax(outputs_sub, dim=1)
            # selector is all-ones, so the `continue` below never fires;
            # kept for parity with the class-filtering code path.
            selector = [1 for c in range(node.num_classes)]
            if not any(selector):
                continue
            _, preds_sub = torch.max(outputs_sub, dim=1)
            preds_sub = list(map(int, preds_sub.cpu()))
            wnid_to_pred_selector[node.wnid] = (preds_sub, selector)
        predicted = self.single_traversal(
            [], wnid_to_pred_selector)
        # Explain only the nodes actually visited along the predicted path.
        for node in self.nodes:
            if node.wnid in predicted:
                print("Generating GradCAM for ", node.wnid)
                rise_saliency = self.gen_gcam(img, node)
                rise_saliency = self.get_mask(rise_saliency)
                wnid_to_rise[node.wnid] = rise_saliency
        if self.use_wandb:
            print("log image")
            # CHW tensor -> HWC numpy for wandb.Image
            wandb.log({"examples": [wandb.Image(torch.squeeze(img).cpu().numpy().transpose((1, 2, 0)),
                                                caption=str(predicted))]})
        cls = self.wnid_to_class.get(predicted[-1], None)
        pred = -1 if cls is None else self.classes.index(cls)
        print("class: ", pred)
        print("inference: ", predicted)
        for wnid, rise_output in wnid_to_rise.items():
            overlay = get_cam(torch.squeeze(img), rise_output)
            if wnid in predicted:
                sals.append(wandb.Image(overlay, caption=f"GradCAM (idx={predicted.index(wnid)})"))
            else:
                sals.append(wandb.Image(overlay, caption=f"GradCAM (wnid={wnid})"))
            if not os.path.exists("./out/GradCAM/"):
                os.makedirs("./out/GradCAM/")
            if not cv2.imwrite(f"./out/GradCAM/GradCAM_{wnid}.jpg", overlay):
                print("ERROR writing image to file")
        if self.use_wandb:
            print("logging")
            wandb.log({"gcam examples": sals})
    def gen_gcam(self, img, node, target_index=1):
        """
        Visualize model responses given multiple images
        """
        # Get model and forward pass
        probs, ids = self.gcam.forward(img, node)
        for i in range(target_index):
            # Grad-CAM
            self.gcam.backward(ids=ids[:, [i]])
            regions = self.gcam.generate(target_layer='module.layer4')
        masks = []
        for j in range(len(img)):
            # Grad-CAM
            mask = regions[j, 0].cpu().numpy()
            masks += [mask]
        if len(masks) == 1:
            # NOTE(review): this early return skips self.gcam.remove_hook(),
            # leaving forward/backward hooks registered -- confirm intent.
            return masks[0]
        self.gcam.remove_hook()
        return masks
    def get_mask(self, mask, sigma=.55, omega=100):
        """Soft-threshold a saliency map with a sigmoid centered at
        sigma * max(mask); omega controls the threshold sharpness."""
        sigma *= np.max(mask)
        mask = 1/(1+np.exp(-omega*(mask - sigma)))
        return mask
| [
"nbdt.loss.HardTreeSupLoss.inference",
"os.mkdir",
"wandb.log",
"nbdt.graph.wnid_to_synset",
"nbdt.loss.SoftTreeSupLoss.inference",
"generate_vis.build_tree",
"nbdt.graph.get_leaves",
"numpy.exp",
"nbdt.loss.HardTreeSupLoss.get_output_sub",
"pandas.DataFrame",
"saliency.Grad_CAM.gcam.GradCAM",
... | [((2351, 2372), 'nbdt.utils.set_np_printoptions', 'set_np_printoptions', ([], {}), '()\n', (2370, 2372), False, 'from nbdt.utils import DEFAULT_CIFAR10_TREE, DEFAULT_CIFAR10_WNIDS, DEFAULT_CIFAR100_TREE, DEFAULT_CIFAR100_WNIDS, DEFAULT_TINYIMAGENET200_TREE, DEFAULT_TINYIMAGENET200_WNIDS, DEFAULT_IMAGENET1000_TREE, DEFAULT_IMAGENET1000_WNIDS, set_np_printoptions\n'), ((3580, 3606), 'numpy.zeros', 'np.zeros', (['(self.k, self.k)'], {}), '((self.k, self.k))\n', (3588, 3606), True, 'import numpy as np\n'), ((5286, 5342), 'nbdt.data.custom.Node.get_nodes', 'Node.get_nodes', (['path_graph', 'path_wnids', 'trainset.classes'], {}), '(path_graph, path_wnids, trainset.classes)\n', (5300, 5342), False, 'from nbdt.data.custom import Node\n'), ((5510, 5531), 'nbdt.graph.get_wnids', 'get_wnids', (['path_wnids'], {}), '(path_wnids)\n', (5519, 5531), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((7204, 7220), 'nbdt.graph.get_root', 'get_root', (['self.G'], {}), '(self.G)\n', (7212, 7220), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((9305, 9397), 'nbdt.loss.SoftTreeSupLoss.inference', 'SoftTreeSupLoss.inference', (['self.nodes', 'outputs', 'self.num_classes', 'self.weighted_average'], {}), '(self.nodes, outputs, self.num_classes, self.\n weighted_average)\n', (9330, 9397), False, 'from nbdt.loss import HardTreeSupLoss, SoftTreeSupLoss\n'), ((11073, 11089), 'nbdt.graph.get_root', 'get_root', (['self.G'], {}), '(self.G)\n', (11081, 11089), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((13455, 13511), 'nbdt.data.custom.Node.get_nodes', 'Node.get_nodes', (['path_graph', 'path_wnids', 'trainset.classes'], {}), '(path_graph, path_wnids, trainset.classes)\n', (13469, 13511), False, 'from nbdt.data.custom import Node\n'), ((13679, 
13700), 'nbdt.graph.get_wnids', 'get_wnids', (['path_wnids'], {}), '(path_wnids)\n', (13688, 13700), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((16286, 16302), 'nbdt.graph.get_root', 'get_root', (['self.G'], {}), '(self.G)\n', (16294, 16302), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((17871, 17889), 'nbdt.graph.get_leaves', 'get_leaves', (['self.G'], {}), '(self.G)\n', (17881, 17889), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((18194, 18237), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'new_columns', 'index': 'index'}), '(data=new_columns, index=index)\n', (18206, 18237), True, 'import pandas as pd\n'), ((20866, 20882), 'nbdt.graph.get_root', 'get_root', (['self.G'], {}), '(self.G)\n', (20874, 20882), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((23713, 23805), 'nbdt.loss.SoftTreeSupLoss.inference', 'SoftTreeSupLoss.inference', (['self.nodes', 'outputs', 'self.num_classes', 'self.weighted_average'], {}), '(self.nodes, outputs, self.num_classes, self.\n weighted_average)\n', (23738, 23805), False, 'from nbdt.loss import HardTreeSupLoss, SoftTreeSupLoss\n'), ((28208, 28280), 'nbdt.data.custom.Node.get_nodes', 'Node.get_nodes', (['path_graph', 'path_wnids', 'trainset.classes', 'ood_path_wnids'], {}), '(path_graph, path_wnids, trainset.classes, ood_path_wnids)\n', (28222, 28280), False, 'from nbdt.data.custom import Node\n'), ((28405, 28442), 'nbdt.graph.get_wnids', 'get_wnids', (['path_wnids', 'ood_path_wnids'], {}), '(path_wnids, ood_path_wnids)\n', (28414, 28442), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((28735, 28760), 
'nbdt.graph.get_wnids', 'get_wnids', (['ood_path_wnids'], {}), '(ood_path_wnids)\n', (28744, 28760), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((29743, 29835), 'nbdt.loss.SoftTreeSupLoss.inference', 'SoftTreeSupLoss.inference', (['self.nodes', 'outputs', 'self.num_classes', 'self.weighted_average'], {}), '(self.nodes, outputs, self.num_classes, self.\n weighted_average)\n', (29768, 29835), False, 'from nbdt.loss import HardTreeSupLoss, SoftTreeSupLoss\n'), ((31333, 31351), 'nbdt.graph.get_leaves', 'get_leaves', (['self.G'], {}), '(self.G)\n', (31343, 31351), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((31656, 31699), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'new_columns', 'index': 'index'}), '(data=new_columns, index=index)\n', (31668, 31699), True, 'import pandas as pd\n'), ((33337, 33365), 'saliency.RISE.explanations.RISE', 'RISE', (['net'], {'input_size': '(L, W)'}), '(net, input_size=(L, W))\n', (33341, 33365), False, 'from saliency.RISE.explanations import RISE\n'), ((36049, 36067), 'saliency.Grad_CAM.gcam.GradCAM', 'GradCAM', ([], {'model': 'net'}), '(model=net)\n', (36056, 36067), False, 'from saliency.Grad_CAM.gcam import GradCAM\n'), ((38994, 39006), 'numpy.max', 'np.max', (['mask'], {}), '(mask)\n', (39000, 39006), True, 'import numpy as np\n'), ((2512, 2601), 'wandb.init', 'wandb.init', ([], {'project': 'experiment_name', 'name': 'run_name', 'reinit': '(True)', 'entity': '"""lisadunlap"""'}), "(project=experiment_name, name=run_name, reinit=True, entity=\n 'lisadunlap')\n", (2522, 2601), False, 'import wandb\n'), ((6181, 6253), 'nbdt.loss.HardTreeSupLoss.inference', 'HardTreeSupLoss.inference', (['node', 'outputs', 'targets', 'self.weighted_average'], {}), '(node, outputs, targets, self.weighted_average)\n', (6206, 6253), False, 'from nbdt.loss import HardTreeSupLoss, 
SoftTreeSupLoss\n'), ((6357, 6386), 'torch.max', 'torch.max', (['outputs_sub'], {'dim': '(1)'}), '(outputs_sub, dim=1)\n', (6366, 6386), False, 'import torch\n'), ((11785, 11853), 'nbdt.loss.HardTreeSupLoss.get_output_sub', 'HardTreeSupLoss.get_output_sub', (['outputs', 'node', 'self.weighted_average'], {}), '(outputs, node, self.weighted_average)\n', (11815, 11853), False, 'from nbdt.loss import HardTreeSupLoss, SoftTreeSupLoss\n'), ((12017, 12046), 'torch.max', 'torch.max', (['outputs_sub'], {'dim': '(1)'}), '(outputs_sub, dim=1)\n', (12026, 12046), False, 'import torch\n'), ((14536, 14571), 'os.path.exists', 'os.path.exists', (['self.json_save_path'], {}), '(self.json_save_path)\n', (14550, 14571), False, 'import os\n'), ((14585, 14614), 'os.mkdir', 'os.mkdir', (['self.json_save_path'], {}), '(self.json_save_path)\n', (14593, 14614), False, 'import os\n'), ((15121, 15216), 'nbdt.loss.HardTreeSupLoss.get_output_sub', 'HardTreeSupLoss.get_output_sub', (['outputs', 'node', 'self.weighted_average', 'ignore_classes_pruned'], {}), '(outputs, node, self.weighted_average,\n ignore_classes_pruned)\n', (15151, 15216), False, 'from nbdt.loss import HardTreeSupLoss, SoftTreeSupLoss\n'), ((15240, 15269), 'torch.max', 'torch.max', (['outputs_sub'], {'dim': '(1)'}), '(outputs_sub, dim=1)\n', (15249, 15269), False, 'import torch\n'), ((17620, 17638), 'nbdt.graph.get_leaves', 'get_leaves', (['self.G'], {}), '(self.G)\n', (17630, 17638), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((18399, 18419), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (18413, 18419), False, 'import os\n'), ((18433, 18450), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (18444, 18450), False, 'import os\n'), ((18633, 18651), 'networkx.DiGraph', 'nx.DiGraph', (['self.G'], {}), '(self.G)\n', (18643, 18651), True, 'import networkx as nx\n'), ((18923, 18940), 
'networkx.readwrite.json_graph.node_link_data', 'node_link_data', (['G'], {}), '(G)\n', (18937, 18940), False, 'from networkx.readwrite.json_graph import node_link_data, node_link_graph\n'), ((26187, 26236), 'nbdt.graph.get_path_to_node', 'get_path_to_node', (['self.G', 'self.class_to_wnid[cls]'], {}), '(self.G, self.class_to_wnid[cls])\n', (26203, 26236), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((28107, 28142), 'os.path.exists', 'os.path.exists', (['self.json_save_path'], {}), '(self.json_save_path)\n', (28121, 28142), False, 'import os\n'), ((28156, 28185), 'os.mkdir', 'os.mkdir', (['self.json_save_path'], {}), '(self.json_save_path)\n', (28164, 28185), False, 'import os\n'), ((31082, 31100), 'nbdt.graph.get_leaves', 'get_leaves', (['self.G'], {}), '(self.G)\n', (31092, 31100), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((31861, 31881), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (31875, 31881), False, 'import os\n'), ((31895, 31912), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (31906, 31912), False, 'import os\n'), ((32099, 32117), 'networkx.DiGraph', 'nx.DiGraph', (['self.G'], {}), '(self.G)\n', (32109, 32117), True, 'import networkx as nx\n'), ((32408, 32425), 'networkx.readwrite.json_graph.node_link_data', 'node_link_data', (['G'], {}), '(G)\n', (32422, 32425), False, 'from networkx.readwrite.json_graph import node_link_data, node_link_graph\n'), ((33629, 33697), 'nbdt.loss.HardTreeSupLoss.get_output_sub', 'HardTreeSupLoss.get_output_sub', (['outputs', 'node', 'self.weighted_average'], {}), '(outputs, node, self.weighted_average)\n', (33659, 33697), False, 'from nbdt.loss import HardTreeSupLoss, SoftTreeSupLoss\n'), ((33741, 33782), 'torch.nn.functional.softmax', 'nn.functional.softmax', (['outputs_sub'], {'dim': '(1)'}), '(outputs_sub, dim=1)\n', 
(33762, 33782), True, 'import torch.nn as nn\n'), ((33929, 33958), 'torch.max', 'torch.max', (['outputs_sub'], {'dim': '(1)'}), '(outputs_sub, dim=1)\n', (33938, 33958), False, 'import torch\n'), ((35405, 35439), 'wandb.log', 'wandb.log', (["{'rise examples': sals}"], {}), "({'rise examples': sals})\n", (35414, 35439), False, 'import wandb\n'), ((36283, 36351), 'nbdt.loss.HardTreeSupLoss.get_output_sub', 'HardTreeSupLoss.get_output_sub', (['outputs', 'node', 'self.weighted_average'], {}), '(outputs, node, self.weighted_average)\n', (36313, 36351), False, 'from nbdt.loss import HardTreeSupLoss, SoftTreeSupLoss\n'), ((36395, 36436), 'torch.nn.functional.softmax', 'nn.functional.softmax', (['outputs_sub'], {'dim': '(1)'}), '(outputs_sub, dim=1)\n', (36416, 36436), True, 'import torch.nn as nn\n'), ((36583, 36612), 'torch.max', 'torch.max', (['outputs_sub'], {'dim': '(1)'}), '(outputs_sub, dim=1)\n', (36592, 36612), False, 'import torch\n'), ((38207, 38241), 'wandb.log', 'wandb.log', (["{'gcam examples': sals}"], {}), "({'gcam examples': sals})\n", (38216, 38241), False, 'import wandb\n'), ((8121, 8140), 'torch.Tensor', 'torch.Tensor', (['preds'], {}), '(preds)\n', (8133, 8140), False, 'import torch\n'), ((13978, 13998), 'nbdt.graph.wnid_to_synset', 'wnid_to_synset', (['wnid'], {}), '(wnid)\n', (13992, 13998), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((17503, 17521), 'nbdt.graph.get_leaves', 'get_leaves', (['self.G'], {}), '(self.G)\n', (17513, 17521), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((19357, 19376), 'generate_vis.build_tree', 'build_tree', (['G', 'root'], {}), '(G, root)\n', (19367, 19376), False, 'from generate_vis import generate_vis, build_tree\n'), ((21830, 21851), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (21845, 21851), False, 'import os\n'), ((21878, 
21899), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (21893, 21899), False, 'import os\n'), ((22038, 22068), 'json.dump', 'json.dump', (['self.track_nodes', 'f'], {}), '(self.track_nodes, f)\n', (22047, 22068), False, 'import json\n'), ((22090, 22108), 'networkx.DiGraph', 'nx.DiGraph', (['self.G'], {}), '(self.G)\n', (22100, 22108), True, 'import networkx as nx\n'), ((22459, 22478), 'generate_vis.build_tree', 'build_tree', (['G', 'root'], {}), '(G, root)\n', (22469, 22478), False, 'from generate_vis import generate_vis, build_tree\n'), ((28621, 28641), 'nbdt.graph.wnid_to_synset', 'wnid_to_synset', (['wnid'], {}), '(wnid)\n', (28635, 28641), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((31001, 31019), 'nbdt.graph.get_leaves', 'get_leaves', (['self.G'], {}), '(self.G)\n', (31011, 31019), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((32661, 32684), 'json.dump', 'json.dump', (['json_data', 'f'], {}), '(json_data, f)\n', (32670, 32684), False, 'import json\n'), ((34957, 34975), 'torch.squeeze', 'torch.squeeze', (['img'], {}), '(img)\n', (34970, 34975), False, 'import torch\n'), ((35138, 35167), 'os.path.exists', 'os.path.exists', (['"""./out/RISE/"""'], {}), "('./out/RISE/')\n", (35152, 35167), False, 'import os\n'), ((35185, 35211), 'os.makedirs', 'os.makedirs', (['"""./out/RISE/"""'], {}), "('./out/RISE/')\n", (35196, 35211), False, 'import os\n'), ((35231, 35282), 'cv2.imwrite', 'cv2.imwrite', (['f"""./out/RISE/RISE_{wnid}.jpg"""', 'overlay'], {}), "(f'./out/RISE/RISE_{wnid}.jpg', overlay)\n", (35242, 35282), False, 'import cv2\n'), ((37640, 37658), 'torch.squeeze', 'torch.squeeze', (['img'], {}), '(img)\n', (37653, 37658), False, 'import torch\n'), ((37928, 37960), 'os.path.exists', 'os.path.exists', (['"""./out/GradCAM/"""'], {}), "('./out/GradCAM/')\n", (37942, 37960), False, 
'import os\n'), ((37978, 38007), 'os.makedirs', 'os.makedirs', (['"""./out/GradCAM/"""'], {}), "('./out/GradCAM/')\n", (37989, 38007), False, 'import os\n'), ((38027, 38084), 'cv2.imwrite', 'cv2.imwrite', (['f"""./out/GradCAM/GradCAM_{wnid}.jpg"""', 'overlay'], {}), "(f'./out/GradCAM/GradCAM_{wnid}.jpg', overlay)\n", (38038, 38084), False, 'import cv2\n'), ((39027, 39058), 'numpy.exp', 'np.exp', (['(-omega * (mask - sigma))'], {}), '(-omega * (mask - sigma))\n', (39033, 39058), True, 'import numpy as np\n'), ((10319, 10363), 'wandb.Table', 'wandb.Table', ([], {'data': 'data', 'columns': 'self.classes'}), '(data=data, columns=self.classes)\n', (10330, 10363), False, 'import wandb\n'), ((10827, 10843), 'nbdt.graph.get_root', 'get_root', (['self.G'], {}), '(self.G)\n', (10835, 10843), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((14076, 14094), 'nbdt.graph.get_leaves', 'get_leaves', (['self.G'], {}), '(self.G)\n', (14086, 14094), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((18867, 18883), 'nbdt.graph.get_root', 'get_root', (['self.G'], {}), '(self.G)\n', (18875, 18883), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((19215, 19238), 'json.dump', 'json.dump', (['json_data', 'f'], {}), '(json_data, f)\n', (19224, 19238), False, 'import json\n'), ((19320, 19332), 'nbdt.graph.get_roots', 'get_roots', (['G'], {}), '(G)\n', (19329, 19332), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((22422, 22434), 'nbdt.graph.get_roots', 'get_roots', (['G'], {}), '(G)\n', (22431, 22434), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((23510, 23526), 'nbdt.graph.get_root', 
'get_root', (['self.G'], {}), '(self.G)\n', (23518, 23526), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((29015, 29033), 'nbdt.graph.get_leaves', 'get_leaves', (['self.G'], {}), '(self.G)\n', (29025, 29033), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((29540, 29556), 'nbdt.graph.get_root', 'get_root', (['self.G'], {}), '(self.G)\n', (29548, 29556), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((32352, 32368), 'nbdt.graph.get_root', 'get_root', (['self.G'], {}), '(self.G)\n', (32360, 32368), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((37853, 37907), 'wandb.Image', 'wandb.Image', (['overlay'], {'caption': 'f"""GradCAM (wnid={wnid})"""'}), "(overlay, caption=f'GradCAM (wnid={wnid})')\n", (37864, 37907), False, 'import wandb\n'), ((19406, 19417), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (19415, 19417), False, 'import os\n'), ((22363, 22379), 'nbdt.graph.get_root', 'get_root', (['self.G'], {}), '(self.G)\n', (22371, 22379), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((22508, 22519), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (22517, 22519), False, 'import os\n'), ((17941, 17961), 'nbdt.graph.wnid_to_synset', 'wnid_to_synset', (['node'], {}), '(node)\n', (17955, 17961), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((31403, 31423), 'nbdt.graph.wnid_to_synset', 'wnid_to_synset', (['node'], {}), '(node)\n', (31417, 31423), False, 'from nbdt.graph import get_root, get_roots, get_wnids, synset_to_name, wnid_to_synset, get_leaves, get_path_to_node\n'), ((12298, 12316), 
'torch.squeeze', 'torch.squeeze', (['img'], {}), '(img)\n', (12311, 12316), False, 'import torch\n'), ((34550, 34568), 'torch.squeeze', 'torch.squeeze', (['img'], {}), '(img)\n', (34563, 34568), False, 'import torch\n'), ((37233, 37251), 'torch.squeeze', 'torch.squeeze', (['img'], {}), '(img)\n', (37246, 37251), False, 'import torch\n')] |
import numpy as np
import torch
import shutil
import matplotlib.pyplot as plt
import os
from PIL import Image
def resize_padding(im, desired_size, mode="RGB"):
    """Resize *im* so its longer side equals *desired_size* (preserving
    aspect ratio), then center it on a square canvas of that size.

    Args:
        im: source PIL image.
        desired_size: side length in pixels of the square output.
        mode: PIL mode of the output canvas (default "RGB").

    Returns:
        A new square PIL image with the resized input pasted in the center.
    """
    # compute the new size
    old_size = im.size
    ratio = float(desired_size)/max(old_size)
    new_size = tuple([int(x*ratio) for x in old_size])
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
    # and has existed since Pillow 2.7, so this stays backward compatible.
    im = im.resize(new_size, Image.LANCZOS)
    # create a new image and paste the resized on it
    new_im = Image.new(mode, (desired_size, desired_size))
    new_im.paste(im, ((desired_size - new_size[0]) // 2, (desired_size - new_size[1]) // 2))
    return new_im
def KaiMingInit(net):
    """Initialize network parameters with Kaiming-style initialization.

    Conv weights get kaiming-normal (slope 0.2, as in the original
    implementation), BatchNorm is set to identity (weight=1, bias=0),
    and Linear layers get a small normal (std=1e-3). Biases are zeroed.
    """
    for module in net.modules():
        if isinstance(module, torch.nn.Conv2d):
            # slope = 0.2 in the original implementation
            torch.nn.init.kaiming_normal_(module.weight, a=0.2)
            if module.bias is not None:
                torch.nn.init.constant_(module.bias, 0)
        elif isinstance(module, torch.nn.BatchNorm2d):
            # identity affine transform at the start of training
            torch.nn.init.constant_(module.weight, 1)
            torch.nn.init.constant_(module.bias, 0)
        elif isinstance(module, torch.nn.Linear):
            torch.nn.init.normal_(module.weight, std=1e-3)
            if module.bias is not None:
                torch.nn.init.constant_(module.bias, 0)
def save_checkpoint(state, is_best, filename, result_path):
    """Serialize training state to `filename`; if this is the best model so
    far, also keep a copy as `<result_path>/model_best.pth`."""
    torch.save(state, filename)
    if is_best:
        # preserve the best-performing checkpoint under a fixed name
        best_path = os.path.join(result_path, 'model_best.pth')
        shutil.copyfile(filename, best_path)
def load_checkpoint(model, pth_file):
    """Load weights from a checkpoint file into `model`.

    Only weights whose names exist in the target model are loaded, so a
    checkpoint from a slightly different architecture still partially works.

    Params
    ======
        model: torch.nn.Module to receive the weights (mutated in place)
        pth_file: path to a checkpoint containing a 'state_dict' entry
    """
    # Fix: the original always mapped storages onto the GPU
    # (storage.cuda()), which crashes on CPU-only machines.
    if torch.cuda.is_available():
        map_location = lambda storage, loc: storage.cuda()
    else:
        map_location = 'cpu'
    checkpoint = torch.load(pth_file, map_location=map_location)
    pretrained_dict = checkpoint['state_dict']
    model_dict = model.state_dict()
    # keep only entries whose names match the target model
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    print('Previous weight loaded')
class AverageValueMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        # val: most recent sample, avg: running mean, count: samples seen
        self.val = 0.
        self.avg = 0.
        self.count = 0

    def update(self, val, n=1):
        """Fold `n` copies of `val` into the running mean."""
        self.val = val
        total = self.count + n
        # incremental mean: weight old average and new value by sample counts
        self.avg = self.avg * (self.count / total) + val * (n / total)
        self.count = total
def get_pred_from_cls_output(outputs):
    """Return the argmax class index (flattened) for each output head."""
    preds = []
    for head_scores in outputs:
        # top-1 along the class dimension
        _, top = head_scores.topk(1, 1, True, True)
        preds.append(top.view(-1))
    return preds
def accuracy(outputs, targets):
    """Compute accuracy for each euler angle separately"""
    with torch.no_grad():  # no grad computation to reduce memory
        accs = []
        for idx, head_scores in enumerate(outputs):
            # top-1 prediction for this angle head
            _, pred = head_scores.topk(1, 1, True, True)
            pred = pred.view(-1)
            accs.append(100. * torch.mean((pred == targets[:, idx]).float()))
        return accs
def accuracyViews(outputs, targets, classes, views=(4, 8, 12)):
    """Compute accuracy for different number of views"""
    with torch.no_grad():  # no grad computation to reduce memory
        # top-1 predicted class per sample
        _, preds = outputs.topk(1, 1)
        preds = preds.view(-1)
        accs = []
        for view in views:
            # collapse the fine-grained classes into `view` coarse bins
            bucket = classes / view
            hits = torch.eq(preds // bucket, targets // bucket)
            accs.append(hits.float().mean() * 100)
        return accs
def gen_confusion(outputs, targets, azi_classes, ele_classes, rol_classes=None, epoch=0, result_dir="./"):
    """generate confusion matrix for phi and theta

    The network output is assumed to be the concatenation
    [azi scores | ele scores | (optional) rol scores]; one normalized
    confusion-matrix figure per angle is saved into `result_dir`.
    Returns True when the in-plane rotation was processed, else False.
    """
    # (tag, #classes, score block, ground-truth column) for each angle
    sections = [
        ('azi', azi_classes, outputs[:, 0:azi_classes], targets[:, 0]),
        ('ele', ele_classes, outputs[:, azi_classes:azi_classes + ele_classes], targets[:, 1]),
    ]
    if rol_classes is not None:
        sections.append(('rol', rol_classes, outputs[:, azi_classes + ele_classes:], targets[:, 2]))
    plot_titles = {'azi': 'confusion matrix for azimuth',
                   'ele': 'confusion matrix for elevation',
                   'rol': 'confusion matrix for inplane rotation'}
    for tag, n_cls, scores, gt in sections:
        # count (ground-truth, predicted) pairs; rows are ground-truth classes
        confusion = torch.zeros(n_cls, n_cls)
        _, pred = scores.topk(1, 1, True, True)  # predicted class indices
        for i in range(0, gt.size(0)):
            confusion[gt[i], pred[i]] += 1
        # row-normalize; leave all-zero rows at zero
        for i in range(0, n_cls):
            confusion[i] = confusion[i] / confusion[i].sum() if confusion[i].sum() != 0 else 0
        # plot the confusion matrix and save it
        fig = plt.figure()
        ax = fig.add_subplot(111)
        cax = ax.matshow(confusion.numpy(), vmin=0, vmax=1)
        fig.colorbar(cax)
        plt.xlabel('predicted class')
        plt.ylabel('actual class')
        plt.title(plot_titles[tag])
        fig_name = 'fig_confusion_' + tag + '_' + str(epoch) + '.jpg'
        fig.savefig(os.path.join(result_dir, fig_name))
        plt.close(fig)
    return rol_classes is not None
def plot_loss_fig(epoch, losses):
    """Plot train/val(/test) loss curves up to the current epoch and
    return the matplotlib figure."""
    xs = np.arange(1, epoch + 2)
    fig_loss = plt.figure()
    plt.grid()
    # a third column means a test-loss curve is present
    if losses.shape[1] == 3:
        labels, fmts = ('train_loss', 'val_loss', 'test_loss'), ('b+-', 'g+-', 'r+-')
    else:
        labels, fmts = ('train_loss', 'val_loss'), ('b+-', 'r+-')
    for col, fmt in enumerate(fmts):
        plt.plot(xs, losses[0:epoch + 1, col], fmt)
    plt.legend(labels, loc='upper right', fontsize='xx-small')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.title('Training curve')
    return fig_loss
def plot_acc_fig(epoch, accs):
    """Plot train/val(/test) accuracy curves up to the current epoch and
    return the matplotlib figure."""
    xs = np.arange(1, epoch + 2)
    fig_acc = plt.figure()
    plt.grid()
    # a third column means a test-accuracy curve is present
    if accs.shape[1] == 3:
        labels, fmts = ('train_acc', 'val_acc', 'test_acc'), ('b+-', 'g+-', 'r+-')
    else:
        labels, fmts = ('train_acc', 'val_acc'), ('b+-', 'r+-')
    for col, fmt in enumerate(fmts):
        plt.plot(xs, accs[0:epoch + 1, col], fmt)
    plt.legend(labels, loc='upper left', fontsize='xx-small')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.title('Accuracy curve')
    return fig_acc
def plot_acc_angle_cls_fig(epoch, accuracies):
    """Plot train/val classification accuracy per euler angle
    (azimuth, elevation, in-plane rotation) and return the figure."""
    xs = np.arange(1, epoch + 2)
    fig_acc = plt.figure()
    plt.grid()
    # solid = train, dashed = val; blue/green/red = azi/ele/rol
    fmts = ('b+-', 'bo--', 'g+-', 'go--', 'r+-', 'ro--')
    for col, fmt in enumerate(fmts):
        plt.plot(xs, accuracies[0:epoch + 1, col], fmt)
    plt.legend(('train_azi', 'val_azi', 'train_ele', 'val_ele', 'train_rol', 'val_rol'), loc='upper left', fontsize='xx-small')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.title('Accuracies for euler angle classification')
    return fig_acc
def plot_angle_acc_fig(epoch, accs):
    """Plot per-angle accuracy curves (azimuth/elevation[/inplane]) and
    return the matplotlib figure."""
    xs = np.arange(1, epoch + 2)
    fig_acc = plt.figure()
    plt.grid()
    # a third column means the in-plane rotation curve is present
    if accs.shape[1] == 3:
        labels, fmts = ('azimuth', 'elevation', 'inplane'), ('r+-', 'g+-', 'b+-')
    else:
        labels, fmts = ('azimuth', 'elevation'), ('r+-', 'g+-')
    for col, fmt in enumerate(fmts):
        plt.plot(xs, accs[0:epoch + 1, col], fmt)
    plt.legend(labels, loc='upper left', fontsize='xx-small')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.title('Accuracy curve for angles')
    return fig_acc
def angles_to_matrix(angles):
    """Compute the rotation matrix from euler angles for a mini-batch

    `angles` is (N, 3) = [azimuth, elevation, in-plane rotation] in radians;
    the result is (N, 9), each row a flattened 3x3 rotation matrix.
    """
    azi, ele, rol = angles[:, 0], angles[:, 1], angles[:, 2]
    ca, sa = torch.cos(azi), torch.sin(azi)
    ce, se = torch.cos(ele), torch.sin(ele)
    cr, sr = torch.cos(rol), torch.sin(rol)
    # row-major entries of R = Rz(rol) * Rx(ele-like) * Rz(azi) composition
    entries = [
        cr * ca - sr * ce * sa,
        sr * ca + cr * ce * sa,
        se * sa,
        -cr * sa - sr * ce * ca,
        -sr * sa + cr * ce * ca,
        se * ca,
        sr * se,
        -cr * se,
        ce,
    ]
    return torch.cat([e.unsqueeze(1) for e in entries], dim=1)
def rotation_err(preds, targets):
    """compute rotation error for viewpoint estimation

    Returns the geodesic angle (degrees) between the rotation matrices
    built from the predicted and ground-truth euler angle triplets.
    """
    pred_angles = preds.float().clone()
    gt_angles = targets.float().clone()
    # shift elevation and in-plane rotation from [0, 360] to [-180, 180]
    for angle_batch in (pred_angles, gt_angles):
        angle_batch[:, 1] = angle_batch[:, 1] - 180.
        angle_batch[:, 2] = angle_batch[:, 2] - 180.
    # degrees -> radians
    pred_angles = pred_angles * np.pi / 180.
    gt_angles = gt_angles * np.pi / 180.
    R_pred = angles_to_matrix(pred_angles)
    R_gt = angles_to_matrix(gt_angles)
    # trace(R_pred^T R_gt) via elementwise product; clamp guards acos domain
    trace = torch.sum(R_pred * R_gt, 1)
    err_rad = torch.acos((trace.clamp(-1., 3.) - 1.) / 2)
    return err_rad * 180. / np.pi
def rotation_acc(preds, targets, th=30.):
    """Percentage of samples whose rotation error is within `th` degrees."""
    errs = rotation_err(preds, targets)
    within = (errs <= th).float()
    return 100. * torch.mean(within)
def angle_err(preds, targets):
    """compute rotation error for viewpoint estimation

    Per-angle absolute difference in degrees, wrapped to [0, 180].
    """
    delta = torch.abs(preds - targets)
    # take the shorter way around the circle
    return torch.min(delta, 360. - delta)
if __name__ == '__main__':
    # smoke test: random integer angle triplets in [0, 360)
    a = torch.randint(360, (4, 3)).float()
    b = torch.randint(360, (4, 3)).float()
    print(a)
    err = rotation_err(a, b)
    # Fix: the second print repeated `a`; report the computed error instead.
    print(err)
| [
"matplotlib.pyplot.title",
"PIL.Image.new",
"torch.cat",
"torch.cos",
"matplotlib.pyplot.figure",
"torch.nn.init.constant_",
"numpy.arange",
"torch.no_grad",
"os.path.join",
"torch.nn.init.kaiming_normal_",
"matplotlib.pyplot.close",
"torch.zeros",
"torch.randint",
"matplotlib.pyplot.legen... | [((427, 472), 'PIL.Image.new', 'Image.new', (['mode', '(desired_size, desired_size)'], {}), '(mode, (desired_size, desired_size))\n', (436, 472), False, 'from PIL import Image\n'), ((1349, 1376), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (1359, 1376), False, 'import torch\n'), ((3776, 3813), 'torch.zeros', 'torch.zeros', (['azi_classes', 'azi_classes'], {}), '(azi_classes, azi_classes)\n', (3787, 3813), False, 'import torch\n'), ((3834, 3871), 'torch.zeros', 'torch.zeros', (['ele_classes', 'ele_classes'], {}), '(ele_classes, ele_classes)\n', (3845, 3871), False, 'import torch\n'), ((5397, 5409), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5407, 5409), True, 'import matplotlib.pyplot as plt\n'), ((5544, 5573), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""predicted class"""'], {}), "('predicted class')\n", (5554, 5573), True, 'import matplotlib.pyplot as plt\n'), ((5578, 5604), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""actual class"""'], {}), "('actual class')\n", (5588, 5604), True, 'import matplotlib.pyplot as plt\n'), ((5609, 5650), 'matplotlib.pyplot.title', 'plt.title', (['"""confusion matrix for azimuth"""'], {}), "('confusion matrix for azimuth')\n", (5618, 5650), True, 'import matplotlib.pyplot as plt\n'), ((5774, 5797), 'matplotlib.pyplot.close', 'plt.close', (['fig_conf_azi'], {}), '(fig_conf_azi)\n', (5783, 5797), True, 'import matplotlib.pyplot as plt\n'), ((5818, 5830), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5828, 5830), True, 'import matplotlib.pyplot as plt\n'), ((5965, 5994), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""predicted class"""'], {}), "('predicted class')\n", (5975, 5994), True, 'import matplotlib.pyplot as plt\n'), ((5999, 6025), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""actual class"""'], {}), "('actual class')\n", (6009, 6025), True, 'import matplotlib.pyplot as plt\n'), ((6030, 6073), 'matplotlib.pyplot.title', 
'plt.title', (['"""confusion matrix for elevation"""'], {}), "('confusion matrix for elevation')\n", (6039, 6073), True, 'import matplotlib.pyplot as plt\n'), ((6197, 6220), 'matplotlib.pyplot.close', 'plt.close', (['fig_conf_ele'], {}), '(fig_conf_ele)\n', (6206, 6220), True, 'import matplotlib.pyplot as plt\n'), ((6290, 6302), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6300, 6302), True, 'import matplotlib.pyplot as plt\n'), ((6437, 6466), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""predicted class"""'], {}), "('predicted class')\n", (6447, 6466), True, 'import matplotlib.pyplot as plt\n'), ((6471, 6497), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""actual class"""'], {}), "('actual class')\n", (6481, 6497), True, 'import matplotlib.pyplot as plt\n'), ((6502, 6552), 'matplotlib.pyplot.title', 'plt.title', (['"""confusion matrix for inplane rotation"""'], {}), "('confusion matrix for inplane rotation')\n", (6511, 6552), True, 'import matplotlib.pyplot as plt\n'), ((6676, 6699), 'matplotlib.pyplot.close', 'plt.close', (['fig_conf_rol'], {}), '(fig_conf_rol)\n', (6685, 6699), True, 'import matplotlib.pyplot as plt\n'), ((6765, 6788), 'numpy.arange', 'np.arange', (['(1)', '(epoch + 2)'], {}), '(1, epoch + 2)\n', (6774, 6788), True, 'import numpy as np\n'), ((6804, 6816), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6814, 6816), True, 'import matplotlib.pyplot as plt\n'), ((6821, 6831), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6829, 6831), True, 'import matplotlib.pyplot as plt\n'), ((7342, 7361), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (7352, 7361), True, 'import matplotlib.pyplot as plt\n'), ((7366, 7384), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (7376, 7384), True, 'import matplotlib.pyplot as plt\n'), ((7389, 7416), 'matplotlib.pyplot.title', 'plt.title', (['"""Training curve"""'], {}), "('Training curve')\n", (7398, 7416), True, 
'import matplotlib.pyplot as plt\n'), ((7483, 7506), 'numpy.arange', 'np.arange', (['(1)', '(epoch + 2)'], {}), '(1, epoch + 2)\n', (7492, 7506), True, 'import numpy as np\n'), ((7521, 7533), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7531, 7533), True, 'import matplotlib.pyplot as plt\n'), ((7538, 7548), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (7546, 7548), True, 'import matplotlib.pyplot as plt\n'), ((8040, 8059), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (8050, 8059), True, 'import matplotlib.pyplot as plt\n'), ((8064, 8086), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (8074, 8086), True, 'import matplotlib.pyplot as plt\n'), ((8091, 8118), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy curve"""'], {}), "('Accuracy curve')\n", (8100, 8118), True, 'import matplotlib.pyplot as plt\n'), ((8200, 8223), 'numpy.arange', 'np.arange', (['(1)', '(epoch + 2)'], {}), '(1, epoch + 2)\n', (8209, 8223), True, 'import numpy as np\n'), ((8238, 8250), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8248, 8250), True, 'import matplotlib.pyplot as plt\n'), ((8255, 8265), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8263, 8265), True, 'import matplotlib.pyplot as plt\n'), ((8270, 8552), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'accuracies[0:epoch + 1, 0]', '"""b+-"""', 'epochs', 'accuracies[0:epoch + 1, 1]', '"""bo--"""', 'epochs', 'accuracies[0:epoch + 1, 2]', '"""g+-"""', 'epochs', 'accuracies[0:epoch + 1, 3]', '"""go--"""', 'epochs', 'accuracies[0:epoch + 1, 4]', '"""r+-"""', 'epochs', 'accuracies[0:epoch + 1, 5]', '"""ro--"""'], {}), "(epochs, accuracies[0:epoch + 1, 0], 'b+-', epochs, accuracies[0:\n epoch + 1, 1], 'bo--', epochs, accuracies[0:epoch + 1, 2], 'g+-',\n epochs, accuracies[0:epoch + 1, 3], 'go--', epochs, accuracies[0:epoch +\n 1, 4], 'r+-', epochs, accuracies[0:epoch + 1, 5], 'ro--')\n", (8278, 8552), True, 
'import matplotlib.pyplot as plt\n'), ((8609, 8736), 'matplotlib.pyplot.legend', 'plt.legend', (["('train_azi', 'val_azi', 'train_ele', 'val_ele', 'train_rol', 'val_rol')"], {'loc': '"""upper left"""', 'fontsize': '"""xx-small"""'}), "(('train_azi', 'val_azi', 'train_ele', 'val_ele', 'train_rol',\n 'val_rol'), loc='upper left', fontsize='xx-small')\n", (8619, 8736), True, 'import matplotlib.pyplot as plt\n'), ((8737, 8756), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (8747, 8756), True, 'import matplotlib.pyplot as plt\n'), ((8761, 8783), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (8771, 8783), True, 'import matplotlib.pyplot as plt\n'), ((8788, 8842), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracies for euler angle classification"""'], {}), "('Accuracies for euler angle classification')\n", (8797, 8842), True, 'import matplotlib.pyplot as plt\n'), ((8914, 8937), 'numpy.arange', 'np.arange', (['(1)', '(epoch + 2)'], {}), '(1, epoch + 2)\n', (8923, 8937), True, 'import numpy as np\n'), ((8952, 8964), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8962, 8964), True, 'import matplotlib.pyplot as plt\n'), ((8969, 8979), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8977, 8979), True, 'import matplotlib.pyplot as plt\n'), ((9470, 9489), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (9480, 9489), True, 'import matplotlib.pyplot as plt\n'), ((9494, 9516), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (9504, 9516), True, 'import matplotlib.pyplot as plt\n'), ((9521, 9559), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy curve for angles"""'], {}), "('Accuracy curve for angles')\n", (9530, 9559), True, 'import matplotlib.pyplot as plt\n'), ((10512, 10624), 'torch.cat', 'torch.cat', (['(element1, element2, element3, element4, element5, element6, element7,\n element8, element9)'], 
{'dim': '(1)'}), '((element1, element2, element3, element4, element5, element6,\n element7, element8, element9), dim=1)\n', (10521, 10624), False, 'import torch\n'), ((11456, 11482), 'torch.abs', 'torch.abs', (['(preds - targets)'], {}), '(preds - targets)\n', (11465, 11482), False, 'import torch\n'), ((11494, 11523), 'torch.min', 'torch.min', (['errs', '(360.0 - errs)'], {}), '(errs, 360.0 - errs)\n', (11503, 11523), False, 'import torch\n'), ((2681, 2696), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2694, 2696), False, 'import torch\n'), ((3076, 3091), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3089, 3091), False, 'import torch\n'), ((4312, 4349), 'torch.zeros', 'torch.zeros', (['rol_classes', 'rol_classes'], {}), '(rol_classes, rol_classes)\n', (4323, 4349), False, 'import torch\n'), ((5734, 5768), 'os.path.join', 'os.path.join', (['result_dir', 'fig_name'], {}), '(result_dir, fig_name)\n', (5746, 5768), False, 'import os\n'), ((6157, 6191), 'os.path.join', 'os.path.join', (['result_dir', 'fig_name'], {}), '(result_dir, fig_name)\n', (6169, 6191), False, 'import os\n'), ((6636, 6670), 'os.path.join', 'os.path.join', (['result_dir', 'fig_name'], {}), '(result_dir, fig_name)\n', (6648, 6670), False, 'import os\n'), ((6869, 6998), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'losses[0:epoch + 1, 0]', '"""b+-"""', 'epochs', 'losses[0:epoch + 1, 1]', '"""g+-"""', 'epochs', 'losses[0:epoch + 1, 2]', '"""r+-"""'], {}), "(epochs, losses[0:epoch + 1, 0], 'b+-', epochs, losses[0:epoch + 1,\n 1], 'g+-', epochs, losses[0:epoch + 1, 2], 'r+-')\n", (6877, 6998), True, 'import matplotlib.pyplot as plt\n'), ((7037, 7132), 'matplotlib.pyplot.legend', 'plt.legend', (["('train_loss', 'val_loss', 'test_loss')"], {'loc': '"""upper right"""', 'fontsize': '"""xx-small"""'}), "(('train_loss', 'val_loss', 'test_loss'), loc='upper right',\n fontsize='xx-small')\n", (7047, 7132), True, 'import matplotlib.pyplot as plt\n'), ((7147, 7237), 'matplotlib.pyplot.plot', 
'plt.plot', (['epochs', 'losses[0:epoch + 1, 0]', '"""b+-"""', 'epochs', 'losses[0:epoch + 1, 1]', '"""r+-"""'], {}), "(epochs, losses[0:epoch + 1, 0], 'b+-', epochs, losses[0:epoch + 1,\n 1], 'r+-')\n", (7155, 7237), True, 'import matplotlib.pyplot as plt\n'), ((7259, 7337), 'matplotlib.pyplot.legend', 'plt.legend', (["('train_loss', 'val_loss')"], {'loc': '"""upper right"""', 'fontsize': '"""xx-small"""'}), "(('train_loss', 'val_loss'), loc='upper right', fontsize='xx-small')\n", (7269, 7337), True, 'import matplotlib.pyplot as plt\n'), ((7584, 7707), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'accs[0:epoch + 1, 0]', '"""b+-"""', 'epochs', 'accs[0:epoch + 1, 1]', '"""g+-"""', 'epochs', 'accs[0:epoch + 1, 2]', '"""r+-"""'], {}), "(epochs, accs[0:epoch + 1, 0], 'b+-', epochs, accs[0:epoch + 1, 1],\n 'g+-', epochs, accs[0:epoch + 1, 2], 'r+-')\n", (7592, 7707), True, 'import matplotlib.pyplot as plt\n'), ((7746, 7838), 'matplotlib.pyplot.legend', 'plt.legend', (["('train_acc', 'val_acc', 'test_acc')"], {'loc': '"""upper left"""', 'fontsize': '"""xx-small"""'}), "(('train_acc', 'val_acc', 'test_acc'), loc='upper left', fontsize\n ='xx-small')\n", (7756, 7838), True, 'import matplotlib.pyplot as plt\n'), ((7852, 7938), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'accs[0:epoch + 1, 0]', '"""b+-"""', 'epochs', 'accs[0:epoch + 1, 1]', '"""r+-"""'], {}), "(epochs, accs[0:epoch + 1, 0], 'b+-', epochs, accs[0:epoch + 1, 1],\n 'r+-')\n", (7860, 7938), True, 'import matplotlib.pyplot as plt\n'), ((7960, 8035), 'matplotlib.pyplot.legend', 'plt.legend', (["('train_acc', 'val_acc')"], {'loc': '"""upper left"""', 'fontsize': '"""xx-small"""'}), "(('train_acc', 'val_acc'), loc='upper left', fontsize='xx-small')\n", (7970, 8035), True, 'import matplotlib.pyplot as plt\n'), ((9015, 9138), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'accs[0:epoch + 1, 0]', '"""r+-"""', 'epochs', 'accs[0:epoch + 1, 1]', '"""g+-"""', 'epochs', 'accs[0:epoch + 1, 2]', '"""b+-"""'], 
{}), "(epochs, accs[0:epoch + 1, 0], 'r+-', epochs, accs[0:epoch + 1, 1],\n 'g+-', epochs, accs[0:epoch + 1, 2], 'b+-')\n", (9023, 9138), True, 'import matplotlib.pyplot as plt\n'), ((9177, 9268), 'matplotlib.pyplot.legend', 'plt.legend', (["('azimuth', 'elevation', 'inplane')"], {'loc': '"""upper left"""', 'fontsize': '"""xx-small"""'}), "(('azimuth', 'elevation', 'inplane'), loc='upper left', fontsize=\n 'xx-small')\n", (9187, 9268), True, 'import matplotlib.pyplot as plt\n'), ((9282, 9368), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'accs[0:epoch + 1, 0]', '"""r+-"""', 'epochs', 'accs[0:epoch + 1, 1]', '"""g+-"""'], {}), "(epochs, accs[0:epoch + 1, 0], 'r+-', epochs, accs[0:epoch + 1, 1],\n 'g+-')\n", (9290, 9368), True, 'import matplotlib.pyplot as plt\n'), ((9390, 9465), 'matplotlib.pyplot.legend', 'plt.legend', (["('azimuth', 'elevation')"], {'loc': '"""upper left"""', 'fontsize': '"""xx-small"""'}), "(('azimuth', 'elevation'), loc='upper left', fontsize='xx-small')\n", (9400, 9465), True, 'import matplotlib.pyplot as plt\n'), ((732, 778), 'torch.nn.init.kaiming_normal_', 'torch.nn.init.kaiming_normal_', (['m.weight'], {'a': '(0.2)'}), '(m.weight, a=0.2)\n', (761, 778), False, 'import torch\n'), ((1461, 1504), 'os.path.join', 'os.path.join', (['result_path', '"""model_best.pth"""'], {}), "(result_path, 'model_best.pth')\n", (1473, 1504), False, 'import os\n'), ((3465, 3493), 'torch.eq', 'torch.eq', (['preds_n', 'targets_n'], {}), '(preds_n, targets_n)\n', (3473, 3493), False, 'import torch\n'), ((10472, 10486), 'torch.cos', 'torch.cos', (['ele'], {}), '(ele)\n', (10481, 10486), False, 'import torch\n'), ((11576, 11602), 'torch.randint', 'torch.randint', (['(360)', '(4, 3)'], {}), '(360, (4, 3))\n', (11589, 11602), False, 'import torch\n'), ((11619, 11645), 'torch.randint', 'torch.randint', (['(360)', '(4, 3)'], {}), '(360, (4, 3))\n', (11632, 11645), False, 'import torch\n'), ((876, 910), 'torch.nn.init.constant_', 'torch.nn.init.constant_', 
(['m.bias', '(0)'], {}), '(m.bias, 0)\n', (899, 910), False, 'import torch\n'), ((973, 1007), 'torch.nn.init.constant_', 'torch.nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (996, 1007), False, 'import torch\n'), ((1020, 1056), 'torch.nn.init.constant_', 'torch.nn.init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (1043, 1056), False, 'import torch\n'), ((9995, 10009), 'torch.sin', 'torch.sin', (['ele'], {}), '(ele)\n', (10004, 10009), False, 'import torch\n'), ((10012, 10026), 'torch.sin', 'torch.sin', (['azi'], {}), '(azi)\n', (10021, 10026), False, 'import torch\n'), ((10285, 10299), 'torch.sin', 'torch.sin', (['ele'], {}), '(ele)\n', (10294, 10299), False, 'import torch\n'), ((10302, 10316), 'torch.cos', 'torch.cos', (['azi'], {}), '(azi)\n', (10311, 10316), False, 'import torch\n'), ((10347, 10361), 'torch.sin', 'torch.sin', (['rol'], {}), '(rol)\n', (10356, 10361), False, 'import torch\n'), ((10364, 10378), 'torch.sin', 'torch.sin', (['ele'], {}), '(ele)\n', (10373, 10378), False, 'import torch\n'), ((10427, 10441), 'torch.sin', 'torch.sin', (['ele'], {}), '(ele)\n', (10436, 10441), False, 'import torch\n'), ((1114, 1156), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['m.weight'], {'std': '(0.001)'}), '(m.weight, std=0.001)\n', (1135, 1156), False, 'import torch\n'), ((9769, 9783), 'torch.cos', 'torch.cos', (['rol'], {}), '(rol)\n', (9778, 9783), False, 'import torch\n'), ((9786, 9800), 'torch.cos', 'torch.cos', (['azi'], {}), '(azi)\n', (9795, 9800), False, 'import torch\n'), ((9837, 9851), 'torch.sin', 'torch.sin', (['azi'], {}), '(azi)\n', (9846, 9851), False, 'import torch\n'), ((9882, 9896), 'torch.sin', 'torch.sin', (['rol'], {}), '(rol)\n', (9891, 9896), False, 'import torch\n'), ((9899, 9913), 'torch.cos', 'torch.cos', (['azi'], {}), '(azi)\n', (9908, 9913), False, 'import torch\n'), ((9950, 9964), 'torch.sin', 'torch.sin', (['azi'], {}), '(azi)\n', (9959, 9964), False, 'import torch\n'), ((10075, 10089), 'torch.sin', 
'torch.sin', (['azi'], {}), '(azi)\n', (10084, 10089), False, 'import torch\n'), ((10126, 10140), 'torch.cos', 'torch.cos', (['azi'], {}), '(azi)\n', (10135, 10140), False, 'import torch\n'), ((10189, 10203), 'torch.sin', 'torch.sin', (['azi'], {}), '(azi)\n', (10198, 10203), False, 'import torch\n'), ((10240, 10254), 'torch.cos', 'torch.cos', (['azi'], {}), '(azi)\n', (10249, 10254), False, 'import torch\n'), ((10410, 10424), 'torch.cos', 'torch.cos', (['rol'], {}), '(rol)\n', (10419, 10424), False, 'import torch\n'), ((1207, 1241), 'torch.nn.init.constant_', 'torch.nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (1230, 1241), False, 'import torch\n'), ((9803, 9817), 'torch.sin', 'torch.sin', (['rol'], {}), '(rol)\n', (9812, 9817), False, 'import torch\n'), ((9820, 9834), 'torch.cos', 'torch.cos', (['ele'], {}), '(ele)\n', (9829, 9834), False, 'import torch\n'), ((9916, 9930), 'torch.cos', 'torch.cos', (['rol'], {}), '(rol)\n', (9925, 9930), False, 'import torch\n'), ((9933, 9947), 'torch.cos', 'torch.cos', (['ele'], {}), '(ele)\n', (9942, 9947), False, 'import torch\n'), ((10058, 10072), 'torch.cos', 'torch.cos', (['rol'], {}), '(rol)\n', (10067, 10072), False, 'import torch\n'), ((10092, 10106), 'torch.sin', 'torch.sin', (['rol'], {}), '(rol)\n', (10101, 10106), False, 'import torch\n'), ((10109, 10123), 'torch.cos', 'torch.cos', (['ele'], {}), '(ele)\n', (10118, 10123), False, 'import torch\n'), ((10172, 10186), 'torch.sin', 'torch.sin', (['rol'], {}), '(rol)\n', (10181, 10186), False, 'import torch\n'), ((10206, 10220), 'torch.cos', 'torch.cos', (['rol'], {}), '(rol)\n', (10215, 10220), False, 'import torch\n'), ((10223, 10237), 'torch.cos', 'torch.cos', (['ele'], {}), '(ele)\n', (10232, 10237), False, 'import torch\n'), ((11112, 11139), 'torch.sum', 'torch.sum', (['(R_pred * R_gt)', '(1)'], {}), '(R_pred * R_gt, 1)\n', (11121, 11139), False, 'import torch\n')] |
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
class NavigationVel2DEnv(gym.Env):
    """2D navigation problems, as described in [1]. The code is adapted from
    https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/maml_examples/point_env_randgoal.py

    At each time step, the 2D agent takes an action (its velocity), and
    receives a penalty equal to its L2 distance to the goal position plus a
    small control-effort penalty (ie. the reward is
    `-distance - 0.01 * |action|`). The 2D navigation tasks are generated by
    sampling goal positions from the uniform distribution on [-5, 5]^2.

    The observation is [position (2), previous action/velocity (2)].

    [1] <NAME>, <NAME>, <NAME>, "Model-Agnostic
    Meta-Learning for Fast Adaptation of Deep Networks", 2017
    (https://arxiv.org/abs/1703.03400)
    """
    def __init__(self, task={}):
        super(NavigationVel2DEnv, self).__init__()

        self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32)
        self.action_space = spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32)

        self._task = task
        self._goal = task.get('goal', np.zeros(2, dtype=np.float32))
        self._state = np.zeros(2, dtype=np.float32)
        # Fix: also initialize the velocity here so step() does not raise
        # AttributeError when called before the first reset().
        self._vel = np.zeros(2, dtype=np.float32)
        self.clip_position = True
        self.update_vel = True
        self.seed()

    def seed(self, seed=None):
        """Seed the task-sampling RNG; returns the seed list (gym convention)."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def sample_tasks(self, num_tasks):
        """Sample `num_tasks` goals uniformly from [-5, 5]^2."""
        goals = self.np_random.uniform(-5., 5., size=(num_tasks, 2))
        tasks = [{'goal': goal} for goal in goals]
        return tasks

    def reset_task(self, task):
        """Switch the environment to a new task (goal position)."""
        self._task = task
        self._goal = task['goal']

    def reset(self, env=True):
        """Reset position and velocity to the origin; return the observation."""
        self._state = np.zeros(2, dtype=np.float32)
        self._vel = np.zeros(2, dtype=np.float32)
        return np.concatenate([self._state, self._vel])

    def step(self, action):
        # action = np.clip(action, -0.1, 0.1)
        self._state = self._state + action
        if self.clip_position:
            self._state = np.clip(self._state, -10, 10)
        # the observation carries the *previous* velocity, updated below
        next_obs = np.concatenate([self._state, self._vel])

        x = self._state[0] - self._goal[0]
        y = self._state[1] - self._goal[1]
        # negative distance to goal, plus a small action-norm penalty
        reward = -np.sqrt(x ** 2 + y ** 2) - 0.01 * np.linalg.norm(action)

        # Update velocity (i.e., adding previous action as input)
        if self.update_vel:
            self._vel = action

        # episodes never terminate; the task dict is returned as info
        return next_obs, reward, False, self._task
| [
"numpy.zeros",
"numpy.clip",
"numpy.linalg.norm",
"gym.spaces.Box",
"numpy.sqrt",
"numpy.concatenate",
"gym.utils.seeding.np_random"
] | [((925, 991), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(4,)', 'dtype': 'np.float32'}), '(low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32)\n', (935, 991), False, 'from gym import spaces\n'), ((1020, 1076), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-1)', 'high': '(1)', 'shape': '(2,)', 'dtype': 'np.float32'}), '(low=-1, high=1, shape=(2,), dtype=np.float32)\n', (1030, 1076), False, 'from gym import spaces\n'), ((1195, 1224), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.float32'}), '(2, dtype=np.float32)\n', (1203, 1224), True, 'import numpy as np\n'), ((1373, 1396), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (1390, 1396), False, 'from gym.utils import seeding\n'), ((1747, 1776), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.float32'}), '(2, dtype=np.float32)\n', (1755, 1776), True, 'import numpy as np\n'), ((1797, 1826), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.float32'}), '(2, dtype=np.float32)\n', (1805, 1826), True, 'import numpy as np\n'), ((1842, 1882), 'numpy.concatenate', 'np.concatenate', (['[self._state, self._vel]'], {}), '([self._state, self._vel])\n', (1856, 1882), True, 'import numpy as np\n'), ((2108, 2148), 'numpy.concatenate', 'np.concatenate', (['[self._state, self._vel]'], {}), '([self._state, self._vel])\n', (2122, 2148), True, 'import numpy as np\n'), ((1142, 1171), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.float32'}), '(2, dtype=np.float32)\n', (1150, 1171), True, 'import numpy as np\n'), ((2059, 2088), 'numpy.clip', 'np.clip', (['self._state', '(-10)', '(10)'], {}), '(self._state, -10, 10)\n', (2066, 2088), True, 'import numpy as np\n'), ((2254, 2278), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (2261, 2278), True, 'import numpy as np\n'), ((2288, 2310), 'numpy.linalg.norm', 'np.linalg.norm', (['action'], {}), '(action)\n', (2302, 2310), True, 'import numpy as np\n')] |
# TODO: your agent here!
import numpy as np
from task import Task
from keras import layers, models, optimizers, regularizers
from keras import backend as K
import random
from collections import namedtuple, deque
class ReplayBuffer:
    """Fixed-size circular buffer of experience tuples for experience replay.

    Oldest experiences are evicted automatically once `buffer_size` is
    reached (deque with maxlen).
    """
    def __init__(self, buffer_size, batch_size):
        """
        Parameters
        ----------
        buffer_size : int
            Maximum number of experiences retained.
        batch_size : int
            Default number of experiences returned by `sample`.
        """
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])

    def add(self, state, action, reward, next_state, done):
        """Append one (s, a, r, s', done) transition to the buffer."""
        e = self.experience(state, action, reward, next_state, done)
        self.memory.append(e)

    def sample(self, batch_size=None):
        """Return a uniform random batch of experiences.

        Bug fix: the original signature accepted a `batch_size` argument but
        ignored it and always used `self.batch_size`. The argument is now
        honored; `None` (the default) falls back to the configured size, so
        existing no-argument callers are unaffected.
        """
        k = self.batch_size if batch_size is None else batch_size
        return random.sample(self.memory, k=k)

    def __len__(self):
        """Number of experiences currently stored."""
        return len(self.memory)
class Actor():
    """Deterministic policy network: maps a state to a continuous action.

    The network output is a sigmoid in [0, 1], rescaled to the task's
    [action_low, action_high] range. Training is done via a custom Keras
    backend function that ascends the action-value gradient supplied by
    the critic.
    """
    # For personal future reference: https://towardsdatascience.com/reinforcement-learning-w-keras-openai-actor-critic-models-f084612cfd69
    def __init__(self, state_size, action_size, action_low, action_high):
        """
        Parameters
        ----------
        state_size : int
            Dimension of the state vector.
        action_size : int
            Dimension of the action vector.
        action_low, action_high : float or array-like
            Bounds of the action range; used to rescale the sigmoid output.
        """
        self.state_size = state_size
        self.action_size = action_size
        self.action_low = action_low
        self.action_high = action_high
        self.action_range = self.action_high - self.action_low
        self.build_model()

    def build_model(self):
        """Build the policy network (`self.model`) and the custom training
        function (`self.train_fn`).

        Refactor: the original repeated the same four-layer block nine
        times by hand; the identical Dense -> BatchNorm -> ReLU ->
        Dropout(0.7) stack is now built in a loop over the unit widths,
        preserving the exact layer sequence (32, 64, 128, then six 64s).
        """
        states = layers.Input(shape=(self.state_size,), name='states')
        # We use relu to focus on the positive rewards, and dropout to make
        # some nodes of the NN stronger.
        net = states
        for units in (32, 64, 128, 64, 64, 64, 64, 64, 64):
            net = layers.Dense(units=units, use_bias=True)(net)
            net = layers.BatchNormalization()(net)
            net = layers.Activation('relu')(net)
            net = layers.Dropout(0.7)(net)
        # Output layer: sigmoid in [0, 1], rescaled to the action range.
        raw_actions = layers.Dense(units=self.action_size, activation='sigmoid',
                                   name='raw_actions')(net)
        actions = layers.Lambda(lambda x: (x * self.action_range) + self.action_low,
                                name='actions')(raw_actions)
        # Build the model using the layers above
        self.model = models.Model(inputs=states, outputs=actions)
        # Loss function using action value (Q value) gradients: minimizing
        # -dQ/da * a performs gradient ascent on the expected Q value.
        action_gradients = layers.Input(shape=(self.action_size,))
        loss = K.mean(-action_gradients * actions)
        optimizer = optimizers.Adam()
        updates_op = optimizer.get_updates(params=self.model.trainable_weights, loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients, K.learning_phase()],
            outputs=[], updates=updates_op)
class Critic:
    """Action-value network Q(s, a).

    States and actions are processed by separate pathways, merged by
    addition, and mapped to a single Q value. Also exposes
    `get_action_gradients`, the gradient of Q w.r.t. the actions, which
    the actor uses for its policy-gradient update.
    """
    # For personal future reference: https://towardsdatascience.com/reinforcement-learning-w-keras-openai-actor-critic-models-f084612cfd69
    def __init__(self, state_size, action_size):
        """
        Parameters
        ----------
        state_size : int
            Dimension of the state vector.
        action_size : int
            Dimension of the action vector.
        """
        self.state_size = state_size
        self.action_size = action_size
        self.build_model()

    def build_model(self):
        """Build and compile the critic network.

        Bug fix: the original applied the 64- and 128-unit state-path
        Dense layers directly to the raw `states` input instead of to the
        running pathway output, so the 32- and 64-unit state blocks were
        dead code. The state pathway is now chained properly
        (32 -> 64 -> 128), matching the evident intent.
        """
        # Define input layers
        states = layers.Input(shape=(self.state_size,), name='states')
        actions = layers.Input(shape=(self.action_size,), name='actions')

        # State pathway: 32 -> 64 -> 128 (last block uses dropout 0.6,
        # as in the original).
        net_states = states
        for units, rate in ((32, 0.7), (64, 0.7), (128, 0.6)):
            net_states = layers.Dense(units=units, use_bias=True)(net_states)
            net_states = layers.BatchNormalization()(net_states)
            net_states = layers.Activation('relu')(net_states)
            net_states = layers.Dropout(rate)(net_states)

        # Action pathway: 32 -> 64 -> 64 -> 128.
        net_actions = actions
        for units in (32, 64, 64, 128):
            net_actions = layers.Dense(units=units, use_bias=True)(net_actions)
            net_actions = layers.BatchNormalization()(net_actions)
            net_actions = layers.Activation('relu')(net_actions)
            net_actions = layers.Dropout(0.7)(net_actions)

        # Merge both pathways (both end at 128 units) into a scalar Q value.
        nn = layers.Add()([net_states, net_actions])
        nn = layers.Activation('relu')(nn)
        Q_values = layers.Dense(units=1, name='q_values')(nn)

        self.model = models.Model(inputs=[states, actions], outputs=Q_values)
        optimizer = optimizers.Adam()
        self.model.compile(optimizer=optimizer, loss='mse')
        # dQ/da, consumed by the actor's custom training step.
        action_gradients = K.gradients(Q_values, actions)
        self.get_action_gradients = K.function(
            inputs=[*self.model.input, K.learning_phase()],
            outputs=action_gradients)
class DDPG():
    """Deep Deterministic Policy Gradient agent.

    Holds local/target copies of an Actor (policy) and a Critic
    (Q-function), a replay buffer, and Ornstein-Uhlenbeck exploration
    noise. `act` produces noisy actions, `step` stores transitions and
    triggers learning, `learn` performs one critic + actor update followed
    by soft target-network updates.
    """
    # For future self reference: https://towardsdatascience.com/introduction-to-various-reinforcement-learning-algorithms-i-q-learning-sarsa-dqn-ddpg-72a5e0cb6287
    def __init__(self, Task):
        # NOTE(review): the parameter name `Task` shadows the imported Task
        # class; renaming would change the keyword-call interface, so it is
        # left as-is.
        self.task = Task
        self.state_size = Task.state_size
        self.action_size = Task.action_size
        self.action_low = Task.action_low
        self.action_high = Task.action_high
        # Local networks are trained; target networks provide stable
        # training targets and track the locals via soft updates.
        self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
        self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
        self.critic_local = Critic(self.state_size, self.action_size)
        self.critic_target = Critic(self.state_size, self.action_size)
        # Start targets identical to the local networks.
        self.critic_target.model.set_weights(self.critic_local.model.get_weights())
        self.actor_target.model.set_weights(self.actor_local.model.get_weights())
        # Ornstein-Uhlenbeck process parameters (mean, mean-reversion rate,
        # noise scale) for exploration in continuous action space.
        self.exploration_mu = 0
        self.exploration_theta = 0.10
        self.exploration_sigma = 0.1
        self.noise = OrnsteinUhlenbeckNoise(self.action_size, self.exploration_mu, self.exploration_theta, self.exploration_sigma)
        # Replay memory
        self.buffer_size = 100000
        self.batch_size = 128
        self.memory = ReplayBuffer(self.buffer_size, self.batch_size)
        # Discount factor
        self.gamma = 0.95
        # For soft update of target parameters
        self.tau = 0.1
    def reset_episode(self):
        """Reset the noise process and the task; return the initial state."""
        self.noise.reset()
        state = self.task.reset()
        self.last_state = state
        return state
    def step(self, action, reward, next_state, done):
        """Store one transition and learn once enough samples are buffered."""
        self.memory.add(self.last_state, action, reward, next_state, done)
        if len(self.memory) > self.batch_size:
            experiences = self.memory.sample()
            self.learn(experiences)
        # Roll over the state for the next `step` call.
        self.last_state = next_state
    def act(self, states):
        """Return the policy's action for `states` plus exploration noise."""
        state = np.reshape(states, [-1, self.state_size])
        action = self.actor_local.model.predict(state)[0]
        return list(action + self.noise.sample())
    def learn(self, experiences):
        """Update actor and critic from a batch of experience tuples.

        Order matters: the critic is trained on TD targets first, its
        action gradients then drive the actor update, and finally both
        target networks are soft-updated toward the locals.
        """
        # Unpack the batch into per-field arrays, skipping any None entries.
        states = np.vstack([e.state for e in experiences if e is not None])
        actions = np.array([e.action for e in experiences if e is not None]).astype(np.float32).reshape(-1, self.action_size)
        rewards = np.array([e.reward for e in experiences if e is not None]).astype(np.float32).reshape(-1, 1)
        dones = np.array([e.done for e in experiences if e is not None]).astype(np.uint8).reshape(-1, 1)
        next_states = np.vstack([e.next_state for e in experiences if e is not None])
        # TD target: r + gamma * Q_target(s', mu_target(s')), zeroed at
        # terminal transitions via (1 - dones).
        actions_next = self.actor_target.model.predict_on_batch(next_states)
        Q_targets_next = self.critic_target.model.predict_on_batch([next_states, actions_next])
        Q_targets = rewards + self.gamma * Q_targets_next * (1 - dones)
        self.critic_local.model.train_on_batch(x=[states, actions], y=Q_targets)
        # Policy gradient step: trailing 0/1 flags are the Keras learning
        # phase (0 = inference for the gradient read, 1 = training).
        action_gradients = np.reshape(self.critic_local.get_action_gradients([states, actions, 0]), (-1, self.action_size))
        self.actor_local.train_fn([states, action_gradients, 1])
        self.soft_update(self.critic_local.model, self.critic_target.model)
        self.soft_update(self.actor_local.model, self.actor_target.model)
    def soft_update(self, local_model, target_model):
        """Polyak-average target weights toward local weights with rate tau."""
        local_weights = np.array(local_model.get_weights())
        target_weights = np.array(target_model.get_weights())
        assert len(local_weights) == len(target_weights), "Local and target model parameters must have the same size"
        new_weights = self.tau * local_weights + (1 - self.tau) * target_weights
        target_model.set_weights(new_weights)
class OrnsteinUhlenbeckNoise:
    """Temporally correlated (mean-reverting) noise process for exploration
    in continuous action spaces."""
    # For personal future reference:
    # https://www.quora.com/Why-do-we-use-the-Ornstein-Uhlenbeck-Process-in-the-exploration-of-DDPG
    def __init__(self, size, mu, theta, sigma):
        """
        Parameters
        ----------
        size : int
            Dimension of the noise vector (one entry per action component).
        mu : float
            Long-running mean the process reverts to.
        theta : float
            Mean-reversion rate.
        sigma : float
            Scale of the Gaussian perturbation per step.
        """
        self.mu = mu * np.ones(size)
        self.theta = theta
        self.sigma = sigma
        self.reset()
    def reset(self):
        """Reset the internal state back to the mean."""
        self.state = self.mu
    def sample(self):
        """Advance the process one step: dx = theta*(mu - x) + sigma*N(0, 1)."""
        x = self.state
        dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
        self.state = x + dx
return self.state | [
"random.sample",
"numpy.ones",
"keras.models.Model",
"keras.layers.Input",
"collections.deque",
"numpy.reshape",
"keras.backend.gradients",
"keras.backend.learning_phase",
"keras.layers.Dropout",
"keras.optimizers.Adam",
"keras.layers.BatchNormalization",
"numpy.vstack",
"keras.layers.Activa... | [((307, 332), 'collections.deque', 'deque', ([], {'maxlen': 'buffer_size'}), '(maxlen=buffer_size)\n', (312, 332), False, 'from collections import namedtuple, deque\n'), ((396, 489), 'collections.namedtuple', 'namedtuple', (['"""Experience"""'], {'field_names': "['state', 'action', 'reward', 'next_state', 'done']"}), "('Experience', field_names=['state', 'action', 'reward',\n 'next_state', 'done'])\n", (406, 489), False, 'from collections import namedtuple, deque\n'), ((699, 744), 'random.sample', 'random.sample', (['self.memory'], {'k': 'self.batch_size'}), '(self.memory, k=self.batch_size)\n', (712, 744), False, 'import random\n'), ((1326, 1379), 'keras.layers.Input', 'layers.Input', ([], {'shape': '(self.state_size,)', 'name': '"""states"""'}), "(shape=(self.state_size,), name='states')\n", (1338, 1379), False, 'from keras import layers, models, optimizers, regularizers\n'), ((4321, 4365), 'keras.models.Model', 'models.Model', ([], {'inputs': 'states', 'outputs': 'actions'}), '(inputs=states, outputs=actions)\n', (4333, 4365), False, 'from keras import layers, models, optimizers, regularizers\n'), ((4462, 4501), 'keras.layers.Input', 'layers.Input', ([], {'shape': '(self.action_size,)'}), '(shape=(self.action_size,))\n', (4474, 4501), False, 'from keras import layers, models, optimizers, regularizers\n'), ((4515, 4550), 'keras.backend.mean', 'K.mean', (['(-action_gradients * actions)'], {}), '(-action_gradients * actions)\n', (4521, 4550), True, 'from keras import backend as K\n'), ((4577, 4594), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {}), '()\n', (4592, 4594), False, 'from keras import layers, models, optimizers, regularizers\n'), ((5199, 5252), 'keras.layers.Input', 'layers.Input', ([], {'shape': '(self.state_size,)', 'name': '"""states"""'}), "(shape=(self.state_size,), name='states')\n", (5211, 5252), False, 'from keras import layers, models, optimizers, regularizers\n'), ((5271, 5326), 'keras.layers.Input', 
'layers.Input', ([], {'shape': '(self.action_size,)', 'name': '"""actions"""'}), "(shape=(self.action_size,), name='actions')\n", (5283, 5326), False, 'from keras import layers, models, optimizers, regularizers\n'), ((8044, 8100), 'keras.models.Model', 'models.Model', ([], {'inputs': '[states, actions]', 'outputs': 'Q_values'}), '(inputs=[states, actions], outputs=Q_values)\n', (8056, 8100), False, 'from keras import layers, models, optimizers, regularizers\n'), ((8122, 8139), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {}), '()\n', (8137, 8139), False, 'from keras import layers, models, optimizers, regularizers\n'), ((8228, 8258), 'keras.backend.gradients', 'K.gradients', (['Q_values', 'actions'], {}), '(Q_values, actions)\n', (8239, 8258), True, 'from keras import backend as K\n'), ((10317, 10358), 'numpy.reshape', 'np.reshape', (['states', '[-1, self.state_size]'], {}), '(states, [-1, self.state_size])\n', (10327, 10358), True, 'import numpy as np\n'), ((10519, 10577), 'numpy.vstack', 'np.vstack', (['[e.state for e in experiences if e is not None]'], {}), '([e.state for e in experiences if e is not None])\n', (10528, 10577), True, 'import numpy as np\n'), ((10942, 11005), 'numpy.vstack', 'np.vstack', (['[e.next_state for e in experiences if e is not None]'], {}), '([e.next_state for e in experiences if e is not None])\n', (10951, 11005), True, 'import numpy as np\n'), ((1465, 1502), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(32)', 'use_bias': '(True)'}), '(units=32, use_bias=True)\n', (1477, 1502), False, 'from keras import layers, models, optimizers, regularizers\n'), ((1538, 1565), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1563, 1565), False, 'from keras import layers, models, optimizers, regularizers\n'), ((1605, 1630), 'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (1622, 1630), False, 'from keras import layers, models, optimizers, regularizers\n'), ((1731, 1750), 
'keras.layers.Dropout', 'layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (1745, 1750), False, 'from keras import layers, models, optimizers, regularizers\n'), ((1799, 1836), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(64)', 'use_bias': '(True)'}), '(units=64, use_bias=True)\n', (1811, 1836), False, 'from keras import layers, models, optimizers, regularizers\n'), ((1880, 1907), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1905, 1907), False, 'from keras import layers, models, optimizers, regularizers\n'), ((1947, 1972), 'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (1964, 1972), False, 'from keras import layers, models, optimizers, regularizers\n'), ((2012, 2031), 'keras.layers.Dropout', 'layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (2026, 2031), False, 'from keras import layers, models, optimizers, regularizers\n'), ((2076, 2114), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(128)', 'use_bias': '(True)'}), '(units=128, use_bias=True)\n', (2088, 2114), False, 'from keras import layers, models, optimizers, regularizers\n'), ((2158, 2185), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (2183, 2185), False, 'from keras import layers, models, optimizers, regularizers\n'), ((2225, 2250), 'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2242, 2250), False, 'from keras import layers, models, optimizers, regularizers\n'), ((2290, 2309), 'keras.layers.Dropout', 'layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (2304, 2309), False, 'from keras import layers, models, optimizers, regularizers\n'), ((2354, 2391), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(64)', 'use_bias': '(True)'}), '(units=64, use_bias=True)\n', (2366, 2391), False, 'from keras import layers, models, optimizers, regularizers\n'), ((2435, 2462), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (2460, 2462), 
False, 'from keras import layers, models, optimizers, regularizers\n'), ((2502, 2527), 'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2519, 2527), False, 'from keras import layers, models, optimizers, regularizers\n'), ((2567, 2586), 'keras.layers.Dropout', 'layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (2581, 2586), False, 'from keras import layers, models, optimizers, regularizers\n'), ((2631, 2668), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(64)', 'use_bias': '(True)'}), '(units=64, use_bias=True)\n', (2643, 2668), False, 'from keras import layers, models, optimizers, regularizers\n'), ((2712, 2739), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (2737, 2739), False, 'from keras import layers, models, optimizers, regularizers\n'), ((2779, 2804), 'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2796, 2804), False, 'from keras import layers, models, optimizers, regularizers\n'), ((2844, 2863), 'keras.layers.Dropout', 'layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (2858, 2863), False, 'from keras import layers, models, optimizers, regularizers\n'), ((2908, 2945), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(64)', 'use_bias': '(True)'}), '(units=64, use_bias=True)\n', (2920, 2945), False, 'from keras import layers, models, optimizers, regularizers\n'), ((2989, 3016), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3014, 3016), False, 'from keras import layers, models, optimizers, regularizers\n'), ((3056, 3081), 'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (3073, 3081), False, 'from keras import layers, models, optimizers, regularizers\n'), ((3121, 3140), 'keras.layers.Dropout', 'layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (3135, 3140), False, 'from keras import layers, models, optimizers, regularizers\n'), ((3181, 3218), 'keras.layers.Dense', 'layers.Dense', ([], 
{'units': '(64)', 'use_bias': '(True)'}), '(units=64, use_bias=True)\n', (3193, 3218), False, 'from keras import layers, models, optimizers, regularizers\n'), ((3262, 3289), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3287, 3289), False, 'from keras import layers, models, optimizers, regularizers\n'), ((3329, 3354), 'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (3346, 3354), False, 'from keras import layers, models, optimizers, regularizers\n'), ((3394, 3413), 'keras.layers.Dropout', 'layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (3408, 3413), False, 'from keras import layers, models, optimizers, regularizers\n'), ((3458, 3495), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(64)', 'use_bias': '(True)'}), '(units=64, use_bias=True)\n', (3470, 3495), False, 'from keras import layers, models, optimizers, regularizers\n'), ((3539, 3566), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3564, 3566), False, 'from keras import layers, models, optimizers, regularizers\n'), ((3606, 3631), 'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (3623, 3631), False, 'from keras import layers, models, optimizers, regularizers\n'), ((3671, 3690), 'keras.layers.Dropout', 'layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (3685, 3690), False, 'from keras import layers, models, optimizers, regularizers\n'), ((3735, 3772), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(64)', 'use_bias': '(True)'}), '(units=64, use_bias=True)\n', (3747, 3772), False, 'from keras import layers, models, optimizers, regularizers\n'), ((3816, 3843), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3841, 3843), False, 'from keras import layers, models, optimizers, regularizers\n'), ((3883, 3908), 'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (3900, 3908), False, 'from keras import 
layers, models, optimizers, regularizers\n'), ((3948, 3967), 'keras.layers.Dropout', 'layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (3962, 3967), False, 'from keras import layers, models, optimizers, regularizers\n'), ((4030, 4108), 'keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.action_size', 'activation': '"""sigmoid"""', 'name': '"""raw_actions"""'}), "(units=self.action_size, activation='sigmoid', name='raw_actions')\n", (4042, 4108), False, 'from keras import layers, models, optimizers, regularizers\n'), ((4152, 4237), 'keras.layers.Lambda', 'layers.Lambda', (['(lambda x: x * self.action_range + self.action_low)'], {'name': '"""actions"""'}), "(lambda x: x * self.action_range + self.action_low, name='actions'\n )\n", (4165, 4237), False, 'from keras import layers, models, optimizers, regularizers\n'), ((5356, 5393), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(32)', 'use_bias': '(True)'}), '(units=32, use_bias=True)\n', (5368, 5393), False, 'from keras import layers, models, optimizers, regularizers\n'), ((5432, 5459), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (5457, 5459), False, 'from keras import layers, models, optimizers, regularizers\n'), ((5507, 5532), 'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (5524, 5532), False, 'from keras import layers, models, optimizers, regularizers\n'), ((5580, 5599), 'keras.layers.Dropout', 'layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (5594, 5599), False, 'from keras import layers, models, optimizers, regularizers\n'), ((5656, 5693), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(64)', 'use_bias': '(True)'}), '(units=64, use_bias=True)\n', (5668, 5693), False, 'from keras import layers, models, optimizers, regularizers\n'), ((5732, 5759), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (5757, 5759), False, 'from keras import layers, models, optimizers, regularizers\n'), ((5807, 5832), 
'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (5824, 5832), False, 'from keras import layers, models, optimizers, regularizers\n'), ((5880, 5899), 'keras.layers.Dropout', 'layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (5894, 5899), False, 'from keras import layers, models, optimizers, regularizers\n'), ((5956, 5994), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(128)', 'use_bias': '(True)'}), '(units=128, use_bias=True)\n', (5968, 5994), False, 'from keras import layers, models, optimizers, regularizers\n'), ((6033, 6060), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (6058, 6060), False, 'from keras import layers, models, optimizers, regularizers\n'), ((6108, 6133), 'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (6125, 6133), False, 'from keras import layers, models, optimizers, regularizers\n'), ((6181, 6200), 'keras.layers.Dropout', 'layers.Dropout', (['(0.6)'], {}), '(0.6)\n', (6195, 6200), False, 'from keras import layers, models, optimizers, regularizers\n'), ((6250, 6287), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(32)', 'use_bias': '(True)'}), '(units=32, use_bias=True)\n', (6262, 6287), False, 'from keras import layers, models, optimizers, regularizers\n'), ((6328, 6355), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (6353, 6355), False, 'from keras import layers, models, optimizers, regularizers\n'), ((6405, 6430), 'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (6422, 6430), False, 'from keras import layers, models, optimizers, regularizers\n'), ((6480, 6499), 'keras.layers.Dropout', 'layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (6494, 6499), False, 'from keras import layers, models, optimizers, regularizers\n'), ((6558, 6595), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(64)', 'use_bias': '(True)'}), '(units=64, use_bias=True)\n', (6570, 6595), 
False, 'from keras import layers, models, optimizers, regularizers\n'), ((6647, 6674), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (6672, 6674), False, 'from keras import layers, models, optimizers, regularizers\n'), ((6724, 6749), 'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (6741, 6749), False, 'from keras import layers, models, optimizers, regularizers\n'), ((6799, 6818), 'keras.layers.Dropout', 'layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (6813, 6818), False, 'from keras import layers, models, optimizers, regularizers\n'), ((6877, 6914), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(64)', 'use_bias': '(True)'}), '(units=64, use_bias=True)\n', (6889, 6914), False, 'from keras import layers, models, optimizers, regularizers\n'), ((6966, 6993), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (6991, 6993), False, 'from keras import layers, models, optimizers, regularizers\n'), ((7043, 7068), 'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (7060, 7068), False, 'from keras import layers, models, optimizers, regularizers\n'), ((7118, 7137), 'keras.layers.Dropout', 'layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (7132, 7137), False, 'from keras import layers, models, optimizers, regularizers\n'), ((7230, 7268), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(128)', 'use_bias': '(True)'}), '(units=128, use_bias=True)\n', (7242, 7268), False, 'from keras import layers, models, optimizers, regularizers\n'), ((7320, 7347), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (7345, 7347), False, 'from keras import layers, models, optimizers, regularizers\n'), ((7397, 7422), 'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (7414, 7422), False, 'from keras import layers, models, optimizers, regularizers\n'), ((7472, 7491), 'keras.layers.Dropout', 
'layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (7486, 7491), False, 'from keras import layers, models, optimizers, regularizers\n'), ((7862, 7874), 'keras.layers.Add', 'layers.Add', ([], {}), '()\n', (7872, 7874), False, 'from keras import layers, models, optimizers, regularizers\n'), ((7929, 7954), 'keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (7946, 7954), False, 'from keras import layers, models, optimizers, regularizers\n'), ((7979, 8017), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(1)', 'name': '"""q_values"""'}), "(units=1, name='q_values')\n", (7991, 8017), False, 'from keras import layers, models, optimizers, regularizers\n'), ((12345, 12358), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (12352, 12358), True, 'import numpy as np\n'), ((4761, 4779), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (4777, 4779), True, 'from keras import backend as K\n'), ((8334, 8352), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (8350, 8352), True, 'from keras import backend as K\n'), ((10596, 10654), 'numpy.array', 'np.array', (['[e.action for e in experiences if e is not None]'], {}), '([e.action for e in experiences if e is not None])\n', (10604, 10654), True, 'import numpy as np\n'), ((10722, 10780), 'numpy.array', 'np.array', (['[e.reward for e in experiences if e is not None]'], {}), '([e.reward for e in experiences if e is not None])\n', (10730, 10780), True, 'import numpy as np\n'), ((10831, 10887), 'numpy.array', 'np.array', (['[e.done for e in experiences if e is not None]'], {}), '([e.done for e in experiences if e is not None])\n', (10839, 10887), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 26 15:46:31 2019
@author: david
"""
import numpy as np
from physique import exportToCsv
# Sample points of an exponentially decaying signal: x = 0..9 with the
# corresponding measured y values, written out as CSV for later fitting.
x = np.arange(10)
y = np.array([4.98, 3.59, 2.57, 1.83, 1.32, 0.93, 0.67, 0.48, 0.34, 0.25])
exportToCsv((x, y), fileName="data_exp2.txt")
| [
"numpy.array",
"physique.exportToCsv"
] | [((162, 202), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n', (170, 202), True, 'import numpy as np\n'), ((196, 266), 'numpy.array', 'np.array', (['[4.98, 3.59, 2.57, 1.83, 1.32, 0.93, 0.67, 0.48, 0.34, 0.25]'], {}), '([4.98, 3.59, 2.57, 1.83, 1.32, 0.93, 0.67, 0.48, 0.34, 0.25])\n', (204, 266), True, 'import numpy as np\n'), ((268, 313), 'physique.exportToCsv', 'exportToCsv', (['(x, y)'], {'fileName': '"""data_exp2.txt"""'}), "((x, y), fileName='data_exp2.txt')\n", (279, 313), False, 'from physique import exportToCsv\n')] |
import nbp
import numpy as np
from nbp.tests.tools import make_system
@nbp.timing
def setup(specific_pos=False, use_neighbours=False):
    """Build a test system inside a cubic box of side 20.

    With `specific_pos` the four particles form a fixed tetrahedral
    arrangement centred in the box; otherwise 100 particles are placed
    uniformly at random. `use_neighbours` is forwarded to `make_system`.
    """
    box_length = 20
    if specific_pos:
        half_box = box_length / 2
        inv_sqrt2 = 2 ** (-1 / 2)
        tetra = np.asarray([[1, 0, -inv_sqrt2],
                           [-1, 0, -inv_sqrt2],
                           [0, 1, inv_sqrt2],
                           [0, -1, inv_sqrt2]])
        positions = tetra + half_box
    else:
        positions = box_length * np.random.rand(100, 3)
    return make_system(characteristic_length=box_length, positions=positions,
                       lj=True, ewald=True, use_neighbours=use_neighbours,
                       reci_cutoff=5)
@nbp.timing
def optimize(system, cov):
    """Run the system's optimizer and print diagnostics.

    Prints the number of recorded states and the unwrapped pairwise
    distances of the final state, then returns the optimized system.
    """
    optimized = system.optimize(cov=cov, num_particles=2)
    print(len(optimized.states()))
    print(optimized.state().distance().distances_unwrapped())
    return optimized
@nbp.timing
def simu(system, steps, temp):
    """Simulate `steps` steps at temperature `temp`, print the resulting
    state count, and return the (mutated) system."""
    system.simulate(steps, temp)
    print(len(system.states()))
    return system
# Benchmark the full setup -> optimize -> simulate pipeline twice: first
# without, then with the neighbour list.
# Rewritten as a loop: the original duplicated the four-line block verbatim
# and bound the system to `sys`, shadowing the builtin `sys` module name.
# Both cutoff values are deliberately taken from the *un-optimized* system,
# matching the original's behaviour.
for use_neighbours in (False, True):
    base_system = setup(use_neighbours=use_neighbours)
    op_sys = optimize(base_system, base_system.info().cutoff() / 24)
    op_sys = optimize(op_sys, base_system.info().cutoff() / 32)
    simu_sys = simu(op_sys, 100, 100)
| [
"numpy.random.rand",
"numpy.asarray",
"nbp.tests.tools.make_system"
] | [((538, 691), 'nbp.tests.tools.make_system', 'make_system', ([], {'characteristic_length': 'characteristic_length', 'positions': 'positions', 'lj': '(True)', 'ewald': '(True)', 'use_neighbours': 'use_neighbours', 'reci_cutoff': '(5)'}), '(characteristic_length=characteristic_length, positions=\n positions, lj=True, ewald=True, use_neighbours=use_neighbours,\n reci_cutoff=5)\n', (549, 691), False, 'from nbp.tests.tools import make_system\n'), ((211, 324), 'numpy.asarray', 'np.asarray', (['[[1, 0, -2 ** (-1 / 2)], [-1, 0, -2 ** (-1 / 2)], [0, 1, 2 ** (-1 / 2)], [0,\n -1, 2 ** (-1 / 2)]]'], {}), '([[1, 0, -2 ** (-1 / 2)], [-1, 0, -2 ** (-1 / 2)], [0, 1, 2 ** (-\n 1 / 2)], [0, -1, 2 ** (-1 / 2)]])\n', (221, 324), True, 'import numpy as np\n'), ((478, 500), 'numpy.random.rand', 'np.random.rand', (['(100)', '(3)'], {}), '(100, 3)\n', (492, 500), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Provide basic interface to handle a single material being studied.
Created on Wed Jul 29 23:09:54 2020
author: <NAME>
"""
import numpy as np
from pyabsorp.air import AirProperties
from pyabsorp.absorption import absorption_coefficient
from pyabsorp.models import delany_bazley, rayleigh, biot_allard, johnson_champoux
class Material(object):
    """Representation of a single material being studied.

    Provides an interface to all absorption-coefficient models available
    in `PyAbsorp`. All modelling is done in the frequency domain.

    Most parameters are laboratory parameters (LP) obtained from
    measurements; this class only wraps semi-empirical / analytical
    models, no regression on measured curves is performed.

    Parameters
    ----------
    thick : float
        Material thickness.
    freq : np.ndarray
        Array of frequencies.
    air : AirProperties
        Air acoustical properties.
    poros : float, optional
        Open porosity, LP. The default is None.
    tortus : float, optional
        Tortuosity, LP. The default is None.
    flowres : float, optional
        Static flow resistivity, LP. The default is None.
    thermlen : float, optional
        Thermal characteristic length, LP. The default is None.
    visclen : float, optional
        Viscous characteristic length, LP. The default is None.
    shape : str, optional
        Shape of the pore, LP. The default is None.
    thermperm : float, optional
        Static thermal permeability, LP. The default is None.
    """

    def __init__(self, thick: float, freq: np.ndarray,
                 air: AirProperties, *, poros: float = None,
                 tortus: float = None, flowres: float = None,
                 thermlen: float = None, visclen: float = None,
                 shape: str = None, thermperm: float = None):
        self._air = air
        self.thickness = thick
        self.frequencies = np.float32(freq)
        self.porosity = poros
        self.tortuosity = tortus
        self.flowResistivity = flowres
        self.thermalLength = thermlen
        self.viscousLength = visclen
        self.poreShape = shape
        self.thermalPerm = thermperm
        # Results of the last estimate_absorption() call.
        self._kc = self._zc = self._absorp = None

    # --- simple read/write properties (no validation yet) ---------------

    @property
    def thickness(self):
        return self._thick

    @thickness.setter
    def thickness(self, thick):
        self._thick = thick

    @property
    def porosity(self):
        return self._poros

    @porosity.setter
    def porosity(self, poros):
        self._poros = poros

    @property
    def poreShape(self):
        return self._shape

    @poreShape.setter
    def poreShape(self, shape):
        self._shape = shape

    @property
    def tortuosity(self):
        return self._tortus

    @tortuosity.setter
    def tortuosity(self, tortus):
        self._tortus = tortus

    @property
    def flowResistivity(self):
        return self._flowres

    @flowResistivity.setter
    def flowResistivity(self, flowres):
        self._flowres = flowres

    @property
    def thermalLength(self):
        return self._thermlen

    @thermalLength.setter
    def thermalLength(self, thermlen):
        self._thermlen = thermlen

    @property
    def thermalPerm(self):
        return self._thermperm

    @thermalPerm.setter
    def thermalPerm(self, thermperm):
        self._thermperm = thermperm

    @property
    def viscousLength(self):
        return self._visclen

    @viscousLength.setter
    def viscousLength(self, visclen):
        self._visclen = visclen

    @property
    def frequencies(self):
        return self._freq

    @frequencies.setter
    def frequencies(self, freq):
        self._freq = freq

    # --- read-only results/attributes ------------------------------------

    @property
    def air(self):
        return self._air

    @property
    def impedance(self):
        return self._zc

    @property
    def waveNum(self):
        return self._kc

    @property
    def absorption(self):
        return self._absorp

    def _check_defined(self, *params):
        """Raise ValueError if any of *params* is None (i.e. undefined).

        FIX: the previous truthiness test (``all([...])``) wrongly rejected
        legitimate zero-valued parameters; an explicit None comparison only
        rejects parameters that were never set.
        """
        if any(param is None for param in params):
            raise ValueError("Some material parameters are not defined.")

    def estimate_absorption(self, method: str, var: str = 'default'):
        """Estimate material absorption based on `method`.

        The resulting `absorption` coefficients and the matching
        characteristic `impedance` and wave number (`waveNum`) are stored
        on the instance. Only the result of the last call is kept.

        Parameters
        ----------
        method : str
            Name or first letters of the desired model:
            'DB'/'DELANY-BAZLEY', 'R'/'RAY'/'RAYLEIGH',
            'BA'/'BIOT-ALLARD', 'JC'/'JOHNSON-CHAMPOUX'.
        var : str, optional
            Name of the method variation, see `johnson_champoux`.
            The default is 'default'.

        Raises
        ------
        ValueError
            If a parameter required by `method` is None, or an unknown
            `method` is specified.

        Returns
        -------
        The absorption coefficients (also available as `self.absorption`).
        """
        name = method.upper()
        if name in ['DB', 'DELANY-BAZLEY']:
            self._check_defined(self.flowResistivity)
            zc, kc = delany_bazley(self.flowResistivity, self.air.density,
                                   self.air.soundSpeed, self.frequencies, var)
        elif name in ['R', 'RAY', 'RAYLEIGH']:
            self._check_defined(self.flowResistivity, self.porosity)
            zc, kc = rayleigh(self.flowResistivity, self.air.density,
                               self.air.soundSpeed, self.porosity,
                               self.frequencies)
        elif name in ['BA', 'BIOT-ALLARD']:
            self._check_defined(self.flowResistivity, self.porosity,
                                self.tortuosity, self.poreShape)
            zc, kc = biot_allard(self.flowResistivity, self.air.density,
                                 self.porosity, self.tortuosity,
                                 self.air.specHeatRatio, self.air.prandtl,
                                 self.air.atmPressure, self.poreShape,
                                 self.frequencies)
        elif name in ['JC', 'JOHNSON-CHAMPOUX']:
            self._check_defined(self.flowResistivity, self.porosity,
                                self.thermalLength, self.tortuosity,
                                self.viscousLength)
            zc, kc = johnson_champoux(self.flowResistivity, self.air.density,
                                      self.porosity, self.tortuosity,
                                      self.air.specHeatRatio, self.air.prandtl,
                                      self.air.atmPressure, self.viscousLength,
                                      self.thermalLength, self.air.viscosity,
                                      0 if not self.thermalPerm else self.thermalPerm,
                                      self.air.specHeatCP, self.frequencies, var)
        else:
            raise ValueError(f"Unknown method {method}.")
        self._zc = zc
        self._kc = kc
        self._absorp = absorption_coefficient(self.impedance, self.waveNum,
                                               self.thickness, self.air.impedance)
        return self.absorption
| [
"pyabsorp.models.delany_bazley",
"pyabsorp.models.biot_allard",
"numpy.float32",
"pyabsorp.models.johnson_champoux",
"pyabsorp.models.rayleigh",
"pyabsorp.absorption.absorption_coefficient"
] | [((2477, 2493), 'numpy.float32', 'np.float32', (['freq'], {}), '(freq)\n', (2487, 2493), True, 'import numpy as np\n'), ((7906, 7999), 'pyabsorp.absorption.absorption_coefficient', 'absorption_coefficient', (['self.impedance', 'self.waveNum', 'self.thickness', 'self.air.impedance'], {}), '(self.impedance, self.waveNum, self.thickness, self.\n air.impedance)\n', (7928, 7999), False, 'from pyabsorp.absorption import absorption_coefficient\n'), ((5888, 5989), 'pyabsorp.models.delany_bazley', 'delany_bazley', (['self.flowResistivity', 'self.air.density', 'self.air.soundSpeed', 'self.frequencies', 'var'], {}), '(self.flowResistivity, self.air.density, self.air.soundSpeed,\n self.frequencies, var)\n', (5901, 5989), False, 'from pyabsorp.models import delany_bazley, rayleigh, biot_allard, johnson_champoux\n'), ((6242, 6349), 'pyabsorp.models.rayleigh', 'rayleigh', (['self.flowResistivity', 'self.air.density', 'self.air.soundSpeed', 'self.porosity', 'self.frequencies'], {}), '(self.flowResistivity, self.air.density, self.air.soundSpeed, self.\n porosity, self.frequencies)\n', (6250, 6349), False, 'from pyabsorp.models import delany_bazley, rayleigh, biot_allard, johnson_champoux\n'), ((6680, 6871), 'pyabsorp.models.biot_allard', 'biot_allard', (['self.flowResistivity', 'self.air.density', 'self.porosity', 'self.tortuosity', 'self.air.specHeatRatio', 'self.air.prandtl', 'self.air.atmPressure', 'self.poreShape', 'self.frequencies'], {}), '(self.flowResistivity, self.air.density, self.porosity, self.\n tortuosity, self.air.specHeatRatio, self.air.prandtl, self.air.\n atmPressure, self.poreShape, self.frequencies)\n', (6691, 6871), False, 'from pyabsorp.models import delany_bazley, rayleigh, biot_allard, johnson_champoux\n'), ((7232, 7554), 'pyabsorp.models.johnson_champoux', 'johnson_champoux', (['self.flowResistivity', 'self.air.density', 'self.porosity', 'self.tortuosity', 'self.air.specHeatRatio', 'self.air.prandtl', 'self.air.atmPressure', 'self.viscousLength', 
'self.thermalLength', 'self.air.viscosity', '(0 if not self.thermalPerm else self.thermalPerm)', 'self.air.specHeatCP', 'self.frequencies', 'var'], {}), '(self.flowResistivity, self.air.density, self.porosity,\n self.tortuosity, self.air.specHeatRatio, self.air.prandtl, self.air.\n atmPressure, self.viscousLength, self.thermalLength, self.air.viscosity,\n 0 if not self.thermalPerm else self.thermalPerm, self.air.specHeatCP,\n self.frequencies, var)\n', (7248, 7554), False, 'from pyabsorp.models import delany_bazley, rayleigh, biot_allard, johnson_champoux\n')] |
import numpy as np
from pathlib import Path
from aocd import get_data
# Puzzle input: one line per row of the initial 2-D slice.
lines = get_data(day=17, year=2020).splitlines()
p = Path(__file__).resolve()
with open(p.parent / 'in.txt') as f:
    lines2 = f.read().splitlines()

iterations = 6
input_size = len(lines)
# Pad the grid so 6 generations can grow one cell outward per iteration
# in every direction without bounds checks.
output_size = (iterations) * 2 + input_size
# FIX: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `bool` is the documented replacement (identical dtype).
pocketdim = np.zeros((output_size, ) * 3, dtype=bool)
pocketdim4d = np.zeros((output_size, ) * 4, dtype=bool)
mid_i = output_size // 2
start_index = int((output_size - input_size) / 2)
end_index = start_index + input_size
# Centre the initial 2-D slice in the padded 3-D / 4-D grids
# ('#' marks an active cell).
for inner_i, outer_i in enumerate(range(start_index, end_index)):
    this_array = [char == "#" for char in lines[inner_i]]
    pocketdim[mid_i, outer_i, start_index:end_index] = this_array
    pocketdim4d[mid_i, mid_i, outer_i, start_index:end_index] = this_array
def get_index(index, mshape, dim):
    """Return a (start, stop) pair that brackets ``index[dim]`` by one
    cell on each side, clamped to zero at the low end."""
    el = index[dim]
    start_el = max(el - 1, 0)
    end_el = el if el > mshape[dim] else el + 2
    return start_el, end_el
def get_active_neighbors_3d(index, matrix):
    """Number of active (True) cells in the 3x3x3 block around *index*
    in a 3-d matrix (the centre cell itself is included in the count)."""
    bounds = [get_index(index, matrix.shape, d) for d in range(3)]
    (z0, z1), (y0, y1), (x0, x1) = bounds
    submatrix = matrix[z0:z1, y0:y1, x0:x1]
    return np.count_nonzero(submatrix == True)
def get_active_neighbors_4d(index, matrix):
    """Same as the 3-d counter, extended to a 3x3x3x3 block in a 4-d
    matrix (the centre cell itself is included in the count)."""
    bounds = [get_index(index, matrix.shape, d) for d in range(4)]
    (w0, w1), (z0, z1), (y0, y1), (x0, x1) = bounds
    submatrix = matrix[w0:w1, z0:z1, y0:y1, x0:x1]
    return np.count_nonzero(submatrix == True)
# Loop for Part A
for _ in range(iterations):
    # Work on a frozen snapshot so all cells update simultaneously.
    pocketdim_old = pocketdim.copy()
    for index, el in np.ndenumerate(pocketdim_old):
        # Neighbour count over the 3x3x3 block *including* the cell itself,
        # hence the thresholds [3, 4] below instead of the usual [2, 3].
        active_cnt = get_active_neighbors_3d(index, pocketdim_old)
        if el and active_cnt not in [3, 4]:
            pocketdim[index] = False
        if not el and active_cnt == 3:
            pocketdim[index] = True
            continue  # no-op: already the last statement of the loop body
total_cnt = np.count_nonzero(pocketdim == True)
print(total_cnt)
# Loop for Part B
for i2 in range(iterations):
    pocketdim_old = pocketdim4d.copy()
    # This loop is a little bit inefficient,
    # since we are comparing every element even in the first loops. Still runs fast enough though
    for index, el in np.ndenumerate(pocketdim_old):
        # 3x3x3x3 neighbourhood count, including the cell itself
        # (hence [3, 4] instead of [2, 3]).
        active_cnt = get_active_neighbors_4d(index, pocketdim_old)
        if el and active_cnt not in [3, 4]:
            pocketdim4d[index] = False
        if not el and active_cnt == 3:
            pocketdim4d[index] = True
            continue  # no-op: already the last statement of the loop body
total_cnt = np.count_nonzero(pocketdim4d == True)
print(total_cnt)
| [
"numpy.count_nonzero",
"numpy.ndenumerate",
"numpy.zeros",
"pathlib.Path",
"aocd.get_data"
] | [((318, 361), 'numpy.zeros', 'np.zeros', (['((output_size,) * 3)'], {'dtype': 'np.bool'}), '((output_size,) * 3, dtype=np.bool)\n', (326, 361), True, 'import numpy as np\n'), ((377, 420), 'numpy.zeros', 'np.zeros', (['((output_size,) * 4)'], {'dtype': 'np.bool'}), '((output_size,) * 4, dtype=np.bool)\n', (385, 420), True, 'import numpy as np\n'), ((2358, 2393), 'numpy.count_nonzero', 'np.count_nonzero', (['(pocketdim == True)'], {}), '(pocketdim == True)\n', (2374, 2393), True, 'import numpy as np\n'), ((2946, 2983), 'numpy.count_nonzero', 'np.count_nonzero', (['(pocketdim4d == True)'], {}), '(pocketdim4d == True)\n', (2962, 2983), True, 'import numpy as np\n'), ((1422, 1457), 'numpy.count_nonzero', 'np.count_nonzero', (['(submatrix == True)'], {}), '(submatrix == True)\n', (1438, 1457), True, 'import numpy as np\n'), ((1922, 1957), 'numpy.count_nonzero', 'np.count_nonzero', (['(submatrix == True)'], {}), '(submatrix == True)\n', (1938, 1957), True, 'import numpy as np\n'), ((2079, 2108), 'numpy.ndenumerate', 'np.ndenumerate', (['pocketdim_old'], {}), '(pocketdim_old)\n', (2093, 2108), True, 'import numpy as np\n'), ((2663, 2692), 'numpy.ndenumerate', 'np.ndenumerate', (['pocketdim_old'], {}), '(pocketdim_old)\n', (2677, 2692), True, 'import numpy as np\n'), ((78, 105), 'aocd.get_data', 'get_data', ([], {'day': '(17)', 'year': '(2020)'}), '(day=17, year=2020)\n', (86, 105), False, 'from aocd import get_data\n'), ((124, 138), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (128, 138), False, 'from pathlib import Path\n')] |
from __future__ import print_function, absolute_import, division
import abc
import numpy as np
from sklearn.utils import check_array, check_random_state
class Coreset(object):
    """
    Abstract class for coresets.

    Parameters
    ----------
    X : ndarray, shape (n_points, n_dims)
        The data set to generate coreset from.
    w : ndarray, shape (n_points), optional
        The weights of the data points. This allows generating coresets from a
        weighted data set, for example generating coreset of a coreset. If None,
        the data is treated as unweighted and w will be replaced by all ones array.
    random_state : int, RandomState instance or None, optional (default=None)
    """
    # NOTE(review): `__metaclass__` is a Python 2 construct and is ignored
    # under Python 3, so @abc.abstractmethod is not actually enforced there.
    # Left unchanged to avoid breaking callers that instantiate subclasses.
    __metaclass__ = abc.ABCMeta

    def __init__(self, X, w=None, random_state=None):
        X = check_array(X, accept_sparse="csr", order='C',
                        dtype=[np.float64, np.float32])
        self.X = X
        self.w = w if w is not None else np.ones(X.shape[0])
        self.n_samples = X.shape[0]
        self.random_state = check_random_state(random_state)
        self.calc_sampling_distribution()

    @abc.abstractmethod
    def calc_sampling_distribution(self):
        """
        Calculates the coreset importance sampling distribution.

        Implementations must set ``self.p`` to a probability vector of
        length ``self.n_samples``.
        """
        pass

    def generate_coreset(self, size):
        """
        Generates a coreset of the data set.

        Parameters
        ----------
        size : int
            The size of the coreset to generate.

        Returns
        -------
        (points, weights)
            The sampled rows of ``X`` and their importance-sampling weights.
        """
        # FIX: sample with the instance's seeded RandomState instead of the
        # global numpy RNG, so the `random_state` argument actually makes
        # results reproducible.
        ind = self.random_state.choice(self.n_samples, size=size, p=self.p)
        return self.X[ind], 1. / (size * self.p[ind])
| [
"sklearn.utils.check_array",
"sklearn.utils.check_random_state",
"numpy.ones",
"numpy.random.choice"
] | [((812, 890), 'sklearn.utils.check_array', 'check_array', (['X'], {'accept_sparse': '"""csr"""', 'order': '"""C"""', 'dtype': '[np.float64, np.float32]'}), "(X, accept_sparse='csr', order='C', dtype=[np.float64, np.float32])\n", (823, 890), False, 'from sklearn.utils import check_array, check_random_state\n'), ((1059, 1091), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (1077, 1091), False, 'from sklearn.utils import check_array, check_random_state\n'), ((1533, 1586), 'numpy.random.choice', 'np.random.choice', (['self.n_samples'], {'size': 'size', 'p': 'self.p'}), '(self.n_samples, size=size, p=self.p)\n', (1549, 1586), True, 'import numpy as np\n'), ((975, 994), 'numpy.ones', 'np.ones', (['X.shape[0]'], {}), '(X.shape[0])\n', (982, 994), True, 'import numpy as np\n')] |
from __future__ import division, print_function, absolute_import
import time
import warnings
import numpy as np
import itertools as itr
import sys
from contextlib import contextmanager
# Silence NumPy's ComplexWarning (raised when a complex value is cast to
# real, discarding the imaginary part).
# NOTE(review): simplefilter is process-wide, not module-local.
warnings.simplefilter("ignore", np.ComplexWarning)
# Module-level verbosity flags; nothing in this chunk reads them --
# presumably consumed elsewhere in the package. TODO confirm.
_is_verbose = False
_is_silent = False
class AbortException(Exception):
    """Raised to abort an algorithm mid-run.

    Input handlers (for instance pygame events) may raise this exception so
    that whatever drives the algorithm can catch it and stop cleanly.
    """
def bytesize(arr):
    """
    Returns the memory byte size of a Numpy array as an integer.

    Only the ``shape`` and ``dtype`` metadata are inspected; the array data
    itself is never touched.
    """
    # FIX: dtype=np.intp keeps the shape product an exact integer.
    # The default np.prod(()) is the *float* 1.0, so 0-d arrays previously
    # returned a float instead of the documented integer.
    num_elements = int(np.prod(arr.shape, dtype=np.intp))
    return num_elements * np.dtype(arr.dtype).itemsize
def humanize_bytesize(byte_size):
    """Convert a byte count into a human-readable string, e.g. ``'1.5 KB'``.

    Uses binary (1024-based) units up to petabytes.

    Parameters
    ----------
    byte_size : int
        Number of bytes.

    Returns
    -------
    str
        The size formatted with 4 significant digits and a unit suffix.
    """
    # FIX: log(0) is -inf, which made the loop below fall through every
    # order and return None for sizes below one byte.
    if byte_size < 1:
        return '{:.4g} B'.format(byte_size)
    order = np.log(byte_size) / np.log(1024)
    orders = [
        (5, 'PB'),
        (4, 'TB'),
        (3, 'GB'),
        (2, 'MB'),
        (1, 'KB'),
        (0, 'B')
    ]
    for ex, name in orders:
        if order >= ex:
            return '{:.4g} {}'.format(byte_size / 1024**ex, name)
def memsize(arr):
    """
    Returns the required memory of a Numpy array as a humanly readable string.

    Convenience wrapper, equivalent to ``humanize_bytesize(bytesize(arr))``.
    """
    return humanize_bytesize(bytesize(arr))
def span(arr):
    """
    Return the minimum and maximum of an array as a tuple.

    Parameters
    ----------
    arr : ndarray
        Numpy array (or array-like).

    Returns
    -------
    (min, max) : tuple
        The smallest and the largest value in the array.
    """
    # TODO: This could be made faster with a custom ufunc
    lo = np.amin(arr)
    hi = np.amax(arr)
    return lo, hi
def apply_once(func, arr, axes, keepdims=True):
    """
    Similar to `numpy.apply_over_axes`, except this performs the operation over
    a flattened version of all the axes, meaning that the function will only be
    called once. This only makes a difference for non-linear functions.

    Parameters
    ----------
    func : callback
        Function that operates well on Numpy arrays and returns a single value
        of compatible dtype.
    arr : ndarray
        Array to do operation over.
    axes : int or iterable
        Specifies the axes to perform the operation. Only one call will be made
        to `func`, with all values flattened.
    keepdims : bool
        By default, this is True, so the collapsed dimensions remain with
        length 1. This is simlar to `numpy.apply_over_axes` in that regard. If
        this is set to False, the dimensions are removed, just like when using
        for instance `numpy.sum` over a single axis. Note that this is safer
        than subsequently calling squeeze, since this option will preserve
        length-1 dimensions that were not operated on.

    Examples
    --------
    >>> import deepdish as dd
    >>> import numpy as np
    >>> rs = np.random.RandomState(0)
    >>> x = rs.uniform(size=(10, 3, 3))

    Image that you have ten 3x3 images and you want to calculate each image's
    intensity standard deviation:

    >>> np.apply_over_axes(np.std, x, [1, 2]).ravel()
    array([ 0.06056838,  0.08230712,  0.08135083,  0.09938963,  0.08533604,
            0.07830725,  0.066148  ,  0.07983019,  0.08134123,  0.01839635])

    This is the same as ``x.std(1).std(1)``, which is not the standard
    deviation of all 9 pixels together. To fix this we can flatten the pixels
    and try again:

    >>> x.reshape(10, 9).std(axis=1)
    array([ 0.17648981,  0.32849108,  0.29409526,  0.25547501,  0.23649064,
            0.26928468,  0.20081239,  0.33052397,  0.29950855,  0.26535717])

    This is exactly what this function does for you:

    >>> dd.apply_once(np.std, x, [1, 2], keepdims=False)
    array([ 0.17648981,  0.32849108,  0.29409526,  0.25547501,  0.23649064,
            0.26928468,  0.20081239,  0.33052397,  0.29950855,  0.26535717])
    """
    # Identity permutation of the axes; entries are swapped below so the
    # reduced axes end up contiguous.
    all_axes = np.arange(arr.ndim)
    # Normalize `axes` to a set of non-negative axis numbers.
    if isinstance(axes, int):
        axes = {axes}
    else:
        axes = set(axis % arr.ndim for axis in axes)
    # Swap each reduced axis next to the smallest one, building the
    # transpose permutation.
    principal_axis = min(axes)
    for i, axis in enumerate(axes):
        axis0 = principal_axis + i
        if axis != axis0:
            all_axes[axis0], all_axes[axis] = all_axes[axis], all_axes[axis0]
    transposed_arr = arr.transpose(all_axes)
    # new_shape collapses all reduced axes into a single -1 axis;
    # new_shape_keepdims is the final shape with length-1 placeholders
    # for the reduced axes.
    new_shape = []
    new_shape_keepdims = []
    for axis, dim in enumerate(arr.shape):
        if axis == principal_axis:
            new_shape.append(-1)
        elif axis not in axes:
            new_shape.append(dim)
        if axis in axes:
            new_shape_keepdims.append(1)
        else:
            new_shape_keepdims.append(dim)
    # One call to `func` per output element, over the flattened axis.
    collapsed = np.apply_along_axis(func,
                                    principal_axis,
                                    transposed_arr.reshape(new_shape))
    if keepdims:
        return collapsed.reshape(new_shape_keepdims)
    else:
        return collapsed
def tupled_argmax(a):
    """
    Argmax returning an index tuple rather than the flat scalar index
    that `numpy.argmax` gives for multi-dimensional input.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    index : tuple
        Index tuple (even for one-dimensional input), directly usable to
        index `a` as in ``a[index]``.

    Examples
    --------
    >>> import numpy as np
    >>> import deepdish as dd
    >>> dd.tupled_argmax(np.arange(6).reshape(2, 3))
    (1, 2)
    """
    flat_index = np.argmax(a)
    return np.unravel_index(flat_index, np.shape(a))
def multi_range(*args):
    """Iterate the Cartesian product of ``range(n)`` for each argument."""
    ranges = [range(n) for n in args]
    return itr.product(*ranges)
@contextmanager
def timed(name=None, file=sys.stdout, callback=None, wall_clock=True):
    """
    Context manager to make it easy to time the execution of a piece of code.
    This timer will never run your code several times and is meant more for
    simple in-production timing, instead of benchmarking. Reports the
    wall-clock time (using `time.time`) and not the processor time.

    Parameters
    ----------
    name : str
        Name of the timing block, to identify it.
    file : file handler
        Which file handler to print the results to. Default is standard output.
        If a numpy array and size 1 is given, the time in seconds will be
        stored inside it. Ignored if `callback` is set.
    callback : callable
        This offer even more flexibility than `file`. The callable will be
        called at the end of the execution with a single floating point
        argument with the elapsed time in seconds.
    wall_clock : bool
        NOTE(review): accepted but never read in the body below -- confirm
        the intended semantics before relying on it.

    Examples
    --------
    >>> import deepdish as dd
    >>> import time

    The `timed` function is a context manager, so everything inside the
    ``with`` block will be timed. The results will be printed by default to
    standard output:

    >>> with dd.timed('Sleep'): # doctest: +SKIP
    ...     time.sleep(1)
    [timed] Sleep: 1.001035451889038 s

    Using the `callback` parameter, we can accumulate multiple runs into a
    list:

    >>> times = []
    >>> for i in range(3): # doctest: +SKIP
    ...     with dd.timed(callback=times.append):
    ...         time.sleep(1)
    >>> times # doctest: +SKIP
    [1.0035350322723389, 1.0035550594329834, 1.0039470195770264]
    """
    start = time.time()
    yield  # run the managed block
    end = time.time()
    delta = end - start
    if callback is not None:
        callback(delta)
    elif isinstance(file, np.ndarray) and len(file) == 1:
        # Size-1 numpy array: store the elapsed seconds instead of printing.
        file[0] = delta
    else:
        name_str = ' {}'.format(name) if name is not None else ''
        print(("[timed]{0}: {1} s".format(name_str, delta)), file=file)
class SliceClass(object):
    """Expose slicing syntax as values: ``aslice[a:b:c]`` simply returns
    the slice (or index) object passed to ``__getitem__``."""

    def __getitem__(self, index):
        return index


aslice = SliceClass()
| [
"numpy.log",
"warnings.simplefilter",
"numpy.argmax",
"numpy.dtype",
"time.time",
"numpy.shape",
"numpy.min",
"numpy.max",
"numpy.arange",
"numpy.prod"
] | [((185, 235), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'np.ComplexWarning'], {}), "('ignore', np.ComplexWarning)\n", (206, 235), False, 'import warnings\n'), ((3834, 3853), 'numpy.arange', 'np.arange', (['arr.ndim'], {}), '(arr.ndim)\n', (3843, 3853), True, 'import numpy as np\n'), ((7263, 7274), 'time.time', 'time.time', ([], {}), '()\n', (7272, 7274), False, 'import time\n'), ((7295, 7306), 'time.time', 'time.time', ([], {}), '()\n', (7304, 7306), False, 'import time\n'), ((653, 671), 'numpy.prod', 'np.prod', (['arr.shape'], {}), '(arr.shape)\n', (660, 671), True, 'import numpy as np\n'), ((772, 789), 'numpy.log', 'np.log', (['byte_size'], {}), '(byte_size)\n', (778, 789), True, 'import numpy as np\n'), ((792, 804), 'numpy.log', 'np.log', (['(1024)'], {}), '(1024)\n', (798, 804), True, 'import numpy as np\n'), ((1560, 1571), 'numpy.min', 'np.min', (['arr'], {}), '(arr)\n', (1566, 1571), True, 'import numpy as np\n'), ((1573, 1584), 'numpy.max', 'np.max', (['arr'], {}), '(arr)\n', (1579, 1584), True, 'import numpy as np\n'), ((5508, 5520), 'numpy.argmax', 'np.argmax', (['a'], {}), '(a)\n', (5517, 5520), True, 'import numpy as np\n'), ((5522, 5533), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (5530, 5533), True, 'import numpy as np\n'), ((674, 693), 'numpy.dtype', 'np.dtype', (['arr.dtype'], {}), '(arr.dtype)\n', (682, 693), True, 'import numpy as np\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts Flowers data to TFRecords of TF-Example protos.
This module downloads the Flowers data, uncompresses it, reads the files
that make up the Flowers data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import tensorflow as tf
import cv2
import numpy as np
from datasets import dataset_utils
# The URL where the UCF101 data can be downloaded.
# _DATA_URL = ''
# The ratios of train set and validation set.
_RATIO_TRAIN = 0.8  # fraction of the shuffled videos assigned to training
# Seed for repeatability.
_RANDOM_SEED = 0  # fixed seed so random.shuffle yields reproducible splits
# The number of shards per dataset split.
_NUM_SHARDS = 10  # each split is written across this many TFRecord files
class VideoReader(object):
    """Helper class that decodes video files into Numpy arrays (OpenCV)."""

    @staticmethod
    def read_video_props(file_name):
        """Open *file_name* with OpenCV.

        Returns:
            (capture, frame_count, height, width) with the counts cast to int.
        """
        video = cv2.VideoCapture(file_name)
        frame_count = video.get(cv2.CAP_PROP_FRAME_COUNT)
        width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
        return video, int(frame_count), int(height), int(width)

    def convert_video_to_numpy(self, file_name):
        """Decode a video into a uint8 RGB array, each frame resized to 240x240.

        Returns:
            (frames, frame_count, height, width, ori_height, ori_width) where
            `frames` has shape (frame_count, 240, 240, 3).
        """
        video, frame_count, ori_height, ori_width = self.read_video_props(file_name)
        width = height = 240
        buf = np.empty((frame_count, height, width, 3), dtype=np.uint8)
        fc = 0
        while fc < frame_count:
            ret, image = video.read()
            # FIX: the original resized/converted `image` even when read()
            # failed (ret False, image None), which crashed in cv2.resize.
            if not ret:
                break
            # NOTE: cv2.resize takes dsize as (width, height); the original
            # passed (height, width) -- harmless only because both are 240.
            image = cv2.resize(image, (width, height))
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            buf[fc] = image
            fc += 1
        video.release()
        if fc < frame_count:
            # Decoder stopped early: truncate to the frames actually read.
            buf = buf[:fc]
            frame_count = fc
        assert len(buf.shape) == 4
        assert buf.shape[3] == 3
        return buf, frame_count, height, width, ori_height, ori_width
def _get_filenames_and_classes(dataset_dir):
"""Returns a list of filenames and inferred class names.
Args:
dataset_dir: A directory containing a set of subdirectories representing
class names. Each subdirectory should contain PNG or JPG encoded images.
Returns:
A list of image file paths, relative to `dataset_dir` and the list of
subdirectories, representing class names.
"""
ucf_root = os.path.join(dataset_dir, 'UCF101')
directories = []
class_names = []
for filename in os.listdir(ucf_root):
path = os.path.join(ucf_root, filename)
if os.path.isdir(path):
directories.append(path)
class_names.append(filename)
video_filenames = []
for directory in directories:
for sub_directory in os.listdir(directory):
if sub_directory != 'Annotation':
for filename in os.listdir(os.path.join(directory, sub_directory)):
path = os.path.join(directory, sub_directory, filename)
video_filenames.append(path)
return video_filenames, sorted(class_names)
def _get_class_in_filename(filename):
return os.path.basename(os.path.dirname(os.path.dirname(filename)))
def _get_dataset_filename(dataset_dir, split_name, shard_id):
    """Build the sharded TFRecord path for the given split and shard id."""
    basename = 'ucf11_%s_%05d-of-%05d.tfrecord' % (
        split_name, shard_id, _NUM_SHARDS)
    return os.path.join(dataset_dir, 'UCF101-tfrecord', basename)
def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):
    """Converts the given filenames to a TFRecord dataset.

    Args:
        split_name: The name of the dataset: 'train', 'validation' or 'test'.
        filenames: A list of absolute paths to video files.
        class_names_to_ids: A dictionary from class names (strings) to ids
            (integers).
        dataset_dir: The directory where the converted datasets are stored.
    """
    assert split_name in ['train', 'validation', 'test']
    # Spread the files evenly over _NUM_SHARDS output files.
    num_per_shard = int(math.ceil(len(filenames) / float(_NUM_SHARDS)))
    with tf.Graph().as_default():
        video_reader = VideoReader()
        # NOTE(review): `sess` is opened but never used below (decoding is
        # done with OpenCV, not TF ops) -- presumably a leftover; confirm.
        with tf.Session('') as sess:
            for shard_id in range(_NUM_SHARDS):
                output_filename = _get_dataset_filename(
                    dataset_dir, split_name, shard_id)
                with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
                    # Contiguous slice of `filenames` handled by this shard.
                    start_ndx = shard_id * num_per_shard
                    end_ndx = min((shard_id + 1) * num_per_shard, len(filenames))
                    for i in range(start_ndx, end_ndx):
                        sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
                            i + 1, len(filenames), shard_id))
                        sys.stdout.flush()
                        # Read the filename:
                        video_data, fc, height, width, _, _ = video_reader.convert_video_to_numpy(filenames[i])
                        # Serialize the raw uint8 frames for the TF-Example.
                        video_data = video_data.tostring()
                        class_name = _get_class_in_filename(filenames[i])
                        class_id = class_names_to_ids[class_name]
                        example = dataset_utils.video_to_tfexample(
                            video_data, b'mpg', fc, height, width, class_id)
                        tfrecord_writer.write(example.SerializeToString())
    sys.stdout.write('\n')
    sys.stdout.flush()
# def _clean_up_temporary_files(dataset_dir):
# """Removes temporary files used to create the dataset.
#
# Args:
# dataset_dir: The directory where the temporary files are stored.
# """
# filename = _DATA_URL.split('/')[-1]
# filepath = os.path.join(dataset_dir, filename)
# tf.gfile.Remove(filepath)
#
# tmp_dir = os.path.join(dataset_dir, 'flower_photos')
# tf.gfile.DeleteRecursively(tmp_dir)
def _dataset_exists(dataset_dir):
    """Return True only if every expected train/validation shard file exists."""
    expected = (
        _get_dataset_filename(dataset_dir, split_name, shard_id)
        for split_name in ['train', 'validation']
        for shard_id in range(_NUM_SHARDS)
    )
    return all(tf.gfile.Exists(path) for path in expected)
def run(dataset_dir):
    """Runs the download and conversion operation.

    Args:
        dataset_dir: The dataset directory where the dataset is stored.
    """
    if not tf.gfile.Exists(dataset_dir):
        tf.gfile.MakeDirs(dataset_dir)
    if _dataset_exists(dataset_dir):
        print('Dataset files already exist. Exiting without re-creating them.')
        return
    video_filenames, class_names = _get_filenames_and_classes(dataset_dir)
    class_names_to_ids = dict(zip(class_names, range(len(class_names))))

    # Divide into train and test:
    random.seed(_RANDOM_SEED)
    random.shuffle(video_filenames)
    num_train = int(len(video_filenames) * _RATIO_TRAIN)
    num_validation = len(video_filenames) - num_train
    training_filenames = video_filenames[:num_train]
    if num_validation != 0:
        validation_filenames = video_filenames[num_train:]
    # NOTE(review): this slice starts at num_validation, so the "test" split
    # overlaps the training split -- confirm whether [num_train:] was meant.
    test_filenames = video_filenames[num_validation:]

    log_name = os.path.join(dataset_dir, 'UCF101-tfrecord', 'log_ucf101.txt')
    with tf.gfile.Open(log_name, 'w') as log:
        log.write('_NUM_SHARDS: %d\n\n' % _NUM_SHARDS)
        log.write('Number training file names: %d\n' % len(training_filenames))
        # FIX: guard on `num_validation != 0`, the condition under which
        # validation_filenames was assigned. The previous guard
        # (`num_validation != num_train`) raised NameError whenever the
        # validation split was empty but the training split was not.
        if num_validation != 0:
            log.write('Number validation file names: %d\n' % len(validation_filenames))
        log.write('Number test file names: %d\n' % len(test_filenames))

    # First, convert the training and validation sets.
    _convert_dataset('train', training_filenames, class_names_to_ids,
                     dataset_dir)
    if num_validation != 0:
        _convert_dataset('validation', validation_filenames, class_names_to_ids,
                         dataset_dir)
    _convert_dataset('test', test_filenames, class_names_to_ids,
                     dataset_dir)

    # Finally, write the labels file:
    labels_to_class_names = dict(zip(range(len(class_names)), class_names))
    dataset_utils.write_label_file(labels_to_class_names,
                                   os.path.join(dataset_dir, 'UCF101-tfrecord'))
    print('\nFinished converting the UCF101 dataset!')
def test(dataset_dir):
    """Test the download and conversion operation.

    Args:
        dataset_dir: The dataset directory where the dataset is stored.
    """
    if not tf.gfile.Exists(dataset_dir):
        tf.gfile.MakeDirs(dataset_dir)

    if _dataset_exists(dataset_dir):
        print('Dataset files already exist. Exiting without re-creating them.')
        return

    # dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
    video_filenames, class_names = _get_filenames_and_classes(dataset_dir)
    class_names_to_ids = dict(zip(class_names, range(len(class_names))))

    # Shuffle deterministically, then split; the test split deliberately
    # covers every clip here.
    random.seed(_RANDOM_SEED)
    random.shuffle(video_filenames)
    num_train = math.floor(len(video_filenames) * _RATIO_TRAIN)
    training_filenames = video_filenames[:num_train]
    validation_filenames = video_filenames[num_train:]
    test_filenames = video_filenames[:]

    print('Number training file names:', len(training_filenames))
    print('Number validation file names:', len(validation_filenames))
    print('Number test file names:', len(test_filenames))

    # Decode one sample clip and display it frame by frame.
    print('Test convert video')
    video_reader = VideoReader()
    filename_sample = training_filenames[0]
    video_data, frame_count, height, width, ori_height, ori_width = \
        video_reader.convert_video_to_numpy(filename_sample)
    print('Class:', _get_class_in_filename(filename_sample))
    print('Video size: %dx%dx%d' % (frame_count, ori_height, ori_width))
    print('Show video')
    for frame_idx in range(frame_count):
        cv2.imshow(_get_class_in_filename(filename_sample), video_data[frame_idx])
        cv2.waitKey(0)
    cv2.destroyAllWindows()
| [
"sys.stdout.write",
"tensorflow.gfile.Exists",
"random.shuffle",
"numpy.empty",
"sys.stdout.flush",
"os.path.join",
"cv2.cvtColor",
"os.path.dirname",
"random.seed",
"cv2.destroyAllWindows",
"cv2.resize",
"cv2.waitKey",
"tensorflow.Session",
"tensorflow.Graph",
"datasets.dataset_utils.vi... | [((3401, 3436), 'os.path.join', 'os.path.join', (['dataset_dir', '"""UCF101"""'], {}), "(dataset_dir, 'UCF101')\n", (3413, 3436), False, 'import os\n'), ((3499, 3519), 'os.listdir', 'os.listdir', (['ucf_root'], {}), '(ucf_root)\n', (3509, 3519), False, 'import os\n'), ((4384, 4445), 'os.path.join', 'os.path.join', (['dataset_dir', '"""UCF101-tfrecord"""', 'output_filename'], {}), "(dataset_dir, 'UCF101-tfrecord', output_filename)\n", (4396, 4445), False, 'import os\n'), ((6366, 6388), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (6382, 6388), False, 'import sys\n'), ((6393, 6411), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6409, 6411), False, 'import sys\n'), ((7818, 7843), 'random.seed', 'random.seed', (['_RANDOM_SEED'], {}), '(_RANDOM_SEED)\n', (7829, 7843), False, 'import random\n'), ((7848, 7879), 'random.shuffle', 'random.shuffle', (['video_filenames'], {}), '(video_filenames)\n', (7862, 7879), False, 'import random\n'), ((8203, 8265), 'os.path.join', 'os.path.join', (['dataset_dir', '"""UCF101-tfrecord"""', '"""log_ucf101.txt"""'], {}), "(dataset_dir, 'UCF101-tfrecord', 'log_ucf101.txt')\n", (8215, 8265), False, 'import os\n'), ((10023, 10048), 'random.seed', 'random.seed', (['_RANDOM_SEED'], {}), '(_RANDOM_SEED)\n', (10034, 10048), False, 'import random\n'), ((10053, 10084), 'random.shuffle', 'random.shuffle', (['video_filenames'], {}), '(video_filenames)\n', (10067, 10084), False, 'import random\n'), ((11018, 11041), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (11039, 11041), False, 'import cv2\n'), ((2008, 2035), 'cv2.VideoCapture', 'cv2.VideoCapture', (['file_name'], {}), '(file_name)\n', (2024, 2035), False, 'import cv2\n'), ((2442, 2499), 'numpy.empty', 'np.empty', (['(frame_count, height, width, 3)'], {'dtype': 'np.uint8'}), '((frame_count, height, width, 3), dtype=np.uint8)\n', (2450, 2499), True, 'import numpy as np\n'), ((3536, 3568), 'os.path.join', 
'os.path.join', (['ucf_root', 'filename'], {}), '(ucf_root, filename)\n', (3548, 3568), False, 'import os\n'), ((3580, 3599), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (3593, 3599), False, 'import os\n'), ((3768, 3789), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (3778, 3789), False, 'import os\n'), ((7352, 7380), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['dataset_dir'], {}), '(dataset_dir)\n', (7367, 7380), True, 'import tensorflow as tf\n'), ((7390, 7420), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['dataset_dir'], {}), '(dataset_dir)\n', (7407, 7420), True, 'import tensorflow as tf\n'), ((8275, 8303), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['log_name', '"""w"""'], {}), "(log_name, 'w')\n", (8288, 8303), True, 'import tensorflow as tf\n'), ((9234, 9278), 'os.path.join', 'os.path.join', (['dataset_dir', '"""UCF101-tfrecord"""'], {}), "(dataset_dir, 'UCF101-tfrecord')\n", (9246, 9278), False, 'import os\n'), ((9557, 9585), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['dataset_dir'], {}), '(dataset_dir)\n', (9572, 9585), True, 'import tensorflow as tf\n'), ((9595, 9625), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['dataset_dir'], {}), '(dataset_dir)\n', (9612, 9625), True, 'import tensorflow as tf\n'), ((10999, 11013), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (11010, 11013), False, 'import cv2\n'), ((2633, 2667), 'cv2.resize', 'cv2.resize', (['image', '(height, width)'], {}), '(image, (height, width))\n', (2643, 2667), False, 'import cv2\n'), ((2688, 2726), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (2700, 2726), False, 'import cv2\n'), ((4179, 4204), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (4194, 4204), False, 'import os\n'), ((5126, 5140), 'tensorflow.Session', 'tf.Session', (['""""""'], {}), "('')\n", (5136, 5140), True, 'import tensorflow as tf\n'), ((5050, 5060), 'tensorflow.Graph', 
'tf.Graph', ([], {}), '()\n', (5058, 5060), True, 'import tensorflow as tf\n'), ((7098, 7130), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['output_filename'], {}), '(output_filename)\n', (7113, 7130), True, 'import tensorflow as tf\n'), ((3880, 3918), 'os.path.join', 'os.path.join', (['directory', 'sub_directory'], {}), '(directory, sub_directory)\n', (3892, 3918), False, 'import os\n'), ((3948, 3996), 'os.path.join', 'os.path.join', (['directory', 'sub_directory', 'filename'], {}), '(directory, sub_directory, filename)\n', (3960, 3996), False, 'import os\n'), ((5333, 5377), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['output_filename'], {}), '(output_filename)\n', (5360, 5377), True, 'import tensorflow as tf\n'), ((5763, 5781), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5779, 5781), False, 'import sys\n'), ((6175, 6260), 'datasets.dataset_utils.video_to_tfexample', 'dataset_utils.video_to_tfexample', (['video_data', "b'mpg'", 'fc', 'height', 'width', 'class_id'], {}), "(video_data, b'mpg', fc, height, width,\n class_id)\n", (6207, 6260), False, 'from datasets import dataset_utils\n')] |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for gfsa.model.model_util."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
from gfsa.model import model_util
class LossUtilTest(parameterized.TestCase):
    """Unit tests for the loss helpers in gfsa.model.model_util."""

    @parameterized.named_parameters(
        {
            "testcase_name": "min",
            "minval": 1,
            "maxval": None,
            "expected": [1., 1., 2., 3., 4.],
        }, {
            "testcase_name": "max",
            "minval": None,
            "maxval": 3,
            "expected": [0., 1., 2., 3., 3.],
        }, {
            "testcase_name": "both",
            "minval": 1,
            "maxval": 3,
            "expected": [1., 1., 2., 3., 3.],
        })
    def test_forward_clip(self, minval, maxval, expected):
        # forward_clip should clip on the forward pass while passing
        # gradients through unchanged.
        clip_fn = functools.partial(
            model_util.forward_clip, minval=minval, maxval=maxval)
        primals = (jnp.arange(5).astype(jnp.float32),)
        tangents = (jnp.ones((5,)),)
        vals, out_tangents = jax.jvp(clip_fn, primals, tangents)
        np.testing.assert_allclose(vals, expected)
        # Identity gradient everywhere, even where values were clipped.
        np.testing.assert_allclose(out_tangents, np.ones((5,)))

    def test_safe_logit(self):
        probs = jnp.array([0, 1e-20, 1e-3, 0.9, 1])
        logits = model_util.safe_logit(probs)
        # No infinities, even at exactly 0 or 1.
        self.assertTrue(np.all(np.isfinite(logits)))
        # Away from the clipping region safe_logit matches the exact logit.
        np.testing.assert_allclose(logits[1:3],
                                   jax.scipy.special.logit(probs[1:3]))

    def test_binary_logit_cross_entropy(self):
        logits = jnp.array([-10., -5., 0., 5., 10.])
        true_probs = jax.nn.sigmoid(logits)
        false_probs = jax.nn.sigmoid(-logits)
        true_nll = model_util.binary_logit_cross_entropy(
            logits, jnp.ones([5], dtype=bool))
        false_nll = model_util.binary_logit_cross_entropy(
            logits, jnp.zeros([5], dtype=bool))
        # NLL of a true target is -log(sigmoid(logit)); of a false target,
        # -log(sigmoid(-logit)).
        np.testing.assert_allclose(true_nll, -jnp.log(true_probs), atol=1e-7)
        np.testing.assert_allclose(false_nll, -jnp.log(false_probs), atol=1e-7)

    def test_linear_cross_entropy(self):
        probs = jnp.array([0, 1e-20, 1e-3, 0.9, 1, 1, 1 - 1e-7, 1 - 1e-3, 0.1, 0])
        targets = jnp.array([True] * 5 + [False] * 5)
        losses = model_util.linear_cross_entropy(probs, targets)
        # Losses are clipped to be finite.
        self.assertTrue(np.all(np.isfinite(losses)))
        # Loss values make sense.
        np.testing.assert_allclose(
            losses[1:5], [-np.log(1e-20), -np.log(1e-3), -np.log(0.9), 0],
            atol=1e-5)
        self.assertGreater(losses[0], losses[1])
        # note: losses for false targets have especially low precision due to
        # rounding errors for small values close to 1.
        np.testing.assert_allclose(losses[6], -np.log(1e-7), atol=0.2)
        np.testing.assert_allclose(
            losses[7:10], [-np.log(1e-3), -np.log(0.9), 0], atol=1e-4)
        self.assertGreater(losses[5], losses[6])
        # Gradients are finite.
        grad_fn = jax.grad(
            lambda x: jnp.sum(model_util.linear_cross_entropy(x, targets)))
        self.assertTrue(np.all(np.isfinite(grad_fn(probs))))
if __name__ == "__main__":
    # Run the absl test driver when executed as a script.
    absltest.main()
| [
"absl.testing.absltest.main",
"jax.numpy.array",
"functools.partial",
"jax.numpy.log",
"numpy.log",
"gfsa.model.model_util.linear_cross_entropy",
"jax.scipy.special.logit",
"jax.numpy.arange",
"jax.numpy.zeros",
"gfsa.model.model_util.safe_logit",
"numpy.ones",
"numpy.isfinite",
"jax.numpy.o... | [((895, 1224), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["{'testcase_name': 'min', 'minval': 1, 'maxval': None, 'expected': [1.0, 1.0,\n 2.0, 3.0, 4.0]}", "{'testcase_name': 'max', 'minval': None, 'maxval': 3, 'expected': [0.0, 1.0,\n 2.0, 3.0, 3.0]}", "{'testcase_name': 'both', 'minval': 1, 'maxval': 3, 'expected': [1.0, 1.0, \n 2.0, 3.0, 3.0]}"], {}), "({'testcase_name': 'min', 'minval': 1,\n 'maxval': None, 'expected': [1.0, 1.0, 2.0, 3.0, 4.0]}, {\n 'testcase_name': 'max', 'minval': None, 'maxval': 3, 'expected': [0.0, \n 1.0, 2.0, 3.0, 3.0]}, {'testcase_name': 'both', 'minval': 1, 'maxval': \n 3, 'expected': [1.0, 1.0, 2.0, 3.0, 3.0]})\n", (925, 1224), False, 'from absl.testing import parameterized\n'), ((3597, 3612), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (3610, 3612), False, 'from absl.testing import absltest\n'), ((1597, 1639), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['vals', 'expected'], {}), '(vals, expected)\n', (1623, 1639), True, 'import numpy as np\n'), ((1738, 1774), 'jax.numpy.array', 'jnp.array', (['[0, 1e-20, 0.001, 0.9, 1]'], {}), '([0, 1e-20, 0.001, 0.9, 1])\n', (1747, 1774), True, 'import jax.numpy as jnp\n'), ((1787, 1815), 'gfsa.model.model_util.safe_logit', 'model_util.safe_logit', (['probs'], {}), '(probs)\n', (1808, 1815), False, 'from gfsa.model import model_util\n'), ((2005, 2045), 'jax.numpy.array', 'jnp.array', (['[-10.0, -5.0, 0.0, 5.0, 10.0]'], {}), '([-10.0, -5.0, 0.0, 5.0, 10.0])\n', (2014, 2045), True, 'import jax.numpy as jnp\n'), ((2058, 2080), 'jax.nn.sigmoid', 'jax.nn.sigmoid', (['logits'], {}), '(logits)\n', (2072, 2080), False, 'import jax\n'), ((2099, 2122), 'jax.nn.sigmoid', 'jax.nn.sigmoid', (['(-logits)'], {}), '(-logits)\n', (2113, 2122), False, 'import jax\n'), ((2566, 2635), 'jax.numpy.array', 'jnp.array', (['[0, 1e-20, 0.001, 0.9, 1, 1, 1 - 1e-07, 1 - 0.001, 0.1, 0]'], {}), '([0, 1e-20, 0.001, 0.9, 1, 1, 1 - 
1e-07, 1 - 0.001, 0.1, 0])\n', (2575, 2635), True, 'import jax.numpy as jnp\n'), ((2647, 2682), 'jax.numpy.array', 'jnp.array', (['([True] * 5 + [False] * 5)'], {}), '([True] * 5 + [False] * 5)\n', (2656, 2682), True, 'import jax.numpy as jnp\n'), ((2696, 2743), 'gfsa.model.model_util.linear_cross_entropy', 'model_util.linear_cross_entropy', (['probs', 'targets'], {}), '(probs, targets)\n', (2727, 2743), False, 'from gfsa.model import model_util\n'), ((1440, 1512), 'functools.partial', 'functools.partial', (['model_util.forward_clip'], {'minval': 'minval', 'maxval': 'maxval'}), '(model_util.forward_clip, minval=minval, maxval=maxval)\n', (1457, 1512), False, 'import functools\n'), ((1681, 1694), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (1688, 1694), True, 'import numpy as np\n'), ((1909, 1944), 'jax.scipy.special.logit', 'jax.scipy.special.logit', (['probs[1:3]'], {}), '(probs[1:3])\n', (1932, 1944), False, 'import jax\n'), ((2237, 2262), 'jax.numpy.ones', 'jnp.ones', (['[5]'], {'dtype': 'bool'}), '([5], dtype=bool)\n', (2245, 2262), True, 'import jax.numpy as jnp\n'), ((2335, 2361), 'jax.numpy.zeros', 'jnp.zeros', (['[5]'], {'dtype': 'bool'}), '([5], dtype=bool)\n', (2344, 2361), True, 'import jax.numpy as jnp\n'), ((1574, 1588), 'jax.numpy.ones', 'jnp.ones', (['(5,)'], {}), '((5,))\n', (1582, 1588), True, 'import jax.numpy as jnp\n'), ((1843, 1862), 'numpy.isfinite', 'np.isfinite', (['logits'], {}), '(logits)\n', (1854, 1862), True, 'import numpy as np\n'), ((2406, 2425), 'jax.numpy.log', 'jnp.log', (['true_probs'], {}), '(true_probs)\n', (2413, 2425), True, 'import jax.numpy as jnp\n'), ((2481, 2501), 'jax.numpy.log', 'jnp.log', (['false_probs'], {}), '(false_probs)\n', (2488, 2501), True, 'import jax.numpy as jnp\n'), ((2811, 2830), 'numpy.isfinite', 'np.isfinite', (['losses'], {}), '(losses)\n', (2822, 2830), True, 'import numpy as np\n'), ((3199, 3212), 'numpy.log', 'np.log', (['(1e-07)'], {}), '(1e-07)\n', (3205, 3212), True, 'import numpy as 
np\n'), ((3541, 3563), 'numpy.isfinite', 'np.isfinite', (['gradients'], {}), '(gradients)\n', (3552, 3563), True, 'import numpy as np\n'), ((2919, 2932), 'numpy.log', 'np.log', (['(1e-20)'], {}), '(1e-20)\n', (2925, 2932), True, 'import numpy as np\n'), ((2935, 2948), 'numpy.log', 'np.log', (['(0.001)'], {}), '(0.001)\n', (2941, 2948), True, 'import numpy as np\n'), ((2950, 2961), 'numpy.log', 'np.log', (['(0.9)'], {}), '(0.9)\n', (2956, 2961), True, 'import numpy as np\n'), ((3279, 3292), 'numpy.log', 'np.log', (['(0.001)'], {}), '(0.001)\n', (3285, 3292), True, 'import numpy as np\n'), ((3294, 3305), 'numpy.log', 'np.log', (['(0.9)'], {}), '(0.9)\n', (3300, 3305), True, 'import numpy as np\n'), ((1536, 1549), 'jax.numpy.arange', 'jnp.arange', (['(5)'], {}), '(5)\n', (1546, 1549), True, 'import jax.numpy as jnp\n'), ((3448, 3491), 'gfsa.model.model_util.linear_cross_entropy', 'model_util.linear_cross_entropy', (['x', 'targets'], {}), '(x, targets)\n', (3479, 3491), False, 'from gfsa.model import model_util\n')] |
import numpy as np
import matplotlib.pyplot as plt
import os
from matplotlib.font_manager import FontProperties
from find_ring import load_dust_outputs, load_gas_outputs, get_dust_trap
# Physical constants (SI units unless noted).
G = 6.67e-11  # SI Gravitational Constant [m^3 kg^-1 s^-2]
M = 1.989e30  # mass of the Sun in kg (the default MSTAR in FARGO3D)
R = 5.2*1.4959e11  # code unit of length: 5.2 AU in metres
GAMMA = 1.6667  # Adiabatic index
CS = 1000  # Speed of sound taken as 1 km/s


def omega_kepler(rmin, rmax, Ny):
    """Compute Keplerian angular and orbital velocities on a radial grid.

    Args:
        rmin, rmax: radial grid limits in code units (multiples of R).
        Ny: number of radial grid points.

    Returns:
        Tuple ``(omega_k, v_k)`` of lists of length ``Ny`` holding the
        Keplerian angular velocity [rad/s] and orbital speed [m/s].
    """
    # Vectorized instead of a per-radius Python loop; converted back to
    # lists to keep the original return type.
    rarr = np.linspace(rmin, rmax, Ny) * R  # physical radii in metres
    omega_k = np.sqrt(G * M / rarr ** 3)
    v_k = np.sqrt(G * M / rarr)
    return omega_k.tolist(), v_k.tolist()
def v_rad_phi(path, r, phi, n_species, n_out):
    """Load radial and azimuthal dust velocities for every species.

    Reads the FARGO3D files ``dust<i>vx<n_out>.dat`` (azimuthal) and
    ``dust<i>vy<n_out>.dat`` (radial) and averages neighbouring faces,
    since the simulation grid is staggered.
    """
    n_rad, n_phi = len(r) - 1, len(phi) - 1
    vphic, vradc = [], []
    for species in range(1, n_species + 1):
        vphi_file = os.path.join(path, "dust" + str(species) + "vx" + str(n_out) + ".dat")
        vrad_file = os.path.join(path, "dust" + str(species) + "vy" + str(n_out) + ".dat")
        vphi = np.fromfile(vphi_file).reshape(n_rad, n_phi)
        vrad = np.fromfile(vrad_file).reshape(n_rad, n_phi)
        # Center on cell centers (average of the two bounding faces).
        vphic.append(0.5 * (vphi[:-1, 1:] + vphi[:-1, :-1]))
        vradc.append(0.5 * (vrad[1:, :-1] + vrad[:-1, :-1]))
    return vphic, vradc
def plots(vphic, vradc):
    """Plot the azimuthally averaged radial velocity per species against
    Stokes number on a logarithmic x axis."""
    stokes = np.logspace(-4, 2, 127)
    for species_idx in range(len(vphic)):
        plt.plot(stokes, vradc[species_idx].mean(axis=1))
    plt.xscale('log')  # Stokes numbers span several decades
    plt.show()
def plot_vel(vk, vdust, vgas, Ny, path):
    """Plot the azimuthally averaged gas and dust velocities alongside the
    analytic Keplerian velocity, and save the figure under ``path``."""
    plt.plot(range(Ny), vk, alpha=0.5, label="Keplerian velocity")
    for species_idx, v_species in enumerate(vdust):
        plt.plot(range(Ny - 1), v_species.mean(axis=1), alpha=0.5,
                 label="Species %s" % str(species_idx + 1))
    plt.plot(range(Ny - 1), vgas.mean(axis=1), alpha=0.5, label="Gas velocity")
    plt.xlabel("Radius (Ny)")
    plt.ylabel("Velocity (m/s)")
    plt.title("Azimuthally Averaged Velocities: Keplerian, Gas & Dust")
    plt.legend(prop={'size': 7})
    plt.savefig(path + "kep_vs_actual.png")
    plt.show()
def stopping_time(stokes, omega_k, dt):
    """Convert Stokes numbers to stopping times at the dust-trap radius.

    ``dt`` indexes ``omega_k`` at the trap location; t_stop = St / Omega_K.
    """
    omega_at_trap = omega_k[dt]
    return [st / omega_at_trap for st in stokes]
def pressure(sigma, Ny, energy, rmin, rmax):
    """Plot the azimuthally averaged gas pressure.

    Per-species dust "pressures" are derived from the surface densities as
    in the original diagnostic but are not plotted (see commented loop).
    """
    rarr = np.linspace(rmin, rmax, Ny)
    rho, dust_pressure = [], []  # renamed from `pressure` to avoid shadowing this function
    for sig in sigma:
        # Surface density -> volume density assuming a Gaussian vertical
        # profile with aspect ratio 0.05 (the value used in FARGO3D).
        rho.append(sig.mean(axis=1) / (np.sqrt(2) * np.pi * 0.05 * rarr))
        dust_pressure.append(rho[-1] * CS ** 2)
    gas_pressure = (GAMMA - 1) * np.asarray(energy)
    plt.plot(range(Ny), gas_pressure.mean(axis=1), linewidth=10, alpha=0.5, label="Gas")
    # for i in range(len(dust_pressure)):
    #     plt.plot(range(Ny), dust_pressure[i], label=f"Dust {i+1}")
    plt.title("Azimuthally Averaged Gas Pressure")
    plt.xlabel("Radius [Ny]")
    plt.ylabel("Pressure (Pa)")
    plt.legend()
    plt.show()
if __name__ == "__main__":
    # Run parameters matching the FARGO3D setup being analysed.
    rmin = 0.4
    rmax = 2.5
    Ny = 128
    path = "./Set 7/fargo_multifluid/"
    pic_path = "./Set 7/pics/"
    output_number = 200
    species_number = 15
    p_bumps = 2

    # Load outputs and locate the dust trap(s).
    r, phi, sigma, vel, energy = load_dust_outputs(path, species_number, output_number)
    gas_sig, gas_vel, gas_energy = load_gas_outputs(r, phi, path, output_number)
    dt = get_dust_trap(sigma, pic_path, p_bumps=p_bumps)

    stoke = np.logspace(-5, 3, 15)
    ok, vk = omega_kepler(rmin, rmax, Ny)
    plot_vel(vk, vel, gas_vel, Ny, pic_path)
    pressure(sigma, Ny, gas_energy, rmin, rmax)
    vphic, vradc = v_rad_phi(path, r, phi, species_number, output_number)
    # plots(vphic, vradc)
    for trap_index in dt:
        stoptime = stopping_time(stoke, ok, trap_index)
        print(trap_index, stoptime)
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.show",
"find_ring.get_dust_trap",
"numpy.logspace",
"matplotlib.pyplot.legend",
"numpy.asarray",
"find_ring.load_dust_outputs",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.sqrt",
"m... | [((529, 556), 'numpy.linspace', 'np.linspace', (['rmin', 'rmax', 'Ny'], {}), '(rmin, rmax, Ny)\n', (540, 556), True, 'import numpy as np\n'), ((1331, 1354), 'numpy.logspace', 'np.logspace', (['(-4)', '(2)', '(127)'], {}), '(-4, 2, 127)\n', (1342, 1354), True, 'import numpy as np\n'), ((1450, 1460), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1458, 1460), True, 'import matplotlib.pyplot as plt\n'), ((1888, 1913), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Radius (Ny)"""'], {}), "('Radius (Ny)')\n", (1898, 1913), True, 'import matplotlib.pyplot as plt\n'), ((1915, 1943), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Velocity (m/s)"""'], {}), "('Velocity (m/s)')\n", (1925, 1943), True, 'import matplotlib.pyplot as plt\n'), ((1945, 2012), 'matplotlib.pyplot.title', 'plt.title', (['"""Azimuthally Averaged Velocities: Keplerian, Gas & Dust"""'], {}), "('Azimuthally Averaged Velocities: Keplerian, Gas & Dust')\n", (1954, 2012), True, 'import matplotlib.pyplot as plt\n'), ((2014, 2042), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': "{'size': 7}"}), "(prop={'size': 7})\n", (2024, 2042), True, 'import matplotlib.pyplot as plt\n'), ((2043, 2082), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + 'kep_vs_actual.png')"], {}), "(path + 'kep_vs_actual.png')\n", (2054, 2082), True, 'import matplotlib.pyplot as plt\n'), ((2084, 2094), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2092, 2094), True, 'import matplotlib.pyplot as plt\n'), ((2323, 2350), 'numpy.linspace', 'np.linspace', (['rmin', 'rmax', 'Ny'], {}), '(rmin, rmax, Ny)\n', (2334, 2350), True, 'import numpy as np\n'), ((2777, 2823), 'matplotlib.pyplot.title', 'plt.title', (['"""Azimuthally Averaged Gas Pressure"""'], {}), "('Azimuthally Averaged Gas Pressure')\n", (2786, 2823), True, 'import matplotlib.pyplot as plt\n'), ((2825, 2850), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Radius [Ny]"""'], {}), "('Radius [Ny]')\n", (2835, 2850), True, 'import 
matplotlib.pyplot as plt\n'), ((2852, 2879), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure (Pa)"""'], {}), "('Pressure (Pa)')\n", (2862, 2879), True, 'import matplotlib.pyplot as plt\n'), ((2881, 2893), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2891, 2893), True, 'import matplotlib.pyplot as plt\n'), ((2895, 2905), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2903, 2905), True, 'import matplotlib.pyplot as plt\n'), ((3121, 3175), 'find_ring.load_dust_outputs', 'load_dust_outputs', (['path', 'species_number', 'output_number'], {}), '(path, species_number, output_number)\n', (3138, 3175), False, 'from find_ring import load_dust_outputs, load_gas_outputs, get_dust_trap\n'), ((3208, 3253), 'find_ring.load_gas_outputs', 'load_gas_outputs', (['r', 'phi', 'path', 'output_number'], {}), '(r, phi, path, output_number)\n', (3224, 3253), False, 'from find_ring import load_dust_outputs, load_gas_outputs, get_dust_trap\n'), ((3261, 3308), 'find_ring.get_dust_trap', 'get_dust_trap', (['sigma', 'pic_path'], {'p_bumps': 'p_bumps'}), '(sigma, pic_path, p_bumps=p_bumps)\n', (3274, 3308), False, 'from find_ring import load_dust_outputs, load_gas_outputs, get_dust_trap\n'), ((3319, 3341), 'numpy.logspace', 'np.logspace', (['(-5)', '(3)', '(15)'], {}), '(-5, 3, 15)\n', (3330, 3341), True, 'import numpy as np\n'), ((1430, 1447), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (1440, 1447), True, 'import matplotlib.pyplot as plt\n'), ((2576, 2594), 'numpy.asarray', 'np.asarray', (['energy'], {}), '(energy)\n', (2586, 2594), True, 'import numpy as np\n'), ((591, 620), 'numpy.sqrt', 'np.sqrt', (['(G * M / (r * R) ** 3)'], {}), '(G * M / (r * R) ** 3)\n', (598, 620), True, 'import numpy as np\n'), ((627, 651), 'numpy.sqrt', 'np.sqrt', (['(G * M / (r * R))'], {}), '(G * M / (r * R))\n', (634, 651), True, 'import numpy as np\n'), ((2442, 2452), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2449, 2452), True, 'import 
numpy as np\n')] |
####################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: modelHelpers.py
# Authors: <NAME>
# <NAME>
#
# Requires: Python 3.x
#
####################################################################################################
import os
import sys
import cv2
import numpy as np
# Make the script's directory and its local build outputs importable before
# the ELL wrapper modules are loaded.
script_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(script_path)
sys.path.append(os.path.join(script_path, 'build'))
sys.path.append(os.path.join(script_path, 'build/Release'))
def prepare_image_for_model(image, requiredWidth, requiredHeight, reorder_to_rgb=False, convert_to_float=True):
    """ Prepare an image for use with a model. Typically, this involves:
        - Resize and center crop to the required width and height while
        preserving the image's aspect ratio. Simple resize may result in a
        stretched or squashed image which will affect the model's ability to
        classify images.
        - OpenCV gives the image in BGR order, so we may need to re-order the
        channels to RGB.
        - Convert the OpenCV result to a std::vector<float> for use with ELL
        model
    """
    if image.shape[0] > image.shape[1]:  # Tall (more rows than cols)
        rowStart = int((image.shape[0] - image.shape[1]) / 2)
        rowEnd = rowStart + image.shape[1]
        colStart = 0
        colEnd = image.shape[1]
    else:  # Wide (more cols than rows)
        rowStart = 0
        rowEnd = image.shape[0]
        colStart = int((image.shape[1] - image.shape[0]) / 2)
        colEnd = colStart + image.shape[0]
    # Center crop the image maintaining aspect ratio
    cropped = image[rowStart:rowEnd, colStart:colEnd]
    # Resize to model's requirements. BUG FIX: cv2.resize takes
    # dsize=(width, height); the arguments were previously swapped, which
    # transposed the output for non-square models.
    resized = cv2.resize(cropped, (requiredWidth, requiredHeight))
    # Re-order if needed
    if reorder_to_rgb:
        resized = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    if convert_to_float:
        # Return as a flat vector of float64 values. Uses the builtin float
        # because the np.float alias was removed in NumPy 1.20.
        result = resized.astype(float).ravel()
    else:
        result = resized.ravel()
    return result
def get_top_n_predictions(predictions, N=5, threshold=0.20):
    """Return at most the top N predictions as a list of tuples that meet the threshold.

    The first element of each tuple is the index (class) of the prediction
    and the second is its probability or confidence value. Results are
    sorted by confidence, highest first.
    """
    # Keep (index, score) pairs meeting the threshold. Uses enumerate and a
    # comprehension, and avoids shadowing the builtin `map` as before.
    scored = [(i, p) for i, p in enumerate(predictions) if p >= threshold]
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return scored[:N]
def get_mean_duration(accumulated, duration, maxAccumulatedEntries=30):
    """Append ``duration`` to the rolling window and return the window mean.

    The oldest entry is dropped once the window would exceed
    ``maxAccumulatedEntries``; ``accumulated`` is mutated in place.
    """
    accumulated.append(duration)
    if len(accumulated) > maxAccumulatedEntries:
        accumulated.pop(0)
    return np.mean(np.array(accumulated))
def draw_header(image, text):
    """Draw a green header text block across the top of the image (in place)."""
    draw_text_block(image, text, (0, 0), (50, 200, 50))
def draw_footer(image, text):
    """Draw a muted red footer text block across the bottom of the image (in place)."""
    draw_text_block(image, text, (0, image.shape[0] - 40), (200, 100, 100))
def draw_text_block(image, text, blockTopLeft=(0, 0), blockColor=(50, 200, 50), blockHeight=40):
    """Draw a filled rectangle with text onto the image, modifying it in place."""
    font_scale = 0.7
    left, top = blockTopLeft
    # Filled background bar spanning the full image width.
    cv2.rectangle(image, blockTopLeft, (image.shape[1], top + blockHeight),
                  blockColor, cv2.FILLED)
    text_origin = (left + int(blockHeight / 4), top + int(blockHeight * 0.667))
    cv2.putText(image, text, text_origin, cv2.FONT_HERSHEY_COMPLEX_SMALL,
                font_scale, (0, 0, 0), 1, cv2.LINE_AA)
| [
"sys.path.append",
"os.path.abspath",
"cv2.cvtColor",
"numpy.mean",
"numpy.array",
"cv2.rectangle",
"os.path.join",
"cv2.resize"
] | [((456, 484), 'sys.path.append', 'sys.path.append', (['script_path'], {}), '(script_path)\n', (471, 484), False, 'import sys\n'), ((429, 454), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (444, 454), False, 'import os\n'), ((501, 535), 'os.path.join', 'os.path.join', (['script_path', '"""build"""'], {}), "(script_path, 'build')\n", (513, 535), False, 'import os\n'), ((553, 595), 'os.path.join', 'os.path.join', (['script_path', '"""build/Release"""'], {}), "(script_path, 'build/Release')\n", (565, 595), False, 'import os\n'), ((1819, 1871), 'cv2.resize', 'cv2.resize', (['cropped', '(requiredHeight, requiredWidth)'], {}), '(cropped, (requiredHeight, requiredWidth))\n', (1829, 1871), False, 'import cv2\n'), ((2943, 2964), 'numpy.array', 'np.array', (['accumulated'], {}), '(accumulated)\n', (2951, 2964), True, 'import numpy as np\n'), ((2976, 2994), 'numpy.mean', 'np.mean', (['durations'], {}), '(durations)\n', (2983, 2994), True, 'import numpy as np\n'), ((3534, 3645), 'cv2.rectangle', 'cv2.rectangle', (['image', 'blockTopLeft', '(image.shape[1], blockTopLeft[1] + blockHeight)', 'blockColor', 'cv2.FILLED'], {}), '(image, blockTopLeft, (image.shape[1], blockTopLeft[1] +\n blockHeight), blockColor, cv2.FILLED)\n', (3547, 3645), False, 'import cv2\n'), ((1938, 1978), 'cv2.cvtColor', 'cv2.cvtColor', (['resized', 'cv2.COLOR_BGR2RGB'], {}), '(resized, cv2.COLOR_BGR2RGB)\n', (1950, 1978), False, 'import cv2\n')] |
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
import os
import sys
import numpy as np
from utils import montage_tf, get_variables_to_train, assign_from_checkpoint_fn, remove_missing, weights_montage
from constants import LOG_DIR
slim = tf.contrib.slim
class CNetTrainer:
def __init__(self, model, dataset, pre_processor, num_epochs, optimizer='adam', lr_policy='const', init_lr=0.0003,
             tag='default', end_lr=None, reinit_fc=False):
    """Store the training configuration and create the global step.

    No graph ops besides the global step are built here; the input
    pipeline and losses are constructed later by the train methods.
    """
    tf.logging.set_verbosity(tf.logging.DEBUG)
    self.sess = tf.Session()
    self.graph = tf.Graph()
    # Model / data configuration.
    self.model = model
    self.dataset = dataset
    self.pre_processor = pre_processor
    self.num_epochs = num_epochs
    self.tag = tag
    self.additional_info = None
    self.summaries = {}
    # Optimization configuration.
    self.opt_type = optimizer
    self.lr_policy = lr_policy
    self.init_lr = init_lr
    # Final LR of decaying schedules; defaults to 1% of the initial LR.
    self.end_lr = end_lr if end_lr is not None else 0.01 * init_lr
    self.is_finetune = False
    self.num_train_steps = None  # set once the input pipeline is built
    self.reinit_fc = reinit_fc
    self.opt_g = None
    self.opt_d = None
    with self.sess.as_default(), self.graph.as_default():
        self.global_step = slim.create_global_step()
def get_save_dir(self):
    """Build the log/checkpoint directory name for this training run."""
    fname = '_'.join('{}'.format(part)
                     for part in (self.dataset.name, self.model.name, self.tag))
    if self.is_finetune:
        fname += '_finetune'
    if self.additional_info:
        fname += '_{}'.format(self.additional_info)
    return os.path.join(LOG_DIR, '{}/'.format(fname))
def optimizer(self):
    """Return the optimizer instance selected by ``self.opt_type``.

    BUG FIX: the optimizers were previously built eagerly inside a dict,
    so both Adam and Momentum (and two learning-rate schedules) were
    constructed even though only one was used. Raises KeyError for an
    unknown type, matching the original dict-lookup behavior.
    """
    if self.opt_type == 'adam':
        return tf.train.AdamOptimizer(learning_rate=self.learning_rate(), beta1=0.9, epsilon=1e-5)
    if self.opt_type == 'sgd':
        return tf.train.MomentumOptimizer(learning_rate=self.learning_rate(), momentum=0.9)
    raise KeyError(self.opt_type)
def learning_rate(self):
    """Return the learning rate (constant or schedule tensor) for ``self.lr_policy``.

    BUG FIX: the policies were previously evaluated eagerly in a dict, so
    e.g. ``learning_rate_alex()`` ran even under the 'const' policy. That
    crashes when ``self.num_train_steps`` is still None and pollutes the
    graph with unused schedule ops. Raises KeyError for an unknown policy,
    matching the original dict-lookup behavior.
    """
    if self.lr_policy == 'const':
        return self.init_lr
    if self.lr_policy == 'alex':
        return self.learning_rate_alex()
    if self.lr_policy == 'linear':
        return self.learning_rate_linear()
    raise KeyError(self.lr_policy)
def get_train_batch(self, dataset_id):
    """Build the CPU input pipeline and return a dequeued training batch.

    Also sets ``self.num_train_steps`` from the chosen split's size.
    """
    with tf.device('/cpu:0'):
        # Select the split and derive the total number of training steps.
        if dataset_id:
            train_set = self.dataset.get_split(dataset_id)
            num_examples = self.dataset.get_num_dataset(dataset_id)
        else:
            train_set = self.dataset.get_trainset()
            num_examples = self.dataset.get_num_train()
        self.num_train_steps = (num_examples / self.model.batch_size) * self.num_epochs
        print('Number of training steps: {}'.format(self.num_train_steps))

        provider = slim.dataset_data_provider.DatasetDataProvider(
            train_set,
            num_readers=4,
            common_queue_capacity=20 * self.model.batch_size,
            common_queue_min=10 * self.model.batch_size)
        # Parse a serialized Example proto to extract the image and metadata.
        [img_train] = provider.get(['image'])
        # Pre-process data
        img_train = self.pre_processor.process_train(img_train)
        # Batch and prefetch.
        imgs_train = tf.train.batch([img_train],
                                    batch_size=self.model.batch_size,
                                    num_threads=8,
                                    capacity=5 * self.model.batch_size)
        batch_queue = slim.prefetch_queue.prefetch_queue([imgs_train])
        return batch_queue.dequeue()
def classification_loss(self, preds_train, labels_train):
    """Build the training classification loss and accuracy summaries.

    Returns the total loss (data term plus regularizers registered under
    the loss scope).
    """
    loss_scope = 'classification_loss'
    # Multi-label datasets use independent per-class sigmoids; otherwise
    # a standard softmax cross-entropy.
    if self.dataset.is_multilabel:
        train_loss = tf.contrib.losses.sigmoid_cross_entropy(preds_train, labels_train, scope=loss_scope)
    else:
        train_loss = tf.contrib.losses.softmax_cross_entropy(preds_train, labels_train, scope=loss_scope)
    tf.summary.scalar('losses/training loss', train_loss)
    train_losses = tf.losses.get_losses(loss_scope)
    train_losses += tf.losses.get_regularization_losses(loss_scope)
    total_train_loss = math_ops.add_n(train_losses, name='total_train_loss')
    # Accuracy is only meaningful for single-label classification.
    if not self.dataset.is_multilabel:
        predictions = tf.argmax(preds_train, 1)
        dense_labels = tf.argmax(labels_train, 1)
        tf.summary.scalar('accuracy/training accuracy',
                          slim.metrics.accuracy(predictions, dense_labels))
        tf.summary.histogram('labels', dense_labels)
        tf.summary.histogram('predictions', predictions)
    return total_train_loss
def make_train_op(self, loss, optimizer, vars2train=None, scope=None):
if scope:
vars2train = get_variables_to_train(trainable_scopes=scope)
train_op = slim.learning.create_train_op(loss, optimizer, variables_to_train=vars2train,
global_step=self.global_step, summarize_gradients=True)
return train_op
def make_summaries(self):
# Handle summaries
for variable in slim.get_model_variables():
tf.summary.histogram(variable.op.name, variable)
def learning_rate_alex(self):
# Define learning rate schedule
num_train_steps = self.num_train_steps
boundaries = [np.int64(num_train_steps * 0.2), np.int64(num_train_steps * 0.4),
np.int64(num_train_steps * 0.6), np.int64(num_train_steps * 0.8)]
values = [0.01, 0.01 * 250. ** (-1. / 4.), 0.01 * 250 ** (-2. / 4.), 0.01 * 250 ** (-3. / 4.),
0.01 * 250. ** (-1.)]
return tf.train.piecewise_constant(self.global_step, boundaries=boundaries, values=values)
def learning_rate_linear(self):
return tf.train.polynomial_decay(self.init_lr, self.global_step, 0.9 * self.num_train_steps,
end_learning_rate=self.end_lr)
def make_init_fn(self, chpt_path):
var2restore = slim.get_variables_to_restore(include=['discriminator'])
init_fn = assign_from_checkpoint_fn(chpt_path, var2restore, ignore_missing_vars=True)
print('Variables to restore: {}'.format([v.op.name for v in var2restore]))
sys.stdout.flush()
return init_fn
    def train_inverter(self, chpt_path, dataset_id=None):
        """Train the inverter (decoder) network, fine-tuning from `chpt_path`.

        Builds the full training graph (input batches, inverted images,
        discriminator/inversion losses, summaries) and runs slim training
        for self.num_train_steps steps.
        """
        print('Restoring from: {}'.format(chpt_path))
        self.is_finetune = True
        with self.sess.as_default():
            with self.graph.as_default():
                # Get training batches
                imgs_train = self.get_train_batch(dataset_id)
                # Get predictions
                inv_im = self.model.net(imgs_train)
                # Compute the loss
                disc_loss = self.model.discriminator_loss()
                invertion_loss = self.model.invertion_loss()
                # Handle dependencies: force UPDATE_OPS (e.g. batch-norm moving
                # averages) to run before either loss is evaluated.
                update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                if update_ops:
                    updates = tf.group(*update_ops)
                    disc_loss = control_flow_ops.with_dependencies([updates], disc_loss)
                    invertion_loss = control_flow_ops.with_dependencies([updates], invertion_loss)
                # Make summaries
                tf.summary.image('imgs/inv_imgs', montage_tf(inv_im, 2, 8), max_outputs=1)
                tf.summary.image('imgs/imgs', montage_tf(imgs_train, 2, 8), max_outputs=1)
                self.make_summaries()
                # Create training operation (separate var scopes per sub-network)
                train_op_disc = self.make_train_op(disc_loss, self.optimizer(), scope='disc2')
                train_op_dec = self.make_train_op(invertion_loss, self.optimizer(), scope='decoder')
                # Start training.
                # NOTE(review): `train_op_disc + train_op_dec` sums the two
                # train-op tensors; the result carries both sets of update
                # dependencies — confirm this combined op is intentional.
                slim.learning.train(train_op_disc + train_op_dec, self.get_save_dir(),
                                    init_fn=self.make_init_fn(chpt_path),
                                    number_of_steps=self.num_train_steps,
                                    save_summaries_secs=300, save_interval_secs=3000,
                                    log_every_n_steps=100)
| [
"tensorflow.get_collection",
"tensorflow.logging.set_verbosity",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"sys.stdout.flush",
"tensorflow.train.batch",
"tensorflow.contrib.losses.softmax_cross_entropy",
"tensorflow.losses.get_losses",
"tensorflow.summary.histogram",
"utils.get_var... | [((539, 581), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.DEBUG'], {}), '(tf.logging.DEBUG)\n', (563, 581), True, 'import tensorflow as tf\n'), ((602, 614), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (612, 614), True, 'import tensorflow as tf\n'), ((636, 646), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (644, 646), True, 'import tensorflow as tf\n'), ((4174, 4227), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""losses/training loss"""', 'train_loss'], {}), "('losses/training loss', train_loss)\n", (4191, 4227), True, 'import tensorflow as tf\n'), ((4251, 4283), 'tensorflow.losses.get_losses', 'tf.losses.get_losses', (['loss_scope'], {}), '(loss_scope)\n', (4271, 4283), True, 'import tensorflow as tf\n'), ((4308, 4355), 'tensorflow.losses.get_regularization_losses', 'tf.losses.get_regularization_losses', (['loss_scope'], {}), '(loss_scope)\n', (4343, 4355), True, 'import tensorflow as tf\n'), ((4383, 4436), 'tensorflow.python.ops.math_ops.add_n', 'math_ops.add_n', (['train_losses'], {'name': '"""total_train_loss"""'}), "(train_losses, name='total_train_loss')\n", (4397, 4436), False, 'from tensorflow.python.ops import math_ops\n'), ((5897, 5985), 'tensorflow.train.piecewise_constant', 'tf.train.piecewise_constant', (['self.global_step'], {'boundaries': 'boundaries', 'values': 'values'}), '(self.global_step, boundaries=boundaries, values\n =values)\n', (5924, 5985), True, 'import tensorflow as tf\n'), ((6033, 6154), 'tensorflow.train.polynomial_decay', 'tf.train.polynomial_decay', (['self.init_lr', 'self.global_step', '(0.9 * self.num_train_steps)'], {'end_learning_rate': 'self.end_lr'}), '(self.init_lr, self.global_step, 0.9 * self.\n num_train_steps, end_learning_rate=self.end_lr)\n', (6058, 6154), True, 'import tensorflow as tf\n'), ((6328, 6403), 'utils.assign_from_checkpoint_fn', 'assign_from_checkpoint_fn', (['chpt_path', 'var2restore'], {'ignore_missing_vars': '(True)'}), 
'(chpt_path, var2restore, ignore_missing_vars=True)\n', (6353, 6403), False, 'from utils import montage_tf, get_variables_to_train, assign_from_checkpoint_fn, remove_missing, weights_montage\n'), ((6495, 6513), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6511, 6513), False, 'import sys\n'), ((2235, 2254), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (2244, 2254), True, 'import tensorflow as tf\n'), ((3412, 3528), 'tensorflow.train.batch', 'tf.train.batch', (['[img_train]'], {'batch_size': 'self.model.batch_size', 'num_threads': '(8)', 'capacity': '(5 * self.model.batch_size)'}), '([img_train], batch_size=self.model.batch_size, num_threads=8,\n capacity=5 * self.model.batch_size)\n', (3426, 3528), True, 'import tensorflow as tf\n'), ((3957, 4046), 'tensorflow.contrib.losses.sigmoid_cross_entropy', 'tf.contrib.losses.sigmoid_cross_entropy', (['preds_train', 'labels_train'], {'scope': 'loss_scope'}), '(preds_train, labels_train, scope=\n loss_scope)\n', (3996, 4046), True, 'import tensorflow as tf\n'), ((4081, 4170), 'tensorflow.contrib.losses.softmax_cross_entropy', 'tf.contrib.losses.softmax_cross_entropy', (['preds_train', 'labels_train'], {'scope': 'loss_scope'}), '(preds_train, labels_train, scope=\n loss_scope)\n', (4120, 4170), True, 'import tensorflow as tf\n'), ((4534, 4559), 'tensorflow.argmax', 'tf.argmax', (['preds_train', '(1)'], {}), '(preds_train, 1)\n', (4543, 4559), True, 'import tensorflow as tf\n'), ((4797, 4845), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""predictions"""', 'predictions'], {}), "('predictions', predictions)\n", (4817, 4845), True, 'import tensorflow as tf\n'), ((4997, 5043), 'utils.get_variables_to_train', 'get_variables_to_train', ([], {'trainable_scopes': 'scope'}), '(trainable_scopes=scope)\n', (5019, 5043), False, 'from utils import montage_tf, get_variables_to_train, assign_from_checkpoint_fn, remove_missing, weights_montage\n'), ((5392, 5440), 
'tensorflow.summary.histogram', 'tf.summary.histogram', (['variable.op.name', 'variable'], {}), '(variable.op.name, variable)\n', (5412, 5440), True, 'import tensorflow as tf\n'), ((5585, 5616), 'numpy.int64', 'np.int64', (['(num_train_steps * 0.2)'], {}), '(num_train_steps * 0.2)\n', (5593, 5616), True, 'import numpy as np\n'), ((5618, 5649), 'numpy.int64', 'np.int64', (['(num_train_steps * 0.4)'], {}), '(num_train_steps * 0.4)\n', (5626, 5649), True, 'import numpy as np\n'), ((5673, 5704), 'numpy.int64', 'np.int64', (['(num_train_steps * 0.6)'], {}), '(num_train_steps * 0.6)\n', (5681, 5704), True, 'import numpy as np\n'), ((5706, 5737), 'numpy.int64', 'np.int64', (['(num_train_steps * 0.8)'], {}), '(num_train_steps * 0.8)\n', (5714, 5737), True, 'import numpy as np\n'), ((4757, 4783), 'tensorflow.argmax', 'tf.argmax', (['labels_train', '(1)'], {}), '(labels_train, 1)\n', (4766, 4783), True, 'import tensorflow as tf\n'), ((7174, 7216), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (7191, 7216), True, 'import tensorflow as tf\n'), ((4685, 4711), 'tensorflow.argmax', 'tf.argmax', (['labels_train', '(1)'], {}), '(labels_train, 1)\n', (4694, 4711), True, 'import tensorflow as tf\n'), ((7278, 7299), 'tensorflow.group', 'tf.group', (['*update_ops'], {}), '(*update_ops)\n', (7286, 7299), True, 'import tensorflow as tf\n'), ((7332, 7388), 'tensorflow.python.ops.control_flow_ops.with_dependencies', 'control_flow_ops.with_dependencies', (['[updates]', 'disc_loss'], {}), '([updates], disc_loss)\n', (7366, 7388), False, 'from tensorflow.python.ops import control_flow_ops\n'), ((7426, 7487), 'tensorflow.python.ops.control_flow_ops.with_dependencies', 'control_flow_ops.with_dependencies', (['[updates]', 'invertion_loss'], {}), '([updates], invertion_loss)\n', (7460, 7487), False, 'from tensorflow.python.ops import control_flow_ops\n'), ((7572, 7596), 'utils.montage_tf', 'montage_tf', (['inv_im', '(2)', 
'(8)'], {}), '(inv_im, 2, 8)\n', (7582, 7596), False, 'from utils import montage_tf, get_variables_to_train, assign_from_checkpoint_fn, remove_missing, weights_montage\n'), ((7659, 7687), 'utils.montage_tf', 'montage_tf', (['imgs_train', '(2)', '(8)'], {}), '(imgs_train, 2, 8)\n', (7669, 7687), False, 'from utils import montage_tf, get_variables_to_train, assign_from_checkpoint_fn, remove_missing, weights_montage\n')] |
from PIL import Image
import numpy as np
import copy
from PyQt5.QtGui import QImage
class ImageHandler:
    """Holds a pristine reference image and a mutable working copy.

    A view (for display) and a metrics engine (for quality metrics) are
    attached via the subscribe_* methods and notified explicitly through
    regenerate_view() / trigger_metrics_calculation().
    """

    def __init__(self):
        self.reference_image = None   # pristine copy, uint16 ndarray
        self.modified_image = None    # working copy that modifications mutate
        self.view = None              # object with display_ref_image / display_mod_image
        self.metrics_engine = None    # object with calculate_metrics(ref, mod)

    def subscribe_view(self, view) -> None:
        """Attach the view used to render the images."""
        self.view = view

    def subscribe_metrics_engine(self, metrics_engine) -> None:
        """Attach the engine used to compute image-quality metrics."""
        self.metrics_engine = metrics_engine

    def load_image_from_file(self, image_path: str) -> None:
        """Load an image from disk as the new reference and reset the working copy."""
        loaded_image = Image.open(image_path)
        image_array = np.asarray(loaded_image)
        # Remove the alpha channel if it exists.  Guard on ndim so grayscale
        # images (2-D arrays) no longer raise IndexError on shape[2].
        if image_array.ndim == 3 and image_array.shape[2] > 3:
            image_array = np.delete(image_array, 3, 2)
        self.reference_image = np.asarray(image_array, dtype=np.uint16)
        self.modified_image = copy.deepcopy(self.reference_image)

    def apply_modification(self, function) -> None:
        """Apply `function` (ndarray -> ndarray) to the working image."""
        self.modified_image = function(self.modified_image)

    def revert_modifications(self) -> None:
        """Reset the working image back to the pristine reference."""
        self.modified_image = copy.deepcopy(self.reference_image)

    def regenerate_view(self) -> None:
        """Push both images to the subscribed view.

        Raises:
            Exception: if no view has been subscribed.
        """
        if self.view is None:
            raise Exception("regenerate_view", "view is not set")
        self.view.display_ref_image(self.convert_matrix_to_qimage(self.reference_image))
        self.view.display_mod_image(self.convert_matrix_to_qimage(self.modified_image))

    def trigger_metrics_calculation(self) -> None:
        """Ask the metrics engine to compare the reference and modified images.

        Raises:
            Exception: if no metrics engine has been subscribed.
        """
        if self.metrics_engine is None:
            raise Exception("trigger_metrics_calculation", "metrics_engine is not set")
        self.metrics_engine.calculate_metrics(self.reference_image, self.modified_image)

    def convert_matrix_to_qimage(self, matrix: np.ndarray) -> QImage:
        """Convert an RGB ndarray to a QImage (values truncated to uint8)."""
        out = np.asarray(matrix, dtype=np.uint8)
        return QImage(out.data, out.shape[1], out.shape[0], out.strides[0], QImage.Format_RGB888)
| [
"copy.deepcopy",
"numpy.asarray",
"PIL.Image.open",
"PyQt5.QtGui.QImage",
"numpy.delete"
] | [((526, 548), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (536, 548), False, 'from PIL import Image\n'), ((571, 595), 'numpy.asarray', 'np.asarray', (['loaded_image'], {}), '(loaded_image)\n', (581, 595), True, 'import numpy as np\n'), ((760, 800), 'numpy.asarray', 'np.asarray', (['image_array'], {'dtype': 'np.uint16'}), '(image_array, dtype=np.uint16)\n', (770, 800), True, 'import numpy as np\n'), ((831, 866), 'copy.deepcopy', 'copy.deepcopy', (['self.reference_image'], {}), '(self.reference_image)\n', (844, 866), False, 'import copy\n'), ((1047, 1082), 'copy.deepcopy', 'copy.deepcopy', (['self.reference_image'], {}), '(self.reference_image)\n', (1060, 1082), False, 'import copy\n'), ((1788, 1822), 'numpy.asarray', 'np.asarray', (['matrix'], {'dtype': 'np.uint8'}), '(matrix, dtype=np.uint8)\n', (1798, 1822), True, 'import numpy as np\n'), ((1838, 1925), 'PyQt5.QtGui.QImage', 'QImage', (['out.data', 'out.shape[1]', 'out.shape[0]', 'out.strides[0]', 'QImage.Format_RGB888'], {}), '(out.data, out.shape[1], out.shape[0], out.strides[0], QImage.\n Format_RGB888)\n', (1844, 1925), False, 'from PyQt5.QtGui import QImage\n'), ((700, 728), 'numpy.delete', 'np.delete', (['image_array', '(3)', '(2)'], {}), '(image_array, 3, 2)\n', (709, 728), True, 'import numpy as np\n')] |
"""Common functions used in scoring germ and fiducial sets."""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
from functools import total_ordering
import numpy as _np
def list_score(input_array, scoreFunc='all'):
    """Score an array of eigenvalues; smaller scores are better.

    Parameters
    ----------
    input_array : numpy array
        The eigenvalues to be scored.

    scoreFunc : {'all', 'worst'}, optional
        Objective used for scoring.  'all' gives ``sum(1/input_array)``;
        'worst' gives ``1/min(input_array)``.  The two objectives behave
        differently in optimization routines, and one or the other can
        help avoid suboptimal local minima.

    Returns
    -------
    float
        Score for the eigenvalues.
    """
    # Division by zero is expected here (zero eigenvalues -> inf score) and
    # handled downstream, so suppress numpy's divide warning.
    with _np.errstate(divide='ignore'):
        magnitudes = _np.abs(input_array)
        if scoreFunc == 'all':
            return sum(1. / magnitudes)
        if scoreFunc == 'worst':
            return 1. / min(magnitudes)
        raise ValueError("'%s' is not a valid value for scoreFunc. "
                         "Either 'all' or 'worst' must be specified!"
                         % scoreFunc)
@total_ordering
class CompositeScore():
    """Two-part score for eigenvalue-based ranking; lower is better.

    Ordering is lexicographic on (major, minor): the `major` part always
    dominates, and `minor` only breaks ties.  Typically `major` holds the
    negative of the number of non-zero eigenvalues, so a score with more
    non-zero eigenvalues (higher `N`) compares as smaller (better) than
    one with fewer; for equal `N` the minor score decides.  Because `N`
    cannot always be recovered from the major part, it is stored
    separately for bookkeeping and never takes part in comparisons.

    Parameters
    ----------
    major, minor : float
        The dominant and tie-breaking parts of the score.

    N : int
        The number of non-zero eigenvalues (bookkeeping only).
    """

    def __init__(self, major, minor, N):
        self.major = major
        self.minor = minor
        self.N = N

    def __lt__(self, other):
        # Lexicographic comparison on the score parts; N is ignored.
        if self.major < other.major:
            return True
        if self.major > other.major:
            return False
        return self.minor < other.minor

    def __eq__(self, other):
        # Equal iff both score parts match (N intentionally excluded).
        return self.major == other.major and self.minor == other.minor

    def __repr__(self):
        return 'Score: major={} minor={}, N: {}'.format(self.major, self.minor, self.N)
def composite_rcl_fn(candidateScores, alpha):
    """Build a restricted candidate list (RCL) from CompositeScore objects.

    Parameters
    ----------
    candidateScores : list of CompositeScore
        Scores to be partitioned into RCL / not-RCL.

    alpha : float
        Number in [0, 1] setting the admission threshold relative to the
        spread of scores: 0 is purely greedy (only the best score passes),
        1 admits everything.  For simple float scores the cutoff would be
        ``(1 - alpha)*best + alpha*worst``; here the interpolation is done
        on the major part unless all majors coincide, in which case only
        the minor part is thresholded.

    Returns
    -------
    numpy.array
        Indices of the scores good enough to belong to the RCL.
    """
    best = min(candidateScores)
    worst = max(candidateScores)
    if worst.major == best.major:
        # All majors agree: interpolate the threshold on the minor part only.
        cutoff_minor = (1 - alpha) * best.minor + alpha * worst.minor
        threshold = CompositeScore(worst.major, cutoff_minor, None)
    else:
        # Interpolate on the major part; pair it with the maximal minor
        # score so *every* candidate at the cutoff major level is admitted.
        cutoff_major = (1 - alpha) * best.major + alpha * worst.major
        worst_minor = max([s.minor for s in candidateScores])
        threshold = CompositeScore(cutoff_major, worst_minor, None)
    return _np.where(_np.array(candidateScores) <= threshold)[0]
| [
"numpy.abs",
"numpy.array",
"numpy.errstate"
] | [((1694, 1723), 'numpy.errstate', '_np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (1706, 1723), True, 'import numpy as _np\n'), ((5864, 5890), 'numpy.array', '_np.array', (['candidateScores'], {}), '(candidateScores)\n', (5873, 5890), True, 'import numpy as _np\n'), ((1785, 1805), 'numpy.abs', '_np.abs', (['input_array'], {}), '(input_array)\n', (1792, 1805), True, 'import numpy as _np\n'), ((1871, 1891), 'numpy.abs', '_np.abs', (['input_array'], {}), '(input_array)\n', (1878, 1891), True, 'import numpy as _np\n')] |
import numpy as np
from scipy import signal
import math
import itertools
import pickle
import matplotlib.pyplot as plt
def skewness(t, x, detrend=1):
    """Sample skewness of signal x (normalized by x[0], optionally linearly detrended).

    `t` is accepted for interface symmetry with the other statistics but unused.
    """
    y = x / x[0]  # normalize to the first sample
    if detrend == 1:
        y = signal.detrend(y, type='linear')
    deviation = y - np.mean(y)
    z = deviation / np.std(deviation)
    return np.mean(z ** 3) / np.mean(z ** 2) ** 1.5
def kurtosis(t, x, detrend=1):
    """Excess kurtosis of signal x (normalized by x[0], optionally linearly detrended).

    `t` is accepted for interface symmetry with the other statistics but unused.
    """
    y = x / x[0]  # normalize to the first sample
    if detrend == 1:
        y = signal.detrend(y, type='linear')
    deviation = y - np.mean(y)
    z = deviation / np.std(deviation)
    return np.mean(z ** 4) / np.mean(z ** 2) ** 2 - 3
def hurst(t, x, bins=30, detrend=1, fitlims=[10,1000], **kwargs):
    """Estimate the Hurst exponent of x by the rescaled-range (R/S) method.

    The record is cut into `bins` segments; inside each, R/S is averaged
    over sub-regions of logarithmically spaced lengths, and the Hurst
    exponent is the log-log slope of the mean R/S over the lag range
    `fitlims` (in microseconds).  Side effect: plots the R/S curve and the
    fit into the current matplotlib axes.

    NOTE(review): `fitlims` is a mutable default argument; it is only read
    here, but callers should not mutate the returned default.

    Returns
    -------
    tuple
        (tax, mean_ers, std_ers, hurst_exp, fit_data): lag axis [us],
        bin-averaged R/S, its std over bins, fitted exponent, fitted curve.
    """
    # R/S method for fGm
    # (generalized hurst exponent for fBm)
    # axis
    bsize = int(1.0*len(t)/bins)                       # samples per bin
    ax = np.floor( 10**(np.arange(1.0, np.log10(bsize), 0.01)) )  # log-spaced sub-region lengths
    ers = np.zeros((bins, len(ax)))                    # R/S per (bin, length)
    for b in range(bins):
        idx1 = b*bsize
        idx2 = idx1 + bsize
        sx = x[idx1:idx2]
        if detrend == 1:
            sx = signal.detrend(sx, type='linear')
        for i in range(len(ax)):
            ls = int( ax[i] ) # length of each sub-region
            ns = int( 1.0*ax[-1]/ls ) # number of sub-region
            delta = np.zeros((ls + 1, 1))
            for j in range(ns):
                jdx1 = j*ls
                jdx2 = jdx1 + ls
                ssx = sx[jdx1:jdx2]
                # cumulative deviation from the sub-region mean (delta[0] stays 0)
                delta[1:,0] = np.cumsum(ssx) - np.cumsum(np.ones(ls))*sum(ssx)/ls
                r = np.max(delta) - np.min(delta)        # range of cumulative deviations
                s = np.sqrt(np.sum(ssx**2)/ls - (np.sum(ssx)/ls)**2)  # std of the sub-region
                ers[b,i] = ers[b,i] + r/s/ns             # accumulate sub-region average of R/S
    # time lag axis
    dt = t[1] - t[0]
    tax = ax*dt*1e6 # [us]
    # ERS
    mean_ers = np.mean(ers, 0)
    std_ers = np.std(ers, axis=0)
    ptime = tax # time lag [us]
    pdata = mean_ers
    plt.plot(ptime, pdata, '-x')
    # log-log linear fit inside the requested lag window gives the exponent
    fidx = (fitlims[0] <= ptime) * (ptime <= fitlims[1])
    fit = np.polyfit(np.log10(ptime[fidx]), np.log10(pdata[fidx]), 1)
    fit_data = 10**(fit[1])*ptime**(fit[0])
    plt.plot(ptime, fit_data, 'r')
    # Hurst exponent
    hurst_exp = fit[0]
    return tax, mean_ers, std_ers, hurst_exp, fit_data
def bp_prob(x, d=6, bins=1):
    """Bandt-Pompe ordinal-pattern probabilities of signal x.

    Counts which of the d! possible orderings each length-d window of x
    realizes, averaged over `bins` equal segments.

    Returns
    -------
    tuple
        (ax, val, std): 1-based state labels, bin-averaged probabilities
        sorted in descending order, and their std over bins (same order).
    """
    nst = math.factorial(d)      # number of possible ordinal states
    ax = np.arange(nst) + 1      # state labels 1..d!
    bsize = int(1.0 * len(x) / bins)
    # For a reliable estimate, bsize should be much larger than nst.
    # Enumerate all d! orderings; row p is the p-th permutation.
    orders = np.array(list(itertools.permutations(np.arange(d))))
    counts = np.zeros((nst, bins))
    for b in range(bins):
        seg = x[b * bsize:b * bsize + bsize]
        jnum = len(seg) - d + 1  # number of sliding windows in this segment
        for j in range(jnum):
            pattern = np.argsort(seg[j:j + d])
            match = np.sum(np.abs(orders - np.tile(pattern, (nst, 1))), 1) == 0
            counts[match, b] += 1.0 / jnum
    pi = np.mean(counts, 1)      # bin-averaged probabilities
    pierr = np.std(counts, 1)
    order_desc = np.argsort(-pi)
    return ax, pi[order_desc], pierr[order_desc]
def ns_entropy(pi):
    """Normalized Shannon entropy of probability vector pi.

    Natural-log entropy divided by log(len(pi)), so the result lies in
    [0, 1] (0 for a delta distribution, 1 for uniform).
    """
    nst = len(pi)
    nonzero = pi[pi != 0]  # drop zeros: 0*log(0) is treated as 0
    return np.sum(-nonzero * np.log(nonzero)) / np.log(nst)
def js_complexity(pi):
    """Jensen-Shannon statistical complexity of probability pi [Rosso PRL 2007].

    The Jensen-Shannon divergence between pi and the uniform distribution,
    normalized to [0, 1] and weighted by the normalized Shannon entropy.
    """
    nst = len(pi)
    # Normalized Shannon entropy of pi (zeros dropped: 0*log(0) -> 0).
    pinz = pi[pi != 0]
    nsent = np.sum(-pinz * np.log(pinz)) / np.log(nst)
    spi = nsent * np.log(nst)                       # un-normalized entropy of pi
    pe = 1.0 * np.ones(nst) / nst                   # uniform reference
    spe = np.sum(-pe * np.log(pe))
    pieh = (pi + pe) / 2.0                           # midpoint distribution
    spieh = np.sum(-pieh * np.log(pieh))
    # Normalization constant: maximal possible JS divergence for nst states.
    norm = (nst + 1.0) / nst * np.log(nst + 1.0) - 2.0 * np.log(2.0 * nst) + np.log(nst)
    return -2.0 * (spieh - spi / 2.0 - spe / 2.0) / norm * nsent
def ch_measure(pi):
    """Complexity-entropy (C-H) pair for a BP probability [Rosso PRL 2007].

    In the C-H plane, chaotic signals show moderate C and H (above the
    fBm locus) while stochastic signals show low C and high H (below it).

    Returns
    -------
    tuple
        (jscom, nsent): Jensen-Shannon complexity and normalized Shannon entropy.
    """
    return js_complexity(pi), ns_entropy(pi)
def lmc_complexity(pi, nst):
    """LMC (Lopez-Ruiz / Mancini / Calbet) complexity of probability pi.

    Returns
    -------
    tuple
        (clmc, nent): disequilibrium * normalized entropy, and the
        normalized entropy itself.
    """
    pe = np.ones(nst) / nst          # uniform reference distribution
    pinz = pi[pi != 0]               # drop zeros to avoid log(0)
    nent = -1.0 / np.log(nst) * np.sum(pinz * np.log(pinz))
    diseq = np.sum((pi - pe) ** 2)  # Euclidean disequilibrium
    return diseq * nent, nent
def complexity_limits(d):
    """Boundary curves of the complexity-entropy (C-H) plane for dimension d.

    Returns
    -------
    tuple
        (Hone, Cone, Htwo, Ctwo): the upper boundary (one dominant state,
        remainder equiprobable) and the lower boundary (families with the
        first n states suppressed), the latter sorted along H.
    """
    nst = math.factorial(d)
    # Upper curve: probability p for one state, (1-p)/(nst-1) for the rest.
    pval = np.arange(1.0 / nst, 1, 0.001)
    Hone = -1.0 / np.log(nst) * (pval * np.log(pval) + (1.0 - pval) * np.log((1.0 - pval) / (nst - 1.0)))
    Cone = np.zeros(len(Hone))
    for k in range(len(Hone)):
        prob = np.zeros(nst)
        prob[0] = pval[k]
        prob[1:] = (1.0 - pval[k]) / (nst - 1.0)
        Cone[k] = js_complexity(prob)
    # Lower curve: piecewise segments for n = 0 .. nst-2 suppressed states.
    Htwo = np.array([1])
    Ctwo = np.array([0])
    for n in range(nst - 1):
        pmin = np.arange(0.001, 1.0 / (nst - n), 0.001)
        Hext = -1.0 / np.log(nst) * (pmin * np.log(pmin) + (1.0 - pmin) * np.log((1.0 - pmin) / (nst - n - 1.0)))
        Cext = np.zeros(len(Hext))
        for k in range(len(Hext)):
            prob = np.zeros(nst)
            prob[0:n] = 0
            prob[n:(n + 1)] = pmin[k]
            prob[(n + 1):] = (1.0 - pmin[k]) / (nst - n - 1.0)
            Cext[k] = js_complexity(prob)
        Htwo = np.concatenate((Htwo, Hext), axis=0)
        Ctwo = np.concatenate((Ctwo, Cext), axis=0)
    # Sort the lower curve along the entropy axis.
    order = np.argsort(Htwo)
    return Hone, Cone, Htwo[order], Ctwo[order]
def fmb_fgn_locus(d):
    """Load precomputed C-H locus curves for fBm and fGn at dimension d.

    Reads '../chdata/ch_fbm_fgn_d{d}.pkl'.  If the file is missing or
    unreadable, returns (None, None, None, None) instead of raising
    NameError on unbound locals (the previous `except: pass` left the
    return variables undefined on failure).

    Returns
    -------
    tuple
        (c_fbm, h_fbm, c_fgn, h_fgn), each None when loading fails.
    """
    c_fbm = h_fbm = c_fgn = h_fgn = None
    try:
        with open('../chdata/ch_fbm_fgn_d{:d}.pkl'.format(d), 'rb') as f:
            [c_fbm, h_fbm, c_fgn, h_fgn] = pickle.load(f)
    except Exception:
        # Best-effort load: keep the None defaults on any I/O/unpickling error.
        pass
    return c_fbm, h_fbm, c_fgn, h_fgn
def fisher_measure(pi):
    """Fisher information measure of the (sorted) probability vector pi.

    Discrete Fisher information with normalization F0 = 1 when the
    distribution is a delta (zero entropy) and F0 = 1/2 otherwise.
    """
    nst = len(pi)
    # Normalized Shannon entropy (zeros dropped), used only for the F0 switch.
    pinz = pi[pi != 0]
    nsent = np.sum(-pinz * np.log(pinz)) / np.log(nst)
    f0 = 1.0 if nsent == 0 else 1.0 / 2.0
    return f0 * np.sum((np.sqrt(pi[1:]) - np.sqrt(pi[:-1])) ** 2)
def intermittency(t, x, bins=20, overlap=0.2, qstep=0.3, fitlims=[20.0,100.0], verbose=1, **kwargs):
    """Intermittency parameter C(1) from multi-fractal analysis [Carreras PoP 2000].

    Ranges from 0 (mono-fractal) to 1.  Computes q-th order moments of the
    locally averaged normalized energy over overlapping windows of varying
    length T, fits K(q) from the log-log scaling, and evaluates C(q) = dK/dq
    at q ~ 1.  When verbose == 1, diagnostic panels are plotted.

    NOTE(review): `fitlims` is a mutable default (read-only here); the loop
    below rebinds the parameter `t` (time axis) as a window index, and
    `intmit` is only assigned when some q falls in [0.9, 1.1] — with an
    unlucky `qstep` this would raise UnboundLocalError at the return.
    """
    # intermittency parameter from multi-fractal analysis [Carreras PoP 2000]
    # this ranges from 0 (mono-fractal) to 1
    # add D fitting later
    # axis
    qax = np.arange(-2,8,qstep) # order axis
    N = len(x)
    Tmax = int( N/(bins - overlap*(bins - 1.0)) ) # minimum bin -> maximum data length
    Tax = np.floor( 10**(np.arange(1, np.log10(Tmax), 0.1)) ) # sub-data length axis
    nTax = Tax/N # normalized axis
    # data dimension
    eTq = np.zeros((len(Tax), len(qax)))  # moments, indexed (length, order)
    K = np.zeros(len(qax))              # scaling exponents K(q)
    C = np.zeros(len(qax))              # intermittency function C(q)
    D = np.zeros(len(qax))              # generalized dimension D(q) = 1 - C(q)
    # first axes
    x = signal.detrend(x, type='linear')
    if verbose == 1:
        plt.subplots_adjust(hspace = 0.5, wspace = 0.3)
        axes1 = plt.subplot(5,1,1)
        plt.plot(t, x)
    ndxe = (x - np.mean(x))**2 / np.mean((x - np.mean(x))**2) # Eq.(7)
    for t, T in enumerate(Tax): # loop over different length T
        bins = int( N/(T - overlap*(T-1)) ) # number of bins with length T
        eT = np.zeros(bins)
        bstep = int(T*(1 - overlap))
        for j in range(bins):
            idx1 = j*bstep
            idx2 = int(idx1 + T)
            eT[j] = np.mean(ndxe[idx1:idx2]) # Eq.(9)
        # calculate moments
        for k, q in enumerate(qax):
            eTq[t, k] = np.mean(eT**(q)) # Eq.(10)
    # second axes
    if verbose == 1: plt.subplot(5,1,2)
    # calculate K
    for k, q in enumerate(qax):
        if verbose == 1: plt.plot(nTax, eTq[:,k], 'o')
        # fit range
        nT1 = fitlims[0]/N
        nT2 = fitlims[1]/N
        idx = (nT1 < nTax) * (nTax < nT2)
        # K(q) is minus the log-log slope of the moment vs. window length
        lx = np.log(nTax[idx])
        ly = np.log(eTq[idx,k])
        fit = np.polyfit(lx, ly, 1)
        fit_func = np.poly1d(fit)
        K[k] = -fit[0]
        fx = np.arange(nTax.min(), nTax.max(), 1.0/N)
        fy = np.exp(fit_func(np.log(fx)))
        if verbose == 1:
            plt.plot(fx, fy)
            plt.axvline(x=nT1, color='r')
            plt.axvline(x=nT2, color='r')
    if verbose == 1:
        plt.title('Linear fit of loglog plot is -K(q)')
        plt.xlabel('T/N')
        plt.ylabel('eTq moments')
        plt.xscale('log')
        plt.yscale('log')
        # third axes
        plt.subplot(5,1,3)
        plt.plot(qax, K, '-o')
        plt.xlabel('q')
        plt.ylabel('K(q)')
    # calculate C and D
    for k, q in enumerate(qax):
        if (0.9 <= q) and (q <= 1.1):
            # near q = 1 use the derivative dK/dq (C(1) is the intermittency)
            Kgrad = np.gradient(K, qax[1] - qax[0])
            C[k] = Kgrad[k]
            intmit = C[k]
            print('C({:g}) intermittency parameter is {:g}'.format(q, intmit))
        else:
            C[k] = K[k] / (q - 1)
        D[k] = 1 - C[k]
    if verbose == 1:
        # fourth axes
        plt.subplot(5,1,4)
        plt.plot(qax, C, '-o')
        plt.xlabel('q')
        plt.ylabel('C(q)')
        # fifth axes
        plt.subplot(5,1,5)
        plt.plot(qax, D, '-o')
        plt.xlabel('q')
        plt.ylabel('D(q)')
        plt.show()
    return intmit
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.yscale",
"numpy.sum",
"numpy.polyfit",
"numpy.empty",
"numpy.ones",
"numpy.argsort",
"numpy.mean",
"numpy.arange",
"pickle.load",
"numpy.tile",
"numpy.atleast_2d",
"matplotlib.pyplot.axvline",
"numpy.std",
"numpy.cumsum",
"numpy.max",
"nu... | [((1771, 1786), 'numpy.mean', 'np.mean', (['ers', '(0)'], {}), '(ers, 0)\n', (1778, 1786), True, 'import numpy as np\n'), ((1801, 1820), 'numpy.std', 'np.std', (['ers'], {'axis': '(0)'}), '(ers, axis=0)\n', (1807, 1820), True, 'import numpy as np\n'), ((1879, 1907), 'matplotlib.pyplot.plot', 'plt.plot', (['ptime', 'pdata', '"""-x"""'], {}), "(ptime, pdata, '-x')\n", (1887, 1907), True, 'import matplotlib.pyplot as plt\n'), ((2083, 2113), 'matplotlib.pyplot.plot', 'plt.plot', (['ptime', 'fit_data', '"""r"""'], {}), "(ptime, fit_data, 'r')\n", (2091, 2113), True, 'import matplotlib.pyplot as plt\n'), ((2277, 2294), 'math.factorial', 'math.factorial', (['d'], {}), '(d)\n', (2291, 2294), False, 'import math\n'), ((2572, 2588), 'numpy.empty', 'np.empty', (['(0, d)'], {}), '((0, d))\n', (2580, 2588), True, 'import numpy as np\n'), ((2749, 2770), 'numpy.zeros', 'np.zeros', (['(nst, bins)'], {}), '((nst, bins))\n', (2757, 2770), True, 'import numpy as np\n'), ((3142, 3157), 'numpy.mean', 'np.mean', (['val', '(1)'], {}), '(val, 1)\n', (3149, 3157), True, 'import numpy as np\n'), ((3188, 3202), 'numpy.std', 'np.std', (['val', '(1)'], {}), '(val, 1)\n', (3194, 3202), True, 'import numpy as np\n'), ((3225, 3240), 'numpy.argsort', 'np.argsort', (['(-pi)'], {}), '(-pi)\n', (3235, 3240), True, 'import numpy as np\n'), ((4620, 4642), 'numpy.sum', 'np.sum', (['((pi - pe) ** 2)'], {}), '((pi - pe) ** 2)\n', (4626, 4642), True, 'import numpy as np\n'), ((4725, 4742), 'math.factorial', 'math.factorial', (['d'], {}), '(d)\n', (4739, 4742), False, 'import math\n'), ((4755, 4785), 'numpy.arange', 'np.arange', (['(1.0 / nst)', '(1)', '(0.001)'], {}), '(1.0 / nst, 1, 0.001)\n', (4764, 4785), True, 'import numpy as np\n'), ((5112, 5125), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (5120, 5125), True, 'import numpy as np\n'), ((5137, 5150), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (5145, 5150), True, 'import numpy as np\n'), ((5776, 5792), 'numpy.argsort', 
'np.argsort', (['Htwo'], {}), '(Htwo)\n', (5786, 5792), True, 'import numpy as np\n'), ((6590, 6613), 'numpy.arange', 'np.arange', (['(-2)', '(8)', 'qstep'], {}), '(-2, 8, qstep)\n', (6599, 6613), True, 'import numpy as np\n'), ((7017, 7049), 'scipy.signal.detrend', 'signal.detrend', (['x'], {'type': '"""linear"""'}), "(x, type='linear')\n", (7031, 7049), False, 'from scipy import signal\n'), ((230, 262), 'scipy.signal.detrend', 'signal.detrend', (['x'], {'type': '"""linear"""'}), "(x, type='linear')\n", (244, 262), False, 'from scipy import signal\n'), ((326, 342), 'numpy.mean', 'np.mean', (['(nx ** 3)'], {}), '(nx ** 3)\n', (333, 342), True, 'import numpy as np\n'), ((486, 518), 'scipy.signal.detrend', 'signal.detrend', (['x'], {'type': '"""linear"""'}), "(x, type='linear')\n", (500, 518), False, 'from scipy import signal\n'), ((1986, 2007), 'numpy.log10', 'np.log10', (['ptime[fidx]'], {}), '(ptime[fidx])\n', (1994, 2007), True, 'import numpy as np\n'), ((2009, 2030), 'numpy.log10', 'np.log10', (['pdata[fidx]'], {}), '(pdata[fidx])\n', (2017, 2030), True, 'import numpy as np\n'), ((2332, 2346), 'numpy.arange', 'np.arange', (['nst'], {}), '(nst)\n', (2341, 2346), True, 'import numpy as np\n'), ((2624, 2636), 'numpy.arange', 'np.arange', (['d'], {}), '(d)\n', (2633, 2636), True, 'import numpy as np\n'), ((3512, 3523), 'numpy.log', 'np.log', (['nst'], {}), '(nst)\n', (3518, 3523), True, 'import numpy as np\n'), ((3734, 3745), 'numpy.log', 'np.log', (['nst'], {}), '(nst)\n', (3740, 3745), True, 'import numpy as np\n'), ((4484, 4496), 'numpy.ones', 'np.ones', (['nst'], {}), '(nst)\n', (4491, 4496), True, 'import numpy as np\n'), ((4949, 4962), 'numpy.zeros', 'np.zeros', (['nst'], {}), '(nst)\n', (4957, 4962), True, 'import numpy as np\n'), ((5193, 5233), 'numpy.arange', 'np.arange', (['(0.001)', '(1.0 / (nst - n))', '(0.001)'], {}), '(0.001, 1.0 / (nst - n), 0.001)\n', (5202, 5233), True, 'import numpy as np\n'), ((5677, 5713), 'numpy.concatenate', 'np.concatenate', 
(['(Htwo, Hext)'], {'axis': '(0)'}), '((Htwo, Hext), axis=0)\n', (5691, 5713), True, 'import numpy as np\n'), ((5729, 5765), 'numpy.concatenate', 'np.concatenate', (['(Ctwo, Cext)'], {'axis': '(0)'}), '((Ctwo, Cext), axis=0)\n', (5743, 5765), True, 'import numpy as np\n'), ((7080, 7123), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.5)', 'wspace': '(0.3)'}), '(hspace=0.5, wspace=0.3)\n', (7099, 7123), True, 'import matplotlib.pyplot as plt\n'), ((7144, 7164), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(1)', '(1)'], {}), '(5, 1, 1)\n', (7155, 7164), True, 'import matplotlib.pyplot as plt\n'), ((7172, 7186), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'x'], {}), '(t, x)\n', (7180, 7186), True, 'import matplotlib.pyplot as plt\n'), ((7412, 7426), 'numpy.zeros', 'np.zeros', (['bins'], {}), '(bins)\n', (7420, 7426), True, 'import numpy as np\n'), ((7765, 7785), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(1)', '(2)'], {}), '(5, 1, 2)\n', (7776, 7785), True, 'import matplotlib.pyplot as plt\n'), ((8020, 8037), 'numpy.log', 'np.log', (['nTax[idx]'], {}), '(nTax[idx])\n', (8026, 8037), True, 'import numpy as np\n'), ((8051, 8070), 'numpy.log', 'np.log', (['eTq[idx, k]'], {}), '(eTq[idx, k])\n', (8057, 8070), True, 'import numpy as np\n'), ((8085, 8106), 'numpy.polyfit', 'np.polyfit', (['lx', 'ly', '(1)'], {}), '(lx, ly, 1)\n', (8095, 8106), True, 'import numpy as np\n'), ((8126, 8140), 'numpy.poly1d', 'np.poly1d', (['fit'], {}), '(fit)\n', (8135, 8140), True, 'import numpy as np\n'), ((8430, 8477), 'matplotlib.pyplot.title', 'plt.title', (['"""Linear fit of loglog plot is -K(q)"""'], {}), "('Linear fit of loglog plot is -K(q)')\n", (8439, 8477), True, 'import matplotlib.pyplot as plt\n'), ((8486, 8503), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""T/N"""'], {}), "('T/N')\n", (8496, 8503), True, 'import matplotlib.pyplot as plt\n'), ((8512, 8537), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""eTq moments"""'], 
{}), "('eTq moments')\n", (8522, 8537), True, 'import matplotlib.pyplot as plt\n'), ((8546, 8563), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (8556, 8563), True, 'import matplotlib.pyplot as plt\n'), ((8572, 8589), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (8582, 8589), True, 'import matplotlib.pyplot as plt\n'), ((8620, 8640), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(1)', '(3)'], {}), '(5, 1, 3)\n', (8631, 8640), True, 'import matplotlib.pyplot as plt\n'), ((8647, 8669), 'matplotlib.pyplot.plot', 'plt.plot', (['qax', 'K', '"""-o"""'], {}), "(qax, K, '-o')\n", (8655, 8669), True, 'import matplotlib.pyplot as plt\n'), ((8678, 8693), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""q"""'], {}), "('q')\n", (8688, 8693), True, 'import matplotlib.pyplot as plt\n'), ((8702, 8720), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""K(q)"""'], {}), "('K(q)')\n", (8712, 8720), True, 'import matplotlib.pyplot as plt\n'), ((9127, 9147), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(1)', '(4)'], {}), '(5, 1, 4)\n', (9138, 9147), True, 'import matplotlib.pyplot as plt\n'), ((9154, 9176), 'matplotlib.pyplot.plot', 'plt.plot', (['qax', 'C', '"""-o"""'], {}), "(qax, C, '-o')\n", (9162, 9176), True, 'import matplotlib.pyplot as plt\n'), ((9185, 9200), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""q"""'], {}), "('q')\n", (9195, 9200), True, 'import matplotlib.pyplot as plt\n'), ((9209, 9227), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""C(q)"""'], {}), "('C(q)')\n", (9219, 9227), True, 'import matplotlib.pyplot as plt\n'), ((9258, 9278), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(1)', '(5)'], {}), '(5, 1, 5)\n', (9269, 9278), True, 'import matplotlib.pyplot as plt\n'), ((9285, 9307), 'matplotlib.pyplot.plot', 'plt.plot', (['qax', 'D', '"""-o"""'], {}), "(qax, D, '-o')\n", (9293, 9307), True, 'import matplotlib.pyplot as plt\n'), ((9316, 9331), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""q"""'], {}), "('q')\n", (9326, 9331), True, 'import matplotlib.pyplot as plt\n'), ((9340, 9358), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""D(q)"""'], {}), "('D(q)')\n", (9350, 9358), True, 'import matplotlib.pyplot as plt\n'), ((9368, 9378), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9376, 9378), True, 'import matplotlib.pyplot as plt\n'), ((278, 288), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (285, 288), True, 'import numpy as np\n'), ((343, 359), 'numpy.mean', 'np.mean', (['(nx ** 2)'], {}), '(nx ** 2)\n', (350, 359), True, 'import numpy as np\n'), ((534, 544), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (541, 544), True, 'import numpy as np\n'), ((582, 598), 'numpy.mean', 'np.mean', (['(nx ** 4)'], {}), '(nx ** 4)\n', (589, 598), True, 'import numpy as np\n'), ((1068, 1101), 'scipy.signal.detrend', 'signal.detrend', (['sx'], {'type': '"""linear"""'}), "(sx, type='linear')\n", (1082, 1101), False, 'from scipy import signal\n'), ((1276, 1297), 'numpy.zeros', 'np.zeros', (['(ls + 1, 1)'], {}), '((ls + 1, 1))\n', (1284, 1297), True, 'import numpy as np\n'), ((2673, 2689), 'numpy.atleast_2d', 'np.atleast_2d', (['p'], {}), '(p)\n', (2686, 2689), True, 'import numpy as np\n'), ((2987, 3002), 'numpy.argsort', 'np.argsort', (['ssx'], {}), '(ssx)\n', (2997, 3002), True, 'import numpy as np\n'), ((3464, 3476), 'numpy.log', 'np.log', (['pinz'], {}), '(pinz)\n', (3470, 3476), True, 'import numpy as np\n'), ((3778, 3790), 'numpy.ones', 'np.ones', (['nst'], {}), '(nst)\n', (3785, 3790), True, 'import numpy as np\n'), ((3818, 3828), 'numpy.log', 'np.log', (['pe'], {}), '(pe)\n', (3824, 3828), True, 'import numpy as np\n'), ((3883, 3895), 'numpy.log', 'np.log', (['pieh'], {}), '(pieh)\n', (3889, 3895), True, 'import numpy as np\n'), ((4567, 4578), 'numpy.log', 'np.log', (['nst'], {}), '(nst)\n', (4573, 4578), True, 'import numpy as np\n'), ((4798, 4809), 'numpy.log', 'np.log', (['nst'], {}), '(nst)\n', (4804, 4809), True, 'import 
numpy as np\n'), ((5457, 5470), 'numpy.zeros', 'np.zeros', (['nst'], {}), '(nst)\n', (5465, 5470), True, 'import numpy as np\n'), ((6020, 6034), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6031, 6034), False, 'import pickle\n'), ((7575, 7599), 'numpy.mean', 'np.mean', (['ndxe[idx1:idx2]'], {}), '(ndxe[idx1:idx2])\n', (7582, 7599), True, 'import numpy as np\n'), ((7698, 7714), 'numpy.mean', 'np.mean', (['(eT ** q)'], {}), '(eT ** q)\n', (7705, 7714), True, 'import numpy as np\n'), ((7859, 7889), 'matplotlib.pyplot.plot', 'plt.plot', (['nTax', 'eTq[:, k]', '"""o"""'], {}), "(nTax, eTq[:, k], 'o')\n", (7867, 7889), True, 'import matplotlib.pyplot as plt\n'), ((8298, 8314), 'matplotlib.pyplot.plot', 'plt.plot', (['fx', 'fy'], {}), '(fx, fy)\n', (8306, 8314), True, 'import matplotlib.pyplot as plt\n'), ((8328, 8357), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'nT1', 'color': '"""r"""'}), "(x=nT1, color='r')\n", (8339, 8357), True, 'import matplotlib.pyplot as plt\n'), ((8370, 8399), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'nT2', 'color': '"""r"""'}), "(x=nT2, color='r')\n", (8381, 8399), True, 'import matplotlib.pyplot as plt\n'), ((8836, 8867), 'numpy.gradient', 'np.gradient', (['K', '(qax[1] - qax[0])'], {}), '(K, qax[1] - qax[0])\n', (8847, 8867), True, 'import numpy as np\n'), ((303, 313), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (310, 313), True, 'import numpy as np\n'), ((559, 569), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (566, 569), True, 'import numpy as np\n'), ((599, 615), 'numpy.mean', 'np.mean', (['(nx ** 2)'], {}), '(nx ** 2)\n', (606, 615), True, 'import numpy as np\n'), ((857, 872), 'numpy.log10', 'np.log10', (['bsize'], {}), '(bsize)\n', (865, 872), True, 'import numpy as np\n'), ((4032, 4043), 'numpy.log', 'np.log', (['nst'], {}), '(nst)\n', (4038, 4043), True, 'import numpy as np\n'), ((4593, 4605), 'numpy.log', 'np.log', (['pinz'], {}), '(pinz)\n', (4599, 4605), True, 'import numpy as np\n'), ((4818, 
4830), 'numpy.log', 'np.log', (['pval'], {}), '(pval)\n', (4824, 4830), True, 'import numpy as np\n'), ((4844, 4878), 'numpy.log', 'np.log', (['((1.0 - pval) / (nst - 1.0))'], {}), '((1.0 - pval) / (nst - 1.0))\n', (4850, 4878), True, 'import numpy as np\n'), ((5292, 5303), 'numpy.log', 'np.log', (['nst'], {}), '(nst)\n', (5298, 5303), True, 'import numpy as np\n'), ((6765, 6779), 'numpy.log10', 'np.log10', (['Tmax'], {}), '(Tmax)\n', (6773, 6779), True, 'import numpy as np\n'), ((7204, 7214), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (7211, 7214), True, 'import numpy as np\n'), ((8248, 8258), 'numpy.log', 'np.log', (['fx'], {}), '(fx)\n', (8254, 8258), True, 'import numpy as np\n'), ((1459, 1473), 'numpy.cumsum', 'np.cumsum', (['ssx'], {}), '(ssx)\n', (1468, 1473), True, 'import numpy as np\n'), ((1532, 1545), 'numpy.max', 'np.max', (['delta'], {}), '(delta)\n', (1538, 1545), True, 'import numpy as np\n'), ((1548, 1561), 'numpy.min', 'np.min', (['delta'], {}), '(delta)\n', (1554, 1561), True, 'import numpy as np\n'), ((5312, 5324), 'numpy.log', 'np.log', (['pmin'], {}), '(pmin)\n', (5318, 5324), True, 'import numpy as np\n'), ((5338, 5376), 'numpy.log', 'np.log', (['((1.0 - pmin) / (nst - n - 1.0))'], {}), '((1.0 - pmin) / (nst - n - 1.0))\n', (5344, 5376), True, 'import numpy as np\n'), ((6258, 6273), 'numpy.sqrt', 'np.sqrt', (['pi[1:]'], {}), '(pi[1:])\n', (6265, 6273), True, 'import numpy as np\n'), ((6276, 6292), 'numpy.sqrt', 'np.sqrt', (['pi[:-1]'], {}), '(pi[:-1])\n', (6283, 6292), True, 'import numpy as np\n'), ((7234, 7244), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (7241, 7244), True, 'import numpy as np\n'), ((3992, 4009), 'numpy.log', 'np.log', (['(nst + 1.0)'], {}), '(nst + 1.0)\n', (3998, 4009), True, 'import numpy as np\n'), ((4014, 4031), 'numpy.log', 'np.log', (['(2.0 * nst)'], {}), '(2.0 * nst)\n', (4020, 4031), True, 'import numpy as np\n'), ((1590, 1606), 'numpy.sum', 'np.sum', (['(ssx ** 2)'], {}), '(ssx ** 2)\n', (1596, 1606), True, 
'import numpy as np\n'), ((3046, 3068), 'numpy.tile', 'np.tile', (['sso', '(nst, 1)'], {}), '(sso, (nst, 1))\n', (3053, 3068), True, 'import numpy as np\n'), ((1486, 1497), 'numpy.ones', 'np.ones', (['ls'], {}), '(ls)\n', (1493, 1497), True, 'import numpy as np\n'), ((1611, 1622), 'numpy.sum', 'np.sum', (['ssx'], {}), '(ssx)\n', (1617, 1622), True, 'import numpy as np\n')] |
#!/usr/bin/python3.7
# -*-coding:utf8 -*
import numpy as np
import unittest
from FDApy.representation.functional_data import (DenseFunctionalData,
IrregularFunctionalData)
class TestDenseFunctionalData1D(unittest.TestCase):
"""Test class for the class DenseFunctionalData in one dimension."""
def setUp(self):
argvals = {'input_dim_0': np.array([1, 2, 3, 4])}
values = np.array([[1, 2, 3, 4],
[5, 6, 7, 9],
[3, 4, 5, 7],
[3, 4, 6, 1],
[3, 4, 7, 6]])
self.dense_fd = DenseFunctionalData(argvals, values)
def test_argvals_stand(self):
is_equal = np.allclose(self.dense_fd.argvals_stand['input_dim_0'],
np.array([0., 0.33333333, 0.66666667, 1.]))
self.assertTrue(is_equal)
def test_n_obs(self):
self.assertEqual(self.dense_fd.n_obs, 5)
def test_n_dim(self):
self.assertEqual(self.dense_fd.n_dim, 1)
def test_range_obs(self):
self.assertEqual(self.dense_fd.range_obs, (1, 9))
def test_range_dim(self):
self.assertEqual(self.dense_fd.range_dim, {'input_dim_0': (1, 4)})
def test_shape(self):
self.assertEqual(self.dense_fd.shape, {'input_dim_0': 4})
def test_subset(self):
new_dense_fd = self.dense_fd[2]
self.assertIsInstance(new_dense_fd, DenseFunctionalData)
self.assertEqual(new_dense_fd.n_obs, 1)
new_dense_fd = self.dense_fd[1:4]
self.assertIsInstance(new_dense_fd, DenseFunctionalData)
self.assertEqual(new_dense_fd.n_obs, 3)
def test_as_irregular(self):
irregu_fd = self.dense_fd.as_irregular()
self.assertIsInstance(irregu_fd, IrregularFunctionalData)
self.assertEqual(irregu_fd.n_obs, 5)
def test_is_compatible(self):
self.assertTrue(self.dense_fd.is_compatible(self.dense_fd))
def test_mean(self):
mean_fd = self.dense_fd.mean()
is_equal = np.allclose(mean_fd.values,
np.array([[3., 4., 5.6, 5.4]]))
self.assertTrue(is_equal)
class TestDenseFunctionalData2D(unittest.TestCase):
"""Test class for the class DenseFunctionalData in two dimension."""
def setUp(self):
argvals = {'input_dim_0': np.array([1, 2, 3, 4]),
'input_dim_1': np.array([5, 6, 7])}
values = np.array([[[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]],
[[5, 6, 7], [5, 6, 7], [5, 6, 7], [5, 6, 7]],
[[3, 4, 5], [3, 4, 5], [3, 4, 5], [3, 4, 5]],
[[3, 4, 6], [3, 4, 5], [3, 4, 5], [3, 4, 5]],
[[3, 4, 7], [3, 4, 5], [3, 4, 5], [3, 4, 5]]])
self.dense_fd = DenseFunctionalData(argvals, values)
def test_argvals_stand(self):
is_equal_dim0 = np.allclose(self.dense_fd.argvals_stand['input_dim_0'],
np.array([0., 0.33333333, 0.66666667, 1.]))
is_equal_dim1 = np.allclose(self.dense_fd.argvals_stand['input_dim_1'],
np.array([0., 0.5, 1.]))
self.assertTrue(is_equal_dim0 and is_equal_dim1)
def test_n_obs(self):
self.assertEqual(self.dense_fd.n_obs, 5)
def test_n_dim(self):
self.assertEqual(self.dense_fd.n_dim, 2)
def test_range_obs(self):
self.assertEqual(self.dense_fd.range_obs, (1, 7))
def test_range_dim(self):
self.assertEqual(self.dense_fd.range_dim, {'input_dim_0': (1, 4),
'input_dim_1': (5, 7)})
def test_shape(self):
self.assertEqual(self.dense_fd.shape, {'input_dim_0': 4,
'input_dim_1': 3})
def test_subset(self):
new_dense_fd = self.dense_fd[2]
self.assertIsInstance(new_dense_fd, DenseFunctionalData)
self.assertEqual(new_dense_fd.n_obs, 1)
new_dense_fd = self.dense_fd[1:4]
self.assertIsInstance(new_dense_fd, DenseFunctionalData)
self.assertEqual(new_dense_fd.n_obs, 3)
def test_as_irregular(self):
irregu_fd = self.dense_fd.as_irregular()
self.assertIsInstance(irregu_fd, IrregularFunctionalData)
self.assertEqual(irregu_fd.n_obs, 5)
def test_is_compatible(self):
self.assertTrue(self.dense_fd.is_compatible(self.dense_fd))
def test_mean(self):
mean_fd = self.dense_fd.mean()
is_equal = np.allclose(mean_fd.values,
np.array([[[3., 4., 5.6],
[3., 4., 5.],
[3., 4., 5.],
[3., 4., 5.]]]))
self.assertTrue(is_equal)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"numpy.array",
"FDApy.representation.functional_data.DenseFunctionalData"
] | [((4901, 4916), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4914, 4916), False, 'import unittest\n'), ((448, 533), 'numpy.array', 'np.array', (['[[1, 2, 3, 4], [5, 6, 7, 9], [3, 4, 5, 7], [3, 4, 6, 1], [3, 4, 7, 6]]'], {}), '([[1, 2, 3, 4], [5, 6, 7, 9], [3, 4, 5, 7], [3, 4, 6, 1], [3, 4, 7, 6]]\n )\n', (456, 533), True, 'import numpy as np\n'), ((661, 697), 'FDApy.representation.functional_data.DenseFunctionalData', 'DenseFunctionalData', (['argvals', 'values'], {}), '(argvals, values)\n', (680, 697), False, 'from FDApy.representation.functional_data import DenseFunctionalData, IrregularFunctionalData\n'), ((2479, 2732), 'numpy.array', 'np.array', (['[[[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]], [[5, 6, 7], [5, 6, 7], [5, 6,\n 7], [5, 6, 7]], [[3, 4, 5], [3, 4, 5], [3, 4, 5], [3, 4, 5]], [[3, 4, 6\n ], [3, 4, 5], [3, 4, 5], [3, 4, 5]], [[3, 4, 7], [3, 4, 5], [3, 4, 5],\n [3, 4, 5]]]'], {}), '([[[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]], [[5, 6, 7], [5, 6, \n 7], [5, 6, 7], [5, 6, 7]], [[3, 4, 5], [3, 4, 5], [3, 4, 5], [3, 4, 5]],\n [[3, 4, 6], [3, 4, 5], [3, 4, 5], [3, 4, 5]], [[3, 4, 7], [3, 4, 5], [3,\n 4, 5], [3, 4, 5]]])\n', (2487, 2732), True, 'import numpy as np\n'), ((2852, 2888), 'FDApy.representation.functional_data.DenseFunctionalData', 'DenseFunctionalData', (['argvals', 'values'], {}), '(argvals, values)\n', (2871, 2888), False, 'from FDApy.representation.functional_data import DenseFunctionalData, IrregularFunctionalData\n'), ((407, 429), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (415, 429), True, 'import numpy as np\n'), ((839, 883), 'numpy.array', 'np.array', (['[0.0, 0.33333333, 0.66666667, 1.0]'], {}), '([0.0, 0.33333333, 0.66666667, 1.0])\n', (847, 883), True, 'import numpy as np\n'), ((2133, 2165), 'numpy.array', 'np.array', (['[[3.0, 4.0, 5.6, 5.4]]'], {}), '([[3.0, 4.0, 5.6, 5.4]])\n', (2141, 2165), True, 'import numpy as np\n'), ((2382, 2404), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), 
'([1, 2, 3, 4])\n', (2390, 2404), True, 'import numpy as np\n'), ((2440, 2459), 'numpy.array', 'np.array', (['[5, 6, 7]'], {}), '([5, 6, 7])\n', (2448, 2459), True, 'import numpy as np\n'), ((3040, 3084), 'numpy.array', 'np.array', (['[0.0, 0.33333333, 0.66666667, 1.0]'], {}), '([0.0, 0.33333333, 0.66666667, 1.0])\n', (3048, 3084), True, 'import numpy as np\n'), ((3200, 3225), 'numpy.array', 'np.array', (['[0.0, 0.5, 1.0]'], {}), '([0.0, 0.5, 1.0])\n', (3208, 3225), True, 'import numpy as np\n'), ((4637, 4722), 'numpy.array', 'np.array', (['[[[3.0, 4.0, 5.6], [3.0, 4.0, 5.0], [3.0, 4.0, 5.0], [3.0, 4.0, 5.0]]]'], {}), '([[[3.0, 4.0, 5.6], [3.0, 4.0, 5.0], [3.0, 4.0, 5.0], [3.0, 4.0, 5.0]]]\n )\n', (4645, 4722), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import functools
import argparse
import os
import cv2
from DataPreparation import get_baseline_dataset, split_data, augment
from Model import Model
_IMG_SHAPE = (512, 512, 3)
_BATCH_SIZE = 1
class FilePaths:
fnAccuracy = '../model/accuracy.txt'
fnTrain = '../data/train/'
fnLabels = '../data/train_masks/'
fnLabelsCsv ='../data/train_masks.csv'
fnInfer = '../data/test/'
fnResults ='../results/'
def preprocess_function(train):
if(train):
cfg = {
'resize': [_IMG_SHAPE[0], _IMG_SHAPE[1]],
'scale': 1 / 255.,
'hue_delta': 0.1,
'horizontal_flip': True,
'width_shift_range': 0.1,
'height_shift_range': 0.1
}
else:
cfg = {
'resize': [_IMG_SHAPE[0], _IMG_SHAPE[1]],
'scale': 1 / 255.
}
preprocessing_fn = functools.partial(augment, **cfg)
return preprocessing_fn
# Helper function to write u_net prediction to an image
def preds_to_img(pred, actual_img, fname):
scale = 255.
pred = np.reshape(pred,(_IMG_SHAPE[0], _IMG_SHAPE[1]))
pred = pred[:,:]*scale
#pred = pred.astype(int)
pred = np.reshape(pred,(_IMG_SHAPE[0],_IMG_SHAPE[1],1))
cv2.imwrite(os.path.join(FilePaths.fnResults, "{}.jpg".format(fname)), actual_img)
cv2.imwrite(os.path.join(FilePaths.fnResults, "{}_result.jpg".format(fname)), pred)
def main():
print("Inside main")
# optional command line args
parser = argparse.ArgumentParser()
parser.add_argument("--train", help="train the NN", action="store_true")
#parser.add_argument("--validate", help="validate the NN", action="store_true")
parser.add_argument("--predict",nargs=1)
args = parser.parse_args()
if args.train:
# load training data, create TF model
x_train_filenames, x_val_filenames, y_train_filenames, y_val_filenames = split_data(FilePaths)
train_batches_per_epoch = int(len(x_train_filenames)/_BATCH_SIZE) + 1
no_of_val_batches = int(len(x_val_filenames)/_BATCH_SIZE) + 1
train_ds = get_baseline_dataset(x_train_filenames,
y_train_filenames,
batch_size=_BATCH_SIZE,
preproc_fn=preprocess_function(train=True),
)
val_ds = get_baseline_dataset(x_val_filenames,
y_val_filenames,
batch_size=_BATCH_SIZE,
preproc_fn= preprocess_function(train=False),
)
model = Model(val_dataset =val_ds, train_dataset=train_ds, mustRestore = False)
model.train(train_batches_per_epoch, no_of_val_batches, FilePaths)
#elif args.validate:
#model = Model(val_dataset =val_ds, mustRestore = False)
#model.validate()
# infer on test image
elif args.predict:
# We pass test_img as dummy label to maintain dataset structure
x_val_filenames, y_val_filenames = [args.predict[0]]*32, [args.predict[0]]*32
val_ds = get_baseline_dataset(x_val_filenames,
y_val_filenames,
batch_size=_BATCH_SIZE,
preproc_fn= preprocess_function(train=False),
threads=1)
print(open(FilePaths.fnAccuracy).read())
model = Model(val_dataset =val_ds, mustRestore = True)
prediction = model.infer()
fname = args.predict[0].split('/')[-1].split('.')[0]
test_img = cv2.imread(args.predict[0])
test_img = cv2.resize(test_img, (_IMG_SHAPE[0], _IMG_SHAPE[1]))
preds_to_img(prediction, test_img, fname)
if __name__ == '__main__':
main()
| [
"functools.partial",
"argparse.ArgumentParser",
"Model.Model",
"DataPreparation.split_data",
"cv2.imread",
"numpy.reshape",
"cv2.resize"
] | [((871, 904), 'functools.partial', 'functools.partial', (['augment'], {}), '(augment, **cfg)\n', (888, 904), False, 'import functools\n'), ((1055, 1103), 'numpy.reshape', 'np.reshape', (['pred', '(_IMG_SHAPE[0], _IMG_SHAPE[1])'], {}), '(pred, (_IMG_SHAPE[0], _IMG_SHAPE[1]))\n', (1065, 1103), True, 'import numpy as np\n'), ((1161, 1212), 'numpy.reshape', 'np.reshape', (['pred', '(_IMG_SHAPE[0], _IMG_SHAPE[1], 1)'], {}), '(pred, (_IMG_SHAPE[0], _IMG_SHAPE[1], 1))\n', (1171, 1212), True, 'import numpy as np\n'), ((1455, 1480), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1478, 1480), False, 'import argparse\n'), ((1837, 1858), 'DataPreparation.split_data', 'split_data', (['FilePaths'], {}), '(FilePaths)\n', (1847, 1858), False, 'from DataPreparation import get_baseline_dataset, split_data, augment\n'), ((2315, 2383), 'Model.Model', 'Model', ([], {'val_dataset': 'val_ds', 'train_dataset': 'train_ds', 'mustRestore': '(False)'}), '(val_dataset=val_ds, train_dataset=train_ds, mustRestore=False)\n', (2320, 2383), False, 'from Model import Model\n'), ((2959, 3002), 'Model.Model', 'Model', ([], {'val_dataset': 'val_ds', 'mustRestore': '(True)'}), '(val_dataset=val_ds, mustRestore=True)\n', (2964, 3002), False, 'from Model import Model\n'), ((3103, 3130), 'cv2.imread', 'cv2.imread', (['args.predict[0]'], {}), '(args.predict[0])\n', (3113, 3130), False, 'import cv2\n'), ((3144, 3196), 'cv2.resize', 'cv2.resize', (['test_img', '(_IMG_SHAPE[0], _IMG_SHAPE[1])'], {}), '(test_img, (_IMG_SHAPE[0], _IMG_SHAPE[1]))\n', (3154, 3196), False, 'import cv2\n')] |
"""
Sparse matrix functions
"""
#
# Authors: <NAME>, March 2002
# <NAME>, August 2012 (Sparse Updates)
# <NAME>, August 2012 (Sparse Updates)
#
from __future__ import division, print_function, absolute_import
__all__ = ['expm', 'inv']
import math
from numpy import asarray, dot, eye, ceil, log2
from numpy import matrix as mat
import numpy as np
import scipy.misc
from scipy.linalg.misc import norm
from scipy.linalg.basic import solve, solve_triangular, inv
from scipy.sparse.base import isspmatrix
from scipy.sparse.construct import eye as speye
from scipy.sparse.linalg import spsolve
import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg.interface import LinearOperator
UPPER_TRIANGULAR = 'upper_triangular'
def inv(A):
"""
Compute the inverse of a sparse matrix
.. versionadded:: 0.12.0
Parameters
----------
A : (M,M) ndarray or sparse matrix
square matrix to be inverted
Returns
-------
Ainv : (M,M) ndarray or sparse matrix
inverse of `A`
Notes
-----
This computes the sparse inverse of `A`. If the inverse of `A` is expected
to be non-sparse, it will likely be faster to convert `A` to dense and use
scipy.linalg.inv.
"""
I = speye(A.shape[0], A.shape[1], dtype=A.dtype, format=A.format)
Ainv = spsolve(A, I)
return Ainv
def _exact_1_norm(A):
# A compatibility function which should eventually disappear.
# This is copypasted from expm_action.
if scipy.sparse.isspmatrix(A):
return max(abs(A).sum(axis=0).flat)
else:
return np.linalg.norm(A, 1)
def _ident_like(A):
# A compatibility function which should eventually disappear.
# This is copypasted from expm_action.
if scipy.sparse.isspmatrix(A):
return scipy.sparse.construct.eye(A.shape[0], A.shape[1],
dtype=A.dtype, format=A.format)
else:
return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)
def _count_nonzero(A):
# A compatibility function which should eventually disappear.
#XXX There should be a better way to do this when A is sparse
# in the traditional sense.
if isspmatrix(A):
return np.sum(A.toarray() != 0)
else:
return np.sum(A != 0)
def _is_upper_triangular(A):
# This function could possibly be of wider interest.
if isspmatrix(A):
lower_part = scipy.sparse.tril(A, -1)
if lower_part.nnz == 0:
# structural upper triangularity
return True
else:
# coincidental upper triangularity
return _count_nonzero(lower_part) == 0
else:
return _count_nonzero(np.tril(A, -1)) == 0
class MatrixPowerOperator(LinearOperator):
def __init__(self, A, p):
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if p < 0:
raise ValueError('expected p to be a non-negative integer')
self._A = A
self._p = p
self.ndim = A.ndim
self.shape = A.shape
def matvec(self, x):
for i in range(self._p):
x = self._A.dot(x)
return x
def rmatvec(self, x):
for i in range(self._p):
x = x.dot(self._A)
return x
def matmat(self, X):
for i in range(self._p):
X = self._A.dot(X)
return X
@property
def T(self):
return MatrixPowerOperator(self._A.T, self._p)
class ProductOperator(LinearOperator):
"""
For now, this is limited to products of multiple square matrices.
"""
def __init__(self, *args):
for A in args:
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError(
'For now, the ProductOperator implementation is '
'limited to the product of multiple square matrices.')
if args:
n = args[0].shape[0]
for A in args:
for d in A.shape:
if d != n:
raise ValueError(
'The square matrices of the ProductOperator '
'must all have the same shape.')
self.shape = (n, n)
self.ndim = len(self.shape)
self._operator_sequence = args
def matvec(self, x):
for A in reversed(self._operator_sequence):
x = A.dot(x)
return x
def rmatvec(self, x):
for A in self._operator_sequence:
x = x.dot(A)
return x
def matmat(self, X):
for A in reversed(self._operator_sequence):
X = A.dot(X)
return X
@property
def T(self):
T_args = [A.T for A in reversed(self._operator_sequence)]
return ProductOperator(*T_args)
def _onenormest_matrix_power(A, p,
t=2, itmax=5, compute_v=False, compute_w=False):
"""
Efficiently estimate the 1-norm of A^p.
Parameters
----------
A : ndarray
Matrix whose 1-norm of a power is to be computed.
p : int
Non-negative integer power.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
#XXX Eventually turn this into an API function in the _onenormest module,
#XXX and remove its underscore,
#XXX but wait until expm_action and expm_2009 go into scipy.
return scipy.sparse.linalg.onenormest(MatrixPowerOperator(A, p))
def _onenormest_product(operator_seq,
t=2, itmax=5, compute_v=False, compute_w=False):
"""
Efficiently estimate the 1-norm of the matrix product of the args.
Parameters
----------
operator_seq : linear operator sequence
Matrices whose 1-norm of product is to be computed.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
#XXX Eventually turn this into an API function in the _onenormest module,
#XXX and remove its underscore,
#XXX but wait until expm_2009 goes into scipy.
return scipy.sparse.linalg.onenormest(ProductOperator(*operator_seq))
def expm(A):
"""
Compute the matrix exponential using Pade approximation.
.. versionadded:: 0.12.0
Parameters
----------
A : (M,M) array or sparse matrix
2D Array or Matrix (sparse or dense) to be exponentiated
Returns
-------
expA : (M,M) ndarray
Matrix exponential of `A`
Notes
-----
This is algorithm (6.1) which is a simplification of algorithm (5.1).
References
----------
.. [1] <NAME> and <NAME> (2009)
"A New Scaling and Squaring Algorithm for the Matrix Exponential."
SIAM Journal on Matrix Analysis and Applications.
31 (3). pp. 970-989. ISSN 1095-7162
"""
# Detect upper triangularity.
structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None
# Define the identity matrix depending on sparsity.
ident = _ident_like(A)
# Try Pade order 3.
A2 = A.dot(A)
d6 = _onenormest_matrix_power(A2, 3)**(1/6.)
eta_1 = max(_onenormest_matrix_power(A2, 2)**(1/4.), d6)
if eta_1 < 1.495585217958292e-002 and _ell(A, 3) == 0:
U, V = _pade3(A, ident, A2)
return _solve_P_Q(U, V, structure=structure)
# Try Pade order 5.
A4 = A2.dot(A2)
d4 = _exact_1_norm(A4)**(1/4.)
eta_2 = max(d4, d6)
if eta_2 < 2.539398330063230e-001 and _ell(A, 5) == 0:
U, V = _pade5(A, ident, A2, A4)
return _solve_P_Q(U, V, structure=structure)
# Try Pade orders 7 and 9.
A6 = A2.dot(A4)
d6 = _exact_1_norm(A6)**(1/6.)
d8 = _onenormest_matrix_power(A4, 2)**(1/8.)
eta_3 = max(d6, d8)
if eta_3 < 9.504178996162932e-001 and _ell(A, 7) == 0:
U, V = _pade7(A, ident, A2, A4, A6)
return _solve_P_Q(U, V, structure=structure)
if eta_3 < 2.097847961257068e+000 and _ell(A, 9) == 0:
U, V = _pade9(A, ident, A2, A4, A6)
return _solve_P_Q(U, V, structure=structure)
# Use Pade order 13.
d10 = _onenormest_product((A4, A6))**(1/10.)
eta_4 = max(d8, d10)
eta_5 = min(eta_3, eta_4)
theta_13 = 4.25
s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0)
s = s + _ell(2**-s * A, 13)
B = A * 2**-s
B2 = A2 * 2**(-2*s)
B4 = A4 * 2**(-4*s)
B6 = A6 * 2**(-6*s)
U, V = _pade13(B, ident, B2, B4, B6)
X = _solve_P_Q(U, V, structure=structure)
if structure == UPPER_TRIANGULAR:
# Invoke Code Fragment 2.1.
X = _fragment_2_1(X, A, s)
else:
# X = r_13(A)^(2^s) by repeated squaring.
for i in range(s):
X = X.dot(X)
return X
def _solve_P_Q(U, V, structure=None):
"""
A helper function for expm_2009.
Parameters
----------
U : ndarray
Pade numerator.
V : ndarray
Pade denominator.
structure : str, optional
A string describing the structure of both matrices `U` and `V`.
Only `upper_triangular` is currently supported.
Notes
-----
The `structure` argument is inspired by similar args
for theano and cvxopt functions.
"""
P = U + V
Q = -U + V
if isspmatrix(U):
return spsolve(Q, P)
elif structure is None:
return solve(Q, P)
elif structure == UPPER_TRIANGULAR:
return solve_triangular(Q, P)
else:
raise ValueError('unsupported matrix structure: ' + str(structure))
def _sinch(x):
"""
Stably evaluate sinch.
Notes
-----
The strategy of falling back to a sixth order Taylor expansion
was suggested by the Spallation Neutron Source docs
which was found on the internet by google search.
http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html
The details of the cutoff point and the Horner-like evaluation
was picked without reference to anything in particular.
Note that sinch is not currently implemented in scipy.special,
whereas the "engineer's" definition of sinc is implemented.
The implementation of sinc involves a scaling factor of pi
that distinguishes it from the "mathematician's" version of sinc.
"""
# If x is small then use sixth order Taylor expansion.
# How small is small? I am using the point where the relative error
# of the approximation is less than 1e-14.
# If x is large then directly evaluate sinh(x) / x.
x2 = x*x
if abs(x) < 0.0135:
return 1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.)))
else:
return np.sinh(x) / x
def _eq_10_42(lam_1, lam_2, t_12):
"""
Equation (10.42) of Functions of Matrices: Theory and Computation.
Notes
-----
This is a helper function for _fragment_2_1 of expm_2009.
Equation (10.42) is on page 251 in the section on Schur algorithms.
In particular, section 10.4.3 explains the Schur-Parlett algorithm.
expm([[lam_1, t_12], [0, lam_1])
=
[[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)],
[0, exp(lam_2)]
"""
# The plain formula t_12 * (exp(lam_2) - exp(lam_2)) / (lam_2 - lam_1)
# apparently suffers from cancellation, according to Higham's textbook.
# A nice implementation of sinch, defined as sinh(x)/x,
# will apparently work around the cancellation.
a = 0.5 * (lam_1 + lam_2)
b = 0.5 * (lam_1 - lam_2)
return t_12 * np.exp(a) * _sinch(b)
def _fragment_2_1(X, T, s):
"""
A helper function for expm_2009.
Notes
-----
The argument X is modified in-place, but this modification is not the same
as the returned value of the function.
This function also takes pains to do things in ways that are compatible
with sparse matrices, for example by avoiding fancy indexing
and by using methods of the matrices whenever possible instead of
using functions of the numpy or scipy libraries themselves.
"""
# Form X = r_m(2^-s T)
# Replace diag(X) by exp(2^-s diag(T)).
n = X.shape[0]
diag_T = T.diagonal().copy()
# Replace diag(X) by exp(2^-s diag(T)).
scale = 2 ** -s
exp_diag = np.exp(scale * diag_T)
for k in range(n):
X[k, k] = exp_diag[k]
for i in range(s-1, -1, -1):
X = X.dot(X)
# Replace diag(X) by exp(2^-i diag(T)).
scale = 2 ** -i
exp_diag = np.exp(scale * diag_T)
for k in range(n):
X[k, k] = exp_diag[k]
# Replace (first) superdiagonal of X by explicit formula
# for superdiagonal of exp(2^-i T) from Eq (10.42) of
# the author's 2008 textbook
# Functions of Matrices: Theory and Computation.
for k in range(n-1):
lam_1 = scale * diag_T[k]
lam_2 = scale * diag_T[k+1]
t_12 = scale * T[k, k+1]
value = _eq_10_42(lam_1, lam_2, t_12)
X[k, k+1] = value
# Return the updated X matrix.
return X
def _ell(A, m):
    """
    A helper function for expm_2009.

    Parameters
    ----------
    A : linear operator
        A linear operator whose norm of power we care about.
    m : int
        The power of the linear operator

    Returns
    -------
    value : int
        A value related to a bound.
    """
    # scipy.misc.comb was deprecated and has been removed from SciPy;
    # scipy.special.comb with exact=True returns the same exact integer.
    from scipy.special import comb
    p = 2*m + 1
    # The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
    # They are coefficients of terms of a generating function series expansion.
    abs_c_recip = comb(2*p, p, exact=True) * math.factorial(2*p + 1)
    # This is explained after Eq. (1.2) of the 2009 expm paper.
    # It is the "unit roundoff" of IEEE double precision arithmetic.
    u = 2**-53
    # Estimate the 1-norm of the matrix power.
    est = _onenormest_matrix_power(abs(A), p)
    # Treat zero norm as a special case.
    if not est:
        return 0
    alpha = est / (_exact_1_norm(A) * abs_c_recip)
    log2_alpha_div_u = np.log2(alpha/u)
    value = int(np.ceil(log2_alpha_div_u / (2 * m)))
    return max(value, 0)
# Implementation of Pade approximations of various degree
# using the algorithm presented in [Higham 2005].
# These should apply to both dense and sparse matricies.
# ident is the identity matrix, which matches A in being sparse or dense.
def _pade3(A, ident, A2=None):
b = (120., 60., 12., 1.)
if A2 is None:
A2 = A.dot(A)
U = A.dot(b[3]*A2 + b[1]*ident)
V = b[2]*A2 + b[0]*ident
return U,V
def _pade5(A, ident, A2=None, A4=None):
b = (30240., 15120., 3360., 420., 30., 1.)
if A2 is None:
A2 = A.dot(A)
if A4 is None:
A4 = A2.dot(A2)
U = A.dot(b[5]*A4 + b[3]*A2 + b[1]*ident)
V = b[4]*A4 + b[2]*A2 + b[0]*ident
return U,V
def _pade7(A, ident, A2=None, A4=None, A6=None):
b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
if A2 is None:
A2 = A.dot(A)
if A4 is None:
A4 = A2.dot(A2)
if A6 is None:
A6 = A4.dot(A2)
U = A.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
V = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
return U,V
def _pade9(A, ident, A2=None, A4=None, A6=None):
b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
2162160., 110880., 3960., 90., 1.)
if A2 is None:
A2 = A.dot(A)
if A4 is None:
A4 = A2.dot(A2)
if A6 is None:
A6 = A4.dot(A2)
A8 = A6.dot(A2)
U = A.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
V = b[8]*A8 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
return U,V
def _pade13(A, ident, A2=None, A4=None, A6=None):
b = (64764752532480000., 32382376266240000., 7771770303897600.,
1187353796428800., 129060195264000., 10559470521600., 670442572800.,
33522128640., 1323241920., 40840800., 960960., 16380., 182., 1.)
if A2 is None:
A2 = A.dot(A)
if A4 is None:
A4 = A2.dot(A2)
if A6 is None:
A6 = A4.dot(A2)
U = A.dot(A6.dot(b[13]*A6 + b[11]*A4 + b[9]*A2) + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
V = A6.dot(b[12]*A6 + b[10]*A4 + b[8]*A2) + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
return U,V
| [
"scipy.linalg.basic.solve",
"numpy.sum",
"numpy.ceil",
"numpy.tril",
"numpy.log2",
"scipy.sparse.construct.eye",
"scipy.sparse.base.isspmatrix",
"numpy.linalg.norm",
"numpy.exp",
"scipy.sparse.linalg.spsolve",
"math.factorial",
"scipy.linalg.basic.solve_triangular",
"numpy.eye",
"numpy.sin... | [((1268, 1329), 'scipy.sparse.construct.eye', 'speye', (['A.shape[0]', 'A.shape[1]'], {'dtype': 'A.dtype', 'format': 'A.format'}), '(A.shape[0], A.shape[1], dtype=A.dtype, format=A.format)\n', (1273, 1329), True, 'from scipy.sparse.construct import eye as speye\n'), ((1341, 1354), 'scipy.sparse.linalg.spsolve', 'spsolve', (['A', 'I'], {}), '(A, I)\n', (1348, 1354), False, 'from scipy.sparse.linalg import spsolve\n'), ((2179, 2192), 'scipy.sparse.base.isspmatrix', 'isspmatrix', (['A'], {}), '(A)\n', (2189, 2192), False, 'from scipy.sparse.base import isspmatrix\n'), ((2369, 2382), 'scipy.sparse.base.isspmatrix', 'isspmatrix', (['A'], {}), '(A)\n', (2379, 2382), False, 'from scipy.sparse.base import isspmatrix\n'), ((11072, 11085), 'scipy.sparse.base.isspmatrix', 'isspmatrix', (['U'], {}), '(U)\n', (11082, 11085), False, 'from scipy.sparse.base import isspmatrix\n'), ((14005, 14027), 'numpy.exp', 'np.exp', (['(scale * diag_T)'], {}), '(scale * diag_T)\n', (14011, 14027), True, 'import numpy as np\n'), ((15766, 15784), 'numpy.log2', 'np.log2', (['(alpha / u)'], {}), '(alpha / u)\n', (15773, 15784), True, 'import numpy as np\n'), ((1608, 1628), 'numpy.linalg.norm', 'np.linalg.norm', (['A', '(1)'], {}), '(A, 1)\n', (1622, 1628), True, 'import numpy as np\n'), ((1934, 1979), 'numpy.eye', 'np.eye', (['A.shape[0]', 'A.shape[1]'], {'dtype': 'A.dtype'}), '(A.shape[0], A.shape[1], dtype=A.dtype)\n', (1940, 1979), True, 'import numpy as np\n'), ((2259, 2273), 'numpy.sum', 'np.sum', (['(A != 0)'], {}), '(A != 0)\n', (2265, 2273), True, 'import numpy as np\n'), ((11102, 11115), 'scipy.sparse.linalg.spsolve', 'spsolve', (['Q', 'P'], {}), '(Q, P)\n', (11109, 11115), False, 'from scipy.sparse.linalg import spsolve\n'), ((14228, 14250), 'numpy.exp', 'np.exp', (['(scale * diag_T)'], {}), '(scale * diag_T)\n', (14234, 14250), True, 'import numpy as np\n'), ((15349, 15374), 'math.factorial', 'math.factorial', (['(2 * p + 1)'], {}), '(2 * p + 1)\n', (15363, 15374), 
False, 'import math\n'), ((15799, 15834), 'numpy.ceil', 'np.ceil', (['(log2_alpha_div_u / (2 * m))'], {}), '(log2_alpha_div_u / (2 * m))\n', (15806, 15834), True, 'import numpy as np\n'), ((11159, 11170), 'scipy.linalg.basic.solve', 'solve', (['Q', 'P'], {}), '(Q, P)\n', (11164, 11170), False, 'from scipy.linalg.basic import solve, solve_triangular, inv\n'), ((12434, 12444), 'numpy.sinh', 'np.sinh', (['x'], {}), '(x)\n', (12441, 12444), True, 'import numpy as np\n'), ((13278, 13287), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (13284, 13287), True, 'import numpy as np\n'), ((2683, 2697), 'numpy.tril', 'np.tril', (['A', '(-1)'], {}), '(A, -1)\n', (2690, 2697), True, 'import numpy as np\n'), ((10081, 10106), 'numpy.log2', 'np.log2', (['(eta_5 / theta_13)'], {}), '(eta_5 / theta_13)\n', (10088, 10106), True, 'import numpy as np\n'), ((11226, 11248), 'scipy.linalg.basic.solve_triangular', 'solve_triangular', (['Q', 'P'], {}), '(Q, P)\n', (11242, 11248), False, 'from scipy.linalg.basic import solve, solve_triangular, inv\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: <NAME>
@Contact: <EMAIL>
@File: reconstruction.py
@Time: 2020/1/2 10:26 AM
"""
import os
import sys
import time
import shutil
import numpy as np
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR
import sklearn.metrics as metrics
from tensorboardX import SummaryWriter
from model import ClassificationNet
from dataset import Dataset
from utils import Logger
class Classification(object):
    """Training harness for ClassificationNet on point-cloud data.

    Builds the experiment directory layout (snapshot/<id>/models and
    tensorboard/<id>), wires up the dataset/dataloader, model, optimizer and
    (optionally) a cosine-annealing LR scheduler, then runs the training loop
    with periodic snapshotting.  Stdout is tee'd into the snapshot log file.
    """
    def __init__(self, args):
        """Configure directories, logging, data, model and optimizer from CLI args."""
        self.dataset_name = args.dataset
        # Default to 250 epochs when --epochs is not given.
        if args.epochs != None:
            self.epochs = args.epochs
        else:
            self.epochs = 250
        self.batch_size = args.batch_size
        self.snapshot_interval = args.snapshot_interval
        self.no_cuda = args.no_cuda
        self.model_path = args.model_path
        self.no_scheduler = args.no_scheduler
        # create exp directory: reuse the id embedded in a resumed checkpoint
        # path (snapshot/<id>/models/...), otherwise derive one from the
        # --exp-name flag or a timestamp
        file = [f for f in args.model_path.split('/')]
        if args.exp_name != None:
            self.experiment_id = "Classify_" + args.exp_name
        elif file[-2] == 'models':
            self.experiment_id = file[-3]
        else:
            self.experiment_id = "Classify" + time.strftime('%m%d%H%M%S')
        snapshot_root = 'snapshot/%s' % self.experiment_id
        tensorboard_root = 'tensorboard/%s' % self.experiment_id
        self.save_dir = os.path.join(snapshot_root, 'models/')
        self.tboard_dir = tensorboard_root
        # check arguments: dirs are only (re)created when training from
        # scratch; interactively confirms before wiping an existing snapshot
        if self.model_path == '':
            if not os.path.exists(self.save_dir):
                os.makedirs(self.save_dir)
            else:
                choose = input("Remove " + self.save_dir + " ? (y/n)")
                if choose == "y":
                    shutil.rmtree(self.save_dir)
                    os.makedirs(self.save_dir)
                else:
                    sys.exit(0)
            if not os.path.exists(self.tboard_dir):
                os.makedirs(self.tboard_dir)
            else:
                shutil.rmtree(self.tboard_dir)
                os.makedirs(self.tboard_dir)
        # tee stdout into snapshot/<id>/log.txt
        sys.stdout = Logger(os.path.join(snapshot_root, 'log.txt'))
        self.writer = SummaryWriter(log_dir=self.tboard_dir)
        # print args
        print(str(args))
        # get gpu id(s), e.g. "0, 1" -> [0, 1]
        gids = ''.join(args.gpu.split())
        self.gpu_ids = [int(gid) for gid in gids.split(',')]
        self.first_gpu = self.gpu_ids[0]
        # generate dataset
        self.train_dataset = Dataset(
            root=args.dataset_root,
            dataset_name=args.dataset,
            split='all',
            num_points=args.num_points,
            random_translate=True,
            random_rotate=args.use_rotate,
            random_jitter=args.use_jitter
        )
        self.train_loader = torch.utils.data.DataLoader(
            self.train_dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.workers
        )
        print("Training set size:", self.train_loader.dataset.__len__())
        # initialize model
        self.model = ClassificationNet(args)
        if self.model_path != '':
            self._load_pretrain(args.model_path)
        # load model to gpu
        if not self.no_cuda:
            if len(self.gpu_ids) != 1: # multiple gpus
                self.model = torch.nn.DataParallel(self.model.cuda(self.first_gpu), self.gpu_ids)
            else:
                self.model = self.model.cuda(self.gpu_ids[0])
        # initialize optimizer; higher initial LR when cosine annealing
        # will decay it, a lower constant LR otherwise
        self.parameter = self.model.parameters()
        if self.no_scheduler == False:
            self.optimizer = optim.SGD(self.parameter, lr=0.1, weight_decay=1e-4)
            self.scheduler = CosineAnnealingLR(self.optimizer, self.epochs, eta_min=1e-3)
        else:
            self.optimizer = optim.SGD(self.parameter, lr=0.01, weight_decay=1e-4)
    def run(self):
        """Run the full training loop, snapshotting periodically and on best loss."""
        self.train_hist = {
            'loss': [],
            'per_epoch_time': [],
            'total_time': []
        }
        best_loss = 1000000000
        print('Training start!!')
        start_time = time.time()
        self.model.train()
        # When resuming, recover the starting epoch from the checkpoint
        # filename suffix (e.g. "..._123.pkl" -> 123).
        if self.model_path != '':
            start_epoch = self.model_path[-7:-4]
            if start_epoch[0] == '_':
                start_epoch = start_epoch[1:]
            start_epoch = int(start_epoch)
        else:
            start_epoch = 0
        for epoch in range(start_epoch, self.epochs):
            loss = self.train_epoch(epoch)
            # save snapshot
            if (epoch + 1) % self.snapshot_interval == 0:
                self._snapshot(epoch + 1)
                if loss < best_loss:
                    best_loss = loss
                    self._snapshot('best')
            # save tensorboard
            if self.writer:
                self.writer.add_scalar('Train Loss', self.train_hist['loss'][-1], epoch)
                self.writer.add_scalar('Learning Rate', self._get_lr(), epoch)
        # finish all epoch: always snapshot the final state
        self._snapshot(epoch + 1)
        if loss < best_loss:
            best_loss = loss
            self._snapshot('best')
        self.train_hist['total_time'].append(time.time() - start_time)
        print("Avg one epoch time: %.2f, total %d epochs time: %.2f" % (np.mean(self.train_hist['per_epoch_time']),
              self.epochs, self.train_hist['total_time'][0]))
        print("Training finish!... save training results")
    def train_epoch(self, epoch):
        """Train for one epoch and return the mean loss over its batches."""
        epoch_start_time = time.time()
        loss_buf = []
        train_pred = []
        train_true = []
        num_batch = int(len(self.train_loader.dataset) / self.batch_size)
        for iter, (pts, label) in enumerate(self.train_loader):
            # skip singleton batches (presumably because BatchNorm cannot
            # handle batch size 1 in training mode -- TODO confirm)
            if pts.size(0) == 1:
                continue
            if not self.no_cuda:
                pts = pts.cuda(self.first_gpu)
                label = label.cuda(self.first_gpu)
            # forward
            self.optimizer.zero_grad()
            output, _ = self.model(pts)
            # loss (DataParallel wraps the model, so reach through .module)
            if len(self.gpu_ids) != 1: # multiple gpus
                loss = self.model.module.get_loss(output, label)
            else:
                loss = self.model.get_loss(output, label)
            # backward
            loss.backward()
            self.optimizer.step()
            loss_buf.append(loss.detach().cpu().numpy())
            preds = output.max(dim=1)[1]
            train_true.append(label.view(-1).cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())
        # finish one epoch: step the LR scheduler and record statistics
        if self.no_scheduler == False:
            self.scheduler.step()
        epoch_time = time.time() - epoch_start_time
        self.train_hist['per_epoch_time'].append(epoch_time)
        self.train_hist['loss'].append(np.mean(loss_buf))
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        print("Epoch %d: Loss %.6f, train acc %.6f, train avg acc %.6f, time %.4fs" % (epoch+1,
              np.mean(loss_buf),
              metrics.accuracy_score(
                  train_true, train_pred),
              metrics.balanced_accuracy_score(
                  train_true, train_pred),
              epoch_time))
        return np.mean(loss_buf)
    def _snapshot(self, epoch):
        """Save model weights, stripping any DataParallel 'module.' key prefix."""
        state_dict = self.model.state_dict()
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for key, val in state_dict.items():
            if key[:6] == 'module':
                name = key[7:] # remove 'module.'
            else:
                name = key
            new_state_dict[name] = val
        save_dir = os.path.join(self.save_dir, self.dataset_name)
        torch.save(new_state_dict, save_dir + "_" + str(epoch) + '.pkl')
        print(f"Save model to {save_dir}_{str(epoch)}.pkl")
    def _load_pretrain(self, pretrain):
        """Load weights from *pretrain*, stripping any 'module.' key prefix."""
        state_dict = torch.load(pretrain, map_location='cpu')
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for key, val in state_dict.items():
            if key[:6] == 'module':
                name = key[7:] # remove 'module.'
            else:
                name = key
            new_state_dict[name] = val
        self.model.load_state_dict(new_state_dict)
        print(f"Load model from {pretrain}")
    def _get_lr(self, group=0):
        """Return the current learning rate of optimizer param group *group*."""
        return self.optimizer.param_groups[group]['lr']
| [
"sklearn.metrics.accuracy_score",
"time.strftime",
"numpy.mean",
"shutil.rmtree",
"os.path.join",
"torch.utils.data.DataLoader",
"dataset.Dataset",
"torch.load",
"os.path.exists",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"sys.exit",
"numpy.concatenate",
"tensorboardX.SummaryWriter",
"... | [((1399, 1437), 'os.path.join', 'os.path.join', (['snapshot_root', '"""models/"""'], {}), "(snapshot_root, 'models/')\n", (1411, 1437), False, 'import os\n'), ((2205, 2243), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'self.tboard_dir'}), '(log_dir=self.tboard_dir)\n', (2218, 2243), False, 'from tensorboardX import SummaryWriter\n'), ((2513, 2706), 'dataset.Dataset', 'Dataset', ([], {'root': 'args.dataset_root', 'dataset_name': 'args.dataset', 'split': '"""all"""', 'num_points': 'args.num_points', 'random_translate': '(True)', 'random_rotate': 'args.use_rotate', 'random_jitter': 'args.use_jitter'}), "(root=args.dataset_root, dataset_name=args.dataset, split='all',\n num_points=args.num_points, random_translate=True, random_rotate=args.\n use_rotate, random_jitter=args.use_jitter)\n", (2520, 2706), False, 'from dataset import Dataset\n'), ((2852, 2971), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.workers'}), '(self.train_dataset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers)\n', (2879, 2971), False, 'import torch\n'), ((3168, 3191), 'model.ClassificationNet', 'ClassificationNet', (['args'], {}), '(args)\n', (3185, 3191), False, 'from model import ClassificationNet\n'), ((4196, 4207), 'time.time', 'time.time', ([], {}), '()\n', (4205, 4207), False, 'import time\n'), ((5675, 5686), 'time.time', 'time.time', ([], {}), '()\n', (5684, 5686), False, 'import time\n'), ((7001, 7027), 'numpy.concatenate', 'np.concatenate', (['train_true'], {}), '(train_true)\n', (7015, 7027), True, 'import numpy as np\n'), ((7049, 7075), 'numpy.concatenate', 'np.concatenate', (['train_pred'], {}), '(train_pred)\n', (7063, 7075), True, 'import numpy as np\n'), ((7868, 7885), 'numpy.mean', 'np.mean', (['loss_buf'], {}), '(loss_buf)\n', (7875, 7885), True, 'import numpy as np\n'), ((8034, 8047), 'collections.OrderedDict', 
'OrderedDict', ([], {}), '()\n', (8045, 8047), False, 'from collections import OrderedDict\n'), ((8282, 8328), 'os.path.join', 'os.path.join', (['self.save_dir', 'self.dataset_name'], {}), '(self.save_dir, self.dataset_name)\n', (8294, 8328), False, 'import os\n'), ((8525, 8565), 'torch.load', 'torch.load', (['pretrain'], {'map_location': '"""cpu"""'}), "(pretrain, map_location='cpu')\n", (8535, 8565), False, 'import torch\n'), ((8635, 8648), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8646, 8648), False, 'from collections import OrderedDict\n'), ((2143, 2181), 'os.path.join', 'os.path.join', (['snapshot_root', '"""log.txt"""'], {}), "(snapshot_root, 'log.txt')\n", (2155, 2181), False, 'import os\n'), ((3724, 3778), 'torch.optim.SGD', 'optim.SGD', (['self.parameter'], {'lr': '(0.1)', 'weight_decay': '(0.0001)'}), '(self.parameter, lr=0.1, weight_decay=0.0001)\n', (3733, 3778), True, 'import torch.optim as optim\n'), ((3806, 3867), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'CosineAnnealingLR', (['self.optimizer', 'self.epochs'], {'eta_min': '(0.001)'}), '(self.optimizer, self.epochs, eta_min=0.001)\n', (3823, 3867), False, 'from torch.optim.lr_scheduler import CosineAnnealingLR\n'), ((3910, 3965), 'torch.optim.SGD', 'optim.SGD', (['self.parameter'], {'lr': '(0.01)', 'weight_decay': '(0.0001)'}), '(self.parameter, lr=0.01, weight_decay=0.0001)\n', (3919, 3965), True, 'import torch.optim as optim\n'), ((6830, 6841), 'time.time', 'time.time', ([], {}), '()\n', (6839, 6841), False, 'import time\n'), ((6961, 6978), 'numpy.mean', 'np.mean', (['loss_buf'], {}), '(loss_buf)\n', (6968, 6978), True, 'import numpy as np\n'), ((1561, 1590), 'os.path.exists', 'os.path.exists', (['self.save_dir'], {}), '(self.save_dir)\n', (1575, 1590), False, 'import os\n'), ((1608, 1634), 'os.makedirs', 'os.makedirs', (['self.save_dir'], {}), '(self.save_dir)\n', (1619, 1634), False, 'import os\n'), ((1927, 1958), 'os.path.exists', 'os.path.exists', (['self.tboard_dir'], 
{}), '(self.tboard_dir)\n', (1941, 1958), False, 'import os\n'), ((1976, 2004), 'os.makedirs', 'os.makedirs', (['self.tboard_dir'], {}), '(self.tboard_dir)\n', (1987, 2004), False, 'import os\n'), ((2039, 2069), 'shutil.rmtree', 'shutil.rmtree', (['self.tboard_dir'], {}), '(self.tboard_dir)\n', (2052, 2069), False, 'import shutil\n'), ((2086, 2114), 'os.makedirs', 'os.makedirs', (['self.tboard_dir'], {}), '(self.tboard_dir)\n', (2097, 2114), False, 'import os\n'), ((5291, 5302), 'time.time', 'time.time', ([], {}), '()\n', (5300, 5302), False, 'import time\n'), ((1223, 1250), 'time.strftime', 'time.strftime', (['"""%m%d%H%M%S"""'], {}), "('%m%d%H%M%S')\n", (1236, 1250), False, 'import time\n'), ((1778, 1806), 'shutil.rmtree', 'shutil.rmtree', (['self.save_dir'], {}), '(self.save_dir)\n', (1791, 1806), False, 'import shutil\n'), ((1827, 1853), 'os.makedirs', 'os.makedirs', (['self.save_dir'], {}), '(self.save_dir)\n', (1838, 1853), False, 'import os\n'), ((1896, 1907), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1904, 1907), False, 'import sys\n'), ((5389, 5431), 'numpy.mean', 'np.mean', (["self.train_hist['per_epoch_time']"], {}), "(self.train_hist['per_epoch_time'])\n", (5396, 5431), True, 'import numpy as np\n'), ((7261, 7278), 'numpy.mean', 'np.mean', (['loss_buf'], {}), '(loss_buf)\n', (7268, 7278), True, 'import numpy as np\n'), ((7369, 7415), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['train_true', 'train_pred'], {}), '(train_true, train_pred)\n', (7391, 7415), True, 'import sklearn.metrics as metrics\n'), ((7600, 7655), 'sklearn.metrics.balanced_accuracy_score', 'metrics.balanced_accuracy_score', (['train_true', 'train_pred'], {}), '(train_true, train_pred)\n', (7631, 7655), True, 'import sklearn.metrics as metrics\n')] |
import numpy as np
def identity(x):
    """Identity link: return *x* unchanged (a no-op transform)."""
    return x


def _identity_inverse(x):
    """Inverse of the identity link (also a no-op)."""
    return x


identity.inverse = _identity_inverse


def logit(x):
    """Logit link: map probabilities in (0, 1) to log-odds units."""
    return np.log(x / (1 - x))


def _logit_inverse(x):
    """Inverse logit (sigmoid): map log-odds back to probabilities."""
    return 1 / (1 + np.exp(-x))


logit.inverse = _logit_inverse
"numpy.exp",
"numpy.log"
] | [((290, 309), 'numpy.log', 'np.log', (['(x / (1 - x))'], {}), '(x / (1 - x))\n', (296, 309), True, 'import numpy as np\n'), ((345, 355), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (351, 355), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# http://pointclouds.org/documentation/tutorials/planar_segmentation.php#planar-segmentation
import pcl
import numpy as np
import random
# int main (int argc, char** argv)
# {
# pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
#
# // Fill in the cloud data
# cloud->width = 15;
# cloud->height = 1;
# cloud->points.resize (cloud->width * cloud->height);
#
# // Generate the data
# for (size_t i = 0; i < cloud->points.size (); ++i)
# {
# cloud->points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
# cloud->points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
# cloud->points[i].z = 1.0;
# }
#
# // Set a few outliers
# cloud->points[0].z = 2.0;
# cloud->points[3].z = -2.0;
# cloud->points[6].z = 4.0;
###
# Build a small synthetic cloud: 15 points near the z = 1 plane.
cloud = pcl.PointCloud()
points = np.zeros((15, 3), dtype=np.float32)
RAND_MAX = 1024.0
for i in range(0, 15):
    points[i][0] = 1024 * random.random () / (RAND_MAX + 1.0)
    points[i][1] = 1024 * random.random () / (RAND_MAX + 1.0)
    points[i][2] = 1.0

# Set a few outliers off the plane so RANSAC has points to reject.
points[0][2] = 2.0
points[3][2] = -2.0
points[6][2] = 4.0
cloud.from_array(points)

# std::cerr << "Point cloud data: " << cloud->points.size () << " points" << std::endl;
# for (size_t i = 0; i < cloud->points.size (); ++i)
#     std::cerr << "    " << cloud->points[i].x << " "
#                         << cloud->points[i].y << " "
#                         << cloud->points[i].z << std::endl;
#
print ('Point cloud data: ' + str(cloud.size) + ' points')
for i in range(0, cloud.size):
    print ('x: ' + str(cloud[i][0]) + ', y : ' + str(cloud[i][1]) + ', z : ' + str(cloud[i][2]))

# pcl::ModelCoefficients::Ptr coefficients (new pcl::ModelCoefficients);
# pcl::PointIndices::Ptr inliers (new pcl::PointIndices);
# // Create the segmentation object
# pcl::SACSegmentation<pcl::PointXYZ> seg;
# // Optional
# seg.setOptimizeCoefficients (true);
# // Mandatory
# seg.setModelType (pcl::SACMODEL_PLANE);
# seg.setMethodType (pcl::SAC_RANSAC);
# seg.setDistanceThreshold (0.01);
#
# seg.setInputCloud (cloud);
# seg.segment (*inliers, *coefficients);
###
# http://www.pcl-users.org/pcl-SACMODEL-CYLINDER-is-not-working-td4037530.html
# NG?
# seg = cloud.make_segmenter()
# seg.set_optimize_coefficients(True)
# seg.set_model_type(pcl.SACMODEL_NORMAL_PLANE)
# seg.set_method_type(pcl.SAC_RANSAC)
# seg.set_distance_threshold(0.01)
# indices, coefficients = seg.segment()

# RANSAC plane fit using the normals-based segmenter.
seg = cloud.make_segmenter_normals(ksearch=50)
seg.set_optimize_coefficients(True)
seg.set_model_type(pcl.SACMODEL_NORMAL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
seg.set_distance_threshold(0.01)
seg.set_normal_distance_weight(0.01)
seg.set_max_iterations(100)
indices, coefficients = seg.segment()

# if (inliers->indices.size () == 0)
# {
#     PCL_ERROR ("Could not estimate a planar model for the given dataset.");
#     return (-1);
# }
# std::cerr << "Model coefficients: " << coefficients->values[0] << " "
#                                     << coefficients->values[1] << " "
#                                     << coefficients->values[2] << " "
#                                     << coefficients->values[3] << std::endl;
###
if len(indices) == 0:
    print('Could not estimate a planar model for the given dataset.')
    exit(0)

# coefficients are the plane parameters (a, b, c, d) of ax + by + cz + d = 0
print('Model coefficients: ' + str(coefficients[0]) + ' ' + str(coefficients[1]) + ' ' + str(coefficients[2]) + ' ' + str(coefficients[3]))

# std::cerr << "Model inliers: " << inliers->indices.size () << std::endl;
# for (size_t i = 0; i < inliers->indices.size (); ++i)
#     std::cerr << inliers->indices[i] << "    " << cloud->points[inliers->indices[i]].x << " "
#                                                << cloud->points[inliers->indices[i]].y << " "
#                                                << cloud->points[inliers->indices[i]].z << std::endl;
###
print('Model inliers: ' + str(len(indices)))
for i in range(0, len(indices)):
    print (str(indices[i]) + ', x: ' + str(cloud[indices[i]][0]) + ', y : ' + str(cloud[indices[i]][1]) + ', z : ' + str(cloud[indices[i]][2]))
| [
"random.random",
"pcl.PointCloud",
"numpy.zeros"
] | [((807, 823), 'pcl.PointCloud', 'pcl.PointCloud', ([], {}), '()\n', (821, 823), False, 'import pcl\n'), ((834, 869), 'numpy.zeros', 'np.zeros', (['(15, 3)'], {'dtype': 'np.float32'}), '((15, 3), dtype=np.float32)\n', (842, 869), True, 'import numpy as np\n'), ((937, 952), 'random.random', 'random.random', ([], {}), '()\n', (950, 952), False, 'import random\n'), ((999, 1014), 'random.random', 'random.random', ([], {}), '()\n', (1012, 1014), False, 'import random\n')] |
#!/usr/bin/env python3
"""
Tool for automated capturing of EM traces. EMcap can send commands to the target device for starting and stopping
operations using a simple communication protocol over either a serial connection or over TCP.
"""
import numpy as np
import sys
import socket
import os
import signal
import logging
import struct
import binascii
import osmosdr
import argparse
import serial
import subprocess
from gnuradio import blocks
from gnuradio import gr
from gnuradio import uhd
from time import sleep
from datetime import datetime
from emma.utils.socketwrapper import SocketWrapper
from scipy.signal import hilbert
from scipy import fftpack
from emma.emcap.online_client import EMCapOnlineClient
from collections import defaultdict
from emma.emcap.sdr import SDR
from emma.emcap.types import *
from emma.emcap.ttywrapper import TTYWrapper
from emma.utils.utils import binary_to_hex
# Log DEBUG and above to stdout so capture progress is visible on the console.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# Module-level logger, named after this module per the logging convention.
logger = logging.getLogger(__name__)
hilbert3 = lambda x: hilbert(x, fftpack.next_fast_len(len(x)))[:len(x)]
def handler(signum, frame):
    """SIGINT handler: acknowledge the interrupt and terminate the process.

    Uses sys.exit (which raises SystemExit) instead of the site-injected
    exit() builtin, which is not guaranteed to exist in all interpreter
    setups (e.g. under -S or in frozen executables).
    """
    print("Got CTRL+C")
    sys.exit(0)
signal.signal(signal.SIGINT, handler)
def reset_usrp():
    """Issue a hardware reset to the attached USRP B2xx via UHD's fx3 utility,
    printing the utility's (stdout, stderr) output."""
    print("Resetting USRP")
    proc = subprocess.Popen(
        ["/usr/lib/uhd/utils/b2xx_fx3_utils", "--reset-device"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    print(proc.communicate())
# EMCap class: wait for signal and start capturing using a SDR
class EMCap:
def __init__(self, args):
# Determine ctrl socket type
self.ctrl_socket_type = None
if args.ctrl == 'serial':
self.ctrl_socket_type = CtrlType.SERIAL
elif args.ctrl == 'udp':
self.ctrl_socket_type = CtrlType.UDP
# Set up data socket
self.data_socket = SocketWrapper(socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM), ('127.0.0.1', 3884), self.cb_data)
self.online = args.online
# Set up sockets
if self.ctrl_socket_type == CtrlType.DOMAIN:
unix_domain_socket = '/tmp/emma.socket'
self.clear_domain_socket(unix_domain_socket)
self.ctrl_socket = SocketWrapper(socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM), unix_domain_socket, self.cb_ctrl)
elif self.ctrl_socket_type == CtrlType.UDP:
self.ctrl_socket = SocketWrapper(socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM), ('10.0.0.1', 3884), self.cb_ctrl)
elif self.ctrl_socket_type == CtrlType.SERIAL:
self.ctrl_socket = TTYWrapper("/dev/ttyUSB0", self.cb_ctrl)
else:
logger.error("Unknown ctrl_socket_type")
exit(1)
if self.online is not None:
try:
self.emma_client = EMCapOnlineClient()
self.emma_client.connect(self.online, 3885)
except Exception as e:
print(e)
exit(1)
self.sdr_args = {'hw': args.hw, 'samp_rate': args.sample_rate, 'freq': args.frequency, 'gain': args.gain, 'ds_mode': args.ds_mode, 'agc': args.agc}
self.sdr = SDR(**self.sdr_args)
self.store = False
self.stored_plaintext = []
self.stored_key = []
self.stored_data = []
self.trace_set = []
self.plaintexts = []
self.keys = []
self.preprocessed = []
self.preprocessed_keys = []
self.preprocessed_plaintexts = []
self.limit_counter = 0
self.limit = args.limit
self.compress = args.compress
self.args = args
if self.sdr.hw == 'usrp':
self.wait_num_chunks = 0
else:
self.wait_num_chunks = 50 # Bug in rtl-sdr?
self.global_meta = {
"core:datatype": "cf32_le",
"core:version": "0.0.1",
"core:license": "CC0",
"core:hw": self.sdr.hw,
"core:sample_rate": self.sdr.samp_rate,
"core:author": "<NAME>"
}
self.capture_meta = {
"core:sample_start": 0,
"core:frequency": self.sdr.freq,
"core:datetime": str(datetime.utcnow()),
}
def clear_domain_socket(self, address):
try:
os.unlink(address)
except OSError:
if os.path.exists(address):
raise
def cb_timeout(self):
logger.warning("Timeout on capture, skipping...")
self.sdr.stop()
def cb_data(self, client_socket, client_address, data):
self.stored_data.append(data)
return len(data)
def cb_ctrl(self, client_socket, client_address, data):
logger.log(logging.NOTSET, "Control packet: %s" % binary_to_hex(data))
if len(data) < 5:
# Not enough for TLV
return 0
else:
pkt_type, payload_len = struct.unpack(">BI", data[0:5])
payload = data[5:]
if len(payload) < payload_len:
return 0 # Not enough for payload
else:
self.process_ctrl_packet(pkt_type, payload)
# Send ack
if self.ctrl_socket_type == CtrlType.SERIAL:
client_socket.write(b"k")
else:
client_socket.sendall("k")
return payload_len + 5
def parse_ies(self, payload):
while len(payload) >= 5:
# Extract IE header
ie_type, ie_len = struct.unpack(">BI", payload[0:5])
payload = payload[5:]
# Extract IE data
ie = payload[0:ie_len]
payload = payload[ie_len:]
logger.debug("IE type %d of len %d: %s" % (ie_type, ie_len, binary_to_hex(ie)))
# Determine what to do with IE
if ie_type == InformationElementType.PLAINTEXT:
self.stored_plaintext = [byte_value for byte_value in ie]
elif ie_type == InformationElementType.KEY:
self.stored_key = [byte_value for byte_value in ie]
else:
logger.warning("Unknown IE type: %d" % ie_type)
def preprocess(self, trace_set, plaintexts, keys):
all_traces = []
key_set = defaultdict(set)
pt_set = defaultdict(set)
for i, trace in enumerate(trace_set):
if len(trace) < 16384:
print("--skip")
continue
trace = trace[8192:16384]
trace = np.square(np.abs(np.fft.fft(trace)))
all_traces.append(trace)
# Check keys and plaintexts
num_key_bytes = keys.shape[1]
for j in range(0, num_key_bytes):
key_set[j].add(keys[i][j])
pt_set[j].add(plaintexts[i][j])
if len(key_set[j]) != 1 or len(pt_set[j]) != 1:
print("Keys or plaintexts not equal at index %d" % j)
print(key_set)
print(pt_set)
exit(1)
all_traces = np.array(all_traces)
self.preprocessed.append(np.mean(all_traces, axis=0))
self.preprocessed_plaintexts.append(plaintexts[0])
self.preprocessed_keys.append(keys[0])
def save(self, trace_set, plaintexts, keys, ciphertexts=None):
filename = str(datetime.utcnow()).replace(" ", "_").replace(".", "_").replace(":", "-")
output_dir = self.args.output_dir
if self.args.preprocess:
self.preprocess(trace_set, plaintexts, keys)
if len(self.preprocessed) >= self.args.traces_per_set:
logger.info("Dumping %d preprocessed traces to file" % len(self.preprocessed))
np.save(os.path.join(output_dir, "%s_traces.npy" % filename), np.array(self.preprocessed))
np.save(os.path.join(output_dir, "%s_textin.npy" % filename), np.array(self.preprocessed_plaintexts))
np.save(os.path.join(output_dir, "%s_knownkey.npy" % filename), np.array(self.preprocessed_keys))
self.preprocessed = []
self.preprocessed_plaintexts = []
self.preprocessed_keys = []
else:
logger.info("Dumping %d traces to file" % len(self.trace_set))
np.save(os.path.join(output_dir, "%s_traces.npy" % filename), trace_set)
np.save(os.path.join(output_dir, "%s_textin.npy" % filename), plaintexts)
np.save(os.path.join(output_dir, "%s_knownkey.npy" % filename), keys)
if self.compress:
logger.info("Calling emcap-compress...")
subprocess.call(['/usr/bin/python', 'emcap-compress.py', os.path.join(output_dir, "%s_traces.npy" % filename)])
def process_ctrl_packet(self, pkt_type, payload):
    """Handle one packet from the supplicant's control channel.

    SIGNAL_START arms the SDR and spin-waits until enough sample chunks
    have arrived; on timeout the SDR is assumed crashed, torn down,
    re-instantiated, and the capture is retried recursively.
    SIGNAL_END stops the SDR and turns the buffered chunks into one trace,
    which is streamed to a remote EMMA instance or saved to disk.

    Args:
        pkt_type: a ``CtrlPacketType`` value (SIGNAL_START / SIGNAL_END).
        payload: raw packet payload (information elements on SIGNAL_START).
    """
    if pkt_type == CtrlPacketType.SIGNAL_START:
        logger.debug("Starting for payload: %s" % binary_to_hex(payload))
        self.parse_ies(payload)
        self.sdr.start()
        # Spinlock until data
        timeout = 3
        current_time = 0.0
        while len(self.stored_data) <= self.wait_num_chunks:
            sleep(0.0001)
            current_time += 0.0001
            if current_time >= timeout:
                logger.warning("Timeout while waiting for data. Did the SDR crash? Reinstantiating...")
                del self.sdr
                self.data_socket.socket.close()
                self.data_socket = SocketWrapper(socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM), ('127.0.0.1', 3884), self.cb_data)
                self.data_socket.start()
                self.sdr = SDR(**self.sdr_args)
                self.process_ctrl_packet(pkt_type, payload)
                # FIX: leave the stale spin loop once the recursive retry
                # returns; without this return, current_time is still past
                # the timeout, so the loop would reinstantiate the SDR on
                # every subsequent iteration.
                return
    elif pkt_type == CtrlPacketType.SIGNAL_END:
        # self.sdr.sdr_source.stop()
        self.sdr.stop()
        self.sdr.wait()
        logger.debug("Stopped after receiving %d chunks" % len(self.stored_data))
        # sleep(0.5)
        # logger.debug("After sleep we have %d chunks" % len(self.stored_data))
        # Successful capture (no errors or timeouts)
        if len(self.stored_data) > 0:  # We have more than 1 chunk
            # FIX: np.fromstring is deprecated (and rejected for binary input
            # by recent NumPy); np.frombuffer is the drop-in replacement and
            # yields the same complex64 trace.
            np_data = np.frombuffer(b"".join(self.stored_data), dtype=np.complex64)
            self.trace_set.append(np_data)
            self.plaintexts.append(self.stored_plaintext)
            self.keys.append(self.stored_key)
            if len(self.trace_set) >= self.args.traces_per_set:
                assert(len(self.trace_set) == len(self.plaintexts))
                assert(len(self.trace_set) == len(self.keys))
                np_trace_set = np.array(self.trace_set)
                np_plaintexts = np.array(self.plaintexts, dtype=np.uint8)
                np_keys = np.array(self.keys, dtype=np.uint8)
                if self.online is not None:  # Stream online
                    self.emma_client.send(np_trace_set, np_plaintexts, None, np_keys, None)
                else:  # Save to disk
                    if not self.args.dry:
                        # Write metadata to sigmf file
                        # if sigmf
                        # with open(test_meta_path, 'w') as f:
                        #     test_sigmf = SigMFFile(data_file=test_data_path, global_info=copy.deepcopy(self.global_meta))
                        #     test_sigmf.add_capture(0, metadata=capture_meta)
                        #     test_sigmf.dump(f, pretty=True)
                        # elif chipwhisperer:
                        self.save(np_trace_set, np_plaintexts, np_keys)
                    else:
                        print("Dry run! Not saving.")
                    # NOTE(review): the counter/limit handling applies to the
                    # offline path only, as in the original control flow.
                    self.limit_counter += len(self.trace_set)
                    if self.limit_counter >= self.limit:
                        print("Done")
                        exit(0)
                # Clear results
                self.trace_set = []
                self.plaintexts = []
                self.keys = []
        # Clear
        self.stored_data = []
        self.stored_plaintext = []
def capture(self, to_skip=0, timeout=1.0):
    """Run the acquisition loop until the supplicant disconnects.

    Starts both listener threads, then blocks — polling the control
    listener once a second — until that thread terminates, i.e. the
    supplicant closed the control channel.
    """
    # Begin listening on both channels.
    for listener in (self.data_socket, self.ctrl_socket):
        listener.start()
    # Poll the control listener until it dies.
    while True:
        self.ctrl_socket.join(timeout=1.0)
        if not self.ctrl_socket.is_alive():
            break
    logging.info("Supplicant disconnected on control channel. Stopping...")
def _build_arg_parser():
    """Build the EMCAP command-line parser (flags, defaults and help
    strings identical to the original)."""
    parser = argparse.ArgumentParser(description='EMCAP')
    parser.add_argument('hw', type=str, choices=['usrp', 'hackrf', 'rtlsdr'], help='SDR capture hardware')
    parser.add_argument('ctrl', type=str, choices=['serial', 'udp'], help='Controller type')
    parser.add_argument('--sample-rate', type=int, default=4000000, help='Sample rate')
    parser.add_argument('--frequency', type=float, default=64e6, help='Capture frequency')
    parser.add_argument('--gain', type=float, default=50, help='RX gain')
    parser.add_argument('--traces-per-set', type=int, default=256, help='Number of traces per set')
    parser.add_argument('--limit', type=int, default=256*400, help='Limit number of traces')
    parser.add_argument('--output-dir', dest="output_dir", type=str, default="/tmp/", help='Output directory to store samples')
    parser.add_argument('--online', type=str, default=None, help='Stream samples to remote EMMA instance at <IP address> for online processing.')
    parser.add_argument('--dry', default=False, action='store_true', help='Do not save to disk.')
    parser.add_argument('--ds-mode', default=False, action='store_true', help='Direct sampling mode.')
    parser.add_argument('--agc', default=False, action='store_true', help='Automatic Gain Control.')
    parser.add_argument('--compress', default=False, action='store_true', help='Compress using emcap-compress.')
    parser.add_argument('--preprocess', default=False, action='store_true', help='Preprocess before storing')  # TODO integrate into emcap.py
    return parser


def main():
    """Entry point: parse the known CLI arguments and run a capture session."""
    cli_args, _unknown = _build_arg_parser().parse_known_args()
    EMCap(cli_args).capture()


if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"os.unlink",
"socket.socket",
"collections.defaultdict",
"datetime.datetime.utcnow",
"numpy.mean",
"os.path.join",
"emma.emcap.ttywrapper.TTYWrapper",
"emma.utils.utils.binary_to_hex",
"numpy.fft.fft",
"os.path.exists",
"subprocess.Popen",
"struct.unpack",
"emma.... | [((899, 958), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (918, 958), False, 'import logging\n'), ((968, 995), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (985, 995), False, 'import logging\n'), ((1137, 1174), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'handler'], {}), '(signal.SIGINT, handler)\n', (1150, 1174), False, 'import signal\n'), ((1231, 1356), 'subprocess.Popen', 'subprocess.Popen', (["['/usr/lib/uhd/utils/b2xx_fx3_utils', '--reset-device']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['/usr/lib/uhd/utils/b2xx_fx3_utils', '--reset-device'],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n", (1247, 1356), False, 'import subprocess\n'), ((12623, 12667), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""EMCAP"""'}), "(description='EMCAP')\n", (12646, 12667), False, 'import argparse\n'), ((3099, 3119), 'emma.emcap.sdr.SDR', 'SDR', ([], {}), '(**self.sdr_args)\n', (3102, 3119), False, 'from emma.emcap.sdr import SDR\n'), ((6183, 6199), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (6194, 6199), False, 'from collections import defaultdict\n'), ((6217, 6233), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (6228, 6233), False, 'from collections import defaultdict\n'), ((6983, 7003), 'numpy.array', 'np.array', (['all_traces'], {}), '(all_traces)\n', (6991, 7003), True, 'import numpy as np\n'), ((12524, 12595), 'logging.info', 'logging.info', (['"""Supplicant disconnected on control channel. Stopping..."""'], {}), "('Supplicant disconnected on control channel. 
Stopping...')\n", (12536, 12595), False, 'import logging\n'), ((1801, 1861), 'socket.socket', 'socket.socket', ([], {'family': 'socket.AF_INET', 'type': 'socket.SOCK_DGRAM'}), '(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n', (1814, 1861), False, 'import socket\n'), ((4220, 4238), 'os.unlink', 'os.unlink', (['address'], {}), '(address)\n', (4229, 4238), False, 'import os\n'), ((4828, 4859), 'struct.unpack', 'struct.unpack', (['""">BI"""', 'data[0:5]'], {}), "('>BI', data[0:5])\n", (4841, 4859), False, 'import struct\n'), ((5435, 5469), 'struct.unpack', 'struct.unpack', (['""">BI"""', 'payload[0:5]'], {}), "('>BI', payload[0:5])\n", (5448, 5469), False, 'import struct\n'), ((7037, 7064), 'numpy.mean', 'np.mean', (['all_traces'], {'axis': '(0)'}), '(all_traces, axis=0)\n', (7044, 7064), True, 'import numpy as np\n'), ((2165, 2226), 'socket.socket', 'socket.socket', ([], {'family': 'socket.AF_UNIX', 'type': 'socket.SOCK_STREAM'}), '(family=socket.AF_UNIX, type=socket.SOCK_STREAM)\n', (2178, 2226), False, 'import socket\n'), ((2759, 2778), 'emma.emcap.online_client.EMCapOnlineClient', 'EMCapOnlineClient', ([], {}), '()\n', (2776, 2778), False, 'from emma.emcap.online_client import EMCapOnlineClient\n'), ((4120, 4137), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4135, 4137), False, 'from datetime import datetime\n'), ((4278, 4301), 'os.path.exists', 'os.path.exists', (['address'], {}), '(address)\n', (4292, 4301), False, 'import os\n'), ((4677, 4696), 'emma.utils.utils.binary_to_hex', 'binary_to_hex', (['data'], {}), '(data)\n', (4690, 4696), False, 'from emma.utils.utils import binary_to_hex\n'), ((8212, 8264), 'os.path.join', 'os.path.join', (['output_dir', "('%s_traces.npy' % filename)"], {}), "(output_dir, '%s_traces.npy' % filename)\n", (8224, 8264), False, 'import os\n'), ((8297, 8349), 'os.path.join', 'os.path.join', (['output_dir', "('%s_textin.npy' % filename)"], {}), "(output_dir, '%s_textin.npy' % filename)\n", (8309, 8349), False, 
'import os\n'), ((8383, 8437), 'os.path.join', 'os.path.join', (['output_dir', "('%s_knownkey.npy' % filename)"], {}), "(output_dir, '%s_knownkey.npy' % filename)\n", (8395, 8437), False, 'import os\n'), ((9081, 9094), 'time.sleep', 'sleep', (['(0.0001)'], {}), '(0.0001)\n', (9086, 9094), False, 'from time import sleep\n'), ((2359, 2420), 'socket.socket', 'socket.socket', ([], {'family': 'socket.AF_INET', 'type': 'socket.SOCK_STREAM'}), '(family=socket.AF_INET, type=socket.SOCK_STREAM)\n', (2372, 2420), False, 'import socket\n'), ((2542, 2582), 'emma.emcap.ttywrapper.TTYWrapper', 'TTYWrapper', (['"""/dev/ttyUSB0"""', 'self.cb_ctrl'], {}), "('/dev/ttyUSB0', self.cb_ctrl)\n", (2552, 2582), False, 'from emma.emcap.ttywrapper import TTYWrapper\n'), ((6448, 6465), 'numpy.fft.fft', 'np.fft.fft', (['trace'], {}), '(trace)\n', (6458, 6465), True, 'import numpy as np\n'), ((7655, 7707), 'os.path.join', 'os.path.join', (['output_dir', "('%s_traces.npy' % filename)"], {}), "(output_dir, '%s_traces.npy' % filename)\n", (7667, 7707), False, 'import os\n'), ((7709, 7736), 'numpy.array', 'np.array', (['self.preprocessed'], {}), '(self.preprocessed)\n', (7717, 7736), True, 'import numpy as np\n'), ((7762, 7814), 'os.path.join', 'os.path.join', (['output_dir', "('%s_textin.npy' % filename)"], {}), "(output_dir, '%s_textin.npy' % filename)\n", (7774, 7814), False, 'import os\n'), ((7816, 7854), 'numpy.array', 'np.array', (['self.preprocessed_plaintexts'], {}), '(self.preprocessed_plaintexts)\n', (7824, 7854), True, 'import numpy as np\n'), ((7880, 7934), 'os.path.join', 'os.path.join', (['output_dir', "('%s_knownkey.npy' % filename)"], {}), "(output_dir, '%s_knownkey.npy' % filename)\n", (7892, 7934), False, 'import os\n'), ((7936, 7968), 'numpy.array', 'np.array', (['self.preprocessed_keys'], {}), '(self.preprocessed_keys)\n', (7944, 7968), True, 'import numpy as np\n'), ((8821, 8843), 'emma.utils.utils.binary_to_hex', 'binary_to_hex', (['payload'], {}), '(payload)\n', (8834, 
8843), False, 'from emma.utils.utils import binary_to_hex\n'), ((9597, 9617), 'emma.emcap.sdr.SDR', 'SDR', ([], {}), '(**self.sdr_args)\n', (9600, 9617), False, 'from emma.emcap.sdr import SDR\n'), ((5681, 5698), 'emma.utils.utils.binary_to_hex', 'binary_to_hex', (['ie'], {}), '(ie)\n', (5694, 5698), False, 'from emma.utils.utils import binary_to_hex\n'), ((8605, 8657), 'os.path.join', 'os.path.join', (['output_dir', "('%s_traces.npy' % filename)"], {}), "(output_dir, '%s_traces.npy' % filename)\n", (8617, 8657), False, 'import os\n'), ((9424, 9484), 'socket.socket', 'socket.socket', ([], {'family': 'socket.AF_INET', 'type': 'socket.SOCK_DGRAM'}), '(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n', (9437, 9484), False, 'import socket\n'), ((10677, 10701), 'numpy.array', 'np.array', (['self.trace_set'], {}), '(self.trace_set)\n', (10685, 10701), True, 'import numpy as np\n'), ((10738, 10779), 'numpy.array', 'np.array', (['self.plaintexts'], {'dtype': 'np.uint8'}), '(self.plaintexts, dtype=np.uint8)\n', (10746, 10779), True, 'import numpy as np\n'), ((10810, 10845), 'numpy.array', 'np.array', (['self.keys'], {'dtype': 'np.uint8'}), '(self.keys, dtype=np.uint8)\n', (10818, 10845), True, 'import numpy as np\n'), ((7263, 7280), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (7278, 7280), False, 'from datetime import datetime\n')] |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
import logging
import cv2
import numpy as np
from .operators import register_op, BaseOperator
from .op_helper import jaccard_overlap
logger = logging.getLogger(__name__)
__all__ = ['PadBatch', 'RandomShape', 'PadMultiScaleTest', 'Gt2YoloTarget']
@register_op
class PadBatch(BaseOperator):
    """
    Zero-pad every image in a batch to a common shape whose height and
    width are divisible by a stride. Image layout must be 'CHW'.

    Args:
        pad_to_stride (int): if `pad_to_stride > 0`, pad with zeros so
            height and width are divisible by `pad_to_stride`; 0 disables
            padding entirely.
        use_padded_im_info (bool): when True, overwrite the first two
            entries of each sample's 'im_info' with the padded size.
    """

    def __init__(self, pad_to_stride=0, use_padded_im_info=True):
        super(PadBatch, self).__init__()
        self.pad_to_stride = pad_to_stride
        self.use_padded_im_info = use_padded_im_info

    def __call__(self, samples, context=None):
        """Pad all samples in place and return the batch.

        Args:
            samples (list): a batch of samples, each a dict with 'image'.
        """
        stride = self.pad_to_stride
        if stride == 0:
            return samples
        # Largest (C, H, W) over the whole batch.
        max_shape = np.array([data['image'].shape for data in samples]).max(axis=0)
        if stride > 0:
            # Round H and W up to the next multiple of the stride.
            max_shape[1] = int(np.ceil(max_shape[1] / stride) * stride)
            max_shape[2] = int(np.ceil(max_shape[2] / stride) * stride)
        for sample in samples:
            img = sample['image']
            channels, height, width = img.shape[:]
            # Copy the image into the top-left corner of a zero canvas.
            canvas = np.zeros((channels, max_shape[1], max_shape[2]), dtype=np.float32)
            canvas[:, :height, :width] = img
            sample['image'] = canvas
            if self.use_padded_im_info:
                sample['im_info'][:2] = max_shape[1:3]
        return samples
@register_op
class RandomShape(BaseOperator):
    """
    Resize a whole batch to one randomly chosen square size. When
    random_inter is True, the interpolation algorithm is also drawn at
    random from [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA,
    cv2.INTER_CUBIC, cv2.INTER_LANCZOS4]; otherwise cv2.INTER_NEAREST
    is always used.

    Args:
        sizes (list): candidate target sizes, one is chosen per batch.
        random_inter (bool): whether to randomize interpolation, default True... (original default is False).
    """

    def __init__(self, sizes=[], random_inter=False):
        super(RandomShape, self).__init__()
        self.sizes = sizes
        self.random_inter = random_inter
        if random_inter:
            self.interps = [
                cv2.INTER_NEAREST,
                cv2.INTER_LINEAR,
                cv2.INTER_AREA,
                cv2.INTER_CUBIC,
                cv2.INTER_LANCZOS4,
            ]
        else:
            self.interps = []

    def __call__(self, samples, context=None):
        # One target size (and possibly interpolation) for the whole batch.
        target = np.random.choice(self.sizes)
        if self.random_inter:
            interp = np.random.choice(self.interps)
        else:
            interp = cv2.INTER_NEAREST
        for sample in samples:
            img = sample['image']
            height, width = img.shape[:2]
            sample['image'] = cv2.resize(
                img,
                None,
                None,
                fx=float(target) / width,
                fy=float(target) / height,
                interpolation=interp)
        return samples
@register_op
class PadMultiScaleTest(BaseOperator):
    """
    Zero-pad image(s) so height and width are divisible by a stride, for
    multi-scale testing. Accepts either a single sample dict or a
    one-element batch; only batch size 1 is supported.

    Args:
        pad_to_stride (int): if `pad_to_stride > 0`, pad zeros to make
            height and width divisible by `pad_to_stride`; 0 is a no-op.
    """

    def __init__(self, pad_to_stride=0):
        super(PadMultiScaleTest, self).__init__()
        self.pad_to_stride = pad_to_stride

    def __call__(self, samples, context=None):
        stride = self.pad_to_stride
        if stride == 0:
            return samples
        # Normalize to a list; remember whether the caller passed a batch.
        is_batch = isinstance(samples, Sequence)
        sample_list = samples if is_batch else [samples]
        if len(sample_list) != 1:
            raise ValueError("Batch size must be 1 when using multiscale test, "
                             "but now batch size is {}".format(len(sample_list)))
        for sample in sample_list:
            for key in sample.keys():
                # hard code: every key starting with 'image' holds a CHW array.
                if not key.startswith('image'):
                    continue
                img = sample[key]
                channels, height, width = img.shape
                # Round H and W up to the next multiple of the stride.
                padded_h = int(np.ceil(height / stride) * stride)
                padded_w = int(np.ceil(width / stride) * stride)
                canvas = np.zeros((channels, padded_h, padded_w), dtype=np.float32)
                canvas[:, :height, :width] = img
                sample[key] = canvas
                # Keep the matching im_info entry in sync with the new size.
                info_name = 'im_info' if key == 'image' else 'im_info_' + key
                sample[info_name][:2] = [padded_h, padded_w]
        return sample_list if is_batch else sample_list[0]
@register_op
class Gt2YoloTarget(BaseOperator):
    """
    Generate YOLOv3 targets from ground-truth data; this operator is only
    used in fine-grained YOLOv3 loss mode.

    For each FPN level (one per anchor mask / downsample ratio) a target
    tensor of shape (len(mask), 6 + num_classes, grid_h, grid_w) is built
    and stored back into the sample as 'target0', 'target1', ...
    """

    def __init__(self, anchors, anchor_masks, downsample_ratios,
                 num_classes=80):
        super(Gt2YoloTarget, self).__init__()
        self.anchors = anchors
        self.anchor_masks = anchor_masks
        self.downsample_ratios = downsample_ratios
        self.num_classes = num_classes

    def __call__(self, samples, context=None):
        assert len(self.anchor_masks) == len(self.downsample_ratios), \
            "anchor_masks', and 'downsample_ratios' should have same length."
        h, w = samples[0]['image'].shape[1:3]
        # Anchor sizes normalized to image size; gt boxes are compared in
        # the same normalized units below.
        an_hw = np.array(self.anchors) / np.array([[w, h]])
        for sample in samples:
            # im, gt_bbox, gt_class, gt_score = sample
            im = sample['image']
            gt_bbox = sample['gt_bbox']
            gt_class = sample['gt_class']
            gt_score = sample['gt_score']
            # One target tensor per pyramid level.
            for i, (
                    mask, downsample_ratio
            ) in enumerate(zip(self.anchor_masks, self.downsample_ratios)):
                grid_h = int(h / downsample_ratio)
                grid_w = int(w / downsample_ratio)
                target = np.zeros(
                    (len(mask), 6 + self.num_classes, grid_h, grid_w),
                    dtype=np.float32)
                for b in range(gt_bbox.shape[0]):
                    # gx, gy, gw, gh are normalized center/size coordinates
                    # (division by w/h above assumes this -- TODO confirm).
                    gx, gy, gw, gh = gt_bbox[b, :]
                    cls = gt_class[b]
                    score = gt_score[b]
                    # Skip degenerate boxes and zero-score ground truth.
                    if gw <= 0. or gh <= 0. or score <= 0.:
                        continue
                    # find best match anchor index (by IoU of centered boxes)
                    best_iou = 0.
                    best_idx = -1
                    for an_idx in range(an_hw.shape[0]):
                        iou = jaccard_overlap(
                            [0., 0., gw, gh],
                            [0., 0., an_hw[an_idx, 0], an_hw[an_idx, 1]])
                        if iou > best_iou:
                            best_iou = iou
                            best_idx = an_idx
                    # gtbox should be regressed in this layer if best match
                    # anchor index is in the anchor mask of this layer
                    if best_idx in mask:
                        best_n = mask.index(best_idx)
                        # Grid cell containing the box center.
                        gi = int(gx * grid_w)
                        gj = int(gy * grid_h)
                        # x, y offsets within the cell; w, h as log-ratios
                        # against the matched anchor; channel 4 is a size-
                        # dependent loss scale.
                        target[best_n, 0, gj, gi] = gx * grid_w - gi
                        target[best_n, 1, gj, gi] = gy * grid_h - gj
                        target[best_n, 2, gj, gi] = np.log(
                            gw * w / self.anchors[best_idx][0])
                        target[best_n, 3, gj, gi] = np.log(
                            gh * h / self.anchors[best_idx][1])
                        target[best_n, 4, gj, gi] = 2.0 - gw * gh
                        # objectness records gt_score
                        target[best_n, 5, gj, gi] = score
                        # classification (one-hot at 6 + class index)
                        target[best_n, 6 + cls, gj, gi] = 1.
                sample['target{}'.format(i)] = target
        return samples
| [
"numpy.ceil",
"numpy.log",
"numpy.zeros",
"logging.getLogger",
"numpy.array",
"numpy.random.choice",
"cv2.resize"
] | [((967, 994), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (984, 994), False, 'import logging\n'), ((3546, 3574), 'numpy.random.choice', 'np.random.choice', (['self.sizes'], {}), '(self.sizes)\n', (3562, 3574), True, 'import numpy as np\n'), ((2346, 2408), 'numpy.zeros', 'np.zeros', (['(im_c, max_shape[1], max_shape[2])'], {'dtype': 'np.float32'}), '((im_c, max_shape[1], max_shape[2]), dtype=np.float32)\n', (2354, 2408), True, 'import numpy as np\n'), ((3592, 3622), 'numpy.random.choice', 'np.random.choice', (['self.interps'], {}), '(self.interps)\n', (3608, 3622), True, 'import numpy as np\n'), ((3883, 3955), 'cv2.resize', 'cv2.resize', (['im', 'None', 'None'], {'fx': 'scale_x', 'fy': 'scale_y', 'interpolation': 'method'}), '(im, None, None, fx=scale_x, fy=scale_y, interpolation=method)\n', (3893, 3955), False, 'import cv2\n'), ((6630, 6652), 'numpy.array', 'np.array', (['self.anchors'], {}), '(self.anchors)\n', (6638, 6652), True, 'import numpy as np\n'), ((6655, 6673), 'numpy.array', 'np.array', (['[[w, h]]'], {}), '([[w, h]])\n', (6663, 6673), True, 'import numpy as np\n'), ((1866, 1917), 'numpy.array', 'np.array', (["[data['image'].shape for data in samples]"], {}), "([data['image'].shape for data in samples])\n", (1874, 1917), True, 'import numpy as np\n'), ((2024, 2063), 'numpy.ceil', 'np.ceil', (['(max_shape[1] / coarsest_stride)'], {}), '(max_shape[1] / coarsest_stride)\n', (2031, 2063), True, 'import numpy as np\n'), ((2131, 2170), 'numpy.ceil', 'np.ceil', (['(max_shape[2] / coarsest_stride)'], {}), '(max_shape[2] / coarsest_stride)\n', (2138, 2170), True, 'import numpy as np\n'), ((5453, 5501), 'numpy.zeros', 'np.zeros', (['(im_c, max_h, max_w)'], {'dtype': 'np.float32'}), '((im_c, max_h, max_w), dtype=np.float32)\n', (5461, 5501), True, 'import numpy as np\n'), ((8614, 8656), 'numpy.log', 'np.log', (['(gw * w / self.anchors[best_idx][0])'], {}), '(gw * w / self.anchors[best_idx][0])\n', (8620, 8656), True, 'import 
numpy as np\n'), ((8738, 8780), 'numpy.log', 'np.log', (['(gh * h / self.anchors[best_idx][1])'], {}), '(gh * h / self.anchors[best_idx][1])\n', (8744, 8780), True, 'import numpy as np\n'), ((5261, 5292), 'numpy.ceil', 'np.ceil', (['(im_h / coarsest_stride)'], {}), '(im_h / coarsest_stride)\n', (5268, 5292), True, 'import numpy as np\n'), ((5369, 5400), 'numpy.ceil', 'np.ceil', (['(im_w / coarsest_stride)'], {}), '(im_w / coarsest_stride)\n', (5376, 5400), True, 'import numpy as np\n')] |
import numpy as np
from keras.models import Model
from data_unet import load_test_data, desired_size
from train_unet import preprocess, batch_size
import os
from skimage.io import imsave
from constants import mask_raw_path, get_unet
def _banner(message):
    """Print *message* framed by dashed separators (same output as before)."""
    print('-'*30)
    print(message)
    print('-'*30)


# Load the test images and bring them into the network's input format.
_banner('Loading and preprocessing test data...')
imgs_test, imgs_id_test = load_test_data()
imgs_test = preprocess(imgs_test)
# mean = np.mean(imgs_test)  # mean for data centering
# std = np.std(imgs_test)  # std for data normalization
# imgs_test -= mean
# imgs_test /= std

# Restore the trained U-Net weights.
_banner('Loading saved weights...')
model = get_unet()
model.load_weights('unet.h5')

# Predict segmentation masks and persist the raw array.
_banner('Predicting masks on test data...')
imgs_mask_test = model.predict(imgs_test, verbose=1, batch_size=batch_size)
np.save('imgs_mask_test.npy', imgs_mask_test)

# Write one PNG per test image, named after its id.
_banner('Saving predicted masks to files...')
if not os.path.exists(mask_raw_path):
    os.mkdir(mask_raw_path)
mask_size = (desired_size, desired_size)
for mask_img, image_id in zip(imgs_mask_test, imgs_id_test):
    print(image_id)
    imsave(os.path.join(mask_raw_path, str(image_id) + '.png'), mask_img[:, :, 0])
| [
"os.mkdir",
"numpy.save",
"os.path.exists",
"train_unet.preprocess",
"data_unet.load_test_data",
"constants.get_unet"
] | [((336, 352), 'data_unet.load_test_data', 'load_test_data', ([], {}), '()\n', (350, 352), False, 'from data_unet import load_test_data, desired_size\n'), ((365, 386), 'train_unet.preprocess', 'preprocess', (['imgs_test'], {}), '(imgs_test)\n', (375, 386), False, 'from train_unet import preprocess, batch_size\n'), ((609, 619), 'constants.get_unet', 'get_unet', ([], {}), '()\n', (617, 619), False, 'from constants import mask_raw_path, get_unet\n'), ((797, 842), 'numpy.save', 'np.save', (['"""imgs_mask_test.npy"""', 'imgs_mask_test'], {}), "('imgs_mask_test.npy', imgs_mask_test)\n", (804, 842), True, 'import numpy as np\n'), ((928, 957), 'os.path.exists', 'os.path.exists', (['mask_raw_path'], {}), '(mask_raw_path)\n', (942, 957), False, 'import os\n'), ((963, 986), 'os.mkdir', 'os.mkdir', (['mask_raw_path'], {}), '(mask_raw_path)\n', (971, 986), False, 'import os\n')] |
from keras.models import load_model
from time import sleep
from keras.preprocessing.image import img_to_array
from keras.preprocessing import image
import cv2
import numpy as np
# Haar cascade for face localization, CNN for emotion classification.
face_classifier = cv2.CascadeClassifier(
    r'C:\Python37\Projects\Live Project\haarcascade_frontalface_default.xml')
classifier = load_model(
    r'C:\Python37\Projects\Live Project\Emotion_little_vgg.h5')
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
cap = cv2.VideoCapture(0)  # default webcam
while True:
    # Grab a single frame of video.
    ret, frame = cap.read()
    labels = []
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detections = face_classifier.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in detections:
        # Outline the detected face and cut out a 48x48 grayscale patch.
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        face_patch = cv2.resize(gray[y:y + h, x:x + w], (48, 48),
                                interpolation=cv2.INTER_AREA)
        if np.sum([face_patch]) != 0:
            # Scale to [0, 1], add a batch axis and classify the patch.
            roi = img_to_array(face_patch.astype('float') / 255.0)
            roi = np.expand_dims(roi, axis=0)
            scores = classifier.predict(roi)[0]
            cv2.putText(frame, class_labels[scores.argmax()], (x, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
        else:
            cv2.putText(frame, 'No Face Found', (20, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
    cv2.imshow('Emotion Detector', frame)
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| [
"keras.models.load_model",
"numpy.sum",
"cv2.putText",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"numpy.expand_dims",
"cv2.VideoCapture",
"cv2.rectangle",
"keras.preprocessing.image.img_to_array",
"cv2.CascadeClassifier",
"cv2.destroyAllWindows",
"cv2.resize"
] | [((197, 305), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""C:\\\\Python37\\\\Projects\\\\Live Project\\\\haarcascade_frontalface_default.xml"""'], {}), "(\n 'C:\\\\Python37\\\\Projects\\\\Live Project\\\\haarcascade_frontalface_default.xml'\n )\n", (218, 305), False, 'import cv2\n'), ((311, 384), 'keras.models.load_model', 'load_model', (['"""C:\\\\Python37\\\\Projects\\\\Live Project\\\\Emotion_little_vgg.h5"""'], {}), "('C:\\\\Python37\\\\Projects\\\\Live Project\\\\Emotion_little_vgg.h5')\n", (321, 384), False, 'from keras.models import load_model\n'), ((459, 478), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (475, 478), False, 'import cv2\n'), ((1683, 1706), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1704, 1706), False, 'import cv2\n'), ((583, 622), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (595, 622), False, 'import cv2\n'), ((1574, 1611), 'cv2.imshow', 'cv2.imshow', (['"""Emotion Detector"""', 'frame'], {}), "('Emotion Detector', frame)\n", (1584, 1611), False, 'import cv2\n'), ((722, 782), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (735, 782), False, 'import cv2\n'), ((836, 896), 'cv2.resize', 'cv2.resize', (['roi_gray', '(48, 48)'], {'interpolation': 'cv2.INTER_AREA'}), '(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)\n', (846, 896), False, 'import cv2\n'), ((954, 972), 'numpy.sum', 'np.sum', (['[roi_gray]'], {}), '([roi_gray])\n', (960, 972), True, 'import numpy as np\n'), ((1046, 1063), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['roi'], {}), '(roi)\n', (1058, 1063), False, 'from keras.preprocessing.image import img_to_array\n'), ((1082, 1109), 'numpy.expand_dims', 'np.expand_dims', (['roi'], {'axis': '(0)'}), '(roi, axis=0)\n', (1096, 1109), True, 'import numpy as np\n'), ((1318, 1409), 'cv2.putText', 
'cv2.putText', (['frame', 'label', 'label_position', 'cv2.FONT_HERSHEY_SIMPLEX', '(2)', '(0, 255, 0)', '(3)'], {}), '(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 2, (0, \n 255, 0), 3)\n', (1329, 1409), False, 'import cv2\n'), ((1455, 1549), 'cv2.putText', 'cv2.putText', (['frame', '"""No Face Found"""', '(20, 60)', 'cv2.FONT_HERSHEY_SIMPLEX', '(2)', '(0, 255, 0)', '(3)'], {}), "(frame, 'No Face Found', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 2,\n (0, 255, 0), 3)\n", (1466, 1549), False, 'import cv2\n'), ((1619, 1633), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1630, 1633), False, 'import cv2\n')] |
import os
import warnings
from pathlib import Path
try:
import mne
except ImportError:
print(
"You need to install toeplitzlda with neuro extras to run examples "
"with real EEG data, i.e. pip install toeplitzlda[neuro]"
)
exit(1)
import numpy as np
import pandas as pd
from blockmatrix import linear_taper
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import make_pipeline
from toeplitzlda.classification.toeplitzlda import EpochsVectorizer
from toeplitzlda.usup_replay.llp import LearningFromLabelProportions
from toeplitzlda.usup_replay.visual_speller import (
VisualMatrixSpellerLLPDataset,
VisualMatrixSpellerMixDataset,
seq_labels_from_epoch,
)
mne.set_log_level("INFO")
np.seterr(divide="ignore")  # this does nothing for some reason
# We get a division by 0 warning for calculating theoretical classifier sureness,
# but we do not care as nan is a valid result
warnings.filterwarnings("ignore", category=RuntimeWarning)
# Pandas deprecation of append, but the proposed concat does not work...?
warnings.filterwarnings("ignore", category=FutureWarning)
# Maps the stimulus event id used in the recordings to the symbol shown on
# the speller matrix ('<' presumably means backspace/undo -- TODO confirm).
EVENT_ID_TO_LETTER_DICT = {
    1: "A",
    2: "B",
    4: "C",
    6: "D",
    7: "E",
    9: "F",
    10: "G",
    11: "H",
    12: "I",
    13: "J",
    15: "K",
    16: "L",
    17: "M",
    19: "N",
    20: "O",
    21: "P",
    22: "Q",
    24: "R",
    25: "S",
    26: "T",
    27: "U",
    29: "V",
    30: "W",
    31: "X",
    32: "Y",
    34: "Z",
    35: " ",
    37: ".",
    39: ",",
    40: "!",
    41: "?",
    42: "<",
}
# This was the sentence each subject was instructed to spell in each run
GT_LLP = "<NAME> IM KOMPLETT VERWAHRLOSTEN TAXI QUER DURCH FREIBURG."
GT_MIX = "<NAME> IM TAXI QUER DURCH DAS "
# All spellable symbols in dictionary order; classifier scores are indexed
# by position in this list throughout the file.
spellable = list(EVENT_ID_TO_LETTER_DICT.values())
def predicted_letter(epo, pred, ax=None, gt=None, agg=np.mean):
    """Aggregate per-epoch classifier scores per symbol and pick the winner.

    For every symbol in ``spellable``, the scores of all epochs whose event
    label contains that symbol are combined with ``agg`` (mean by default).
    Optionally draws a mean-centered bar plot on ``ax``, coloring the
    winning bar red and shading the ground-truth symbol green.

    Returns the predicted symbol and the per-symbol score vector.
    """
    epo.selection = np.array(range(len(epo)))
    cum_scores = np.zeros(len(spellable))
    for idx, symbol in enumerate(spellable):
        cum_scores[idx] = agg(pred[epo[symbol].selection])
    winner = np.argmax(cum_scores)
    if ax is not None:
        bars = ax.bar(spellable, cum_scores - np.mean(cum_scores))
        bars[winner].set_facecolor("red")
        if gt is not None:
            gt_pos = spellable.index(gt)
            ax.axvspan(gt_pos - 0.5, gt_pos + 0.5, alpha=0.3, color="green")
    return spellable[winner], cum_scores
def predicted_letter_over_trial(epo, pred, ax=None, gt=None):
    """Track the running per-symbol score over a trial's epochs.

    Builds cumulative score and highlight-count histories per symbol, then
    derives the prediction after the last epoch, the earliest epoch at which
    the final prediction became stable, the first epoch at which every
    symbol had been highlighted at least once, and target-flash counts.

    Returns a 5-tuple:
        (pred_letter, earliest_correct, first_valid,
         num_target_flashes, mandatory_target_flashes)
    where earliest_correct/num_target_flashes are NaN if the final
    prediction is wrong, and a sentinel ("#", nan, inf, nan, nan) is
    returned for trials with fewer than 15 epochs.
    """
    epo.selection = np.array(range(len(epo)))
    if len(epo) < 15:
        print("WARNING: not enough epochs? Invalid epochs")
        return "#", np.nan, np.inf, np.nan, np.nan
    cum_score_history = np.zeros((len(spellable), len(epo)))
    highlight_history = np.zeros((len(spellable), len(epo)))
    letter_sequencing = np.zeros_like(highlight_history)
    for ei in range(len(epo)):
        # Event labels are '/'-separated; entries from position 3 on are the
        # highlighted symbols ('#' acting as filler -- TODO confirm format).
        evs = list(epo[ei].event_id.keys())[0].split("/")  # [3:]
        letters = [e for e in evs[3:] if e != "#"]
        indices = [spellable.index(l) for l in letters]
        letter_sequencing[indices, ei] = 1
        # Carry the running totals forward, then add this epoch's scores.
        if ei > 0:
            highlight_history[:, ei] = highlight_history[:, ei - 1]
            cum_score_history[:, ei] = cum_score_history[:, ei - 1]
        highlight_history[indices, ei] += 1
        cum_score_history[indices, ei] += pred[ei]
    # Best symbol per epoch by average score; -1 until every symbol has been
    # highlighted at least once (division by 0 yields nan/inf otherwise).
    high_score_history = np.argmax(cum_score_history / highlight_history, axis=0)
    high_score_history[np.any(highlight_history == 0, axis=0)] = -1
    first_valid = np.where(high_score_history >= 0)[0][0]
    if ax is not None:
        # Plot each symbol's running average; ground truth fully opaque.
        alphas = [0.2, 1]
        for si, s in enumerate(spellable):
            a = alphas[1] if s == gt else alphas[0]
            ax.plot(
                range(len(epo)),
                cum_score_history[si, :] / highlight_history[si, :],
                label=s,
                alpha=a,
            )
        ax.axvline(first_valid, c="k", linestyle=":")
    pred_letter = spellable[high_score_history[-1]]
    correct = pred_letter == gt
    t_let_seq = letter_sequencing[spellable.index(gt)]
    if correct:
        # First epoch from which the final prediction never changes again.
        earliest_correct = (
            1 + len(epo) - np.where(~(high_score_history == high_score_history[-1])[::-1])[0][0]
        )
        num_target_flashes = int(t_let_seq[:earliest_correct].sum())
    else:
        earliest_correct = np.nan
        num_target_flashes = np.nan
    mandatory_target_flashes = int(t_let_seq[:first_valid].sum())
    return (
        pred_letter,
        earliest_correct,
        first_valid,
        num_target_flashes,
        mandatory_target_flashes,
    )
# %%
# Choose dataset here
ds = "LLP"
if ds == "LLP":
    dataset = VisualMatrixSpellerLLPDataset()
    n_subs = 13
    GT = GT_LLP
elif ds == "Mix":
    dataset = VisualMatrixSpellerMixDataset()
    n_subs = 12
    GT = GT_MIX
else:
    raise ValueError("invalid ds code")
# Result table; one row per (subject, block, classifier, letter) is
# appended during the evaluation loop.
df = pd.DataFrame(
    columns=[
        "subject",
        "block",
        "clf",
        "nth_letter",
        "correct",
        "correct_sofar",
        "auc",
        "earliest_correct",
        "first_valid",
        "num_target_flashes",
        "mandatory_target_flashes",
    ]
)
num_letters = 63 if ds == "LLP" else 35  # Maximum number of letters is 63
letter_memory = np.inf  # Keep this many letters in memory for training
# Preprocessing configuration.
sr = 40        # target sampling rate in Hz (must have a matching ntimes below)
lowpass = 8    # upper edge of the band-pass filter in Hz
use_base = False    # apply baseline correction (-0.2, 0) after epoching
use_chdrop = False  # drop frontal channels Fp1/Fp2
use_jump = False    # use the 6 jumping-mean interval features instead of all samples
use_neutral_jump = False
use_each_best = False
enable_early_stopping_simulation = False
enable_calculate_postfix = False
# Number of time-domain features per channel; depends on sampling rate
# (or is fixed to 6 when jumping means are used).
if use_jump:
    ntimes = 6
elif sr == 20:
    ntimes = 14
elif sr == 100:
    ntimes = 66
elif sr == 8:
    ntimes = 6
elif sr == 40:
    ntimes = 27
elif sr == 200:
    ntimes = 131
elif sr == 1000:
    ntimes = 651
else:
    raise ValueError("invalid sampling rate")
# Epoch cache (per dataset) and output directory (encodes the settings).
epo_cache_root = Path("/") / "tmp" / ds
os.makedirs(epo_cache_root, exist_ok=True)
suffix = "_jump" if use_jump else ""
suffix += "_base" if use_base else ""
suffix += "_chdrop" if use_chdrop else ""
basedir = Path.home() / f"results_usup" / f"{lowpass}hz_lowpass_{sr}Hz_sr_{ntimes}tD{suffix}"
os.makedirs(
    basedir,
    exist_ok=True,
)
def get_llp_epochs(sub, block, use_cache=True):
    """Load (and optionally cache) preprocessed epochs for one subject/block.

    :param sub: subject number (1-based).
    :param block: block number (1-based).
    :param use_cache: if True, read/write a cached ``-epo.fif`` file under
        ``epo_cache_root`` instead of re-running preprocessing.
    :return: the preprocessed epochs object for this subject and block.
    """
    dataset.subject_list = [sub]
    epo_file = epo_cache_root / f"sub_{sub}_block_{block}-epo.fif"
    if use_cache and epo_file.is_file():
        print("WARNING: Loading cached data.")
        return mne.epochs.read_epochs(epo_file)
    print("Preprocessing data.")
    # FIX: dropped the original ``try: ... except Exception as e: raise e``
    # wrapper — it handled nothing and only rewrote the traceback origin.
    # ATTENTION: TODO Unify Lowpass handling -> also in select_ival and n_times param
    epochs = dataset.load_epochs(block_nrs=[block], fband=[0.5, lowpass], sampling_rate=sr)
    if use_base:
        epochs.apply_baseline((-0.2, 0))
    if use_chdrop:
        # Remove the frontal channels most affected by eye artifacts.
        epochs.drop_channels("Fp1")
        epochs.drop_channels("Fp2")
    if use_cache:
        epochs.save(epo_file)
    return epochs
# Main evaluation loop: for each subject/block, simulate letter-by-letter
# unsupervised decoding with both classifier variants and log one result row
# per (subject, block, classifier, letter) into ``df``.
for sub in range(1, 1 + n_subs):
    for block in range(1, 4):
        print(f"Subject {sub}, block {block}")
        print(f" Loading Data")
        # NOTE(review): get_llp_epochs (as defined above) returns a single
        # epochs object, so this tuple unpacking looks wrong — confirm the
        # intended return signature before relying on it.
        epochs, _ = get_llp_epochs(sub, block, use_cache=False)
        if epochs is None:
            continue
        print(f" Starting evaluation")
        # These are the original time intervals used for averaging
        jm = [
            [0.05, 0.12],
            [0.12, 0.20],
            [0.20, 0.28],
            [0.28, 0.38],
            [0.38, 0.53],
            [0.53, 0.70],
        ]
        vec_args = dict(jumping_mean_ivals=jm) if use_jump else dict(select_ival=[0.05, 0.70])
        if use_each_best:
            # Give each classifier the vectorization it performs best with.
            vec_args_slda = dict(jumping_mean_ivals=jm)
            vec_args_toep = dict(select_ival=[0.05, 0.70])
        else:
            vec_args_slda = vec_args
            vec_args_toep = vec_args
        # Two competing pipelines: plain sLDA vs. Toeplitz-structured LDA.
        clfs = dict(
            slda=make_pipeline(
                EpochsVectorizer(
                    mne_scaler=mne.decoding.Scaler(epochs.info, scalings="mean"),
                    **vec_args_slda,
                ),
                LearningFromLabelProportions(
                    n_channels=len(epochs.ch_names),
                    n_times=6 if use_each_best or use_jump else ntimes,
                ),
            ),
            toep_lda=make_pipeline(
                EpochsVectorizer(
                    mne_scaler=mne.decoding.Scaler(epochs.info, scalings="mean"),
                    **vec_args_toep,
                ),
                LearningFromLabelProportions(
                    n_times=ntimes,
                    n_channels=len(epochs.ch_names),
                    toeplitz_time=True,
                    taper_time=linear_taper,
                ),
            ),
        )
        correct_letters = {k: 0 for k in clfs}
        aucs = {k: list() for k in clfs}
        clf_state = {k: None for k in clfs}
        for let_i in range(1, 1 + num_letters):
            gt_letter = GT[let_i - 1]
            # Sliding training window over the most recent ``letter_memory`` letters.
            beg, end = max(1, let_i - letter_memory + 1), let_i
            letters = [f"Letter_{i}" for i in range(beg, end + 1)]
            epo_all = epochs[letters]
            if let_i > 1 and enable_early_stopping_simulation:
                # Train only on past letters, excluding the current one.
                epo_train = epochs[letters[:-1]]
            else:
                epo_train = epo_all
            s, l = seq_labels_from_epoch(epo_train)
            X = epo_train
            cur_epo = epochs[f"Letter_{let_i}"]
            _, cur_l = seq_labels_from_epoch(cur_epo)
            pred_letter = dict()
            for cli, ckey in enumerate(clfs):
                clf = clfs[ckey]
                if enable_calculate_postfix:
                    # Fit once on ALL epochs and reuse that classifier.
                    if clf_state[ckey] is None:
                        print("Training classifier on all epochs.")
                        X = epochs
                        seq, l = seq_labels_from_epoch(X)
                        clf.fit(epochs, seq)
                        clf_state[ckey] = clf
                    clf = clf_state[ckey]
                else:
                    clf.fit(X, s)
                cur_X = cur_epo
                pred = clf.decision_function(cur_X)
                (
                    pred_letter[ckey],
                    earliest_correct,
                    first_valid,
                    num_target_flashes,
                    mandatory_target_flashes,
                ) = predicted_letter_over_trial(cur_X, pred, gt=gt_letter)
                if let_i == 1:
                    # First letter: cap at 68, the number of epochs per letter.
                    earliest_correct = np.min([68, earliest_correct])
                if pred_letter[ckey] == gt_letter:
                    correct_letters[ckey] += 1
                auc = roc_auc_score(cur_l, pred)
                aucs[ckey].append(auc)
                row = dict(
                    subject=sub,
                    block=block,
                    clf=ckey,
                    nth_letter=let_i,
                    correct=pred_letter[ckey] == gt_letter,
                    correct_sofar=correct_letters[ckey],
                    earliest_correct=earliest_correct,
                    first_valid=first_valid,
                    num_target_flashes=num_target_flashes,
                    mandatory_target_flashes=mandatory_target_flashes,
                    auc=auc,
                )
                # FIX: DataFrame.append was deprecated and removed in pandas 2.0
                # (the original code even noted the FutureWarning); wrap the row
                # dict in a one-row DataFrame and concatenate.
                df = pd.concat([df, pd.DataFrame([row])], ignore_index=True)
            if not np.all(np.array(list(pred_letter.values())) == gt_letter):
                print(f'Using letters {beg}-{end} (target: "{gt_letter}"):')
                for k in clfs:
                    print(f'   {k.rjust(25)} predicted: "{pred_letter[k]}"')
                print("----------")
# Annotate the result table with the run configuration and save it.
df["sample_rate"] = sr
df["ntime_features"] = ntimes
df["letter_memory"] = letter_memory
df["lowpass"] = lowpass
df["early_stop_sim"] = enable_early_stopping_simulation
df["postfix_sim"] = enable_calculate_postfix
# FIX: the original f-string fused directory and file name into one sibling
# path; join properly so the CSV lands inside the results directory.
csv_name = basedir / f"{ds}_usup_toeplitz.csv"
df.to_csv(csv_name)
| [
"pathlib.Path.home",
"numpy.argmax",
"pathlib.Path",
"numpy.mean",
"pandas.DataFrame",
"numpy.zeros_like",
"toeplitzlda.usup_replay.visual_speller.VisualMatrixSpellerLLPDataset",
"mne.epochs.read_epochs",
"mne.set_log_level",
"toeplitzlda.usup_replay.visual_speller.seq_labels_from_epoch",
"sklea... | [((717, 742), 'mne.set_log_level', 'mne.set_log_level', (['"""INFO"""'], {}), "('INFO')\n", (734, 742), False, 'import mne\n'), ((744, 770), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (753, 770), True, 'import numpy as np\n'), ((936, 994), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (959, 994), False, 'import warnings\n'), ((1069, 1126), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (1092, 1126), False, 'import warnings\n'), ((4888, 5079), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['subject', 'block', 'clf', 'nth_letter', 'correct', 'correct_sofar', 'auc',\n 'earliest_correct', 'first_valid', 'num_target_flashes',\n 'mandatory_target_flashes']"}), "(columns=['subject', 'block', 'clf', 'nth_letter', 'correct',\n 'correct_sofar', 'auc', 'earliest_correct', 'first_valid',\n 'num_target_flashes', 'mandatory_target_flashes'])\n", (4900, 5079), True, 'import pandas as pd\n'), ((5829, 5871), 'os.makedirs', 'os.makedirs', (['epo_cache_root'], {'exist_ok': '(True)'}), '(epo_cache_root, exist_ok=True)\n', (5840, 5871), False, 'import os\n'), ((6085, 6120), 'os.makedirs', 'os.makedirs', (['basedir'], {'exist_ok': '(True)'}), '(basedir, exist_ok=True)\n', (6096, 6120), False, 'import os\n'), ((2090, 2111), 'numpy.argmax', 'np.argmax', (['cum_scores'], {}), '(cum_scores)\n', (2099, 2111), True, 'import numpy as np\n'), ((2822, 2854), 'numpy.zeros_like', 'np.zeros_like', (['highlight_history'], {}), '(highlight_history)\n', (2835, 2854), True, 'import numpy as np\n'), ((3379, 3435), 'numpy.argmax', 'np.argmax', (['(cum_score_history / highlight_history)'], {'axis': '(0)'}), '(cum_score_history / highlight_history, axis=0)\n', (3388, 3435), True, 'import numpy as np\n'), ((4676, 4707), 
'toeplitzlda.usup_replay.visual_speller.VisualMatrixSpellerLLPDataset', 'VisualMatrixSpellerLLPDataset', ([], {}), '()\n', (4705, 4707), False, 'from toeplitzlda.usup_replay.visual_speller import VisualMatrixSpellerLLPDataset, VisualMatrixSpellerMixDataset, seq_labels_from_epoch\n'), ((3459, 3497), 'numpy.any', 'np.any', (['(highlight_history == 0)'], {'axis': '(0)'}), '(highlight_history == 0, axis=0)\n', (3465, 3497), True, 'import numpy as np\n'), ((4772, 4803), 'toeplitzlda.usup_replay.visual_speller.VisualMatrixSpellerMixDataset', 'VisualMatrixSpellerMixDataset', ([], {}), '()\n', (4801, 4803), False, 'from toeplitzlda.usup_replay.visual_speller import VisualMatrixSpellerLLPDataset, VisualMatrixSpellerMixDataset, seq_labels_from_epoch\n'), ((5806, 5815), 'pathlib.Path', 'Path', (['"""/"""'], {}), "('/')\n", (5810, 5815), False, 'from pathlib import Path\n'), ((6001, 6012), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (6010, 6012), False, 'from pathlib import Path\n'), ((6385, 6417), 'mne.epochs.read_epochs', 'mne.epochs.read_epochs', (['epo_file'], {}), '(epo_file)\n', (6407, 6417), False, 'import mne\n'), ((3522, 3555), 'numpy.where', 'np.where', (['(high_score_history >= 0)'], {}), '(high_score_history >= 0)\n', (3530, 3555), True, 'import numpy as np\n'), ((9375, 9407), 'toeplitzlda.usup_replay.visual_speller.seq_labels_from_epoch', 'seq_labels_from_epoch', (['epo_train'], {}), '(epo_train)\n', (9396, 9407), False, 'from toeplitzlda.usup_replay.visual_speller import VisualMatrixSpellerLLPDataset, VisualMatrixSpellerMixDataset, seq_labels_from_epoch\n'), ((9510, 9540), 'toeplitzlda.usup_replay.visual_speller.seq_labels_from_epoch', 'seq_labels_from_epoch', (['cur_epo'], {}), '(cur_epo)\n', (9531, 9540), False, 'from toeplitzlda.usup_replay.visual_speller import VisualMatrixSpellerLLPDataset, VisualMatrixSpellerMixDataset, seq_labels_from_epoch\n'), ((2181, 2200), 'numpy.mean', 'np.mean', (['cum_scores'], {}), '(cum_scores)\n', (2188, 2200), True, 
'import numpy as np\n'), ((10765, 10791), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['cur_l', 'pred'], {}), '(cur_l, pred)\n', (10778, 10791), False, 'from sklearn.metrics import roc_auc_score\n'), ((4160, 4223), 'numpy.where', 'np.where', (['(~(high_score_history == high_score_history[-1])[::-1])'], {}), '(~(high_score_history == high_score_history[-1])[::-1])\n', (4168, 4223), True, 'import numpy as np\n'), ((10613, 10643), 'numpy.min', 'np.min', (['[68, earliest_correct]'], {}), '([68, earliest_correct])\n', (10619, 10643), True, 'import numpy as np\n'), ((9956, 9980), 'toeplitzlda.usup_replay.visual_speller.seq_labels_from_epoch', 'seq_labels_from_epoch', (['X'], {}), '(X)\n', (9977, 9980), False, 'from toeplitzlda.usup_replay.visual_speller import VisualMatrixSpellerLLPDataset, VisualMatrixSpellerMixDataset, seq_labels_from_epoch\n'), ((8018, 8067), 'mne.decoding.Scaler', 'mne.decoding.Scaler', (['epochs.info'], {'scalings': '"""mean"""'}), "(epochs.info, scalings='mean')\n", (8037, 8067), False, 'import mne\n'), ((8431, 8480), 'mne.decoding.Scaler', 'mne.decoding.Scaler', (['epochs.info'], {'scalings': '"""mean"""'}), "(epochs.info, scalings='mean')\n", (8450, 8480), False, 'import mne\n')] |
"""Plots Laplacian kernel used for edge-detector test."""
import argparse
import numpy
import matplotlib
matplotlib.use('agg')
import matplotlib.colors
from matplotlib import pyplot
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.plotting import plotting_utils
THIS_FIRST_MATRIX = numpy.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
])
THIS_SECOND_MATRIX = numpy.array([
[0, 1, 0],
[1, -6, 1],
[0, 1, 0]
])
KERNEL_MATRIX_3D = numpy.stack(
(THIS_FIRST_MATRIX, THIS_SECOND_MATRIX, THIS_FIRST_MATRIX), axis=-1
).astype(float)
THIS_COLOUR = numpy.array([27, 158, 119], dtype=float) / 255
THIS_COLOUR = matplotlib.colors.to_rgba(THIS_COLOUR, 0.5)
COLOUR_MAP_OBJECT = matplotlib.colors.ListedColormap([THIS_COLOUR])
ZERO_FONT_COLOUR = numpy.full(3, 0.)
POSITIVE_FONT_COLOUR = numpy.array([217, 95, 2], dtype=float) / 255
FONT_SIZE = 25
AXIS_LINE_WIDTH = 4
FIGURE_RESOLUTION_DPI = 600
pyplot.rc('font', size=FONT_SIZE)
pyplot.rc('axes', titlesize=FONT_SIZE)
pyplot.rc('axes', labelsize=FONT_SIZE)
pyplot.rc('axes', linewidth=AXIS_LINE_WIDTH)
pyplot.rc('xtick', labelsize=FONT_SIZE)
pyplot.rc('ytick', labelsize=FONT_SIZE)
pyplot.rc('legend', fontsize=FONT_SIZE)
pyplot.rc('figure', titlesize=FONT_SIZE)
OUTPUT_FILE_ARG_NAME = 'output_file_name'
OUTPUT_FILE_HELP_STRING = 'Path to output file (figure will be saved here).'
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_FILE_ARG_NAME, type=str, required=True,
help=OUTPUT_FILE_HELP_STRING)
def _plot_kernel_one_height(kernel_matrix_2d, axes_object):
    """Plots kernel at one height.

    M = number of rows in kernel
    N = number of columns in kernel

    :param kernel_matrix_2d: M-by-N numpy array of kernel values.
    :param axes_object: Will plot on these axes (instance of
        `matplotlib.axes._subplots.AxesSubplot`).
    """

    # Mask zero-valued cells so only non-zero weights are shaded.
    masked_kernel_matrix = numpy.ma.masked_where(
        kernel_matrix_2d == 0, kernel_matrix_2d)

    axes_object.imshow(
        masked_kernel_matrix, cmap=COLOUR_MAP_OBJECT,
        vmin=numpy.min(kernel_matrix_2d), vmax=numpy.max(kernel_matrix_2d),
        origin='upper'
    )
    axes_object.set_xticks([], [])
    axes_object.set_yticks([], [])

    # Write the integer kernel value at the centre of every grid cell.
    for (row_index, column_index), kernel_value in numpy.ndenumerate(
            kernel_matrix_2d):
        label_string = '{0:d}'.format(int(numpy.round(kernel_value)))
        font_colour = (
            ZERO_FONT_COLOUR if kernel_value == 0 else POSITIVE_FONT_COLOUR
        )

        axes_object.text(
            column_index, row_index, label_string, fontsize=FONT_SIZE,
            color=font_colour, horizontalalignment='center',
            verticalalignment='center')
def _run(output_file_name):
    """Plots Laplacian kernel used for edge-detector test.

    This is effectively the main method.

    :param output_file_name: See documentation at top of file.
    """

    num_heights = KERNEL_MATRIX_3D.shape[-1]

    figure_object, axes_object_matrix = plotting_utils.create_paneled_figure(
        num_rows=1, num_columns=num_heights, horizontal_spacing=0.1,
        vertical_spacing=0.1, shared_x_axis=False, shared_y_axis=False,
        keep_aspect_ratio=True)

    # One panel per height level of the 3-D kernel.
    for height_index in range(num_heights):
        _plot_kernel_one_height(
            kernel_matrix_2d=KERNEL_MATRIX_3D[..., height_index],
            axes_object=axes_object_matrix[0, height_index]
        )

    for height_index, panel_title in enumerate(
            ('Bottom height', 'Middle height', 'Top height')):
        axes_object_matrix[0, height_index].set_title(panel_title)

    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)
    print('Saving figure to: "{0:s}"...'.format(output_file_name))

    figure_object.savefig(
        output_file_name, dpi=FIGURE_RESOLUTION_DPI,
        pad_inches=0, bbox_inches='tight'
    )
    pyplot.close(figure_object)
if __name__ == '__main__':
    INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
    # Render the kernel figure at the user-supplied output path.
    _run(
        output_file_name=getattr(INPUT_ARG_OBJECT, OUTPUT_FILE_ARG_NAME)
    )
| [
"matplotlib.colors.to_rgba",
"numpy.full",
"numpy.stack",
"argparse.ArgumentParser",
"numpy.ma.masked_where",
"matplotlib.pyplot.close",
"gewittergefahr.plotting.plotting_utils.create_paneled_figure",
"numpy.min",
"matplotlib.use",
"numpy.array",
"matplotlib.pyplot.rc",
"gewittergefahr.gg_util... | [((106, 127), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (120, 127), False, 'import matplotlib\n'), ((309, 355), 'numpy.array', 'numpy.array', (['[[0, 0, 0], [0, 1, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 1, 0], [0, 0, 0]])\n', (320, 355), False, 'import numpy\n'), ((392, 439), 'numpy.array', 'numpy.array', (['[[0, 1, 0], [1, -6, 1], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, -6, 1], [0, 1, 0]])\n', (403, 439), False, 'import numpy\n'), ((651, 694), 'matplotlib.colors.to_rgba', 'matplotlib.colors.to_rgba', (['THIS_COLOUR', '(0.5)'], {}), '(THIS_COLOUR, 0.5)\n', (676, 694), False, 'import matplotlib\n'), ((715, 762), 'matplotlib.colors.ListedColormap', 'matplotlib.colors.ListedColormap', (['[THIS_COLOUR]'], {}), '([THIS_COLOUR])\n', (747, 762), False, 'import matplotlib\n'), ((783, 801), 'numpy.full', 'numpy.full', (['(3)', '(0.0)'], {}), '(3, 0.0)\n', (793, 801), False, 'import numpy\n'), ((934, 967), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""font"""'], {'size': 'FONT_SIZE'}), "('font', size=FONT_SIZE)\n", (943, 967), False, 'from matplotlib import pyplot\n'), ((968, 1006), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""axes"""'], {'titlesize': 'FONT_SIZE'}), "('axes', titlesize=FONT_SIZE)\n", (977, 1006), False, 'from matplotlib import pyplot\n'), ((1007, 1045), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""axes"""'], {'labelsize': 'FONT_SIZE'}), "('axes', labelsize=FONT_SIZE)\n", (1016, 1045), False, 'from matplotlib import pyplot\n'), ((1046, 1090), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""axes"""'], {'linewidth': 'AXIS_LINE_WIDTH'}), "('axes', linewidth=AXIS_LINE_WIDTH)\n", (1055, 1090), False, 'from matplotlib import pyplot\n'), ((1091, 1130), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""xtick"""'], {'labelsize': 'FONT_SIZE'}), "('xtick', labelsize=FONT_SIZE)\n", (1100, 1130), False, 'from matplotlib import pyplot\n'), ((1131, 1170), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""ytick"""'], {'labelsize': 'FONT_SIZE'}), 
"('ytick', labelsize=FONT_SIZE)\n", (1140, 1170), False, 'from matplotlib import pyplot\n'), ((1171, 1210), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""legend"""'], {'fontsize': 'FONT_SIZE'}), "('legend', fontsize=FONT_SIZE)\n", (1180, 1210), False, 'from matplotlib import pyplot\n'), ((1211, 1251), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""figure"""'], {'titlesize': 'FONT_SIZE'}), "('figure', titlesize=FONT_SIZE)\n", (1220, 1251), False, 'from matplotlib import pyplot\n'), ((1392, 1417), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1415, 1417), False, 'import argparse\n'), ((590, 630), 'numpy.array', 'numpy.array', (['[27, 158, 119]'], {'dtype': 'float'}), '([27, 158, 119], dtype=float)\n', (601, 630), False, 'import numpy\n'), ((824, 862), 'numpy.array', 'numpy.array', (['[217, 95, 2]'], {'dtype': 'float'}), '([217, 95, 2], dtype=float)\n', (835, 862), False, 'import numpy\n'), ((1914, 1976), 'numpy.ma.masked_where', 'numpy.ma.masked_where', (['(kernel_matrix_2d == 0)', 'kernel_matrix_2d'], {}), '(kernel_matrix_2d == 0, kernel_matrix_2d)\n', (1935, 1976), False, 'import numpy\n'), ((3134, 3327), 'gewittergefahr.plotting.plotting_utils.create_paneled_figure', 'plotting_utils.create_paneled_figure', ([], {'num_rows': '(1)', 'num_columns': 'num_heights', 'horizontal_spacing': '(0.1)', 'vertical_spacing': '(0.1)', 'shared_x_axis': '(False)', 'shared_y_axis': '(False)', 'keep_aspect_ratio': '(True)'}), '(num_rows=1, num_columns=num_heights,\n horizontal_spacing=0.1, vertical_spacing=0.1, shared_x_axis=False,\n shared_y_axis=False, keep_aspect_ratio=True)\n', (3170, 3327), False, 'from gewittergefahr.plotting import plotting_utils\n'), ((3697, 3771), 'gewittergefahr.gg_utils.file_system_utils.mkdir_recursive_if_necessary', 'file_system_utils.mkdir_recursive_if_necessary', ([], {'file_name': 'output_file_name'}), '(file_name=output_file_name)\n', (3743, 3771), False, 'from gewittergefahr.gg_utils import file_system_utils\n'), ((3972, 3999), 
'matplotlib.pyplot.close', 'pyplot.close', (['figure_object'], {}), '(figure_object)\n', (3984, 3999), False, 'from matplotlib import pyplot\n'), ((474, 559), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX, THIS_SECOND_MATRIX, THIS_FIRST_MATRIX)'], {'axis': '(-1)'}), '((THIS_FIRST_MATRIX, THIS_SECOND_MATRIX, THIS_FIRST_MATRIX), axis=-1\n )\n', (485, 559), False, 'import numpy\n'), ((2062, 2089), 'numpy.min', 'numpy.min', (['kernel_matrix_2d'], {}), '(kernel_matrix_2d)\n', (2071, 2089), False, 'import numpy\n'), ((2104, 2131), 'numpy.max', 'numpy.max', (['kernel_matrix_2d'], {}), '(kernel_matrix_2d)\n', (2113, 2131), False, 'import numpy\n'), ((2447, 2482), 'numpy.round', 'numpy.round', (['kernel_matrix_2d[i, j]'], {}), '(kernel_matrix_2d[i, j])\n', (2458, 2482), False, 'import numpy\n')] |
from os.path import isfile
from debug_tools import Debug
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__),'..'))
import numpy as np
from torchvision import transforms
import torch
from .sensation import Sensation
from .configure import config
from .AutoEncoder import AutoEncoder
from .DeltaTime import DeltaT
from torch_model_fit import Fit
from MasterConfig import Config as mconf
from MemoryManager import MemoryManager
import multiprocessing as mp
class Train(MemoryManager):
    """Retrains the AutoEncoder and the DeltaT model from recorded sensation data."""
    # Tag identifying the memory files this trainer consumes (shared with Sensation).
    memory_format:str =Sensation.memory_format
    log_title:str = f'train{memory_format}'
    def __init__(self,device:torch.device,debug_mode:bool=False) -> None:
        """Store the torch device and dtype and create the Fit training helper."""
        super().__init__(log_title=self.log_title, debug_mode=debug_mode)
        self.device = torch.device(device)
        self.dtype = config.training_dtype
        self.fit = Fit(self.log_title,debug_mode)
    def activation(self,shutdown:mp.Value,sleep:mp.Value) -> None:
        """Run one full training pass: AutoEncoder first, then DeltaT.

        :param shutdown: shared flag, forwarded to Fit.Train.
        :param sleep: shared flag, forwarded to Fit.Train.
        """
        # load and preprocess data for Training AutoEncoder
        names = os.listdir(config.data_folder)
        if len(names) ==0:
            self.warn('To train AutoEncoder data does not exist')
            return
        # File names are float timestamps; sort newest-first and keep only the
        # most recent config.train_video_use recordings.
        times = np.sort([float(i) for i in names])[::-1]
        # NOTE(review): str(float(name)) may not reproduce the original file
        # name byte-for-byte (e.g. trailing zeros) — confirm naming convention.
        names = [str(i) for i in times]
        uses = names[:config.train_video_use]
        deletes = names[config.train_video_use:]
        for i in deletes:
            # NOTE(review): called with the bare file name, while loading below
            # joins with config.data_folder — verify remove_file resolves paths.
            self.remove_file(i)
        data = np.concatenate([self.load_python_obj(os.path.join(config.data_folder,i)) for i in uses])
        data = self.preprocess(data)
        self.log(data.shape,debug_only=True)
        # load AutoEncoder
        model = AutoEncoder()
        model.encoder.load_state_dict(torch.load(config.encoder_params,map_location=self.device))
        model.decoder.load_state_dict(torch.load(config.decoder_params,map_location=self.device))
        # AutoEncoder settings
        criterion = torch.nn.MSELoss()
        optimizer = torch.optim.Adam(model.parameters(),lr=config.AE_lr)
        epochs = config.AE_epochs
        batch_size = config.AE_batch_size
        # Train (self-supervised reconstruction: inputs serve as their own targets)
        self.fit.Train(
            shutdown,sleep,
            model=model,
            epochs=epochs,
            batch_size=batch_size,
            optimizer=optimizer,
            criterion=criterion,
            device=self.device,
            train_x=data,
            train_y=data
        )
        torch.save(model.encoder.state_dict(),config.encoder_params)
        torch.save(model.decoder.state_dict(),config.decoder_params)
        self.log('trained AutoEncoder')
        # Free the training tensors before building the DeltaT dataset.
        del data,model
        self.release_system_memory()
        ## training delta time model
        # loading and preproessing dataset
        if not isfile(config.newestId_file):
            self.warn('To train DeltaTime data does not exist!')
            return None
        newest_id = self.load_python_obj(config.newestId_file)
        first_id = self.get_firstId(self.memory_format)
        ids = np.arange(first_id,newest_id)[:config.time_use]
        ids,data,times = self.load_memory(ids,return_time=True)
        datalen = data.shape[0]
        # Zero pairs: a fraction (config.zero_per) of samples paired with
        # themselves, labelled with a time delta of 0.
        zerolen = int(np.floor(datalen*config.zero_per))
        zero_idx = np.random.permutation(datalen)[:zerolen]
        zero_data = data[zero_idx]
        zero_ans = np.zeros(zerolen,dtype=times.dtype)
        # Shuffled pairs: each sample paired with a random other sample,
        # labelled with the absolute time difference between them.
        data_idx = np.random.permutation(datalen)
        data_sh = data[data_idx]
        deltatimes = np.abs(times - times[data_idx])
        # Mix zero pairs and shuffled pairs into one shuffled training set.
        data_idx = np.random.permutation((datalen+zerolen))
        data1 = np.concatenate([data,zero_data])[data_idx]
        data2 = np.concatenate([data_sh,zero_data])[data_idx]
        ans = np.concatenate([deltatimes,zero_ans])[data_idx]
        data1 = torch.from_numpy(data1).type(self.dtype)
        data2 = torch.from_numpy(data2).type(self.dtype)
        ans = torch.from_numpy(ans).type(self.dtype).unsqueeze(1)
        self.log(
            'data1:',data1.shape,
            'data2:',data2.shape,
            'ans:',ans.shape,
            debug_only=True
        )
        # load deltaT
        model = DeltaT()
        model.load_state_dict(torch.load(config.deltatime_params,map_location=self.device))
        # deltaT settings
        criterion = torch.nn.MSELoss()
        optimizer = torch.optim.Adam(model.parameters(),lr=config.DT_lr)
        epochs = config.DT_epochs
        batch_size = config.DT_batch_size
        # Train (regress the time delta from the pair of memory vectors)
        self.fit.Train(
            shutdown,sleep,
            model=model,
            epochs=epochs,
            batch_size=batch_size,
            optimizer=optimizer,
            criterion = criterion,
            device=self.device,
            train_x=[data1,data2],
            train_y=ans,
        )
        torch.save(model.state_dict(),config.deltatime_params)
        self.log('Trained DeltaTime')
        del data1,data2,ans,model
        self.release_system_memory()
        self.log('Train process was finished')
    def preprocess(self,data:np.ndarray) -> torch.Tensor:
        """Convert frames to channels-first float tensors scaled to [0, 1].

        NOTE(review): permute(0,3,2,1) also swaps the last two spatial axes
        (assuming (N, H, W, C) input, the result is (N, C, W, H)) — confirm
        this is intentional. TODO verify the expected input layout.
        """
        data = torch.from_numpy(data).permute(0,3,2,1)
        resizer = transforms.Resize(config.frame_size)
        data = resizer(data).type(self.dtype) / 255
        return data
| [
"torch.nn.MSELoss",
"torch.from_numpy",
"numpy.abs",
"os.path.dirname",
"torch.load",
"numpy.zeros",
"numpy.floor",
"os.path.isfile",
"numpy.arange",
"torch.device",
"numpy.random.permutation",
"torch_model_fit.Fit",
"os.path.join",
"os.listdir",
"numpy.concatenate",
"torchvision.trans... | [((107, 132), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (122, 132), False, 'import os\n'), ((778, 798), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (790, 798), False, 'import torch\n'), ((861, 892), 'torch_model_fit.Fit', 'Fit', (['self.log_title', 'debug_mode'], {}), '(self.log_title, debug_mode)\n', (864, 892), False, 'from torch_model_fit import Fit\n'), ((1037, 1067), 'os.listdir', 'os.listdir', (['config.data_folder'], {}), '(config.data_folder)\n', (1047, 1067), False, 'import os\n'), ((1933, 1951), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (1949, 1951), False, 'import torch\n'), ((3334, 3370), 'numpy.zeros', 'np.zeros', (['zerolen'], {'dtype': 'times.dtype'}), '(zerolen, dtype=times.dtype)\n', (3342, 3370), True, 'import numpy as np\n'), ((3389, 3419), 'numpy.random.permutation', 'np.random.permutation', (['datalen'], {}), '(datalen)\n', (3410, 3419), True, 'import numpy as np\n'), ((3474, 3505), 'numpy.abs', 'np.abs', (['(times - times[data_idx])'], {}), '(times - times[data_idx])\n', (3480, 3505), True, 'import numpy as np\n'), ((3525, 3565), 'numpy.random.permutation', 'np.random.permutation', (['(datalen + zerolen)'], {}), '(datalen + zerolen)\n', (3546, 3565), True, 'import numpy as np\n'), ((4271, 4289), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (4287, 4289), False, 'import torch\n'), ((5156, 5192), 'torchvision.transforms.Resize', 'transforms.Resize', (['config.frame_size'], {}), '(config.frame_size)\n', (5173, 5192), False, 'from torchvision import transforms\n'), ((1723, 1782), 'torch.load', 'torch.load', (['config.encoder_params'], {'map_location': 'self.device'}), '(config.encoder_params, map_location=self.device)\n', (1733, 1782), False, 'import torch\n'), ((1821, 1880), 'torch.load', 'torch.load', (['config.decoder_params'], {'map_location': 'self.device'}), '(config.decoder_params, map_location=self.device)\n', (1831, 1880), False, 
'import torch\n'), ((2762, 2790), 'os.path.isfile', 'isfile', (['config.newestId_file'], {}), '(config.newestId_file)\n', (2768, 2790), False, 'from os.path import isfile\n'), ((3019, 3049), 'numpy.arange', 'np.arange', (['first_id', 'newest_id'], {}), '(first_id, newest_id)\n', (3028, 3049), True, 'import numpy as np\n'), ((3185, 3220), 'numpy.floor', 'np.floor', (['(datalen * config.zero_per)'], {}), '(datalen * config.zero_per)\n', (3193, 3220), True, 'import numpy as np\n'), ((3239, 3269), 'numpy.random.permutation', 'np.random.permutation', (['datalen'], {}), '(datalen)\n', (3260, 3269), True, 'import numpy as np\n'), ((3582, 3615), 'numpy.concatenate', 'np.concatenate', (['[data, zero_data]'], {}), '([data, zero_data])\n', (3596, 3615), True, 'import numpy as np\n'), ((3641, 3677), 'numpy.concatenate', 'np.concatenate', (['[data_sh, zero_data]'], {}), '([data_sh, zero_data])\n', (3655, 3677), True, 'import numpy as np\n'), ((3701, 3739), 'numpy.concatenate', 'np.concatenate', (['[deltatimes, zero_ans]'], {}), '([deltatimes, zero_ans])\n', (3715, 3739), True, 'import numpy as np\n'), ((4162, 4223), 'torch.load', 'torch.load', (['config.deltatime_params'], {'map_location': 'self.device'}), '(config.deltatime_params, map_location=self.device)\n', (4172, 4223), False, 'import torch\n'), ((3766, 3789), 'torch.from_numpy', 'torch.from_numpy', (['data1'], {}), '(data1)\n', (3782, 3789), False, 'import torch\n'), ((3823, 3846), 'torch.from_numpy', 'torch.from_numpy', (['data2'], {}), '(data2)\n', (3839, 3846), False, 'import torch\n'), ((5098, 5120), 'torch.from_numpy', 'torch.from_numpy', (['data'], {}), '(data)\n', (5114, 5120), False, 'import torch\n'), ((1492, 1527), 'os.path.join', 'os.path.join', (['config.data_folder', 'i'], {}), '(config.data_folder, i)\n', (1504, 1527), False, 'import os\n'), ((3878, 3899), 'torch.from_numpy', 'torch.from_numpy', (['ans'], {}), '(ans)\n', (3894, 3899), False, 'import torch\n')] |
import os
import sys
import numpy as np
import pandas as pd
from .reader import open_csv_file, read_erbs_file, read_measurements_file
from .path_loss import Cost231, Cost231Hata, FlatEarth, OkumuraHata
from .path_loss import FreeSpace, Ecc33, CitySize, AreaKind
from .geo import Coordinate, distance_in_km, azimuth
from .grid import Grid, Point, loss_matrix_from_grid
from .enums import CitySize, AreaKind
def make_coord_matrix(grid):
    """Build and return the grid's coordinate matrix, printing progress.

    The rough runtime estimate (vertical cell count / 180 minutes) is printed
    before the potentially slow grid construction starts.
    """
    print('making coordinate grid')
    estimated_minutes = int(np.round(grid.vertical_cell_count/180))
    print('will take approx.', estimated_minutes, 'minutes')
    coordinate_matrix = grid.make_coord_grid()
    print('done')
    return coordinate_matrix
def compute_loss(method, distance):
    """Compute the path loss at *distance* using the named propagation model.

    Raises ValueError when *method* is not a recognized model name.
    """
    estimator_factories = {
        'free_space': lambda: FreeSpace(frequency=1800),
        'flat_earth': lambda: FlatEarth(
            frequency=1800,
            transmitter_height=100,
            receiver_height=1.5
        ),
        'okumura': lambda: OkumuraHata(
            frequency=1800,
            transmitter_height=100,
            receiver_height=1.5,
            city_size=CitySize.MEDIUM,
            area_kind=AreaKind.SUBURBAN
        ),
        'cost_hata': lambda: Cost231Hata(
            frequency=1800,
            transmitter_height=100,
            receiver_height=1.5,
            area_kind=AreaKind.SUBURBAN
        ),
        'ecc33': lambda: Ecc33(
            frequency=1800,
            transmitter_height=100,
            receiver_height=1.5,
        ),
    }
    factory = estimator_factories.get(method)
    if factory is None:
        raise ValueError('Invalid loss estimator')
    return factory().path_loss(distance)
def make_loss_matrix(method, station_coord, grid):
    """Compute the per-cell path loss from *station_coord* over *grid*.

    The propagation model is chosen by *method*; ValueError is raised for an
    unknown model name.
    """
    estimator_factories = {
        'free_space': lambda: FreeSpace(frequency=1800),
        'flat_earth': lambda: FlatEarth(
            frequency=1800,
            transmitter_height=100,
            receiver_height=1.5
        ),
        'okumura': lambda: OkumuraHata(
            frequency=1800,
            transmitter_height=100,
            receiver_height=1.5,
            city_size=CitySize.MEDIUM,
            area_kind=AreaKind.SUBURBAN
        ),
        'cost_hata': lambda: Cost231Hata(
            frequency=1800,
            transmitter_height=100,
            receiver_height=1.5,
            area_kind=AreaKind.SUBURBAN
        ),
        'ecc33': lambda: Ecc33(
            frequency=1800,
            transmitter_height=100,
            receiver_height=1.5,
        ),
    }
    factory = estimator_factories.get(method)
    if factory is None:
        raise ValueError('Invalid loss estimator')
    print('making loss matrix')
    loss_matrix = loss_matrix_from_grid(grid, station_coord, factory().path_loss)
    print('done')
    return loss_matrix
def read_coord_grid(matrix):
    """Convert a matrix of (lat, lon) pairs into a matrix of Coordinate objects."""
    return [[Coordinate(pair[0], pair[1]) for pair in line] for line in matrix]
def make_loss_vector(station_powers, dictionary):
    """Return the measured path loss per station as a numpy array.

    :param station_powers: transmit power (EIRP) of each station, in order.
    :param dictionary: measurement row with keys 'RSSI_1', 'RSSI_2', ...
        (1-based, matching the station order).
    :return: numpy array where entry i is station_powers[i] - RSSI_(i+1).
    """
    # FIX: the original loop ignored the enumerate value and re-indexed the
    # list; use the value directly in a comprehension.
    return np.array([power - dictionary['RSSI_' + str(index + 1)]
                     for index, power in enumerate(station_powers)])
def start():
    """Run the localization pipeline end to end.

    Reads the base-station table ('erbs.csv') and a measurements CSV (first
    CLI argument, defaulting to 'testLoc.csv'), builds a coordinate grid over
    the area of interest, computes (or loads cached) per-station path-loss
    matrices, then predicts each measurement's position as the grid cell whose
    modelled loss vector is closest (Euclidean norm) to the measured one.
    Writes '<method>_output.csv' and prints summary statistics.
    """
    # Measurements file can be supplied as the first CLI argument.
    if len(sys.argv) > 1:
        measurements_file = sys.argv[1]
    else:
        measurements_file = 'testLoc.csv'
    print('opening', measurements_file)
    method = 'cost_hata'
    print("Current directory is ", os.getcwd())
    erbs = open_csv_file('erbs.csv', read_erbs_file)
    measurements = open_csv_file(measurements_file, read_measurements_file)
    # Area of interest and grid resolution.
    top_left = Coordinate(-8.065, -34.91)
    bottom_right = Coordinate(-8.08, -34.887)
    resolution = 10
    grid = Grid(top_left, bottom_right, resolution)
    coord_matrix = make_coord_matrix(grid)
    loss_matrices = []
    erb_coords = []
    erb_power = []
    for index, erb in erbs.iterrows():
        # FIX: DataFrame.ix was removed from pandas; use the row Series that
        # iterrows() already yields.
        station = Coordinate(erb.lat, erb.lon)
        erb_coords.append(station)
        erb_power.append(erb.eirp)
        # Cache each station's loss matrix on disk to avoid recomputation.
        filename = method + '_' + str(index) + '.csv'
        if os.path.isfile(filename):
            with open(filename) as csvfile:
                loss_frame = pd.read_csv(csvfile)
                # Drop the index column that to_csv added when caching.
                loss_matrices.append(np.delete(loss_frame.values, 0, axis=1))
        else:
            loss_matrix = make_loss_matrix(method, station, coord_matrix)
            pd.DataFrame(loss_matrix).to_csv(filename)
            loss_matrices.append(loss_matrix)
    # Measured loss vector (one entry per station) for each measurement.
    loss_vectors = []
    for index, dictionary in measurements.iterrows():
        loss_vectors.append(make_loss_vector(erb_power, dictionary))
    # Shape: (grid_rows, grid_cols, n_stations).
    full_loss_matrix = np.stack(loss_matrices, axis=-1)
    print('generating output...')
    output = [0] * measurements.shape[0]
    for index, measurement in measurements.iterrows():
        # Distance between each cell's modelled losses and the measured vector.
        error_matrix = np.apply_along_axis(
            arr = full_loss_matrix,
            func1d = lambda v: np.linalg.norm(v-loss_vectors[index]),
            axis = 2
        )
        expected_position = np.argmin(error_matrix)  # flat index of best cell
        error_matrix_lines, error_matrix_cols = error_matrix.shape
        expected_position_line = expected_position // error_matrix_cols
        expected_position_col = expected_position % error_matrix_cols
        actual_position = Coordinate(measurement['lat'], measurement['lon'])
        predicted_position = grid.coordinates_at_cell_center(
            Point(expected_position_col, expected_position_line)
        )
        output_dictionary = {
            'predicted_lat': predicted_position.latitude,
            'predicted_lon': predicted_position.longitude,
            'actual_lat': actual_position.latitude,
            'actual_lon': actual_position.longitude,
            'error': np.linalg.norm(full_loss_matrix[expected_position_line][expected_position_col] - loss_vectors[index]),
            'distance': distance_in_km(actual_position, predicted_position)
        }
        output[index] = pd.Series(output_dictionary)
        print(index + 1, 'done of', measurements.shape[0])
    output_frame = pd.DataFrame(output)
    output_frame.to_csv(method + '_output.csv')
    print('Mean error:', output_frame['error'].mean())
    print('Median error:', output_frame['error'].median())
    print('Error standard dev:', output_frame['error'].std())
    print('Mean distance:', output_frame['distance'].mean())
    print('Median distance:', output_frame['distance'].median())
    print('Distance standard dev:', output_frame['distance'].std())
def test():
    """Compare propagation-loss models against measured field data.

    For every model in `methods`, predicts the path loss from each ERB
    (base station) to each measurement position, computes the mean squared
    error against the measured loss vectors, and prints the best model.

    Reads 'erbs.csv' and 'medicoes.csv' via open_csv_file; prints results
    to stdout and returns nothing.
    """
    # Candidate propagation models evaluated below.
    methods = [
        'free_space',
        'flat_earth',
        'okumura',
        'cost_hata',
        'ecc33'
    ]
    erbs = open_csv_file('erbs.csv', read_erbs_file)
    measurements = open_csv_file('medicoes.csv', read_measurements_file)
    erb_coords = []
    erb_power = []
    # NOTE(review): DataFrame.ix is deprecated and removed in modern pandas;
    # this code requires an old pandas version (modernize with .loc/.iloc).
    for index, erb in erbs.iterrows():
        station = Coordinate(erbs.ix[index].lat, erbs.ix[index].lon)
        erb_coords.append(station)
        erb_power.append(erbs.ix[index].eirp)
    # One MSE slot per candidate model.
    error_array = np.zeros(len(methods))
    for method_index, method in enumerate(methods):
        print('evaluating method', method)
        # Rows: measurements; columns: the ERBs.
        # assumes exactly 6 base stations — TODO confirm against erbs.csv
        actual_loss_vectors = np.zeros((measurements.shape[0], 6))
        predicted_loss_vectors = np.zeros((measurements.shape[0], 6))
        for index, dictionary in measurements.iterrows():
            print(index + 1, 'measurements out of', measurements.shape[0], 'done')
            position = Coordinate(dictionary['lat'], dictionary['lon'])
            actual_loss_vectors[index] = make_loss_vector(erb_power, dictionary)
            predicted_losses = []
            # Model-predicted loss from each base station to this position.
            for coord in erb_coords:
                distance = distance_in_km(coord, position)
                loss = compute_loss(method, distance)
                predicted_losses.append(loss)
            predicted_loss_vectors[index] = np.array(predicted_losses)
        # Mean squared error over all (measurement, ERB) pairs.
        difference_array = predicted_loss_vectors - actual_loss_vectors
        mean_square_error = np.mean(np.square(np.ndarray.flatten(difference_array)))
        error_array[method_index] = mean_square_error
    errors = zip(methods, error_array.tolist())
    print(list(errors))
    print('minimum error is', methods[np.argmin(error_array)], 'with', error_array[np.argmin(error_array)])
| [
"numpy.stack",
"pandas.DataFrame",
"os.getcwd",
"pandas.read_csv",
"numpy.zeros",
"numpy.argmin",
"os.path.isfile",
"numpy.array",
"pandas.Series",
"numpy.linalg.norm",
"numpy.round",
"numpy.delete",
"numpy.ndarray.flatten"
] | [((3033, 3052), 'numpy.array', 'np.array', (['loss_list'], {}), '(loss_list)\n', (3041, 3052), True, 'import numpy as np\n'), ((4418, 4440), 'numpy.array', 'np.array', (['loss_vectors'], {}), '(loss_vectors)\n', (4426, 4440), True, 'import numpy as np\n'), ((4462, 4494), 'numpy.stack', 'np.stack', (['loss_matrices'], {'axis': '(-1)'}), '(loss_matrices, axis=-1)\n', (4470, 4494), True, 'import numpy as np\n'), ((5787, 5807), 'pandas.DataFrame', 'pd.DataFrame', (['output'], {}), '(output)\n', (5799, 5807), True, 'import pandas as pd\n'), ((3270, 3281), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3279, 3281), False, 'import os\n'), ((3891, 3915), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (3905, 3915), False, 'import os\n'), ((4799, 4822), 'numpy.argmin', 'np.argmin', (['error_matrix'], {}), '(error_matrix)\n', (4808, 4822), True, 'import numpy as np\n'), ((5679, 5707), 'pandas.Series', 'pd.Series', (['output_dictionary'], {}), '(output_dictionary)\n', (5688, 5707), True, 'import pandas as pd\n'), ((6818, 6854), 'numpy.zeros', 'np.zeros', (['(measurements.shape[0], 6)'], {}), '((measurements.shape[0], 6))\n', (6826, 6854), True, 'import numpy as np\n'), ((6884, 6920), 'numpy.zeros', 'np.zeros', (['(measurements.shape[0], 6)'], {}), '((measurements.shape[0], 6))\n', (6892, 6920), True, 'import numpy as np\n'), ((503, 543), 'numpy.round', 'np.round', (['(grid.vertical_cell_count / 180)'], {}), '(grid.vertical_cell_count / 180)\n', (511, 543), True, 'import numpy as np\n'), ((4164, 4189), 'pandas.DataFrame', 'pd.DataFrame', (['loss_matrix'], {}), '(loss_matrix)\n', (4176, 4189), True, 'import pandas as pd\n'), ((5479, 5585), 'numpy.linalg.norm', 'np.linalg.norm', (['(full_loss_matrix[expected_position_line][expected_position_col] -\n loss_vectors[index])'], {}), '(full_loss_matrix[expected_position_line][\n expected_position_col] - loss_vectors[index])\n', (5493, 5585), True, 'import numpy as np\n'), ((7425, 7451), 'numpy.array', 
'np.array', (['predicted_losses'], {}), '(predicted_losses)\n', (7433, 7451), True, 'import numpy as np\n'), ((7767, 7789), 'numpy.argmin', 'np.argmin', (['error_array'], {}), '(error_array)\n', (7776, 7789), True, 'import numpy as np\n'), ((7812, 7834), 'numpy.argmin', 'np.argmin', (['error_array'], {}), '(error_array)\n', (7821, 7834), True, 'import numpy as np\n'), ((3976, 3996), 'pandas.read_csv', 'pd.read_csv', (['csvfile'], {}), '(csvfile)\n', (3987, 3996), True, 'import pandas as pd\n'), ((7562, 7598), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['difference_array'], {}), '(difference_array)\n', (7580, 7598), True, 'import numpy as np\n'), ((4026, 4065), 'numpy.delete', 'np.delete', (['loss_frame.values', '(0)'], {'axis': '(1)'}), '(loss_frame.values, 0, axis=1)\n', (4035, 4065), True, 'import numpy as np\n'), ((4715, 4754), 'numpy.linalg.norm', 'np.linalg.norm', (['(v - loss_vectors[index])'], {}), '(v - loss_vectors[index])\n', (4729, 4754), True, 'import numpy as np\n')] |
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from rlalgos import BaseExperiment
from rlstructures.logger import Logger, TFLogger
from rlstructures import DictTensor, TemporalDictTensor
from rlstructures.batchers import Batcher,EpisodeBatcher
from rlstructures.batchers.trajectorybatchers import MultiThreadTrajectoryBatcher
from rlstructures.batchers.buffers import LocalBuffer
from rlstructures import logging
from rlstructures.tools import weight_init
import torch.nn as nn
import copy
import torch
import time
import numpy as np
import torch.nn.functional as F
import pickle
class ReplayBuffer:
    """Fixed-size circular buffer of transitions.

    A batch of equal-length trajectories of shape (B, T, ...) is written
    flattened as B*T rows of shape (...). Once `N` rows have been written
    the buffer wraps around and overwrites the oldest rows.

    (Cleanup vs. original: removed the dead locals `rs` and `overhead`
    in write(); behavior is unchanged.)
    """
    def __init__(self, N):
        # N = capacity in transitions; storage is allocated lazily on first push.
        self.N = N
        self.buffer = None
    def _init_buffer(self, trajectories):
        """Allocate one (N, ...) zero tensor per key, matching the input dtypes."""
        self.buffer = {}
        for k in trajectories.keys():
            dtype = trajectories[k].dtype
            size = trajectories[k].size()
            # Drop the leading (batch, time) dims; keep the per-transition shape.
            b_size = (self.N,) + size[2:]
            self.buffer[k] = torch.zeros(*b_size, dtype=dtype)
        self.pos = 0          # next write index
        self.full = False     # True once the buffer has wrapped at least once
    def write(self, trajectories):
        """Write a flattened batch at the current position, wrapping modulo N."""
        new_pos = None
        for k in trajectories.keys():
            v = trajectories[k]
            size = v.size()
            # Flatten (B, T, ...) -> (B*T, ...): one row per transition.
            b_size = (size[0] * size[1],) + size[2:]
            v = v.reshape(*b_size)
            n = v.size()[0]
            if new_pos is None:
                # Destination indices are computed once and reused for every key.
                new_pos = torch.arange(n) + self.pos
                mask = new_pos.ge(self.N).float()
                # Indices past the end wrap to the start of the buffer.
                nidx = torch.arange(n) + self.pos - self.N
                new_pos = (new_pos * (1 - mask) + mask * nidx).long()
            self.buffer[k][new_pos] = v
        self.pos = self.pos + n
        if self.pos >= self.N:
            self.pos = self.pos - self.N
            self.full = True
        assert self.pos < self.N
    def size(self):
        """Number of valid transitions currently stored."""
        if self.full:
            return self.N
        else:
            return self.pos
    def push(self, trajectories):
        """Add a batch of equal-length trajectories to the replay buffer."""
        max_length = trajectories.lengths.max().item()
        # Variable-length batches are not supported by the flat layout.
        assert trajectories.lengths.eq(max_length).all()
        if self.buffer is None:
            self._init_buffer(trajectories)
        self.write(trajectories)
    def sample(self, n=1):
        """Return a DictTensor of `n` transitions sampled uniformly with replacement.

        NOTE: requires at least one prior write (torch.randint needs high > 0).
        """
        limit = self.pos
        if self.full:
            limit = self.N
        transitions = torch.randint(0, high=limit, size=(n,))
        d = {k: self.buffer[k][transitions] for k in self.buffer}
        return DictTensor(d)
class SAC(BaseExperiment):
    """Soft Actor-Critic trainer built on rlstructures batchers.

    Holds a stochastic policy (self.learning_model), two Q critics with
    Polyak-averaged target copies, a train batcher producing fixed-length
    trajectory chunks, and an episode batcher used for evaluation.
    """
    def __init__(self, config, create_env, create_agent):
        """Probe a throwaway env to infer action/observation dimensions."""
        super().__init__(config,create_env,create_agent)
        env = self._create_env(
            self.config["n_envs"], seed=0,**{k:self.config[k] for k in self.config if k.startswith("environment/")}
        )
        self.action_dim = env.action_space.sample().shape[0]
        self.obs_dim = env.reset()[0]["frame"].size()[1]
        # The env was only needed to read the spaces.
        del env
    def check_arguments(self,args):
        """Validate config: evaluation rollouts must tile evenly over envs*threads."""
        assert args["n_evaluation_rollouts"]%(args["n_envs"]*args["n_evaluation_threads"])==0
        assert args["evaluation_mode"]=="deterministic" or args["evaluation_mode"]=="stochastic"
        return True
    def save(self):
        """Checkpoint, then block until one evaluation reward is available and pickle it."""
        super().save()
        reward=self.evaluate(relaunch=False)
        # evaluate() returns None until the async evaluation batch is ready.
        while(reward is None):
            reward=self.evaluate(relaunch=False)
        f=open(self.config["logdir"]+"/out.out","wb")
        pickle.dump({"reward":reward},f)
        # NOTE(review): purpose of the random sleep is not evident from this
        # file — possibly staggers concurrent writers; confirm before removing.
        time.sleep(np.random.rand()*10)
        f.close()
    def reset(self):
        """Build critics, target critics, and the train/evaluation batchers."""
        self.q1 = self._create_q()
        self.q2 = self._create_q()
        self.target_q1=self._create_q()
        self.target_q2=self._create_q()
        # Targets start as exact copies of the critics.
        self.target_q1.load_state_dict(self.q1.state_dict())
        self.target_q2.load_state_dict(self.q2.state_dict())
        # Each batcher gets its own CPU copy of the policy.
        model=copy.deepcopy(self.learning_model)
        self.train_batcher=Batcher(
            n_timesteps=self.config["batch_timesteps"],
            n_slots=self.config["n_envs"]*self.config["n_threads"],
            create_agent=self._create_agent,
            create_env=self._create_env,
            env_args={
                "mode":"train",
                "n_envs": self.config["n_envs"],
                "max_episode_steps": self.config["max_episode_steps"],
                **{k:self.config[k] for k in self.config if k.startswith("environment/")}
            },
            agent_args={"action_dim": self.action_dim, "policy": model},
            n_threads=self.config["n_threads"],
            seeds=self.config["env_seed"],
        )
        model=copy.deepcopy(self.learning_model)
        self.evaluation_batcher=EpisodeBatcher(
            n_timesteps=self.config["max_episode_steps"],
            n_slots=self.config["n_evaluation_rollouts"],
            create_agent=self._create_agent,
            create_env=self._create_env,
            env_args={
                "mode":"evaluation",
                "max_episode_steps": self.config["max_episode_steps"],
                "n_envs": self.config["n_envs"],
                **{k:self.config[k] for k in self.config if k.startswith("environment/")}
            },
            agent_args={"action_dim": self.action_dim, "policy": model},
            n_threads=self.config["n_evaluation_threads"],
            seeds=self.config["env_seed"]*10,
        )
        self.register_batcher(self.train_batcher)
        self.register_batcher(self.evaluation_batcher)
    def _state_dict(self,model,device):
        """Return `model`'s state dict with every tensor moved to `device`."""
        sd = model.state_dict()
        for k, v in sd.items():
            sd[k] = v.to(device)
        return sd
    def soft_update_params(self,net, target_net, tau):
        """Polyak update: target <- tau * net + (1 - tau) * target."""
        for param, target_param in zip(net.parameters(), target_net.parameters()):
            target_param.data.copy_(tau * param.data +(1 - tau) * target_param.data)
    def run(self):
        """Main loop: pre-fill the replay buffer, then alternate sampling and optimizing."""
        self.replay_buffer=ReplayBuffer(self.config["replay_buffer_size"])
        device = torch.device(self.config["learner_device"])
        self.learning_model.to(device)
        self.q1.to(device)
        self.q2.to(device)
        self.target_q1.to(device)
        self.target_q2.to(device)
        # Separate Adam optimizers for the actor and each critic (same lr).
        optimizer = torch.optim.Adam(
            self.learning_model.parameters(), lr=self.config["lr"]
        )
        optimizer_q1 = torch.optim.Adam(
            self.q1.parameters(), lr=self.config["lr"]
        )
        optimizer_q2 = torch.optim.Adam(
            self.q2.parameters(), lr=self.config["lr"]
        )
        # Batchers run on CPU: ship them CPU copies of the current weights.
        self.train_batcher.update(self._state_dict(self.learning_model,torch.device("cpu")))
        self.evaluation_batcher.update(self._state_dict(self.learning_model,torch.device("cpu")))
        n_episodes=self.config["n_envs"]*self.config["n_threads"]
        # Training rollouts always sample stochastically (zeros.eq(0.0) -> all True).
        self.train_batcher.reset(agent_info=DictTensor({"stochastic":torch.zeros(n_episodes).eq(0.0)}))
        logging.info("Sampling initial transitions")
        # Pre-fill the replay buffer before any gradient step.
        n_iterations=int(self.config["n_starting_transitions"]/(n_episodes*self.config["batch_timesteps"]))
        for k in range(n_iterations):
            self.train_batcher.execute()
            trajectories=self.train_batcher.get()
            self.replay_buffer.push(trajectories)
        print("replay_buffer_size = ",self.replay_buffer.size())
        n_episodes=self.config["n_evaluation_rollouts"]
        stochastic=torch.tensor([self.config["evaluation_mode"]=="stochastic"]).repeat(n_episodes)
        # Launch evaluation asynchronously; results are collected in evaluate().
        self.evaluation_batcher.execute(agent_info=DictTensor({"stochastic":stochastic}), n_episodes=n_episodes)
        logging.info("Starting Learning")
        _start_time=time.time()
        logging.info("Learning")
        # Wall-clock-bounded training loop.
        while time.time()-_start_time <self.config["time_limit"]:
            self.train_batcher.execute()
            trajectories=self.train_batcher.get()
            self.replay_buffer.push(trajectories)
            self.logger.add_scalar("replay_buffer_size",self.replay_buffer.size(),self.iteration)
            # avg_reward = 0
            for k in range(self.config["n_batches_per_epochs"]):
                transitions=self.replay_buffer.sample(n=self.config["size_batches"])
                #print(dt)
                # Critic update: one gradient step on each Q network.
                dt,transitions = self.get_q_loss(transitions,device)
                [self.logger.add_scalar(k,dt[k].item(),self.iteration) for k in dt.keys()]
                optimizer_q1.zero_grad()
                dt["q1_loss"].backward()
                optimizer_q1.step()
                optimizer_q2.zero_grad()
                dt["q2_loss"].backward()
                optimizer_q2.step()
                # Actor update on the same batch.
                optimizer.zero_grad()
                dt = self.get_policy_loss(transitions)
                [self.logger.add_scalar(k,dt[k].item(),self.iteration) for k in dt.keys()]
                dt["policy_loss"].backward()
                optimizer.step()
                tau=self.config["tau"]
                # Polyak-average the target critics after every step.
                self.soft_update_params(self.q1,self.target_q1,tau)
                self.soft_update_params(self.q2,self.target_q2,tau)
                self.iteration+=1
            # Refresh the rollout workers with the updated policy.
            self.train_batcher.update(self._state_dict(self.learning_model,torch.device("cpu")))
            self.evaluate()
    def evaluate(self,relaunch=True):
        """Collect a finished evaluation batch (non-blocking) and log its mean reward.

        Returns None when no evaluation batch is ready yet; otherwise the
        masked mean episode reward. When `relaunch` is True, a new evaluation
        run is started with the current policy weights.
        """
        evaluation_trajectories = self.evaluation_batcher.get(blocking=False)
        if (evaluation_trajectories is None):
            return
        # Mask zeroes out padding beyond each episode's length.
        avg_reward = (
            (
                evaluation_trajectories["_reward"]
                * evaluation_trajectories.mask()
            )
            .sum(1)
            .mean()
            .item()
        )
        self.logger.add_scalar("avg_reward/"+self.config["evaluation_mode"], avg_reward, self.iteration)
        if (self.config["verbose"]):
            print("Iteration "+str(self.iteration)+", Reward = "+str(avg_reward))
        if (relaunch):
            cpu_parameters=self._state_dict(self.learning_model,torch.device("cpu"))
            self.evaluation_batcher.update(cpu_parameters)
            self.evaluation_batcher.reexecute()
        return avg_reward
    def get_q_loss(self, transitions,device):
        """Compute the soft Bellman critic losses for a batch of transitions.

        Returns (DictTensor with 'q1_loss'/'q2_loss', transitions-on-device).
        """
        transitions = transitions.to(device)
        B=transitions.n_elems()
        Bv=torch.arange(B)
        action = transitions["action"]
        reward = transitions["_reward"]
        frame = transitions["frame"]
        _frame = transitions["_frame"]
        _done = transitions["_done"].float()
        # action for s_prime
        mean_prime,var_prime=self.learning_model(_frame)
        _id = torch.eye(self.action_dim).unsqueeze(0).repeat(B, 1, 1)
        # _nvar = var_prime.unsqueeze(-1).repeat(1, 1, self.action_dim)
        # _nvar = _nvar * _id
        # NOTE(review): var_prime is used directly as Normal's scale, i.e. it
        # is treated as a standard deviation — confirm the policy outputs std.
        distribution=torch.distributions.Normal(mean_prime, var_prime)
        next_action=distribution.sample().detach()
        #Compute targets
        q1=self.target_q1(_frame,next_action).detach().squeeze(-1)
        q2=self.target_q2(_frame,next_action).detach().squeeze(-1)
        # Clipped double-Q: take the minimum of both target critics.
        q = torch.min(q1,q2)
        lp= distribution.log_prob(next_action).detach().sum(-1)
        # Soft value: subtract the entropy-weighted log-probability.
        q = q - self.config["lambda_entropy"]*lp
        target_value=q*(1.-_done)*self.config["discount_factor"]+reward
        q1_loss=(target_value.detach()-self.q1(frame,action).squeeze(-1))**2
        q2_loss=(target_value.detach()-self.q2(frame,action).squeeze(-1))**2
        dt ={
                "q1_loss": q1_loss.mean(),
                "q2_loss": q2_loss.mean(),
        }
        return DictTensor(dt),transitions
    def get_policy_loss(self,transitions):
        """Compute the actor loss (negative soft Q of reparameterized actions).

        Returns a DictTensor with 'policy_loss' plus entropy/variance diagnostics.
        """
        frame = transitions["frame"]
        B=transitions.n_elems()
        #Now, compute the policy term
        mean,var=self.learning_model(frame)
        #print(var.mean().item())
        #print(mean)
        _id = torch.eye(self.action_dim).unsqueeze(0).repeat(B, 1, 1)
        # _nvar = var.unsqueeze(-1).repeat(1, 1, self.action_dim)
        # _nvar = _nvar * _id
        distribution=torch.distributions.Normal(mean, var)
        entropy=distribution.entropy().mean()
        # rsample() keeps the pathwise gradient through the policy parameters.
        action_tilde=distribution.rsample()
        #print(action_tilde)
        q1 = self.q1(frame,action_tilde).squeeze(-1)
        q2 = self.q2(frame,action_tilde).squeeze(-1)
        q=torch.min(q1,q2)
        loss=q-self.config["lambda_entropy"]*distribution.log_prob(action_tilde).sum(-1)
        dt={"policy_loss":-loss.mean(),"entropy":entropy.detach(),"avg_var":var.mean().detach(),"avg_mean":mean.mean().detach()}
        dt=DictTensor(dt)
        return dt
| [
"copy.deepcopy",
"pickle.dump",
"torch.randint",
"torch.eye",
"numpy.random.rand",
"time.time",
"torch.distributions.Normal",
"rlstructures.DictTensor",
"torch.arange",
"torch.device",
"torch.zeros",
"rlstructures.logging.info",
"torch.min",
"torch.tensor"
] | [((2538, 2577), 'torch.randint', 'torch.randint', (['(0)'], {'high': 'limit', 'size': '(n,)'}), '(0, high=limit, size=(n,))\n', (2551, 2577), False, 'import torch\n'), ((2654, 2667), 'rlstructures.DictTensor', 'DictTensor', (['d'], {}), '(d)\n', (2664, 2667), False, 'from rlstructures import DictTensor, TemporalDictTensor\n'), ((3583, 3617), 'pickle.dump', 'pickle.dump', (["{'reward': reward}", 'f'], {}), "({'reward': reward}, f)\n", (3594, 3617), False, 'import pickle\n'), ((3983, 4017), 'copy.deepcopy', 'copy.deepcopy', (['self.learning_model'], {}), '(self.learning_model)\n', (3996, 4017), False, 'import copy\n'), ((4733, 4767), 'copy.deepcopy', 'copy.deepcopy', (['self.learning_model'], {}), '(self.learning_model)\n', (4746, 4767), False, 'import copy\n'), ((6091, 6134), 'torch.device', 'torch.device', (["self.config['learner_device']"], {}), "(self.config['learner_device'])\n", (6103, 6134), False, 'import torch\n'), ((6995, 7039), 'rlstructures.logging.info', 'logging.info', (['"""Sampling initial transitions"""'], {}), "('Sampling initial transitions')\n", (7007, 7039), False, 'from rlstructures import logging\n'), ((7670, 7703), 'rlstructures.logging.info', 'logging.info', (['"""Starting Learning"""'], {}), "('Starting Learning')\n", (7682, 7703), False, 'from rlstructures import logging\n'), ((7724, 7735), 'time.time', 'time.time', ([], {}), '()\n', (7733, 7735), False, 'import time\n'), ((7745, 7769), 'rlstructures.logging.info', 'logging.info', (['"""Learning"""'], {}), "('Learning')\n", (7757, 7769), False, 'from rlstructures import logging\n'), ((10349, 10364), 'torch.arange', 'torch.arange', (['B'], {}), '(B)\n', (10361, 10364), False, 'import torch\n'), ((10845, 10894), 'torch.distributions.Normal', 'torch.distributions.Normal', (['mean_prime', 'var_prime'], {}), '(mean_prime, var_prime)\n', (10871, 10894), False, 'import torch\n'), ((11118, 11135), 'torch.min', 'torch.min', (['q1', 'q2'], {}), '(q1, q2)\n', (11127, 11135), False, 'import 
torch\n'), ((12069, 12106), 'torch.distributions.Normal', 'torch.distributions.Normal', (['mean', 'var'], {}), '(mean, var)\n', (12095, 12106), False, 'import torch\n'), ((12342, 12359), 'torch.min', 'torch.min', (['q1', 'q2'], {}), '(q1, q2)\n', (12351, 12359), False, 'import torch\n'), ((12589, 12603), 'rlstructures.DictTensor', 'DictTensor', (['dt'], {}), '(dt)\n', (12599, 12603), False, 'from rlstructures import DictTensor, TemporalDictTensor\n'), ((1165, 1198), 'torch.zeros', 'torch.zeros', (['*b_size'], {'dtype': 'dtype'}), '(*b_size, dtype=dtype)\n', (1176, 1198), False, 'import torch\n'), ((11605, 11619), 'rlstructures.DictTensor', 'DictTensor', (['dt'], {}), '(dt)\n', (11615, 11619), False, 'from rlstructures import DictTensor, TemporalDictTensor\n'), ((3635, 3651), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3649, 3651), True, 'import numpy as np\n'), ((6696, 6715), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6708, 6715), False, 'import torch\n'), ((6794, 6813), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6806, 6813), False, 'import torch\n'), ((7468, 7530), 'torch.tensor', 'torch.tensor', (["[self.config['evaluation_mode'] == 'stochastic']"], {}), "([self.config['evaluation_mode'] == 'stochastic'])\n", (7480, 7530), False, 'import torch\n'), ((7599, 7637), 'rlstructures.DictTensor', 'DictTensor', (["{'stochastic': stochastic}"], {}), "({'stochastic': stochastic})\n", (7609, 7637), False, 'from rlstructures import DictTensor, TemporalDictTensor\n'), ((7784, 7795), 'time.time', 'time.time', ([], {}), '()\n', (7793, 7795), False, 'import time\n'), ((10059, 10078), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (10071, 10078), False, 'import torch\n'), ((1608, 1623), 'torch.arange', 'torch.arange', (['n'], {}), '(n)\n', (1620, 1623), False, 'import torch\n'), ((9230, 9249), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (9242, 9249), False, 'import torch\n'), 
((1702, 1717), 'torch.arange', 'torch.arange', (['n'], {}), '(n)\n', (1714, 1717), False, 'import torch\n'), ((10666, 10692), 'torch.eye', 'torch.eye', (['self.action_dim'], {}), '(self.action_dim)\n', (10675, 10692), False, 'import torch\n'), ((11896, 11922), 'torch.eye', 'torch.eye', (['self.action_dim'], {}), '(self.action_dim)\n', (11905, 11922), False, 'import torch\n'), ((6952, 6975), 'torch.zeros', 'torch.zeros', (['n_episodes'], {}), '(n_episodes)\n', (6963, 6975), False, 'import torch\n')] |
## @author: <NAME>
# Documentation for this module.
#
# Created on Wed Feb 6 15:06:12 2019; -*- coding: utf-8 -*-;
#################################################################################################################################
#################################################################################################################################
# This code was built with the aim of allowing the user to work with Spike2 .smr files and further perform correlation analyses ##
# between specific acoustic features and neuronal activity. ##
# In our group we work with Zebra finches, recording their neuronal activity while they sing, so the parameters here might ##
# have to be adjusted to your specific data.                                                                                    ##
#################################################################################################################################
#################################################################################################################################
### Necessary packages
import neo
import nolds
import numpy as np
import pylab as py
import matplotlib.lines as mlines
import datetime
import os
import pandas
import scipy.io
import scipy.signal
import scipy.stats
import scipy.fftpack
import scipy.interpolate
import random
from statsmodels.tsa.stattools import acf
### Example of files that one could use:
"""
file="CSC1_light_LFPin.smr" #Here you define the .smr file that will be analysed
songfile="CSC10.npy" #Here you define which is the file with the raw signal of the song
motifile="labels.txt" #Here you define what is the name of the file with the motif stamps/times
"""
## Some key parameters:
fs=32000 #Sampling frequency of the recording (Hz)
n_iterations=1000 #Number of resamples for bootstrapping
window_size=100 #Window length (samples) for envelope extraction
lags=100 #Number of lags for autocorrelation
alpha=0.05 #Significance threshold (p-value)
premot=0.05 # Premotor window — presumably seconds; confirm against usage
binwidth=0.02 # PSTH bin width — presumably seconds; confirm against usage
#############################################################################################################################
# This block includes some functions that will be used several times in the code, but will not be individually documented: #
#
def sortsyls(motifile):
    """Group syllable on/offset stamps from a label file by syllable identity.

    Each line of `motifile` is expected to look like "start,end,label",
    where the line's last character is the syllable label. Labels are
    binned as 'a', 'b', 'y' (syllable c), 'd' and 'e'; any other trailing
    character is ignored (not a real syllable).

    Prints the size of each of the five stamp arrays (as before) and
    returns a list of (n, 2) float arrays [start, end], one per syllable
    type that actually occurs, in the order a, b, y, d, e.

    (Fixes vs. original: the file handle is closed via `with`; the
    np.empty seed-row hack and five duplicated branches are replaced by a
    label->bin mapping; blank lines no longer raise IndexError.)
    """
    # Trailing-character label -> bin index in `stamps`.
    label_to_bin = {"a": 0, "b": 1, "y": 2, "d": 3, "e": 4}
    stamps = [[], [], [], [], []]
    with open(motifile, "r") as f:
        for line in f.read().splitlines():
            if not line:
                continue  # tolerate blank lines
            bin_idx = label_to_bin.get(line[-1])
            if bin_idx is None:
                continue  # excludes everything that is not a real syllable
            fields = line.split(",")
            stamps[bin_idx].append([int(fields[0]), int(fields[1])])
    finallist = []
    for group in stamps:
        # reshape(-1, 2) keeps an empty group as a (0, 2) array so .size == 0.
        arr = np.array(group, dtype=float).reshape(-1, 2)
        print(arr.size)
        if arr.size != 0:
            finallist += [arr]
    return finallist
def tellme(s):
    """Echo `s` to stdout and show it as the current matplotlib figure title."""
    print(s)
    py.title(s, fontsize=10)
    py.draw()
def smoothed(inputSignal, fs=32000, smooth_win=10):
    """Rectify-and-smooth a signal (RMS-style amplitude envelope).

    Squares the signal, convolves it with a normalized boxcar of
    `smooth_win` milliseconds at sampling rate `fs`, trims the full
    convolution back to the input length (centered), and returns the
    square root.

    Parameters
    ----------
    inputSignal : 1-D numpy array.
    fs : sampling rate in Hz. The default 32000 equals the module-level
         `fs` constant the original default bound at definition time.
    smooth_win : smoothing-window length in milliseconds.

    Returns
    -------
    numpy.ndarray of the same length as `inputSignal`.

    (Fix vs. original: the boxcar length no longer shadows the builtin
    `len`.)
    """
    squared_song = np.power(inputSignal, 2)
    # Boxcar length in samples.
    win_len = np.round(fs * smooth_win / 1000).astype(int)
    h = np.ones((win_len,)) / win_len
    smooth = np.convolve(squared_song, h)
    # Center-trim the full convolution to the input's length.
    offset = round((smooth.shape[-1] - inputSignal.shape[-1]) / 2)
    smooth = smooth[offset:inputSignal.shape[-1] + offset]
    return np.sqrt(smooth)
#Fast loop to check visually if the syllables are ok. I've been finding problems in A syllables, so I recommend checking always before analysis.
def checksyls(songfile,motifile, beg, end):
    """Plot renditions `beg`..`end-1` of one syllable type for visual inspection.

    Interactively prompts for the syllable ("a"/"b"/"c"/"d") and plots the
    raw-song segment of each chosen rendition in its own figure.

    NOTE(review): sortsyls() drops empty syllable types from its return
    list, so the fixed indices below (a->0, b->1, c->2, d->3) are only
    correct when all earlier syllable types are present in the label file.
    NOTE(review): an unrecognized answer leaves `used` unbound -> NameError.
    """
    finallist=sortsyls(motifile)
    song=np.load(songfile)
    #Will filter which arra will be used
    answer=input("Which syllable?")
    if answer.lower() == "a":
        used=finallist[0]
    elif answer.lower() == "b":
        used=finallist[1]
    elif answer.lower() == "c":
        used=finallist[2]
    elif answer.lower() == "d":
        used=finallist[3]
    print("This syb has "+ str(len(used)) + " renditions.")
    # One figure per rendition, showing the raw song between on/offset stamps.
    for i in range(beg,end):
        py.figure()
        py.plot(song[int(used[i][0]):int(used[i][1])])
""" The two following functions were obtained from
http://ceciliajarne.web.unq.edu.ar/investigacion/envelope_code/ """
def window_rms(inputSignal, window_size=window_size):
    """Sliding-window RMS of a signal.

    Convolves the squared signal with a normalized boxcar of
    `window_size` samples in "valid" mode (output is shorter than the
    input by window_size - 1) and returns the square root.
    """
    boxcar = np.ones(window_size) / float(window_size)
    mean_square = np.convolve(np.power(inputSignal, 2), boxcar, "valid")
    return np.sqrt(mean_square)
def getEnvelope(inputSignal, window_size=100):
    """Upper amplitude envelope via a trailing rolling maximum.

    For each sample i, returns max(|signal[j]|) over the trailing window
    j in [i - window_size + 1, i], clamped at the start of the signal.
    The default window of 100 samples equals the module-level
    `window_size` constant the original default bound at definition time.

    Returns a list the same length as the input.

    (Fix vs. original: the original indexed
    `absoluteSignal[baseIndex - lookbackIndex]`, so negative indices at
    the beginning of the signal wrapped around to the *end* of the array,
    contaminating the first window_size envelope samples — and raised
    IndexError when window_size exceeded twice the signal length.)
    """
    # Rectify once up front.
    absoluteSignal = [abs(sample) for sample in inputSignal]
    outputSignal = []
    for baseIndex in range(len(absoluteSignal)):
        # Trailing window, clamped at index 0 (no wrap-around).
        start = max(0, baseIndex - window_size + 1)
        outputSignal.append(max(absoluteSignal[start:baseIndex + 1]))
    return outputSignal
###############################################################################################################################
##############################################################################################################################
# From now on there will be the core functions of this code, which will be individually documented: #
#
##
##
#
# This function will allow you to read the .smr files from Spike2.
def read(file):
    """Open a Spike2 .smr file with neo.

    Returns (block, segment): the first data block in the file and that
    block's first segment (which holds the analog signals and spike trains).
    """
    spike2_reader = neo.io.Spike2IO(filename=file)
    block = spike2_reader.read()[0]
    first_segment = block.segments[0]
    return block, first_segment
##
#
# This function will allow you to get information inside the .smr file.
# It will return the number of analog signals inside it, the number of spike trains,
# a numpy array with the time (suitable for further plotting), and the sampling rate of the recording.
def getinfo(file):
    """Summarize the contents of a .smr file.

    Returns a tuple (n_analog_signals, n_spike_trains, time, sampling_rate)
    where `time` is a numpy axis spanning the whole recording with one
    point per sample of the first analog signal (handy for plotting), and
    `sampling_rate` is the integer sampling rate of the first channel.
    """
    data, segment = read(file)
    # Recording extent and length of the first analog channel.
    start = float(segment.t_start)
    stop = float(segment.t_stop)
    n_samples = len(segment.analogsignals[0])
    time = np.linspace(start, stop, n_samples)
    n_analog_signals = len(segment.analogsignals)
    n_spike_trains = len(segment.spiketrains)
    sampling_rate = int(data.children_recur[0].sampling_rate)
    return n_analog_signals, n_spike_trains, time, sampling_rate
def getsong(file):
    """Extract the song channel ('CSC5') from the analog-signal bundle and save it.

    Saves the channel as '<file minus extension>_songfile.npy'.

    NOTE(review): the original author flagged this as "botched up". The
    channel names are parsed out of the analogsignal's *name string*,
    e.g. 'Channel bundle (CSC5,...) ', and the bundle array is transposed
    so it can be indexed by channel position. Confirm the channel naming
    and array orientation on new recordings before trusting the output.
    NOTE(review): if no entry equals 'CSC5', `song` is unbound -> NameError.
    """
    data, data_seg = read(file)
    # for i in range(len(data_seg.analogsignals)):
    #     if data_seg.analogsignals[i].name == 'Channel bundle (RAW 009) ':
    #         song=data_seg.analogsignals[0][i].as_array()
    s = data_seg.analogsignals[0].name.split('(')[1].split(')')[0].split(',') # What happened? Was working without this earlier. No clue what changed.
    analog_signals = np.array([data_seg.analogsignals[0]])
    analog_signals = analog_signals.transpose()
    # Pick the channel whose parsed name matches the song channel.
    for i in range(len(s)):
        if s[i] == 'CSC5':
            song = analog_signals[i]
        else:
            continue
    print('Saving song to ', file[:-4], "_songfile.npy")
    np.save(file[:-4]+"_songfile", song)
##
# This function will get the analogical signals and the spiketrains from the .smr file and return them in the end as arrays.
def getarrays(file):
    """Convert every analog signal and spike train in `file` to numpy arrays.

    Returns (analog, sp): two lists of numpy arrays, one entry per analog
    channel and one per spike train. Prints a one-line summary of each.
    """
    _, data_seg = read(file)
    n_analog_signals, n_spike_trains, _, _ = getinfo(file)
    # One raw array per analog channel.
    analog = [data_seg.analogsignals[i].as_array() for i in range(n_analog_signals)]
    print("analog: This list contains " + str(n_analog_signals) + " analog signals!")
    # One spike-time array per sorted unit.
    sp = [data_seg.spiketrains[k].as_array() for k in range(n_spike_trains)]
    print("sp: This list contains " + str(n_spike_trains) + " spiketrains!")
    return analog, sp
##
#
# This function will allow you to plot the analog signals and spiketrains inside the .smr file.
def plotplots(file):
    """Plot every analog signal and a raster of every spike train in `file`.

    Opens two figures: one with a subplot per analog channel, and one with
    all spike trains rastered on a shared time axis. Blocks on py.show().
    """
    data, data_seg= read(file)
    n_analog_signals, n_spike_trains, time, ansampling_rate = getinfo(file)
    analog, sp = getarrays(file);
    #Plot of Analogs
    py.figure()
    for i in range(n_analog_signals):
        py.subplot(len(analog),1,i+1)
        py.plot(time,analog[i])
        py.xlabel("time (s)")
        py.ylabel("Amplitude")
        py.title("Analog signal of: " + data_seg.analogsignals[i].name.split(" ")[2])
        py.tight_layout()
    #Plot of Spike Trains
    # Collect the unit ids for the legend.
    Labels=[]
    for i in range(n_spike_trains):
        Chprov = data.list_units[i].annotations["id"]
        Labels += [Chprov]
    py.figure()
    py.yticks(np.arange(0, 11, step=1), )
    py.xlabel("time (s)")
    py.title("Spike trains")
    py.ylabel("Number of spike trains")
    # Each spike train is rastered on its own horizontal row (res).
    res=-1
    count=0
    for j in sp:
        # colors=["black","blue", "red", "pink", "purple", "grey", "limegreen", "aqua", "magenta", "darkviolet", "orange"] #This was decided from observing which order SPIKE2 defines the colors for the spiketrains
        colors=["black","blue", "red", "pink", "purple", "grey", "limegreen", "aqua", "magenta", "darkviolet", "orange", "brown", "gold", "green", "pink","black","blue", "red", "pink", "purple", "grey", "limegreen", "aqua", "magenta", "darkviolet", "orange", "brown", "gold", "green", "pink","black","blue", "red", "pink", "purple", "grey", "limegreen", "aqua", "magenta", "darkviolet", "orange", "brown", "gold", "green", "pink"] #Random
        res=res+1
        print(count)
        py.scatter(j,res+np.zeros(len(j)),marker="|", color=colors[count])
        py.legend((Labels), bbox_to_anchor=(1, 1))
        count+=1
    py.tight_layout()
    py.show()
##
#
# This function will create a few files inside a folder which will be named according to the date and time.
# The files are:
#
# 1 - summary.txt : this file will contain a summary of the contents of the .smr file.
#
# 2- the spiketimes as .txt: these files will contain the spiketimes of each spiketrain.
#
# 3- the analog signals as .npy: these files will contain the raw data of the analog signals.
def createsave(file):
    """Export the contents of a .smr file into a new timestamped folder.

    Creates a folder named after the current date/time, chdirs into it,
    and writes: one <unit-id>.txt per spike train (spiketimes, optionally
    clipped to a window read from ../unitswindow.txt), one .npy per analog
    channel, a Channels_Label_LFP.txt table, and a summary.txt describing
    the file. The LFP number is asked interactively.

    NOTE(review): this function changes the process working directory and
    does not change it back.
    """
    data, data_seg= read(file)
    n_analog_signals, n_spike_trains, time, ansampling_rate = getinfo(file)
    analog, sp = getarrays(file)
    #Create new folder and change directory
    today= datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    os.mkdir(today)
    os.chdir(os.path.expanduser(today))
    #Create DataFrame (LFP should be indicated by the subject) and SpikeTime files
    res=[]
    LFP = input("Enter LFP number:")
    # If a unitswindow.txt exists one level up, it may restrict the saved
    # spiketimes of listed units to a [begin, end] window (end == -1 means
    # "until the last spike").
    if os.path.isfile("..//unitswindow.txt"):
        for i in range(n_spike_trains):
            # Unit ids look like "chN#M": channel N, unit label M.
            Chprov = data.list_units[i].annotations["id"]
            Chprov2 = Chprov.split("#")[0]
            Ch = Chprov2.split("ch")[1]
            Label = Chprov.split("#")[1]
            res += [[int(Ch), int(Label), int(LFP)]]
            df = pandas.DataFrame(data=res, columns= ["Channel", "Label", "LFP number"])
            # unitswindow.txt is whitespace-separated triples: id begin end.
            with open("..//unitswindow.txt", "r") as datafile:
                s=datafile.read().split()
                d=s[0::3]
                x=np.array(s).reshape((-1,3))
            if Chprov in d and x.size >=3:
                arr= data_seg.spiketrains[i].as_array()
                where=d.index(Chprov)
                windowbeg=int(x[where][1])
                windowend=int(x[where][2])
                if windowend==-1:
                    windowend=arr[-1]
                # Keep only spikes inside the requested window.
                tosave= arr[np.where(np.logical_and(arr >= windowbeg , arr <= windowend) == True)]
                np.savetxt(Chprov+".txt", tosave) #Creates files with the Spiketimes.
            else:
                np.savetxt(Chprov+".txt", data_seg.spiketrains[i].as_array())
    else:
        for i in range(n_spike_trains):
            Chprov = data.list_units[i].annotations["id"]
            Chprov2 = Chprov.split("#")[0]
            Ch = Chprov2.split("ch")[1]
            Label = Chprov.split("#")[1]
            res += [[int(Ch), int(Label), int(LFP)]]
            df = pandas.DataFrame(data=res, columns= ["Channel", "Label", "LFP number"])
            np.savetxt(Chprov+".txt", data_seg.spiketrains[i].as_array()) #Creates files with the Spiketimes.
    print(df)
    # NOTE(review): rebinding `file` shadows the function parameter here.
    file = open("Channels_Label_LFP.txt", "w+")
    file.write(str(df))
    file.close()
    #Create and Save Binary/.NPY files of Analog signals
    for j in range(n_analog_signals):
        temp=data_seg.analogsignals[j].name.split(" ")[2][1:-1]
        np.save(temp, data_seg.analogsignals[j].as_array())
    #Create and Save Summary about the File
    an=["File of origin: " + data.file_origin, "Number of AnalogSignals: " + str(n_analog_signals)]
    for k in range(n_analog_signals):
        anlenght= str(data.children_recur[k].size)
        anunit=str(data.children_recur[k].units).split(" ")[1]
        anname=str(data.children_recur[k].name)
        antime = str(str(data.children_recur[k].t_start) + " to " + str(data.children_recur[k].t_stop))
        an+=[["Analog index:" + str(k) + " Channel Name: " + anname, "Lenght: "+ anlenght, " Unit: " + anunit, " Sampling Rate: " + str(ansampling_rate) + " Duration: " + antime]]
    spk=["Number of SpikeTrains: " + str(n_spike_trains)]
    # Spike trains follow the analog signals in children_recur.
    for l in range(n_analog_signals, n_spike_trains + n_analog_signals):
        spkid = str(data.children_recur[l].annotations["id"])
        spkcreated = str(data.children_recur[l].annotations["comment"])
        spkname= str(data.children_recur[l].name)
        spksize = str(data.children_recur[l].size)
        spkunit = str(data.children_recur[l].units).split(" ")[1]
        spk+=[["SpikeTrain index:" + str(l-n_analog_signals) + " Channel Id: " + spkid, " " + spkcreated, " Name: " + spkname, " Size: "+ spksize, " Unit: " + spkunit]]
    final = an + spk
    with open("summary.txt", "w+") as f:
        for item in final:
            f.write("%s\n" % "".join(item))
    # Redundant: the `with` block above already closed the file.
    f.close()
    print("\n"+"All files were created!")
##
#
# This function will get and save the spikeshapes (.txt) from the Raw unfiltered neuronal signal.
#
# Arguments:
#
# file is the .smr file
#
# raw is the .npy file containing the Raw unfiltered neuronal signal
#
# rawfiltered is the .npy containing the spike2 filtered neuronal signal
def spikeshapes(file, raw, rawfiltered):
    """Extract and save the spike waveforms (.txt) from the raw neuronal signal.

    Iterates over the per-unit spike-time .txt files in the current folder
    (created by createsave) and, for each spike, stores a 2 ms window of the
    raw signal. Optionally plots one example spike per unit.

    Arguments:
    file        -- the .smr file.
    raw         -- .npy file with the raw unfiltered neuronal signal.
    rawfiltered -- .npy file with the Spike2-filtered neuronal signal.
    """
    data, _= read(file)
    _, n_spike_trains, _, ansampling_rate = getinfo(file)
    LFP=np.load(raw)
    notLFP=np.load(rawfiltered)
    windowsize=int(ansampling_rate*2/1000) #Define here the number of points that suit your window (set to 2ms)
    # Create and save the spikeshapes
    # This part will iterate through all the .txt files containing the spiketimes inside the folder.
    answer4 = input("Would you like to see an example of spike from each file? [Y/n]?")
    for m in range(n_spike_trains):
        Chprov1 = data.list_units[m].annotations["id"]
        Label1 = Chprov1.split("#")[1]
        channel1 = np.loadtxt(Chprov1+".txt")
        print(Chprov1)
        print("Starting to get the spikeshapes... Grab a book or something, this might take a while!")
        # Each row of x1: [window start index, window end index, waveform samples...]
        x1=np.empty([1,windowsize+2],int)
        for n in range(len(channel1)):
            # -57 shifts the window start relative to the spike time;
            # NOTE(review): presumably an empirically chosen alignment offset
            # for this sampling rate - confirm before reusing elsewhere.
            a1= int(channel1[n]*ansampling_rate)-57
            analogtxt1=LFP[a1:a1+windowsize].reshape(1,windowsize)
            y1 = np.array([[a1],[a1+windowsize]], np.int32).reshape(1,2)
            res1 = np.append(y1,analogtxt1).reshape(1,-1)
            x1=np.append(x1,res1, axis=0)
        b1=x1[1:]   # drop the uninitialized first row from np.empty
        print("\n" + "Voilà!")
        np.savetxt("SpikeShape#"+Label1+".txt", b1, header="First column = Initial Time; Second column = Final Time; Third Column = First Spike Shape value, etc")
        if answer4 == "" or answer4.lower()[0] == "y":
            window1=int(b1[0][0])
            window2=int(b1[0][1])
            py.fig,(s,s1) = py.subplots(2,1)
            s.plot(LFP[window1:window2])
            s.set_title("SpikeShape from Raw Unfiltered")
            # +57 undoes the alignment offset so both panels show the same spike.
            s1.plot(notLFP[window1+57:window2+57])
            s1.set_title("SpikeShape from Raw Filtered Spike2") # Just like you would see in Spike2
            s.set_ylabel("Amplitude")
            s1.set_ylabel("Amplitude")
            s1.set_xlabel("Sample points")
            py.tight_layout()
            py.show()
##
#
# This function will downsample your LFP signal to 1000Hz and save it as .npy file
def lfpdown(LFPfile, fs=fs):
    """Downsample an LFP signal to 1000 Hz and save it as LFPDownsampled.npy.

    The signal is first smoothed with a 100-point moving average, then every
    fs/1000-th sample of the smoothed trace is kept. Optionally plots the
    raw vs. smoothed signal and the downsampled result.

    Arguments:
    LFPfile -- .npy file inside the folder generated by createsave
               (for example, CSC1.npy).
    fs      -- original sampling rate (Hz); defaults to the module-level fs.
    """
    fs1=int(fs/1000)  # decimation factor: keep one sample every fs1 points
    rawsignal=np.load(LFPfile)
    def mma(series,window):
        # Moving average: convolution with a flat window, same-length output.
        return np.convolve(series,np.repeat(1,window)/window,"same")
    rawsignal=rawsignal[0:][:,0] #window of the array, in case you want to select a specific part
    conv=mma(rawsignal,100) #convolved version
    # Keep every fs1-th sample of the smoothed trace (vectorized form of the
    # original "i % fs1 == 0" loop).
    downsamp=conv[::fs1]
    np.save("LFPDownsampled", downsamp)
    answer=input("Want to see the plots? Might be a bit heavy. [Y/n]")
    if answer == "" or answer.lower()[0] == "y":
        py.fig,(s,s1) = py.subplots(2,1)
        s.plot(rawsignal)
        s.plot(conv)
        s.set_title("Plot of RawSignal X Convolved Version")
        s1.plot(downsamp)
        s1.set_title("LFP Downsampled")
        s.set_ylabel("Amplitude")
        s1.set_ylabel("Amplitude")
        s1.set_xlabel("Sample points")
        # tight_layout must run before show() to affect the rendered figure
        # (the original called it after show, which had no effect).
        py.tight_layout()
        py.show()
##
#
# This function generates spectrogram of the motifs in the song raw signal.
# To be used with the new matfiles.
#
# Arguments:
#
# songfile is the .npy file containing the song signal.
#
# beg, end : are the index that would correspond to the beginning and the end of the motif/syllable (check syllables annotations file for that)
#
# fs = sampling frequency (Hz)
def spectrogram(songfile, beg, end, fs=fs):
    """Plot a song motif/syllable (top panel) and its spectrogram (bottom).

    Arguments:
    songfile -- .npy file containing the song signal.
    beg, end -- sample indices delimiting the motif/syllable
                (see the syllable-annotations file).
    fs       -- sampling frequency (Hz).
    """
    analog= np.load(songfile)
    rawsong1=analog[beg:end].reshape(1,-1)
    rawsong=rawsong1[0]
    #Compute and plot spectrogram
    #(f,t,sp)=scipy.signal.spectrogram(rawsong, fs, window, nperseg, noverlap, scaling="density", mode="complex")
    py.fig, ax = py.subplots(2,1)
    ax[0].plot(rawsong)
    ax[0].set_ylabel("Amplitude")
    ax[0].set_xlabel("Sample points")
    # NOTE(review): NFFT/noverlap/pad_to values appear tuned for birdsong at
    # this sampling rate - confirm before changing.
    _,_,_,im = ax[1].specgram(rawsong,Fs=fs, NFFT=980, noverlap=930, scale_by_freq=False, mode="default", pad_to=915, cmap="inferno")
    # py.imshow(10*np.log10(np.square(abs(sp))), origin="lower", aspect="auto", interpolation="none", cmap="inferno")
    # Hide the spectrogram's x ticks; the sample axis is shown on the top panel.
    ax[1].tick_params(
        axis="x", # changes apply to the x-axis
        which="both", # both major and minor ticks are affected
        bottom=False, # ticks along the bottom edge are off
        top=False, # ticks along the top edge are off
        labelbottom=False)
    ax[1].set_ylabel("Frequency")
    cbar=py.colorbar(im, ax=ax[1])
    cbar.ax.invert_yaxis()
    cbar.set_ticks(np.linspace(cbar.vmin, cbar.vmax, 5, dtype=float))
    cbar.ax.set_yticklabels(np.floor(np.linspace(np.floor(cbar.vmin), cbar.vmax, 5)).astype(int))
    py.tight_layout()
##
#
# This function generates a PSTH for motifs.
# To be used with the new matfiles.
#
# Arguments:
#
# spikefile is the .txt file with the spiketimes.
#
# motifile is the .txt file containing the annotations of the beginning and end of each syllable/motif.
#
# fs is the sampling frequency
#
# basebeg is the start time for baseline computation
#
# basend is the end time for baseline computation
def psth(spikefile, motifile, basebeg, basend,binwidth=binwidth, fs=fs):
    """Plot a PSTH (top panel) and spike raster (bottom panel) for the motifs.

    Each syllable's renditions are time-warped to the syllable's mean duration
    so they can be pooled; syllables are drawn side by side along the x axis.
    A baseline firing rate (mean +/- SD, sampled from [basebeg, basend]) is
    drawn for comparison.

    Arguments:
    spikefile -- .txt file with the spike times (seconds).
    motifile  -- .txt file with the begin/end annotations of each syllable/motif.
    basebeg   -- start time (s) for baseline computation.
    basend    -- end time (s) for baseline computation.
    binwidth  -- PSTH bin width (s).
    fs        -- sampling frequency (Hz) used to convert annotation samples to seconds.
    """
    finallist=sortsyls(motifile)
    #Starts to plot the PSTH
    spused=np.loadtxt(spikefile)
    shoulder= 0.05 #50 ms of context kept on each side of the syllable
    adjust=0    # running x-offset so successive syllables plot side by side
    adj2=0
    meandurall=0
    py.fig, ax = py.subplots(2,1, figsize=(18,15))
    x2=[]
    y2=[]
    # This part will result in an iteration through all the syllables, and then through all the motifs inside each syllable.
    # NOTE(review): range(len(finallist)-1) skips finallist's last entry -
    # presumably intentional, but worth confirming against sortsyls' output.
    for i in range(len(finallist)-1):
        used=finallist[i]/fs # sets which array from finallist will be used.
        meandurall=np.mean(used[:,1]-used[:,0])
        spikes1=[]
        res=-1
        spikes=[]
        basespk=[]
        n0,n1=0,2
        for j in range(len(used)):
            step1=[]
            step2=[]
            step3=[]
            beg= used[j][0] #Will compute the beginning of the window
            end= used[j][1] #Will compute the end of the window
            # step1: spikes inside the shoulder-padded motif, re-referenced to its start
            step1=spused[np.where(np.logical_and(spused >= beg-shoulder, spused <= end+shoulder) == True)]-beg
            # step2: spikes within the motif proper, warped to the mean duration
            step2=step1[np.where(np.logical_and(step1 >= 0, step1 <= end-beg) == True)]*(meandurall/(end-beg))
            # step3: spikes in the trailing shoulder, shifted to follow the warped motif
            step3=step1[np.where(np.logical_and(step1 >= end-beg, step1 <= (end-beg)+shoulder) == True)]+(meandurall-(end-beg))
            spikes1+=[step2+adjust,step3+adjust]
            res=res+1
            spikes2=spikes1
            spikes3=np.concatenate(spikes2[n0:n1]) # Gets the step2 and step3 arrays for scatter
            ax[1].scatter(spikes3,res+np.zeros(len(spikes3)),marker="|", color="black")
            n0+=2
            n1+=2
            ax[1].set_xlim(-shoulder,(shoulder+meandurall)+binwidth+adjust)
            ax[1].set_ylabel("Motif number")
            ax[1].set_xlabel("Time [s]")
            normfactor=len(used)*binwidth
            ax[0].set_ylabel("Spikes/s")
            bins=np.arange(0,(shoulder+meandurall)+binwidth, step=binwidth)
            ax[0].set_xlim(-shoulder,(shoulder+meandurall)+binwidth+adjust)
            ax[0].tick_params(
        axis="x", # changes apply to the x-axis
        which="both", # both major and minor ticks are affected
        bottom=False, # ticks along the bottom edge are off
        top=False, # ticks along the top edge are off
        labelbottom=False)
            # One random baseline snippet (of mean-duration length) per motif.
            basecuts=np.random.choice(np.arange(basebeg,basend))
            test2=spused[np.where(np.logical_and(spused >= basecuts, spused <= basecuts+meandurall) == True)]-basecuts
            basespk+=[test2]
        # Computation of baseline
        b=np.sort(np.concatenate(basespk))
        u,_= py.histogram(b, bins=np.arange(0,meandurall,binwidth)+binwidth, weights=np.ones(len(b))/normfactor)
        basemean=np.mean(u)
        stdbase=np.std(u)
        axis=np.arange(meandurall/3,meandurall*2/3,binwidth)+adjust
        ax[0].plot(axis,np.ones((len(axis),))*basemean, color = "g")
        ax[0].plot(axis,np.ones((len(axis),))*(basemean+stdbase), color = "black")
        ax[0].plot(axis,np.ones((len(axis),))*(basemean-stdbase), color = "black", ls="dashed")
        # Computation of spikes
        spikes=np.sort(np.concatenate(spikes2))
        y1,x1= py.histogram(spikes, bins=bins+adjust, weights=np.ones(len(spikes))/normfactor)
        print(y1)
        ax[0].axvline(x=(shoulder+meandurall)+adjust, color="grey", linestyle="--")
        #ax[0].hist(spikes, bins=bins+adjust, color="b", edgecolor="black", linewidth=1, weights=np.ones(len(spikes))/normfactor, align="left", rwidth=binwidth*10)
        x2+=[x1[:-1]+adj2]
        y2+=[y1[:]]
        adj2=binwidth/20
        adjust=meandurall+shoulder+adjust+adj2
    x4=np.sort(np.concatenate(x2))
    y4=np.concatenate(y2)
    ax[0].plot(x4,y4, color="red")
    #f = scipy.interpolate.interp1d(x4, y4, kind="linear")
    #xnew=np.linspace(min(x4),max(x4), num=100)
    #ax[0].plot(xnew,f(xnew), color="r")
    py.fig.subplots_adjust(hspace=0)
    black_line = mlines.Line2D([], [], color="black", label="+STD")
    # NOTE(review): the dashed line plots basemean-stdbase, so its legend label
    # "+STD" presumably should read "-STD".
    black_dashed = mlines.Line2D([], [], color="black", label="+STD", linestyle="--")
    green_line = mlines.Line2D([], [], color="green", label="Mean")
    ax[0].legend(handles=[black_line,black_dashed,green_line], loc="upper left")
##
#
# Generates correlations for each syllable.
# To be used it with new matfiles.
#
# Arguments:
#
# spikefile is the .txt file with the spiketimes.
#
# motifile is the .txt file containing the annotations of the beginning and end of each syllable/motif.
#
# n_iterations is the number of iterations for the bootstrapping
#
# fs is the sampling frequency
def corrduration(spikefile, motifile, n_iterations=n_iterations,fs=fs):
    """Correlate syllable duration with firing rate, per syllable.

    For every syllable (A-E) found in the motif annotations, pairs each
    rendition's duration with its firing rate (spikes / duration). Outliers
    (|z| >= 3) are removed, then Pearson (if both columns pass Shapiro and
    Levene at alpha) or Spearman correlation is computed, followed by
    n_iterations bootstrap correlations on resampled durations.

    Writes, per syllable: the raw duration/rate pairs, the bootstrap results,
    and a line in SummaryDuration.txt with the original correlation.

    Arguments:
    spikefile    -- .txt file with the spike times (seconds).
    motifile     -- .txt file with the begin/end annotations of each syllable/motif.
    n_iterations -- number of bootstrap iterations.
    fs           -- sampling frequency (Hz).
    """
    #Read and import mat file (new version)
    sybs=["A","B","C","D","E"]
    finallist=sortsyls(motifile)
    #Starts to compute correlations and save the data into txt file (in case the user wants to use it in another software)
    spused=np.loadtxt(spikefile)
    final=[]
    # 'with' guarantees the summary file is closed (the original left it open).
    with open("SummaryDuration.txt", "w+") as f:
        for i in range(len(finallist)):
            used=finallist[i]
            dur=used[:,1]-used[:,0]
            array=np.empty((1,2))
            statistics=[]
            for j in range(len(used)):
                step1=[]
                beg= used[j][0] #Will compute the beginning of the window
                end= used[j][1] #Will compute the end of the window
                step1=spused[np.where(np.logical_and(spused >= beg, spused <= end) == True)]
                array=np.append(array, np.array([[dur[j]],[np.size(step1)/dur[j]]]).reshape(-1,2), axis=0)
            array=array[1:]  # drop the uninitialized row from np.empty
            np.savetxt("Data_Raw_Corr_Duration_Result_Syb"+str(sybs[i])+".txt", array, header="First column is the duration value, second is the number of spikes.")
            threshold = 3 #Standard Deviation threshold for Z score identification of outliers
            z = np.abs(scipy.stats.zscore(array))
            array=array[(z < threshold).all(axis=1)]
            if len(array)<3:
                continue  # too few renditions left to correlate
            else:
                s1=scipy.stats.shapiro(array[:,0])[1]
                s2=scipy.stats.shapiro(array[:,1])[1]
                s3=np.array([s1,s2])
                s3=s3>alpha
                homo=scipy.stats.levene(array[:,0],array[:,1])[1]
                if s3.all() == True and homo > alpha: #test for normality
                    final=scipy.stats.pearsonr(array[:,0],array[:,1]) #if this is used, outcome will have no clear name on it
                    statistics+=[[final[0],final[1]]]
                    # Bootstrapping
                    for q in range(n_iterations):
                        resample=np.random.choice(array[:,0], len(array[:,0]), replace=True)
                        res=scipy.stats.spearmanr(array[:,1],resample)
                        statistics+=[[res[0],res[1]]]
                else:
                    final=scipy.stats.spearmanr(array[:,0],array[:,1]) #if this is used, outcome will have the name spearman on it
                    statistics+=[[final[0],final[1]]]
                    # Bootstrapping
                    for x in range(n_iterations):
                        resample=np.random.choice(array[:,0], len(array[:,0]), replace=True)
                        res=scipy.stats.spearmanr(array[:,1],resample)
                        statistics+=[[res[0],res[1]]]
            np.savetxt("Data_Boot_Corr_Duration_Result_Syb"+str(sybs[i])+".txt", np.array(statistics), header="First column is the correlation value, second is the p value. First line is the original correlation, all below are the bootstrapped correlations.") #First column is the correlation value, second is the p value.
            print("Syllable " + str(sybs[i]) +": " + str(final))
            f.write("Syllable " + str(sybs[i]) +": " + str(final) + "\n")
##
#
# This function allows you to see the envelope for song signal.
#
# Arguments:
#
# songfile is the .npy file containing the song signal.
#
# beg, end : are the index that would correspond to the beginning and the end of the motif/syllable (check syllables annotations file for that)
#
# window_size is the size of the window for the convolve function.
def plotEnvelopes(songfile, beg, end, window_size=window_size):
    """Show a song segment's rectified signal next to its two envelope estimates.

    Three stacked panels (shared y axis): the rectified raw signal alone, the
    squared-window envelope overlaid, and the RMS envelope overlaid.

    Arguments:
    songfile    -- .npy file containing the song signal.
    beg, end    -- sample indices delimiting the motif/syllable.
    window_size -- window size for the envelope computations.
    """
    signal = np.ravel(np.load(songfile)[beg:end])
    squared_env = getEnvelope(signal, window_size)
    rms_env = window_rms(signal, window_size)
    # One panel per envelope variant; the first has no overlay.
    py.fig, axes = py.subplots(3, 1, sharey=True)
    py.xlabel("Sample Points")
    panels = [
        ("Raw Signal", None),
        ("Squared Windows", squared_env),
        ("RMS", rms_env),
    ]
    for axis, (title, overlay) in zip(axes, panels):
        axis.plot(abs(signal))
        if overlay is not None:
            axis.plot(overlay)
        axis.set_ylabel("Amplitude")
        axis.set_title(title)
    py.tight_layout()
    py.show()
##
#
# This function will perform the Fast Fourier Transform to obtain the power spectrum of the syllables.
#
# Arguments:
#
# songfile is the .npy file containing the song signal.
#
# beg, end : are the index that would correspond to the beginning and the end of the motif/syllable (check syllables annotations file for that)
#
# fs is the sampling rate
def powerspectrum(songfile, beg, end, fs=fs):
    """Compute and plot the power spectrum of a song snippet via the FFT.

    Shows the time-domain signal, the double-sided spectrum, and the
    single-sided (positive-frequency) spectrum.

    Arguments:
    songfile -- .npy file containing the song signal.
    beg, end -- sample indices delimiting the motif/syllable
                (see the syllable-annotations file).
    fs       -- sampling rate (Hz).
    """
    signal=np.load(songfile) #The song channel raw data
    signal=signal[beg:end] #I selected just one syllable A to test
    print ("Frequency sampling", fs)
    l_audio = len(signal.shape)
    if l_audio == 2:
        signal = signal.sum(axis=1) / 2  # collapse a stereo signal to mono
    N = signal.shape[0]
    print ("Complete Samplings N", N)
    secs = N / float(fs)
    print ("secs", secs)
    Ts = 1.0/fs # sampling interval in time
    print ("Timestep between samples Ts", Ts)
    # np.arange / np.fft replace the deprecated scipy.arange and callable
    # scipy.fft aliases, which were removed from modern SciPy (scipy.fft is
    # now a module and can no longer be called).
    t = np.arange(0, secs, Ts) # time vector as numpy.ndarray
    FFT = abs(np.fft.fft(signal))**2 # if **2 is power spectrum, without is amplitude spectrum
    FFT_side = FFT[range(int(N/2))] # one side FFT range
    freqs = np.fft.fftfreq(signal.size, t[1]-t[0])
    freqs_side = freqs[range(int(N/2))]
    py.subplot(311)
    py.plot(t, signal, "g") # plotting the signal
    py.xlabel("Time")
    py.ylabel("Amplitude")
    py.subplot(312)
    py.plot(freqs, FFT, "r") # plotting the complete fft spectrum
    py.xlabel("Frequency (Hz)")
    py.title("Double-sided")
    py.ylabel("Power")
    py.subplot(313)
    py.plot(freqs_side, abs(FFT_side), "b") # plotting the positive fft spectrum
    py.xlabel("Frequency (Hz)")
    py.title("Single sided")
    py.ylabel("Power")
    py.tight_layout()
    py.show()
##
#
# This function can be used to obtain the pitch of specific tones inside a syllable.
# It will execute an autocorrelation for the identification of the pitchs.
#
# Arguments:
#
# songfile is the .npy file containing the song signal.
#
# motifile is the .txt file containing the annotations of the beginning and end of each syllable/motif.
#
# lags is the number of lags for the autocorrelation
#
# window_size is the size of the window for the convolve function (RMS of signal)
#
# fs is the sampling rate
def corrpitch(songfile, motifile,spikefile, lags=lags, window_size=window_size,fs=fs, means=None):
    """Estimate the pitch of tones inside a syllable (via autocorrelation)
    and correlate it with premotor and "during" firing rates.

    Interactive: the user picks the syllable, the cutting points between the
    tones (unless a saved means file is given), and the autocorrelation peaks.

    Arguments:
    songfile    -- .npy file containing the song signal.
    motifile    -- .txt file with the begin/end annotations of each syllable/motif.
    spikefile   -- .txt file with the spike times (seconds).
    lags        -- number of lags for the autocorrelation.
    window_size -- window size for the RMS envelope.
    fs          -- sampling rate (Hz).
    means       -- optional .txt file with previously saved cutting points;
                   if None they are chosen interactively.
    """
    #Read and import files that will be needed
    spused=np.loadtxt(spikefile)
    song=np.load(songfile)
    finallist=sortsyls(motifile)
    #Will filter which array will be used
    answer=input("Which syllable?")
    if answer.lower() == "a":
        used=finallist[0]
    elif answer.lower() == "b":
        used=finallist[1]
    elif answer.lower() == "c":
        used=finallist[2]
    elif answer.lower() == "d":
        used=finallist[3]
    if means is not None:
        # Cutting points were saved on a previous run: reuse them.
        means = np.loadtxt(means).astype(int)
        syb=song[int(used[0][0]):int(used[0][1])]
        pass
    else:
        #Will plot an example of the syllable for you to get an idea of the number of chunks
        fig, az = py.subplots()
        example=song[int(used[0][0]):int(used[0][1])]
        tempo=np.linspace(used[0][0]/fs, used[0][1]/fs, len(example))
        abso=abs(example)
        az.plot(tempo,example)
        az.plot(tempo,abso)
        rms=window_rms(np.ravel(example),window_size)
        az.plot(tempo[:len(rms)],rms)
        az.set_title("Click on graph to move on.")
        py.waitforbuttonpress(10)
        numcuts=int(input("Number of chunks?"))
        py.close()
        # Will provide you 4 random examples of syllables to establish the cutting points
        coords2=[]
        for j in range(4):
            j=random.randint(0,len(used)-1)
            fig, ax = py.subplots()
            syb=song[int(used[j][0]):int(used[j][1])]
            abso=abs(syb)
            ax.plot(abso)
            rms=window_rms(np.ravel(syb),window_size)
            ax.plot(rms)
            py.waitforbuttonpress(10)
            while True:
                coords = []
                while len(coords) < numcuts+1:
                    tellme("Select the points to cut with mouse")
                    coords = np.asarray(py.ginput(numcuts+1, timeout=-1, show_clicks=True))
                    scat = py.scatter(coords[:,0],coords[:,1], s=50, marker="X", zorder=10, c="r")
                tellme("Happy? Key click for yes, mouse click for no")
                if py.waitforbuttonpress():
                    break
                else:
                    scat.remove()
            py.close()
            coords2=np.append(coords2,coords[:,0])
        #Will keep the mean coordinates for the cuts
        coords2.sort()
        coords2=np.split(coords2,numcuts+1)
        means=[]
        for k in range(len(coords2)):
            means+=[int(np.mean(coords2[k]))]
        np.savetxt("Mean_cut_syb"+answer+".txt", means)
    # Will plot how the syllables will be cut according to the average of the coordinates clicked before by the user
    py.plot(syb)
    for l in range(1,len(means)):
        py.plot(np.arange(means[l-1],means[l-1]+len(syb[means[l-1]:means[l]])),syb[means[l-1]:means[l]])
    # Autocorrelation and Distribution
    # One iteration per tone (the interval between consecutive cutting points).
    for m in range(1,len(means)):
        spikespremot=[]
        spikesdur=[]
        freq2=[]
        coords5=[]
        fig=py.figure(figsize=(18,15))
        gs=py.GridSpec(2,2)
        a2=fig.add_subplot(gs[0,0]) # First row, first column
        a3=fig.add_subplot(gs[0,1]) # First row, second column
        a1=fig.add_subplot(gs[1,:]) # Second row, full width
        for n in range(len(used)):
            syb=song[int(used[n][0]):int(used[n][1])] #Will get the syllables for each rendition
            sybcut=syb[means[m-1]:means[m]] #Will apply the cuts for the syllable
            x2=np.arange(0,len(acf(sybcut,nlags=int(lags))),1)
            f=scipy.interpolate.interp1d(x2,acf(sybcut, nlags=int(lags), unbiased=True), kind="quadratic")
            xnew=np.linspace(min(x2),max(x2), num=1000)
            a1.plot(xnew,f(xnew))
        a1.set_xlabel("Number of Lags")
        a1.set_ylabel("Autocorrelation score")
        a1.set_label(tellme("Want to keep it? Key click (x2) for yes, mouse click for no"))
        gs.tight_layout(fig)
        if not py.waitforbuttonpress(30):
            py.close()
            continue
        else:
            py.waitforbuttonpress(30)
            while True:
                coord=[]
                while len(coord) < 2:
                    tellme("Select the points for the peak.") #You should choose in the graph the range that represents the peak
                    coord = np.asarray(py.ginput(2, timeout=-1, show_clicks=True))
                    scat=a1.scatter(coord[:,0],coord[:,1], s=50, marker="X", zorder=10, c="b")
                tellme("Happy? Key click for yes, mouse click for no")
                if py.waitforbuttonpress(30):
                    break
                else:
                    scat.remove()
            coords5=coord[:,0]*10 # times ten is because of the linspace being 1000
            a1.clear()
            #From now it will use the coordinates of the peak to plot the distribution and the interpolated version of the peak
            for x in range(len(used)):
                syb=song[int(used[x][0]):int(used[x][1])]
                sybcut=syb[means[m-1]:means[m]]
                x2=np.arange(0,len(acf(sybcut,nlags=int(lags))),1)
                f=scipy.interpolate.interp1d(x2,acf(sybcut, nlags=int(lags), unbiased=True), kind="quadratic")
                xnew=np.linspace(min(x2),max(x2), num=1000)
                a1.plot(xnew,f(xnew))
                x3=xnew[int(coords5[0]):int(coords5[1])]
                g=scipy.interpolate.interp1d(x3,f(xnew)[int(coords5[0]):int(coords5[1])], kind="cubic")
                xnew2=np.linspace(min(x3),max(x3), num=1000)
                a2.plot(xnew2,g(xnew2))
                peak=np.argmax(g(xnew2))
                freq2+=[xnew2[peak]]
                beg=(used[x][0] + means[m-1])/fs
                end=(used[x][0] + means[m])/fs
                step1=spused[np.where(np.logical_and(spused >= beg-premot, spused <= beg) == True)]
                step2=spused[np.where(np.logical_and(spused >= beg, spused <= end) == True)]
                spikespremot+=[[np.size(step1)/(beg-(beg-premot))]]
                spikesdur+=[[np.size(step2)/(end-beg)]]
            statistics=[]
            statistics2=[]
            spikesdur=np.array(spikesdur)[:,0]
            spikespremot=np.array(spikespremot)[:,0]
            freq2=np.array(freq2)
            # Convert the peak lag (in samples) to a frequency in Hz.
            freq2=np.reciprocal(freq2/fs)
            total = np.column_stack((freq2,spikespremot,spikesdur))
            np.savetxt("Data_Raw_Corr_Pitch_Result_Syb" + answer + "_tone_" + str(m) + ".txt", total, header="First column is the pitch value, second is the number of spikes inside premotor window, third is the number of spikes inside 'during' window.")
            #Here it will give you the possibility of computing the correlations and Bootstrapping
            an=input("Correlations?")
            if an.lower() == "n":
                pass
            else:
                threshold = 3 #Standard Deviation threshold for Z score identification of outliers
                total1=np.column_stack((freq2,spikespremot))
                total2=np.column_stack((freq2,spikesdur))
                z1 = np.abs(scipy.stats.zscore(total1))
                z2 = np.abs(scipy.stats.zscore(total2))
                total1=total1[(z1 < threshold).all(axis=1)]
                total2=total2[(z2 < threshold).all(axis=1)]
                a = total1[:,1] == 0
                b = total2[:,1] == 0
                #This will get the data for Pitch vs Premotor
                if len(total1) < 3 or all(a) == True:
                    pass
                else:
                    s1=scipy.stats.shapiro(total1[:,0])[1] #Pitch column
                    s2=scipy.stats.shapiro(total1[:,1])[1] #Premot Column
                    # NOTE(review): 'total[:,1]' mixes the outlier-filtered total1
                    # with the unfiltered total - looks like it should be
                    # total1[:,1]; confirm before relying on this test.
                    homo=scipy.stats.levene(total1[:,0],total[:,1])[1]
                    comb1=np.array([s1,s2,homo])
                    comb1=comb1>alpha
                    if comb1.all() == True: #test for normality
                        final=scipy.stats.pearsonr(total1[:,0],total1[:,1]) #if this is used, outcome will have no clear name on it
                        statistics+=[[final[0],final[1]]]
                        # Bootstrapping
                        for q in range(n_iterations):
                            resample=np.random.choice(total1[:,0], len(total1[:,0]), replace=True)
                            res=scipy.stats.spearmanr(total1[:,1],resample)
                            statistics+=[[res[0],res[1]]]
                    else:
                        final=scipy.stats.spearmanr(total1[:,0],total1[:,1]) #if this is used, outcome will have the name spearman on it
                        statistics+=[[final[0],final[1]]]
                        # Bootstrapping
                        for q in range(n_iterations):
                            resample=np.random.choice(total1[:,0], len(total1[:,0]), replace=True)
                            res=scipy.stats.spearmanr(total1[:,1],resample)
                            statistics+=[[res[0],res[1]]]
                    np.savetxt("Data_Boot_Corr_Pitch_Result_Syb" + answer + "_tone_" + str(m)+ "_Premotor.txt", statistics, header="First column is the correlation value, second is the p value. First line is the original correlation, all below are the bootstrapped correlations.")
                    print(final)
                #This will get the data for Pitch vs During
                if len(total2) < 3 or all(b) == True:
                    pass
                else:
                    s1=scipy.stats.shapiro(total2[:,0])[1] #Pitch column
                    s2=scipy.stats.shapiro(total2[:,1])[1] #During Column
                    homo=scipy.stats.levene(total2[:,0],total2[:,1])[1]
                    comb1=np.array([s1,s2,homo])
                    comb1=comb1>alpha
                    if comb1.all() == True: #test for normality
                        final=scipy.stats.pearsonr(total2[:,0],total2[:,1]) #if this is used, outcome will have no clear name on it
                        statistics2+=[[final[0],final[1]]]
                        # Bootstrapping
                        for q in range(n_iterations):
                            resample=np.random.choice(total2[:,0], len(total2[:,0]), replace=True)
                            res=scipy.stats.spearmanr(total2[:,1],resample)
                            statistics2+=[[res[0],res[1]]]
                    else:
                        final=scipy.stats.spearmanr(total2[:,0],total2[:,1]) #if this is used, outcome will have the name spearman on it
                        statistics2+=[[final[0],final[1]]]
                        # Bootstrapping
                        for q in range(n_iterations):
                            resample=np.random.choice(total2[:,0], len(total2[:,0]), replace=True)
                            res=scipy.stats.spearmanr(total2[:,1],resample)
                            statistics2+=[[res[0],res[1]]]
                    np.savetxt("Data_Boot_Corr_Pitch_Result_Syb" + answer + "_tone_" + str(m)+ "_During.txt", statistics2, header="First column is the correlation value, second is the p value. First line is the original correlation, all below are the bootstrapped correlations.")
                    print(final)
            a2.set_xlabel("Number of Lags")
            a2.set_ylabel("Autocorrelation score")
            a3.hist(freq2, bins=int(np.mean(freq2)*0.01))
            a3.set_xlabel("Frequency (Hz)")
            a1.set_xlabel("Number of Lags")
            a1.set_ylabel("Autocorrelation score")
            a1.set_label(tellme("Now let's select the frequency. Key click (x2) for yes, mouse click for no")) #Here you will be asked to select a point in the peak that could represent the frequency (just to get an estimation)
            gs.tight_layout(fig)
            if not py.waitforbuttonpress(30):
                py.savefig("Corr_Pitch_syb"+ answer +"_tone"+ str(m)+".tif")
                py.close()
                continue
            else:
                py.waitforbuttonpress(30)
                while True:
                    freq = []
                    while len(freq) < 1:
                        tellme("Select the point for the frequency.")
                        freq = np.asarray(py.ginput(1, timeout=-1, show_clicks=True))
                        scat= a1.scatter(freq[:,0],freq[:,1], s=50, marker="X", zorder=10, c="b")
                        ann=a1.annotate(str(int(np.reciprocal(freq[:,0]/fs))) +" Hz", xy=(freq[:,0],freq[:,1]), xytext=(freq[:,0]*1.2,freq[:,1]*1.2),
                            arrowprops=dict(facecolor="black", shrink=0.05))
                    tellme("Happy? Key click for yes, mouse click for no")
                    if py.waitforbuttonpress(30):
                        py.savefig("Corr_Pitch_syb"+ answer +"_tone"+ str(m)+".tif")
                        break
                    else:
                        ann.remove()
                        scat.remove()
##
#
# This function can be used to obtain the amplitude and its correlations of specific tones inside a syllable.
# It will allow you to work with the means or the area under the curve (integration)
#
# Arguments:
#
# songfile is the .npy file containing the song signal.
#
# motifile is the .txt file containing the annotations of the beginning and end of each syllable/motif.
#
# fs is the sampling rate.
#
# means is the .txt that contains the cutting points for the tones. If None, it will allow you to create this list of means by visual inspection of plots.
def corramplitude(songfile, motifile, spikefile, fs=fs, window_size=window_size, means=None):
    """Measure the amplitude of tones inside a syllable (mean or integrated
    smoothed envelope) and correlate it with premotor and "during" firing rates.

    Interactive: the user picks the syllable, the cutting points between the
    tones (unless a saved means file is given), and whether to use means or
    integration (area under the envelope) for the amplitude measure.

    Arguments:
    songfile    -- .npy file containing the song signal.
    motifile    -- .txt file with the begin/end annotations of each syllable/motif.
    spikefile   -- .txt file with the spike times (seconds).
    fs          -- sampling rate (Hz).
    window_size -- window size for the RMS envelope.
    means       -- optional .txt file with previously saved cutting points;
                   if None they are chosen interactively.
    """
    #Read and import files that will be needed
    spused=np.loadtxt(spikefile)
    song=np.load(songfile)
    finallist=sortsyls(motifile)
    #Will filter which array will be used
    answer=input("Which syllable?")
    if answer.lower() == "a":
        used=finallist[0]
    elif answer.lower() == "b":
        used=finallist[1]
    elif answer.lower() == "c":
        used=finallist[2]
    elif answer.lower() == "d":
        used=finallist[3]
    if means is not None:
        # Cutting points were saved on a previous run: reuse them.
        means = np.loadtxt(means).astype(int)
        syb=song[int(used[0][0]):int(used[0][1])]
        pass
    else:
        #Will plot an example of the syllable for you to get an idea of the number of chunks
        fig, az = py.subplots()
        example=song[int(used[0][0]):int(used[0][1])]
        tempo=np.linspace(used[0][0]/fs, used[0][1]/fs, len(example))
        abso=abs(example)
        az.plot(tempo,example)
        az.plot(tempo,abso)
        smooth=smoothed(np.ravel(example),fs)
        az.plot(tempo[:len(smooth)],smooth)
        az.set_title("Click on graph to move on.")
        py.waitforbuttonpress(10)
        numcuts=int(input("Number of chunks?"))
        py.close()
        # Will provide you 4 random examples of syllables to establish the cutting points
        coords2=[]
        for j in range(4):
            j=random.randint(0,len(used)-1)
            fig, ax = py.subplots()
            syb=song[int(used[j][0]):int(used[j][1])]
            abso=abs(syb)
            ax.plot(abso)
            rms=window_rms(np.ravel(syb),window_size)
            ax.plot(rms)
            py.waitforbuttonpress(10)
            while True:
                coords = []
                while len(coords) < numcuts+1:
                    tellme("Select the points to cut with mouse")
                    coords = np.asarray(py.ginput(numcuts+1, timeout=-1, show_clicks=True))
                    scat = py.scatter(coords[:,0],coords[:,1], s=50, marker="X", zorder=10, c="r")
                tellme("Happy? Key click for yes, mouse click for no")
                if py.waitforbuttonpress():
                    break
                else:
                    scat.remove()
            py.close()
            coords2=np.append(coords2,coords[:,0])
        #Will keep the mean coordinates for the cuts
        coords2.sort()
        coords2=np.split(coords2,numcuts+1)
        means=[]
        for k in range(len(coords2)):
            means+=[int(np.mean(coords2[k]))]
        np.savetxt("Mean_cut_syb"+answer+".txt", means)
    # Will plot how the syllables will be cut according to the average of the coordinates clicked before by the user
    py.plot(syb)
    for l in range(1,len(means)):
        py.plot(np.arange(means[l-1],means[l-1]+len(syb[means[l-1]:means[l]])),syb[means[l-1]:means[l]])
    # Autocorrelation and Distribution
    an2=input("Want to execute correlations with Means or Integration?")
    # One iteration per tone (the interval between consecutive cutting points).
    for m in range(1,len(means)):
        spikespremot=[]
        spikesdur=[]
        amps=[]
        integ=[]
        fig=py.figure(figsize=(18,15))
        gs=py.GridSpec(2,3)
        a1=fig.add_subplot(gs[0,:]) # First row, full width
        a2=fig.add_subplot(gs[1,0]) # Second row, first column
        a3=fig.add_subplot(gs[1,1])
        a4=fig.add_subplot(gs[1,2])
        statistics=[]
        statistics2=[]
        for n in range(len(used)):
            syb=song[int(used[n][0]):int(used[n][1])] #Will get the syllables for each rendition
            sybcut=syb[means[m-1]:means[m]] #Will apply the cuts for the syllable
            smooth=smoothed(np.ravel(sybcut),fs)
            beg=(used[n][0] + means[m-1])/fs
            end=(used[n][0] + means[m])/fs
            step1=spused[np.where(np.logical_and(spused >= beg-premot, spused <= beg) == True)]
            step2=spused[np.where(np.logical_and(spused >= beg, spused <= end) == True)]
            spikespremot+=[[np.size(step1)/(beg-(beg-premot))]]
            spikesdur+=[[np.size(step2)/(end-beg)]]
            # Two amplitude measures: mean of the envelope, and its integral.
            amps+=[np.mean(smooth)]
            integ+=[scipy.integrate.simps(smooth)]
            a1.plot(abs(sybcut))
            a1.set_title("Syllable " + answer + " Tone " + str(m))
            a1.set_xlabel("Sample points")
            a1.set_ylabel("Amplitude")
            a1.fill_between(np.arange(0,len(smooth),1), 0, smooth, zorder=10, color="b", alpha=0.1)
        spikesdur=np.array(spikesdur)[:,0]
        spikespremot=np.array(spikespremot)[:,0]
        amps=np.array(amps)
        integ=np.array(integ)
        if an2[0].lower() == "m":
            total = np.column_stack((amps,spikespremot,spikesdur))
            np.savetxt("Data_Raw_Corr_Amplitude_Result_Syb" + answer + "_tone_" + str(m) + "_" + an2 + ".txt", total, header="First column is the amplitude value, second is the number of spikes inside premotor window, third is the number of spikes inside 'during' window.")
            total1=np.column_stack((amps,spikespremot))
            total2=np.column_stack((amps,spikesdur))
            a2.hist(amps)
            a2.set_title("Distribution of the Raw Means")
            a2.set_ylabel("Frequency")
            a2.set_xlabel("Mean Values")
        else:
            total = np.column_stack((integ,spikespremot,spikesdur))
            np.savetxt("Data_Raw_Corr_Amplitude_Result_Syb" + answer + "_tone_" + str(m)+ "_" + an2 + ".txt", total, header="First column is the amplitude value, second is the number of spikes inside premotor window, third is the number of spikes inside 'during' window.")
            total1=np.column_stack((integ,spikespremot))
            total2=np.column_stack((integ,spikesdur))
            a2.hist(integ)
            a2.set_title("Distribution of the Raw Integration")
        #Here it will give you the possibility of computing the correlations and Bootstrapping
        an=input("Correlations?")
        if an.lower() == "n":
            pass
        else:
            threshold = 3 #Standard Deviation threshold for Z score identification of outliers
            z1 = np.abs(scipy.stats.zscore(total1))
            z2 = np.abs(scipy.stats.zscore(total2))
            total1=total1[(z1 < threshold).all(axis=1)]
            total2=total2[(z2 < threshold).all(axis=1)]
            a = total1[:,1] == 0
            b = total2[:,1] == 0
            if len(total1) < 3 or all(a) == True:
                pass
            else:
                s1=scipy.stats.shapiro(total1[:,0])[1] #Amplitude column
                s2=scipy.stats.shapiro(total1[:,1])[1] #Premot Column
                # NOTE(review): 'total[:,1]' mixes the outlier-filtered total1
                # with the unfiltered total - looks like it should be
                # total1[:,1]; confirm before relying on this test.
                homo=scipy.stats.levene(total1[:,0],total[:,1])[1]
                comb1=np.array([s1,s2,homo])
                comb1=comb1>alpha
                #This will get the data for Amplitude vs Premotor
                if comb1.all() == True: #test for normality
                    final=scipy.stats.pearsonr(total1[:,0],total1[:,1]) #if this is used, outcome will have no clear name on it
                    statistics+=[[final[0],final[1]]]
                    # Bootstrapping
                    for q in range(n_iterations):
                        resample=np.random.choice(total1[:,0], len(total1[:,0]), replace=True)
                        res=scipy.stats.spearmanr(total1[:,1],resample)
                        statistics+=[[res[0],res[1]]]
                else:
                    final=scipy.stats.spearmanr(total1[:,0],total1[:,1]) #if this is used, outcome will have the name spearman on it
                    statistics+=[[final[0],final[1]]]
                    # Bootstrapping
                    for q in range(n_iterations):
                        resample=np.random.choice(total1[:,0], len(total1[:,0]), replace=True)
                        res=scipy.stats.spearmanr(total1[:,1],resample)
                        statistics+=[[res[0],res[1]]]
                np.savetxt("Data_Boot_Corr_Amplitude_Result_Syb" + answer + "_tone_" + str(m)+ "_Premotor_"+ an2 +".txt", statistics, header="First column is the correlation value, second is the p value. First line is the original correlation, all below are the bootstrapped correlations.")
                print(final)
                a3.hist(np.array(statistics)[:,0])
                a3.set_title("Bootstrap Premotor")
                a3.set_xlabel("Correlation Values")
            #This will get the data for Pitch vs During
            if len(total2) < 3 or all(b) == True:
                pass
            else:
                s1=scipy.stats.shapiro(total2[:,0])[1] #Amplitude column
                s2=scipy.stats.shapiro(total2[:,1])[1] #During Column
                homo=scipy.stats.levene(total2[:,0],total2[:,1])[1]
                comb1=np.array([s1,s2,homo])
                comb1=comb1>alpha
                if comb1.all() == True: #test for normality
                    final=scipy.stats.pearsonr(total2[:,0],total2[:,1]) #if this is used, outcome will have no clear name on it
                    statistics2+=[[final[0],final[1]]]
                    # Bootstrapping
                    for q in range(n_iterations):
                        resample=np.random.choice(total2[:,0], len(total2[:,0]), replace=True)
                        res=scipy.stats.spearmanr(total2[:,1],resample)
                        statistics2+=[[res[0],res[1]]]
                else:
                    final=scipy.stats.spearmanr(total2[:,0],total2[:,1]) #if this is used, outcome will have the name spearman on it
                    statistics2+=[[final[0],final[1]]]
                    # Bootstrapping
                    for q in range(n_iterations):
                        resample=np.random.choice(total2[:,0], len(total2[:,0]), replace=True)
                        res=scipy.stats.spearmanr(total2[:,1],resample)
                        statistics2+=[[res[0],res[1]]]
                np.savetxt("Data_Boot_Corr_Amplitude_Result_Syb" + answer + "_tone_" + str(m)+ "_During_" + an2 + ".txt", statistics2, header="First column is the correlation value, second is the p value. First line is the original correlation, all below are the bootstrapped correlations.")
                a4.hist(np.array(statistics2)[:,0])
                a4.set_title("Bootstrap During")
                a4.set_xlabel("Correlation Values")
                print(final)
        py.savefig(fname="Corr_Amplitude_syb"+ answer +"_tone"+ str(m) +".tif")
##
# This function computes the Spectral Entropy of a signal.
#The power spectrum is computed through fft. Then, it is normalised and assimilated to a probability density function.
#
# Arguments:
# ----------
# signal : list or array
# List or array of values.
# sampling_rate : int
# Sampling rate (samples/second).
# bands : list or array
# A list of numbers delimiting the bins of the frequency bands. If None the entropy is computed over the whole range of the DFT (from 0 to `f_s/2`).
#
# Returns
# ----------
# spectral_entropy : float
# The spectral entropy as float value.
def complexity_entropy_spectral(signal, fs=fs, bands=None):
    """Return the spectral entropy of *signal* as a float.

    The power spectrum is obtained from the real FFT of the signal and
    normalised to sum to one, so it can be treated as a probability
    density; the Shannon entropy (base 2) is then computed over it.

    Based on the `pyrem <https://github.com/gilestrolab/pyrem>`_ repo by <NAME>.

    Parameters
    ----------
    signal : list or array
        Sequence of signal values.
    fs : int
        Sampling rate (samples/second); defaults to the module-level ``fs``.
        Only used when ``bands`` is given.
    bands : list or array, optional
        Ascending frequency limits delimiting the bins of the frequency
        bands. If None, the entropy is computed over the whole range of the
        DFT (from 0 to ``fs/2``) using every non-zero coefficient.

    Example
    ----------
    >>> import neurokit as nk
    >>>
    >>> signal = np.sin(np.log(np.random.sample(666)))
    >>> spectral_entropy = nk.complexity_entropy_spectral(signal, 1000)

    *Authors*
    - <NAME> (https://github.com/qgeissmann)
    *Dependencies*
    - numpy
    """
    # Normalised PSD acts as a probability distribution over frequencies.
    psd = np.abs(np.fft.rfft(signal)) ** 2
    psd /= np.sum(psd)
    if bands is None:
        # Keep only non-zero entries so log2 below is well defined.
        power_per_band = psd[psd > 0]
    else:
        freqs = np.fft.rfftfreq(signal.size, 1 / float(fs))
        bands = np.asarray(bands)
        # Band edges: [0, b0), [b0, b1), ..., [b_last, inf).
        freq_limits_low = np.concatenate([[0.0], bands])
        # np.inf, not np.Inf: the capitalised alias was removed in NumPy 2.0.
        freq_limits_up = np.concatenate([bands, [np.inf]])
        power_per_band = np.array(
            [np.sum(psd[(freqs >= low) & (freqs < up)])
             for low, up in zip(freq_limits_low, freq_limits_up)])
        power_per_band = power_per_band[power_per_band > 0]
    return -np.sum(power_per_band * np.log2(power_per_band))
##
#
# This function can be used to obtain the spectral entropy and its correlations of specific tones inside a syllable.
#
# Arguments:
#
# songfile is the .npy file containing the song signal.
#
# motifile is the .txt file containing the annotations of the beginning and end of each syllable/motif.
#
# fs is the sampling rate (Hz)
#
# means is a .txt file that contains the cutting points for the tones. If None, it will allow you to create this list of means by visual inspection of plots.
def corrspectral(songfile, motifile, spikefile, fs=fs, window_size=window_size, means=None):
    """Correlate per-tone spectral entropy with premotor/during firing rates.

    Interactive analysis: the user chooses a syllable, optionally clicks the
    tone cut points on example plots, and decides whether to run the
    correlation + bootstrap stage for each tone.

    Parameters
    ----------
    songfile : str
        .npy file containing the song signal.
    motifile : str
        .txt file with the annotations of the beginning and end of each
        syllable/motif (consumed by ``sortsyls``).
    spikefile : str
        .txt file with the spike times (seconds).
    fs : int
        Sampling rate (Hz); defaults to the module-level ``fs``.
    window_size : int
        RMS window size for the example plots; defaults to the module-level
        ``window_size``.
    means : str or None
        .txt file with the cutting points for the tones. If None, the cut
        points are created by visual inspection of plots.

    Side effects: writes "Mean_cut_*", "Data_Raw_*" and "Data_Boot_*" .txt
    files and saves a .tif figure; uses ``input()`` and mouse interaction.
    """
    spused=np.loadtxt(spikefile)
    song=np.load(songfile)
    finallist=sortsyls(motifile)
    # Select which syllable array will be used
    answer=input("Which syllable?")
    if answer.lower() == "a":
        used=finallist[0]
    elif answer.lower() == "b":
        used=finallist[1]
    elif answer.lower() == "c":
        used=finallist[2]
    elif answer.lower() == "d":
        used=finallist[3]
    if means is not None:
        means = np.loadtxt(means).astype(int)
        syb=song[int(used[0][0]):int(used[0][1])]
        pass
    else:
        # Plot an example of the syllable so the user can decide the number of chunks
        fig, az = py.subplots()
        example=song[int(used[0][0]):int(used[0][1])]
        tempo=np.linspace(used[0][0]/fs, used[0][1]/fs, len(example))
        abso=abs(example)
        az.plot(tempo,example)
        az.plot(tempo,abso)
        smooth=smoothed(np.ravel(example), fs)
        az.plot(tempo[:len(smooth)],smooth)
        az.set_title("Click on graph to move on.")
        py.waitforbuttonpress(10)
        numcuts=int(input("Number of chunks?"))
        py.close()
        # Show 4 random example syllables so the user can establish the cutting points
        coords2=[]
        for j in range(4):
            j=random.randint(0,len(used)-1)
            fig, ax = py.subplots()
            syb=song[int(used[j][0]):int(used[j][1])]
            abso=abs(syb)
            ax.plot(abso)
            rms=window_rms(np.ravel(syb),window_size)
            ax.plot(rms)
            py.waitforbuttonpress(10)
            while True:
                coords = []
                while len(coords) < numcuts+1:
                    tellme("Select the points to cut with mouse")
                    coords = np.asarray(py.ginput(numcuts+1, timeout=-1, show_clicks=True))
                scat = py.scatter(coords[:,0],coords[:,1], s=50, marker="X", zorder=10, c="r")
                tellme("Happy? Key click for yes, mouse click for no")
                if py.waitforbuttonpress():
                    break
                else:
                    scat.remove()
            py.close()
            coords2=np.append(coords2,coords[:,0])
        # Keep the mean coordinate of each cut across the 4 examples
        coords2.sort()
        coords2=np.split(coords2,numcuts+1)
        means=[]
        for k in range(len(coords2)):
            means+=[int(np.mean(coords2[k]))]
        np.savetxt("Mean_cut_syb"+answer+".txt", means)
    # Plot how the syllables will be cut according to the average of the coordinates clicked above
    py.plot(syb)
    for l in range(1,len(means)):
        py.plot(np.arange(means[l-1],means[l-1]+len(syb[means[l-1]:means[l]])),syb[means[l-1]:means[l]])
    # Autocorrelation and Distribution
    for m in range(1,len(means)):
        spikespremot=[]
        spikesdur=[]
        specent=[]
        fig=py.figure(figsize=(18,15))
        gs=py.GridSpec(1,3)
        a2=fig.add_subplot(gs[0,0]) # First row, first column
        a3=fig.add_subplot(gs[0,1])
        a4=fig.add_subplot(gs[0,2])
        statistics=[]
        statistics2=[]
        for n in range(len(used)):
            syb=song[int(used[n][0]):int(used[n][1])] #Will get the syllables for each rendition
            sybcut=syb[means[m-1]:means[m]] #Will apply the cuts for the syllable
            SE=complexity_entropy_spectral(sybcut[:,0],fs)
            beg=(used[n][0] + means[m-1])/fs
            end=(used[n][0] + means[m])/fs
            step1=spused[np.where(np.logical_and(spused >= beg-premot, spused <= beg) == True)]
            step2=spused[np.where(np.logical_and(spused >= beg, spused <= end) == True)]
            spikespremot+=[[np.size(step1)/(beg-(beg-premot))]]
            spikesdur+=[[np.size(step2)/(end-beg)]]
            specent+=[[SE]]
        fig.suptitle("Syllable " + answer + " Tone " + str(m))
        spikesdur=np.array(spikesdur)[:,0]
        spikespremot=np.array(spikespremot)[:,0]
        specent=np.array(specent)
        total = np.column_stack((specent,spikespremot,spikesdur))
        np.savetxt("Data_Raw_Corr_SpecEnt_Result_Syb" + answer + "_tone_" + str(m) + ".txt", total, header="First column is the spectral value, second is the number of spikes inside premotor window, third is the number of spikes inside 'during' window.")
        #Here it will give you the possibility of computing the correlations and Bootstrapping
        an=input("Correlations?")
        if an.lower() == "n":
            pass
        else:
            threshold = 3 #Standard Deviation threshold for Z score identification of outliers
            total1=np.column_stack((specent,spikespremot))
            total2=np.column_stack((specent,spikesdur))
            z1 = np.abs(scipy.stats.zscore(total1))
            z2 = np.abs(scipy.stats.zscore(total2))
            total1=total1[(z1 < threshold).all(axis=1)]
            total2=total2[(z2 < threshold).all(axis=1)]
            a = total1[:,1] == 0
            b = total2[:,1] == 0
            a2.hist(specent)
            a2.set_title("Distribution of the Raw Spectral Entropy")
            a2.set_ylabel("Frequency")
            a2.set_xlabel("Spectral Values")
            #This will get the data for Spectral Entropy vs Premotor
            if len(total1) < 3 or all(a) == True:
                pass
            else:
                s1=scipy.stats.shapiro(total1[:,0])[1] #Spectral Entropy column
                s2=scipy.stats.shapiro(total1[:,1])[1] #Premot Column
                # BUG FIX: the second sample was previously taken from the
                # unfiltered `total` array (`total[:,1]`) instead of the
                # outlier-filtered `total1`, silently testing homogeneity
                # against the wrong data (cf. the During branch below).
                homo=scipy.stats.levene(total1[:,0],total1[:,1])[1]
                comb1=np.array([s1,s2,homo])
                comb1=comb1>alpha
                if comb1.all() == True: #test for normality
                    final=scipy.stats.pearsonr(total1[:,0],total1[:,1]) #if this is used, outcome will have no clear name on it
                    statistics+=[[final[0],final[1]]]
                    # Bootstrapping
                    for q in range(n_iterations):
                        resample=np.random.choice(total1[:,0], len(total1[:,0]), replace=True)
                        res=scipy.stats.spearmanr(total1[:,1],resample)
                        statistics+=[[res[0],res[1]]]
                else:
                    final=scipy.stats.spearmanr(total1[:,0],total1[:,1]) #if this is used, outcome will have the name spearman on it
                    statistics+=[[final[0],final[1]]]
                    # Bootstrapping
                    for q in range(n_iterations):
                        resample=np.random.choice(total1[:,0], len(total1[:,0]), replace=True)
                        res=scipy.stats.spearmanr(total1[:,1],resample)
                        statistics+=[[res[0],res[1]]]
                np.savetxt("Data_Boot_Corr_SpecEnt_Result_Syb" + answer + "_tone_" + str(m)+ "_Premotor.txt", statistics, header="First column is the correlation value, second is the p value. First line is the original correlation, all below are the bootstrapped correlations.")
                print(final)
                a3.hist(np.array(statistics)[:,0])
                a3.set_title("Bootstrap Premotor")
                a3.set_xlabel("Correlation Values")
            #This will get the data for Spectral Entropy vs During
            if len(total2) < 3 or all(b) == True:
                pass
            else:
                s1=scipy.stats.shapiro(total2[:,0])[1] #Spectral Entropy column
                s2=scipy.stats.shapiro(total2[:,1])[1] #During Column
                homo=scipy.stats.levene(total2[:,0],total2[:,1])[1]
                comb1=np.array([s1,s2,homo])
                comb1=comb1>alpha
                if comb1.all() == True: #test for normality
                    final=scipy.stats.pearsonr(total2[:,0],total2[:,1]) #if this is used, outcome will have no clear name on it
                    statistics2+=[[final[0],final[1]]]
                    # Bootstrapping
                    for q in range(n_iterations):
                        resample=np.random.choice(total2[:,0], len(total2[:,0]), replace=True)
                        res=scipy.stats.spearmanr(total2[:,1],resample)
                        statistics2+=[[res[0],res[1]]]
                else:
                    final=scipy.stats.spearmanr(total2[:,0],total2[:,1]) #if this is used, outcome will have the name spearman on it
                    statistics2+=[[final[0],final[1]]]
                    # Bootstrapping
                    for q in range(n_iterations):
                        resample=np.random.choice(total2[:,0], len(total2[:,0]), replace=True)
                        res=scipy.stats.spearmanr(total2[:,1],resample)
                        statistics2+=[[res[0],res[1]]]
                # NOTE(review): this filename says "SpectEnt" while the Premotor file
                # above says "SpecEnt"; kept as-is since downstream tools may rely on it.
                np.savetxt("Data_Boot_Corr_SpectEnt_Result_Syb" + answer + "_tone_" + str(m)+ "_During.txt", statistics2, header="First column is the correlation value, second is the p value. First line is the original correlation, all below are the bootstrapped correlations.")
                print(final)
                a4.hist(np.array(statistics2)[:,0])
                a4.set_title("Bootstrap During")
                a4.set_xlabel("Correlation Values")
    py.savefig("Corr_SpecEnt_syb"+ answer +"_tone"+ str(m)+".tif")
def ISI(spikefile):
    """Plot a histogram of inter-spike intervals (ISIs) in milliseconds.

    spikefile is a text file of spike times (seconds). Intervals are binned
    at 1 ms and displayed on a logarithmic x axis.
    """
    spike_times = np.loadtxt(spikefile)
    # Consecutive differences give the intervals; convert seconds -> ms.
    intervals_ms = np.sort(np.diff(spike_times)) * 1000
    bin_edges = np.arange(np.min(intervals_ms), np.max(intervals_ms), 1)
    py.hist(intervals_ms, bins=bin_edges)
    py.xscale('log')
    py.xlabel("Millisecond (ms)")
    py.ylabel("Counts/bin")
| [
"pylab.close",
"os.mkdir",
"numpy.load",
"numpy.fft.rfft",
"numpy.sum",
"pylab.GridSpec",
"numpy.ravel",
"numpy.empty",
"numpy.floor",
"numpy.reciprocal",
"numpy.ones",
"pylab.waitforbuttonpress",
"os.path.isfile",
"pylab.subplots",
"pylab.figure",
"pylab.tight_layout",
"numpy.arange... | [((2777, 2793), 'numpy.empty', 'np.empty', (['(1, 2)'], {}), '((1, 2))\n', (2785, 2793), True, 'import numpy as np\n'), ((2799, 2815), 'numpy.empty', 'np.empty', (['(1, 2)'], {}), '((1, 2))\n', (2807, 2815), True, 'import numpy as np\n'), ((2821, 2837), 'numpy.empty', 'np.empty', (['(1, 2)'], {}), '((1, 2))\n', (2829, 2837), True, 'import numpy as np\n'), ((2846, 2862), 'numpy.empty', 'np.empty', (['(1, 2)'], {}), '((1, 2))\n', (2854, 2862), True, 'import numpy as np\n'), ((2868, 2884), 'numpy.empty', 'np.empty', (['(1, 2)'], {}), '((1, 2))\n', (2876, 2884), True, 'import numpy as np\n'), ((4133, 4157), 'pylab.title', 'py.title', (['s'], {'fontsize': '(10)'}), '(s, fontsize=10)\n', (4141, 4157), True, 'import pylab as py\n'), ((4162, 4171), 'pylab.draw', 'py.draw', ([], {}), '()\n', (4169, 4171), True, 'import pylab as py\n'), ((4245, 4269), 'numpy.power', 'np.power', (['inputSignal', '(2)'], {}), '(inputSignal, 2)\n', (4253, 4269), True, 'import numpy as np\n'), ((4380, 4408), 'numpy.convolve', 'np.convolve', (['squared_song', 'h'], {}), '(squared_song, h)\n', (4391, 4408), True, 'import numpy as np\n'), ((4560, 4575), 'numpy.sqrt', 'np.sqrt', (['smooth'], {}), '(smooth)\n', (4567, 4575), True, 'import numpy as np\n'), ((4841, 4858), 'numpy.load', 'np.load', (['songfile'], {}), '(songfile)\n', (4848, 4858), True, 'import numpy as np\n'), ((5532, 5556), 'numpy.power', 'np.power', (['inputSignal', '(2)'], {}), '(inputSignal, 2)\n', (5540, 5556), True, 'import numpy as np\n'), ((6908, 6938), 'neo.io.Spike2IO', 'neo.io.Spike2IO', ([], {'filename': 'file'}), '(filename=file)\n', (6923, 6938), False, 'import neo\n'), ((7740, 7778), 'numpy.linspace', 'np.linspace', (['t_start', 't_stop', 'as_steps'], {}), '(t_start, t_stop, as_steps)\n', (7751, 7778), True, 'import numpy as np\n'), ((8720, 8757), 'numpy.array', 'np.array', (['[data_seg.analogsignals[0]]'], {}), '([data_seg.analogsignals[0]])\n', (8728, 8757), True, 'import numpy as np\n'), ((10151, 
10162), 'pylab.figure', 'py.figure', ([], {}), '()\n', (10160, 10162), True, 'import pylab as py\n'), ((10422, 10439), 'pylab.tight_layout', 'py.tight_layout', ([], {}), '()\n', (10437, 10439), True, 'import pylab as py\n'), ((10601, 10612), 'pylab.figure', 'py.figure', ([], {}), '()\n', (10610, 10612), True, 'import pylab as py\n'), ((10659, 10680), 'pylab.xlabel', 'py.xlabel', (['"""time (s)"""'], {}), "('time (s)')\n", (10668, 10680), True, 'import pylab as py\n'), ((10685, 10709), 'pylab.title', 'py.title', (['"""Spike trains"""'], {}), "('Spike trains')\n", (10693, 10709), True, 'import pylab as py\n'), ((10714, 10749), 'pylab.ylabel', 'py.ylabel', (['"""Number of spike trains"""'], {}), "('Number of spike trains')\n", (10723, 10749), True, 'import pylab as py\n'), ((11636, 11653), 'pylab.tight_layout', 'py.tight_layout', ([], {}), '()\n', (11651, 11653), True, 'import pylab as py\n'), ((11658, 11667), 'pylab.show', 'py.show', ([], {}), '()\n', (11665, 11667), True, 'import pylab as py\n'), ((12363, 12378), 'os.mkdir', 'os.mkdir', (['today'], {}), '(today)\n', (12371, 12378), False, 'import os\n'), ((12562, 12599), 'os.path.isfile', 'os.path.isfile', (['"""..//unitswindow.txt"""'], {}), "('..//unitswindow.txt')\n", (12576, 12599), False, 'import os\n'), ((16456, 16468), 'numpy.load', 'np.load', (['raw'], {}), '(raw)\n', (16463, 16468), True, 'import numpy as np\n'), ((16480, 16500), 'numpy.load', 'np.load', (['rawfiltered'], {}), '(rawfiltered)\n', (16487, 16500), True, 'import numpy as np\n'), ((18599, 18615), 'numpy.load', 'np.load', (['LFPfile'], {}), '(LFPfile)\n', (18606, 18615), True, 'import numpy as np\n'), ((18975, 18986), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (18983, 18986), True, 'import numpy as np\n'), ((18991, 19026), 'numpy.save', 'np.save', (['"""LFPDownsampled"""', 'downsamp'], {}), "('LFPDownsampled', downsamp)\n", (18998, 19026), True, 'import numpy as np\n'), ((19952, 19969), 'numpy.load', 'np.load', (['songfile'], {}), 
'(songfile)\n', (19959, 19969), True, 'import numpy as np\n'), ((20202, 20219), 'pylab.subplots', 'py.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (20213, 20219), True, 'import pylab as py\n'), ((20992, 21017), 'pylab.colorbar', 'py.colorbar', (['im'], {'ax': 'ax[1]'}), '(im, ax=ax[1])\n', (21003, 21017), True, 'import pylab as py\n'), ((21217, 21234), 'pylab.tight_layout', 'py.tight_layout', ([], {}), '()\n', (21232, 21234), True, 'import pylab as py\n'), ((21800, 21821), 'numpy.loadtxt', 'np.loadtxt', (['spikefile'], {}), '(spikefile)\n', (21810, 21821), True, 'import numpy as np\n'), ((21906, 21941), 'pylab.subplots', 'py.subplots', (['(2)', '(1)'], {'figsize': '(18, 15)'}), '(2, 1, figsize=(18, 15))\n', (21917, 21941), True, 'import pylab as py\n'), ((25628, 25646), 'numpy.concatenate', 'np.concatenate', (['y2'], {}), '(y2)\n', (25642, 25646), True, 'import numpy as np\n'), ((25842, 25874), 'pylab.fig.subplots_adjust', 'py.fig.subplots_adjust', ([], {'hspace': '(0)'}), '(hspace=0)\n', (25864, 25874), True, 'import pylab as py\n'), ((25892, 25942), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""black"""', 'label': '"""+STD"""'}), "([], [], color='black', label='+STD')\n", (25905, 25942), True, 'import matplotlib.lines as mlines\n'), ((25963, 26029), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""black"""', 'label': '"""+STD"""', 'linestyle': '"""--"""'}), "([], [], color='black', label='+STD', linestyle='--')\n", (25976, 26029), True, 'import matplotlib.lines as mlines\n'), ((26048, 26098), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""green"""', 'label': '"""Mean"""'}), "([], [], color='green', label='Mean')\n", (26061, 26098), True, 'import matplotlib.lines as mlines\n'), ((26874, 26895), 'numpy.loadtxt', 'np.loadtxt', (['spikefile'], {}), '(spikefile)\n', (26884, 26895), True, 'import numpy as np\n'), ((30237, 30254), 'numpy.load', 'np.load', (['songfile'], {}), '(songfile)\n', 
(30244, 30254), True, 'import numpy as np\n'), ((30271, 30301), 'numpy.ravel', 'np.ravel', (['inputSignal[beg:end]'], {}), '(inputSignal[beg:end])\n', (30279, 30301), True, 'import numpy as np\n'), ((30462, 30492), 'pylab.subplots', 'py.subplots', (['(3)', '(1)'], {'sharey': '(True)'}), '(3, 1, sharey=True)\n', (30473, 30492), True, 'import pylab as py\n'), ((30496, 30522), 'pylab.xlabel', 'py.xlabel', (['"""Sample Points"""'], {}), "('Sample Points')\n", (30505, 30522), True, 'import pylab as py\n'), ((30844, 30861), 'pylab.tight_layout', 'py.tight_layout', ([], {}), '()\n', (30859, 30861), True, 'import pylab as py\n'), ((30866, 30875), 'pylab.show', 'py.show', ([], {}), '()\n', (30873, 30875), True, 'import pylab as py\n'), ((31290, 31307), 'numpy.load', 'np.load', (['songfile'], {}), '(songfile)\n', (31297, 31307), True, 'import numpy as np\n'), ((32073, 32088), 'pylab.subplot', 'py.subplot', (['(311)'], {}), '(311)\n', (32083, 32088), True, 'import pylab as py\n'), ((32093, 32116), 'pylab.plot', 'py.plot', (['t', 'signal', '"""g"""'], {}), "(t, signal, 'g')\n", (32100, 32116), True, 'import pylab as py\n'), ((32143, 32160), 'pylab.xlabel', 'py.xlabel', (['"""Time"""'], {}), "('Time')\n", (32152, 32160), True, 'import pylab as py\n'), ((32165, 32187), 'pylab.ylabel', 'py.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (32174, 32187), True, 'import pylab as py\n'), ((32192, 32207), 'pylab.subplot', 'py.subplot', (['(312)'], {}), '(312)\n', (32202, 32207), True, 'import pylab as py\n'), ((32212, 32236), 'pylab.plot', 'py.plot', (['freqs', 'FFT', '"""r"""'], {}), "(freqs, FFT, 'r')\n", (32219, 32236), True, 'import pylab as py\n'), ((32278, 32305), 'pylab.xlabel', 'py.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (32287, 32305), True, 'import pylab as py\n'), ((32310, 32334), 'pylab.title', 'py.title', (['"""Double-sided"""'], {}), "('Double-sided')\n", (32318, 32334), True, 'import pylab as py\n'), ((32339, 32357), 'pylab.ylabel', 
'py.ylabel', (['"""Power"""'], {}), "('Power')\n", (32348, 32357), True, 'import pylab as py\n'), ((32362, 32377), 'pylab.subplot', 'py.subplot', (['(313)'], {}), '(313)\n', (32372, 32377), True, 'import pylab as py\n'), ((32463, 32490), 'pylab.xlabel', 'py.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (32472, 32490), True, 'import pylab as py\n'), ((32495, 32519), 'pylab.title', 'py.title', (['"""Single sided"""'], {}), "('Single sided')\n", (32503, 32519), True, 'import pylab as py\n'), ((32524, 32542), 'pylab.ylabel', 'py.ylabel', (['"""Power"""'], {}), "('Power')\n", (32533, 32542), True, 'import pylab as py\n'), ((32547, 32564), 'pylab.tight_layout', 'py.tight_layout', ([], {}), '()\n', (32562, 32564), True, 'import pylab as py\n'), ((32569, 32578), 'pylab.show', 'py.show', ([], {}), '()\n', (32576, 32578), True, 'import pylab as py\n'), ((33257, 33278), 'numpy.loadtxt', 'np.loadtxt', (['spikefile'], {}), '(spikefile)\n', (33267, 33278), True, 'import numpy as np\n'), ((33288, 33305), 'numpy.load', 'np.load', (['songfile'], {}), '(songfile)\n', (33295, 33305), True, 'import numpy as np\n'), ((35863, 35875), 'pylab.plot', 'py.plot', (['syb'], {}), '(syb)\n', (35870, 35875), True, 'import pylab as py\n'), ((46393, 46414), 'numpy.loadtxt', 'np.loadtxt', (['spikefile'], {}), '(spikefile)\n', (46403, 46414), True, 'import numpy as np\n'), ((46424, 46441), 'numpy.load', 'np.load', (['songfile'], {}), '(songfile)\n', (46431, 46441), True, 'import numpy as np\n'), ((49000, 49012), 'pylab.plot', 'py.plot', (['syb'], {}), '(syb)\n', (49007, 49012), True, 'import pylab as py\n'), ((57987, 57998), 'numpy.sum', 'np.sum', (['psd'], {}), '(psd)\n', (57993, 57998), True, 'import numpy as np\n'), ((59226, 59247), 'numpy.loadtxt', 'np.loadtxt', (['spikefile'], {}), '(spikefile)\n', (59236, 59247), True, 'import numpy as np\n'), ((59257, 59274), 'numpy.load', 'np.load', (['songfile'], {}), '(songfile)\n', (59264, 59274), True, 'import numpy as np\n'), ((61836, 
61848), 'pylab.plot', 'py.plot', (['syb'], {}), '(syb)\n', (61843, 61848), True, 'import pylab as py\n'), ((68530, 68551), 'numpy.loadtxt', 'np.loadtxt', (['spikefile'], {}), '(spikefile)\n', (68540, 68551), True, 'import numpy as np\n'), ((68665, 68681), 'pylab.xscale', 'py.xscale', (['"""log"""'], {}), "('log')\n", (68674, 68681), True, 'import pylab as py\n'), ((68686, 68715), 'pylab.xlabel', 'py.xlabel', (['"""Millisecond (ms)"""'], {}), "('Millisecond (ms)')\n", (68695, 68715), True, 'import pylab as py\n'), ((68720, 68743), 'pylab.ylabel', 'py.ylabel', (['"""Counts/bin"""'], {}), "('Counts/bin')\n", (68729, 68743), True, 'import pylab as py\n'), ((4341, 4356), 'numpy.ones', 'np.ones', (['(len,)'], {}), '((len,))\n', (4348, 4356), True, 'import numpy as np\n'), ((5277, 5288), 'pylab.figure', 'py.figure', ([], {}), '()\n', (5286, 5288), True, 'import pylab as py\n'), ((5573, 5593), 'numpy.ones', 'np.ones', (['window_size'], {}), '(window_size)\n', (5580, 5593), True, 'import numpy as np\n'), ((5636, 5668), 'numpy.convolve', 'np.convolve', (['a2', 'window', '"""valid"""'], {}), "(a2, window, 'valid')\n", (5647, 5668), True, 'import numpy as np\n'), ((9007, 9045), 'numpy.save', 'np.save', (["(file[:-4] + '_songfile')", 'song'], {}), "(file[:-4] + '_songfile', song)\n", (9014, 9045), True, 'import numpy as np\n'), ((10247, 10271), 'pylab.plot', 'py.plot', (['time', 'analog[i]'], {}), '(time, analog[i])\n', (10254, 10271), True, 'import pylab as py\n'), ((10279, 10300), 'pylab.xlabel', 'py.xlabel', (['"""time (s)"""'], {}), "('time (s)')\n", (10288, 10300), True, 'import pylab as py\n'), ((10309, 10331), 'pylab.ylabel', 'py.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (10318, 10331), True, 'import pylab as py\n'), ((10627, 10651), 'numpy.arange', 'np.arange', (['(0)', '(11)'], {'step': '(1)'}), '(0, 11, step=1)\n', (10636, 10651), True, 'import numpy as np\n'), ((11564, 11604), 'pylab.legend', 'py.legend', (['Labels'], {'bbox_to_anchor': '(1, 1)'}), 
'(Labels, bbox_to_anchor=(1, 1))\n', (11573, 11604), True, 'import pylab as py\n'), ((12392, 12417), 'os.path.expanduser', 'os.path.expanduser', (['today'], {}), '(today)\n', (12410, 12417), False, 'import os\n'), ((16989, 17017), 'numpy.loadtxt', 'np.loadtxt', (["(Chprov1 + '.txt')"], {}), "(Chprov1 + '.txt')\n", (16999, 17017), True, 'import numpy as np\n'), ((17153, 17187), 'numpy.empty', 'np.empty', (['[1, windowsize + 2]', 'int'], {}), '([1, windowsize + 2], int)\n', (17161, 17187), True, 'import numpy as np\n'), ((17579, 17747), 'numpy.savetxt', 'np.savetxt', (["('SpikeShape#' + Label1 + '.txt')", 'b1'], {'header': '"""First column = Initial Time; Second column = Final Time; Third Column = First Spike Shape value, etc"""'}), "('SpikeShape#' + Label1 + '.txt', b1, header=\n 'First column = Initial Time; Second column = Final Time; Third Column = First Spike Shape value, etc'\n )\n", (17589, 17747), True, 'import numpy as np\n'), ((19171, 19188), 'pylab.subplots', 'py.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (19182, 19188), True, 'import pylab as py\n'), ((19479, 19488), 'pylab.show', 'py.show', ([], {}), '()\n', (19486, 19488), True, 'import pylab as py\n'), ((19497, 19514), 'pylab.tight_layout', 'py.tight_layout', ([], {}), '()\n', (19512, 19514), True, 'import pylab as py\n'), ((21064, 21113), 'numpy.linspace', 'np.linspace', (['cbar.vmin', 'cbar.vmax', '(5)'], {'dtype': 'float'}), '(cbar.vmin, cbar.vmax, 5, dtype=float)\n', (21075, 21113), True, 'import numpy as np\n'), ((22228, 22260), 'numpy.mean', 'np.mean', (['(used[:, 1] - used[:, 0])'], {}), '(used[:, 1] - used[:, 0])\n', (22235, 22260), True, 'import numpy as np\n'), ((24613, 24623), 'numpy.mean', 'np.mean', (['u'], {}), '(u)\n', (24620, 24623), True, 'import numpy as np\n'), ((24644, 24653), 'numpy.std', 'np.std', (['u'], {}), '(u)\n', (24650, 24653), True, 'import numpy as np\n'), ((25601, 25619), 'numpy.concatenate', 'np.concatenate', (['x2'], {}), '(x2)\n', (25615, 25619), True, 'import numpy 
as np\n'), ((27071, 27087), 'numpy.empty', 'np.empty', (['(1, 2)'], {}), '((1, 2))\n', (27079, 27087), True, 'import numpy as np\n'), ((33915, 33928), 'pylab.subplots', 'py.subplots', ([], {}), '()\n', (33926, 33928), True, 'import pylab as py\n'), ((34289, 34314), 'pylab.waitforbuttonpress', 'py.waitforbuttonpress', (['(10)'], {}), '(10)\n', (34310, 34314), True, 'import pylab as py\n'), ((34371, 34381), 'pylab.close', 'py.close', ([], {}), '()\n', (34379, 34381), True, 'import pylab as py\n'), ((35547, 35577), 'numpy.split', 'np.split', (['coords2', '(numcuts + 1)'], {}), '(coords2, numcuts + 1)\n', (35555, 35577), True, 'import numpy as np\n'), ((35684, 35735), 'numpy.savetxt', 'np.savetxt', (["('Mean_cut_syb' + answer + '.txt')", 'means'], {}), "('Mean_cut_syb' + answer + '.txt', means)\n", (35694, 35735), True, 'import numpy as np\n'), ((36186, 36213), 'pylab.figure', 'py.figure', ([], {'figsize': '(18, 15)'}), '(figsize=(18, 15))\n', (36195, 36213), True, 'import pylab as py\n'), ((36224, 36241), 'pylab.GridSpec', 'py.GridSpec', (['(2)', '(2)'], {}), '(2, 2)\n', (36235, 36241), True, 'import pylab as py\n'), ((39341, 39356), 'numpy.array', 'np.array', (['freq2'], {}), '(freq2)\n', (39349, 39356), True, 'import numpy as np\n'), ((39371, 39396), 'numpy.reciprocal', 'np.reciprocal', (['(freq2 / fs)'], {}), '(freq2 / fs)\n', (39384, 39396), True, 'import numpy as np\n'), ((39411, 39460), 'numpy.column_stack', 'np.column_stack', (['(freq2, spikespremot, spikesdur)'], {}), '((freq2, spikespremot, spikesdur))\n', (39426, 39460), True, 'import numpy as np\n'), ((47058, 47071), 'pylab.subplots', 'py.subplots', ([], {}), '()\n', (47069, 47071), True, 'import pylab as py\n'), ((47430, 47455), 'pylab.waitforbuttonpress', 'py.waitforbuttonpress', (['(10)'], {}), '(10)\n', (47451, 47455), True, 'import pylab as py\n'), ((47512, 47522), 'pylab.close', 'py.close', ([], {}), '()\n', (47520, 47522), True, 'import pylab as py\n'), ((48688, 48718), 'numpy.split', 'np.split', 
(['coords2', '(numcuts + 1)'], {}), '(coords2, numcuts + 1)\n', (48696, 48718), True, 'import numpy as np\n'), ((48825, 48876), 'numpy.savetxt', 'np.savetxt', (["('Mean_cut_syb' + answer + '.txt')", 'means'], {}), "('Mean_cut_syb' + answer + '.txt', means)\n", (48835, 48876), True, 'import numpy as np\n'), ((49393, 49420), 'pylab.figure', 'py.figure', ([], {'figsize': '(18, 15)'}), '(figsize=(18, 15))\n', (49402, 49420), True, 'import pylab as py\n'), ((49431, 49448), 'pylab.GridSpec', 'py.GridSpec', (['(2)', '(3)'], {}), '(2, 3)\n', (49442, 49448), True, 'import pylab as py\n'), ((50796, 50810), 'numpy.array', 'np.array', (['amps'], {}), '(amps)\n', (50804, 50810), True, 'import numpy as np\n'), ((50825, 50840), 'numpy.array', 'np.array', (['integ'], {}), '(integ)\n', (50833, 50840), True, 'import numpy as np\n'), ((58176, 58193), 'numpy.asarray', 'np.asarray', (['bands'], {}), '(bands)\n', (58186, 58193), True, 'import numpy as np\n'), ((58221, 58251), 'numpy.concatenate', 'np.concatenate', (['[[0.0], bands]'], {}), '([[0.0], bands])\n', (58235, 58251), True, 'import numpy as np\n'), ((58276, 58309), 'numpy.concatenate', 'np.concatenate', (['[bands, [np.Inf]]'], {}), '([bands, [np.Inf]])\n', (58290, 58309), True, 'import numpy as np\n'), ((59893, 59906), 'pylab.subplots', 'py.subplots', ([], {}), '()\n', (59904, 59906), True, 'import pylab as py\n'), ((60266, 60291), 'pylab.waitforbuttonpress', 'py.waitforbuttonpress', (['(10)'], {}), '(10)\n', (60287, 60291), True, 'import pylab as py\n'), ((60348, 60358), 'pylab.close', 'py.close', ([], {}), '()\n', (60356, 60358), True, 'import pylab as py\n'), ((61524, 61554), 'numpy.split', 'np.split', (['coords2', '(numcuts + 1)'], {}), '(coords2, numcuts + 1)\n', (61532, 61554), True, 'import numpy as np\n'), ((61661, 61712), 'numpy.savetxt', 'np.savetxt', (["('Mean_cut_syb' + answer + '.txt')", 'means'], {}), "('Mean_cut_syb' + answer + '.txt', means)\n", (61671, 61712), True, 'import numpy as np\n'), ((62142, 62169), 
'pylab.figure', 'py.figure', ([], {'figsize': '(18, 15)'}), '(figsize=(18, 15))\n', (62151, 62169), True, 'import pylab as py\n'), ((62180, 62197), 'pylab.GridSpec', 'py.GridSpec', (['(1)', '(3)'], {}), '(1, 3)\n', (62191, 62197), True, 'import pylab as py\n'), ((63238, 63255), 'numpy.array', 'np.array', (['specent'], {}), '(specent)\n', (63246, 63255), True, 'import numpy as np\n'), ((63272, 63323), 'numpy.column_stack', 'np.column_stack', (['(specent, spikespremot, spikesdur)'], {}), '((specent, spikespremot, spikesdur))\n', (63287, 63323), True, 'import numpy as np\n'), ((4284, 4316), 'numpy.round', 'np.round', (['(fs * smooth_win / 1000)'], {}), '(fs * smooth_win / 1000)\n', (4292, 4316), True, 'import numpy as np\n'), ((12305, 12328), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12326, 12328), False, 'import datetime\n'), ((12914, 12984), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': 'res', 'columns': "['Channel', 'Label', 'LFP number']"}), "(data=res, columns=['Channel', 'Label', 'LFP number'])\n", (12930, 12984), False, 'import pandas\n'), ((14057, 14127), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': 'res', 'columns': "['Channel', 'Label', 'LFP number']"}), "(data=res, columns=['Channel', 'Label', 'LFP number'])\n", (14073, 14127), False, 'import pandas\n'), ((17488, 17515), 'numpy.append', 'np.append', (['x1', 'res1'], {'axis': '(0)'}), '(x1, res1, axis=0)\n', (17497, 17515), True, 'import numpy as np\n'), ((17885, 17902), 'pylab.subplots', 'py.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (17896, 17902), True, 'import pylab as py\n'), ((18284, 18301), 'pylab.tight_layout', 'py.tight_layout', ([], {}), '()\n', (18299, 18301), True, 'import pylab as py\n'), ((18314, 18323), 'pylab.show', 'py.show', ([], {}), '()\n', (18321, 18323), True, 'import pylab as py\n'), ((23132, 23162), 'numpy.concatenate', 'np.concatenate', (['spikes2[n0:n1]'], {}), '(spikes2[n0:n1])\n', (23146, 23162), True, 'import numpy as np\n'), ((23631, 
23692), 'numpy.arange', 'np.arange', (['(0)', '(shoulder + meandurall + binwidth)'], {'step': 'binwidth'}), '(0, shoulder + meandurall + binwidth, step=binwidth)\n', (23640, 23692), True, 'import numpy as np\n'), ((24450, 24473), 'numpy.concatenate', 'np.concatenate', (['basespk'], {}), '(basespk)\n', (24464, 24473), True, 'import numpy as np\n'), ((24671, 24726), 'numpy.arange', 'np.arange', (['(meandurall / 3)', '(meandurall * 2 / 3)', 'binwidth'], {}), '(meandurall / 3, meandurall * 2 / 3, binwidth)\n', (24680, 24726), True, 'import numpy as np\n'), ((25049, 25072), 'numpy.concatenate', 'np.concatenate', (['spikes2'], {}), '(spikes2)\n', (25063, 25072), True, 'import numpy as np\n'), ((28124, 28142), 'numpy.array', 'np.array', (['[s1, s2]'], {}), '([s1, s2])\n', (28132, 28142), True, 'import numpy as np\n'), ((34161, 34178), 'numpy.ravel', 'np.ravel', (['example'], {}), '(example)\n', (34169, 34178), True, 'import numpy as np\n'), ((34601, 34614), 'pylab.subplots', 'py.subplots', ([], {}), '()\n', (34612, 34614), True, 'import pylab as py\n'), ((34806, 34831), 'pylab.waitforbuttonpress', 'py.waitforbuttonpress', (['(10)'], {}), '(10)\n', (34827, 34831), True, 'import pylab as py\n'), ((35385, 35395), 'pylab.close', 'py.close', ([], {}), '()\n', (35393, 35395), True, 'import pylab as py\n'), ((35415, 35447), 'numpy.append', 'np.append', (['coords2', 'coords[:, 0]'], {}), '(coords2, coords[:, 0])\n', (35424, 35447), True, 'import numpy as np\n'), ((37108, 37133), 'pylab.waitforbuttonpress', 'py.waitforbuttonpress', (['(30)'], {}), '(30)\n', (37129, 37133), True, 'import pylab as py\n'), ((37147, 37157), 'pylab.close', 'py.close', ([], {}), '()\n', (37155, 37157), True, 'import pylab as py\n'), ((37217, 37242), 'pylab.waitforbuttonpress', 'py.waitforbuttonpress', (['(30)'], {}), '(30)\n', (37238, 37242), True, 'import pylab as py\n'), ((39253, 39272), 'numpy.array', 'np.array', (['spikesdur'], {}), '(spikesdur)\n', (39261, 39272), True, 'import numpy as np\n'), 
((39299, 39321), 'numpy.array', 'np.array', (['spikespremot'], {}), '(spikespremot)\n', (39307, 39321), True, 'import numpy as np\n'), ((40013, 40051), 'numpy.column_stack', 'np.column_stack', (['(freq2, spikespremot)'], {}), '((freq2, spikespremot))\n', (40028, 40051), True, 'import numpy as np\n'), ((40070, 40105), 'numpy.column_stack', 'np.column_stack', (['(freq2, spikesdur)'], {}), '((freq2, spikesdur))\n', (40085, 40105), True, 'import numpy as np\n'), ((44562, 44587), 'pylab.waitforbuttonpress', 'py.waitforbuttonpress', (['(30)'], {}), '(30)\n', (44583, 44587), True, 'import pylab as py\n'), ((44674, 44684), 'pylab.close', 'py.close', ([], {}), '()\n', (44682, 44684), True, 'import pylab as py\n'), ((44744, 44769), 'pylab.waitforbuttonpress', 'py.waitforbuttonpress', (['(30)'], {}), '(30)\n', (44765, 44769), True, 'import pylab as py\n'), ((47305, 47322), 'numpy.ravel', 'np.ravel', (['example'], {}), '(example)\n', (47313, 47322), True, 'import numpy as np\n'), ((47742, 47755), 'pylab.subplots', 'py.subplots', ([], {}), '()\n', (47753, 47755), True, 'import pylab as py\n'), ((47947, 47972), 'pylab.waitforbuttonpress', 'py.waitforbuttonpress', (['(10)'], {}), '(10)\n', (47968, 47972), True, 'import pylab as py\n'), ((48526, 48536), 'pylab.close', 'py.close', ([], {}), '()\n', (48534, 48536), True, 'import pylab as py\n'), ((48556, 48588), 'numpy.append', 'np.append', (['coords2', 'coords[:, 0]'], {}), '(coords2, coords[:, 0])\n', (48565, 48588), True, 'import numpy as np\n'), ((50709, 50728), 'numpy.array', 'np.array', (['spikesdur'], {}), '(spikesdur)\n', (50717, 50728), True, 'import numpy as np\n'), ((50755, 50777), 'numpy.array', 'np.array', (['spikespremot'], {}), '(spikespremot)\n', (50763, 50777), True, 'import numpy as np\n'), ((50895, 50943), 'numpy.column_stack', 'np.column_stack', (['(amps, spikespremot, spikesdur)'], {}), '((amps, spikespremot, spikesdur))\n', (50910, 50943), True, 'import numpy as np\n'), ((51235, 51272), 'numpy.column_stack', 
'np.column_stack', (['(amps, spikespremot)'], {}), '((amps, spikespremot))\n', (51250, 51272), True, 'import numpy as np\n'), ((51291, 51325), 'numpy.column_stack', 'np.column_stack', (['(amps, spikesdur)'], {}), '((amps, spikesdur))\n', (51306, 51325), True, 'import numpy as np\n'), ((51523, 51572), 'numpy.column_stack', 'np.column_stack', (['(integ, spikespremot, spikesdur)'], {}), '((integ, spikespremot, spikesdur))\n', (51538, 51572), True, 'import numpy as np\n'), ((51863, 51901), 'numpy.column_stack', 'np.column_stack', (['(integ, spikespremot)'], {}), '((integ, spikespremot))\n', (51878, 51901), True, 'import numpy as np\n'), ((51920, 51955), 'numpy.column_stack', 'np.column_stack', (['(integ, spikesdur)'], {}), '((integ, spikesdur))\n', (51935, 51955), True, 'import numpy as np\n'), ((57952, 57971), 'numpy.fft.rfft', 'np.fft.rfft', (['signal'], {}), '(signal)\n', (57963, 57971), True, 'import numpy as np\n'), ((58482, 58506), 'numpy.array', 'np.array', (['power_per_band'], {}), '(power_per_band)\n', (58490, 58506), True, 'import numpy as np\n'), ((60140, 60157), 'numpy.ravel', 'np.ravel', (['example'], {}), '(example)\n', (60148, 60157), True, 'import numpy as np\n'), ((60578, 60591), 'pylab.subplots', 'py.subplots', ([], {}), '()\n', (60589, 60591), True, 'import pylab as py\n'), ((60783, 60808), 'pylab.waitforbuttonpress', 'py.waitforbuttonpress', (['(10)'], {}), '(10)\n', (60804, 60808), True, 'import pylab as py\n'), ((61362, 61372), 'pylab.close', 'py.close', ([], {}), '()\n', (61370, 61372), True, 'import pylab as py\n'), ((61392, 61424), 'numpy.append', 'np.append', (['coords2', 'coords[:, 0]'], {}), '(coords2, coords[:, 0])\n', (61401, 61424), True, 'import numpy as np\n'), ((63148, 63167), 'numpy.array', 'np.array', (['spikesdur'], {}), '(spikesdur)\n', (63156, 63167), True, 'import numpy as np\n'), ((63194, 63216), 'numpy.array', 'np.array', (['spikespremot'], {}), '(spikespremot)\n', (63202, 63216), True, 'import numpy as np\n'), ((63881, 63921), 
'numpy.column_stack', 'np.column_stack', (['(specent, spikespremot)'], {}), '((specent, spikespremot))\n', (63896, 63921), True, 'import numpy as np\n'), ((63940, 63977), 'numpy.column_stack', 'np.column_stack', (['(specent, spikesdur)'], {}), '((specent, spikesdur))\n', (63955, 63977), True, 'import numpy as np\n'), ((68570, 68585), 'numpy.diff', 'np.diff', (['spikes'], {}), '(spikes)\n', (68577, 68585), True, 'import numpy as np\n'), ((13568, 13603), 'numpy.savetxt', 'np.savetxt', (["(Chprov + '.txt')", 'tosave'], {}), "(Chprov + '.txt', tosave)\n", (13578, 13603), True, 'import numpy as np\n'), ((18678, 18698), 'numpy.repeat', 'np.repeat', (['(1)', 'window'], {}), '(1, window)\n', (18687, 18698), True, 'import numpy as np\n'), ((24207, 24233), 'numpy.arange', 'np.arange', (['basebeg', 'basend'], {}), '(basebeg, basend)\n', (24216, 24233), True, 'import numpy as np\n'), ((29392, 29412), 'numpy.array', 'np.array', (['statistics'], {}), '(statistics)\n', (29400, 29412), True, 'import numpy as np\n'), ((33700, 33717), 'numpy.loadtxt', 'np.loadtxt', (['means'], {}), '(means)\n', (33710, 33717), True, 'import numpy as np\n'), ((34744, 34757), 'numpy.ravel', 'np.ravel', (['syb'], {}), '(syb)\n', (34752, 34757), True, 'import numpy as np\n'), ((35106, 35180), 'pylab.scatter', 'py.scatter', (['coords[:, 0]', 'coords[:, 1]'], {'s': '(50)', 'marker': '"""X"""', 'zorder': '(10)', 'c': '"""r"""'}), "(coords[:, 0], coords[:, 1], s=50, marker='X', zorder=10, c='r')\n", (35116, 35180), True, 'import pylab as py\n'), ((35270, 35293), 'pylab.waitforbuttonpress', 'py.waitforbuttonpress', ([], {}), '()\n', (35291, 35293), True, 'import pylab as py\n'), ((37735, 37760), 'pylab.waitforbuttonpress', 'py.waitforbuttonpress', (['(30)'], {}), '(30)\n', (37756, 37760), True, 'import pylab as py\n'), ((40762, 40786), 'numpy.array', 'np.array', (['[s1, s2, homo]'], {}), '([s1, s2, homo])\n', (40770, 40786), True, 'import numpy as np\n'), ((42571, 42595), 'numpy.array', 'np.array', (['[s1, 
s2, homo]'], {}), '([s1, s2, homo])\n', (42579, 42595), True, 'import numpy as np\n'), ((45434, 45459), 'pylab.waitforbuttonpress', 'py.waitforbuttonpress', (['(30)'], {}), '(30)\n', (45455, 45459), True, 'import pylab as py\n'), ((46843, 46860), 'numpy.loadtxt', 'np.loadtxt', (['means'], {}), '(means)\n', (46853, 46860), True, 'import numpy as np\n'), ((47885, 47898), 'numpy.ravel', 'np.ravel', (['syb'], {}), '(syb)\n', (47893, 47898), True, 'import numpy as np\n'), ((48247, 48321), 'pylab.scatter', 'py.scatter', (['coords[:, 0]', 'coords[:, 1]'], {'s': '(50)', 'marker': '"""X"""', 'zorder': '(10)', 'c': '"""r"""'}), "(coords[:, 0], coords[:, 1], s=50, marker='X', zorder=10, c='r')\n", (48257, 48321), True, 'import pylab as py\n'), ((48411, 48434), 'pylab.waitforbuttonpress', 'py.waitforbuttonpress', ([], {}), '()\n', (48432, 48434), True, 'import pylab as py\n'), ((49932, 49948), 'numpy.ravel', 'np.ravel', (['sybcut'], {}), '(sybcut)\n', (49940, 49948), True, 'import numpy as np\n'), ((50361, 50376), 'numpy.mean', 'np.mean', (['smooth'], {}), '(smooth)\n', (50368, 50376), True, 'import numpy as np\n'), ((52934, 52958), 'numpy.array', 'np.array', (['[s1, s2, homo]'], {}), '([s1, s2, homo])\n', (52942, 52958), True, 'import numpy as np\n'), ((54981, 55005), 'numpy.array', 'np.array', (['[s1, s2, homo]'], {}), '([s1, s2, homo])\n', (54989, 55005), True, 'import numpy as np\n'), ((58507, 58531), 'numpy.array', 'np.array', (['power_per_band'], {}), '(power_per_band)\n', (58515, 58531), True, 'import numpy as np\n'), ((58579, 58602), 'numpy.log2', 'np.log2', (['power_per_band'], {}), '(power_per_band)\n', (58586, 58602), True, 'import numpy as np\n'), ((59678, 59695), 'numpy.loadtxt', 'np.loadtxt', (['means'], {}), '(means)\n', (59688, 59695), True, 'import numpy as np\n'), ((60721, 60734), 'numpy.ravel', 'np.ravel', (['syb'], {}), '(syb)\n', (60729, 60734), True, 'import numpy as np\n'), ((61083, 61157), 'pylab.scatter', 'py.scatter', (['coords[:, 0]', 'coords[:, 
1]'], {'s': '(50)', 'marker': '"""X"""', 'zorder': '(10)', 'c': '"""r"""'}), "(coords[:, 0], coords[:, 1], s=50, marker='X', zorder=10, c='r')\n", (61093, 61157), True, 'import pylab as py\n'), ((61247, 61270), 'pylab.waitforbuttonpress', 'py.waitforbuttonpress', ([], {}), '()\n', (61268, 61270), True, 'import pylab as py\n'), ((64838, 64862), 'numpy.array', 'np.array', (['[s1, s2, homo]'], {}), '([s1, s2, homo])\n', (64846, 64862), True, 'import numpy as np\n'), ((66825, 66849), 'numpy.array', 'np.array', (['[s1, s2, homo]'], {}), '([s1, s2, homo])\n', (66833, 66849), True, 'import numpy as np\n'), ((68627, 68640), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (68633, 68640), True, 'import numpy as np\n'), ((68642, 68655), 'numpy.max', 'np.max', (['times'], {}), '(times)\n', (68648, 68655), True, 'import numpy as np\n'), ((13127, 13138), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (13135, 13138), True, 'import numpy as np\n'), ((17359, 17404), 'numpy.array', 'np.array', (['[[a1], [a1 + windowsize]]', 'np.int32'], {}), '([[a1], [a1 + windowsize]], np.int32)\n', (17367, 17404), True, 'import numpy as np\n'), ((17434, 17459), 'numpy.append', 'np.append', (['y1', 'analogtxt1'], {}), '(y1, analogtxt1)\n', (17443, 17459), True, 'import numpy as np\n'), ((24513, 24547), 'numpy.arange', 'np.arange', (['(0)', 'meandurall', 'binwidth'], {}), '(0, meandurall, binwidth)\n', (24522, 24547), True, 'import numpy as np\n'), ((35654, 35673), 'numpy.mean', 'np.mean', (['coords2[k]'], {}), '(coords2[k])\n', (35661, 35673), True, 'import numpy as np\n'), ((48795, 48814), 'numpy.mean', 'np.mean', (['coords2[k]'], {}), '(coords2[k])\n', (48802, 48814), True, 'import numpy as np\n'), ((58348, 58388), 'numpy.bitwise_and', 'np.bitwise_and', (['(freqs >= low)', '(freqs < up)'], {}), '(freqs >= low, freqs < up)\n', (58362, 58388), True, 'import numpy as np\n'), ((61631, 61650), 'numpy.mean', 'np.mean', (['coords2[k]'], {}), '(coords2[k])\n', (61638, 61650), True, 'import numpy 
as np\n'), ((21164, 21183), 'numpy.floor', 'np.floor', (['cbar.vmin'], {}), '(cbar.vmin)\n', (21172, 21183), True, 'import numpy as np\n'), ((27357, 27401), 'numpy.logical_and', 'np.logical_and', (['(spused >= beg)', '(spused <= end)'], {}), '(spused >= beg, spused <= end)\n', (27371, 27401), True, 'import numpy as np\n'), ((35032, 35084), 'pylab.ginput', 'py.ginput', (['(numcuts + 1)'], {'timeout': '(-1)', 'show_clicks': '(True)'}), '(numcuts + 1, timeout=-1, show_clicks=True)\n', (35041, 35084), True, 'import pylab as py\n'), ((37510, 37552), 'pylab.ginput', 'py.ginput', (['(2)'], {'timeout': '(-1)', 'show_clicks': '(True)'}), '(2, timeout=-1, show_clicks=True)\n', (37519, 37552), True, 'import pylab as py\n'), ((38923, 38976), 'numpy.logical_and', 'np.logical_and', (['(spused >= beg - premot)', '(spused <= beg)'], {}), '(spused >= beg - premot, spused <= beg)\n', (38937, 38976), True, 'import numpy as np\n'), ((39019, 39063), 'numpy.logical_and', 'np.logical_and', (['(spused >= beg)', '(spused <= end)'], {}), '(spused >= beg, spused <= end)\n', (39033, 39063), True, 'import numpy as np\n'), ((39102, 39116), 'numpy.size', 'np.size', (['step1'], {}), '(step1)\n', (39109, 39116), True, 'import numpy as np\n'), ((39163, 39177), 'numpy.size', 'np.size', (['step2'], {}), '(step2)\n', (39170, 39177), True, 'import numpy as np\n'), ((44145, 44159), 'numpy.mean', 'np.mean', (['freq2'], {}), '(freq2)\n', (44152, 44159), True, 'import numpy as np\n'), ((44961, 45003), 'pylab.ginput', 'py.ginput', (['(1)'], {'timeout': '(-1)', 'show_clicks': '(True)'}), '(1, timeout=-1, show_clicks=True)\n', (44970, 45003), True, 'import pylab as py\n'), ((48173, 48225), 'pylab.ginput', 'py.ginput', (['(numcuts + 1)'], {'timeout': '(-1)', 'show_clicks': '(True)'}), '(numcuts + 1, timeout=-1, show_clicks=True)\n', (48182, 48225), True, 'import pylab as py\n'), ((50075, 50128), 'numpy.logical_and', 'np.logical_and', (['(spused >= beg - premot)', '(spused <= beg)'], {}), '(spused >= beg - 
premot, spused <= beg)\n', (50089, 50128), True, 'import numpy as np\n'), ((50171, 50215), 'numpy.logical_and', 'np.logical_and', (['(spused >= beg)', '(spused <= end)'], {}), '(spused >= beg, spused <= end)\n', (50185, 50215), True, 'import numpy as np\n'), ((50254, 50268), 'numpy.size', 'np.size', (['step1'], {}), '(step1)\n', (50261, 50268), True, 'import numpy as np\n'), ((50315, 50329), 'numpy.size', 'np.size', (['step2'], {}), '(step2)\n', (50322, 50329), True, 'import numpy as np\n'), ((54468, 54488), 'numpy.array', 'np.array', (['statistics'], {}), '(statistics)\n', (54476, 54488), True, 'import numpy as np\n'), ((56425, 56446), 'numpy.array', 'np.array', (['statistics2'], {}), '(statistics2)\n', (56433, 56446), True, 'import numpy as np\n'), ((61009, 61061), 'pylab.ginput', 'py.ginput', (['(numcuts + 1)'], {'timeout': '(-1)', 'show_clicks': '(True)'}), '(numcuts + 1, timeout=-1, show_clicks=True)\n', (61018, 61061), True, 'import pylab as py\n'), ((62772, 62825), 'numpy.logical_and', 'np.logical_and', (['(spused >= beg - premot)', '(spused <= beg)'], {}), '(spused >= beg - premot, spused <= beg)\n', (62786, 62825), True, 'import numpy as np\n'), ((62868, 62912), 'numpy.logical_and', 'np.logical_and', (['(spused >= beg)', '(spused <= end)'], {}), '(spused >= beg, spused <= end)\n', (62882, 62912), True, 'import numpy as np\n'), ((62951, 62965), 'numpy.size', 'np.size', (['step1'], {}), '(step1)\n', (62958, 62965), True, 'import numpy as np\n'), ((63012, 63026), 'numpy.size', 'np.size', (['step2'], {}), '(step2)\n', (63019, 63026), True, 'import numpy as np\n'), ((66294, 66314), 'numpy.array', 'np.array', (['statistics'], {}), '(statistics)\n', (66302, 66314), True, 'import numpy as np\n'), ((68289, 68310), 'numpy.array', 'np.array', (['statistics2'], {}), '(statistics2)\n', (68297, 68310), True, 'import numpy as np\n'), ((13490, 13540), 'numpy.logical_and', 'np.logical_and', (['(arr >= windowbeg)', '(arr <= windowend)'], {}), '(arr >= windowbeg, arr <= 
windowend)\n', (13504, 13540), True, 'import numpy as np\n'), ((22673, 22739), 'numpy.logical_and', 'np.logical_and', (['(spused >= beg - shoulder)', '(spused <= end + shoulder)'], {}), '(spused >= beg - shoulder, spused <= end + shoulder)\n', (22687, 22739), True, 'import numpy as np\n'), ((22787, 22833), 'numpy.logical_and', 'np.logical_and', (['(step1 >= 0)', '(step1 <= end - beg)'], {}), '(step1 >= 0, step1 <= end - beg)\n', (22801, 22833), True, 'import numpy as np\n'), ((22902, 22967), 'numpy.logical_and', 'np.logical_and', (['(step1 >= end - beg)', '(step1 <= end - beg + shoulder)'], {}), '(step1 >= end - beg, step1 <= end - beg + shoulder)\n', (22916, 22967), True, 'import numpy as np\n'), ((24272, 24339), 'numpy.logical_and', 'np.logical_and', (['(spused >= basecuts)', '(spused <= basecuts + meandurall)'], {}), '(spused >= basecuts, spused <= basecuts + meandurall)\n', (24286, 24339), True, 'import numpy as np\n'), ((45136, 45166), 'numpy.reciprocal', 'np.reciprocal', (['(freq[:, 0] / fs)'], {}), '(freq[:, 0] / fs)\n', (45149, 45166), True, 'import numpy as np\n'), ((27471, 27485), 'numpy.size', 'np.size', (['step1'], {}), '(step1)\n', (27478, 27485), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 1 07:26:50 2021
@author: P.Chimenti

Smoke test for the WoodHardness_base analysis class: load the data,
split it into fit/test subsets via a random boolean mask, run the MCMC
sampler and persist the chain (samples + blobs) to 'test_wh_base.npy'.
"""

import numpy as np
from Tests.BasicTools import WoodHardness_base as whb

# Number of MCMC samples to request; NOTE this name is rebound below to
# the returned chain array itself.
samples = 10000

wh_base = whb.WoodHardness_base()
print("Number of samples: ",len(wh_base.Data_x))

# Random True/False mask over the data points; p=[0.5, 1-0.5] is a 50/50
# split. Presumably True selects a point for the fit subset -- TODO confirm
# against WoodHardness_base.reset().
mask = np.random.choice(a=[False, True], size=len(wh_base.Data_x), p=[0.5, 1-0.5])
print(mask)

# reset() partitions Data_x into Data_x_fit / Data_x_test using the mask.
wh_base.reset(mask)
print(wh_base.Data_x)
print(wh_base.Data_x_fit)
print(wh_base.Data_x_test)

# Run the sampler; `samples` is rebound from the sample count to the chain.
# Blobs are concatenated column-wise with the samples before saving.
samples, blobs = wh_base.run_mcmc(nsamples = samples)
np.save('test_wh_base.npy', np.concatenate((samples, blobs), axis=1) )
| [
"Tests.BasicTools.WoodHardness_base.WoodHardness_base",
"numpy.concatenate"
] | [((277, 300), 'Tests.BasicTools.WoodHardness_base.WoodHardness_base', 'whb.WoodHardness_base', ([], {}), '()\n', (298, 300), True, 'from Tests.BasicTools import WoodHardness_base as whb\n'), ((624, 664), 'numpy.concatenate', 'np.concatenate', (['(samples, blobs)'], {'axis': '(1)'}), '((samples, blobs), axis=1)\n', (638, 664), True, 'import numpy as np\n')] |
from smt_solver.formula_parser.formula_parser import FormulaParser
from smt_solver.sat_solver.sat_solver import SATSolver
import numpy as np
class TestFormulaParser:
    """Unit tests for FormulaParser.

    Covers formula normalization, parsing into nested operator tuples,
    the Tseitin CNF transformation, clause preprocessing, SMT-LIB v2
    parsing with uninterpreted functions, boolean abstraction, and
    linear (TQ) equation import.
    """

    @staticmethod
    def test_prepare_formula():
        """_prepare_formula strips surrounding whitespace and redundant outer parentheses."""
        assert FormulaParser._prepare_formula('    ') == ''
        assert FormulaParser._prepare_formula('(((a)))') == 'a'
        assert FormulaParser._prepare_formula('   and    a     b    ') == 'and    a     b'
        assert FormulaParser._prepare_formula('   (   and a b   )    ') == 'and a b'
        assert FormulaParser._prepare_formula('(and (a) (b))') == 'and (a) (b)'
        assert FormulaParser._prepare_formula('and (a) (b)') == 'and (a) (b)'
        assert FormulaParser._prepare_formula('(((and (a) (b))))') == 'and (a) (b)'

    @staticmethod
    def test_parse_formula():
        """_parse_formula yields nested (operator, operand, ...) tuples; extra parens collapse."""
        assert FormulaParser._parse_formula("not (=> (not (and p q)) (not r))") == \
               ("not", ("=>", ("not", ("and", "p", "q")), ("not", "r")))
        assert FormulaParser._parse_formula("not (=> (not (and pq78 q)) (not r))") == \
               ("not", ("=>", ("not", ("and", "pq78", "q")), ("not", "r")))
        assert FormulaParser._parse_formula("not (=> (not (and ((p)) q)) ((not (r))))") == \
               ("not", ("=>", ("not", ("and", "p", "q")), ("not", "r")))
        assert FormulaParser._parse_formula("not (=> (not (and ((p)) ((not ((((r)))))))) ((not (r))))") == \
               ("not", ("=>", ("not", ("and", "p", ("not", "r"))), ("not", "r")))

    @staticmethod
    def test_tseitin_transform():
        """Tseitin transformation produces the expected CNF clause set (frozensets of literals)."""
        # Expected CNF for "not (=> (not (and p q)) (not r))"; variable
        # numbering is assigned by the transform itself.
        transformed_formula = {
            frozenset({1, -3}),
            frozenset({3, -1, -5}),
            frozenset({8, -3}),
            frozenset({-8, -7, 3}),
            frozenset({-3, 7}),
            frozenset({-1}),
            frozenset({1, 5})
        }
        assert FormulaParser._tseitin_transform(FormulaParser._parse_formula("not (=> (not (and p q)) (not r))")) == \
               transformed_formula
        assert FormulaParser._tseitin_transform(FormulaParser._parse_formula("not (=> (not (and pq78 q)) (not r))")) ==\
               transformed_formula
        assert FormulaParser._tseitin_transform(FormulaParser._parse_formula("and (not x) x")) == {
            frozenset({1, 2, -2}),
            frozenset({-1, -2}),
            frozenset({1}),
            frozenset({2, -1})
        }

    @staticmethod
    def test_preprocessing():
        """_preprocess drops tautological clauses (x and -x) and deduplicates literals."""
        assert FormulaParser._preprocess(frozenset({frozenset({})})) == frozenset()
        assert FormulaParser._preprocess(frozenset({frozenset({1})})) == frozenset({frozenset({1})})
        assert FormulaParser._preprocess(frozenset({frozenset({1}), frozenset({2})})) == \
               frozenset({frozenset({2}), frozenset({1})})
        assert FormulaParser._preprocess(frozenset({frozenset({2, 1}), frozenset({3, 4})})) == \
               frozenset({frozenset({3, 4}), frozenset({1, 2})})
        assert FormulaParser._preprocess(frozenset({frozenset({1, 2, 1, 1, 2}), frozenset({3, 4})})) == \
               frozenset({frozenset({3, 4}), frozenset({1, 2})})
        # A clause containing both 1 and -1 is a tautology and must be removed.
        assert FormulaParser._preprocess(frozenset({frozenset({1, 2, 1, 1, 2, -1}), frozenset({3, 4})})) == \
               frozenset({frozenset({3, 4})})
        assert FormulaParser._preprocess(frozenset({frozenset({1, -1}), frozenset({3, -4})})) == \
               frozenset({frozenset({3, -4})})
        assert FormulaParser._preprocess(frozenset({frozenset({2, 1, -1}), frozenset({3, -4})})) == \
               frozenset({frozenset({3, -4})})
        assert FormulaParser._preprocess(frozenset({frozenset({1, 2, -1}), frozenset({3, -4})})) == \
               frozenset({frozenset({3, -4})})
        assert FormulaParser._preprocess(frozenset({frozenset({1, -1, 2}), frozenset({3, -4})})) == \
               frozenset({frozenset({3, -4})})
        assert FormulaParser._preprocess(frozenset({frozenset({1, 1, 2, 3, 3, -4}), frozenset({3, -4, 1, 2})})) == \
               frozenset({frozenset({1, 2, 3, -4})})

    @staticmethod
    def test_parse_uf():
        """_parse_smt_lib_v2 extracts a function signature and parsed assertion formulas."""
        # Messy multi-line SMT-LIB input: irregular spacing, comments, and
        # declarations interleaved with assertions should all be tolerated.
        formula = """(declare-fun cost (Int Int Bool) Real)
                     (declare-fun s1 () Bool)
                     (declare-fun s2 () Bool)
                     (declare-fun s3 () Bool)
                     (declare-fun s4 ( ) Bool)
                     ( declare-fun q1 ( ) Real )
                     (declare-fun q2 ( Int Bool a ) Real )
                     (declare-fun q3 () Real)
                     ( assert ( = 250 (+ q1(q1(5,q2),8) 7 )))
                     ( assert (= 250 (+ (And (q1 ) (x ) ) ( q2(2,q2(1,true,2),8) ) )) )
                     (declare-fun q4 () Real)
                     ; comment
                     (assert ((= 250 ( + q1 q2))) )"""
        signature = {'cost': {'index': 0,
                              'output_type': 'Real',
                              'parameter_types': ['Int', 'Int', 'Bool']},
                     'q1': {'index': 5, 'output_type': 'Real', 'parameter_types': []},
                     'q2': {'index': 6,
                            'output_type': 'Real',
                            'parameter_types': ['Int', 'Bool', 'a']},
                     'q3': {'index': 7, 'output_type': 'Real', 'parameter_types': []},
                     'q4': {'index': 8, 'output_type': 'Real', 'parameter_types': []},
                     's1': {'index': 1, 'output_type': 'Bool', 'parameter_types': []},
                     's2': {'index': 2, 'output_type': 'Bool', 'parameter_types': []},
                     's3': {'index': 3, 'output_type': 'Bool', 'parameter_types': []},
                     's4': {'index': 4, 'output_type': 'Bool', 'parameter_types': []}}
        parsed_formulas = [('=', '250', ('+', ('q1', ('q1', '5', ('q2',)), '8'), '7')),
                           ('=',
                            '250',
                            ('+', ('and', ('q1',), 'x'), ('q2', '2', ('q2', '1', 'true', '2'), '8'))),
                           ('=', '250', ('+', ('q1',), ('q2',)))]
        assert FormulaParser._parse_smt_lib_v2(formula) == (signature, parsed_formulas)
        # Uses a weird one-line input
        formula = ("(declare-fun q1 () Real) ( assert (= 250 (+ q1 ( q1 (5 , q2 ) , 8 ) 7 )))" +
                   "( assert (= 260 (+ (And (q1 ) (x ) )" +
                   " ( q1(2,q1(1,true,2),8) ) )) ) (declare-fun q3 () Real) " +
                   "( assert ( - 50 ( + ( Or q3 ( not x ) ) 12 ) ) ) ")
        signature = {'q1': {'index': 0, 'output_type': 'Real', 'parameter_types': []},
                     'q3': {'index': 1, 'output_type': 'Real', 'parameter_types': []}}
        parsed_formulas = [('=', '250', ('+', ('q1', ('q1', '5', 'q2'), '8'), '7')),
                           ('=',
                            '260',
                            ('+', ('and', ('q1',), 'x'), ('q1', '2', ('q1', '1', 'true', '2'), '8'))),
                           ('-', '50', ('+', ('or', ('q3',), ('not', 'x')), '12'))]
        assert FormulaParser._parse_smt_lib_v2(formula) == (signature, parsed_formulas)
        formula = '(declare-fun f () Bool) (assert (=> (= a b) f(1)))'
        signature = {'f': {'index': 0, 'output_type': 'Bool', 'parameter_types': []}}
        parsed_formulas = [('=>', ('=', 'a', 'b'), ('f', '1'))]
        assert FormulaParser._parse_smt_lib_v2(formula) == (signature, parsed_formulas)

    @staticmethod
    def test_create_boolean_abstraction():
        """_create_boolean_abstraction maps atomic sub-formulas to numbered boolean variables."""
        formula = ' (( ( and ( a ) ( b ) )) ) '
        abstraction = {}
        signature = {}
        parsed_formula = FormulaParser._parse_formula(formula)
        abstracted_formula = FormulaParser._create_boolean_abstraction(parsed_formula, signature, abstraction)
        assert abstracted_formula == ('and', '1', '2')
        assert abstraction == {'a': '1', 'b': '2'}
        formula = '(((and ( = true false ) (a))))'
        abstraction = {}
        parsed_formula = FormulaParser._parse_formula(formula)
        abstracted_formula = FormulaParser._create_boolean_abstraction(parsed_formula, signature, abstraction)
        assert abstracted_formula == ('and', '1', '2')
        assert abstraction == {('=', 'true', 'false'): '1', 'a': '2'}
        formula = '(declare-fun f (Int Int) Bool) (assert ((and (= (not a) f ( 1 , 2 ) ) (a))))'
        abstraction = {}
        signature, parsed_formula = FormulaParser._parse_smt_lib_v2(formula)
        abstracted_formula = FormulaParser._create_boolean_abstraction(parsed_formula.pop(), signature, abstraction)
        assert abstracted_formula == ('and', '1', '2')
        assert abstraction == {'a': '2', ('=', ('not', 'a'), ('f', '1', '2')): '1'}
        formula = ('(declare-fun f (Int) Bool) ' +
                   '(declare-fun g (Int) Bool) '
                   '(assert (and (and (= g(a) c) (or (not (= f(g(a)) f(c))) (= g(a) d))) (not (= c d)))')
        abstraction = {}
        signature, parsed_formula = FormulaParser._parse_smt_lib_v2(formula)
        abstracted_formula = FormulaParser._create_boolean_abstraction(parsed_formula.pop(), signature, abstraction)
        assert abstracted_formula == ('and', ('and', '1', ('or', ('not', '2'), '3')), ('not', '4'))
        assert abstraction == {('=', ('f', ('g', 'a')), ('f', 'c')): '2',
                               ('=', ('g', 'a'), 'c'): '1',
                               ('=', ('g', 'a'), 'd'): '3',
                               ('=', 'c', 'd'): '4'}
        formula = '(declare-fun f () Bool) (assert (=> (= a b) f(1)))'
        abstraction = {}
        signature, parsed_formula = FormulaParser._parse_smt_lib_v2(formula)
        abstracted_formula = FormulaParser._create_boolean_abstraction(parsed_formula.pop(), signature, abstraction)
        assert abstracted_formula == ('=>', '1', '2')
        assert abstraction == {('=', 'a', 'b'): '1', ('f', '1'): '2'}

    @staticmethod
    def test_import_uf():
        """import_uf turns SMT-LIB formulas with uninterpreted functions into Tseitin CNF."""
        formula = '(declare-fun f (Int Int) Bool) (assert ((and a f ( 1 , 2 ) )))'
        cnf_formula, _, _ = FormulaParser.import_uf(formula)
        assert cnf_formula == frozenset({
            frozenset({1, -3, -2}),
            frozenset({1}),
            frozenset({2, -1}),
            frozenset({3, -1})
        })
        formula = '(declare-fun f (Int Int) Bool) (declare-fun g () Bool) (assert ((and (= a g) f ( 1 , 2 ) )))'
        cnf_formula, _, _ = FormulaParser.import_uf(formula)
        assert cnf_formula == frozenset({
            frozenset({1, -3, -2}),
            frozenset({1}),
            frozenset({2, -1}),
            frozenset({3, -1})
        })
        # Multiple asserts are conjoined into one clause set.
        formula = ('(declare-fun f (Int Int) Bool) ' +
                   '(declare-fun g () Bool) ' +
                   '(assert ((and (= a g) f ( 1 , 2 ) ))) ' +
                   '(assert (not f(5,7)))')
        cnf_formula, _, _ = FormulaParser.import_uf(formula)
        assert cnf_formula == frozenset({
            frozenset({-4}),
            frozenset({1}),
            frozenset({1, -3, -2}),
            frozenset({3, -1}),
            frozenset({2, -1})
        })
        formula = ('(declare-fun f (Int Int) Bool) ' +
                   '(declare-fun g () Bool) ' +
                   '(assert (and (= 5 4) f(1, 2))) ' +
                   '(assert (not g(1, 2)))')
        cnf_formula, _, _ = FormulaParser.import_uf(formula)
        assert cnf_formula == frozenset({
            frozenset({-4}),
            frozenset({1}),
            frozenset({1, -3, -2}),
            frozenset({3, -1}),
            frozenset({2, -1})
        })
        # Identical applications f(2,3) abstract to the same variable (1 and -1);
        # different applications f(3,3) vs f(2,3) get distinct variables (1 and -2).
        formula = ('(declare-fun f (Int Int) Bool) ' +
                   '(assert (= f(2,3) a) ' +
                   '(assert (not (= f(2,3) a))')
        cnf_formula, _, _ = FormulaParser.import_uf(formula)
        assert cnf_formula == frozenset({frozenset({1}), frozenset({-1})})
        formula = ('(declare-fun f (Int Int) Bool) ' +
                   '(assert (= f(3,3) a) ' +
                   '(assert (not (= f(2,3) a))')
        cnf_formula, _, _ = FormulaParser.import_uf(formula)
        assert cnf_formula == frozenset({frozenset({1}), frozenset({-2})})

    @staticmethod
    def test_parse_linear_equation():
        """_parse_linear_equation builds coefficient vector A and constant b; like terms combine."""
        signature = {"x1": {"index": 0}, "x2": {"index": 1}, "x3": {"index": 2}}
        _, A, b = FormulaParser._parse_linear_equation("-5x1", "-6", signature)
        assert np.allclose(A, np.array([-5., 0., 0.]))
        assert np.allclose(b, np.array([-6.]))
        # 1*x1 - 1.1*x1 collapses to -0.1 in the first coefficient slot.
        _, A, b = FormulaParser._parse_linear_equation("1*x1+6*x2-5*x3-1.1*x1", "0.52", signature)
        assert np.allclose(A, np.array([-0.1, 6., -5.]))
        assert np.allclose(b, np.array([0.52]))

    @staticmethod
    def test_import_linear_equation():
        """import_tq collects the distinct non-boolean (linear) clauses from TQ formulas."""
        formula = "(declare-fun x1 () Int) (assert (<= 5x1 1))"
        _, (_, _), non_boolean_clauses = FormulaParser.import_tq(formula)
        assert non_boolean_clauses == {('<=', (5.,), 1.)}
        # A clause and its negation share the same underlying linear constraint.
        formula = "(declare-fun x1 () Int) (assert (not (<= 5x1 1))) (assert (<= 5x1 1))"
        _, (_, _), non_boolean_clauses = FormulaParser.import_tq(formula)
        assert non_boolean_clauses == {('<=', (5.,), 1.)}
        formula = "(declare-fun x1 () Int) (declare-fun x2 () Int) (assert (<= 5x1 1)) (assert (<= (1x1 + 6x2) 0.5))"
        _, (_, _), non_boolean_clauses = FormulaParser.import_tq(formula)
        assert non_boolean_clauses == {('<=', (5.0, 0), 1.0), ('<=', (1.0, 6.0), 0.5)}
| [
"smt_solver.formula_parser.formula_parser.FormulaParser.import_uf",
"smt_solver.formula_parser.formula_parser.FormulaParser._create_boolean_abstraction",
"smt_solver.formula_parser.formula_parser.FormulaParser._parse_linear_equation",
"smt_solver.formula_parser.formula_parser.FormulaParser._prepare_formula",
... | [((7792, 7829), 'smt_solver.formula_parser.formula_parser.FormulaParser._parse_formula', 'FormulaParser._parse_formula', (['formula'], {}), '(formula)\n', (7820, 7829), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((7859, 7944), 'smt_solver.formula_parser.formula_parser.FormulaParser._create_boolean_abstraction', 'FormulaParser._create_boolean_abstraction', (['parsed_formula', 'signature', 'abstraction'], {}), '(parsed_formula, signature,\n abstraction)\n', (7900, 7944), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((8162, 8199), 'smt_solver.formula_parser.formula_parser.FormulaParser._parse_formula', 'FormulaParser._parse_formula', (['formula'], {}), '(formula)\n', (8190, 8199), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((8229, 8314), 'smt_solver.formula_parser.formula_parser.FormulaParser._create_boolean_abstraction', 'FormulaParser._create_boolean_abstraction', (['parsed_formula', 'signature', 'abstraction'], {}), '(parsed_formula, signature,\n abstraction)\n', (8270, 8314), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((8595, 8635), 'smt_solver.formula_parser.formula_parser.FormulaParser._parse_smt_lib_v2', 'FormulaParser._parse_smt_lib_v2', (['formula'], {}), '(formula)\n', (8626, 8635), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((9160, 9200), 'smt_solver.formula_parser.formula_parser.FormulaParser._parse_smt_lib_v2', 'FormulaParser._parse_smt_lib_v2', (['formula'], {}), '(formula)\n', (9191, 9200), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((9798, 9838), 'smt_solver.formula_parser.formula_parser.FormulaParser._parse_smt_lib_v2', 'FormulaParser._parse_smt_lib_v2', (['formula'], {}), '(formula)\n', (9829, 9838), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((10236, 10268), 
'smt_solver.formula_parser.formula_parser.FormulaParser.import_uf', 'FormulaParser.import_uf', (['formula'], {}), '(formula)\n', (10259, 10268), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((10591, 10623), 'smt_solver.formula_parser.formula_parser.FormulaParser.import_uf', 'FormulaParser.import_uf', (['formula'], {}), '(formula)\n', (10614, 10623), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((11042, 11074), 'smt_solver.formula_parser.formula_parser.FormulaParser.import_uf', 'FormulaParser.import_uf', (['formula'], {}), '(formula)\n', (11065, 11074), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((11516, 11548), 'smt_solver.formula_parser.formula_parser.FormulaParser.import_uf', 'FormulaParser.import_uf', (['formula'], {}), '(formula)\n', (11539, 11548), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((11936, 11968), 'smt_solver.formula_parser.formula_parser.FormulaParser.import_uf', 'FormulaParser.import_uf', (['formula'], {}), '(formula)\n', (11959, 11968), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((12222, 12254), 'smt_solver.formula_parser.formula_parser.FormulaParser.import_uf', 'FormulaParser.import_uf', (['formula'], {}), '(formula)\n', (12245, 12254), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((12486, 12547), 'smt_solver.formula_parser.formula_parser.FormulaParser._parse_linear_equation', 'FormulaParser._parse_linear_equation', (['"""-5x1"""', '"""-6"""', 'signature'], {}), "('-5x1', '-6', signature)\n", (12522, 12547), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((12669, 12754), 'smt_solver.formula_parser.formula_parser.FormulaParser._parse_linear_equation', 'FormulaParser._parse_linear_equation', (['"""1*x1+6*x2-5*x3-1.1*x1"""', '"""0.52"""', 'signature'], {}), "('1*x1+6*x2-5*x3-1.1*x1', '0.52', signature\n 
)\n", (12705, 12754), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((13018, 13050), 'smt_solver.formula_parser.formula_parser.FormulaParser.import_tq', 'FormulaParser.import_tq', (['formula'], {}), '(formula)\n', (13041, 13050), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((13241, 13273), 'smt_solver.formula_parser.formula_parser.FormulaParser.import_tq', 'FormulaParser.import_tq', (['formula'], {}), '(formula)\n', (13264, 13273), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((13492, 13524), 'smt_solver.formula_parser.formula_parser.FormulaParser.import_tq', 'FormulaParser.import_tq', (['formula'], {}), '(formula)\n', (13515, 13524), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((234, 277), 'smt_solver.formula_parser.formula_parser.FormulaParser._prepare_formula', 'FormulaParser._prepare_formula', (['""" """'], {}), "(' ')\n", (264, 277), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((299, 340), 'smt_solver.formula_parser.formula_parser.FormulaParser._prepare_formula', 'FormulaParser._prepare_formula', (['"""(((a)))"""'], {}), "('(((a)))')\n", (329, 340), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((363, 418), 'smt_solver.formula_parser.formula_parser.FormulaParser._prepare_formula', 'FormulaParser._prepare_formula', (['""" and a b """'], {}), "(' and a b ')\n", (393, 418), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((447, 506), 'smt_solver.formula_parser.formula_parser.FormulaParser._prepare_formula', 'FormulaParser._prepare_formula', (['""" ( and a b ) """'], {}), "(' ( and a b ) ')\n", (477, 506), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((535, 582), 'smt_solver.formula_parser.formula_parser.FormulaParser._prepare_formula', 'FormulaParser._prepare_formula', (['"""(and (a) 
(b))"""'], {}), "('(and (a) (b))')\n", (565, 582), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((615, 660), 'smt_solver.formula_parser.formula_parser.FormulaParser._prepare_formula', 'FormulaParser._prepare_formula', (['"""and (a) (b)"""'], {}), "('and (a) (b)')\n", (645, 660), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((693, 744), 'smt_solver.formula_parser.formula_parser.FormulaParser._prepare_formula', 'FormulaParser._prepare_formula', (['"""(((and (a) (b))))"""'], {}), "('(((and (a) (b))))')\n", (723, 744), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((826, 890), 'smt_solver.formula_parser.formula_parser.FormulaParser._parse_formula', 'FormulaParser._parse_formula', (['"""not (=> (not (and p q)) (not r))"""'], {}), "('not (=> (not (and p q)) (not r))')\n", (854, 890), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((981, 1048), 'smt_solver.formula_parser.formula_parser.FormulaParser._parse_formula', 'FormulaParser._parse_formula', (['"""not (=> (not (and pq78 q)) (not r))"""'], {}), "('not (=> (not (and pq78 q)) (not r))')\n", (1009, 1048), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((1142, 1214), 'smt_solver.formula_parser.formula_parser.FormulaParser._parse_formula', 'FormulaParser._parse_formula', (['"""not (=> (not (and ((p)) q)) ((not (r))))"""'], {}), "('not (=> (not (and ((p)) q)) ((not (r))))')\n", (1170, 1214), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((1305, 1398), 'smt_solver.formula_parser.formula_parser.FormulaParser._parse_formula', 'FormulaParser._parse_formula', (['"""not (=> (not (and ((p)) ((not ((((r)))))))) ((not (r))))"""'], {}), "(\n 'not (=> (not (and ((p)) ((not ((((r)))))))) ((not (r))))')\n", (1333, 1398), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((6169, 6209), 
'smt_solver.formula_parser.formula_parser.FormulaParser._parse_smt_lib_v2', 'FormulaParser._parse_smt_lib_v2', (['formula'], {}), '(formula)\n', (6200, 6209), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((7203, 7243), 'smt_solver.formula_parser.formula_parser.FormulaParser._parse_smt_lib_v2', 'FormulaParser._parse_smt_lib_v2', (['formula'], {}), '(formula)\n', (7234, 7243), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((7513, 7553), 'smt_solver.formula_parser.formula_parser.FormulaParser._parse_smt_lib_v2', 'FormulaParser._parse_smt_lib_v2', (['formula'], {}), '(formula)\n', (7544, 7553), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((12578, 12604), 'numpy.array', 'np.array', (['[-5.0, 0.0, 0.0]'], {}), '([-5.0, 0.0, 0.0])\n', (12586, 12604), True, 'import numpy as np\n'), ((12633, 12649), 'numpy.array', 'np.array', (['[-6.0]'], {}), '([-6.0])\n', (12641, 12649), True, 'import numpy as np\n'), ((12780, 12807), 'numpy.array', 'np.array', (['[-0.1, 6.0, -5.0]'], {}), '([-0.1, 6.0, -5.0])\n', (12788, 12807), True, 'import numpy as np\n'), ((12837, 12853), 'numpy.array', 'np.array', (['[0.52]'], {}), '([0.52])\n', (12845, 12853), True, 'import numpy as np\n'), ((1848, 1912), 'smt_solver.formula_parser.formula_parser.FormulaParser._parse_formula', 'FormulaParser._parse_formula', (['"""not (=> (not (and p q)) (not r))"""'], {}), "('not (=> (not (and p q)) (not r))')\n", (1876, 1912), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((1999, 2066), 'smt_solver.formula_parser.formula_parser.FormulaParser._parse_formula', 'FormulaParser._parse_formula', (['"""not (=> (not (and pq78 q)) (not r))"""'], {}), "('not (=> (not (and pq78 q)) (not r))')\n", (2027, 2066), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n'), ((2152, 2197), 'smt_solver.formula_parser.formula_parser.FormulaParser._parse_formula', 
'FormulaParser._parse_formula', (['"""and (not x) x"""'], {}), "('and (not x) x')\n", (2180, 2197), False, 'from smt_solver.formula_parser.formula_parser import FormulaParser\n')] |
import argh
import logging
import networkx as nx
import pygna.reading_class as rc
import pygna.output as out
import pygna.statistical_test as st
import pygna.painter as paint
import pygna.diagnostic as diagnostic
import pygna.command as cmd
import numpy as np
def average_closeness_centrality(graph: "nx.Graph", geneset: set, diz: dict, observed_flag: bool = False) -> float:
    """
    This function calculates the average closeness centrality of a geneset. For
    a single node, the closeness centrality is defined as the inverse of the
    shortest path distance of the node from all the other nodes.

    Given a network with N nodes and a distance shortest path function between
    two nodes d(u,v) closeness centrality (u)= (N -1) / sum (v != u) d(u,v)

    :param graph: The network to analyse (not used by the computation itself;
        kept because the statistical-test callback passes it)
    :param geneset: the geneset to analyse
    :param diz: dictionary with the network node list ('nodes') and, for each
        node, the precomputed sum of shortest paths ('vector')
    :param observed_flag: unused; kept for the callback interface
    :return: the mean closeness centrality over the geneset
    """
    # NOTE: the original assigned graph_centrality = [] first; that dead
    # assignment is removed and len() is hoisted out of the comprehension.
    n_nodes = len(diz["nodes"])
    ids = [diz["nodes"].index(n) for n in geneset]
    graph_centrality = [(n_nodes - 1) / diz['vector'][idx] for idx in ids]
    return np.mean(graph_centrality)
def test_topology_centrality(
        network_file: "network file",
        geneset_file: "GMT geneset file",
        distance_matrix_filename: "The matrix with the SP for each node",
        output_table: "output results table, use .csv extension",
        setname: "Geneset to analyse" = None,
        size_cut: "removes all genesets with a mapped length < size_cut" = 20,
        number_of_permutations: "number of permutations for computing the empirical pvalue" = 500,
        cores: "Number of cores for the multiprocessing" = 1,
        in_memory: 'load hdf5 data onto memory' = False,):
    """
    This function calculates the average closeness centrality of a geneset.
    For a single node, the closeness centrality is defined as the inverse
    of the shortest path distance of the node from all the other nodes.
    """
    logging.info("Evaluating the test topology total degree, please wait")
    # read the network and keep only its largest connected component
    network = rc.ReadTsv(network_file).get_network()
    network = nx.Graph(network.subgraph(max(nx.connected_components(network), key=len)))
    geneset = rc.ReadGmt(geneset_file).get_geneset(setname)
    setnames = [key for key in geneset.keys()]
    # NOTE(review): the distance matrix file is read twice here (once per key);
    # 'nodes' is element 0 and 'matrix' element 1 of the returned pair
    diz = {"nodes": cmd.read_distance_matrix(distance_matrix_filename, in_memory=in_memory)[0],
           "matrix": cmd.read_distance_matrix(distance_matrix_filename, in_memory=in_memory)[1]}
    # symmetrise the (triangular) shortest-path matrix and zero the diagonal
    diz["matrix"] = diz["matrix"] + np.transpose(diz["matrix"])
    np.fill_diagonal(diz["matrix"], float(0))
    # per-node sum of shortest paths, consumed by average_closeness_centrality
    diz['vector'] = np.sum(diz["matrix"],axis = 0)
    # Generate output
    output1 = out.Output(network_file, output_table, "topology_centrality", geneset_file, setnames)
    logging.info("Results file = " + output1.output_table_results)
    # Create table
    output1.create_st_table_empirical()
    st_test = st.StatisticalTest(average_closeness_centrality, network, diz)
    for setname, item in geneset.items():
        # Geneset smaller than size cut are not taken into consideration
        if len(item) > size_cut:
            item = set(item)
            observed, pvalue, null_d, n_mapped, n_geneset = st_test.empirical_pvalue(item,
                                                                                     max_iter=number_of_permutations,
                                                                                     alternative="greater",
                                                                                     cores=cores)
            logging.info("Setname:" + setname)
            if n_mapped < size_cut:
                logging.info("%s removed from results since nodes mapped are < %d" % (setname, size_cut))
            else:
                logging.info("Observed: %g p-value: %g" % (observed, pvalue))
                output1.update_st_table_empirical(setname, n_mapped, n_geneset, number_of_permutations, observed,
                                                  pvalue, np.mean(null_d), np.var(null_d))
    output1.close_temporary_table()
    logging.info("Test topology CENTRALITY completed")
def main():
    """
    argh dispatch
    """
    # expose test_topology_centrality as a command-line sub-command
    argh.dispatch_commands([test_topology_centrality])
# script entry point
if __name__ == "__main__":
    """
    MAIN
    """
    main()
| [
"pygna.statistical_test.StatisticalTest",
"numpy.sum",
"pygna.reading_class.ReadTsv",
"argh.dispatch_commands",
"numpy.transpose",
"logging.info",
"pygna.command.read_distance_matrix",
"numpy.mean",
"networkx.connected_components",
"pygna.output.Output",
"pygna.reading_class.ReadGmt",
"numpy.v... | [((1147, 1172), 'numpy.mean', 'np.mean', (['graph_centrality'], {}), '(graph_centrality)\n', (1154, 1172), True, 'import numpy as np\n'), ((1980, 2050), 'logging.info', 'logging.info', (['"""Evaluating the test topology total degree, please wait"""'], {}), "('Evaluating the test topology total degree, please wait')\n", (1992, 2050), False, 'import logging\n'), ((2627, 2656), 'numpy.sum', 'np.sum', (["diz['matrix']"], {'axis': '(0)'}), "(diz['matrix'], axis=0)\n", (2633, 2656), True, 'import numpy as np\n'), ((2695, 2784), 'pygna.output.Output', 'out.Output', (['network_file', 'output_table', '"""topology_centrality"""', 'geneset_file', 'setnames'], {}), "(network_file, output_table, 'topology_centrality', geneset_file,\n setnames)\n", (2705, 2784), True, 'import pygna.output as out\n'), ((2785, 2847), 'logging.info', 'logging.info', (["('Results file = ' + output1.output_table_results)"], {}), "('Results file = ' + output1.output_table_results)\n", (2797, 2847), False, 'import logging\n'), ((2921, 2983), 'pygna.statistical_test.StatisticalTest', 'st.StatisticalTest', (['average_closeness_centrality', 'network', 'diz'], {}), '(average_closeness_centrality, network, diz)\n', (2939, 2983), True, 'import pygna.statistical_test as st\n'), ((4110, 4160), 'logging.info', 'logging.info', (['"""Test topology CENTRALITY completed"""'], {}), "('Test topology CENTRALITY completed')\n", (4122, 4160), False, 'import logging\n'), ((4213, 4263), 'argh.dispatch_commands', 'argh.dispatch_commands', (['[test_topology_centrality]'], {}), '([test_topology_centrality])\n', (4235, 4263), False, 'import argh\n'), ((2531, 2558), 'numpy.transpose', 'np.transpose', (["diz['matrix']"], {}), "(diz['matrix'])\n", (2543, 2558), True, 'import numpy as np\n'), ((2065, 2089), 'pygna.reading_class.ReadTsv', 'rc.ReadTsv', (['network_file'], {}), '(network_file)\n', (2075, 2089), True, 'import pygna.reading_class as rc\n'), ((2207, 2231), 'pygna.reading_class.ReadGmt', 'rc.ReadGmt', 
(['geneset_file'], {}), '(geneset_file)\n', (2217, 2231), True, 'import pygna.reading_class as rc\n'), ((2322, 2393), 'pygna.command.read_distance_matrix', 'cmd.read_distance_matrix', (['distance_matrix_filename'], {'in_memory': 'in_memory'}), '(distance_matrix_filename, in_memory=in_memory)\n', (2346, 2393), True, 'import pygna.command as cmd\n'), ((2419, 2490), 'pygna.command.read_distance_matrix', 'cmd.read_distance_matrix', (['distance_matrix_filename'], {'in_memory': 'in_memory'}), '(distance_matrix_filename, in_memory=in_memory)\n', (2443, 2490), True, 'import pygna.command as cmd\n'), ((3589, 3623), 'logging.info', 'logging.info', (["('Setname:' + setname)"], {}), "('Setname:' + setname)\n", (3601, 3623), False, 'import logging\n'), ((2148, 2180), 'networkx.connected_components', 'nx.connected_components', (['network'], {}), '(network)\n', (2171, 2180), True, 'import networkx as nx\n'), ((3676, 3770), 'logging.info', 'logging.info', (["('%s removed from results since nodes mapped are < %d' % (setname, size_cut))"], {}), "('%s removed from results since nodes mapped are < %d' % (\n setname, size_cut))\n", (3688, 3770), False, 'import logging\n'), ((3800, 3861), 'logging.info', 'logging.info', (["('Observed: %g p-value: %g' % (observed, pvalue))"], {}), "('Observed: %g p-value: %g' % (observed, pvalue))\n", (3812, 3861), False, 'import logging\n'), ((4034, 4049), 'numpy.mean', 'np.mean', (['null_d'], {}), '(null_d)\n', (4041, 4049), True, 'import numpy as np\n'), ((4051, 4065), 'numpy.var', 'np.var', (['null_d'], {}), '(null_d)\n', (4057, 4065), True, 'import numpy as np\n')] |
import os
import logging
logger = logging.getLogger(__name__)
import numpy as np
import astropy.io.fits as fits
import scipy.interpolate as intp
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
def get_region_lst(header):
    """Get science/overscan array boundaries for the two CCD readouts.

    Args:
        header (:class:`astropy.io.fits.Header`): header providing the image
            size ('NAXIS1'/'NAXIS2') and binning ('BIN-FCT1'/'BIN-FCT2').

    Returns:
        list: two elements, one per readout; each is a tuple
            ((sci_x1, sci_x2, sci_y1, sci_y2),
             (ovr_x1, ovr_x2, ovr_y1, ovr_y2)).

    Raises:
        ValueError: if the image size / binning combination is not supported.
    """
    nx = header['NAXIS1']
    ny = header['NAXIS2']
    binx = header['BIN-FCT1']
    biny = header['BIN-FCT2']
    if (nx, ny)==(2148, 4100) and (binx, biny)==(1, 1):
        # unbinned frame: 1024-pixel science halves, 50-pixel overscan strips
        sci1_x1, sci1_x2 = 0, 1024
        sci1_y1, sci1_y2 = 0, 4100
        ovr1_x1, ovr1_x2 = 1024, 1024+50
        ovr1_y1, ovr1_y2 = 0, 4100
        sci2_x1, sci2_x2 = 1024+50*2, 1024*2+50*2
        sci2_y1, sci2_y2 = 0, 4100
        ovr2_x1, ovr2_x2 = 1024+50, 1024+50*2
        ovr2_y1, ovr2_y2 = 0, 4100
    elif (nx, ny)==(1124, 2050) and (binx, biny)==(2, 2):
        # 2x2 binned frame: science halves shrink to 512, overscan stays 50
        sci1_x1, sci1_x2 = 0, 512
        sci1_y1, sci1_y2 = 0, 2050
        ovr1_x1, ovr1_x2 = 512, 512+50
        ovr1_y1, ovr1_y2 = 0, 2050
        sci2_x1, sci2_x2 = 512+50*2, 512*2+50*2
        sci2_y1, sci2_y2 = 0, 2050
        ovr2_x1, ovr2_x2 = 512+50, 512+50*2
        ovr2_y1, ovr2_y2 = 0, 2050
    else:
        # previously this branch only printed and fell through, crashing
        # later with a NameError; fail explicitly instead
        raise ValueError(
            'unsupported image geometry: nx={}, ny={}, binx={}, biny={}'.format(
                nx, ny, binx, biny))
    return [
        ((sci1_x1, sci1_x2, sci1_y1, sci1_y2),
         (ovr1_x1, ovr1_x2, ovr1_y1, ovr1_y2)),
        ((sci2_x1, sci2_x2, sci2_y1, sci2_y2),
         (ovr2_x1, ovr2_x2, ovr2_y1, ovr2_y2)),
        ]
# Standard HDS setups. Each value is a tuple of
# (wavemin_2, wavemax_2, wavemin_1, wavemax_1,
#  collimator, cross-disperser rotation angle, filter1, filter2),
# where the *_1 wavelengths belong to detector 1 and the *_2 wavelengths to
# detector 2 (column names taken from get_setup_param below).
std_setup_lst = {
        'StdUa':   (310, 387, 400, 476, 'BLUE', 4.986, 'FREE', 'FREE'),
        'StdUb':   (298, 370, 382, 458, 'BLUE', 4.786, 'FREE', 'FREE'),
        'StdBa':   (342, 419, 432, 508, 'BLUE', 5.386, 'FREE', 'FREE'),
        'StdBc':   (355, 431, 445, 521, 'BLUE', 5.561, 'FREE', 'FREE'),
        'StdYa':   (403, 480, 494, 566, 'BLUE', 6.136, 'FREE', 'FREE'),
        #'StdYb':   (414, 535, 559, 681, 'RED',  4.406, 'FREE', 'KV370'),
        'StdYb':   (414, 540, 559, 681, 'RED',  4.406, 'FREE', 'KV370'),
        'StdYc':   (442, 566, 586, 705, 'RED',  4.619, 'FREE', 'KV389'),
        'StdYd':   (406, 531, 549, 666, 'RED',  4.336, 'FREE', 'KV370'),
        'StdRa':   (511, 631, 658, 779, 'RED',  5.163, 'FREE', 'SC46'),
        'StdRb':   (534, 659, 681, 800, 'RED',  5.336, 'FREE', 'SC46'),
        'StdNIRa': (750, 869, 898, 1016, 'RED', 7.036, 'OG530', 'FREE'),
        'StdNIRb': (673, 789, 812, 937, 'RED',  6.386, 'OG530', 'FREE'),
        'StdNIRc': (617, 740, 759, 882, 'RED',  5.969, 'OG530', 'FREE'),
        'StdI2a':  (498, 618, 637, 759, 'RED',  5.036, 'FREE', 'SC46'),
        'StdI2b':  (355, 476, 498, 618, 'RED',  3.936, 'FREE', 'FREE'),
        }
def get_setup_param(setup, key=None):
    """Look up one parameter of a standard setup.

    Args:
        setup (str): name of a standard setup (key of ``std_setup_lst``).
        key (str): parameter name; one of the stored columns, or the derived
            keys 'wave_1', 'wave_2', 'crossd'.

    Returns:
        int, str, tuple, or None: the requested value, or None when either
            the setup or the key is unknown.
    """
    if setup not in std_setup_lst:
        return None
    values = std_setup_lst[setup]
    # direct column lookup
    column_table = {
            'wavemin_2': values[0], 'wavemax_2': values[1],
            'wavemin_1': values[2], 'wavemax_1': values[3],
            'collim':    values[4], 'cro_ang':   values[5],
            'filter1':   values[6], 'filter2':   values[7],
            }
    if key in column_table:
        return column_table[key]
    # derived keys
    if key == 'wave_1':
        return (values[2], values[3])
    if key == 'wave_2':
        return (values[0], values[1])
    if key == 'crossd':
        return values[4]
    return None
def get_std_setup(header):
    """Identify the standard HDS setup matching a FITS header.

    Args:
        header (:class:`astropy.io.fits.Header`): frame header.

    Returns:
        str: name of the matching standard setup, or 'nonStd'.
    """
    objtype  = header['DATA-TYP']
    objname  = header['OBJECT']
    ccd_id   = header['DET-ID']
    filter1  = header['FILTER01']
    filter2  = header['FILTER02']
    collim   = header['H_COLLIM']
    crossd   = header['H_CROSSD']
    ech_ang  = header['H_EROTAN']
    cro_ang  = header['H_CROTAN']
    wave_min = int(round(header['WAV-MIN']))
    wave_max = int(round(header['WAV-MAX']))
    # bias frames are matched without checking the filters
    is_bias = (objtype == 'BIAS' and objname == 'BIAS')
    for stdname, setup in std_setup_lst.items():
        optics_ok = (collim == setup[4] and crossd == setup[4]
                     and abs(cro_ang - setup[5]) < 0.05)
        filters_ok = is_bias or (filter1 == setup[6] and filter2 == setup[7])
        # detector 1 carries the *_1 wavelength range, detector 2 the *_2 one
        wave_ok = (ccd_id, wave_min, wave_max) in [
                (1, setup[2], setup[3]), (2, setup[0], setup[1])]
        if optics_ok and filters_ok and wave_ok:
            return stdname
    print('Unknown setup:',collim, crossd, filter1, filter2, cro_ang, ccd_id,
            wave_min, wave_max)
    return 'nonStd'
def print_wrapper(string, item):
    """A wrapper for log printing for Subaru/HDS pipeline.

    Args:
        string (str): The output string for wrapping.
        item (:class:`astropy.table.Row`): The log item.

    Returns:
        str: The color-coded string.
    """
    objtype = item['objtype']
    objname = item['object']
    # ANSI SGR codes: 2 = dim, 93 = light yellow, 91 = light red, 1 = bold
    code_by_frame = {
        ('BIAS', 'BIAS'):             '2',
        ('COMPARISON', 'COMPARISON'): '93',
        ('FLAT', 'FLAT'):             '91',
        }
    code = code_by_frame.get((objtype, objname))
    if code is None and objtype == 'OBJECT':
        # science frames are highlighted regardless of the object name
        code = '1'
    if code is None:
        return string
    # strip any embedded resets so the whole string keeps one style
    return '\033[' + code + 'm' + string.replace('\033[0m', '') + '\033[0m'
def get_badpixel_mask(ccd_id, binx, biny):
    """Get bad pixel mask for Subaru/HDS CCDs.

    Args:
        ccd_id (int): detector ID (1 or 2).
        binx (int): binning factor along x.
        biny (int): binning factor along y.

    Returns:
        :class:`numpy.ndarray`: int16 array where 1 marks a bad pixel.

    Raises:
        ValueError: for an unknown detector ID or an unsupported binning
            (previously these cases crashed with an UnboundLocalError).
    """
    # NOTE: np.bool was removed in NumPy 1.24; the builtin bool is used here.
    if ccd_id == 1:
        if (binx,biny)==(1,1):
            # all False
            mask = np.zeros((4100,2048), dtype=bool)
            # known bad columns/segments of detector 1 (unbinned)
            mask[1124:2069,937] = True
            mask[1124:2059,938] = True
            mask[1136:1974,939] = True
            mask[1210:2018,940] = True
            mask[1130:2056,941] = True
            mask[1130:1994,942] = True
            mask[1130:,1105] = True
            mask[:,1106] = True
            mask[1209:,1107] = True
            mask[1136:,1108] = True
            mask[1124:,1109] = True
            mask[1124:,1110] = True
        elif (binx,biny)==(2,2):
            mask = np.zeros((2050,1024), dtype=bool)
            mask[:,653] = True
            mask[723:,654] = True
            mask[726:,655] = True
        else:
            raise ValueError(
                'unsupported binning for det {}: ({}, {})'.format(
                    ccd_id, binx, biny))
    elif ccd_id == 2:
        if (binx,biny)==(1,1):
            mask = np.zeros((4100,2048), dtype=bool)
            # known bad columns/segments of detector 2 (unbinned)
            mask[628:,1720] = True
            mask[131:,1741] = True
            mask[131:,1742] = True
            mask[3378:,1426] = True
            mask[2674:,127] = True
            mask[2243:,358] = True
            mask[2243:,359] = True
            mask[3115:3578,90] = True
            mask[3003:,88] = True
            mask[2694:,75] = True
            mask[2839:,58] = True
            mask[2839:,59] = True
            mask[2839:,60] = True
        elif (binx,biny)==(2,2):
            mask = np.zeros((2050,1024), dtype=bool)
            mask[66:,870] = True
            mask[66:,871] = True
            mask[315:,860] = True
            mask[1689:,713] = True
            mask[1121:,179] = True
            mask[1337:,63] = True
            mask[1348:,37] = True
            mask[1420:,29] = True
            mask[1503:,44] = True
        else:
            raise ValueError(
                'unsupported binning for det {}: ({}, {})'.format(
                    ccd_id, binx, biny))
    else:
        raise ValueError('Wrong det_id: {}'.format(ccd_id))
    return np.int16(mask)
def fix_image(data, mask):
if mask.shape != data.shape:
print('data shape {} and mask shape {} do not match'.format(
data.shape, mask.shape))
raise ValueError
for i, v in enumerate(mask.sum(axis=1)):
if v > 0:
m = np.logical_not(mask[i,:])
x = np.arange(m.size)[m]
y = data[i,:][m]
f = intp.InterpolatedUnivariateSpline(x,y)
data[i,:] = f(np.arange(m.size))
return data
def correct_overscan(data, header):
    """Correct overscan for an input image and update related information in the
    FITS header.

    Args:
        data (:class:`numpy.ndarray`): Input image data.
        header (:class:`astropy.io.fits.Header`): header with binning
            ('BIN-FCT1'/'BIN-FCT2'), overscan boundaries
            ('H_OSMIN1'/'H_OSMAX1') and per-readout gains
            ('H_GAIN1'/'H_GAIN2').

    Returns:
        :class:`numpy.ndarray`: float32 image with the overscan strip removed
        and each readout half bias-subtracted and gain-corrected.

    Raises:
        ValueError: if the image shape is inconsistent with the binning.
    """
    binx = header['BIN-FCT1']
    biny = header['BIN-FCT2']
    osmin1 = header['H_OSMIN1']
    osmax1 = header['H_OSMAX1']
    gain1 = header['H_GAIN1']
    gain2 = header['H_GAIN2']
    # width of the overscan strip
    ovs_width = osmax1 - osmin1 + 1
    ny, nx = data.shape
    if nx != 2048/binx + ovs_width:
        print('error on image shape of x axis')
        raise ValueError
    if ny != 4100/biny:
        print('error on image shape of y axis')
        raise ValueError
    # Column layout of the Subaru CCD frame:
    # +-----+---+---+-----+
    # | sci |ovr|ovr| sci |   i1:i2 science 1, i2:i3 overscan 1,
    # +-----+---+---+-----+   i3:i4 overscan 2, i4:i5 science 2
    # i1    i2  i3  i4    i5
    i1, i2, i3, i4, i5 = 0, osmin1 - 1, nx//2, osmax1, nx
    # x boundaries of the two science halves in the overscan-removed frame
    j1, j2 = 0, i2
    j3 = i2 + (i5 - i4)
    # mean overscan level of each readout (two edge columns skipped)
    bias1 = data[:, i2+2:i3].mean(axis=1).mean()
    bias2 = data[:, i3:i4-2].mean(axis=1).mean()
    # assemble the overscan-corrected frame
    corrdata = np.zeros((ny, nx-ovs_width), dtype=np.float32)
    corrdata[:, j1:j2] = (data[:, i1:i2] - bias1)*gain1
    corrdata[:, j2:j3] = (data[:, i4:i5] - bias2)*gain2
    return corrdata
def parse_image(data, header):
    """Pre-process a raw HDS CCD frame.

    Resets saturated pixels, removes the overscan, and repairs known bad
    pixels.

    Args:
        data (:class:`numpy.ndarray`): raw frame; NaN marks saturation.
        header (:class:`astropy.io.fits.Header`): frame header.

    Returns:
        :class:`numpy.ndarray`: the corrected image.
    """
    ccd_id = header['DET-ID']
    binx = header['BIN-FCT1']
    biny = header['BIN-FCT2']
    # saturated pixels are read in as NaN; clip them to the 16-bit ceiling
    data[np.isnan(data)] = 65535
    # subtract the overscan level and apply the readout gains
    data = correct_overscan(data, header)
    # interpolate over the known bad columns of this detector
    bad_mask = get_badpixel_mask(ccd_id, binx, biny)
    return fix_image(data, bad_mask)
| [
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.logical_not",
"numpy.zeros",
"numpy.isnan",
"numpy.arange",
"numpy.int16",
"logging.getLogger"
] | [((34, 61), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (51, 61), False, 'import logging\n'), ((7753, 7767), 'numpy.int16', 'np.int16', (['mask'], {}), '(mask)\n', (7761, 7767), True, 'import numpy as np\n'), ((10323, 10371), 'numpy.zeros', 'np.zeros', (['(ny, nx - ovs_width)'], {'dtype': 'np.float32'}), '((ny, nx - ovs_width), dtype=np.float32)\n', (10331, 10371), True, 'import numpy as np\n'), ((10774, 10788), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (10782, 10788), True, 'import numpy as np\n'), ((5981, 6018), 'numpy.zeros', 'np.zeros', (['(4100, 2048)'], {'dtype': 'np.bool'}), '((4100, 2048), dtype=np.bool)\n', (5989, 6018), True, 'import numpy as np\n'), ((8044, 8070), 'numpy.logical_not', 'np.logical_not', (['mask[i, :]'], {}), '(mask[i, :])\n', (8058, 8070), True, 'import numpy as np\n'), ((8152, 8191), 'scipy.interpolate.InterpolatedUnivariateSpline', 'intp.InterpolatedUnivariateSpline', (['x', 'y'], {}), '(x, y)\n', (8185, 8191), True, 'import scipy.interpolate as intp\n'), ((6538, 6575), 'numpy.zeros', 'np.zeros', (['(2050, 1024)'], {'dtype': 'np.bool'}), '((2050, 1024), dtype=np.bool)\n', (6546, 6575), True, 'import numpy as np\n'), ((6750, 6787), 'numpy.zeros', 'np.zeros', (['(4100, 2048)'], {'dtype': 'np.bool'}), '((4100, 2048), dtype=np.bool)\n', (6758, 6787), True, 'import numpy as np\n'), ((8086, 8103), 'numpy.arange', 'np.arange', (['m.size'], {}), '(m.size)\n', (8095, 8103), True, 'import numpy as np\n'), ((8217, 8234), 'numpy.arange', 'np.arange', (['m.size'], {}), '(m.size)\n', (8226, 8234), True, 'import numpy as np\n'), ((7333, 7370), 'numpy.zeros', 'np.zeros', (['(2050, 1024)'], {'dtype': 'np.bool'}), '((2050, 1024), dtype=np.bool)\n', (7341, 7370), True, 'import numpy as np\n')] |
import os
import dabest
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import pearsonr
from task import SequenceLearning
from utils.params import P
from utils.constants import TZ_COND_DICT
from utils.io import build_log_path, pickle_load_dict, \
get_test_data_dir, get_test_data_fname
from analysis import compute_stats, \
compute_cell_memory_similarity, create_sim_dict, batch_compute_true_dk, \
process_cache, get_trial_cond_ids, trim_data, make_df
from brainiak.funcalign.srm import SRM
from matplotlib.ticker import FormatStrFormatter
from itertools import combinations
from scipy.special import comb
# global plotting style
sns.set(style='white', palette='colorblind', context='poster')
# experiment / subject selection
log_root = '../log/'
exp_name = 'vary-test-penalty'
subj_ids = np.arange(15)
n_subjs = len(subj_ids)
all_conds = ['RM', 'DM', 'NM']
# training params
supervised_epoch = 600
epoch_load = 1000
learning_rate = 7e-4
# model / task params
n_param = 16
n_branch = 4
enc_size = 16
n_event_remember = 2
n_hidden = 194
n_hidden_dec = 128
eta = .1
penalty_random = 1
# testing param, ortho to the training directory
penalty_discrete = 1
penalty_onehot = 0
normalize_return = 1
# loading params
p_rm_ob_enc_load = .3
p_rm_ob_rcl_load = 0
pad_len_load = -1
penalty_train = 4
# testing params
p_test = 0
p_rm_ob_enc_test = p_test
p_rm_ob_rcl_test = p_test
pad_len_test = 0
penalty_test = 2
slience_recall_time = None
n_examples_test = 256
def prealloc():
    """Return a fresh container with one empty list per condition."""
    holder = {}
    for cond in all_conds:
        holder[cond] = []
    return holder
# per-condition containers, filled over all subjects below
has_memory_conds = ['RM', 'DM']
CMs_dlist = {cond: [] for cond in all_conds}
DAs_dlist = {cond: [] for cond in all_conds}
ma_dlist = {cond: [] for cond in has_memory_conds}
# load the cached test results of every subject x condition
for subj_id in subj_ids:
    print(f'\nsubj_id = {subj_id}: ', end='')
    for fix_cond in all_conds:
        print(f'{fix_cond} ', end='')
        np.random.seed(subj_id)
        # rebuild the training-time parameter object to locate the log dir
        p = P(
            exp_name=exp_name, sup_epoch=supervised_epoch,
            n_param=n_param, n_branch=n_branch, pad_len=pad_len_load,
            enc_size=enc_size, n_event_remember=n_event_remember,
            penalty=penalty_train, penalty_random=penalty_random,
            penalty_onehot=penalty_onehot, penalty_discrete=penalty_discrete,
            normalize_return=normalize_return,
            p_rm_ob_enc=p_rm_ob_enc_load, p_rm_ob_rcl=p_rm_ob_rcl_load,
            n_hidden=n_hidden, n_hidden_dec=n_hidden_dec,
            lr=learning_rate, eta=eta,
        )
        # init env
        task = SequenceLearning(
            n_param=p.env.n_param, n_branch=p.env.n_branch, pad_len=pad_len_test,
            p_rm_ob_enc=p_rm_ob_enc_test, p_rm_ob_rcl=p_rm_ob_rcl_test,
        )
        # create logging dirs
        log_path, log_subpath = build_log_path(
            subj_id, p, log_root=log_root, verbose=False
        )
        # locate and load the pickled test results for this condition
        test_params = [penalty_test, pad_len_test, slience_recall_time]
        test_data_dir, test_data_subdir = get_test_data_dir(
            log_subpath, epoch_load, test_params)
        test_data_fname = get_test_data_fname(n_examples_test, fix_cond)
        fpath = os.path.join(test_data_dir, test_data_fname)
        test_data_dict = pickle_load_dict(fpath)
        results = test_data_dict['results']
        XY = test_data_dict['XY']
        [dist_a_, Y_, log_cache_, log_cond_] = results
        [X_raw, Y_raw] = XY
        # compute ground truth / objective uncertainty (delay phase removed)
        true_dk_wm_, true_dk_em_ = batch_compute_true_dk(X_raw, task)
        '''precompute some constants'''
        # figure out max n-time-steps across for all trials
        T_part = n_param + pad_len_test
        T_total = T_part * task.n_parts
        #
        n_conds = len(TZ_COND_DICT)
        memory_types = ['targ', 'lure']
        # True at time steps where the model makes predictions
        ts_predict = np.array(
            [t % T_part >= pad_len_test for t in range(T_total)])
        '''organize results to analyzable form'''
        # skip examples untill EM is full
        n_examples_skip = n_event_remember
        n_examples = n_examples_test - n_examples_skip
        data_to_trim = [
            dist_a_, Y_, log_cond_, log_cache_, true_dk_wm_, true_dk_em_
        ]
        [dist_a, Y, log_cond, log_cache, true_dk_wm, true_dk_em] = trim_data(
            n_examples_skip, data_to_trim)
        # process the data
        cond_ids = get_trial_cond_ids(log_cond)
        activity_, ctrl_param_ = process_cache(log_cache, T_total, p)
        [C, H, M, CM, DA, V] = activity_
        [inpt] = ctrl_param_
        # fixed LCA competition / leak parameters, broadcast to inpt's shape
        comp_val = .8
        leak_val = 0
        comp = np.full(np.shape(inpt), comp_val)
        leak = np.full(np.shape(inpt), leak_val)
        # collect data
        CMs_dlist[fix_cond].append(CM)
        DAs_dlist[fix_cond].append(DA)
        if fix_cond in has_memory_conds:
            # collect memory activation for RM and DM sessions
            _, sim_lca = compute_cell_memory_similarity(
                C, V, inpt, leak, comp)
            sim_lca_dict = create_sim_dict(
                sim_lca, cond_ids, n_targ=p.n_segments)
            ma_dlist[fix_cond].append(sim_lca_dict[fix_cond])
print(f'n_subjs = {n_subjs}')
# organize target memory activation
tma = {cond: [] for cond in has_memory_conds}
for cond in has_memory_conds:
    # extract max target activation as the metric for recall
    tma[cond] = np.array([
        np.max(ma_dlist[cond][i_s]['targ'], axis=-1)
        for i_s in range(n_subjs)
    ]).transpose((0, 2, 1))
    print(f'np.shape(tma[{cond}]) = {np.shape(tma[cond])}')
# organize brain activity
# stack subjects and reorder axes to (subject, feature, time, trial)
CMs_darray, DAs_darray = {}, {}
for cond in all_conds:
    CMs_darray[cond] = np.array(CMs_dlist[cond]).transpose((0, 3, 2, 1))
    DAs_darray[cond] = np.array(DAs_dlist[cond]).transpose((0, 3, 2, 1))
    print(f'np.shape(CMs_darray[{cond}]) = {np.shape(CMs_darray[cond])}')
    print(f'np.shape(DAs_darray[{cond}]) = {np.shape(DAs_darray[cond])}')
'''isc'''
# shared response model: project every subject into a common 16-dim space
dim_srm = 16
srm = SRM(features=dim_srm)
# train/test split over trials
test_prop = .5
n_examples_tr = int(n_examples * (1 - test_prop))
n_examples_te = n_examples - n_examples_tr
data = DAs_darray
_, nH, _, _ = np.shape(data[cond])
# split data
data_tr = {cond: [] for cond in all_conds}
data_te = {cond: [] for cond in all_conds}
for cond in all_conds:
    data_tr_cond = data[cond][:, :, :, :n_examples_tr]
    data_te_cond = data[cond][:, :, :, n_examples_tr:]
    # mean centering
    for i_s in range(n_subjs):
        d_tr_i_s_ = data_tr_cond[i_s].reshape(nH, -1)
        d_te_i_s_ = data_te_cond[i_s].reshape(nH, -1)
        # mean center for each condition, for each subject
        # (the training mean is also used to center the test data)
        mu_i_s_ = np.mean(d_tr_i_s_, axis=1, keepdims=True)
        data_tr[cond].append(d_tr_i_s_ - mu_i_s_)
        data_te[cond].append(d_te_i_s_ - mu_i_s_)
# fit training set
data_tr_unroll = np.concatenate(
    [data_tr[cond] for cond in all_conds],
    axis=2
)
# organize the data for srm form
data_tr_all_conds = np.moveaxis(
    [data_tr[cond] for cond in all_conds], source=0, destination=-1
).reshape(n_subjs, nH, -1)
data_te_all_conds = np.moveaxis(
    [data_te[cond] for cond in all_conds], source=0, destination=-1
).reshape(n_subjs, nH, -1)
srm.fit(data_tr_all_conds)
X_test_srm_ = srm.transform(data_te_all_conds)
# split the transformed test data back into the 3 conditions
X_test_srm_bycond = np.moveaxis(
    np.reshape(X_test_srm_, newshape=(n_subjs, dim_srm, -1, 3)),
    source=-1, destination=0
)
# reshape each condition to (trial, subject, srm-dim, time)
X_test_srm = {cond: None for cond in all_conds}
for i, cond in enumerate(all_conds):
    X_test_srm_cond_ = X_test_srm_bycond[i].reshape(
        n_subjs, dim_srm, -1, n_examples_te)
    X_test_srm[cond] = np.moveaxis(X_test_srm_cond_, source=-1, destination=0)
'''Inter-subject pattern correlation, RM vs. cond'''
def compute_bs_bc_trsm(data_te_srm_rm_i, data_te_srm_xm_i, return_mean=True):
    """Between-subject, between-condition pattern-correlation matrices.

    For every ordered subject pair (i, j), correlate subject i's patterns in
    the first condition with subject j's patterns in the second condition,
    and keep only the cross block of the correlation matrix.
    """
    _, _, n_cols = np.shape(data_te_srm_rm_i)
    cross_corr_mats = [
        np.corrcoef(
            data_te_srm_rm_i[s1].T,
            data_te_srm_xm_i[s2].T
        )[:n_cols, n_cols:]
        for s1, s2 in combinations(range(n_subjs), 2)
    ]
    if return_mean:
        return np.mean(cross_corr_mats, axis=0)
    return cross_corr_mats
def compute_bs_bc_isc(
    data_te_srm_rm_i, data_te_srm_xm_i,
    win_size=5, return_mean=True,
):
    """Compute sliding-window ISC across all subject pairs.

    For each subject pair (i, j), slides a `win_size` window over the
    second part of the trial and averages the per-component correlations
    between the two conditions within that window.
    """
    def _window_corr(i_s, j_s, t):
        # cross-condition block of the window correlation matrix,
        # averaged over its diagonal (component-to-component) entries
        cmat = np.corrcoef(
            data_te_srm_rm_i[i_s][:, t: t + win_size],
            data_te_srm_xm_i[j_s][:, t: t + win_size]
        )
        return np.mean(np.diag(cmat[dim_srm:, :dim_srm]))

    isc_mu = [
        np.array([
            _window_corr(i_s, j_s, t)
            for t in np.arange(T_part, T_total - win_size)
        ])
        for (i_s, j_s) in combinations(range(n_subjs), 2)
    ]
    return np.mean(isc_mu, axis=0) if return_mean else isc_mu
# Accumulate spatial / temporal / sliding-window ISC for every ordered
# condition pair (upper triangle only, since correlation is symmetric).
win_size = 5
bs_bc_sisc = {rcn: {cn: [] for cn in all_conds} for rcn in all_conds}
bs_bc_tisc = {rcn: {cn: [] for cn in all_conds} for rcn in all_conds}
bs_bc_sw_tisc = {rcn: {cn: [] for cn in all_conds} for rcn in all_conds}
for i_rc, ref_cond in enumerate(all_conds):
    for i_c, cond in enumerate(all_conds):
        # pick a trial
        if i_c >= i_rc:
            for i in range(n_examples_te):
                # for this trial ...
                data_te_srm_rm_i = X_test_srm[ref_cond][i]
                data_te_srm_xm_i = X_test_srm[cond][i]
                # compute inter-subject inter-condition pattern corr
                bs_bc_trsm_c_i = compute_bs_bc_trsm(
                    data_te_srm_rm_i, data_te_srm_xm_i,
                    return_mean=False
                )
                # only extract the diag entries (t to t)
                bs_bc_sisc[ref_cond][cond].append(
                    [np.diag(sisc_mat_ij) for sisc_mat_ij in bs_bc_trsm_c_i]
                )
                # compute isc (transpose so time becomes the pattern axis)
                bs_bc_tisc_c_i = compute_bs_bc_trsm(
                    np.transpose(data_te_srm_rm_i, axes=(0, 2, 1)),
                    np.transpose(data_te_srm_xm_i, axes=(0, 2, 1)),
                    return_mean=False
                )
                bs_bc_tisc[ref_cond][cond].append(
                    [np.diag(tisc_mat_ij) for tisc_mat_ij in bs_bc_tisc_c_i]
                )
                # sw-isc
                bs_bc_sw_tisc[ref_cond][cond].append(
                    compute_bs_bc_isc(
                        data_te_srm_rm_i, data_te_srm_xm_i, win_size,
                        return_mean=False
                    )
                )
'''plot spatial pattern isc '''
# Plot spatial ISC over time for the RM reference condition.
c_pal = sns.color_palette(n_colors=8)
color_id_pick = [0, 1, 2, 3, 4, 7]
c_pal = [c_pal[color_id] for color_id in color_id_pick]
# compute stats (mean/se over trials after averaging over subject pairs)
n_se = 1
mu_ = {rcn: {cn: [] for cn in all_conds} for rcn in all_conds}
er_ = {rcn: {cn: [] for cn in all_conds} for rcn in all_conds}
for ref_cond in cond_ids.keys():
    for cond in cond_ids.keys():
        d_ = np.array(bs_bc_sisc[ref_cond][cond])
        # only the upper-triangle condition pairs were filled above
        if len(d_) > 0:
            mu_[ref_cond][cond], er_[ref_cond][cond] = compute_stats(
                np.mean(d_, axis=1),
                n_se=n_se
            )
# plot
f, ax = plt.subplots(1, 1, figsize=(7, 5))
color_id = 0
i_rc, ref_cond = 0, 'RM'
for i_c, cond in enumerate(['RM', 'DM']):
    if i_c >= i_rc:
        # show only the 2nd part of the trial
        ax.errorbar(
            x=range(T_part),
            y=mu_[ref_cond][cond][T_part:],
            yerr=er_[ref_cond][cond][T_part:],
            label=f'{ref_cond}-{cond}', color=c_pal[color_id]
        )
        color_id += 1
ax.legend()
ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
ax.set_xlabel('Time')
ax.set_ylabel('Linear correlation')
ax.set_title('Spatial inter-subject correlation')
sns.despine()
f.tight_layout()
# BUGFIX: `bbox_to_anchor` is a legend kwarg, not a savefig kwarg;
# the intended option is `bbox_inches='tight'`
f.savefig('../figs/sisc-lineplot.png', dpi=120, bbox_inches='tight')
'''plot temporal isc'''
# Plot temporal ISC per SRM component, sorted by the RM-RM ISC value.
# compute stats
n_se = 1
mu_ = {rcn: {cn: [] for cn in all_conds} for rcn in all_conds}
er_ = {rcn: {cn: [] for cn in all_conds} for rcn in all_conds}
for ref_cond in cond_ids.keys():
    for cond in cond_ids.keys():
        d_ = np.array(bs_bc_tisc[ref_cond][cond])
        # only the upper-triangle condition pairs were filled above
        if len(d_) > 0:
            mu_[ref_cond][cond], er_[ref_cond][cond] = compute_stats(
                np.mean(d_, axis=1),
                n_se=n_se
            )
# plot (descending order of RM-RM ISC)
sort_id = np.argsort(mu_['RM']['RM'])[::-1]
f, ax = plt.subplots(1, 1, figsize=(9, 5))
color_id = 0
i_rc, ref_cond = 0, 'RM'
for i_c, cond in enumerate(['RM', 'DM']):
    if i_c >= i_rc:
        ax.errorbar(
            x=range(dim_srm),
            y=mu_[ref_cond][cond][sort_id],
            yerr=er_[ref_cond][cond][sort_id],
            label=f'{ref_cond}-{cond}', color=c_pal[color_id]
        )
        color_id += 1
ax.legend(bbox_to_anchor=(1, 1))
ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
ax.set_xlabel('SRM components (sorted by RM-RM ISC value)')
ax.set_ylabel('Linear Correlation')
ax.set_title('Temporal inter-subject correlation')
sns.despine()
f.tight_layout()
'''plot temporal isc - sliding window'''
# Plot sliding-window temporal ISC over time.
n_se = 1
# compute stats
mu_ = {rcn: {cn: [] for cn in all_conds} for rcn in all_conds}
er_ = {rcn: {cn: [] for cn in all_conds} for rcn in all_conds}
for ref_cond in cond_ids.keys():
    for cond in cond_ids.keys():
        d_ = bs_bc_sw_tisc[ref_cond][cond]
        # only the upper-triangle condition pairs were filled above
        if len(d_) > 0:
            mu_[ref_cond][cond], er_[ref_cond][cond] = compute_stats(
                np.mean(d_, axis=1), n_se=n_se)
# plot
f, ax = plt.subplots(1, 1, figsize=(7, 5))
color_id = 0
i_rc, ref_cond = 0, 'RM'
for i_c, cond in enumerate(['RM', 'DM']):
    print(i_c, cond)
    if i_c >= i_rc:
        ax.errorbar(
            x=range(len(mu_[ref_cond][cond])),
            y=mu_[ref_cond][cond], yerr=er_[ref_cond][cond],
            label=f'{ref_cond}-{cond}', color=c_pal[color_id]
        )
        color_id += 1
ax.legend()
ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
ax.set_xlabel(f'Time (sliding window size = {win_size})')
ax.set_ylabel('Linear correlation')
ax.set_title('Temporal inter-subject correlation')
sns.despine()
f.tight_layout()
# BUGFIX: `bbox_to_anchor` is a legend kwarg, not a savefig kwarg;
# the intended option is `bbox_inches='tight'`
f.savefig('../figs/tisc-lineplot.png', dpi=120, bbox_inches='tight')
'''analyze isc change'''
# Correlate recall strength (per subject pair) with trial-to-trial ISC
# change, for each memory condition, across test trials.
n_tps = 9
n_subj_pairs = int(comb(n_subjs, 2))
tma_dm_p2_test = tma['DM'][:, T_part:, n_examples_tr:]
recall = np.mean(tma_dm_p2_test, axis=0)
# preallocate r / p arrays: one row per subject pair, one column per time
r_val_sisc = {cond: np.zeros((n_subj_pairs, n_tps))
              for cond in has_memory_conds}
p_val_sisc = {cond: np.zeros((n_subj_pairs, n_tps))
              for cond in has_memory_conds}
r_val_tisc = {cond: np.zeros((n_subj_pairs, n_tps - win_size))
              for cond in has_memory_conds}
p_val_tisc = {cond: np.zeros((n_subj_pairs, n_tps - win_size))
              for cond in has_memory_conds}
r_mu_sisc = {cond: None for cond in has_memory_conds}
r_se_sisc = {cond: None for cond in has_memory_conds}
r_mu_tisc = {cond: None for cond in has_memory_conds}
r_se_tisc = {cond: None for cond in has_memory_conds}
for cond in has_memory_conds:
    rmdm_sisc = np.zeros((n_subj_pairs, n_examples_te, T_part))
    rmdm_tisc = np.zeros((n_subj_pairs, n_examples_te, T_part - win_size))
    for i in range(n_examples_te):
        # for this trial ...
        data_te_srm_rm_i = X_test_srm['RM'][i]
        data_te_srm_dm_i = X_test_srm[cond][i]
        # compute inter-subject inter-condition pattern corr
        rmdm_sisc_i = compute_bs_bc_trsm(
            data_te_srm_rm_i, data_te_srm_dm_i, return_mean=False
        )
        rmdm_sisc_i_diag = np.array([np.diag(mat) for mat in rmdm_sisc_i])
        rmdm_sisc[:, i, :] = rmdm_sisc_i_diag[:, T_part:]
        # isc
        rmdm_tisc[:, i, :] = compute_bs_bc_isc(
            data_te_srm_rm_i, data_te_srm_dm_i, win_size, return_mean=False
        )
    tma_dm_p2_test = tma[cond][:, T_part:, n_examples_tr:]
    recall = np.zeros((n_subj_pairs, n_examples_te, T_part))
    for i_comb, (i_s, j_s) in enumerate(combinations(range(n_subjs), 2)):
        # average recall of the two subjects in the pair
        # BUGFIX: was `tma_dm_p2_test[i_s] + tma_dm_p2_test[j_s] / 2`, which
        # (by operator precedence) halved only the second subject's recall
        recall_ij = (tma_dm_p2_test[i_s] + tma_dm_p2_test[j_s]) / 2
        recall[i_comb] = recall_ij.T
    for t in range(n_tps):
        # correlate recall at t with the sISC change from t to t+1
        sisc_change_t = rmdm_sisc[:, :, t + 1] - rmdm_sisc[:, :, t]
        for i_comb in range(n_subj_pairs):
            r_val_sisc[cond][i_comb, t], p_val_sisc[cond][i_comb, t] = pearsonr(
                recall[i_comb, :, t], sisc_change_t[i_comb])
    for t in range(n_tps - win_size):
        # same analysis with the sliding-window tISC
        tisc_change_t = rmdm_tisc[:, :, t + 1] - rmdm_tisc[:, :, t]
        recall_win_t = np.mean(recall[:, :, t:t + win_size], axis=-1)
        for i_comb in range(n_subj_pairs):
            r_val_tisc[cond][i_comb, t], p_val_tisc[cond][i_comb, t] = pearsonr(
                recall_win_t[i_comb], tisc_change_t[i_comb])
    r_mu_sisc[cond], r_se_sisc[cond] = compute_stats(r_val_sisc[cond])
    r_mu_tisc[cond], r_se_tisc[cond] = compute_stats(r_val_tisc[cond])
# '''plot s-isc'''
# f, ax = plt.subplots(1, 1, figsize=(7, 5))
# for cond in has_memory_conds:
# ax.errorbar(
# x=range(len(r_mu_sisc[cond])),
# y=r_mu_sisc[cond], yerr=r_se_sisc[cond], label=f'RM-{cond}'
# )
# ax.axhline(0, color='grey', linestyle='--')
# ax.set_xlabel('Time')
# ax.set_ylabel('Linear Corr.')
# ax.set_title('Correlation: recall vs. spatial ISC change')
# ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
# ax.legend()
# sns.despine()
# f.tight_layout()
#
#
# xticklabels = [f'RM-{cond}' for cond in has_memory_conds]
# f, ax = plt.subplots(1, 1, figsize=(6, 4))
# sns.violinplot(data=[np.ravel(r_val_sisc[cond]) for cond in has_memory_conds])
# ax.axhline(0, color='grey', linestyle='--')
# ax.set_xticks(range(len(xticklabels)))
# ax.set_xticklabels(xticklabels)
# ax.set_xlabel('Condition')
# ax.set_ylabel('Linear Correlation')
# ax.set_title('Correlation: recall vs. spatial ISC change')
# sns.despine()
# f.tight_layout()
#
# data_dict = {}
# for cond in list(r_mu_sisc.keys()):
# data_dict[f'RM-{cond}'] = np.mean(r_val_sisc[cond], axis=-1)
#
# df = make_df(data_dict)
# iris_dabest = dabest.load(
# data=df, x="Condition", y="Value", idx=list(data_dict.keys())
# )
# iris_dabest.mean_diff.plot(
# swarm_label='Linear correlation', fig_size=(7, 5)
# )
#
#
# f, ax = plt.subplots(1, 1, figsize=(5, 3))
# # sns.violinplot(data=[r_mu_sisc[cond] for cond in has_memory_conds])
# sns.swarmplot(data=np.mean(
# r_val_sisc['DM'], axis=-1), color=sns.color_palette('colorblind')[1])
# # np.ravel(r_val_sisc[cond])
# # sns.swarmplot(data=[r_mu_sisc[cond] for cond in has_memory_conds])
# ax.axhline(0, color='grey', linestyle='--')
# ax.set_xticks(range(1))
# ax.set_xticklabels(['RM-DM'])
# # ax.set_xlabel('Condition')
# ax.set_ylabel('Linear Correlation')
# ax.set_title('Correlation: recall vs. spatial ISC change')
# sns.despine()
# f.tight_layout()
#
#
'''plot t-isc'''
# Line plot: recall vs. ISC-change correlation over time, per condition.
f, ax = plt.subplots(1, 1, figsize=(7, 5))
for cond in has_memory_conds:
    ax.errorbar(
        x=range(len(r_mu_tisc[cond])),
        y=r_mu_tisc[cond], yerr=r_se_tisc[cond], label=f'RM-{cond}'
    )
ax.axhline(0, color='grey', linestyle='--')
ax.set_xlabel('Time')
ax.set_ylabel('Linear Corr.')
ax.set_title('Correlation: recall vs. ISC change')
ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
ax.legend()
sns.despine()
f.tight_layout()
# Violin plot of the same quantity, one violin per condition.
xticklabels = [f'RM-{cond}' for cond in has_memory_conds]
f, ax = plt.subplots(1, 1, figsize=(6, 4))
sns.violinplot(data=[r_mu_tisc[cond] for cond in has_memory_conds])
ax.axhline(0, color='grey', linestyle='--')
ax.set_xticks(range(len(xticklabels)))
ax.set_xticklabels(xticklabels)
ax.set_xlabel('Condition')
ax.set_ylabel('Linear Correlation')
ax.set_title('Correlation: recall vs. ISC change')
sns.despine()
f.tight_layout()
# Estimation plot (dabest) of per-pair mean correlations.
# NOTE(review): keys come from r_mu_sisc but values from r_val_tisc —
# the key sets appear identical; confirm this mix is intentional.
data_dict = {}
for cond in list(r_mu_sisc.keys()):
    data_dict[f'RM-{cond}'] = np.mean(r_val_tisc[cond], axis=-1)
df = make_df(data_dict)
iris_dabest = dabest.load(
    data=df, x="Condition", y="Value", idx=list(data_dict.keys())
)
iris_dabest.mean_diff.plot(
    swarm_label='Linear correlation', fig_size=(7, 5),
)
| [
"numpy.moveaxis",
"analysis.make_df",
"numpy.random.seed",
"analysis.compute_cell_memory_similarity",
"scipy.special.comb",
"numpy.shape",
"numpy.argsort",
"numpy.mean",
"numpy.arange",
"numpy.diag",
"os.path.join",
"analysis.create_sim_dict",
"task.SequenceLearning",
"utils.io.get_test_da... | [((661, 723), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""', 'palette': '"""colorblind"""', 'context': '"""poster"""'}), "(style='white', palette='colorblind', context='poster')\n", (668, 723), True, 'import seaborn as sns\n'), ((789, 802), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (798, 802), True, 'import numpy as np\n'), ((5869, 5890), 'brainiak.funcalign.srm.SRM', 'SRM', ([], {'features': 'dim_srm'}), '(features=dim_srm)\n', (5872, 5890), False, 'from brainiak.funcalign.srm import SRM\n'), ((6033, 6053), 'numpy.shape', 'np.shape', (['data[cond]'], {}), '(data[cond])\n', (6041, 6053), True, 'import numpy as np\n'), ((6706, 6767), 'numpy.concatenate', 'np.concatenate', (['[data_tr[cond] for cond in all_conds]'], {'axis': '(2)'}), '([data_tr[cond] for cond in all_conds], axis=2)\n', (6720, 6767), True, 'import numpy as np\n'), ((10616, 10645), 'seaborn.color_palette', 'sns.color_palette', ([], {'n_colors': '(8)'}), '(n_colors=8)\n', (10633, 10645), True, 'import seaborn as sns\n'), ((11193, 11227), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(7, 5)'}), '(1, 1, figsize=(7, 5))\n', (11205, 11227), True, 'import matplotlib.pyplot as plt\n'), ((11796, 11809), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (11807, 11809), True, 'import seaborn as sns\n'), ((12424, 12458), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(9, 5)'}), '(1, 1, figsize=(9, 5))\n', (12436, 12458), True, 'import matplotlib.pyplot as plt\n'), ((13031, 13044), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (13042, 13044), True, 'import seaborn as sns\n'), ((13524, 13558), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(7, 5)'}), '(1, 1, figsize=(7, 5))\n', (13536, 13558), True, 'import matplotlib.pyplot as plt\n'), ((14115, 14128), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (14126, 14128), True, 'import seaborn as sns\n'), ((14357, 14388), 
'numpy.mean', 'np.mean', (['tma_dm_p2_test'], {'axis': '(0)'}), '(tma_dm_p2_test, axis=0)\n', (14364, 14388), True, 'import numpy as np\n'), ((18838, 18872), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(7, 5)'}), '(1, 1, figsize=(7, 5))\n', (18850, 18872), True, 'import matplotlib.pyplot as plt\n'), ((19247, 19260), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (19258, 19260), True, 'import seaborn as sns\n'), ((19345, 19379), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(6, 4)'}), '(1, 1, figsize=(6, 4))\n', (19357, 19379), True, 'import matplotlib.pyplot as plt\n'), ((19380, 19447), 'seaborn.violinplot', 'sns.violinplot', ([], {'data': '[r_mu_tisc[cond] for cond in has_memory_conds]'}), '(data=[r_mu_tisc[cond] for cond in has_memory_conds])\n', (19394, 19447), True, 'import seaborn as sns\n'), ((19677, 19690), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (19688, 19690), True, 'import seaborn as sns\n'), ((19832, 19850), 'analysis.make_df', 'make_df', (['data_dict'], {}), '(data_dict)\n', (19839, 19850), False, 'from analysis import compute_stats, compute_cell_memory_similarity, create_sim_dict, batch_compute_true_dk, process_cache, get_trial_cond_ids, trim_data, make_df\n'), ((7180, 7239), 'numpy.reshape', 'np.reshape', (['X_test_srm_'], {'newshape': '(n_subjs, dim_srm, -1, 3)'}), '(X_test_srm_, newshape=(n_subjs, dim_srm, -1, 3))\n', (7190, 7239), True, 'import numpy as np\n'), ((7479, 7534), 'numpy.moveaxis', 'np.moveaxis', (['X_test_srm_cond_'], {'source': '(-1)', 'destination': '(0)'}), '(X_test_srm_cond_, source=-1, destination=0)\n', (7490, 7534), True, 'import numpy as np\n'), ((7686, 7712), 'numpy.shape', 'np.shape', (['data_te_srm_rm_i'], {}), '(data_te_srm_rm_i)\n', (7694, 7712), True, 'import numpy as np\n'), ((11605, 11631), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.1f"""'], {}), "('%.1f')\n", (11623, 11631), False, 'from matplotlib.ticker import 
FormatStrFormatter\n'), ((11662, 11686), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%d"""'], {}), "('%d')\n", (11680, 11686), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((12381, 12408), 'numpy.argsort', 'np.argsort', (["mu_['RM']['RM']"], {}), "(mu_['RM']['RM'])\n", (12391, 12408), True, 'import numpy as np\n'), ((12858, 12882), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%d"""'], {}), "('%d')\n", (12876, 12882), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((13944, 13968), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%d"""'], {}), "('%d')\n", (13962, 13968), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((14274, 14290), 'scipy.special.comb', 'comb', (['n_subjs', '(2)'], {}), '(n_subjs, 2)\n', (14278, 14290), False, 'from scipy.special import comb\n'), ((14410, 14441), 'numpy.zeros', 'np.zeros', (['(n_subj_pairs, n_tps)'], {}), '((n_subj_pairs, n_tps))\n', (14418, 14441), True, 'import numpy as np\n'), ((14506, 14537), 'numpy.zeros', 'np.zeros', (['(n_subj_pairs, n_tps)'], {}), '((n_subj_pairs, n_tps))\n', (14514, 14537), True, 'import numpy as np\n'), ((14602, 14644), 'numpy.zeros', 'np.zeros', (['(n_subj_pairs, n_tps - win_size)'], {}), '((n_subj_pairs, n_tps - win_size))\n', (14610, 14644), True, 'import numpy as np\n'), ((14709, 14751), 'numpy.zeros', 'np.zeros', (['(n_subj_pairs, n_tps - win_size)'], {}), '((n_subj_pairs, n_tps - win_size))\n', (14717, 14751), True, 'import numpy as np\n'), ((15061, 15108), 'numpy.zeros', 'np.zeros', (['(n_subj_pairs, n_examples_te, T_part)'], {}), '((n_subj_pairs, n_examples_te, T_part))\n', (15069, 15108), True, 'import numpy as np\n'), ((15125, 15183), 'numpy.zeros', 'np.zeros', (['(n_subj_pairs, n_examples_te, T_part - win_size)'], {}), '((n_subj_pairs, n_examples_te, T_part - win_size))\n', (15133, 15183), True, 'import numpy as np\n'), ((15877, 15924), 'numpy.zeros', 'np.zeros', (['(n_subj_pairs, 
n_examples_te, T_part)'], {}), '((n_subj_pairs, n_examples_te, T_part))\n', (15885, 15924), True, 'import numpy as np\n'), ((16785, 16816), 'analysis.compute_stats', 'compute_stats', (['r_val_sisc[cond]'], {}), '(r_val_sisc[cond])\n', (16798, 16816), False, 'from analysis import compute_stats, compute_cell_memory_similarity, create_sim_dict, batch_compute_true_dk, process_cache, get_trial_cond_ids, trim_data, make_df\n'), ((16856, 16887), 'analysis.compute_stats', 'compute_stats', (['r_val_tisc[cond]'], {}), '(r_val_tisc[cond])\n', (16869, 16887), False, 'from analysis import compute_stats, compute_cell_memory_similarity, create_sim_dict, batch_compute_true_dk, process_cache, get_trial_cond_ids, trim_data, make_df\n'), ((19209, 19233), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%d"""'], {}), "('%d')\n", (19227, 19233), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((19791, 19825), 'numpy.mean', 'np.mean', (['r_val_tisc[cond]'], {'axis': '(-1)'}), '(r_val_tisc[cond], axis=-1)\n', (19798, 19825), True, 'import numpy as np\n'), ((1805, 1828), 'numpy.random.seed', 'np.random.seed', (['subj_id'], {}), '(subj_id)\n', (1819, 1828), True, 'import numpy as np\n'), ((1841, 2316), 'utils.params.P', 'P', ([], {'exp_name': 'exp_name', 'sup_epoch': 'supervised_epoch', 'n_param': 'n_param', 'n_branch': 'n_branch', 'pad_len': 'pad_len_load', 'enc_size': 'enc_size', 'n_event_remember': 'n_event_remember', 'penalty': 'penalty_train', 'penalty_random': 'penalty_random', 'penalty_onehot': 'penalty_onehot', 'penalty_discrete': 'penalty_discrete', 'normalize_return': 'normalize_return', 'p_rm_ob_enc': 'p_rm_ob_enc_load', 'p_rm_ob_rcl': 'p_rm_ob_rcl_load', 'n_hidden': 'n_hidden', 'n_hidden_dec': 'n_hidden_dec', 'lr': 'learning_rate', 'eta': 'eta'}), '(exp_name=exp_name, sup_epoch=supervised_epoch, n_param=n_param, n_branch=\n n_branch, pad_len=pad_len_load, enc_size=enc_size, n_event_remember=\n n_event_remember, penalty=penalty_train, 
penalty_random=penalty_random,\n penalty_onehot=penalty_onehot, penalty_discrete=penalty_discrete,\n normalize_return=normalize_return, p_rm_ob_enc=p_rm_ob_enc_load,\n p_rm_ob_rcl=p_rm_ob_rcl_load, n_hidden=n_hidden, n_hidden_dec=\n n_hidden_dec, lr=learning_rate, eta=eta)\n', (1842, 2316), False, 'from utils.params import P\n'), ((2443, 2594), 'task.SequenceLearning', 'SequenceLearning', ([], {'n_param': 'p.env.n_param', 'n_branch': 'p.env.n_branch', 'pad_len': 'pad_len_test', 'p_rm_ob_enc': 'p_rm_ob_enc_test', 'p_rm_ob_rcl': 'p_rm_ob_rcl_test'}), '(n_param=p.env.n_param, n_branch=p.env.n_branch, pad_len=\n pad_len_test, p_rm_ob_enc=p_rm_ob_enc_test, p_rm_ob_rcl=p_rm_ob_rcl_test)\n', (2459, 2594), False, 'from task import SequenceLearning\n'), ((2687, 2747), 'utils.io.build_log_path', 'build_log_path', (['subj_id', 'p'], {'log_root': 'log_root', 'verbose': '(False)'}), '(subj_id, p, log_root=log_root, verbose=False)\n', (2701, 2747), False, 'from utils.io import build_log_path, pickle_load_dict, get_test_data_dir, get_test_data_fname\n'), ((2885, 2940), 'utils.io.get_test_data_dir', 'get_test_data_dir', (['log_subpath', 'epoch_load', 'test_params'], {}), '(log_subpath, epoch_load, test_params)\n', (2902, 2940), False, 'from utils.io import build_log_path, pickle_load_dict, get_test_data_dir, get_test_data_fname\n'), ((2980, 3026), 'utils.io.get_test_data_fname', 'get_test_data_fname', (['n_examples_test', 'fix_cond'], {}), '(n_examples_test, fix_cond)\n', (2999, 3026), False, 'from utils.io import build_log_path, pickle_load_dict, get_test_data_dir, get_test_data_fname\n'), ((3043, 3087), 'os.path.join', 'os.path.join', (['test_data_dir', 'test_data_fname'], {}), '(test_data_dir, test_data_fname)\n', (3055, 3087), False, 'import os\n'), ((3114, 3137), 'utils.io.pickle_load_dict', 'pickle_load_dict', (['fpath'], {}), '(fpath)\n', (3130, 3137), False, 'from utils.io import build_log_path, pickle_load_dict, get_test_data_dir, get_test_data_fname\n'), ((3413, 3447), 
'analysis.batch_compute_true_dk', 'batch_compute_true_dk', (['X_raw', 'task'], {}), '(X_raw, task)\n', (3434, 3447), False, 'from analysis import compute_stats, compute_cell_memory_similarity, create_sim_dict, batch_compute_true_dk, process_cache, get_trial_cond_ids, trim_data, make_df\n'), ((4178, 4218), 'analysis.trim_data', 'trim_data', (['n_examples_skip', 'data_to_trim'], {}), '(n_examples_skip, data_to_trim)\n', (4187, 4218), False, 'from analysis import compute_stats, compute_cell_memory_similarity, create_sim_dict, batch_compute_true_dk, process_cache, get_trial_cond_ids, trim_data, make_df\n'), ((4278, 4306), 'analysis.get_trial_cond_ids', 'get_trial_cond_ids', (['log_cond'], {}), '(log_cond)\n', (4296, 4306), False, 'from analysis import compute_stats, compute_cell_memory_similarity, create_sim_dict, batch_compute_true_dk, process_cache, get_trial_cond_ids, trim_data, make_df\n'), ((4340, 4376), 'analysis.process_cache', 'process_cache', (['log_cache', 'T_total', 'p'], {}), '(log_cache, T_total, p)\n', (4353, 4376), False, 'from analysis import compute_stats, compute_cell_memory_similarity, create_sim_dict, batch_compute_true_dk, process_cache, get_trial_cond_ids, trim_data, make_df\n'), ((6526, 6567), 'numpy.mean', 'np.mean', (['d_tr_i_s_'], {'axis': '(1)', 'keepdims': '(True)'}), '(d_tr_i_s_, axis=1, keepdims=True)\n', (6533, 6567), True, 'import numpy as np\n'), ((6831, 6907), 'numpy.moveaxis', 'np.moveaxis', (['[data_tr[cond] for cond in all_conds]'], {'source': '(0)', 'destination': '(-1)'}), '([data_tr[cond] for cond in all_conds], source=0, destination=-1)\n', (6842, 6907), True, 'import numpy as np\n'), ((6960, 7036), 'numpy.moveaxis', 'np.moveaxis', (['[data_te[cond] for cond in all_conds]'], {'source': '(0)', 'destination': '(-1)'}), '([data_te[cond] for cond in all_conds], source=0, destination=-1)\n', (6971, 7036), True, 'import numpy as np\n'), ((8031, 8058), 'numpy.mean', 'np.mean', (['bs_bc_trsm'], {'axis': '(0)'}), '(bs_bc_trsm, 
axis=0)\n', (8038, 8058), True, 'import numpy as np\n'), ((8443, 8480), 'numpy.arange', 'np.arange', (['T_part', '(T_total - win_size)'], {}), '(T_part, T_total - win_size)\n', (8452, 8480), True, 'import numpy as np\n'), ((8756, 8775), 'numpy.array', 'np.array', (['isc_mu_ij'], {}), '(isc_mu_ij)\n', (8764, 8775), True, 'import numpy as np\n'), ((8844, 8867), 'numpy.mean', 'np.mean', (['isc_mu'], {'axis': '(0)'}), '(isc_mu, axis=0)\n', (8851, 8867), True, 'import numpy as np\n'), ((10968, 11004), 'numpy.array', 'np.array', (['bs_bc_sisc[ref_cond][cond]'], {}), '(bs_bc_sisc[ref_cond][cond])\n', (10976, 11004), True, 'import numpy as np\n'), ((12155, 12191), 'numpy.array', 'np.array', (['bs_bc_tisc[ref_cond][cond]'], {}), '(bs_bc_tisc[ref_cond][cond])\n', (12163, 12191), True, 'import numpy as np\n'), ((16513, 16559), 'numpy.mean', 'np.mean', (['recall[:, :, t:t + win_size]'], {'axis': '(-1)'}), '(recall[:, :, t:t + win_size], axis=-1)\n', (16520, 16559), True, 'import numpy as np\n'), ((4513, 4527), 'numpy.shape', 'np.shape', (['inpt'], {}), '(inpt)\n', (4521, 4527), True, 'import numpy as np\n'), ((4562, 4576), 'numpy.shape', 'np.shape', (['inpt'], {}), '(inpt)\n', (4570, 4576), True, 'import numpy as np\n'), ((4820, 4874), 'analysis.compute_cell_memory_similarity', 'compute_cell_memory_similarity', (['C', 'V', 'inpt', 'leak', 'comp'], {}), '(C, V, inpt, leak, comp)\n', (4850, 4874), False, 'from analysis import compute_stats, compute_cell_memory_similarity, create_sim_dict, batch_compute_true_dk, process_cache, get_trial_cond_ids, trim_data, make_df\n'), ((4919, 4974), 'analysis.create_sim_dict', 'create_sim_dict', (['sim_lca', 'cond_ids'], {'n_targ': 'p.n_segments'}), '(sim_lca, cond_ids, n_targ=p.n_segments)\n', (4934, 4974), False, 'from analysis import compute_stats, compute_cell_memory_similarity, create_sim_dict, batch_compute_true_dk, process_cache, get_trial_cond_ids, trim_data, make_df\n'), ((5567, 5592), 'numpy.array', 'np.array', (['CMs_dlist[cond]'], 
{}), '(CMs_dlist[cond])\n', (5575, 5592), True, 'import numpy as np\n'), ((5640, 5665), 'numpy.array', 'np.array', (['DAs_dlist[cond]'], {}), '(DAs_dlist[cond])\n', (5648, 5665), True, 'import numpy as np\n'), ((16312, 16365), 'scipy.stats.pearsonr', 'pearsonr', (['recall[i_comb, :, t]', 'sisc_change_t[i_comb]'], {}), '(recall[i_comb, :, t], sisc_change_t[i_comb])\n', (16320, 16365), False, 'from scipy.stats import pearsonr\n'), ((16674, 16727), 'scipy.stats.pearsonr', 'pearsonr', (['recall_win_t[i_comb]', 'tisc_change_t[i_comb]'], {}), '(recall_win_t[i_comb], tisc_change_t[i_comb])\n', (16682, 16727), False, 'from scipy.stats import pearsonr\n'), ((5438, 5457), 'numpy.shape', 'np.shape', (['tma[cond]'], {}), '(tma[cond])\n', (5446, 5457), True, 'import numpy as np\n'), ((5734, 5760), 'numpy.shape', 'np.shape', (['CMs_darray[cond]'], {}), '(CMs_darray[cond])\n', (5742, 5760), True, 'import numpy as np\n'), ((5808, 5834), 'numpy.shape', 'np.shape', (['DAs_darray[cond]'], {}), '(DAs_darray[cond])\n', (5816, 5834), True, 'import numpy as np\n'), ((7868, 7929), 'numpy.corrcoef', 'np.corrcoef', (['data_te_srm_rm_i[i_s].T', 'data_te_srm_xm_i[j_s].T'], {}), '(data_te_srm_rm_i[i_s].T, data_te_srm_xm_i[j_s].T)\n', (7879, 7929), True, 'import numpy as np\n'), ((11115, 11134), 'numpy.mean', 'np.mean', (['d_'], {'axis': '(1)'}), '(d_, axis=1)\n', (11122, 11134), True, 'import numpy as np\n'), ((12302, 12321), 'numpy.mean', 'np.mean', (['d_'], {'axis': '(1)'}), '(d_, axis=1)\n', (12309, 12321), True, 'import numpy as np\n'), ((13475, 13494), 'numpy.mean', 'np.mean', (['d_'], {'axis': '(1)'}), '(d_, axis=1)\n', (13482, 13494), True, 'import numpy as np\n'), ((15559, 15571), 'numpy.diag', 'np.diag', (['mat'], {}), '(mat)\n', (15566, 15571), True, 'import numpy as np\n'), ((5294, 5338), 'numpy.max', 'np.max', (["ma_dlist[cond][i_s]['targ']"], {'axis': '(-1)'}), "(ma_dlist[cond][i_s]['targ'], axis=-1)\n", (5300, 5338), True, 'import numpy as np\n'), ((9985, 10031), 
'numpy.transpose', 'np.transpose', (['data_te_srm_rm_i'], {'axes': '(0, 2, 1)'}), '(data_te_srm_rm_i, axes=(0, 2, 1))\n', (9997, 10031), True, 'import numpy as np\n'), ((10053, 10099), 'numpy.transpose', 'np.transpose', (['data_te_srm_xm_i'], {'axes': '(0, 2, 1)'}), '(data_te_srm_xm_i, axes=(0, 2, 1))\n', (10065, 10099), True, 'import numpy as np\n'), ((9807, 9827), 'numpy.diag', 'np.diag', (['sisc_mat_ij'], {}), '(sisc_mat_ij)\n', (9814, 9827), True, 'import numpy as np\n'), ((10229, 10249), 'numpy.diag', 'np.diag', (['tisc_mat_ij'], {}), '(tisc_mat_ij)\n', (10236, 10249), True, 'import numpy as np\n'), ((8544, 8644), 'numpy.corrcoef', 'np.corrcoef', (['data_te_srm_rm_i[i_s][:, t:t + win_size]', 'data_te_srm_xm_i[j_s][:, t:t + win_size]'], {}), '(data_te_srm_rm_i[i_s][:, t:t + win_size], data_te_srm_xm_i[j_s]\n [:, t:t + win_size])\n', (8555, 8644), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import numpy as np
from .base import Denoiser, Denoiser2D
class TVDenoiser(Denoiser):
    """Total-variation denoiser, with optional 3D TV regularization."""

    def __init__(self, iter_num=5, use_3dtv=False):
        # Number of TV iterations and whether to use the 3D variant.
        self.iter_num = iter_num
        self.use_3dtv = use_3dtv

    def denoise(self, x, sigma):
        from .models.TV_denoising import TV_denoising, TV_denoising3d
        tv_fn = TV_denoising3d if self.use_3dtv else TV_denoising
        out = tv_fn(x.squeeze(), sigma, self.iter_num)
        return out.unsqueeze(0)

    def to(self, device):
        # TV denoising is model-free; there is nothing to move.
        return self
class FFDNetDenoiser(Denoiser2D):
    """2D FFDNet denoiser loaded from a pretrained state dict."""

    def __init__(self, n_channels, model_path):
        from .models.network_ffdnet import FFDNet
        net = FFDNet(in_nc=n_channels, out_nc=n_channels, nc=64, nb=15, act_mode='R')
        net.load_state_dict(torch.load(model_path), strict=True)
        net.eval()
        # Freeze all weights — the denoiser is inference-only.
        for _, param in net.named_parameters():
            param.requires_grad = False
        self.model = net

    def denoise_2d(self, x, sigma):
        # FFDNet takes the noise level as a (1, 1, 1, 1) map.
        noise_map = sigma.float().repeat(1, 1, 1, 1)
        return self.model(x, noise_map)

    def to(self, device):
        self.model.to(device)
        return self
class FFDNet3DDenoiser(Denoiser):
    """3D FFDNet denoiser; the noise level is appended as an extra channel."""

    def __init__(self, model_path):
        from .models.network_ffdnet import FFDNet3D
        net = FFDNet3D(in_nc=32, out_nc=31, nc=64, nb=15, act_mode='R')
        state = torch.load(model_path)
        net.load_state_dict(state['net'])
        net.eval()
        # Freeze all weights — the denoiser is inference-only.
        for _, param in net.named_parameters():
            param.requires_grad = False
        self.model = net

    def denoise(self, x, sigma):
        # Append a constant noise-level map as the 32nd input channel.
        noise_map = sigma.float().repeat(1, 1, x.shape[2], x.shape[3])
        return self.model(torch.cat((x, noise_map), dim=1))

    def to(self, device):
        self.model.to(device)
        return self
class IRCNNDenoiser(Denoiser2D):
    """IRCNN denoiser that selects one of 25 pretrained weight sets by noise level.

    `model_path` stores a dict of state dicts keyed by str(index); the
    weights matching the requested sigma are lazily loaded on demand.
    """

    def __init__(self, n_channels, model_path):
        from .models.network_dncnn import IRCNN as net
        self.model = net(in_nc=n_channels, out_nc=n_channels, nc=64)
        # dict of 25 state dicts, keyed by '0'..'24'
        self.model25 = torch.load(model_path)
        self.former_idx = 0

    def denoise_2d(self, x, sigma):
        # Map sigma (scaled by 255) to a model index.
        # BUGFIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `int` is the drop-in replacement.
        current_idx = int(np.ceil(sigma.cpu().numpy()*255./2.)-1)
        if current_idx != self.former_idx:
            # Swap in the weights matching the requested noise level.
            self.model.load_state_dict(
                self.model25[str(current_idx)], strict=True)
            self.model.eval()
            for _, v in self.model.named_parameters():
                v.requires_grad = False
            # self.device is set by to(); call to() before denoising
            self.model = self.model.to(self.device)
            self.former_idx = current_idx
        x = self.model(x)
        return x

    def to(self, device):
        self.device = device
        return self
class DRUNetDenoiser(Denoiser2D):
    """DRUNet (UNetRes) 2D denoiser; large images are denoised tile-by-tile."""

    def __init__(self, n_channels, model_path):
        from .models.network_unet import UNetRes as net
        # in_nc = n_channels + 1: the noise-level map is an extra input channel
        model = net(in_nc=n_channels+1, out_nc=n_channels, nc=[64, 128, 256, 512],
                    nb=4, act_mode='R', downsample_mode="strideconv", upsample_mode="convtranspose")
        model.load_state_dict(torch.load(model_path), strict=True)
        model.eval()
        # freeze all weights — the denoiser is inference-only
        for _, v in model.named_parameters():
            v.requires_grad = False
        self.model = model

    def denoise_2d(self, x, sigma):
        # concatenate a constant noise-level map as an extra channel
        x = torch.cat((x, sigma.float().repeat(1, 1, x.shape[2], x.shape[3])), dim=1)
        x = self._denoise(x, refield=32, min_size=256, modulo=16)
        return x

    def to(self, device):
        self.model.to(device)
        return self

    def _denoise(self, L, refield=32, min_size=256, sf=1, modulo=1):
        '''
        Recursively denoise L, splitting it into overlapping quadrants
        when it is larger than min_size x min_size.

        L: input low-quality image
        refield: effective receptive field of the network, 32 is enough
        min_size: largest size processed in one pass (min_size x min_size)
        sf: scale factor for super-resolution, otherwise 1
        modulo: pad so that height/width are multiples of this value
        '''
        h, w = L.size()[-2:]
        if h*w <= min_size**2:
            # small enough: pad to a multiple of `modulo`, run the net,
            # then crop the output back to the original size
            L = nn.ReplicationPad2d((0, int(np.ceil(w/modulo)*modulo-w), 0, int(np.ceil(h/modulo)*modulo-h)))(L)
            E = self.model(L)
            E = E[..., :h*sf, :w*sf]
        else:
            # split into 4 overlapping quadrants aligned to `refield`
            top = slice(0, (h//2//refield+1)*refield)
            bottom = slice(h - (h//2//refield+1)*refield, h)
            left = slice(0, (w//2//refield+1)*refield)
            right = slice(w - (w//2//refield+1)*refield, w)
            Ls = [L[..., top, left], L[..., top, right], L[..., bottom, left], L[..., bottom, right]]
            if h * w <= 4*(min_size**2):
                Es = [self.model(Ls[i]) for i in range(4)]
            else:
                # still too large: recurse on each quadrant
                Es = [self._denoise(Ls[i], refield=refield,
                                    min_size=min_size, sf=sf, modulo=modulo) for i in range(4)]
            b, c = Es[0].size()[:2]
            # stitch the four quadrant outputs; the overlap is discarded
            E = torch.zeros(b, c, sf * h, sf * w).type_as(L)
            E[..., :h//2*sf, :w//2*sf] = Es[0][..., :h//2*sf, :w//2*sf]
            E[..., :h//2*sf, w//2*sf:w*sf] = Es[1][..., :h//2*sf, (-w + w//2)*sf:]
            E[..., h//2*sf:h*sf, :w//2*sf] = Es[2][..., (-h + h//2)*sf:, :w//2*sf]
            E[..., h//2*sf:h*sf, w//2*sf:w*sf] = Es[3][..., (-h + h//2)*sf:, (-w + w//2)*sf:]
        return E
class QRNN3DDenoiser(Denoiser):
    """QRNN3D volumetric denoiser, optionally fed a noise-level map."""

    def __init__(self, model_path, use_noise_map=True):
        from .models.qrnn import qrnn3d, qrnn3d_masked
        if use_noise_map:
            net = qrnn3d_masked()
        else:
            net = qrnn3d()
        state = torch.load(model_path)
        net.load_state_dict(state['net'])
        net.eval()
        # Freeze all weights — the denoiser is inference-only.
        for _, param in net.named_parameters():
            param.requires_grad = False
        self.model = net
        self.use_noise_map = use_noise_map

    def denoise(self, x, sigma):
        if self.use_noise_map:
            # Stack a constant noise-level volume on top of the input.
            noise_map = sigma.float().repeat(1, x.shape[1], x.shape[2], x.shape[3])
            x = torch.cat((x, noise_map), dim=0)
        out = self.model(torch.unsqueeze(x, 0))
        out = torch.squeeze(out)
        return torch.unsqueeze(out, 0)

    def to(self, device):
        self.model.to(device)
        return self
class GRUNetDenoiser(Denoiser):
    """Denoiser wrapping a GRU-based network that always takes a noise-level map."""

    def __init__(self, model_path):
        """Load the checkpoint at *model_path* and freeze the network."""
        from .models.qrnn import grunet_masked_nobn
        net = grunet_masked_nobn()
        state = torch.load(model_path)
        net.load_state_dict(state['net'])
        net.eval()
        # Inference only: freeze every parameter.
        for p in net.parameters():
            p.requires_grad = False
        self.model = net
        self.use_noise_map = True

    def denoise(self, x, sigma):
        """Denoise *x*; *sigma* (scalar or tensor) becomes a constant noise map."""
        if self.use_noise_map:
            # torch.tensor accepts python scalars here; move to x's device.
            level = torch.tensor(sigma).float().to(x.device)
            level = level.repeat(1, x.shape[1], x.shape[2], x.shape[3])
            x = torch.cat((x, level), dim=0)
        batched = torch.unsqueeze(x, 0)
        restored = self.model(batched)
        return torch.unsqueeze(torch.squeeze(restored), 0)

    def to(self, device):
        """Move the wrapped network to *device*; returns self for chaining."""
        self.model.to(device)
        return self
class GRUNetTVDenoiser(GRUNetDenoiser):
    """GRUNet denoiser averaged with a total-variation denoiser."""

    def __init__(self, model_path):
        super().__init__(model_path)
        self.tv_denoiser = TVDenoiser()

    def denoise(self, x, sigma):
        """Return the mean of the learned and TV-denoised estimates."""
        learned = super().denoise(x, sigma)
        # TV denoiser receives sigma scaled by 255 — presumably it works on
        # a 0-255 intensity scale; confirm against TVDenoiser's contract.
        tv = self.tv_denoiser.denoise(x, sigma*255)
        return (learned + tv) / 2
| [
"numpy.ceil",
"torch.load",
"torch.cat",
"torch.zeros",
"torch.squeeze",
"torch.unsqueeze",
"torch.tensor"
] | [((1462, 1484), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (1472, 1484), False, 'import torch\n'), ((2134, 2156), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (2144, 2156), False, 'import torch\n'), ((5478, 5500), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (5488, 5500), False, 'import torch\n'), ((5900, 5921), 'torch.unsqueeze', 'torch.unsqueeze', (['x', '(0)'], {}), '(x, 0)\n', (5915, 5921), False, 'import torch\n'), ((5960, 5976), 'torch.squeeze', 'torch.squeeze', (['x'], {}), '(x)\n', (5973, 5976), False, 'import torch\n'), ((5989, 6010), 'torch.unsqueeze', 'torch.unsqueeze', (['x', '(0)'], {}), '(x, 0)\n', (6004, 6010), False, 'import torch\n'), ((6285, 6307), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (6295, 6307), False, 'import torch\n'), ((6777, 6798), 'torch.unsqueeze', 'torch.unsqueeze', (['x', '(0)'], {}), '(x, 0)\n', (6792, 6798), False, 'import torch\n'), ((6837, 6853), 'torch.squeeze', 'torch.squeeze', (['x'], {}), '(x)\n', (6850, 6853), False, 'import torch\n'), ((6866, 6887), 'torch.unsqueeze', 'torch.unsqueeze', (['x', '(0)'], {}), '(x, 0)\n', (6881, 6887), False, 'import torch\n'), ((863, 885), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (873, 885), False, 'import torch\n'), ((3125, 3147), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (3135, 3147), False, 'import torch\n'), ((6736, 6764), 'torch.cat', 'torch.cat', (['(x, sigma)'], {'dim': '(0)'}), '((x, sigma), dim=0)\n', (6745, 6764), False, 'import torch\n'), ((4854, 4887), 'torch.zeros', 'torch.zeros', (['b', 'c', '(sf * h)', '(sf * w)'], {}), '(b, c, sf * h, sf * w)\n', (4865, 4887), False, 'import torch\n'), ((6607, 6626), 'torch.tensor', 'torch.tensor', (['sigma'], {}), '(sigma)\n', (6619, 6626), False, 'import torch\n'), ((4044, 4063), 'numpy.ceil', 'np.ceil', (['(w / modulo)'], {}), '(w / modulo)\n', (4051, 4063), True, 'import numpy as np\n'), 
((4080, 4099), 'numpy.ceil', 'np.ceil', (['(h / modulo)'], {}), '(h / modulo)\n', (4087, 4099), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.