code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from __future__ import print_function, division
import numpy as np
from scipy.spatial.distance import cdist
class Solver(object):
    """Estimate the correlation (intrinsic) dimension of a dataset.

    Implements the Grassberger-Procaccia approach: compute all pairwise
    distances, evaluate the correlation integral C(r) on a log-spaced grid
    of radii, and take the steepest slope of log C(r) versus log r.
    """

    def __init__(self, df):
        # Keep the DataFrame and a plain ndarray view of its rows (samples).
        self.df = df
        self.num_samples = self.df.shape[0]
        self.samples = self.df.to_numpy()

    def compute_pairwise_l2distance(self):
        """Squared Euclidean distance for every unordered sample pair.

        Returns a flat array of length N*(N-1)/2, in upper-triangular order.
        """
        rows, cols = np.triu_indices(self.num_samples, 1)
        # <Xi, Xj> per pair via row-wise inner products.
        cross = np.einsum('ij,ij->i', self.samples[rows, :], self.samples[cols, :])
        # <Xi, Xi> for every sample, shape (N,).
        sq_norms = np.einsum('ij,ij->i', self.samples, self.samples)
        # ||Xi - Xj||^2 = ||Xi||^2 + ||Xj||^2 - 2 <Xi, Xj>
        return sq_norms[rows] + sq_norms[cols] - 2 * cross

    def compute_pairwise_l1distance(self):
        """Manhattan (L1) distance for every unordered sample pair, flat array."""
        rows, cols = np.triu_indices(self.num_samples, 1)
        return np.abs(self.samples[rows, :] - self.samples[cols, :]).sum(-1)

    def optimized_pairwise_l1distance(self):
        """NxN matrix of L1 distances; diagonal and upper triangle set to +inf.

        Only the strictly lower-triangular entries are finite, so each pair
        is counted exactly once by the correlation integral.
        """
        dist = cdist(self.samples, self.samples, metric='cityblock')
        dist = dist.astype("float")
        dist[np.triu_indices(self.num_samples)] = float('inf')
        return dist

    def optimized_pairwise_l2distance(self):
        """NxN matrix of squared L2 distances; diagonal/upper triangle = +inf.

        Math: see https://stackoverflow.com/questions/37009647
        """
        sq = np.sum(np.square(self.samples), 1)
        dist = np.add(np.add(-2 * np.dot(self.samples, self.samples.T), sq).T, sq)
        dist = dist.astype("float")
        dist[np.triu_indices(self.num_samples)] = float('inf')
        return dist

    def optimized_compute_Cr(self, distances, r):
        """Correlation integral C(r): fraction of the N*(N-1)/2 pairs closer than r."""
        pair_count = 0.5 * self.num_samples * (self.num_samples - 1)
        return np.sum(distances < r) / pair_count

    def compute_Cr(self, distances, r):
        """C(r) for a flat array of pairwise distances."""
        return np.sum(distances < r) / len(distances)

    def show_curve(self, logrs, version=1):
        """Return the correlation-dimension estimate from the log C(r) curve.

        :param logrs: "start:end:num" string defining the log-radius grid
        :param version: 1 -> L1 distances, anything else -> squared L2
        """
        lo, hi, num = logrs.split(":")
        assert int(num) > 0
        logrs = np.linspace(float(lo), float(hi), num=int(num))
        rs = np.exp(logrs)
        if version == 1:
            distances = self.optimized_pairwise_l1distance()
        else:
            distances = self.optimized_pairwise_l2distance()
        crs = [self.optimized_compute_Cr(distances, radius) for radius in rs]
        logCrs = np.log(np.array(crs))
        # Finite difference of log C(r) w.r.t. log r (last point repeated).
        shifted = logCrs[[*range(1, len(logCrs)), -1]]
        slope = (logCrs - shifted) / (logrs[0] - logrs[1])
        # Drop NaN (flat zero segments) and inf (log of zero counts).
        slope = slope[~np.isnan(slope)]
        slope = slope[np.isfinite(slope)]
        return np.max(slope)
| [
"scipy.spatial.distance.cdist",
"numpy.sum",
"numpy.abs",
"numpy.square",
"numpy.einsum",
"numpy.triu_indices",
"numpy.isfinite",
"numpy.isnan",
"numpy.max",
"numpy.array",
"numpy.exp",
"numpy.dot"
] | [((388, 424), 'numpy.triu_indices', 'np.triu_indices', (['self.num_samples', '(1)'], {}), '(self.num_samples, 1)\n', (403, 424), True, 'import numpy as np\n'), ((476, 537), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'self.samples[R, :]', 'self.samples[C, :]'], {}), "('ij,ij->i', self.samples[R, :], self.samples[C, :])\n", (485, 537), True, 'import numpy as np\n'), ((654, 703), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'self.samples', 'self.samples'], {}), "('ij,ij->i', self.samples, self.samples)\n", (663, 703), True, 'import numpy as np\n'), ((892, 928), 'numpy.triu_indices', 'np.triu_indices', (['self.num_samples', '(1)'], {}), '(self.num_samples, 1)\n', (907, 928), True, 'import numpy as np\n'), ((1442, 1495), 'scipy.spatial.distance.cdist', 'cdist', (['self.samples', 'self.samples'], {'metric': '"""cityblock"""'}), "(self.samples, self.samples, metric='cityblock')\n", (1447, 1495), False, 'from scipy.spatial.distance import cdist\n'), ((1510, 1543), 'numpy.triu_indices', 'np.triu_indices', (['self.num_samples'], {}), '(self.num_samples)\n', (1525, 1543), True, 'import numpy as np\n'), ((2181, 2214), 'numpy.triu_indices', 'np.triu_indices', (['self.num_samples'], {}), '(self.num_samples)\n', (2196, 2214), True, 'import numpy as np\n'), ((2794, 2807), 'numpy.exp', 'np.exp', (['logrs'], {}), '(logrs)\n', (2800, 2807), True, 'import numpy as np\n'), ((3746, 3762), 'numpy.max', 'np.max', (['logCrs_d'], {}), '(logCrs_d)\n', (3752, 3762), True, 'import numpy as np\n'), ((2035, 2058), 'numpy.square', 'np.square', (['self.samples'], {}), '(self.samples)\n', (2044, 2058), True, 'import numpy as np\n'), ((2414, 2435), 'numpy.sum', 'np.sum', (['(distances < r)'], {}), '(distances < r)\n', (2420, 2435), True, 'import numpy as np\n'), ((2547, 2568), 'numpy.sum', 'np.sum', (['(distances < r)'], {}), '(distances < r)\n', (2553, 2568), True, 'import numpy as np\n'), ((3401, 3417), 'numpy.array', 'np.array', (['logCrs'], {}), '(logCrs)\n', (3409, 3417), True, 
'import numpy as np\n'), ((3586, 3607), 'numpy.isfinite', 'np.isfinite', (['logCrs_d'], {}), '(logCrs_d)\n', (3597, 3607), True, 'import numpy as np\n'), ((1083, 1130), 'numpy.abs', 'np.abs', (['(self.samples[R, :] - self.samples[C, :])'], {}), '(self.samples[R, :] - self.samples[C, :])\n', (1089, 1130), True, 'import numpy as np\n'), ((3538, 3556), 'numpy.isnan', 'np.isnan', (['logCrs_d'], {}), '(logCrs_d)\n', (3546, 3556), True, 'import numpy as np\n'), ((2094, 2130), 'numpy.dot', 'np.dot', (['self.samples', 'self.samples.T'], {}), '(self.samples, self.samples.T)\n', (2100, 2130), True, 'import numpy as np\n')] |
import os
import numpy as np
import pandas as pd
from os.path import join
def search_best_id(root_path, index_list):
    """Return the experiment index whose final validation loss is lowest.

    Looks for the best realization of the experiment among different
    learning rates; NaN losses are treated as +inf so they never win.

    :param root_path: str of the folder containing the train_<i> results
    :param index_list: list of indices for the experiments with same parameters
    """
    final_losses = []
    for idx in index_list:
        # Validation loss at the last recorded iteration of this run.
        history = pd.read_csv(join(root_path, 'train_%i/history.csv' % idx))
        final_losses.append(history['val_loss'].values[-1])
    final_losses = np.array(final_losses)
    final_losses[np.isnan(final_losses)] = np.inf
    return index_list[np.argmin(final_losses)]
def flatten_train_json(df):
    """Flatten the nested train.json DataFrame, one row per experiment.

    We assume here that the train.json file always has the same keys:
    each experiment column holds a 'dataset' dict, a 'hyper' dict and the
    top-level fields 'id', 'output_path', 'train_completed'.

    :param df: pandas DataFrame as loaded from train.json
    :return: flat DataFrame, each row corresponds to an experiment
    """
    df = df.T
    dataset_keys = ['scenario', 'original_dims', 'output_dims',
                    'additional_dims', 'mean_val', 'std_val',
                    'noise', 'noise_mean', 'noise_sigma',
                    'n_training', 'redundancy_amount']
    hyper_keys = ['learning_rate', 'architecture', 'epochs',
                  'batch_size', 'loss', 'optimizer',
                  'lr_at_plateau', 'reduction_factor',
                  'validation_check']
    upper_keys = ['id', 'output_path', 'train_completed']
    rows = []
    for idx in range(df.shape[0]):
        # Unpack the two nested dicts, then append the top-level fields.
        row = [df['dataset'][idx][k] for k in dataset_keys]
        row += [df['hyper'][idx][k] for k in hyper_keys]
        row += [df[k][idx] for k in upper_keys]
        rows.append(row)
    return pd.DataFrame(rows, columns=dataset_keys + hyper_keys + upper_keys)
def generate_bm(df, experiment_keys):
    """Select the experiments matching every (key, value) in *experiment_keys*.

    :param df: flat DataFrame of experiments (see flatten_train_json)
    :param experiment_keys: dict of column -> required value
    :returns: copy of *df* reduced to the matching rows
    """
    selection = df.copy()
    for key, value in experiment_keys.items():
        selection = selection[selection[key] == value]
    return selection
def retrieve_exp_from_json(json_path, experiment_keys):
    """Collapse flatten_train_json, generate_bm and search_best_id in one call.

    :param json_path: the file path to the train.json file
    :param experiment_keys: dict of fixed experiment parameters; at most one
        parameter (besides 'id'/'output_path') may be left free
    :returns: [flat row of the best experiment, its history DataFrame,
        its activations array, its test array]
    :raises ValueError: if more than one parameter is free, or if some
        selected experiment has not finished training
    """
    df = flatten_train_json(pd.read_json(json_path))
    dirname = os.path.dirname(json_path)
    index_list = generate_bm(df, experiment_keys)['id'].values
    df_ = df.iloc[index_list]
    n_exps, keys = df_.shape
    # Boolean mask: True for every column taking more than one distinct value
    # (the 'free' parameters); 'id' and 'output_path' always differ, so skip them.
    bm = np.array(np.array([len(set(df_[f_]))
                        for f_ in df_.columns
                        if not (f_ == 'id' or f_ == 'output_path')]) != 1)
    if np.sum(bm) > 1:
        raise ValueError('There is more than one free parameter')
    if np.sum(df_['train_completed']) < n_exps:
        raise ValueError('Not all the experiments have been trained')
    best_id = search_best_id(dirname, index_list)
    path_best_id = join(dirname, 'train_%i' % best_id)
    activations = np.load(join(path_best_id, 'activations.npy'))
    history = pd.read_csv(join(path_best_id, 'history.csv'))
    test = np.load(join(path_best_id, 'test.npy'))
    df_ = df.iloc[best_id]
    return [df_, history, activations, test] | [
"pandas.DataFrame",
"numpy.sum",
"os.path.dirname",
"numpy.isnan",
"numpy.argmin",
"pandas.read_json",
"os.path.join"
] | [((1976, 2028), 'pandas.DataFrame', 'pd.DataFrame', (['list_all_samples'], {'columns': 'columns_name'}), '(list_all_samples, columns=columns_name)\n', (1988, 2028), True, 'import pandas as pd\n'), ((2875, 2901), 'os.path.dirname', 'os.path.dirname', (['json_path'], {}), '(json_path)\n', (2890, 2901), False, 'import os\n'), ((3478, 3513), 'os.path.join', 'join', (['dirname', "('train_%i' % best_id)"], {}), "(dirname, 'train_%i' % best_id)\n", (3482, 3513), False, 'from os.path import join\n'), ((662, 680), 'numpy.isnan', 'np.isnan', (['loss_val'], {}), '(loss_val)\n', (670, 680), True, 'import numpy as np\n'), ((716, 735), 'numpy.argmin', 'np.argmin', (['loss_val'], {}), '(loss_val)\n', (725, 735), True, 'import numpy as np\n'), ((2836, 2859), 'pandas.read_json', 'pd.read_json', (['json_path'], {}), '(json_path)\n', (2848, 2859), True, 'import pandas as pd\n'), ((3207, 3217), 'numpy.sum', 'np.sum', (['bm'], {}), '(bm)\n', (3213, 3217), True, 'import numpy as np\n'), ((3297, 3327), 'numpy.sum', 'np.sum', (["df_['train_completed']"], {}), "(df_['train_completed'])\n", (3303, 3327), True, 'import numpy as np\n'), ((3540, 3577), 'os.path.join', 'join', (['path_best_id', '"""activations.npy"""'], {}), "(path_best_id, 'activations.npy')\n", (3544, 3577), False, 'from os.path import join\n'), ((3605, 3638), 'os.path.join', 'join', (['path_best_id', '"""history.csv"""'], {}), "(path_best_id, 'history.csv')\n", (3609, 3638), False, 'from os.path import join\n'), ((3659, 3689), 'os.path.join', 'join', (['path_best_id', '"""test.npy"""'], {}), "(path_best_id, 'test.npy')\n", (3663, 3689), False, 'from os.path import join\n'), ((447, 494), 'os.path.join', 'join', (['root_path', "('train_%i/history.csv' % index)"], {}), "(root_path, 'train_%i/history.csv' % index)\n", (451, 494), False, 'from os.path import join\n')] |
#!/usr/bin/python
import numpy as np
import sys
import os
import expression_parser
__doc__ = 'see source'
# --- module-level simulation parameters ---------------------------------
# All of these are populated from the 'log' file by read_parameters() and
# restored to these defaults by reset_globals().
dx = 0
dy = 0
dz = 0
dt = 0
nx = 0
ny = 0
nz = 0
output_period = 0
n_ion_populations = 0
icmr = []
t_end = 0
tr_start = 0
# deps*: presumably energy-bin widths for the output spectra — TODO confirm
deps = 0
deps_p = 0
deps_ph = 0
deps_i = 0
a0y = 0
a0z = 0
lmbda = 0
ne = 0
xsigma = 0
nfilm = 0
filmwidth = 0
nerflow = 0
Tlflow = 0
mcrlflow = 0
vlflow = 0
Trflow = 0
vrflow = 0
catching = False
dump_photons = False
particles_for_output = 'e'
output_mode = 0
# Folder holding the simulation output and the 'log' file.
data_folder = '../results/'
# Time-stamp suffix (string) appended to data file names, e.g. 'rho' + t.
t = '0'
# Moving-window velocity used by xi().
v = 1
def xi( x, t ):
    """Comoving coordinate: x shifted by the window velocity v times t."""
    return x - v * t
def read_parameters(log=None):
    """Read the simulation parameters (dx, nx, t_end, ...) from *log*
    into the module-level globals.

    :param log: path of the log file; defaults to data_folder + 'log'.

    The log format is 'keyword' on one line, value on the next. Fixes over
    the previous version: the file is opened with a 'with' statement (no
    handle leak if a value fails to parse) and keywords are matched after
    strip(), so a missing trailing newline on the last keyword line no
    longer prevents a parameter from being read.
    """
    global dx,dy,dz,dt,nx,ny,nz,output_period,n_ion_populations,icmr,t_end,tr_start,\
    deps,deps_p,deps_ph,deps_i,a0y,a0z,lmbda,ne,xsigma,nfilm,filmwidth,nerflow,\
    Tlflow, mcrlflow, vlflow, Trflow, vrflow, catching, dump_photons, particles_for_output, output_mode
    if log is None:
        log = os.path.join(data_folder,'log')
    icmr = []
    reset_globals()
    # log keyword -> (name of the module global, converter);
    # note the 'lambda' keyword is stored in the global 'lmbda'.
    scalars = {
        'dx': ('dx', float), 'dy': ('dy', float), 'dz': ('dz', float),
        'dt': ('dt', float),
        'nx': ('nx', int), 'ny': ('ny', int), 'nz': ('nz', int),
        'output_period': ('output_period', float),
        'n_ion_populations': ('n_ion_populations', int),
        't_end': ('t_end', float), 'tr_start': ('tr_start', float),
        'deps': ('deps', float), 'deps_p': ('deps_p', float),
        'deps_ph': ('deps_ph', float), 'deps_i': ('deps_i', float),
        'a0y': ('a0y', float), 'a0z': ('a0z', float),
        'lambda': ('lmbda', float), 'ne': ('ne', float),
        'xsigma': ('xsigma', float),
        'nfilm': ('nfilm', float), 'filmwidth': ('filmwidth', float),
        'nerflow': ('nerflow', float), 'Tlflow': ('Tlflow', float),
        'mcrlflow': ('mcrlflow', float), 'vlflow': ('vlflow', float),
        'Trflow': ('Trflow', float), 'vrflow': ('vrflow', float),
        'output_mode': ('output_mode', int),
    }
    with open(log) as f:
        for line in f:
            key = line.strip()
            if key in scalars:
                name, conv = scalars[key]
                globals()[name] = conv(next(f))  # value is on the following line
            elif key == 'icmr':
                # one entry per ion population, appended in file order
                icmr.append(float(next(f)))
            elif key == 'catching':
                if next(f).strip() == 'on':
                    catching = True
            elif key == 'dump_photons':
                if next(f).strip() == 'on':
                    dump_photons = True
            elif key == 'particles_for_output':
                # 'ph' (photons) is renamed to 'g' (gamma) internally
                particles_for_output = next(f).strip().replace('ph','g')
def density(name='rho',plane='xy', log=None):
    """Return the 2d slice *plane* of the field stored in data_folder + *name* + t.

    :param name: base name of the data file ('rho', 'w', 'inv', ...)
    :param plane: 'xy', 'xz' or 'yz'; any other value falls back to 'xy'
    :param log: optional log path forwarded to chameleon.configure
    :returns: 2d numpy array, transposed so the second coordinate is axis 0
    :raises Exception: if the file size does not match nx*ny + nx*nz + ny*nz
    """
    filename = os.path.join(data_folder, name + t)
    if output_mode == 1 and name != 'w' and name != 'inv':
        # output_mode 1: delegate parsing to the compiled 'chameleon' reader
        sys.path.append(os.path.join('..', 'lib', 'chameleon'))
        try:
            import chameleon
        except Exception as e:
            raise ImportError('Chameleon cannot be imported. Check if it is compiled. Error: ' + str(e))
        chameleon.configure(log if log is not None else os.path.join(data_folder, 'log'))
        return chameleon.read2d(filename, plane)
    else:
        with open(filename) as f:
            density = np.array([float(x) for x in f])
        # The file stores three concatenated slices:
        # nx rows of (ny + nz) values (xy then xz), then a ny*nz block (yz).
        n = nx*ny + nx*nz + ny*nz
        if density.size != n:
            raise Exception("The size of data in [%s] equal to %d doesn't match n=%d" % (name + t, density.size, n))
        if (plane!='xy') & (plane!='xz') & (plane!='yz'):
            print('resread.density: warning: ambiguous value for *plane* - {0}, value \'xy\' used instead'.format(plane))
            plane = 'xy'
        if plane=='xy':
            density = np.reshape(density[:-ny*nz],(nx,ny+nz))[:,:-nz]
        elif plane=='xz':
            density = np.reshape(density[:-ny*nz],(nx,ny+nz))[:,ny:]
        else:
            density = np.reshape(density[nx*(ny+nz):],(ny,nz))
        density = density.transpose()
        return density
def particles(name='phasespace', s=['x','y','g'], every=1):
    """Return per-particle quantities *s* read from data_folder + *name* + t.

    Each particle record occupies 9 consecutive lines in the file:
    q, x, y, z, ux, uy, uz, g, chi (in that order — see the offsets below).

    :param name: base name of the phasespace file
    :param s: list of quantities; besides the stored ones, derived values
              'vx','vy','vz','phi','theta','t','xi' are accepted
    :param every: keep only every *every*-th particle record
    :returns: array of shape (len(s), n_particles)

    NOTE(review): *s* has a mutable default list; it is never mutated here,
    but treat it as read-only.
    """
    f = open(os.path.join(data_folder, name+t))
    # (i//9) groups the lines back into 9-line particle records.
    data = f.readlines() if every == 1 else [line for i, line in enumerate(f.readlines()) if (i//9) % every == 0]
    f.close()
    n = len(data)//9
    m = len(s)
    a = np.empty((m,n))
    for i in np.arange(0,m,1):
        if s[i]=='q':
            for j in np.arange(0,n,1):
                a[i][j] = float(data[9*j])
        elif s[i]=='x':
            for j in np.arange(0,n,1):
                a[i][j] = float(data[9*j+1])
        elif s[i]=='y':
            for j in np.arange(0,n,1):
                a[i][j] = float(data[9*j+2])
        elif s[i]=='z':
            for j in np.arange(0,n,1):
                a[i][j] = float(data[9*j+3])
        elif s[i]=='ux':
            for j in np.arange(0,n,1):
                a[i][j] = float(data[9*j+4])
        elif s[i]=='uy':
            for j in np.arange(0,n,1):
                a[i][j] = float(data[9*j+5])
        elif s[i]=='uz':
            for j in np.arange(0,n,1):
                a[i][j] = float(data[9*j+6])
        elif s[i]=='g':
            for j in np.arange(0,n,1):
                a[i][j] = float(data[9*j+7])
        elif s[i]=='chi':
            for j in np.arange(0,n,1):
                a[i][j] = float(data[9*j+8])
        elif s[i]=='t': # for qplot.tracks()
            for j in np.arange(n):
                a[i][j] = tr_start + j*dt
        elif s[i]=='xi': # for qplot.tracks()
            for j in np.arange(n):
                a[i][j] = xi( float(data[9*j+1]), ( tr_start + j*dt) )
        elif s[i]=='vx':
            # velocity = momentum / gamma
            for j in np.arange(0,n,1):
                a[i][j] = float(data[9*j+4])/float(data[9*j+7])
        elif s[i]=='vy':
            for j in np.arange(0,n,1):
                a[i][j] = float(data[9*j+5])/float(data[9*j+7])
        elif s[i]=='vz':
            for j in np.arange(0,n,1):
                a[i][j] = float(data[9*j+6])/float(data[9*j+7])
        # phi = 0, theta = 0 - direction of x axis
        # theta = pi / 2 - direction of z axis
        # phi = pi / 2, theta = 0 - direction of y axis
        elif s[i]=='phi':
            # NOTE(review): np.asfarray is removed in NumPy 2.0; replace with
            # np.asarray(..., dtype=float) when upgrading.
            a[i,:] = np.arctan2(np.asfarray(data[5::9]), np.asfarray(data[4::9]))
        elif s[i]=='theta':
            x = np.asfarray(data[4::9])
            y = np.asfarray(data[5::9])
            a[i,:] = np.arctan2(np.asfarray(data[6::9]), np.sqrt(x * x + y * y))
        else:
            print('resread.particles: warning: ambiguous value for s[{0}] - {1}, value \'x\' used instead'.format(i, s[i]))
            for j in np.arange(0,n,1):
                a[i][j] = float(data[9*j+1])
    return a
def t_data(name='energy', step=None, silent=False):
    """Return rows of [t, value1, value2, ...] read from data_folder + *name*.

    :param name: file name inside data_folder; one tab-separated row per step
    :param step: time increment per row; defaults to the module global dt
    :param silent: suppress the progress message
    :returns: 2d numpy array, one row per file line, column 0 is the time
    :raises ValueError: if the file contains no lines

    Fixes: the file is now closed deterministically ('with'), and the
    'step == None' comparison was replaced by the identity check.
    """
    if not silent:
        print ('Fetching t_data from file: {0}; data_folder = {1}'.format(name, data_folder))
    if step is None:
        step = dt
    nrows = 0
    data = []
    # 'with' guarantees the handle is closed even if a field fails to parse.
    with open(os.path.join(data_folder, name)) as f:
        for line in f:
            fields = line.split('\t')
            data.append(nrows*step)  # prepend the time stamp of this row
            nrows += 1
            for field in fields:
                data.append(float(field))
    if nrows == 0:
        raise ValueError('The file is empty!')
    return np.reshape(data, (nrows, len(data)//nrows))
def tracks(particles='e', filter=None):
    """Return the list of particle tracks found in data_folder.

    Each track is a dict with keys t, x, y, z, ux, uy, uz, q, g, chi,
    vx, vy, vz, file (see read_track).

    :param particles: 'e', 'p' or 'g'; only the first character is used
    :param filter: optional expression string; tracks for which it evaluates
                   falsy are dropped (parsed by expression_parser)
    NOTE(review): *filter* shadows the builtin of the same name.
    """
    read_parameters()
    # File-name suffix per species, e.g. 'track_-1...' for electrons.
    suffix_dict = {'e': '-1', 'p': '1', 'g': '0'}
    track_names = [x for x in os.listdir(data_folder) if x.startswith('track_'+suffix_dict[particles[0]])]
    tracks = [read_track(x) for x in track_names]
    if filter is not None:
        filter_expr = expression_parser.to_polish(filter)
        # The lambda resolves variable names against the current track's dict.
        tracks = [t for t in tracks if expression_parser.evaluate(filter_expr, lambda var_name: t[var_name])]
    return tracks
def read_track(track_name):
    """Read one track file from data_folder.

    The file holds 9 values per time step (q, x, y, z, ux, uy, uz, g, chi);
    the returned dict maps each of those keys to a 1d array over time and
    additionally provides 't', 'vx', 'vy', 'vz' and 'file'.
    """
    filename = os.path.join(data_folder, track_name)
    raw = np.loadtxt(filename).reshape(9, -1, order='F')
    npoints = raw[0].size
    component_keys = ('q', 'x', 'y', 'z', 'ux', 'uy', 'uz', 'g', 'chi')
    track = {key: raw[idx] for idx, key in enumerate(component_keys)}
    track['file'] = filename
    # Uniform time axis, one sample per stored step.
    track['t'] = np.linspace(0, dt * (npoints - 1), npoints)
    # Velocities: momentum components divided by gamma.
    for comp in ('x', 'y', 'z'):
        track['v' + comp] = track['u' + comp] / track['g']
    return track
def smooth(xs, lr, squeeze = True):
    """Return *xs* smoothed with a raised-cosine window of width ~len(xs)/lr.

    *lr* >= len(xs) results in no smoothing (xs is returned unchanged).
    If *squeeze* == True the result is resampled down to length *lr* by
    linear interpolation, otherwise it keeps length len(xs).
    For example, try smooth(range(15), 5).
    """
    n = len(xs)
    if lr >= n:
        # Requested resolution is at least the data resolution: nothing to do.
        return xs
    else:
        a = np.zeros(n)
        sigma = 1.0 * n / lr  # kernel half-width, in samples
        ns = int(np.ceil(sigma))
        # Interior points: full cosine window of 2*ns + 1 taps.
        for i in np.arange(ns, n - ns):
            for j in np.arange(-ns, ns + 1):
                a[i] += xs[i+j] * np.cos(0.5 * np.pi * j / sigma)
        tmp = 0
        for j in np.arange(-ns, ns + 1):
            tmp += np.cos(0.5 * np.pi * j / sigma)
        a = a / tmp  # normalize by the window sum
        # Edge points: shrink the window symmetrically near both ends,
        # handling index i from the left end and -(i+1) from the right.
        for i in np.arange(ns):
            tmp = 0
            for j in np.arange(-i, i + 1):
                a[i] += xs[i+j] * np.cos(0.5 * np.pi * j / sigma)
                a[-(i + 1)] += xs[-(i+1)+j] * np.cos(0.5 * np.pi * j / sigma)
                tmp += np.cos(0.5 * np.pi * j / sigma)
            a[i] = a[i] / tmp
            a[-(i+1)] = a[-(i+1)] / tmp
        if squeeze == True:
            # Resample a (length n) down to length lr by linear interpolation.
            b = np.zeros(lr)
            b[0] = a[0]
            b[-1] = a[-1]
            for i in np.arange(1, lr - 1):
                x = 1.0 * (n - 1) * i / (lr - 1)
                j = int(np.floor(x))
                x1 = x - j
                x2 = 1 - x1
                b[i] = a[j] * x2 + a[j+1] * x1
            return b
        else:
            return a
def onaxis(filename, sx = 1, sy = 1, sz = 1, av = 'None'):
    """Return the on-axis 1d profile of a field, smoothed down to nx/sx points.

    sx, sy, sz are the number of neighboring points used for averaging and
    smoothing. av == 'y' or av == 'z' yields the density averaged along the
    y or z axis, respectively.
    WARNING: if sx != 1, the x-distance between points is alternating.
    """
    lr = int(nx / sx)
    if filename == 'x':
        # Special case: return the x coordinates of the grid nodes.
        a = np.arange(nx) * dx
    else:
        if av == 'y':
            a = np.sum(density(filename), 0) / ny
        elif av == 'z':
            a = np.sum(density(filename, 'xz'), 0) / nz
        else:
            # Average 2*sy-1 rows around the y axis and 2*sz-1 around z.
            a = np.zeros(nx)
            for i in np.linspace(1 - sy, sy - 1, 2 * sy - 1):
                a += density(filename)[int(ny/2+i),:]
            for i in np.linspace(1 - sz, sz - 1, 2 * sz - 1):
                a += density(filename,'xz')[int(nz/2+i),:]
            a = a / (2 * (sy + sz) - 2)
    return smooth(a, lr)
def reset_globals():
    """Restore every module-level simulation parameter to its default value.

    Bug fix: 'dump_photons' and 'output_mode' were missing from the global
    declaration, so their trailing assignments only created function-local
    variables and those two module globals were never actually reset.
    """
    global dx,dy,dz,dt,nx,ny,nz,output_period,n_ion_populations,icmr,t_end,tr_start,\
    deps,deps_p,deps_ph,deps_i,a0y,a0z,lmbda,ne,xsigma,nfilm,filmwidth,nerflow,\
    Tlflow, mcrlflow, vlflow, Trflow, vrflow, catching, dump_photons, particles_for_output, output_mode
    dx = 0
    dy = 0
    dz = 0
    dt = 0
    nx = 0
    ny = 0
    nz = 0
    output_period = 0
    n_ion_populations = 0
    icmr = []
    t_end = 0
    tr_start = 0
    deps = 0
    deps_p = 0
    deps_ph = 0
    deps_i = 0
    a0y = 0
    a0z = 0
    lmbda = 0
    ne = 0
    xsigma = 0
    nfilm = 0
    filmwidth = 0
    nerflow = 0
    Tlflow = 0
    mcrlflow = 0
    vlflow = 0
    Trflow = 0
    vrflow = 0
    catching = False
    dump_photons = False
    particles_for_output = 'e'
    output_mode = 0
| [
"chameleon.read2d",
"numpy.ceil",
"numpy.empty",
"numpy.floor",
"numpy.asfarray",
"numpy.zeros",
"expression_parser.evaluate",
"numpy.arange",
"numpy.loadtxt",
"expression_parser.to_polish",
"numpy.linspace",
"numpy.cos",
"numpy.reshape",
"os.path.join",
"os.listdir",
"numpy.sqrt"
] | [((3589, 3624), 'os.path.join', 'os.path.join', (['data_folder', '(name + t)'], {}), '(data_folder, name + t)\n', (3601, 3624), False, 'import os\n'), ((5248, 5264), 'numpy.empty', 'np.empty', (['(m, n)'], {}), '((m, n))\n', (5256, 5264), True, 'import numpy as np\n'), ((5277, 5295), 'numpy.arange', 'np.arange', (['(0)', 'm', '(1)'], {}), '(0, m, 1)\n', (5286, 5295), True, 'import numpy as np\n'), ((9049, 9086), 'os.path.join', 'os.path.join', (['data_folder', 'track_name'], {}), '(data_folder, track_name)\n', (9061, 9086), False, 'import os\n'), ((9102, 9122), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {}), '(filename)\n', (9112, 9122), True, 'import numpy as np\n'), ((948, 980), 'os.path.join', 'os.path.join', (['data_folder', '"""log"""'], {}), "(data_folder, 'log')\n", (960, 980), False, 'import os\n'), ((4031, 4064), 'chameleon.read2d', 'chameleon.read2d', (['filename', 'plane'], {}), '(filename, plane)\n', (4047, 4064), False, 'import chameleon\n'), ((5041, 5076), 'os.path.join', 'os.path.join', (['data_folder', '(name + t)'], {}), '(data_folder, name + t)\n', (5053, 5076), False, 'import os\n'), ((7941, 7972), 'os.path.join', 'os.path.join', (['data_folder', 'name'], {}), '(data_folder, name)\n', (7953, 7972), False, 'import os\n'), ((8711, 8746), 'expression_parser.to_polish', 'expression_parser.to_polish', (['filter'], {}), '(filter)\n', (8738, 8746), False, 'import expression_parser\n'), ((9562, 9611), 'numpy.linspace', 'np.linspace', (['(0)', '(dt * (track_size - 1))', 'track_size'], {}), '(0, dt * (track_size - 1), track_size)\n', (9573, 9611), True, 'import numpy as np\n'), ((10137, 10148), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (10145, 10148), True, 'import numpy as np\n'), ((10228, 10249), 'numpy.arange', 'np.arange', (['ns', '(n - ns)'], {}), '(ns, n - ns)\n', (10237, 10249), True, 'import numpy as np\n'), ((10395, 10417), 'numpy.arange', 'np.arange', (['(-ns)', '(ns + 1)'], {}), '(-ns, ns + 1)\n', (10404, 10417), True, 'import 
numpy as np\n'), ((10507, 10520), 'numpy.arange', 'np.arange', (['ns'], {}), '(ns)\n', (10516, 10520), True, 'import numpy as np\n'), ((3708, 3746), 'os.path.join', 'os.path.join', (['""".."""', '"""lib"""', '"""chameleon"""'], {}), "('..', 'lib', 'chameleon')\n", (3720, 3746), False, 'import os\n'), ((5338, 5356), 'numpy.arange', 'np.arange', (['(0)', 'n', '(1)'], {}), '(0, n, 1)\n', (5347, 5356), True, 'import numpy as np\n'), ((8535, 8558), 'os.listdir', 'os.listdir', (['data_folder'], {}), '(data_folder)\n', (8545, 8558), False, 'import os\n'), ((10195, 10209), 'numpy.ceil', 'np.ceil', (['sigma'], {}), '(sigma)\n', (10202, 10209), True, 'import numpy as np\n'), ((10272, 10294), 'numpy.arange', 'np.arange', (['(-ns)', '(ns + 1)'], {}), '(-ns, ns + 1)\n', (10281, 10294), True, 'import numpy as np\n'), ((10438, 10469), 'numpy.cos', 'np.cos', (['(0.5 * np.pi * j / sigma)'], {}), '(0.5 * np.pi * j / sigma)\n', (10444, 10469), True, 'import numpy as np\n'), ((10563, 10583), 'numpy.arange', 'np.arange', (['(-i)', '(i + 1)'], {}), '(-i, i + 1)\n', (10572, 10583), True, 'import numpy as np\n'), ((10898, 10910), 'numpy.zeros', 'np.zeros', (['lr'], {}), '(lr)\n', (10906, 10910), True, 'import numpy as np\n'), ((10982, 11002), 'numpy.arange', 'np.arange', (['(1)', '(lr - 1)'], {}), '(1, lr - 1)\n', (10991, 11002), True, 'import numpy as np\n'), ((11623, 11636), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (11632, 11636), True, 'import numpy as np\n'), ((3982, 4014), 'os.path.join', 'os.path.join', (['data_folder', '"""log"""'], {}), "(data_folder, 'log')\n", (3994, 4014), False, 'import os\n'), ((4595, 4640), 'numpy.reshape', 'np.reshape', (['density[:-ny * nz]', '(nx, ny + nz)'], {}), '(density[:-ny * nz], (nx, ny + nz))\n', (4605, 4640), True, 'import numpy as np\n'), ((4774, 4820), 'numpy.reshape', 'np.reshape', (['density[nx * (ny + nz):]', '(ny, nz)'], {}), '(density[nx * (ny + nz):], (ny, nz))\n', (4784, 4820), True, 'import numpy as np\n'), ((5444, 5462), 
'numpy.arange', 'np.arange', (['(0)', 'n', '(1)'], {}), '(0, n, 1)\n', (5453, 5462), True, 'import numpy as np\n'), ((8786, 8855), 'expression_parser.evaluate', 'expression_parser.evaluate', (['filter_expr', '(lambda var_name: t[var_name])'], {}), '(filter_expr, lambda var_name: t[var_name])\n', (8812, 8855), False, 'import expression_parser\n'), ((10752, 10783), 'numpy.cos', 'np.cos', (['(0.5 * np.pi * j / sigma)'], {}), '(0.5 * np.pi * j / sigma)\n', (10758, 10783), True, 'import numpy as np\n'), ((11834, 11846), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (11842, 11846), True, 'import numpy as np\n'), ((11868, 11907), 'numpy.linspace', 'np.linspace', (['(1 - sy)', '(sy - 1)', '(2 * sy - 1)'], {}), '(1 - sy, sy - 1, 2 * sy - 1)\n', (11879, 11907), True, 'import numpy as np\n'), ((11984, 12023), 'numpy.linspace', 'np.linspace', (['(1 - sz)', '(sz - 1)', '(2 * sz - 1)'], {}), '(1 - sz, sz - 1, 2 * sz - 1)\n', (11995, 12023), True, 'import numpy as np\n'), ((4691, 4736), 'numpy.reshape', 'np.reshape', (['density[:-ny * nz]', '(nx, ny + nz)'], {}), '(density[:-ny * nz], (nx, ny + nz))\n', (4701, 4736), True, 'import numpy as np\n'), ((5552, 5570), 'numpy.arange', 'np.arange', (['(0)', 'n', '(1)'], {}), '(0, n, 1)\n', (5561, 5570), True, 'import numpy as np\n'), ((10330, 10361), 'numpy.cos', 'np.cos', (['(0.5 * np.pi * j / sigma)'], {}), '(0.5 * np.pi * j / sigma)\n', (10336, 10361), True, 'import numpy as np\n'), ((10619, 10650), 'numpy.cos', 'np.cos', (['(0.5 * np.pi * j / sigma)'], {}), '(0.5 * np.pi * j / sigma)\n', (10625, 10650), True, 'import numpy as np\n'), ((10697, 10728), 'numpy.cos', 'np.cos', (['(0.5 * np.pi * j / sigma)'], {}), '(0.5 * np.pi * j / sigma)\n', (10703, 10728), True, 'import numpy as np\n'), ((11077, 11088), 'numpy.floor', 'np.floor', (['x'], {}), '(x)\n', (11085, 11088), True, 'import numpy as np\n'), ((5660, 5678), 'numpy.arange', 'np.arange', (['(0)', 'n', '(1)'], {}), '(0, n, 1)\n', (5669, 5678), True, 'import numpy as np\n'), 
((5769, 5787), 'numpy.arange', 'np.arange', (['(0)', 'n', '(1)'], {}), '(0, n, 1)\n', (5778, 5787), True, 'import numpy as np\n'), ((5878, 5896), 'numpy.arange', 'np.arange', (['(0)', 'n', '(1)'], {}), '(0, n, 1)\n', (5887, 5896), True, 'import numpy as np\n'), ((5987, 6005), 'numpy.arange', 'np.arange', (['(0)', 'n', '(1)'], {}), '(0, n, 1)\n', (5996, 6005), True, 'import numpy as np\n'), ((6095, 6113), 'numpy.arange', 'np.arange', (['(0)', 'n', '(1)'], {}), '(0, n, 1)\n', (6104, 6113), True, 'import numpy as np\n'), ((6205, 6223), 'numpy.arange', 'np.arange', (['(0)', 'n', '(1)'], {}), '(0, n, 1)\n', (6214, 6223), True, 'import numpy as np\n'), ((6338, 6350), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (6347, 6350), True, 'import numpy as np\n'), ((6469, 6481), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (6478, 6481), True, 'import numpy as np\n'), ((6604, 6622), 'numpy.arange', 'np.arange', (['(0)', 'n', '(1)'], {}), '(0, n, 1)\n', (6613, 6622), True, 'import numpy as np\n'), ((6732, 6750), 'numpy.arange', 'np.arange', (['(0)', 'n', '(1)'], {}), '(0, n, 1)\n', (6741, 6750), True, 'import numpy as np\n'), ((6860, 6878), 'numpy.arange', 'np.arange', (['(0)', 'n', '(1)'], {}), '(0, n, 1)\n', (6869, 6878), True, 'import numpy as np\n'), ((7155, 7178), 'numpy.asfarray', 'np.asfarray', (['data[5::9]'], {}), '(data[5::9])\n', (7166, 7178), True, 'import numpy as np\n'), ((7180, 7203), 'numpy.asfarray', 'np.asfarray', (['data[4::9]'], {}), '(data[4::9])\n', (7191, 7203), True, 'import numpy as np\n'), ((7249, 7272), 'numpy.asfarray', 'np.asfarray', (['data[4::9]'], {}), '(data[4::9])\n', (7260, 7272), True, 'import numpy as np\n'), ((7289, 7312), 'numpy.asfarray', 'np.asfarray', (['data[5::9]'], {}), '(data[5::9])\n', (7300, 7312), True, 'import numpy as np\n'), ((7553, 7571), 'numpy.arange', 'np.arange', (['(0)', 'n', '(1)'], {}), '(0, n, 1)\n', (7562, 7571), True, 'import numpy as np\n'), ((7345, 7368), 'numpy.asfarray', 'np.asfarray', (['data[6::9]'], 
{}), '(data[6::9])\n', (7356, 7368), True, 'import numpy as np\n'), ((7370, 7392), 'numpy.sqrt', 'np.sqrt', (['(x * x + y * y)'], {}), '(x * x + y * y)\n', (7377, 7392), True, 'import numpy as np\n')] |
import os
import random as ran
import time
import gym
from keras import backend as K
from keras.initializers import VarianceScaling
from keras.layers import Dense, Flatten
from keras.layers.convolutional import Conv2D
from keras.optimizers import Adam
import numpy as np
from skimage.color import rgb2gray
from skimage.transform import resize
import tensorflow as tf
from tqdm import tqdm
from dqn_lib import DQNAgent
from ring_buffer import RingBuffer
def pre_processing(observe):
    """
    Frame grayscaling and subsampling.

    Args:
        observe: input RGB frame (210x160x3)
    Returns:
        processed frame: cropped, 84x84, uint8 in [0, 255]
    """
    # Grayscale first (210x160x3 -> 210x160), then crop rows 16:201.
    cropped = rgb2gray(observe)[16:201, :]
    # Resize to the network input resolution and rescale to byte range.
    return np.uint8(resize(cropped, (84, 84), mode='constant') * 255)
def experiment(n_episodes, default_policy=False, policy=None, render=False):
    """
    Run a RL experiment that can be either training or testing

    Args:
        n_episodes: number of train/test episodes
        default_policy: boolean to enable testing/training phase
        policy: numpy tensor with a trained policy
        render: enable OpenAI environment graphical rendering
    Returns:
        Dictionary with:
            cumulative experiments outcomes
            list of steps per episode
            list of cumulative rewards
            trained policy
    """
    with tf.device('/gpu:0'):
        res = [0,0] # array of results accumulator: {[0]: Loss, [1]: Victory}
        scores = [] # Cumulative rewards
        steps = [] # Steps per episode
        # Sliding window over the last 100 episode rewards, used for the progress printout.
        reward_list = RingBuffer(100)
        env = gym.make('PongDeterministic-v4')
        input_dim = env.observation_space.shape[0]
        output_dim = env.action_space.n
        if default_policy:
            # Testing phase: load a saved model and act near-greedily (epsilon fixed at 0.05).
            agent = DQNAgent(output_dim, None, use_ddqn=True, default_policy=True, model_filename=policy, epsilon=0.05, epsilon_lower_bound=0.05)
        else:
            # Training phase: DQN-style conv net over stacks of four 84x84 frames.
            layers = [Conv2D(32, (8, 8), strides=(4, 4), activation='relu', input_shape=(84, 84, 4), kernel_initializer=VarianceScaling(scale=2.0)),
                    Conv2D(64, (4, 4), strides=(2, 2), activation='relu', kernel_initializer=VarianceScaling(scale=2.0)),
                    Conv2D(64, (3, 3), strides=(1, 1), activation='relu', kernel_initializer=VarianceScaling(scale=2.0)),
                    Flatten(),
                    Dense(512, activation='relu'),
                    Dense(output_dim)]
            agent = DQNAgent(output_dim, layers, use_ddqn=True, memory_size=700000, gamma=0.99, learn_thresh=50000,
                            epsilon_lower_bound=0.02, epsilon_decay_function=lambda e: e - (0.98 / 950000), update_rate=10000,
                            optimizer=Adam(0.00025))
        gathered_frame = 0  # total environment frames consumed across all episodes
        for episode_number in tqdm(range(n_episodes), desc="Episode"):
            frame = env.reset()
            state = pre_processing(frame)
            empty_state = np.zeros(state.shape, dtype="uint8")
            cumulative_reward = 0
            has_lost_life = True
            t = 0  # steps taken in the current episode
            while True:
                if has_lost_life:
                    # After any scored point: rebuild the 4-frame stack from blanks and
                    # take action 1 for a random number of frames so starts are varied.
                    next_action = 1 # [1, 4, 5][ran.randint(0, 2)]
                    stack = np.stack((empty_state, empty_state, empty_state, empty_state), axis=2)
                    stack = np.reshape([stack], (1, 84, 84, 4))
                    for _ in range(ran.randint(1, 10)):
                        gathered_frame += 1
                        frame, reward,end,_ = env.step(next_action)
                        new_state = np.reshape(pre_processing(frame), (1, 84, 84, 1))
                        new_stack = np.append(new_state, stack[:, :, :, :3], axis=3)  # slide the frame window
                        stack = new_stack
                        if (render):
                            env.render()
                    has_lost_life = False
                next_action = agent.act(stack)
                new_state, reward, end, _ = env.step(next_action)
                if (render):
                    env.render()
                    time.sleep(0.02)
                reward = np.clip(reward, -1., 1.)  # clip rewards to {-1, 0, +1}
                if reward != 0:
                    # In Pong a non-zero reward means a point was scored; restart the stack.
                    has_lost_life = True
                cumulative_reward += reward
                new_state = np.reshape(pre_processing(new_state), (1, 84, 84, 1))
                new_stack = np.append(new_state, stack[:, :, :, :3], axis=3)
                agent.memoise((stack, next_action, reward, new_state, has_lost_life))
                stack = new_stack
                gathered_frame += 1
                if end:
                    reward_list.append(cumulative_reward)
                    if cumulative_reward > 0:
                        res[1] += 1
                        print("You Won!, steps:", t, "reward:", reward_list.mean(), "frames:", gathered_frame)
                    else:
                        res[0] += 1
                        print("You Lost!, steps:", t, "reward:", reward_list.mean(), "frames:", gathered_frame)
                    steps.append(t)
                    break
                agent.learn()
                t += 1
            scores.append(cumulative_reward)
            if episode_number >= 50 and episode_number % 10 == 0:
                # Periodic checkpoint of the network weights.
                model_name = "partial_model_pong" + str(episode_number)
                agent.save_model(model_name)
        env.close()
        return {"results": np.array(res), "steps": np.array(steps), "scores": np.array(scores), "agent": agent}
# Training
# Train a fresh DDQN agent for 10000 episodes (rendering disabled for speed)
# and persist the learned weights under the name "ddqn".
res = experiment(10000, render=False)
res["agent"].save_model("ddqn")
# Testing
# Reload the saved policy and evaluate it for 20 rendered episodes.
res = experiment(20, render=True, default_policy=True, policy="ddqn")
| [
"numpy.stack",
"skimage.color.rgb2gray",
"gym.make",
"dqn_lib.DQNAgent",
"random.randint",
"tensorflow.device",
"numpy.zeros",
"keras.layers.Flatten",
"numpy.clip",
"keras.optimizers.Adam",
"time.sleep",
"numpy.append",
"keras.initializers.VarianceScaling",
"keras.layers.Dense",
"skimage... | [((658, 675), 'skimage.color.rgb2gray', 'rgb2gray', (['observe'], {}), '(observe)\n', (666, 675), False, 'from skimage.color import rgb2gray\n'), ((1451, 1470), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (1460, 1470), True, 'import tensorflow as tf\n'), ((1653, 1668), 'ring_buffer.RingBuffer', 'RingBuffer', (['(100)'], {}), '(100)\n', (1663, 1668), False, 'from ring_buffer import RingBuffer\n'), ((1683, 1715), 'gym.make', 'gym.make', (['"""PongDeterministic-v4"""'], {}), "('PongDeterministic-v4')\n", (1691, 1715), False, 'import gym\n'), ((775, 820), 'skimage.transform.resize', 'resize', (['grayscaled', '(84, 84)'], {'mode': '"""constant"""'}), "(grayscaled, (84, 84), mode='constant')\n", (781, 820), False, 'from skimage.transform import resize\n'), ((1868, 1997), 'dqn_lib.DQNAgent', 'DQNAgent', (['output_dim', 'None'], {'use_ddqn': '(True)', 'default_policy': '(True)', 'model_filename': 'policy', 'epsilon': '(0.05)', 'epsilon_lower_bound': '(0.05)'}), '(output_dim, None, use_ddqn=True, default_policy=True,\n model_filename=policy, epsilon=0.05, epsilon_lower_bound=0.05)\n', (1876, 1997), False, 'from dqn_lib import DQNAgent\n'), ((3017, 3053), 'numpy.zeros', 'np.zeros', (['state.shape'], {'dtype': '"""uint8"""'}), "(state.shape, dtype='uint8')\n", (3025, 3053), True, 'import numpy as np\n'), ((5518, 5531), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (5526, 5531), True, 'import numpy as np\n'), ((5542, 5557), 'numpy.array', 'np.array', (['steps'], {}), '(steps)\n', (5550, 5557), True, 'import numpy as np\n'), ((5569, 5585), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (5577, 5585), True, 'import numpy as np\n'), ((2421, 2430), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2428, 2430), False, 'from keras.layers import Dense, Flatten\n'), ((2452, 2481), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (2457, 2481), False, 'from 
keras.layers import Dense, Flatten\n'), ((2503, 2520), 'keras.layers.Dense', 'Dense', (['output_dim'], {}), '(output_dim)\n', (2508, 2520), False, 'from keras.layers import Dense, Flatten\n'), ((4198, 4224), 'numpy.clip', 'np.clip', (['reward', '(-1.0)', '(1.0)'], {}), '(reward, -1.0, 1.0)\n', (4205, 4224), True, 'import numpy as np\n'), ((4453, 4501), 'numpy.append', 'np.append', (['new_state', 'stack[:, :, :, :3]'], {'axis': '(3)'}), '(new_state, stack[:, :, :, :3], axis=3)\n', (4462, 4501), True, 'import numpy as np\n'), ((2803, 2816), 'keras.optimizers.Adam', 'Adam', (['(0.00025)'], {}), '(0.00025)\n', (2807, 2816), False, 'from keras.optimizers import Adam\n'), ((3319, 3389), 'numpy.stack', 'np.stack', (['(empty_state, empty_state, empty_state, empty_state)'], {'axis': '(2)'}), '((empty_state, empty_state, empty_state, empty_state), axis=2)\n', (3327, 3389), True, 'import numpy as np\n'), ((3418, 3453), 'numpy.reshape', 'np.reshape', (['[stack]', '(1, 84, 84, 4)'], {}), '([stack], (1, 84, 84, 4))\n', (3428, 3453), True, 'import numpy as np\n'), ((4155, 4171), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (4165, 4171), False, 'import time\n'), ((2128, 2154), 'keras.initializers.VarianceScaling', 'VarianceScaling', ([], {'scale': '(2.0)'}), '(scale=2.0)\n', (2143, 2154), False, 'from keras.initializers import VarianceScaling\n'), ((2250, 2276), 'keras.initializers.VarianceScaling', 'VarianceScaling', ([], {'scale': '(2.0)'}), '(scale=2.0)\n', (2265, 2276), False, 'from keras.initializers import VarianceScaling\n'), ((2372, 2398), 'keras.initializers.VarianceScaling', 'VarianceScaling', ([], {'scale': '(2.0)'}), '(scale=2.0)\n', (2387, 2398), False, 'from keras.initializers import VarianceScaling\n'), ((3490, 3508), 'random.randint', 'ran.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (3501, 3508), True, 'import random as ran\n'), ((3745, 3793), 'numpy.append', 'np.append', (['new_state', 'stack[:, :, :, :3]'], {'axis': '(3)'}), '(new_state, stack[:, 
:, :, :3], axis=3)\n', (3754, 3793), True, 'import numpy as np\n')] |
import numpy as np
import h5py
# from ipdb import set_trace as stop
__all__ = ['File_observation', 'File_photosphere', 'File_chromosphere']
class File_observation(object):
    """
    Class that defines an observation. This can be used to easily save observations in the appropriate format
    """
    def __init__(self, file=None, mode='single'):
        """
        Parameters
        ----------
        file : str (optional)
            If given, the observation is read from this file
        mode : str ('single' or 'multi')
            'single' for one-pixel observations, 'multi' for maps of pixels
        """
        self.mode = mode
        # Initialize the empty template first.  The original implementation
        # read the file and then unconditionally overwrote `self.obs` with
        # this template, discarding the loaded data.
        self.obs = {'stokes': None, 'sigma': None, 'los': None, 'boundary': None, 'wavelength': None, 'weights': None}
        if (file is not None):
            self.obs = self.read(file)
    def set_size(self, n_lambda, n_pixel=1):
        """
        Set the number of wavelengths and number of pixels of the current observation

        Parameters
        ----------
        n_lambda : int
            Number of wavelength points
        n_pixel : int (optional, equal to 1 as default)
            Number of pixels of the output

        Returns
        -------
        None
        """
        if (self.mode == 'single' and n_pixel > 1):
            raise Exception("Single pixel models cannot contain more than one pixel")
        self.n_pixel = n_pixel
        self.n_lambda = n_lambda
        self.obs['stokes'] = np.zeros((n_pixel,n_lambda,4), dtype=np.float64)
        self.obs['sigma'] = np.zeros((n_pixel,n_lambda,4), dtype=np.float64)
        self.obs['los'] = np.zeros((n_pixel,3), dtype=np.float64)
        self.obs['boundary'] = np.zeros((n_pixel,n_lambda,4), dtype=np.float64)
        self.obs['mask'] = np.zeros((n_pixel,), dtype=np.int8)
        self.obs['weights'] = np.zeros((n_lambda,4), dtype=np.float64)
        self.obs['wavelength'] = np.zeros((n_lambda), dtype=np.float64)
    def save(self, file):
        """
        Save the current observation

        Parameters
        ----------
        file : str
            Name of the output files. Extensions will be added to it

        Returns
        -------
        None
        """
        # Save wavelength
        print("Saving wavelength file : {0}.wavelength".format(file))
        np.savetxt('{0}.wavelength'.format(file), self.obs['wavelength'], header='lambda')
        # Save weights
        print("Saving weights file : {0}.weights".format(file))
        f = open('{0}.weights'.format(file), 'w')
        f.write('# WeightI WeightQ WeightU WeightV\n')
        for i in range(self.n_lambda):
            f.write('{0} {1} {2} {3}\n'.format(self.obs['weights'][i,0], self.obs['weights'][i,1], self.obs['weights'][i,2], self.obs['weights'][i,3]))
        f.close()
        if (self.mode == 'single'):
            print("Saving 1D Stokes file : {0}.1d".format(file))
            f = open('{0}.1d'.format(file), 'w')
            f.write('# LOS theta_LOS, phi_LOS, gamma_LOS\n')
            f.write('{0} {1} {2}\n'.format(self.obs['los'][0,0], self.obs['los'][0,1], self.obs['los'][0,2]))
            f.write('\n')
            f.write('# Boundary condition I/Ic(mu=1), Q/Ic(mu=1), U/Ic(mu=1), V/Ic(mu=1)\n')
            f.write('{0} {1} {2} {3}\n'.format(self.obs['boundary'][0,0,0], self.obs['boundary'][0,0,1], self.obs['boundary'][0,0,2], self.obs['boundary'][0,0,3]))
            f.write('\n')
            f.write('# SI SQ SU SV sigmaI sigmaQ sigmaU sigmaV\n')
            # One row per wavelength: the 4 Stokes components followed by their noises.
            tmp = np.hstack([np.squeeze(self.obs['stokes']), np.squeeze(self.obs['sigma'])])
            np.savetxt(f, tmp)
            f.close()
        if (self.mode == 'multi'):
            print("Saving 3D Stokes file : {0}.h5".format(file))
            f = h5py.File('{0}.h5'.format(file), 'w')
            db_stokes = f.create_dataset('stokes', self.obs['stokes'].shape, dtype=np.float64)
            db_sigma = f.create_dataset('sigma', self.obs['sigma'].shape, dtype=np.float64)
            db_los = f.create_dataset('LOS', self.obs['los'].shape, dtype=np.float64)
            db_boundary = f.create_dataset('boundary', self.obs['boundary'].shape, dtype=np.float64)
            db_stokes[:] = self.obs['stokes']
            db_sigma[:] = self.obs['sigma']
            db_los[:] = self.obs['los']
            db_boundary[:] = self.obs['boundary']
            f.close()
            print("Saving 3D mask file : {0}.mask".format(file))
            f = h5py.File('{0}.mask'.format(file), 'w')
            db_mask = f.create_dataset('mask', self.obs['mask'].shape, dtype=np.int8)
            db_mask[:] = self.obs['mask']
            f.close()
class File_photosphere(object):
    """
    Class that defines a model photosphere and can be used to easily save observations
    """
    def __init__(self, file=None, mode='single'):
        """
        Parameters
        ----------
        file : str (optional)
            If given, the model is read from this file
        mode : str ('single' or 'multi')
            'single' for one-pixel models, 'multi' for maps of pixels
        """
        self.mode = mode
        # Initialize the empty template first.  The original implementation
        # read the file and then unconditionally overwrote `self.model`,
        # discarding the loaded data.
        self.model = {'model': None, 'ff': None}
        if (file is not None):
            self.model = self.read(file)
    def set_size(self, nz, n_pixel=1):
        """
        Set the number of depth points and number of pixels of the current atmosphere

        Parameters
        ----------
        nz : int
            Number of depth points of the atmosphere
        n_pixel : int (optional, equal to 1 as default)
            Number of pixels of the output

        Returns
        -------
        None
        """
        if (self.mode == 'single' and n_pixel > 1):
            raise Exception("Single pixel models cannot contain more than one pixel")
        self.n_pixel = n_pixel
        # The original assigned an undefined name `n_lambda` here, which
        # raised NameError; record the depth-grid size instead.
        self.nz = nz
        self.model['model'] = np.zeros((n_pixel,nz,8), dtype=np.float64)
        self.model['ff'] = np.zeros((n_pixel,), dtype=np.float64)
    def set_default(self, n_pixel=1, default='hsra'):
        """
        Set the atmosphere to one of the default ones available in the code

        Parameters
        ----------
        n_pixel : int (optional, equal to 1 as default)
            Number of pixels of the output
        default : str ('hsra' -> Harvard-Smithsonian Reference Atmosphere)

        Returns
        -------
        None
        """
        if (self.mode == 'single' and n_pixel > 1):
            raise Exception("Single pixel models cannot contain more than one pixel")
        print("Setting photosphere to model {0}".format(default))
        # Default models ship in the package's data/ directory, next to this module.
        path = str(__file__).split('/')
        filename = '/'.join(path[0:-1])+'/data/{0}.1d'.format(default)
        # Header: first line is a label, second line holds the filling factor.
        with open(filename, 'r') as f:
            f.readline()
            ff = float(f.readline())
        model = np.loadtxt(filename, skiprows=4)
        nz = model.shape[0]
        self.model['model'] = np.zeros((n_pixel,nz,8), dtype=np.float64)
        self.model['ff'] = np.zeros((n_pixel,), dtype=np.float64)
        self.model['model'][:] = model[None,:,:]
        self.model['ff'][:] = ff
    def list_models(self):
        # `docs` is printed verbatim at runtime; do not reformat it.
        docs = """
        All models have been extracted from SIR (https://github.com/BasilioRuiz/SIR-code/tree/master/models)
        One-component quiet Sun models:
         holmu11.mod ... <NAME>., & <NAME>., 1974, Sol Phys. 39 19
         hsra11.mod ... Harvard Smithsonian Reference Atmosphere (<NAME>.,
                    <NAME>., <NAME>., & <NAME>., 1971. Sol. Phys. 18, 347)
         valc11.mod ... <NAME>., <NAME>., & <NAME>., 1981, ApJS 45, 635
         mackkl11.mod ... <NAME>., <NAME>., <NAME>., <NAME>.,
                    <NAME>., & <NAME>., 1986 ApJ 306 284
         nelsoncold.mod <NAME>. 1978 Sol. Phys. 60, 5
         nelsonhot.mod <NAME>. 1978 Sol. Phys. 60, 5
         grevesse11.mod .. <NAME>., <NAME>. 1999 A&A 347, 348
        Sunspot Models:
         emaltby11.mod .. (E model) <NAME>., <NAME>., <NAME>.,
                    <NAME>., <NAME>., & <NAME>., 1986 ApJ 306 284
         mmaltby11.mod .. (M model) <NAME>., <NAME>., <NAME>.,
                    <NAME>., <NAME>., & <NAME>., 1986 ApJ 306 284
         cool11.mod ... <NAME>., <NAME>., <NAME>.,
                    Del Toro Iniesta J.C., & Vázquez M. 1994 A&A 291 622
                    (Umbral model for a big spot)
         hot11.mod ... <NAME>., <NAME>., <NAME>.,
                    Del Toro Iniesta J.C., & <NAME>. 1994 A&A 291 622
                    (Umbral model for a small spot)
         penumjti11.mod .. Del <NAME>., <NAME>., <NAME>.,
                    1994, ApJ 436,400
                    (penumbral model)
        Active Regions Models:
         solannt11.mod .. (network model, <NAME>, private comunication)
         solanpl11.mod .. (plage model, <NAME>, private comunication)
        Fontenla Models:
            Photospheric models
            FALB = MODEL1001
            FALC11 - QS model cell center
                An area with the same intensity as the median in a histogram of a Ca II K image of a quiet area of the Sun.
                We find that the median is very close to the peak of the distribution but is statistically more stable.
                These intensities correspond to most of the central area of supergranular cells that are usually known as "quiet-Sun cell interior."
            FALD = MODEL1002 : Network
            FALE11- QS model network
                A bright area separating supergranulation (or network) cells, often called "network lane." We describe this category as "quiet-Sun network."
            FALF = MODEL1003 - QS model active network
                Certain network lane areas that are much brighter than the average. We describe this category as "active network."
                It spans from logtau=1.3 to -6.8
            FALF11= FALF but spans from logtau=1.4 to -5.7
            FALH = MODEL1004- Plage model
                It spans from logtau=1.5 to -6.6
            FALH11= FALH but spans from logtau=1.4 to -5.7
            FALP= MODEL1005- Facula model
                It spans from logtau=1.3 to -6.7
            FALP11=FALP but spans from logtau=1.1 to -5.7
            FALR11 - penumbral model
            FALS= MODEL1006 - sunspot model spans from logtau=1.4 to -5.4
            FALS11= FALS but spans from logtau=1.3 to -4.9
        """
        print(docs)
    def save(self, file, default=None):
        """
        Save the current model

        Parameters
        ----------
        file : str
            Name of the output files. Extensions will be added to it

        Returns
        -------
        None
        """
        if (self.mode == 'single'):
            print("Saving photospheric 1D model : {0}.1d".format(file))
            f = open('{0}.1d'.format(file), 'w')
            f.write('ff\n')
            f.write('{0}\n'.format(self.model['ff'][0]))
            f.write('\n')
            f.write(' logtau T Pe vmic v Bx By Bz\n')
            np.savetxt(f, np.squeeze(self.model['model']))
            f.close()
        if (self.mode == 'multi'):
            print("Saving photospheric 3D model : {0}.h5".format(file))
            f = h5py.File('{0}.h5'.format(file), 'w')
            db_model = f.create_dataset('model', self.model['model'].shape, dtype=np.float64)
            db_ff = f.create_dataset('ff', self.model['ff'].shape, dtype=np.float64)
            db_model[:] = self.model['model']
            db_ff[:] = self.model['ff']
            f.close()
class File_chromosphere(object):
    """
    Class that defines a model atmosphere and can be used to easily save observations
    """
    def __init__(self, file=None, mode='single'):
        """
        Parameters
        ----------
        file : str (optional)
            If given, the model is read from this file
        mode : str ('single' or 'multi')
            'single' for one-pixel models, 'multi' for maps of pixels
        """
        self.mode = mode
        # Initialize the empty template first.  The original implementation
        # read the file and then unconditionally overwrote `self.model`,
        # discarding the loaded data.
        self.model = {'model': None, 'ff': None}
        if (file is not None):
            self.model = self.read(file)
    def set_size(self, n_pixel=1):
        """
        Set the number of pixels of the current atmosphere

        Parameters
        ----------
        n_pixel : int (optional, equal to 1 as default)
            Number of pixels of the output

        Returns
        -------
        None
        """
        if (self.mode == 'single' and n_pixel > 1):
            raise Exception("Single pixel models cannot contain more than one pixel")
        self.n_pixel = n_pixel
        # (The original assigned an undefined name `n_lambda` here, which
        # raised NameError; a chromosphere slab has no wavelength axis.)
        self.model['model'] = np.zeros((n_pixel,8), dtype=np.float64)
        self.model['ff'] = np.zeros((n_pixel,), dtype=np.float64)
    def set_default(self, n_pixel=1, default='disk'):
        """
        Set the atmosphere to one of the default ones available in the code

        Parameters
        ----------
        n_pixel : int (optional, equal to 1 as default)
            Number of pixels of the output
        default : str ('disk' -> on-disk observations, 'offlimb' -> off-limb observations)

        Returns
        -------
        None
        """
        if (self.mode == 'single' and n_pixel > 1):
            raise Exception("Single pixel models cannot contain more than one pixel")
        if (default == 'disk'):
            print("Setting standard chromosphere")
            self.model['model'] = np.zeros((n_pixel,8), dtype=np.float64)
            self.model['ff'] = np.zeros((n_pixel,), dtype=np.float64)
            # [Bx, By, Bz, tau, v, deltav, beta, a]
            self.model['model'][:] = np.array([0.0,0.0,0.0,1.0,0.0,8.0,1.0,0.0])[None,:]
            self.model['ff'][:] = 1.0
        if (default == 'offlimb'):
            print("Setting standard chromosphere")
            self.model['model'] = np.zeros((n_pixel,8), dtype=np.float64)
            self.model['ff'] = np.zeros((n_pixel,), dtype=np.float64)
            # Off-limb default differs only in deltav (14 instead of 8).
            self.model['model'][:] = np.array([0.0,0.0,0.0,1.0,0.0,14.0,1.0,0.0])[None,:]
            self.model['ff'][:] = 1.0
    def save(self, file, default=None):
        """
        Save the current model

        Parameters
        ----------
        file : str
            Name of the output files. Extensions will be added to it

        Returns
        -------
        None
        """
        if (self.mode == 'single'):
            print("Saving chromospheric 1D model : {0}.1d".format(file))
            f = open('{0}.1d'.format(file), 'w')
            f.write('Bx [G] By [G] Bz [G] tau v [km/s] deltav [km/s] beta a ff\n')
            np.savetxt(f, np.atleast_2d(np.hstack([np.squeeze(self.model['model']), self.model['ff'][0]])))
            f.close()
        if (self.mode == 'multi'):
            print("Saving photospheric 3D model : {0}.h5".format(file))
            f = h5py.File('{0}.h5'.format(file), 'w')
            db_model = f.create_dataset('model', self.model['model'].shape, dtype=np.float64)
            db_ff = f.create_dataset('ff', self.model['ff'].shape, dtype=np.float64)
            db_model[:] = self.model['model']
            db_ff[:] = self.model['ff']
            f.close()
"numpy.savetxt",
"numpy.zeros",
"numpy.array",
"numpy.loadtxt",
"numpy.squeeze"
] | [((1218, 1268), 'numpy.zeros', 'np.zeros', (['(n_pixel, n_lambda, 4)'], {'dtype': 'np.float64'}), '((n_pixel, n_lambda, 4), dtype=np.float64)\n', (1226, 1268), True, 'import numpy as np\n'), ((1295, 1345), 'numpy.zeros', 'np.zeros', (['(n_pixel, n_lambda, 4)'], {'dtype': 'np.float64'}), '((n_pixel, n_lambda, 4), dtype=np.float64)\n', (1303, 1345), True, 'import numpy as np\n'), ((1370, 1410), 'numpy.zeros', 'np.zeros', (['(n_pixel, 3)'], {'dtype': 'np.float64'}), '((n_pixel, 3), dtype=np.float64)\n', (1378, 1410), True, 'import numpy as np\n'), ((1441, 1491), 'numpy.zeros', 'np.zeros', (['(n_pixel, n_lambda, 4)'], {'dtype': 'np.float64'}), '((n_pixel, n_lambda, 4), dtype=np.float64)\n', (1449, 1491), True, 'import numpy as np\n'), ((1518, 1553), 'numpy.zeros', 'np.zeros', (['(n_pixel,)'], {'dtype': 'np.int8'}), '((n_pixel,), dtype=np.int8)\n', (1526, 1553), True, 'import numpy as np\n'), ((1585, 1626), 'numpy.zeros', 'np.zeros', (['(n_lambda, 4)'], {'dtype': 'np.float64'}), '((n_lambda, 4), dtype=np.float64)\n', (1593, 1626), True, 'import numpy as np\n'), ((1659, 1695), 'numpy.zeros', 'np.zeros', (['n_lambda'], {'dtype': 'np.float64'}), '(n_lambda, dtype=np.float64)\n', (1667, 1695), True, 'import numpy as np\n'), ((5434, 5478), 'numpy.zeros', 'np.zeros', (['(n_pixel, nz, 8)'], {'dtype': 'np.float64'}), '((n_pixel, nz, 8), dtype=np.float64)\n', (5442, 5478), True, 'import numpy as np\n'), ((5504, 5542), 'numpy.zeros', 'np.zeros', (['(n_pixel,)'], {'dtype': 'np.float64'}), '((n_pixel,), dtype=np.float64)\n', (5512, 5542), True, 'import numpy as np\n'), ((6410, 6442), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'skiprows': '(4)'}), '(filename, skiprows=4)\n', (6420, 6442), True, 'import numpy as np\n'), ((6503, 6547), 'numpy.zeros', 'np.zeros', (['(n_pixel, nz, 8)'], {'dtype': 'np.float64'}), '((n_pixel, nz, 8), dtype=np.float64)\n', (6511, 6547), True, 'import numpy as np\n'), ((6573, 6611), 'numpy.zeros', 'np.zeros', (['(n_pixel,)'], {'dtype': 
'np.float64'}), '((n_pixel,), dtype=np.float64)\n', (6581, 6611), True, 'import numpy as np\n'), ((12418, 12458), 'numpy.zeros', 'np.zeros', (['(n_pixel, 8)'], {'dtype': 'np.float64'}), '((n_pixel, 8), dtype=np.float64)\n', (12426, 12458), True, 'import numpy as np\n'), ((12485, 12523), 'numpy.zeros', 'np.zeros', (['(n_pixel,)'], {'dtype': 'np.float64'}), '((n_pixel,), dtype=np.float64)\n', (12493, 12523), True, 'import numpy as np\n'), ((3394, 3412), 'numpy.savetxt', 'np.savetxt', (['f', 'tmp'], {}), '(f, tmp)\n', (3404, 3412), True, 'import numpy as np\n'), ((13232, 13272), 'numpy.zeros', 'np.zeros', (['(n_pixel, 8)'], {'dtype': 'np.float64'}), '((n_pixel, 8), dtype=np.float64)\n', (13240, 13272), True, 'import numpy as np\n'), ((13303, 13341), 'numpy.zeros', 'np.zeros', (['(n_pixel,)'], {'dtype': 'np.float64'}), '((n_pixel,), dtype=np.float64)\n', (13311, 13341), True, 'import numpy as np\n'), ((13604, 13644), 'numpy.zeros', 'np.zeros', (['(n_pixel, 8)'], {'dtype': 'np.float64'}), '((n_pixel, 8), dtype=np.float64)\n', (13612, 13644), True, 'import numpy as np\n'), ((13675, 13713), 'numpy.zeros', 'np.zeros', (['(n_pixel,)'], {'dtype': 'np.float64'}), '((n_pixel,), dtype=np.float64)\n', (13683, 13713), True, 'import numpy as np\n'), ((11024, 11055), 'numpy.squeeze', 'np.squeeze', (["self.model['model']"], {}), "(self.model['model'])\n", (11034, 11055), True, 'import numpy as np\n'), ((13380, 13430), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0, 0.0, 8.0, 1.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 0.0, 8.0, 1.0, 0.0])\n', (13388, 13430), True, 'import numpy as np\n'), ((13752, 13803), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0, 0.0, 14.0, 1.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 0.0, 14.0, 1.0, 0.0])\n', (13760, 13803), True, 'import numpy as np\n'), ((3318, 3348), 'numpy.squeeze', 'np.squeeze', (["self.obs['stokes']"], {}), "(self.obs['stokes'])\n", (3328, 3348), True, 'import numpy as np\n'), ((3350, 3379), 'numpy.squeeze', 'np.squeeze', 
(["self.obs['sigma']"], {}), "(self.obs['sigma'])\n", (3360, 3379), True, 'import numpy as np\n'), ((14468, 14499), 'numpy.squeeze', 'np.squeeze', (["self.model['model']"], {}), "(self.model['model'])\n", (14478, 14499), True, 'import numpy as np\n')] |
"""ngutils - utilities for manipulating a neuroglancer viewer
"""
from copy import deepcopy
import numpy as np
import neuroglancer
"""A shader that displays an image as gray in Neuroglancer"""
gray_shader = """
void main() {
emitGrayscale(%f * toNormalized(getDataValue()));
}
"""
"""A shader that displays an image in red in Neuroglancer"""
red_shader = """
void main() {
emitRGB(vec3(%f * toNormalized(getDataValue()), 0, 0));
}
"""
"""A shader that displays an image in green in Neuroglancer"""
green_shader = """
void main() {
emitRGB(vec3(0, %f * toNormalized(getDataValue()), 0));
}
"""
"""A shader that displays an image in blue in Neuroglancer"""
blue_shader = """
void main() {
emitRGB(vec3(0, 0, %f * toNormalized(getDataValue())));
}
"""
#
# Monkey-patch Neuroglancer to control the color of the annotation dots
#
if not hasattr(neuroglancer.PointAnnotationLayer, "annotation_color"):
from neuroglancer.viewer_state import wrapped_property, optional, text_type
neuroglancer.PointAnnotationLayer.annotation_color = \
wrapped_property('annotationColor', optional(text_type))
default_voxel_size = (1000, 1000, 1000)
def layer(txn, name, img, shader, multiplier, offx=0, offy=0, offz=0,
          voxel_size=default_voxel_size):
    """Add an image layer to Neuroglancer

    :param txn: The transaction context of the viewer
    :param name: The name of the layer as displayed in Neuroglancer
    :param img: The image to display
    :param shader: the shader to use when displaying, e.g. gray_shader
    :param multiplier: the multiplier to apply to the normalized data value.
    This can be used to brighten or dim the image.
    :param offx: x voxel offset of the volume within the viewer
    :param offy: y voxel offset of the volume within the viewer
    :param offz: z voxel offset of the volume within the viewer
    :param voxel_size: physical size of one voxel as an (x, y, z) tuple;
    defaults to `default_voxel_size`
    """
    # Scale so that the 99.9th-percentile intensity maps to `multiplier`.
    frac = multiplier / np.percentile(img, 99.9)
    if img.dtype.kind in ("i", "u"):
        # Integer volumes are normalized by the dtype's max inside the
        # shader's toNormalized(), so compensate here.
        frac = frac * np.iinfo(img.dtype).max
    txn.layers[name] = neuroglancer.ImageLayer(
        source = neuroglancer.LocalVolume(img,
                                          voxel_offset=(offx, offy, offz),
                                          voxel_size=voxel_size),
        shader = shader % frac
    )
def seglayer(txn, name, seg, offx=0, offy=0, offz=0,
             voxel_size=default_voxel_size):
    """Add a segmentation layer

    :param txn: the neuroglancer transaction
    :param name: the display name of the segmentation
    :param seg: the segmentation to display (an integer label volume)
    :param offx: x voxel offset of the volume within the viewer
    :param offy: y voxel offset of the volume within the viewer
    :param offz: z voxel offset of the volume within the viewer
    :param voxel_size: physical size of one voxel as an (x, y, z) tuple;
    defaults to `default_voxel_size`
    """
    # NOTE(review): labels are cast to uint16, so segment ids above 65535
    # would wrap -- confirm label ranges before using on large segmentations.
    txn.layers[name] = neuroglancer.SegmentationLayer(
        source=neuroglancer.LocalVolume(
            seg.astype(np.uint16),
            voxel_offset=(offx, offy, offz),
            voxel_size=voxel_size))
def pointlayer(txn, name, x, y, z, color):
    """Display a set of annotation points in the viewer.

    :param txn: the neuroglancer viewer transaction context
    :param name: the displayable name of the point layer
    :param x: the x coordinate per point
    :param y: the y coordinate per point
    :param z: the z coordinate per point
    :param color: the color of the points in the layer, e.g. "red", "yellow"
    """
    coords = np.column_stack((x, y, z))
    annotation_layer = neuroglancer.PointAnnotationLayer(points=coords,
                                                      annotation_color=color)
    txn.layers[name] = annotation_layer
def has_layer(txn, name):
    """Tell whether the viewer state contains a layer with the given name.

    :param txn: A viewer state transaction, e.g. viewer.txn()
    :param name: the layer name to search for
    """
    return any(current.name == name for current in txn.layers)
def post_message_immediately(viewer, topic, message):
    """Post a message to a viewer w/o waiting for the event loop

    :param viewer: the neuroglancer viewer
    :param topic: the status message topic
    :param message: the message to display
    """
    cs, generation = \
        viewer.config_state.state_and_generation
    cs = deepcopy(cs)

    cs.status_messages[topic] = message
    viewer.config_state.set_state(
        cs, existing_generation=generation)
    # Pull the just-queued state-update callback off the server's IOLoop and
    # run it synchronously instead of waiting for the loop to schedule it.
    ioloop = neuroglancer.server.global_server.ioloop
    cb = ioloop._callbacks.pop()
    try:
        ioloop._run_callback(cb)
    except Exception:
        # Requeue the callback so the event loop can retry it.  The original
        # error path called the nonexistent `_callbacks.push`, which would
        # itself raise AttributeError; `append` is the correct method.
        ioloop._callbacks.append(cb)
| [
"copy.deepcopy",
"neuroglancer.LocalVolume",
"neuroglancer.viewer_state.optional",
"numpy.iinfo",
"numpy.percentile",
"numpy.column_stack"
] | [((3785, 3797), 'copy.deepcopy', 'deepcopy', (['cs'], {}), '(cs)\n', (3793, 3797), False, 'from copy import deepcopy\n'), ((1099, 1118), 'neuroglancer.viewer_state.optional', 'optional', (['text_type'], {}), '(text_type)\n', (1107, 1118), False, 'from neuroglancer.viewer_state import wrapped_property, optional, text_type\n'), ((1712, 1736), 'numpy.percentile', 'np.percentile', (['img', '(99.9)'], {}), '(img, 99.9)\n', (1725, 1736), True, 'import numpy as np\n'), ((1885, 1975), 'neuroglancer.LocalVolume', 'neuroglancer.LocalVolume', (['img'], {'voxel_offset': '(offx, offy, offz)', 'voxel_size': 'voxel_size'}), '(img, voxel_offset=(offx, offy, offz), voxel_size=\n voxel_size)\n', (1909, 1975), False, 'import neuroglancer\n'), ((3065, 3091), 'numpy.column_stack', 'np.column_stack', (['(x, y, z)'], {}), '((x, y, z))\n', (3080, 3091), True, 'import numpy as np\n'), ((1796, 1815), 'numpy.iinfo', 'np.iinfo', (['img.dtype'], {}), '(img.dtype)\n', (1804, 1815), True, 'import numpy as np\n')] |
"""
gradient descent method1:
gradient methods with fixed step size
"""
import numpy as np
def main():
    """Fit y = theta0 + theta1 * x by batch gradient descent with a fixed step size."""
    x = np.random.rand(100)  # 100 samples drawn uniformly from [0, 1)
    y = 10 + 5 * x           # ground-truth linear relation
    print(len(x), len(y))
    epsilon = 0.01           # stop once the summed squared error drops below this
    cnt = 0
    alpha = 0.001            # fixed gradient-descent step size
    theta0 = 0               # intercept (weight of the constant feature x0 = 1)
    theta1 = 0               # slope (weight of x1 = xi)
    error1 = 0
    error0 = 0
    while True:
        cnt = cnt + 1
        # Batch gradient: accumulate residuals over the whole data set.
        grad0 = 0
        grad1 = 0
        for i in range(len(x)):
            residual = y[i] - (theta0 + theta1 * x[i])
            grad0 = grad0 + residual
            grad1 = grad1 + residual * x[i]
        theta0 = theta0 + alpha * grad0 / len(x)
        theta1 = theta1 + alpha * grad1 / len(x)
        # Summed squared error of the current fit over all samples.
        error1 = 0
        for i in range(len(x)):
            error1 = error1 + (y[i] - (theta0 + theta1 * x[i])) ** 2
        if abs(error1) < epsilon:
            print('error1-0:', abs(error1))
            break
        else:
            error0 = error1
            print('error1:', error1)
    print('theta0:', theta0, 'theta1:', theta1)


if __name__ == '__main__':
    main()
| [
"numpy.random.rand"
] | [((123, 142), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (137, 142), True, 'import numpy as np\n')] |
import numpy as np
import skimage
from metric import metric
from nrm import nrm
from im2tiles import im2tiles
from numba import jit, njit
def Quality_Map(img, ref):
    """
    Build a per-tile quality map of `img` against the reference `ref`.

    Each returned entry is a tile-shaped array filled with that tile's
    scalar metric score (both tiles scaled by 255 after nrm()
    normalization -- nrm() presumably maps into [0, 1]; confirm).

    Args:
        img: degraded image, treated as an indexable collection of tiles
        ref: reference image, indexed the same way as `img`
    Returns:
        list of arrays, one per tile, each constant at the tile's score
    """
    blockSize = 32
    # Tile sizes are currently unused: the im2tiles() calls are disabled and
    # img/ref are consumed as tile collections directly.
    size_y = min(blockSize, img.shape[0])
    size_x = min(blockSize, img.shape[1])
    size_z = 1
    # Tiles_img = im2tiles(img, size_x, size_y, size_z)
    # Tiles_ref = im2tiles(ref, size_x, size_y, size_z)
    # Work on a copy: the original implementation aliased `img` and
    # overwrote the caller's array in place.
    Tiles_img = img.copy()
    Tiles_ref = ref
    # NOTE(review): `.size` counts all elements, which only equals the number
    # of tiles when the collection is one-dimensional -- confirm input shape.
    for idx in range(0, Tiles_img.size):
        n1 = metric(255 * nrm(Tiles_ref[idx]), 255 * nrm(Tiles_img[idx]))
        Tiles_img[idx] = n1 * np.ones(Tiles_img[idx].shape)
    Qmap = list(Tiles_img)
    return Qmap
"nrm.nrm",
"numpy.ones"
] | [((611, 640), 'numpy.ones', 'np.ones', (['Tiles_img[idx].shape'], {}), '(Tiles_img[idx].shape)\n', (618, 640), True, 'import numpy as np\n'), ((532, 551), 'nrm.nrm', 'nrm', (['Tiles_ref[idx]'], {}), '(Tiles_ref[idx])\n', (535, 551), False, 'from nrm import nrm\n'), ((559, 578), 'nrm.nrm', 'nrm', (['Tiles_img[idx]'], {}), '(Tiles_img[idx])\n', (562, 578), False, 'from nrm import nrm\n')] |
# Copyright (c) 2015, <NAME>
# See LICENSE file for details: <https://github.com/moble/scri/blob/master/LICENSE>
import os
import inspect
import functools
import warnings
import socket
import datetime
import pprint
import copy
import numpy as np
import quaternion
import scipy.constants as spc
from scipy.interpolate import CubicSpline
from . import *
@jit("void(c16[:,:], f8[:])")
def complex_array_norm(c, s):
for i in range(len(s)):
s[i] = 0.0
for j in range(c.shape[1]):
s[i] += c[i, j].real ** 2 + c[i, j].imag ** 2
return
@jit("void(c16[:,:], f8[:])")
def complex_array_abs(c, s):
for i in range(len(s)):
s[i] = 0.0
for j in range(c.shape[1]):
s[i] += c[i, j].real ** 2 + c[i, j].imag ** 2
s[i] = np.sqrt(s[i])
return
def waveform_alterations(func):
    """Decorator that guards `self.__history_depth__` around a call.

    The current value of ``self.__history_depth__`` is saved, incremented by
    one for the duration of the wrapped call, and restored afterwards; the
    wrapped function's return value is passed through unchanged.  When the
    depth is zero on entry, an empty history entry is appended first.

    Apply this to any member function that could alter the waveform it is
    called on, or that could return an altered copy of the original.
    Typically the function itself decrements the depth manually just before
    appending to the history (usually at its end); there is no need to undo
    that, since this decorator restores the saved depth regardless.
    """
    @functools.wraps(func)
    def guarded(self, *args, **kwargs):
        if self.__history_depth__ == 0:
            self._append_history("")
        saved_depth = self.__history_depth__
        self.__history_depth__ = saved_depth + 1
        outcome = func(self, *args, **kwargs)
        self.__history_depth__ = saved_depth
        return outcome
    return guarded
def test_without_assertions(errs, val, msg=""):
"""Replacement for np.testing.assert_
This function should be able to replace `assert_`, but rather than raising an exception, this just adds a
description of the problem to the `errors` variable.
"""
if not val:
try:
smsg = msg()
except TypeError:
smsg = msg
errs += [smsg]
def test_with_assertions(errs, val, msg=""):
np.testing.assert_(val, "Failed assertion:\n\t" + msg)
class _object:
    """Useless class to allow multiple inheritance"""

    def __init__(self, *args, **kwargs):
        # Swallow all constructor arguments and continue up the MRO with none,
        # so cooperative `super().__init__` chains terminate cleanly here.
        super().__init__()
class WaveformBase(_object):
"""Object containing time, frame, and data, along with related information
This object is just the base object from which these other classes are derived:
* WaveformModes
* WaveformGrid
* WaveformInDetector
* WaveformInDetectorFT
For more specific information, see the documentation of those classes.
Attributes
----------
t : float array
Time steps corresponding to other data
frame : quaternion array
Rotors taking static basis onto decomposition basis
data : 2-d array of complex or real numbers
The nature of this data depends on the derived type. First index is time, second index depends on type.
history : list of strings
As far as possible, all functions applied to the object are recorded in the `history` variable. In fact,
the object should almost be able to be recreated using the commands in the history list. Commands taking
large arrays, however, are shortened -- so the data will not be entirely reconstructable.
version_hist : list of pairs of strings
Records the git hash and description for any change in the way SpEC outputs waveform data.
frameType : int
Index corresponding to `scri.FrameType` appropriate for `data`.
dataType : int
Index corresponding to `scri.DataType` appropriate for `data`.
r_is_scaled_out : bool
True if the `data` have been multiplied by the appropriate power of radius so that the asymptotic value can
be finite and nonzero.
m_is_scaled_out : bool
True if the `data` have been scaled by the appropriate value of the total mass so that they are dimensionless.
num : int (read only)
Automatically assigned number of this object. The constructor of this type keeps count of the number of
objects it has created, to assign each object a more-or-less unique ID for use in the history strings. This
counter is reset at the beginning of each python session. Subclasses should automatically have a different
counter.
Indexing
--------
WaveformBase objects can be indexed much like a numpy array, where the first dimension gives the time indices,
and the second gives the data-set indices. This will return another WaveformBase object containing slices of the
original data.
It is important to note, however, that as with numpy array slices, slicing a WaveformBase will not typically copy
the original data; the result will simply be a view into the data. This means that changing the data in the
slice can change the data in the original. If you want to make a copy, you should probably use the copy
constructor: `W2 = WaveformBase(W1)`. It is also possible to use the standard copy.deepcopy method.
Also note that the first slice dimension corresponds to the indices of the time, but the second dimension may NOT
correspond to indices for derived types. In particular, for `WaveformModes`, the second index corresponds to
modes, because this type enforces completeness of each ell mode. For the `WaveformBase` type, however,
the second index does correspond to the second dimension of the data.
For example,
>>> W = WaveformBase()
>>> W[10:-20]
will give all columns in the data, but only at times starting with the
10th time step, and ending one before the -20th time step. Meanwhile,
>>> W[10:-20,2]
will give the same range of times, but only the second column (unless the subclass overrides this behavior,
as in `WaveformModes`). Similarly,
>>> W[10:-20,2:5]
will return the same range of times, along with the 2,3,4 columns. Note the lack of 5 column, for consistency
with python's usual slice syntax.
>>> W[:,:0]
will return all time steps, along with all `frame` data, but `data` will be empty (because the `:0` term selects
everything before the 0th element). Similarly,
>>> W[:0,:0]
is empty of all numerical data.
"""
__num = 0 # Used to count number of Waveforms created
    def __init__(self, *args, **kwargs):
        """Initializer for WaveformBase object

        WaveformBase objects may be created in two ways.  First, by copying an existing WaveformBase object -- in
        which case the only parameter should be that object.  Second, by passing any of the (writable) attributes as
        keywords.

        In both cases, the last step in initialization is to check the validity of the result.  By default,
        this will raise an exception if the result is not valid.  An additional keyword parameter
        `override_exception_from_invalidity` may be set if this is not desired.  This may be necessary if only some
        of the data can be passed in to the initializer, for example.

        Keyword parameters
        ------------------
        t : float array, empty default
        frame : quaternion array, empty default
        data : 2-d complex array, empty default
        history : list of strings, empty default
            This is the list of strings prepended to the history; an additional line is appended, showing the call
            to this initializer.
        version_hist : list of pairs of strings, empty default
            Remains empty if waveform data is on version 0.
        frameType : int, defaults to 0 (UnknownFrameType)
            See scri.FrameNames for possible values
        dataType : int, defaults to 0 (UnknownDataType)
            See scri.DataNames for possible values
        r_is_scaled_out : bool, defaults to False
            Set to True if the data represented could approach a nonzero value at Scri
        m_is_scaled_out : bool, defaults to False
            Set to True if the data represented are dimensionless and in units where the total mass is 1
        override_exception_from_invalidity : bool, defaults to False
            If True, report any errors, but do not raise them.
        constructor_statement : str, optional
            If this is present, it will replace the default constructor statement added to the history.  It is
            prepended with a string of the form `'{0} = '.format(self)`, which prints the ID of the resulting object
            (unique to this session only).
        """
        # Keep a pristine copy so the full argument list can be logged below
        original_kwargs = kwargs.copy()
        super().__init__(*args, **kwargs)  # to ensure proper calling in multiple inheritance
        override_exception_from_invalidity = kwargs.pop("override_exception_from_invalidity", False)
        # Assign this instance's session-unique ID and bump the class counter
        self.__num = type(self).__num
        self.__history_depth__ = 0
        type(self).__num += 1  # Increment class's instance tracker
        if len(args) == 0:
            # Keyword form: each attribute is popped from kwargs, with a default
            self.t = kwargs.pop("t", np.empty((0,), dtype=float))
            self.frame = kwargs.pop("frame", np.empty((0,), dtype=np.quaternion))
            self.data = kwargs.pop("data", np.empty((0, 0), dtype=complex))
            # Information about this object
            self.history = kwargs.pop("history", [])
            self.version_hist = kwargs.pop("version_hist", [])
            self.frameType = kwargs.pop("frameType", UnknownFrameType)
            self.dataType = kwargs.pop("dataType", UnknownDataType)
            self.r_is_scaled_out = kwargs.pop("r_is_scaled_out", False)
            self.m_is_scaled_out = kwargs.pop("m_is_scaled_out", False)
            if "constructor_statement" in kwargs:
                self._append_history("{} = {}".format(self, kwargs.pop("constructor_statement")))
            else:
                # Temporarily shorten numpy's array printing so the logged
                # constructor call stays readable for large inputs
                opts = np.get_printoptions()
                np.set_printoptions(threshold=6)
                self._append_history(
                    "{} = {}(**{})".format(self, type(self).__name__, pprint.pformat(original_kwargs, indent=4))
                )
                np.set_printoptions(**opts)
        elif len(args) == 1 and isinstance(args[0], type(self)):
            # Copy-constructor form: deep-copy the arrays, shallow-copy metadata
            other = args[0]
            self.t = np.copy(other.t)
            self.frame = np.copy(other.frame)
            self.data = np.copy(other.data)
            # Information about this object
            self.history = other.history[:]
            self.version_hist = other.version_hist[:]
            self.frameType = other.frameType
            self.dataType = other.dataType
            self.r_is_scaled_out = other.r_is_scaled_out
            self.m_is_scaled_out = other.m_is_scaled_out
            self._append_history(["", "{} = {}({})".format(self, type(self).__name__, other)])
        else:
            raise ValueError(
                "Did not understand input arguments to `{}` constructor.\n".format(type(self).__name__)
                + "Note that explicit data values must be passed as keywords,\n"
                + "whereas objects to be copied must be passed as the sole argument."
            )
        # Record provenance (host, directory, timestamp, library versions),
        # then validate; validation may alter the object in place.
        hostname = socket.gethostname()
        cwd = os.getcwd()
        time = datetime.datetime.now().isoformat()
        self.__history_depth__ = 1
        self.ensure_validity(alter=True, assertions=(not override_exception_from_invalidity))
        self.__history_depth__ = 0
        self._append_history([f"hostname = {hostname}", f"cwd = {cwd}", f"datetime = {time}", version_info()], 1)
        if kwargs:
            # Anything left in kwargs was not recognized -- warn, don't raise
            warning = "\nIn `{}` initializer, unused keyword arguments:\n".format(type(self).__name__)
            warning += pprint.pformat(kwargs, indent=4)
            warnings.warn(warning)
@waveform_alterations
def ensure_validity(self, alter=True, assertions=False):
"""Try to ensure that the `WaveformBase` object is valid
This tests various qualities of the WaveformBase's members that are frequently assumed throughout the code.
If the optional argument `alter` is `True` (which is the default), this function tries to alter the
WaveformBase in place to ensure validity. Note that this is not always possible. If that is the case,
an exception may be raised. For example, if the `t` member is not a one-dimensional array of floats,
it is not clear what that data should be. Similarly, if the `t` and `data` members have mismatched
dimensions, there is no way to resolve that automatically.
Also note that this is almost certainly not be an exhaustive test of all assumptions made in the code.
If the optional `assertions` argument is `True` (default is `False`), the first test that fails will raise an
assertion error.
"""
import numbers
errors = []
alterations = []
if assertions:
test = test_with_assertions
else:
test = test_without_assertions
# Ensure that the various data are correct and compatible
test(
errors,
isinstance(self.t, np.ndarray),
"isinstance(self.t, np.ndarray) # type(self.t)={}".format(type(self.t)),
)
test(
errors,
self.t.dtype == np.dtype(np.float),
f"self.t.dtype == np.dtype(np.float) # self.t.dtype={self.t.dtype}",
)
if alter and self.t.ndim == 2 and self.t.shape[1] == 1:
self.t = self.t[:, 0]
alterations += ["{0}.t = {0}.t[:,0]".format(self)]
test(
errors,
not self.t.size or self.t.ndim == 1,
f"not self.t.size or self.t.ndim==1 # self.t.size={self.t.size}; self.t.ndim={self.t.ndim}",
)
test(
errors,
self.t.size <= 1 or np.all(np.diff(self.t) > 0.0),
"self.t.size<=1 or np.all(np.diff(self.t)>0.0) "
"# self.t.size={}; max(np.diff(self.t))={}".format(
self.t.size, (max(np.diff(self.t)) if self.t.size > 1 else np.nan)
),
)
test(errors, np.all(np.isfinite(self.t)), "np.all(np.isfinite(self.t))")
if alter and self.frame is None:
self.frame = np.empty((0,), dtype=np.quaternion)
alterations += [f"{self}.frame = np.empty((0,), dtype=np.quaternion)"]
test(
errors,
isinstance(self.frame, np.ndarray),
"isinstance(self.frame, np.ndarray) # type(self.frame)={}".format(type(self.frame)),
)
if alter and self.frame.dtype == np.dtype(np.float):
try: # Might fail because of shape
self.frame = quaternion.as_quat_array(self.frame)
alterations += ["{0}.frame = quaternion.as_quat_array({0}.frame)".format(self)]
except (AssertionError, ValueError):
pass
test(
errors,
self.frame.dtype == np.dtype(np.quaternion),
f"self.frame.dtype == np.dtype(np.quaternion) # self.frame.dtype={self.frame.dtype}",
)
test(
errors,
self.frame.size <= 1 or self.frame.size == self.t.size,
"self.frame.size<=1 or self.frame.size==self.t.size "
"# self.frame.size={}; self.t.size={}".format(self.frame.size, self.t.size),
)
test(errors, np.all(np.isfinite(self.frame)), "np.all(np.isfinite(self.frame))")
test(
errors,
isinstance(self.data, np.ndarray),
"isinstance(self.data, np.ndarray) # type(self.data)={}".format(type(self.data)),
)
test(errors, self.data.ndim >= 1, f"self.data.ndim >= 1 # self.data.ndim={self.data.ndim}")
test(
errors,
self.data.shape[0] == self.t.shape[0],
"self.data.shape[0]==self.t.shape[0] "
"# self.data.shape[0]={}; self.t.shape[0]={}".format(self.data.shape[0], self.t.shape[0]),
)
test(errors, np.all(np.isfinite(self.data)), "np.all(np.isfinite(self.data))")
# Information about this object
if alter and not self.history:
self.history = [""]
alterations += [f"{self}.history = ['']"]
if alter and isinstance(self.history, str):
self.history = self.history.split("\n")
alterations += ["{0}.history = {0}.history.split('\n')".format(self)]
test(
errors,
isinstance(self.history, list),
"isinstance(self.history, list) # type(self.history)={}".format(type(self.history)),
)
test(
errors,
isinstance(self.history[0], str),
"isinstance(self.history[0], str) # type(self.history[0])={}".format(type(self.history[0])),
)
test(
errors,
isinstance(self.frameType, numbers.Integral),
"isinstance(self.frameType, numbers.Integral) # type(self.frameType)={}".format(type(self.frameType)),
)
test(errors, self.frameType in FrameType, f"self.frameType in FrameType # self.frameType={self.frameType}")
test(
errors,
isinstance(self.dataType, numbers.Integral),
"isinstance(self.dataType, numbers.Integral) # type(self.dataType)={}".format(type(self.dataType)),
)
test(errors, self.dataType in DataType, f"self.dataType in DataType # self.dataType={self.dataType}")
test(
errors,
isinstance(self.r_is_scaled_out, bool),
"isinstance(self.r_is_scaled_out, bool) # type(self.r_is_scaled_out)={}".format(type(self.r_is_scaled_out)),
)
test(
errors,
isinstance(self.m_is_scaled_out, bool),
"isinstance(self.m_is_scaled_out, bool) # type(self.m_is_scaled_out)={}".format(type(self.m_is_scaled_out)),
)
test(
errors,
isinstance(self.num, numbers.Integral),
"isinstance(self.num, numbers.Integral) # type(self.num)={}".format(type(self.num)),
)
if alterations:
self._append_history(alterations)
warnings.warn("The following alterations were made:\n\t" + "\n\t".join(alterations))
if errors:
warnings.warn("The following conditions were found to be incorrectly False:\n\t" + "\n\t".join(errors))
return False
self.__history_depth__ -= 1
self._append_history("WaveformBase.ensure_validity" + f"({self}, alter={alter}, assertions={assertions})")
return True
    @property
    def is_valid(self):
        """True if `ensure_validity` passes without alterations or exceptions."""
        return self.ensure_validity(alter=False, assertions=False)
    # Data sizes
    @property
    def n_data_sets(self):
        """Total number of data columns (product of all non-time dimensions of `data`)."""
        return int(np.prod(self.data.shape[1:]))
    @property
    def n_times(self):
        """Number of time steps in this waveform."""
        return self.t.shape[0]
    # Calculate weights
    @property
    def spin_weight(self):
        """Spin weight of this data type (module-level `SpinWeights` table)."""
        return SpinWeights[self.dataType]
    @property
    def conformal_weight(self):
        """Conformal weight, reduced by the radius scaling if `r` is scaled out."""
        return ConformalWeights[self.dataType] + (-RScaling[self.dataType] if self.r_is_scaled_out else 0)
    @property
    def gamma_weight(self):
        """Non-conformal effect of a boost.
        This factor allows for mass-scaling, for example.  If the waveform describes `r*h/M`, for example,
        then `r` and `h` vary by the conformal weight, which depends on the direction; whereas `M` is a monopole,
        and thus cannot depend on the direction.  Instead, `M` simply obeys the standard formula, scaling with gamma.
        """
        return (MScaling[self.dataType] if self.m_is_scaled_out else 0) + (
            -RScaling[self.dataType] if (self.r_is_scaled_out and self.m_is_scaled_out) else 0
        )
    @property
    def r_scaling(self):
        """Power of radius scaled out of this data type (module-level `RScaling` table)."""
        return RScaling[self.dataType]
    @property
    def m_scaling(self):
        """Power of mass scaled out of this data type (module-level `MScaling` table)."""
        return MScaling[self.dataType]
    # Text descriptions
    @property
    def num(self):
        """Read-only per-session instance counter assigned at construction."""
        return self.__num
    @property
    def frame_type_string(self):
        """Human-readable name of `frameType` (module-level `FrameNames` table)."""
        return FrameNames[self.frameType]
    @property
    def data_type_string(self):
        """Human-readable name of `dataType` (module-level `DataNames` table)."""
        return DataNames[self.dataType]
    @property
    def data_type_latex(self):
        """LaTeX representation of `dataType` (module-level `DataNamesLaTeX` table)."""
        return DataNamesLaTeX[self.dataType]
@property
def descriptor_string(self):
"""Create a simple string describing the content of the waveform
This string will be suitable for file names. For example, 'rMpsi4' or 'rhOverM'. It uses the waveform's
knowledge of itself, so if this is incorrect, the result will be incorrect.
"""
if self.dataType == UnknownDataType:
return self.data_type_string
descriptor = ""
if self.r_is_scaled_out:
if RScaling[self.dataType] == 1:
descriptor = "r"
elif RScaling[self.dataType] > 1:
descriptor = "r" + str(RScaling[self.dataType])
if self.m_is_scaled_out:
Mexponent = MScaling[self.dataType] - (RScaling[self.dataType] if self.r_is_scaled_out else 0)
if Mexponent < -1:
descriptor = descriptor + self.data_type_string + "OverM" + str(-Mexponent)
elif Mexponent == -1:
descriptor = descriptor + self.data_type_string + "OverM"
elif Mexponent == 0:
descriptor = descriptor + self.data_type_string
elif Mexponent == 1:
descriptor = descriptor + "M" + self.data_type_string
elif Mexponent > 1:
descriptor = descriptor + "M" + str(Mexponent) + self.data_type_string
else:
descriptor = descriptor + self.data_type_string
return descriptor
    # Data simplifications
    @property
    def data_2d(self):
        """View of `data` reshaped to two dimensions: (n_times, n_data_sets)."""
        return self.data.reshape((self.n_times, self.n_data_sets))
    @property
    def abs(self):
        """Element-wise absolute value (modulus) of the data."""
        return np.abs(self.data)
    @property
    def arg(self):
        """Element-wise complex argument (phase) of the data, in radians."""
        return np.angle(self.data)
    @property
    def arg_unwrapped(self):
        """Complex argument of the data, unwrapped along the time axis to be continuous."""
        return np.unwrap(np.angle(self.data), axis=0)
def norm(self, take_sqrt=False, indices=slice(None, None, None)):
"""L2 norm of the waveform
The optional arguments say whether to take the square-root of
the norm at each time, and allow restriction to a slice of the
data, respectively.
"""
if indices == slice(None, None, None):
n = np.empty((self.n_times,), dtype=float)
else:
n = np.empty((self.t[indices].shape[0],), dtype=float)
if take_sqrt:
complex_array_abs(self.data_2d[indices], n)
else:
complex_array_norm(self.data_2d[indices], n)
return n
def max_norm_index(self, skip_fraction_of_data=4):
"""Index of time step with largest norm
The optional argument skips a fraction of the data. The default is
4, which means that it only searches the last three-fourths of the
data for the max. If 0 or 1 is input, this is ignored, and all the
data is searched.
"""
if skip_fraction_of_data == 0 or skip_fraction_of_data == 1:
indices = slice(None, None, None)
return np.argmax(self.norm(indices=indices))
else:
indices = slice(self.n_times // skip_fraction_of_data, None, None)
return np.argmax(self.norm(indices=indices)) + (self.n_times // skip_fraction_of_data)
def max_norm_time(self, skip_fraction_of_data=4):
"""Return time at which largest norm occurs in data
See `help(max_norm_index)` for explanation of the optional argument.
"""
return self.t[self.max_norm_index(skip_fraction_of_data=skip_fraction_of_data)]
def compare(self, w_a, min_time_step=0.005, min_time=-3.0e300):
"""Return a waveform with differences between the two inputs
This function simply subtracts the data in this waveform from the data
in Waveform A, and finds the rotation needed to take this frame into frame A.
Note that the waveform data are stored as complex numbers, rather than as
modulus and phase.
"""
from quaternion.means import mean_rotor_in_chordal_metric
from scri.extrapolation import intersection
import scri.waveform_modes
if self.frameType != w_a.frameType:
warning = (
"\nWarning:"
+ "\n This Waveform is in the "
+ self.frame_type_string
+ " frame,"
+ "\n The Waveform in the argument is in the "
+ w_a.frame_type_string
+ " frame."
+ "\n Comparing them probably does not make sense.\n"
)
warnings.warn(warning)
if self.n_modes != w_a.n_modes:
raise Exception(
"Trying to compare waveforms with mismatched LM data."
+ "\nA.n_modes="
+ str(w_a.n_modes)
+ "\tB.n_modes()="
+ str(self.n_modes)
)
new_times = intersection(self.t, w_a.t)
w_c = scri.waveform_modes.WaveformModes(
t=new_times,
data=np.zeros((new_times.shape[0], self.n_modes), dtype=self.data.dtype),
history=[],
version_hist=self.version_hist,
frameType=self.frameType,
dataType=self.dataType,
r_is_scaled_out=self.r_is_scaled_out,
m_is_scaled_out=self.m_is_scaled_out,
ell_min=self.ell_min,
ell_max=self.ell_max,
)
w_c.history += ["B.compare(A)\n"]
w_c.history += ["### A.history.str():\n" + "".join(w_a.history)]
w_c.history += ["### B.history.str():\n" + "".join(self.history)]
w_c.history += ["### End of old histories from `compare`"]
# Process the frame, depending on the sizes of the input frames
if w_a.frame.shape[0] > 1 and self.frame.shape[0] > 1:
# Find the frames interpolated to the appropriate times
Aframe = quaternion.squad(w_a.frame, w_a.t, w_c.t)
Bframe = quaternion.squad(self.frame, self.t, w_c.t)
# Assign the data
w_c.frame = Aframe * np.array([np.quaternion.inverse(v) for v in Bframe])
elif w_a.frame.shape[0] == 1 and self.frame.shape[0] > 1:
# Find the frames interpolated to the appropriate times
Bframe = np.quaternion.squad(self.frame, self.t, w_c.t)
# Assign the data
w_c.frame.resize(w_c.n_times)
w_c.frame = w_a.frame[0] * np.array([np.quaternion.inverse(v) for v in Bframe])
elif w_a.frame.shape[0] > 1 and self.frame.shape[0] == 1:
# Find the frames interpolated to the appropriate times
Aframe = np.quaternion.squad(w_a.frame, w_a.t, w_c.t)
# Assign the data
w_c.frame.resize(w_c.n_times)
w_c.frame = Aframe * np.quaternion.inverse(self.frame[0])
elif w_a.frame.shape[0] == 1 and self.frame.shape[0] == 1:
# Assign the data
w_c.frame = np.array(w_a.frame[0] * np.quaternions.inverse(self.frame[0]))
elif w_a.frame.shape[0] == 0 and self.frame.shape[0] == 1:
# Assign the data
w_c.frame = np.array(np.quaternions.inverse(self.frame[0]))
elif w_a.frame.shape[0] == 1 and self.frame.shape[0] == 1:
# Assign the data
w_c.frame = np.array(w_a.frame[0])
# else, leave the frame data empty
# If the average frame rotor is closer to -1 than to 1, flip the sign
if w_c.frame.shape[0] == w_c.n_times:
R_m = mean_rotor_in_chordal_metric(w_c.frame, w_c.t)
if quaternion.rotor_chordal_distance(R_m, -quaternion.one) < quaternion.rotor_chordal_distance(
R_m, quaternion.one
):
w_c.frame = -w_c.frame
elif w_c.frame.shape[0] == 1:
if quaternion.rotor_chordal_distance(w_c.frame[0], -quaternion.one) < quaternion.rotor_chordal_distance(
w_c.frame[0], quaternion.one
):
w_c.frame[0] = -w_c.frame[0]
# Now loop over each mode filling in the waveform data
for AMode in range(w_a.n_modes):
# Assume that all the ell,m data are the same, but not necessarily in the same order
BMode = self.index(w_a.LM[AMode][0], w_a.LM[AMode][1])
# Initialize the interpolators for this data set
# (Can't just re-view here because data are not contiguous)
splineReA = CubicSpline(w_a.t, w_a.data[:, AMode].real)
splineImA = CubicSpline(w_a.t, w_a.data[:, AMode].imag)
splineReB = CubicSpline(self.t, self.data[:, BMode].real)
splineImB = CubicSpline(self.t, self.data[:, BMode].imag)
# Assign the data from the transition
w_c.data[:, AMode] = (splineReA(w_c.t) - splineReB(w_c.t)) + 1j * (splineImA(w_c.t) - splineImB(w_c.t))
return w_c
    @property
    def data_dot(self):
        """First time-derivative of `data`, via cubic-spline differentiation."""
        return CubicSpline(self.t, self.data).derivative()(self.t)
    @property
    def data_ddot(self):
        """Second time-derivative of `data`, via cubic-spline differentiation."""
        return CubicSpline(self.t, self.data).derivative(2)(self.t)
    @property
    def data_int(self):
        """First antiderivative (time integral) of `data`, via cubic splines."""
        return CubicSpline(self.t, self.data).antiderivative()(self.t)
    @property
    def data_iint(self):
        """Second antiderivative of `data`, via cubic splines."""
        return CubicSpline(self.t, self.data).antiderivative(2)(self.t)
    # Data representations
def _append_history(self, hist, additional_depth=0):
"""Add to the object's history log
Input may be a single string or list of strings. Any newlines will be split into separate strings. Each
such string is then prepended with a number of `#`s, indicating that the content of that line was called from
within a member function, or is simply a piece of information relevant to the waveform. The idea behind this
is that the history should be -- as nearly as possible -- a script that could be run to reproduce the
waveform, so the lines beginning with `#` would not be run.
The number of `#`s is controlled by the object's `__history_depth__` field and the optional input to this
function; their sum is the number prepended. The user should never have to deal with this issue,
but all member functions should increment the `__history_depth__` before calling another member function,
and decrement it again as necessary before recording itself in the history. Also, for any lines added just
for informational purposes (e.g., the hostname, pwd, date, and versions added in `__init__`), this function
should be called with `1` as the optional argument.
"""
if not isinstance(hist, list):
hist = [hist]
self.history += [
"# " * (self.__history_depth__ + additional_depth) + hist_line
for hist_element in hist
for hist_line in hist_element.split("\n")
]
def __str__(self):
# "The goal of __str__ is to be readable; the goal of __repr__ is to be unambiguous." --- stackoverflow
return "{}_{}".format(type(self).__name__, self.num)
    def __repr__(self):
        # "The goal of __str__ is to be readable; the goal of __repr__ is to be unambiguous." --- stackoverflow
        from textwrap import dedent
        # Temporarily shorten numpy's printed output so large arrays stay readable
        opts = np.get_printoptions()
        np.set_printoptions(threshold=6, linewidth=150, precision=6)
        rep = """
    {0}(
        t={1},
        frame={2},
        data={5},
        frameType={6}, dataType={7},
        r_is_scaled_out={8}, m_is_scaled_out={9}) # num = {10}"""
        # NOTE(review): placeholders {3} and {4} are absent from the template, so
        # `self.history` and `self.version_hist` below are passed but never
        # printed -- presumably omitted deliberately to keep the repr short.
        rep = rep.format(
            type(self).__name__,
            str(self.t).replace("\n", "\n" + " " * 15),
            str(self.frame).replace("\n", "\n" + " " * 19),
            self.history,
            self.version_hist,
            str(self.data).replace("\n", "\n" + " " * 18),
            self.frameType,
            self.dataType,
            self.r_is_scaled_out,
            self.m_is_scaled_out,
            self.num,
        )
        # Restore the caller's print options before returning
        np.set_printoptions(**opts)
        return dedent(rep)
    def __getstate__(self):
        """Get state of object for copying and pickling

        The only nontrivial operation is with quaternions, since they can't
        currently be pickled automatically.  We just view the frame array as
        a float array, and pickle as usual.

        Note that the (name-mangled) `num` counter is kept in the state here;
        `__setstate__` discards the pickled value on restore, so the new
        object keeps its own freshly assigned number.
        """
        state = copy.deepcopy(self.__dict__)
        # Store the float view of the quaternion array; restored in __setstate__
        state["frame"] = quaternion.as_float_array(self.frame)
        return state
    def __setstate__(self, state):
        """Set state of object for copying and pickling

        The only nontrivial operation is with quaternions, since they can't
        currently be pickled automatically.  We just view the frame array as
        a float array, and unpickle as usual, then convert the float array
        back to a quaternion array.
        """
        # Keep this object's own counter; remember the pickled one so the
        # history entry below can map the old ID onto the new one.
        new_num = self.__num
        old_num = state.get("_WaveformBase__num")
        self.__dict__.update(state)
        # Make sure to preserve auto-incremented num
        self.__num = new_num
        # Restore the quaternion view of the frame (stored as floats by __getstate__)
        self.frame = quaternion.as_quat_array(self.frame)
        self._append_history(f"copied, deepcopied, or unpickled as {self}", 1)
        self._append_history("{} = {}".format(self, f"{self}".replace(str(self.num), str(old_num))))
@waveform_alterations
def deepcopy(self):
"""Return a deep copy of the object
This is just an alias for `copy`, which is deep anyway.
"""
W = self.copy()
W.__history_depth__ -= 1
W._append_history(f"{W} = {self}.deepcopy()")
return W
@waveform_alterations
def copy(self):
"""Return a (deep) copy of the object
Note that this also copies all members if the object is a subclass. If you want a forgetful WaveformBase
object, you can simply use the copy constructor.
"""
W = type(self)()
state = copy.deepcopy(self.__dict__)
state.pop("_WaveformBase__num")
W.__dict__.update(state)
W.__history_depth__ -= 1
W._append_history(f"{W} = {self}.copy()")
return W
@waveform_alterations
def copy_without_data(self):
"""Return a copy of the object, with empty `t`, `frame`, and `data` fields
Note that subclasses may override this to set some related data members. For example,
`WaveformModes.copy_without_data` sets the `ell_min` and `ell_max` fields appropriately. If you wish to only
skip `t`, `frame`, and `data`, you can simply use `WaveformBase.copy_without_data(W)`. The latter is useful
if, for example, you will only be making changes to those three fields, and want everything else preserved.
Also note that some slicing operations can achieve similar -- but different -- goals. For example,
`w = w[:, :0]` will simply empty `data` and `ells`, without affecting the `time` and `frame`.
"""
W = type(self)()
state = copy.deepcopy(self.__dict__)
state.pop("_WaveformBase__num")
state.pop("t")
state.pop("frame")
state.pop("data")
W.__dict__.update(state)
W.__history_depth__ -= 1
W._append_history(f"{W} = {self}.copy_without_data()")
return W
def _allclose(
self, other, report_all=True, rtol=1e-10, atol=1e-10, compare_history_beginnings=False, exceptions=[]
):
"""Check that member data in two waveforms are the same
For data sets (time, modes, etc.), the numpy function `np.allclose` is used, with the input tolerances. See
that function's documentation for more details. The `*__num` datum is always ignored. By default,
the `history` is ignored, though this can be partially overridden -- in which case, the shortest subset of
the histories is compared for exact equality. This is probably only appropriate for the case where one
waveform was created from the other.
Parameters
----------
other : object
Another object subclassing WaveformBase to compare
report_all: bool, optional
Wait until all attributes have been checked (and reported on) before returning the verdict
rtol : float, optional
Relative tolerance to which to compare arrays (see np.allclose), defaults to 1e-10
atol : float, optional
Absolute tolerance to which to compare arrays (see np.allclose), defaults to 1e-10
compare_history_beginnings: bool, optional
Compare the shortest common part of the `history` fields for equality, defaults to False
exceptions : list, optional
Don't compare elements in this list, corresponding to keys in the object's `__dict__`, defaults to []
"""
equality = True
if not type(self) == type(other): # not isinstance(other, self.__class__):
warnings.warn("\n (type(self)={}) != (type(other)={})".format(type(self), type(other)))
equality = False
if not report_all and not equality:
return False
for key, val in self.__dict__.items():
if key.endswith("__num") or key in exceptions:
continue
elif key == "history":
if compare_history_beginnings:
min_length = min(len(self.history), len(other.history))
if self.history[:min_length] != other.history[:min_length]:
warnings.warn("\n `history` fields differ")
equality = False
elif key == "version_hist":
if self.version_hist != other.version_hist:
warnings.warn("\n `version_hist` fields differ")
equality = False
elif isinstance(val, np.ndarray):
if val.dtype == np.quaternion:
if not np.allclose(
quaternion.as_float_array(val), quaternion.as_float_array(other.__dict__[key]), rtol, atol
):
warnings.warn(f"\n `{key}` fields differ")
equality = False
elif not np.allclose(val, other.__dict__[key], rtol, atol):
warnings.warn(f"\n `{key}` fields differ")
equality = False
else:
if not val == other.__dict__[key]:
warnings.warn(
"\n (self.{0}={1}) != (other.{0}={2}) fields differ".format(key, val, other.__dict__[key])
)
equality = False
if not report_all and not equality:
return False
return equality
    # Slicing
    @waveform_alterations
    def __getitem__(self, key):
        """Extract subsets of the data efficiently

        See the docstring of the WaveformBase class for examples.  Note that,
        as with numpy slicing, the result is typically a view into the
        original arrays, not a copy.
        """
        # Start from a copy that has everything except `t`, `frame`, and `data`
        W = WaveformBase.copy_without_data(self)
        # Remove trivial tuple structure first
        if isinstance(key, tuple) and len(key) == 1:
            key = key[0]
        # Now figure out which type of return is desired
        if isinstance(key, tuple) and 2 <= len(key) <= self.n_data_sets:
            # Return a subset of the data from a subset of times:
            # key[0] slices the time axis (and frame); the full key slices data
            W.t = self.t[key[0]]
            W.frame = self.frame[key[0]]
            W.data = self.data[key]
        elif isinstance(key, slice) or isinstance(key, int):
            # Return complete data from a subset of times (key is slice), or
            # return complete data from a single instant in time (key is int)
            W.t = self.t[key]
            W.frame = self.frame[key]
            W.data = self.data[key]
        else:
            raise ValueError("Could not understand input `{}` (of type `{}`) ".format(key, type(key)))
        W.__history_depth__ -= 1
        W._append_history(f"{W} = {self}[{key}]")
        return W
    @waveform_alterations
    def interpolate(self, tprime):
        """Interpolate the frame and data onto the new set of time steps

        Note that only `t`, `frame`, and `data` are changed in this function.  If there is a corresponding data set
        in a subclass, for example, the subclass must override this function to set that data set -- though this
        function should probably be called to handle the ugly stuff.
        """
        # Copy the information fields, but not the data
        W = WaveformBase.copy_without_data(self)
        W.t = np.copy(tprime)
        # Interpolate the frame rotors with squad (spherical spline interpolation)
        W.frame = quaternion.squad(self.frame, self.t, W.t)
        W.data = np.empty((W.n_times,) + self.data.shape[1:], dtype=self.data.dtype)
        # Spline the float view of the complex 2-d data, then reinterpret the
        # interpolated result as complex and write it into the new data array.
        W.data_2d[:] = CubicSpline(self.t, self.data_2d.view(float))(W.t).view(complex)
        W.__history_depth__ -= 1
        W._append_history(f"{W} = {self}.interpolate({tprime})")
        return W
    @waveform_alterations
    def SI_units(self, current_unit_mass_in_solar_masses, distance_from_source_in_megaparsecs=100):
        """Assuming current quantities are in geometric units, convert to SI units
        This function assumes that the `dataType`, `r_is_scaled_out`, and `m_is_scaled_out` attributes are correct,
        then scales the amplitude and time data appropriately so that the data correspond to data that could be
        observed from a source with the given total mass at the given distance.
        Note that the curvature scalars will have units of s^-2, rather than the arguably more correct m^-2. This
        seems to be more standard in numerical relativity. The result can be divided by `scipy.constants.c**2`
        to give units of m^-2 if desired.
        Parameters
        ----------
        current_unit_mass_in_solar_masses : float
            Mass of the system in the data converted to solar masses
        distance_from_source_in_megaparsecs : float, optional
            Output will be waveform as observed from this distance, default=100 (Mpc)
        Returns
        -------
        A copy of this waveform with rescaled `t` and `data`, and with both
        `m_is_scaled_out` and `r_is_scaled_out` set to False.
        """
        # Warn (but proceed) if the scaling flags suggest units were already applied.
        if not self.r_is_scaled_out:
            warning = (
                "\nTrying to convert to SI units, the radius is supposedly not scaled out.\n"
                + "This seems to suggest that the data may already be in some units..."
            )
            warnings.warn(warning)
        if not self.m_is_scaled_out:
            warning = (
                "\nTrying to convert to SI units, the mass is supposedly not scaled out.\n"
                + "This seems to suggest that the data may already be in some units..."
            )
            warnings.warn(warning)
        # NOTE(review): `m_sun_in_meters`, `speed_of_light`, and `parsec_in_meters` are
        # presumably module-level conversion constants defined outside this view -- confirm.
        M_in_meters = current_unit_mass_in_solar_masses * m_sun_in_meters  # m
        M_in_seconds = M_in_meters / speed_of_light  # s
        R_in_meters = distance_from_source_in_megaparsecs * (1e6 * parsec_in_meters)  # m
        R_over_M = R_in_meters / M_in_meters  # [dimensionless]
        # The radius scaling `r_scaling` is the number of factors of the dimensionless quantity `R_over_M` required
        # to keep the waveform asymptotically constant. So, for example, h and Psi4 both have `r_scaling=1`. The
        # mass scaling `m_scaling` is the number of factors of `M_in_meters` required to make the waveform
        # dimensionless, and does not account for the factors of mass in the radius scale. The Newman-Penrose
        # quantities are curvature quantities, so they have dimensions 1/m^2, and thus have `m_scaling=2`.
        if self.r_is_scaled_out:
            if self.m_is_scaled_out:
                amplitude_scaling = (R_over_M ** -self.r_scaling) * (M_in_meters ** -self.m_scaling)
            else:
                amplitude_scaling = R_over_M ** -self.r_scaling
        else:
            if self.m_is_scaled_out:
                amplitude_scaling = M_in_meters ** -self.m_scaling
            else:
                amplitude_scaling = 1.0
        # Copy the information fields, but not the data
        W = WaveformBase.copy_without_data(self)
        if self.m_is_scaled_out:
            W.t = M_in_seconds * self.t  # s
        else:
            W.t = np.copy(self.t)  # supposedly already in the appropriate units...
        W.frame = np.copy(self.frame)
        W.data = amplitude_scaling * self.data
        # Mark the result as carrying physical units so a second conversion warns.
        W.m_is_scaled_out = False
        W.r_is_scaled_out = False
        W.__history_depth__ -= 1
        W._append_history(
            "{} = {}.SI_units(current_unit_mass_in_solar_masses={}, "
            "distance_from_source_in_megaparsecs={})".format(
                W, self, current_unit_mass_in_solar_masses, distance_from_source_in_megaparsecs
            )
        )
        return W
| [
"pprint.pformat",
"numpy.abs",
"numpy.angle",
"numpy.empty",
"scipy.interpolate.CubicSpline",
"numpy.allclose",
"quaternion.as_float_array",
"numpy.prod",
"numpy.set_printoptions",
"quaternion.means.mean_rotor_in_chordal_metric",
"numpy.copy",
"quaternion.squad",
"quaternion.rotor_chordal_di... | [((1557, 1578), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1572, 1578), False, 'import functools\n'), ((2383, 2437), 'numpy.testing.assert_', 'np.testing.assert_', (['val', "('Failed assertion:\\n\\t' + msg)"], {}), "(val, 'Failed assertion:\\n\\t' + msg)\n", (2401, 2437), True, 'import numpy as np\n'), ((782, 795), 'numpy.sqrt', 'np.sqrt', (['s[i]'], {}), '(s[i])\n', (789, 795), True, 'import numpy as np\n'), ((11461, 11481), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (11479, 11481), False, 'import socket\n'), ((11496, 11507), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11505, 11507), False, 'import os\n'), ((22186, 22203), 'numpy.abs', 'np.abs', (['self.data'], {}), '(self.data)\n', (22192, 22203), True, 'import numpy as np\n'), ((22253, 22272), 'numpy.angle', 'np.angle', (['self.data'], {}), '(self.data)\n', (22261, 22272), True, 'import numpy as np\n'), ((25406, 25433), 'scri.extrapolation.intersection', 'intersection', (['self.t', 'w_a.t'], {}), '(self.t, w_a.t)\n', (25418, 25433), False, 'from scri.extrapolation import intersection\n'), ((31767, 31788), 'numpy.get_printoptions', 'np.get_printoptions', ([], {}), '()\n', (31786, 31788), True, 'import numpy as np\n'), ((31797, 31857), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': '(6)', 'linewidth': '(150)', 'precision': '(6)'}), '(threshold=6, linewidth=150, precision=6)\n', (31816, 31857), True, 'import numpy as np\n'), ((32525, 32552), 'numpy.set_printoptions', 'np.set_printoptions', ([], {}), '(**opts)\n', (32544, 32552), True, 'import numpy as np\n'), ((32568, 32579), 'textwrap.dedent', 'dedent', (['rep'], {}), '(rep)\n', (32574, 32579), False, 'from textwrap import dedent\n'), ((32992, 33020), 'copy.deepcopy', 'copy.deepcopy', (['self.__dict__'], {}), '(self.__dict__)\n', (33005, 33020), False, 'import copy\n'), ((33046, 33083), 'quaternion.as_float_array', 'quaternion.as_float_array', (['self.frame'], {}), 
'(self.frame)\n', (33071, 33083), False, 'import quaternion\n'), ((33693, 33729), 'quaternion.as_quat_array', 'quaternion.as_quat_array', (['self.frame'], {}), '(self.frame)\n', (33717, 33729), False, 'import quaternion\n'), ((34530, 34558), 'copy.deepcopy', 'copy.deepcopy', (['self.__dict__'], {}), '(self.__dict__)\n', (34543, 34558), False, 'import copy\n'), ((35587, 35615), 'copy.deepcopy', 'copy.deepcopy', (['self.__dict__'], {}), '(self.__dict__)\n', (35600, 35615), False, 'import copy\n'), ((41136, 41151), 'numpy.copy', 'np.copy', (['tprime'], {}), '(tprime)\n', (41143, 41151), True, 'import numpy as np\n'), ((41170, 41211), 'quaternion.squad', 'quaternion.squad', (['self.frame', 'self.t', 'W.t'], {}), '(self.frame, self.t, W.t)\n', (41186, 41211), False, 'import quaternion\n'), ((41229, 41296), 'numpy.empty', 'np.empty', (['((W.n_times,) + self.data.shape[1:])'], {'dtype': 'self.data.dtype'}), '((W.n_times,) + self.data.shape[1:], dtype=self.data.dtype)\n', (41237, 41296), True, 'import numpy as np\n'), ((44768, 44787), 'numpy.copy', 'np.copy', (['self.frame'], {}), '(self.frame)\n', (44775, 44787), True, 'import numpy as np\n'), ((11982, 12014), 'pprint.pformat', 'pprint.pformat', (['kwargs'], {'indent': '(4)'}), '(kwargs, indent=4)\n', (11996, 12014), False, 'import pprint\n'), ((12027, 12049), 'warnings.warn', 'warnings.warn', (['warning'], {}), '(warning)\n', (12040, 12049), False, 'import warnings\n'), ((14533, 14568), 'numpy.empty', 'np.empty', (['(0,)'], {'dtype': 'np.quaternion'}), '((0,), dtype=np.quaternion)\n', (14541, 14568), True, 'import numpy as np\n'), ((19059, 19087), 'numpy.prod', 'np.prod', (['self.data.shape[1:]'], {}), '(self.data.shape[1:])\n', (19066, 19087), True, 'import numpy as np\n'), ((22342, 22361), 'numpy.angle', 'np.angle', (['self.data'], {}), '(self.data)\n', (22350, 22361), True, 'import numpy as np\n'), ((22723, 22761), 'numpy.empty', 'np.empty', (['(self.n_times,)'], {'dtype': 'float'}), '((self.n_times,), 
dtype=float)\n', (22731, 22761), True, 'import numpy as np\n'), ((22792, 22842), 'numpy.empty', 'np.empty', (['(self.t[indices].shape[0],)'], {'dtype': 'float'}), '((self.t[indices].shape[0],), dtype=float)\n', (22800, 22842), True, 'import numpy as np\n'), ((25068, 25090), 'warnings.warn', 'warnings.warn', (['warning'], {}), '(warning)\n', (25081, 25090), False, 'import warnings\n'), ((26397, 26438), 'quaternion.squad', 'quaternion.squad', (['w_a.frame', 'w_a.t', 'w_c.t'], {}), '(w_a.frame, w_a.t, w_c.t)\n', (26413, 26438), False, 'import quaternion\n'), ((26460, 26503), 'quaternion.squad', 'quaternion.squad', (['self.frame', 'self.t', 'w_c.t'], {}), '(self.frame, self.t, w_c.t)\n', (26476, 26503), False, 'import quaternion\n'), ((28011, 28057), 'quaternion.means.mean_rotor_in_chordal_metric', 'mean_rotor_in_chordal_metric', (['w_c.frame', 'w_c.t'], {}), '(w_c.frame, w_c.t)\n', (28039, 28057), False, 'from quaternion.means import mean_rotor_in_chordal_metric\n'), ((28942, 28985), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['w_a.t', 'w_a.data[:, AMode].real'], {}), '(w_a.t, w_a.data[:, AMode].real)\n', (28953, 28985), False, 'from scipy.interpolate import CubicSpline\n'), ((29010, 29053), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['w_a.t', 'w_a.data[:, AMode].imag'], {}), '(w_a.t, w_a.data[:, AMode].imag)\n', (29021, 29053), False, 'from scipy.interpolate import CubicSpline\n'), ((29078, 29123), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['self.t', 'self.data[:, BMode].real'], {}), '(self.t, self.data[:, BMode].real)\n', (29089, 29123), False, 'from scipy.interpolate import CubicSpline\n'), ((29148, 29193), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['self.t', 'self.data[:, BMode].imag'], {}), '(self.t, self.data[:, BMode].imag)\n', (29159, 29193), False, 'from scipy.interpolate import CubicSpline\n'), ((42877, 42899), 'warnings.warn', 'warnings.warn', (['warning'], {}), '(warning)\n', (42890, 42899), False, 'import warnings\n'), ((43167, 
43189), 'warnings.warn', 'warnings.warn', (['warning'], {}), '(warning)\n', (43180, 43189), False, 'import warnings\n'), ((44684, 44699), 'numpy.copy', 'np.copy', (['self.t'], {}), '(self.t)\n', (44691, 44699), True, 'import numpy as np\n'), ((9350, 9377), 'numpy.empty', 'np.empty', (['(0,)'], {'dtype': 'float'}), '((0,), dtype=float)\n', (9358, 9377), True, 'import numpy as np\n'), ((9424, 9459), 'numpy.empty', 'np.empty', (['(0,)'], {'dtype': 'np.quaternion'}), '((0,), dtype=np.quaternion)\n', (9432, 9459), True, 'import numpy as np\n'), ((9504, 9535), 'numpy.empty', 'np.empty', (['(0, 0)'], {'dtype': 'complex'}), '((0, 0), dtype=complex)\n', (9512, 9535), True, 'import numpy as np\n'), ((10169, 10190), 'numpy.get_printoptions', 'np.get_printoptions', ([], {}), '()\n', (10188, 10190), True, 'import numpy as np\n'), ((10207, 10239), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': '(6)'}), '(threshold=6)\n', (10226, 10239), True, 'import numpy as np\n'), ((10425, 10452), 'numpy.set_printoptions', 'np.set_printoptions', ([], {}), '(**opts)\n', (10444, 10452), True, 'import numpy as np\n'), ((10567, 10583), 'numpy.copy', 'np.copy', (['other.t'], {}), '(other.t)\n', (10574, 10583), True, 'import numpy as np\n'), ((10609, 10629), 'numpy.copy', 'np.copy', (['other.frame'], {}), '(other.frame)\n', (10616, 10629), True, 'import numpy as np\n'), ((10654, 10673), 'numpy.copy', 'np.copy', (['other.data'], {}), '(other.data)\n', (10661, 10673), True, 'import numpy as np\n'), ((11523, 11546), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11544, 11546), False, 'import datetime\n'), ((13585, 13603), 'numpy.dtype', 'np.dtype', (['np.float'], {}), '(np.float)\n', (13593, 13603), True, 'import numpy as np\n'), ((14413, 14432), 'numpy.isfinite', 'np.isfinite', (['self.t'], {}), '(self.t)\n', (14424, 14432), True, 'import numpy as np\n'), ((14882, 14900), 'numpy.dtype', 'np.dtype', (['np.float'], {}), '(np.float)\n', (14890, 14900), True, 
'import numpy as np\n'), ((14979, 15015), 'quaternion.as_quat_array', 'quaternion.as_quat_array', (['self.frame'], {}), '(self.frame)\n', (15003, 15015), False, 'import quaternion\n'), ((15248, 15271), 'numpy.dtype', 'np.dtype', (['np.quaternion'], {}), '(np.quaternion)\n', (15256, 15271), True, 'import numpy as np\n'), ((15676, 15699), 'numpy.isfinite', 'np.isfinite', (['self.frame'], {}), '(self.frame)\n', (15687, 15699), True, 'import numpy as np\n'), ((16300, 16322), 'numpy.isfinite', 'np.isfinite', (['self.data'], {}), '(self.data)\n', (16311, 16322), True, 'import numpy as np\n'), ((25526, 25593), 'numpy.zeros', 'np.zeros', (['(new_times.shape[0], self.n_modes)'], {'dtype': 'self.data.dtype'}), '((new_times.shape[0], self.n_modes), dtype=self.data.dtype)\n', (25534, 25593), True, 'import numpy as np\n'), ((26775, 26821), 'numpy.quaternion.squad', 'np.quaternion.squad', (['self.frame', 'self.t', 'w_c.t'], {}), '(self.frame, self.t, w_c.t)\n', (26794, 26821), True, 'import numpy as np\n'), ((28073, 28128), 'quaternion.rotor_chordal_distance', 'quaternion.rotor_chordal_distance', (['R_m', '(-quaternion.one)'], {}), '(R_m, -quaternion.one)\n', (28106, 28128), False, 'import quaternion\n'), ((28131, 28185), 'quaternion.rotor_chordal_distance', 'quaternion.rotor_chordal_distance', (['R_m', 'quaternion.one'], {}), '(R_m, quaternion.one)\n', (28164, 28185), False, 'import quaternion\n'), ((27141, 27185), 'numpy.quaternion.squad', 'np.quaternion.squad', (['w_a.frame', 'w_a.t', 'w_c.t'], {}), '(w_a.frame, w_a.t, w_c.t)\n', (27160, 27185), True, 'import numpy as np\n'), ((28309, 28373), 'quaternion.rotor_chordal_distance', 'quaternion.rotor_chordal_distance', (['w_c.frame[0]', '(-quaternion.one)'], {}), '(w_c.frame[0], -quaternion.one)\n', (28342, 28373), False, 'import quaternion\n'), ((28376, 28439), 'quaternion.rotor_chordal_distance', 'quaternion.rotor_chordal_distance', (['w_c.frame[0]', 'quaternion.one'], {}), '(w_c.frame[0], quaternion.one)\n', (28409, 28439), 
False, 'import quaternion\n'), ((29434, 29464), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['self.t', 'self.data'], {}), '(self.t, self.data)\n', (29445, 29464), False, 'from scipy.interpolate import CubicSpline\n'), ((29541, 29571), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['self.t', 'self.data'], {}), '(self.t, self.data)\n', (29552, 29571), False, 'from scipy.interpolate import CubicSpline\n'), ((29648, 29678), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['self.t', 'self.data'], {}), '(self.t, self.data)\n', (29659, 29678), False, 'from scipy.interpolate import CubicSpline\n'), ((29759, 29789), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['self.t', 'self.data'], {}), '(self.t, self.data)\n', (29770, 29789), False, 'from scipy.interpolate import CubicSpline\n'), ((10348, 10389), 'pprint.pformat', 'pprint.pformat', (['original_kwargs'], {'indent': '(4)'}), '(original_kwargs, indent=4)\n', (10362, 10389), False, 'import pprint\n'), ((14128, 14143), 'numpy.diff', 'np.diff', (['self.t'], {}), '(self.t)\n', (14135, 14143), True, 'import numpy as np\n'), ((14311, 14326), 'numpy.diff', 'np.diff', (['self.t'], {}), '(self.t)\n', (14318, 14326), True, 'import numpy as np\n'), ((26577, 26601), 'numpy.quaternion.inverse', 'np.quaternion.inverse', (['v'], {}), '(v)\n', (26598, 26601), True, 'import numpy as np\n'), ((27291, 27327), 'numpy.quaternion.inverse', 'np.quaternion.inverse', (['self.frame[0]'], {}), '(self.frame[0])\n', (27312, 27327), True, 'import numpy as np\n'), ((26943, 26967), 'numpy.quaternion.inverse', 'np.quaternion.inverse', (['v'], {}), '(v)\n', (26964, 26967), True, 'import numpy as np\n'), ((38115, 38162), 'warnings.warn', 'warnings.warn', (['"""\n `history` fields differ"""'], {}), '("""\n `history` fields differ""")\n', (38128, 38162), False, 'import warnings\n'), ((38321, 38373), 'warnings.warn', 'warnings.warn', (['"""\n `version_hist` fields differ"""'], {}), '("""\n `version_hist` fields differ""")\n', (38334, 38373), 
False, 'import warnings\n'), ((27473, 27510), 'numpy.quaternions.inverse', 'np.quaternions.inverse', (['self.frame[0]'], {}), '(self.frame[0])\n', (27495, 27510), True, 'import numpy as np\n'), ((27642, 27679), 'numpy.quaternions.inverse', 'np.quaternions.inverse', (['self.frame[0]'], {}), '(self.frame[0])\n', (27664, 27679), True, 'import numpy as np\n'), ((27802, 27824), 'numpy.array', 'np.array', (['w_a.frame[0]'], {}), '(w_a.frame[0])\n', (27810, 27824), True, 'import numpy as np\n'), ((38703, 38749), 'warnings.warn', 'warnings.warn', (['f"""\n `{key}` fields differ"""'], {}), '(f"""\n `{key}` fields differ""")\n', (38716, 38749), False, 'import warnings\n'), ((38813, 38862), 'numpy.allclose', 'np.allclose', (['val', 'other.__dict__[key]', 'rtol', 'atol'], {}), '(val, other.__dict__[key], rtol, atol)\n', (38824, 38862), True, 'import numpy as np\n'), ((38884, 38930), 'warnings.warn', 'warnings.warn', (['f"""\n `{key}` fields differ"""'], {}), '(f"""\n `{key}` fields differ""")\n', (38897, 38930), False, 'import warnings\n'), ((38565, 38595), 'quaternion.as_float_array', 'quaternion.as_float_array', (['val'], {}), '(val)\n', (38590, 38595), False, 'import quaternion\n'), ((38597, 38643), 'quaternion.as_float_array', 'quaternion.as_float_array', (['other.__dict__[key]'], {}), '(other.__dict__[key])\n', (38622, 38643), False, 'import quaternion\n')] |
"""Main module for training auto-constraint model."""
import argparse
import bisect
import datetime
import functools
import json
import os
import time
import torch
import torch.utils.tensorboard
import torch.utils.data
# numpy has come after pytorch due to MKL threading setup
import numpy as np
from sketchgraphs_models.nn.distributed import SingleDeviceDistributedParallel
from sketchgraphs_models import training, distributed_utils
from sketchgraphs_models.autoconstraint import dataset, model as auto_model
from sketchgraphs_models.graph.train import data_loading
# Maps the `--optimizer` command-line choice to the torch.optim constructor it selects.
_opt_factories = {
    'sgd': torch.optim.SGD,
    'adam': torch.optim.Adam,
    'adamax': torch.optim.Adamax,
    'rms': torch.optim.RMSprop
}
def _lr_schedule(epoch, warmup_epochs=5, decay_epochs=None):
    """Learning-rate multiplier for a given epoch.

    Ramps linearly from 1/warmup_epochs up to 1 over the first `warmup_epochs`
    epochs, then multiplies by 0.1 for every milestone in `decay_epochs` that
    has been passed.
    """
    milestones = decay_epochs or []
    warmup = min((epoch + 1) / warmup_epochs, 1)
    num_decays = bisect.bisect_right(milestones, epoch)
    return warmup * 0.1 ** num_decays
class AutoconstraintHarness(training.TrainingHarness):
    """Training harness for the autoconstraint model.

    Extends `training.TrainingHarness` with loss/accuracy computation for the
    autoconstraint edge-prediction task, learning-rate scheduling, periodic
    checkpointing, and tensorboard summaries.
    """
    def __init__(self, model, opt, config_train, config_eval, dist_config, scheduler=None, output_dir=None, profile_enabled=False, additional_model_information=None):
        super(AutoconstraintHarness, self).__init__(model, opt, config_train, config_eval, dist_config)
        self.scheduler = scheduler
        self.output_dir = output_dir
        self.profile_enabled = profile_enabled
        # Extra key/value pairs stored in every checkpoint (e.g. feature mappings).
        self.additional_model_information = additional_model_information or {}
    def single_step(self, batch, global_step):
        """Run one forward/backward/update step on `batch`; return (losses, accuracy)."""
        self.opt.zero_grad()
        batch['partner_index'] = training.load_cuda_async(batch['partner_index'], self.config_train.device)
        with torch.autograd.profiler.record_function("forward"):
            readout = self.model(batch)
            losses, accuracy = auto_model.compute_losses(batch, readout)
            total_loss = sum(losses.values())
        # Only update parameters when the model is in training mode.
        if self.model.training:
            with torch.autograd.profiler.record_function("backward"):
                total_loss.backward()
            with torch.autograd.profiler.record_function("opt_update"):
                self.opt.step()
        # Detach before averaging so the returned statistics hold no autograd graph.
        losses = training.map_structure_flat(losses, lambda x: x.detach())
        losses = auto_model.compute_average_losses(batch, losses)
        avg_loss = total_loss.detach() / batch['graph'].node_counts.shape[0]
        losses['average'] = avg_loss
        return losses, accuracy
    def on_epoch_end(self, epoch, global_step):
        """Step the LR scheduler, log the new LR, and checkpoint every 10 epochs (leader only)."""
        if self.scheduler is not None:
            self.scheduler.step()
            if self.config_train.tb_writer is not None and self.is_leader():
                lr = self.scheduler.get_last_lr()[0]
                self.config_train.tb_writer.add_scalar('learning_rate', lr, global_step)
        if self.is_leader() and self.output_dir is not None and (epoch + 1) % 10 == 0:
            self.log('Saving checkpoint for epoch {}'.format(epoch + 1))
            torch.save({
                'opt': self.opt.state_dict(),
                'model': self.model.state_dict(),
                'epoch': epoch,
                'global_step': global_step,
                **self.additional_model_information,
            },
                os.path.join(self.output_dir, 'model_state_{0}.pt'.format(epoch + 1)))
    def write_summaries(self, global_step, losses, accuracies, tb_writer):
        """Write per-step loss and accuracy scalars to tensorboard (no-op if no writer)."""
        if tb_writer is None:
            return
        for k, v in losses.items():
            tb_writer.add_scalar('loss/' + k, v, global_step)
        for k, v in accuracies.items():
            tb_writer.add_scalar('accuracy/' + k, v, global_step)
    def print_statistics(self, loss_acc, accuracy_acc):
        """Log accumulated loss and accuracy statistics in a compact one-line format."""
        self.log(f'Loss ({loss_acc["average"]:.3f}). Stop ({loss_acc["edge_stop"]:.3f}) Partner ({loss_acc["edge_partner"]:.3f}) Label ({loss_acc["edge_label"]:.3f})')
        self.log(f'Accuracy Stop({accuracy_acc["edge_stop"]:4.1%}) Partner ({accuracy_acc["edge_partner"]:4.1%}) Label ({accuracy_acc["edge_label"]:4.1%})')
def train(node_feature_mapping, dataloader_train, args, output_dir=None, dataloader_eval=None, batches_per_epoch=None, dist_config=None):
    """Build the autoconstraint model and run the full training loop.

    Parameters
    ----------
    node_feature_mapping : object exposing `feature_dimensions` and `state_dict()`
        Mapping describing the entity features of the dataset.
    dataloader_train : iterable
        Iterable of training batches.
    args : dict
        Hyper-parameters and paths, as produced by `get_argsparser`.
    output_dir : str, optional
        Directory for checkpoints and tensorboard logs (leader process only).
    dataloader_eval : iterable, optional
        Optional evaluation dataloader.
    batches_per_epoch : int, optional
        Number of batches per epoch, or None.
    dist_config : optional
        Distributed configuration; None for single-process CPU training.

    Returns
    -------
    The trained model (possibly wrapped for distributed training).
    """
    print('Building model.')
    core = auto_model.MODEL_CORES[args['model_core']](args['hidden_size'], node_feature_mapping.feature_dimensions, args['num_prop_rounds'])
    model = auto_model.AutoconstraintModel(core)
    if args['model_state']:
        state = torch.load(args['model_state'], map_location=torch.device('cpu'))
        # Checkpoints saved from a distributed run have their parameter keys
        # prefixed with "module." by the DDP wrapper.  Strip that prefix where
        # present, but leave other keys untouched.  (Previously the first 7
        # characters were dropped unconditionally, which corrupted keys saved
        # without the prefix.)
        prefix = 'module.'
        state['model'] = {
            (key[len(prefix):] if key.startswith(prefix) else key): value
            for key, value in state['model'].items()
        }
        model.load_state_dict(state['model'])
        # Resume bookkeeping from the checkpoint.
        epoch = state['epoch']
        global_step = state['global_step']
    else:
        epoch = 0
        global_step = 0
    if dist_config:
        gpu_id = dist_config.local_rank
        print('Creating model on GPU {0}'.format(gpu_id))
        device = torch.device('cuda', gpu_id)
        # Create parallel device. Note that we need to use find_unused_parameters, as due to the dynamic
        # nature of our computation graph, depending on the available targets in the dataset, not all
        # parameters will have gradients computed for them.
        model = SingleDeviceDistributedParallel(model.to(device), gpu_id, find_unused_parameters=True)
    else:
        device = torch.device('cpu')
        model.to(device)  # Set model device
    print('Model done building.')
    total_batch_size = args['batch_size']
    if dist_config:
        # Each process sees an equal shard of the global batch.
        batch_size = total_batch_size // dist_config.world_size
    else:
        batch_size = total_batch_size
    # Linear LR scaling rule: scale the base rate by global batch size / 256.
    opt = _opt_factories[args['optimizer']](model.parameters(), lr=args['learning_rate'] * total_batch_size / 256)
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        opt, functools.partial(_lr_schedule, warmup_epochs=5, decay_epochs=[20, 40]))
    # Only the leader process writes tensorboard summaries.
    if distributed_utils.is_leader(dist_config):
        tb_writer_main = torch.utils.tensorboard.SummaryWriter(output_dir)
        tb_writer_eval = torch.utils.tensorboard.SummaryWriter(output_dir + '/eval/')
    else:
        tb_writer_main, tb_writer_eval = None, None
    harness = AutoconstraintHarness(
        model, opt,
        training.TrainingConfig(
            dataloader_train,
            tb_writer_main,
            device,
            batch_size,
            batches_per_epoch),
        training.TrainingConfig(
            dataloader_eval,
            tb_writer_eval,
            device,
            batch_size)
        if dataloader_eval is not None
        else None,
        scheduler=scheduler,
        output_dir=output_dir,
        dist_config=dist_config,
        profile_enabled=args['profile'],
        additional_model_information={
            'node_feature_mapping': node_feature_mapping.state_dict(),
            'model_configuration': {
                'embedding_dim': args['hidden_size'],
                'depth': args['num_prop_rounds'],
                'model_core': args['model_core'],
                'name': 'autoconstraint'
            }
        })
    while epoch < args['num_epochs']:
        epoch, global_step = harness.train_epochs(epoch, global_step)
    return model
def get_argsparser():
    """Construct the command-line argument parser for autoconstraint training."""
    p = argparse.ArgumentParser()
    # Run bookkeeping and outputs
    p.add_argument('--description', default=None,
                   help='Message describing the current run.')
    p.add_argument('--output_dir', default='output', help='Directory for output files.')
    # Dataset locations
    p.add_argument('--dataset_train', required=True,
                   help='Path to training dataset')
    p.add_argument('--dataset_auxiliary', default=None, help='path to auxiliary dataset containing metadata')
    p.add_argument('--dataset_test', required=False, default=None,
                   help='Path to validation dataset.')
    p.add_argument('--model_state', default=None, help='Path to saved model state_dict.')
    # Feature quantization
    p.add_argument('--num_quantize_length', type=int, default=383, help='number of quantization values for length')
    p.add_argument('--num_quantize_angle', type=int, default=127, help='number of quantization values for angle')
    # Optimization hyper-parameters
    p.add_argument('--batch_size', type=int, default=2048,
                   help='Training batch size.')
    p.add_argument('--learning_rate', type=float, default=1e-5)
    p.add_argument('--optimizer', default='adam', choices=list(_opt_factories.keys()))
    # Model architecture
    p.add_argument('--hidden_size', type=int, default=384)
    p.add_argument('--num_prop_rounds', type=int, default=3)
    # Training loop configuration
    p.add_argument('--num_epochs', type=int, default=60,
                   help='Number of training epochs.')
    p.add_argument('--num_workers', type=int, default=0,
                   help='Number of dataloader workers.')
    p.add_argument('--seed', type=int, default=7)
    p.add_argument('--world_size', type=int, default=1, help='Number of GPUs to use.')
    p.add_argument('--profile', action='store_true', help='Whether to produce autograd profiles')
    p.add_argument('--model_core', type=str, default='bidirectional_recurrent', choices=list(auto_model.MODEL_CORES.keys()))
    return p
# These keys are converted to absolute paths on save.
# `run` replaces each non-None value with os.path.abspath(value) before use.
_ARGS_PATH_KEYS = (
    'output_dir',
    'dataset_train',
    'dataset_auxiliary',
    'dataset_test',
    'model_state'
)
def _feature_dimension(mapping):
    """Return `mapping.feature_dimensions`, or an empty dict when `mapping` is None."""
    return {} if mapping is None else mapping.feature_dimensions
def initialize_datasets(args, distributed_config):
    """Create the training dataloader and feature mapping from the configuration.

    Returns a tuple (dl_train, dl_test, batches_per_epoch, entity_feature_mapping);
    `dl_test` is always None since test-set loading is not implemented.
    """
    quantization = {'angle': args['num_quantize_angle'], 'length': args['num_quantize_length']}
    data = data_loading.load_sequences_and_mappings(
        args['dataset_train'], args['dataset_auxiliary'], quantization, edge_features=False)
    ds_train = dataset.AutoconstraintDataset(
        data['sequences'], data['entity_feature_mapping'], seed=args['seed'])
    batch_size = args['batch_size']
    num_workers = args['num_workers']
    if distributed_config:
        # Split the global batch and worker budget evenly across processes.
        batch_size //= distributed_config.world_size
        num_workers //= distributed_config.world_size
    dl_train, batches_per_epoch = data_loading.make_dataloader_train(
        dataset.collate, ds_train, data['weights'], batch_size,
        args['num_epochs'], num_workers, distributed_config)
    if args['dataset_test'] is not None:
        raise NotImplementedError('loading testing set not implemented')
    dl_test = None
    return dl_train, dl_test, batches_per_epoch, data['entity_feature_mapping']
def run(args, distributed_config=None):
    """Runs the entire training process according to the given configuration.
    """
    # Seed numpy and torch so runs are reproducible.
    np.random.seed(args['seed'])
    torch.manual_seed(args['seed'])
    # Normalize every path-valued argument to an absolute path.
    for key in _ARGS_PATH_KEYS:
        if args[key] is not None:
            args[key] = os.path.abspath(args[key])
    print('Loading datasets')
    dl_train, dl_test, batches_per_epoch, node_feature_mapping = initialize_datasets(
        args, distributed_config)
    print('Data loaded. Creating output folder.')
    # Only the leader process creates the output folder and saves the arguments.
    if distributed_utils.is_leader(distributed_config):
        output_dir = f"{args['output_dir']}/{time.strftime('%m%d')}/time_{time.strftime('%H%M%S')}"
        os.makedirs(output_dir)
        with open(os.path.join(output_dir, 'args.txt'), 'w') as handle:
            json.dump(args, handle, indent=4)
    else:
        output_dir = None
    print('Starting training.')
    start = time.perf_counter()
    _ = train(
        node_feature_mapping,
        dl_train, args,
        output_dir=output_dir, dataloader_eval=dl_test, batches_per_epoch=batches_per_epoch,
        dist_config=distributed_config)
    elapsed = datetime.timedelta(seconds=time.perf_counter() - start)
    print(f'Done training. Total time: {elapsed}.')
def main():
    """Default main function."""
    args = vars(get_argsparser().parse_args())
    if args['world_size'] > 1:
        # Fan out one worker process per GPU.
        distributed_utils.train_boostrap_distributed(args, run)
    else:
        run(args)
if __name__ == '__main__':
    main()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.autograd.profiler.record_function",
"time.strftime",
"sketchgraphs_models.training.load_cuda_async",
"torch.device",
"os.path.join",
"sketchgraphs_models.autoconstraint.model.compute_average_losses",
"os.path.abspath",
"datetime.timedelta",
... | [((4389, 4425), 'sketchgraphs_models.autoconstraint.model.AutoconstraintModel', 'auto_model.AutoconstraintModel', (['core'], {}), '(core)\n', (4419, 4425), True, 'from sketchgraphs_models.autoconstraint import dataset, model as auto_model\n'), ((6033, 6073), 'sketchgraphs_models.distributed_utils.is_leader', 'distributed_utils.is_leader', (['dist_config'], {}), '(dist_config)\n', (6060, 6073), False, 'from sketchgraphs_models import training, distributed_utils\n'), ((7380, 7405), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7403, 7405), False, 'import argparse\n'), ((9930, 10045), 'sketchgraphs_models.graph.train.data_loading.load_sequences_and_mappings', 'data_loading.load_sequences_and_mappings', (['dataset_train_path', 'auxiliary_path', 'quantization'], {'edge_features': '(False)'}), '(dataset_train_path, auxiliary_path,\n quantization, edge_features=False)\n', (9970, 10045), False, 'from sketchgraphs_models.graph.train import data_loading\n'), ((10067, 10183), 'sketchgraphs_models.autoconstraint.dataset.AutoconstraintDataset', 'dataset.AutoconstraintDataset', (["train_data['sequences']", "train_data['entity_feature_mapping']"], {'seed': "args['seed']"}), "(train_data['sequences'], train_data[\n 'entity_feature_mapping'], seed=args['seed'])\n", (10096, 10183), False, 'from sketchgraphs_models.autoconstraint import dataset, model as auto_model\n'), ((10458, 10617), 'sketchgraphs_models.graph.train.data_loading.make_dataloader_train', 'data_loading.make_dataloader_train', (['dataset.collate', 'ds_train', "train_data['weights']", 'batch_size', "args['num_epochs']", 'num_workers', 'distributed_config'], {}), "(dataset.collate, ds_train, train_data[\n 'weights'], batch_size, args['num_epochs'], num_workers, distributed_config\n )\n", (10492, 10617), False, 'from sketchgraphs_models.graph.train import data_loading\n'), ((11008, 11036), 'numpy.random.seed', 'np.random.seed', (["args['seed']"], {}), "(args['seed'])\n", (11022, 11036), 
True, 'import numpy as np\n'), ((11041, 11072), 'torch.manual_seed', 'torch.manual_seed', (["args['seed']"], {}), "(args['seed'])\n", (11058, 11072), False, 'import torch\n'), ((11422, 11469), 'sketchgraphs_models.distributed_utils.is_leader', 'distributed_utils.is_leader', (['distributed_config'], {}), '(distributed_config)\n', (11449, 11469), False, 'from sketchgraphs_models import training, distributed_utils\n'), ((11907, 11926), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (11924, 11926), False, 'import time\n'), ((12144, 12163), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (12161, 12163), False, 'import time\n'), ((921, 961), 'bisect.bisect_right', 'bisect.bisect_right', (['decay_epochs', 'epoch'], {}), '(decay_epochs, epoch)\n', (940, 961), False, 'import bisect\n'), ((1640, 1714), 'sketchgraphs_models.training.load_cuda_async', 'training.load_cuda_async', (["batch['partner_index']", 'self.config_train.device'], {}), "(batch['partner_index'], self.config_train.device)\n", (1664, 1714), False, 'from sketchgraphs_models import training, distributed_utils\n'), ((2279, 2327), 'sketchgraphs_models.autoconstraint.model.compute_average_losses', 'auto_model.compute_average_losses', (['batch', 'losses'], {}), '(batch, losses)\n', (2312, 2327), True, 'from sketchgraphs_models.autoconstraint import dataset, model as auto_model\n'), ((5070, 5098), 'torch.device', 'torch.device', (['"""cuda"""', 'gpu_id'], {}), "('cuda', gpu_id)\n", (5082, 5098), False, 'import torch\n'), ((5496, 5515), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5508, 5515), False, 'import torch\n'), ((5952, 6023), 'functools.partial', 'functools.partial', (['_lr_schedule'], {'warmup_epochs': '(5)', 'decay_epochs': '[20, 40]'}), '(_lr_schedule, warmup_epochs=5, decay_epochs=[20, 40])\n', (5969, 6023), False, 'import functools\n'), ((6100, 6149), 'torch.utils.tensorboard.SummaryWriter', 'torch.utils.tensorboard.SummaryWriter', (['output_dir'], {}), 
'(output_dir)\n', (6137, 6149), False, 'import torch\n'), ((6175, 6235), 'torch.utils.tensorboard.SummaryWriter', 'torch.utils.tensorboard.SummaryWriter', (["(output_dir + '/eval/')"], {}), "(output_dir + '/eval/')\n", (6212, 6235), False, 'import torch\n'), ((6364, 6464), 'sketchgraphs_models.training.TrainingConfig', 'training.TrainingConfig', (['dataloader_train', 'tb_writer_main', 'device', 'batch_size', 'batches_per_epoch'], {}), '(dataloader_train, tb_writer_main, device,\n batch_size, batches_per_epoch)\n', (6387, 6464), False, 'from sketchgraphs_models import training, distributed_utils\n'), ((11680, 11703), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (11691, 11703), False, 'import os\n'), ((1729, 1779), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""forward"""'], {}), "('forward')\n", (1768, 1779), False, 'import torch\n'), ((1852, 1893), 'sketchgraphs_models.autoconstraint.model.compute_losses', 'auto_model.compute_losses', (['batch', 'readout'], {}), '(batch, readout)\n', (1877, 1893), True, 'from sketchgraphs_models.autoconstraint import dataset, model as auto_model\n'), ((6531, 6607), 'sketchgraphs_models.training.TrainingConfig', 'training.TrainingConfig', (['dataloader_eval', 'tb_writer_eval', 'device', 'batch_size'], {}), '(dataloader_eval, tb_writer_eval, device, batch_size)\n', (6554, 6607), False, 'from sketchgraphs_models import training, distributed_utils\n'), ((11164, 11190), 'os.path.abspath', 'os.path.abspath', (['args[key]'], {}), '(args[key])\n', (11179, 11190), False, 'import os\n'), ((11579, 11600), 'time.strftime', 'time.strftime', (['"""%m%d"""'], {}), "('%m%d')\n", (11592, 11600), False, 'import time\n'), ((11646, 11669), 'time.strftime', 'time.strftime', (['"""%H%M%S"""'], {}), "('%H%M%S')\n", (11659, 11669), False, 'import time\n'), ((11788, 11820), 'json.dump', 'json.dump', (['args', 'file_'], {'indent': '(4)'}), '(args, file_, indent=4)\n', (11797, 11820), 
False, 'import json\n'), ((1990, 2041), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""backward"""'], {}), "('backward')\n", (2029, 2041), False, 'import torch\n'), ((2099, 2152), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""opt_update"""'], {}), "('opt_update')\n", (2138, 2152), False, 'import torch\n'), ((4516, 4535), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4528, 4535), False, 'import torch\n'), ((9322, 9351), 'sketchgraphs_models.autoconstraint.model.MODEL_CORES.keys', 'auto_model.MODEL_CORES.keys', ([], {}), '()\n', (9349, 9351), True, 'from sketchgraphs_models.autoconstraint import dataset, model as auto_model\n'), ((11723, 11759), 'os.path.join', 'os.path.join', (['output_dir', '"""args.txt"""'], {}), "(output_dir, 'args.txt')\n", (11735, 11759), False, 'import os\n'), ((12204, 12253), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(end_time - start_time)'}), '(seconds=end_time - start_time)\n', (12222, 12253), False, 'import datetime\n')] |
import os
import pathlib
import io
import flask
import MySQLdb.cursors
from pymemcache.client.base import Client as MemcacheClient
import pymc_session
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from chainer.links import VGG16Layers
from PIL import Image
_config = None
def config():
    """Return the application configuration, building it lazily from the
    ISUCONP_* environment variables on first use and caching it."""
    global _config
    if _config is not None:
        return _config
    env = os.environ.get
    db_conf = {
        "host": env("ISUCONP_DB_HOST", 'localhost'),
        "port": int(env("ISUCONP_DB_PORT", "3306")),
        "user": env("ISUCONP_DB_USER", "root"),
        "db": env("ISUCONP_DB_NAME", "isuconp"),
    }
    password = env("ISUCONP_DB_PASSWORD")
    if password:
        # Only hand a password to MySQLdb when one is actually configured.
        db_conf['passwd'] = password
    _config = {"db": db_conf}
    return _config
_db = None
def db():
    """Return the shared MySQL connection, opening it on first use."""
    global _db
    if _db is not None:
        return _db
    conf = config()["db"].copy()
    # Connection-level settings layered on top of the static config.
    conf.update(
        charset='utf8mb4',
        cursorclass=MySQLdb.cursors.DictCursor,
        autocommit=True,
    )
    _db = MySQLdb.connect(**conf)
    return _db
def db_initialize():
    """Reset benchmark data: drop rows added during a run and restore the
    users' deletion flags to their initial state."""
    cursor = db().cursor()
    statements = (
        'DELETE FROM users WHERE id > 1000',
        'DELETE FROM posts WHERE id > 10000',
        'DELETE FROM comments WHERE id > 100000',
        'UPDATE users SET del_flg = 0',
        'UPDATE users SET del_flg = 1 WHERE id % 50 = 0',
    )
    for statement in statements:
        cursor.execute(statement)
_mcclient = None
def memcache():
    """Return the shared memcached client, creating it on first use."""
    global _mcclient
    if _mcclient is not None:
        return _mcclient
    _mcclient = MemcacheClient(
        ('127.0.0.1', 11211), no_delay=True, default_noreply=False)
    return _mcclient
# load image features
# Pre-computed per-image feature vectors, one "<post_id>.npy" file per post,
# loaded eagerly at import time and kept in memory keyed by integer post id.
FEATURES_DIR = "./features"
_image_features_dict = {}
npy_fnames = os.listdir(FEATURES_DIR)
for fname in npy_fnames:
    npy = np.load(os.path.join(FEATURES_DIR, fname))
    # The filename stem is the numeric post id (e.g. "123.npy" -> 123).
    npy_no = os.path.splitext(fname)[0]
    _image_features_dict[int(npy_no)] = npy
# app setup
# Static assets live in ../public relative to this file.
static_path = pathlib.Path(__file__).resolve().parent.parent / 'public'
app = flask.Flask(__name__, static_folder=str(static_path), static_url_path='')
#app.debug = True
# Sessions are stored in memcached instead of signed cookies.
app.session_interface = pymc_session.SessionInterface(memcache())
# Number of results returned by the /image/<id>/similar endpoint.
_NUM_SIMILAR_IMAGES = 6
@app.route("/image/<id>/similar", methods=["GET"])
def get_similar_images(id):
    """Return up to _NUM_SIMILAR_IMAGES posts whose cached feature vectors
    are most cosine-similar to post *id*, as a JSON list of
    {id, similarity, mime, fname} entries."""
    target_feature = _image_features_dict[int(id)]
    cursor = db().cursor()
    # Only searchable posts by non-deleted users are candidates.
    query = """
    SELECT p.id, p.mime FROM posts AS p
    JOIN users AS u ON p.user_id = u.id
    WHERE p.searchable = 1 AND u.del_flg = 0
    """
    cursor.execute(query)
    rows = cursor.fetchall()
    similarities = []
    for row in rows:
        if int(row["id"]) == int(id):
            continue
        feature = _image_features_dict[int(row["id"])]
        score = cosine_similarity(target_feature, feature)
        similarities.append((row["id"], float(score[0][0]), row["mime"]))
    # Highest similarity first; Python's sort is stable with reverse=True,
    # matching the original negated-key ordering for ties.
    similarities.sort(key=lambda entry: entry[1], reverse=True)
    ext_by_mime = {"image/jpeg": ".jpg", "image/png": ".png", "image/gif": ".gif"}
    ret = []
    for post_id, score, mime in similarities[:_NUM_SIMILAR_IMAGES]:
        item = {"id": post_id, "similarity": score, "mime": mime}
        item['fname'] = str(item['id']) + ext_by_mime.get(mime, "")
        ret.append(item)
    return flask.jsonify(ret)
# Pretrained VGG16; loaded once at import time and reused for every request.
_vgg16 = VGG16Layers(pretrained_model="./VGG_ILSVRC_16_layers.npz")
@app.route("/image/<id>/extract_feature", methods=["GET"])
def extract_feature(id):
    """Compute the VGG16 fc7 feature vector for post *id*, persist it to
    FEATURES_DIR and publish it into the in-memory cache."""
    global _image_features_dict
    cursor = db().cursor()
    cursor.execute("SELECT imgdata FROM posts where id = %s", (id,))
    row = cursor.fetchone()
    # Decode the raw image blob stored in the posts table.
    image = Image.open(io.BytesIO(row["imgdata"]))
    batch = np.array(image)[np.newaxis, :]
    feat = _vgg16.extract(batch, layers=["fc7"])["fc7"].data
    np.save(os.path.join(FEATURES_DIR, "{}.npy".format(id)), feat)
    _image_features_dict[int(id)] = feat
    return flask.jsonify({"message": "ok"})
| [
"chainer.links.VGG16Layers",
"io.BytesIO",
"sklearn.metrics.pairwise.cosine_similarity",
"os.environ.get",
"flask.jsonify",
"pymemcache.client.base.Client",
"numpy.array",
"os.path.splitext",
"pathlib.Path",
"os.path.join",
"os.listdir"
] | [((1804, 1828), 'os.listdir', 'os.listdir', (['FEATURES_DIR'], {}), '(FEATURES_DIR)\n', (1814, 1828), False, 'import os\n'), ((3438, 3496), 'chainer.links.VGG16Layers', 'VGG16Layers', ([], {'pretrained_model': '"""./VGG_ILSVRC_16_layers.npz"""'}), "(pretrained_model='./VGG_ILSVRC_16_layers.npz')\n", (3449, 3496), False, 'from chainer.links import VGG16Layers\n'), ((3408, 3426), 'flask.jsonify', 'flask.jsonify', (['ret'], {}), '(ret)\n', (3421, 3426), False, 'import flask\n'), ((3809, 3822), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3817, 3822), True, 'import numpy as np\n'), ((4054, 4076), 'flask.jsonify', 'flask.jsonify', (['ret_msg'], {}), '(ret_msg)\n', (4067, 4076), False, 'import flask\n'), ((728, 765), 'os.environ.get', 'os.environ.get', (['"""ISUCONP_DB_PASSWORD"""'], {}), "('ISUCONP_DB_PASSWORD')\n", (742, 765), False, 'import os\n'), ((1581, 1655), 'pymemcache.client.base.Client', 'MemcacheClient', (["('127.0.0.1', 11211)"], {'no_delay': '(True)', 'default_noreply': '(False)'}), "(('127.0.0.1', 11211), no_delay=True, default_noreply=False)\n", (1595, 1655), True, 'from pymemcache.client.base import Client as MemcacheClient\n'), ((1872, 1905), 'os.path.join', 'os.path.join', (['FEATURES_DIR', 'fname'], {}), '(FEATURES_DIR, fname)\n', (1884, 1905), False, 'import os\n'), ((1920, 1943), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (1936, 1943), False, 'import os\n'), ((2808, 2850), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['target_feature', 'feature'], {}), '(target_feature, feature)\n', (2825, 2850), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((3765, 3794), 'io.BytesIO', 'io.BytesIO', (["result['imgdata']"], {}), "(result['imgdata'])\n", (3775, 3794), False, 'import io\n'), ((429, 475), 'os.environ.get', 'os.environ.get', (['"""ISUCONP_DB_HOST"""', '"""localhost"""'], {}), "('ISUCONP_DB_HOST', 'localhost')\n", (443, 475), False, 'import os\n'), ((573, 614), 
'os.environ.get', 'os.environ.get', (['"""ISUCONP_DB_USER"""', '"""root"""'], {}), "('ISUCONP_DB_USER', 'root')\n", (587, 614), False, 'import os\n'), ((638, 682), 'os.environ.get', 'os.environ.get', (['"""ISUCONP_DB_NAME"""', '"""isuconp"""'], {}), "('ISUCONP_DB_NAME', 'isuconp')\n", (652, 682), False, 'import os\n'), ((505, 546), 'os.environ.get', 'os.environ.get', (['"""ISUCONP_DB_PORT"""', '"""3306"""'], {}), "('ISUCONP_DB_PORT', '3306')\n", (519, 546), False, 'import os\n'), ((2019, 2041), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (2031, 2041), False, 'import pathlib\n')] |
import numpy as np
class Loss:
    """Interface for an element-wise loss with its derivative."""

    def func(self, out, expect):
        """Return the loss of prediction `out` against target `expect`."""
        raise NotImplementedError

    def dfunc(self, out, expect):
        """Return the derivative of the loss w.r.t. `out`."""
        raise NotImplementedError


class Logistic(Loss):
    """Binary cross-entropy (logistic) loss for predictions in (0, 1).

    Both directions are numerically guarded: ``dfunc`` already padded its
    denominator with a small epsilon, but ``func`` did not, so it returned
    inf/nan for predictions of exactly 0 or 1.  ``func`` now clips the
    prediction away from the boundaries with the same epsilon; in-range
    values are unchanged.
    """

    # Guard constant; same magnitude as the denominator pad in dfunc.
    _EPS = 1e-10

    def func(self, out, expect):
        # Keep log() finite at the boundaries (out == 0 or 1).
        out = np.clip(out, self._EPS, 1 - self._EPS)
        return -(expect * np.log(out) + (1 - expect) * np.log(1 - out))

    def dfunc(self, out, expect):
        # out*(1-out) is the sigmoid derivative; the epsilon avoids
        # division by zero when out is exactly 0 or 1.
        f = out * (1 - out) + self._EPS
        return (out - expect) / f
| [
"numpy.log"
] | [((252, 263), 'numpy.log', 'np.log', (['out'], {}), '(out)\n', (258, 263), True, 'import numpy as np\n'), ((281, 296), 'numpy.log', 'np.log', (['(1 - out)'], {}), '(1 - out)\n', (287, 296), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from lineage.fitting_distribution import check_dist
# read data into DataFrame
# Columns hold G1 and G2 cell-cycle phase durations, measured in frames.
df = pd.read_excel(r'./lineage/data/G1_G2_duration_control.xlsx')
##----------------------- Preprocessing the data ------------------------##
# DataFrame into numpy array; column 0 is G1, column 1 is G2
a = df.values
G1 = a[:, 0]
G2 = a[:, 1]
# removing NaN from the G2 array (presumably padding because the G2 column
# is shorter than G1; G1 is left as-is -- verify it contains no NaN)
G2 = G2[~np.isnan(G2)]
# converting from unit of [frames] into [hours]
# every frame is every 30 minutes, so dividing the numbers by 2 gives unit of [hours]
G1 = G1 / 2
G2 = G2 / 2
## --------------------- Check for our data ------------------------ ##
# Fit candidate distributions and report goodness-of-fit p-values.
print('#### For G1 ####\n')
p_value_G1 = check_dist(G1, verbose=True)
print('\n #### For G2 ####\n')
p_value_G2 = check_dist(G2, verbose=True)
# What we get is:
#### probable distributions for G1: ####
# betaprime : p-value = 0.9496245807703753
# gamma : p-value = 0.8932369599221337
#### probable distributions for G2: ####
# betaprime : p-value = 0.060294405793795636
# gamma : p-value = 0.03292860500419339
| [
"pandas.read_excel",
"lineage.fitting_distribution.check_dist",
"numpy.isnan"
] | [((124, 183), 'pandas.read_excel', 'pd.read_excel', (['"""./lineage/data/G1_G2_duration_control.xlsx"""'], {}), "('./lineage/data/G1_G2_duration_control.xlsx')\n", (137, 183), True, 'import pandas as pd\n'), ((660, 688), 'lineage.fitting_distribution.check_dist', 'check_dist', (['G1'], {'verbose': '(True)'}), '(G1, verbose=True)\n', (670, 688), False, 'from lineage.fitting_distribution import check_dist\n'), ((733, 761), 'lineage.fitting_distribution.check_dist', 'check_dist', (['G2'], {'verbose': '(True)'}), '(G2, verbose=True)\n', (743, 761), False, 'from lineage.fitting_distribution import check_dist\n'), ((372, 384), 'numpy.isnan', 'np.isnan', (['G2'], {}), '(G2)\n', (380, 384), True, 'import numpy as np\n')] |
import numpy as np
import dynet as dy
class Bucket:
    """Runs a CCG supertagging decoder over buckets of word positions.

    One batch of sentences is flattened to a single (sentence, position)
    axis, padding positions are dropped, and the remaining positions are
    grouped into fixed-size buckets (sorted by supertag length when
    training or accelerating) so each decoder call sees a uniform batch.
    Depending on ``model``, the generative decoder ('gener'), the MLP
    classifier ('class'), or the reranking path ('rerank') is used.
    """

    def __init__(self, gener_decoder, mlp_decoder, vocab, model, beam_width=None, prob=None):
        # The two alternative decoders; which one runs is selected by `model`.
        self.gener_decoder = gener_decoder
        self.mlp_decoder = mlp_decoder
        self.vocab = vocab
        self.model = model
        # Beam size at evaluation time; 1 selects the greedy code path.
        self.beam_width = beam_width
        self.prob = prob
        # Caches cleared after each 'rerank' evaluation pass.
        self.gene_value = {}
        self.gene_output = {}

    def cal_loss(self, vectors, masks, indexes, is_train, accelerate=True,
                 dropout_x=None, dropout_h=None, ccg_dropout=None, mlp_dropout=None):
        """Compute the batch loss (training) or accuracy counts (eval).

        Args:
            vectors: list of per-position encoder states (dynet expressions).
            masks: dict with '1D' (per-position) and '2D' (per-atom) masks.
            indexes: nested dict of padded index arrays for supertags,
                atoms, lengths and words.
            is_train: True -> return a scalar dynet loss expression;
                False -> return a (good, total) pair of token counts.
            accelerate: sort positions by supertag length so buckets are
                homogeneous (faster decoding).
            dropout_x/dropout_h/ccg_dropout/mlp_dropout: dropout rates
                forwarded to the decoders (training only).
        """
        # --- flatten (sentence, position) into one batch axis ------------
        # Fortran-order reshapes so the flattened index enumerates positions
        # in the same order as the dynet batch dimension of `h` below.
        truth_batch = np.transpose(np.array(indexes['ccg']['atom_ccg']))
        truth_batch_dim = truth_batch.shape
        sent_len = truth_batch_dim[1]
        truth_batch = np.reshape(truth_batch, (truth_batch_dim[0], truth_batch_dim[1] * truth_batch_dim[2]),
                                 order='F')
        arr = np.array(masks['2D'])
        masks_batch = np.transpose(arr)
        masks_batch_dim = masks_batch.shape
        masks_batch = np.reshape(masks_batch, (masks_batch_dim[0], masks_batch_dim[1] * masks_batch_dim[2]),
                                 order='F')
        full_mask = np.transpose(np.array(masks['1D']))
        full_mask_dim = full_mask.shape
        full_mask = np.reshape(full_mask, (full_mask_dim[0] * full_mask_dim[1],), order='F')
        full_truth = np.transpose(np.array(indexes['ccg']['ccg']))
        full_truth_dim = full_truth.shape
        full_truth = np.reshape(full_truth, (full_truth_dim[0] * full_truth_dim[1],), order='F')
        full_len = np.transpose(np.array(indexes['ccg']['length']))
        full_len_dim = full_len.shape
        full_len = np.reshape(full_len, (full_len_dim[0] * full_len_dim[1],), order='F')
        full_ccg = np.transpose(np.array(indexes['ccg']['full_ccg']))
        full_ccg_dim = full_ccg.shape
        full_ccg = np.reshape(full_ccg, (full_ccg_dim[0] * full_ccg_dim[1],), order='F')
        full_word = np.transpose(np.array(indexes['word']['my_word']))
        full_word_dim = full_word.shape
        full_word = np.reshape(full_word, (full_word_dim[0] * full_word_dim[1],), order='F')
        # Stack per-position encoder states, then fold the position axis into
        # the dynet batch dimension so single positions can be selected with
        # pick_batch_elems.
        hidden_vectors = dy.concatenate_cols(vectors)
        if is_train:
            hidden_vectors = dy.dropout_dim(hidden_vectors, 1, mlp_dropout)
        h_dim = hidden_vectors.dim()
        h = dy.reshape(hidden_vectors, (h_dim[0][0], ), batch_size=h_dim[0][1]*h_dim[1])
        # --- group real (non-padding) positions into buckets --------------
        length_bucket = []
        length_list = [(idx, lens) for idx, lens in enumerate(full_len) if lens > 0]
        # remove unk: the generative decoder cannot be trained on supertags
        # outside its vocabulary.
        if is_train and self.model == 'gener':
            unk_index = self.vocab.get_token_index('*@UNK@*', 'ccg')
            unk_list = []
            for k, v in length_list:
                if full_truth[k] == unk_index:
                    unk_list.append((k, v))
            for k, v in unk_list:
                length_list.remove((k, v))
        if is_train or accelerate:
            # Sort by supertag length so every bucket decodes roughly the
            # same number of steps; the last element's length bounds the
            # whole (sorted) bucket.
            length_list_sort = sorted(length_list, key=lambda t: t[1])
            ccg_index = [idx for idx, lens in length_list_sort]
            bucket_size = 512 if self.model == 'mlp' else 256
            for beg in range(0, len(ccg_index), bucket_size):
                ins_batch = ccg_index[beg:beg + bucket_size]
                length_bucket.append((ins_batch, full_len[ins_batch[-1]]))
        else:
            # Unsorted: every bucket must be decoded up to the global
            # maximum atom-sequence length.
            ccg_index = [idx for idx, lens in length_list]
            bucket_size = 256 if self.model == 'mlp' else 256
            # if not accelerate, set bucket_size smaller
            for beg in range(0, len(ccg_index), bucket_size):
                ins_batch = ccg_index[beg:beg+bucket_size]
                length_bucket.append((ins_batch, masks_batch.shape[0]))
        if is_train:
            loss_bucket = []
            total_token = 0
        else:
            good = 0
            total = 0
        # --- run the selected decoder over each bucket --------------------
        for index_list, lens in length_bucket:
            # Slice targets and masks down to this bucket and its max length.
            atom_truth = truth_batch[:, index_list]
            atom_mask = masks_batch[:, index_list]
            atom_truth = atom_truth[:lens]
            atom_mask = atom_mask[:lens]
            ccg_truth = full_truth[index_list]
            full_ccg_batch = full_ccg[index_list]
            ccg_mask = full_mask[index_list]
            h_bucket = dy.pick_batch_elems(h, index_list)
            word_bucket = full_word[index_list]
            #prepare sent vector (sentence-level vectors currently disabled)
            # idx = np.array(index_list) if len(index_list) > 1 else np.array([index_list])
            # sent_vec = dy.pick_batch_elems(hidden_vectors, idx//sent_len)
            sent_vec = None
            # Position of each picked element inside its own sentence.
            word_index = np.array(index_list) % sent_len
            h_sent_info = (sent_vec, word_index, word_bucket)
            if is_train:
                if self.model == 'gener':
                    loss, token = self.gener_decoder(h_bucket, h_sent_info, self.vocab,
                                                     (atom_truth, atom_mask),
                                                     (ccg_truth, ccg_mask, full_ccg_batch),
                                                     True, accelerate, dropout_x,
                                                     dropout_h, ccg_dropout)
                    loss_bucket.append(loss)
                    total_token += token
                elif self.model == 'class':
                    loss, token = self.mlp_decoder(h_bucket, h_sent_info, (ccg_truth, ccg_mask), True)
                    loss_bucket.append(loss)
                    total_token += token
            else:
                # Evaluation: greedy when beam_width == 1, beam search
                # otherwise; both return (correct, total) token counts.
                if self.model == 'gener':
                    if self.beam_width == 1:
                        good_bucket, total_bucket = self.gener_decoder(h_bucket, h_sent_info,
                                                                       self.vocab,
                                                                       (atom_truth, atom_mask),
                                                                       (ccg_truth, ccg_mask, full_ccg_batch),
                                                                       False, accelerate)
                    else:
                        good_bucket, total_bucket = \
                            self.gener_decoder.beam_search(h_bucket, h_sent_info, self.vocab,
                                                           (atom_truth, atom_mask),
                                                           (ccg_truth, ccg_mask, full_ccg_batch),
                                                           self.beam_width)
                elif self.model == 'class':
                    if self.beam_width == 1:
                        good_bucket, total_bucket = self.mlp_decoder(h_bucket, h_sent_info,
                                                                     (ccg_truth, ccg_mask), False)
                    else:
                        good_bucket, total_bucket = self.mlp_decoder.beam_search(self.vocab,
                                                                                 h_bucket, h_sent_info,
                                                                                 (ccg_truth, ccg_mask),
                                                                                 self.beam_width)
                good += good_bucket
                total += total_bucket
        if is_train:
            # Token-averaged loss (+1 guards against empty buckets).
            # NOTE(review): rationale for the 0.5 scaling is not visible here.
            return dy.esum(loss_bucket)/(total_token+1)*0.5
        else:
            if self.model == 'rerank':
                # Drop the reranking caches so they do not leak across passes.
                self.gene_output = {}
                self.gene_value = {}
            return good, total
| [
"dynet.pick_batch_elems",
"dynet.concatenate_cols",
"numpy.transpose",
"dynet.reshape",
"numpy.array",
"numpy.reshape",
"dynet.dropout_dim",
"dynet.esum"
] | [((743, 844), 'numpy.reshape', 'np.reshape', (['truth_batch', '(truth_batch_dim[0], truth_batch_dim[1] * truth_batch_dim[2])'], {'order': '"""F"""'}), "(truth_batch, (truth_batch_dim[0], truth_batch_dim[1] *\n truth_batch_dim[2]), order='F')\n", (753, 844), True, 'import numpy as np\n'), ((888, 909), 'numpy.array', 'np.array', (["masks['2D']"], {}), "(masks['2D'])\n", (896, 909), True, 'import numpy as np\n'), ((932, 949), 'numpy.transpose', 'np.transpose', (['arr'], {}), '(arr)\n', (944, 949), True, 'import numpy as np\n'), ((1016, 1117), 'numpy.reshape', 'np.reshape', (['masks_batch', '(masks_batch_dim[0], masks_batch_dim[1] * masks_batch_dim[2])'], {'order': '"""F"""'}), "(masks_batch, (masks_batch_dim[0], masks_batch_dim[1] *\n masks_batch_dim[2]), order='F')\n", (1026, 1117), True, 'import numpy as np\n'), ((1263, 1335), 'numpy.reshape', 'np.reshape', (['full_mask', '(full_mask_dim[0] * full_mask_dim[1],)'], {'order': '"""F"""'}), "(full_mask, (full_mask_dim[0] * full_mask_dim[1],), order='F')\n", (1273, 1335), True, 'import numpy as np\n'), ((1466, 1541), 'numpy.reshape', 'np.reshape', (['full_truth', '(full_truth_dim[0] * full_truth_dim[1],)'], {'order': '"""F"""'}), "(full_truth, (full_truth_dim[0] * full_truth_dim[1],), order='F')\n", (1476, 1541), True, 'import numpy as np\n'), ((1667, 1736), 'numpy.reshape', 'np.reshape', (['full_len', '(full_len_dim[0] * full_len_dim[1],)'], {'order': '"""F"""'}), "(full_len, (full_len_dim[0] * full_len_dim[1],), order='F')\n", (1677, 1736), True, 'import numpy as np\n'), ((1864, 1933), 'numpy.reshape', 'np.reshape', (['full_ccg', '(full_ccg_dim[0] * full_ccg_dim[1],)'], {'order': '"""F"""'}), "(full_ccg, (full_ccg_dim[0] * full_ccg_dim[1],), order='F')\n", (1874, 1933), True, 'import numpy as np\n'), ((2066, 2138), 'numpy.reshape', 'np.reshape', (['full_word', '(full_word_dim[0] * full_word_dim[1],)'], {'order': '"""F"""'}), "(full_word, (full_word_dim[0] * full_word_dim[1],), order='F')\n", (2076, 2138), True, 
'import numpy as np\n'), ((2165, 2193), 'dynet.concatenate_cols', 'dy.concatenate_cols', (['vectors'], {}), '(vectors)\n', (2184, 2193), True, 'import dynet as dy\n'), ((2340, 2417), 'dynet.reshape', 'dy.reshape', (['hidden_vectors', '(h_dim[0][0],)'], {'batch_size': '(h_dim[0][1] * h_dim[1])'}), '(hidden_vectors, (h_dim[0][0],), batch_size=h_dim[0][1] * h_dim[1])\n', (2350, 2417), True, 'import dynet as dy\n'), ((601, 637), 'numpy.array', 'np.array', (["indexes['ccg']['atom_ccg']"], {}), "(indexes['ccg']['atom_ccg'])\n", (609, 637), True, 'import numpy as np\n'), ((1180, 1201), 'numpy.array', 'np.array', (["masks['1D']"], {}), "(masks['1D'])\n", (1188, 1201), True, 'import numpy as np\n'), ((1370, 1401), 'numpy.array', 'np.array', (["indexes['ccg']['ccg']"], {}), "(indexes['ccg']['ccg'])\n", (1378, 1401), True, 'import numpy as np\n'), ((1574, 1608), 'numpy.array', 'np.array', (["indexes['ccg']['length']"], {}), "(indexes['ccg']['length'])\n", (1582, 1608), True, 'import numpy as np\n'), ((1769, 1805), 'numpy.array', 'np.array', (["indexes['ccg']['full_ccg']"], {}), "(indexes['ccg']['full_ccg'])\n", (1777, 1805), True, 'import numpy as np\n'), ((1968, 2004), 'numpy.array', 'np.array', (["indexes['word']['my_word']"], {}), "(indexes['word']['my_word'])\n", (1976, 2004), True, 'import numpy as np\n'), ((2244, 2290), 'dynet.dropout_dim', 'dy.dropout_dim', (['hidden_vectors', '(1)', 'mlp_dropout'], {}), '(hidden_vectors, 1, mlp_dropout)\n', (2258, 2290), True, 'import dynet as dy\n'), ((4249, 4283), 'dynet.pick_batch_elems', 'dy.pick_batch_elems', (['h', 'index_list'], {}), '(h, index_list)\n', (4268, 4283), True, 'import dynet as dy\n'), ((4588, 4608), 'numpy.array', 'np.array', (['index_list'], {}), '(index_list)\n', (4596, 4608), True, 'import numpy as np\n'), ((7316, 7336), 'dynet.esum', 'dy.esum', (['loss_bucket'], {}), '(loss_bucket)\n', (7323, 7336), True, 'import dynet as dy\n')] |
'''
If you are using a GPU, write the following in ~/.theanorc.
[global]
device=gpu
floatX=float32
[blas]
ldflags=-lopenblas
[cuda]
root=/opt/apps/cuda/7.0
[nvcc]
fastmath=True
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import cPickle as pickle
import numpy as np
import theano
import theano.tensor as T
import lasagne
from lasagne import layers
from lasagne.updates import nesterov_momentum
from lasagne.objectives import categorical_crossentropy
from lasagne.nonlinearities import leaky_rectify
from lasagne.init import Orthogonal, Constant
from nolearn.lasagne import NeuralNet
from nolearn.lasagne import BatchIterator
from lasagne.nonlinearities import softmax
import bmc
# Load the SDSS training cubes and labels.  Axis 1 of X carries 5 channels
# (see the per-channel normalization loop below).
X = np.load("../data/sdss_training_images.npy")
print("X.shape = {}, X.min = {}, X.max = {}".format(X.shape, X.min(), X.max()))
y = np.load("../data/sdss_training_labels.npy")
print("y.shape = {}, y.min = {}, y.max = {}".format(y.shape, y.min(), y.max()))
def renormalize(array):
    """Linearly rescale *array* so its values span [0, 1]."""
    lo, hi = array.min(), array.max()
    return (array - lo) / (hi - lo)
# Normalize each of the 5 channels independently to [0, 1]; rescale labels
# to [0, 1] and cast to int32 (yields {0, 1} -- assumes binary labels).
for i in range(5):
    X[:, i, :, :] = renormalize(X[:, i, :, :])
y = renormalize(y).astype(np.int32)
print("X.shape = {}, X.min = {}, X.max = {}".format(X.shape, X.min(), X.max()))
print("y.shape = {}, y.min = {}, y.max = {}".format(y.shape, y.min(), y.max()))
def compute_PCA(array):
    """Eigendecompose the cross-channel covariance of an image batch.

    Args:
        array: images of shape (N, channels, height, width).

    Returns:
        (eigenvalues, eigenvectors) of the (channels x channels)
        covariance matrix over all pixels, eigenvectors as columns.

    The original implementation fitted a full sklearn PCA per batch only
    to read back the covariance, then ran np.linalg.eig on it.  np.cov
    yields the same (ddof=1) covariance directly, and eigh is the correct
    decomposition for a symmetric matrix: real, ascending eigenvalues and
    orthonormal eigenvectors, whereas eig can return complex values from
    numerical asymmetry.
    """
    nimages, nchannels, height, width = array.shape
    # (N, C, H, W) -> (N*H*W, C): one row per pixel, one column per channel.
    pixels = np.transpose(array, (0, 2, 3, 1)).reshape(-1, nchannels)
    cov = np.cov(pixels, rowvar=False)
    eigenvalues, eigenvectors = np.linalg.eigh(cov)
    return eigenvalues, eigenvectors
class AugmentedBatchIterator(BatchIterator):
    """Batch iterator with training-time augmentation: random horizontal
    flips, random crops, random 90-degree rotations, and PCA-based color
    noise.  In testing mode only a deterministic center crop is taken."""

    def __init__(self, batch_size, crop_size=8, testing=False):
        # crop_size: total pixels removed per spatial axis.
        super(AugmentedBatchIterator, self).__init__(batch_size)
        self.crop_size = crop_size
        self.testing = testing

    def transform(self, Xb, yb):
        """Return an augmented (train) or center-cropped (test) batch."""
        Xb, yb = super(AugmentedBatchIterator, self).transform(Xb, yb)
        batch_size, nchannels, width, height = Xb.shape
        if self.testing:
            # Deterministic center crop (asymmetric by one pixel for odd
            # crop sizes).
            if self.crop_size % 2 == 0:
                right = left = self.crop_size // 2
            else:
                right = self.crop_size // 2
                left = self.crop_size // 2 + 1
            X_new = Xb[:, :, right: -left, right: -left]
            return X_new, yb
        # Channel covariance eigenbasis of this batch, used for the color
        # perturbation below.
        eigenvalues, eigenvectors = compute_PCA(Xb)
        # Flip half of the images horizontally at random.
        # NOTE: the flip mutates Xb in place.
        indices = np.random.choice(batch_size, batch_size // 2, replace=False)
        Xb[indices] = Xb[indices, :, :, ::-1]
        # Crop images
        X_new = np.zeros(
            (batch_size, nchannels, width - self.crop_size, height - self.crop_size),
            dtype=np.float32
        )
        for i in range(batch_size):
            # Choose x, y crop offsets at random
            px, py = np.random.choice(self.crop_size, size=2)
            sx = slice(px, px + width - self.crop_size)
            sy = slice(py, py + height - self.crop_size)
            # Rotate 0, 90, 180, or 270 degrees at random
            nrotate = np.random.choice(4)
            # add random color perturbation along the covariance eigenbasis
            alpha = np.random.normal(loc=0.0, scale=0.5, size=5)
            noise = np.dot(eigenvectors, np.transpose(alpha * eigenvalues))
            for j in range(nchannels):
                X_new[i, j] = np.rot90(Xb[i, j, sx, sy] + noise[j], k=nrotate)
        return X_new, yb
class SaveParams(object):
    """nolearn ``on_epoch_finished`` hook: checkpoint the network whenever
    the validation loss reaches a new best value.

    Writes ``<name>.params`` (network weights) and ``<name>.history``
    (pickled training history).
    """

    def __init__(self, name):
        # Base path; ".params" / ".history" suffixes are appended on save.
        self.name = name

    def __call__(self, nn, train_history):
        if not train_history[-1]["valid_loss_best"]:
            return
        nn.save_params_to("{}.params".format(self.name))
        # pickle needs a binary stream; the original opened the file in
        # text mode ("w"), which breaks pickling on Python 3 and corrupts
        # the byte stream on Windows.
        with open("{}.history".format(self.name), "wb") as f:
            pickle.dump(train_history, f)
class UpdateLearningRate(object):
    """nolearn ``on_epoch_finished`` hook that anneals the learning rate
    linearly from *start* to *stop* over the net's ``max_epochs``."""

    def __init__(self, start=0.001, stop=0.0001):
        self.start = start
        self.stop = stop
        # Per-epoch schedule; built lazily once max_epochs is known.
        self.ls = None

    def __call__(self, nn, train_history):
        if self.ls is None:
            self.ls = np.linspace(self.start, self.stop, nn.max_epochs)
        current_epoch = train_history[-1]['epoch']
        rate = np.float32(self.ls[current_epoch - 1])
        nn.update_learning_rate.set_value(rate)
class TrainSplit(object):
    """Deterministic train/validation split for nolearn: the last
    *eval_size* samples become the validation set (no shuffling)."""

    def __init__(self, eval_size):
        self.eval_size = eval_size

    def __call__(self, X, y, net):
        if self.eval_size:
            X_train, y_train = X[:-self.eval_size], y[:-self.eval_size]
            X_valid, y_valid = X[-self.eval_size:], y[-self.eval_size:]
        else:
            # No held-out data: train on everything, validation is empty.
            # (The original referenced an undefined helper `_sldict` here,
            # raising NameError whenever eval_size was falsy.)
            X_train, y_train = X, y
            X_valid, y_valid = X[len(y):], y[len(y):]
        return X_train, X_valid, y_train, y_valid
# VGG-style convnet over 5-channel 44x44 crops (input minus the crop_size=4
# augmentation crop).  Three conv stages of 2-3 conv layers each, followed
# by two dropout-regularized 2048-unit dense layers and a 2-way softmax.
# The Orthogonal gain sqrt(2/(1+0.01^2)) is the He-style correction for
# leaky ReLU with slope 0.01, used throughout.
net = NeuralNet(
    layers=[
        ('input', layers.InputLayer),
        ('conv11', layers.Conv2DLayer),
        ('conv12', layers.Conv2DLayer),
        ('pool1', layers.MaxPool2DLayer),
        ('conv21', layers.Conv2DLayer),
        ('conv22', layers.Conv2DLayer),
        ('conv23', layers.Conv2DLayer),
        ('pool2', layers.MaxPool2DLayer),
        ('conv31', layers.Conv2DLayer),
        ('conv32', layers.Conv2DLayer),
        ('conv33', layers.Conv2DLayer),
        ('pool3', layers.MaxPool2DLayer),
        ('dropout4', layers.DropoutLayer),
        ('hidden4', layers.DenseLayer),
        ('dropout5', layers.DropoutLayer),
        ('hidden5', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ],
    input_shape=(None, 5, 44, 44),
    conv11_num_filters=32, conv11_filter_size=(5, 5),
    conv11_nonlinearity=leaky_rectify,
    conv11_W=Orthogonal(np.sqrt(2 / (1 + 0.01**2))), conv11_b=Constant(0.1),
    conv12_num_filters=32, conv12_filter_size=(3, 3), conv12_pad=1,
    conv12_nonlinearity=leaky_rectify,
    conv12_W=Orthogonal(np.sqrt(2 / (1 + 0.01**2))), conv12_b=Constant(0.1),
    pool1_pool_size=(2, 2),
    conv21_num_filters=64, conv21_filter_size=(3, 3), conv21_pad=1,
    conv21_nonlinearity=leaky_rectify,
    conv21_W=Orthogonal(np.sqrt(2 / (1 + 0.01**2))), conv21_b=Constant(0.1),
    conv22_num_filters=64, conv22_filter_size=(3, 3), conv22_pad=1,
    conv22_nonlinearity=leaky_rectify,
    conv22_W=Orthogonal(np.sqrt(2 / (1 + 0.01**2))), conv22_b=Constant(0.1),
    conv23_num_filters=64, conv23_filter_size=(3, 3), conv23_pad=1,
    conv23_nonlinearity=leaky_rectify,
    conv23_W=Orthogonal(np.sqrt(2 / (1 + 0.01**2))), conv23_b=Constant(0.1),
    pool2_pool_size=(2, 2),
    conv31_num_filters=128, conv31_filter_size=(3, 3), conv31_pad=1,
    conv31_nonlinearity=leaky_rectify,
    conv31_W=Orthogonal(np.sqrt(2 / (1 + 0.01**2))), conv31_b=Constant(0.1),
    conv32_num_filters=128, conv32_filter_size=(3, 3), conv32_pad=1,
    conv32_nonlinearity=leaky_rectify,
    conv32_W=Orthogonal(np.sqrt(2 / (1 + 0.01**2))), conv32_b=Constant(0.1),
    conv33_num_filters=128, conv33_filter_size=(3, 3), conv33_pad=1,
    conv33_nonlinearity=leaky_rectify,
    conv33_W=Orthogonal(np.sqrt(2 / (1 + 0.01**2))), conv33_b=Constant(0.1),
    pool3_pool_size=(2, 2),
    hidden4_num_units=2048,
    hidden4_nonlinearity=leaky_rectify,
    hidden4_W=Orthogonal(np.sqrt(2 / (1 + 0.01**2))), hidden4_b=Constant(0.01),
    dropout4_p=0.5,
    hidden5_num_units=2048,
    hidden5_nonlinearity=leaky_rectify,
    hidden5_W=Orthogonal(np.sqrt(2 / (1 + 0.01**2))), hidden5_b=Constant(0.01),
    dropout5_p=0.5,
    output_num_units=2,
    output_nonlinearity=softmax,
    # Shared variable so UpdateLearningRate can anneal it each epoch.
    update_learning_rate=theano.shared(np.float32(0.003)),
    update_momentum=0.9,
    objective_loss_function=categorical_crossentropy,
    regression=False,
    max_epochs=750,
    batch_iterator_train=AugmentedBatchIterator(batch_size=128, crop_size=4),
    batch_iterator_test=AugmentedBatchIterator(batch_size=128, crop_size=4, testing=True),
    on_epoch_finished=[
        UpdateLearningRate(start=0.003, stop=0.0001),
        SaveParams("net")
    ],
    verbose=2,
    # Last 15000 samples held out (matches the manual X_valid slice below).
    train_split=TrainSplit(eval_size=15000)
)
# Train, then report the best validation loss seen during training.
net.fit(X, y)
best_valid_loss = min([row['valid_loss'] for row in net.train_history_])
print("Best valid loss: {}".format(best_valid_loss))
# Re-slice the held-out tail (the same 15000 rows TrainSplit used) for the
# model-combination stage.  NOTE(review): X was already normalized above;
# renormalizing the slice again only leaves it unchanged if the slice's
# min/max equal the global ones -- verify.
X_valid = X[-15000:]
y_valid = y[-15000:]
for i in range(5):
    X_valid[:, i, :, :] = renormalize(X_valid[:, i, :, :])
y_valid = renormalize(y_valid).astype(np.int32)
# One prediction column per test-time augmentation variant (4*4*4 = 64).
y_pred_valid = np.zeros((len(y_valid), 64))
class AugmentedBatchIterator(BatchIterator):
    """Redefinition of the earlier AugmentedBatchIterator that adds a
    deterministic *testing* mode: a fixed crop offset (startx, starty) and
    a fixed rotation, with no flips and no color noise, so predictions can
    be averaged over all crop/rotation variants.  The *validation* flag
    takes over the old center-crop behavior."""

    def __init__(self, batch_size, crop_size=8, validation=False, testing=False, startx=None, starty=None, rotate=None):
        super(AugmentedBatchIterator, self).__init__(batch_size)
        self.crop_size = crop_size
        self.validation = validation
        self.testing = testing
        # Fixed crop offset and rotation, only used when testing=True.
        self.startx, self.starty = startx, starty
        self.rotate = rotate

    def transform(self, Xb, yb):
        """Return a cropped batch: center crop (validation), fixed
        crop/rotation (testing), or random augmentation (training)."""
        Xb, yb = super(AugmentedBatchIterator, self).transform(Xb, yb)
        batch_size, nchannels, width, height = Xb.shape
        if self.validation:
            # Deterministic center crop (asymmetric by one pixel for odd
            # crop sizes).
            if self.crop_size % 2 == 0:
                right = left = self.crop_size // 2
            else:
                right = self.crop_size // 2
                left = self.crop_size // 2 + 1
            X_new = Xb[:, :, right: -left, right: -left]
            return X_new, yb
        if not self.testing:
            eigenvalues, eigenvectors = compute_PCA(Xb)
            # Flip half of the images horizontally at random (mutates Xb).
            indices = np.random.choice(batch_size, batch_size // 2, replace=False)
            Xb[indices] = Xb[indices, :, :, ::-1]
        # Crop images
        X_new = np.zeros(
            (batch_size, nchannels, width - self.crop_size, height - self.crop_size),
            dtype=np.float32
        )
        for i in range(batch_size):
            if self.testing:
                # Fixed crop offset for reproducible test-time variants.
                px, py = self.startx, self.starty
            else:
                # Choose x, y crop offsets at random
                px, py = np.random.choice(self.crop_size, size=2)
            sx = slice(px, px + width - self.crop_size)
            sy = slice(py, py + height - self.crop_size)
            # Rotate 0, 90, 180, or 270 degrees (fixed when testing)
            if self.testing:
                nrotate = self.rotate
                noise = np.zeros(nchannels)
            else:
                nrotate = np.random.choice(4)
                # add random color perturbation along the covariance eigenbasis
                alpha = np.random.normal(loc=0.0, scale=0.5, size=5)
                noise = np.dot(eigenvectors, np.transpose(alpha * eigenvalues))
            for j in range(nchannels):
                X_new[i, j] = np.rot90(Xb[i, j, sx, sy] + noise[j], k=nrotate)
        return X_new, yb
# --- Test-time augmentation + model combination -------------------------
# Predict with every deterministic crop-offset/rotation variant
# (4 x 4 x 4 = 64 columns); fit the Bayesian model-combination weights on
# the validation predictions, then apply them to the test predictions.
count = 0
print("Starting model combination...")
for startx in range(4):
    for starty in range(4):
        for rotate in range(4):
            net.batch_iterator_test=AugmentedBatchIterator(
                batch_size=128,
                crop_size=4,
                testing=True,
                startx=startx,
                starty=starty,
                rotate=rotate
            )
            # Probability of the positive class for this variant.
            y_pred_valid[:, count] = net.predict_proba(X_valid)[:, 1]
            count += 1
            print("Iteration: {} / 64".format(count))
combine = bmc.BMC()
combine.fit(y_pred_valid, y_valid)
print("Validation set done.")
# Same normalization pipeline as for the training data.
X_test = np.load("../data/sdss_test_images.npy")
y_test = np.load("../data/sdss_test_labels.npy")
for i in range(5):
    X_test[:, i, :, :] = renormalize(X_test[:, i, :, :])
y_test = renormalize(y_test).astype(np.int32)
y_pred_test = np.zeros((len(y_test), 64))
count = 0
for startx in range(4):
    for starty in range(4):
        for rotate in range(4):
            net.batch_iterator_test=AugmentedBatchIterator(
                batch_size=128,
                crop_size=4,
                testing=True,
                startx=startx,
                starty=starty,
                rotate=rotate
            )
            y_pred_test[:, count] = net.predict_proba(X_test)[:, 1]
            count += 1
            print("Iteration: {} / 64".format(count))
# Combine the 64 per-variant predictions with the fitted BMC weights.
y_pred = combine.predict_proba(y_pred_test)
np.save("sdss_convnet_pred.npy", y_pred)
print("Testing set done.")
| [
"numpy.load",
"numpy.save",
"bmc.BMC",
"numpy.float32",
"lasagne.init.Constant",
"numpy.transpose",
"numpy.zeros",
"numpy.linalg.eig",
"cPickle.dump",
"numpy.rot90",
"sklearn.decomposition.PCA",
"numpy.random.normal",
"numpy.random.choice",
"numpy.linspace",
"numpy.sqrt"
] | [((835, 878), 'numpy.load', 'np.load', (['"""../data/sdss_training_images.npy"""'], {}), "('../data/sdss_training_images.npy')\n", (842, 878), True, 'import numpy as np\n'), ((964, 1007), 'numpy.load', 'np.load', (['"""../data/sdss_training_labels.npy"""'], {}), "('../data/sdss_training_labels.npy')\n", (971, 1007), True, 'import numpy as np\n'), ((11858, 11867), 'bmc.BMC', 'bmc.BMC', ([], {}), '()\n', (11865, 11867), False, 'import bmc\n'), ((11944, 11983), 'numpy.load', 'np.load', (['"""../data/sdss_test_images.npy"""'], {}), "('../data/sdss_test_images.npy')\n", (11951, 11983), True, 'import numpy as np\n'), ((11993, 12032), 'numpy.load', 'np.load', (['"""../data/sdss_test_labels.npy"""'], {}), "('../data/sdss_test_labels.npy')\n", (12000, 12032), True, 'import numpy as np\n'), ((12747, 12787), 'numpy.save', 'np.save', (['"""sdss_convnet_pred.npy"""', 'y_pred'], {}), "('sdss_convnet_pred.npy', y_pred)\n", (12754, 12787), True, 'import numpy as np\n'), ((1539, 1572), 'numpy.transpose', 'np.transpose', (['array', '(0, 2, 3, 1)'], {}), '(array, (0, 2, 3, 1))\n', (1551, 1572), True, 'import numpy as np\n'), ((2008, 2013), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (2011, 2013), False, 'from sklearn.decomposition import PCA\n'), ((2109, 2127), 'numpy.linalg.eig', 'np.linalg.eig', (['cov'], {}), '(cov)\n', (2122, 2127), True, 'import numpy as np\n'), ((3028, 3088), 'numpy.random.choice', 'np.random.choice', (['batch_size', '(batch_size // 2)'], {'replace': '(False)'}), '(batch_size, batch_size // 2, replace=False)\n', (3044, 3088), True, 'import numpy as np\n'), ((3182, 3287), 'numpy.zeros', 'np.zeros', (['(batch_size, nchannels, width - self.crop_size, height - self.crop_size)'], {'dtype': 'np.float32'}), '((batch_size, nchannels, width - self.crop_size, height - self.\n crop_size), dtype=np.float32)\n', (3190, 3287), True, 'import numpy as np\n'), ((4793, 4823), 'numpy.float32', 'np.float32', (['self.ls[epoch - 1]'], {}), '(self.ls[epoch - 1])\n', 
(4803, 4823), True, 'import numpy as np\n'), ((6302, 6315), 'lasagne.init.Constant', 'Constant', (['(0.1)'], {}), '(0.1)\n', (6310, 6315), False, 'from lasagne.init import Orthogonal, Constant\n'), ((6491, 6504), 'lasagne.init.Constant', 'Constant', (['(0.1)'], {}), '(0.1)\n', (6499, 6504), False, 'from lasagne.init import Orthogonal, Constant\n'), ((6705, 6718), 'lasagne.init.Constant', 'Constant', (['(0.1)'], {}), '(0.1)\n', (6713, 6718), False, 'from lasagne.init import Orthogonal, Constant\n'), ((6894, 6907), 'lasagne.init.Constant', 'Constant', (['(0.1)'], {}), '(0.1)\n', (6902, 6907), False, 'from lasagne.init import Orthogonal, Constant\n'), ((7079, 7092), 'lasagne.init.Constant', 'Constant', (['(0.1)'], {}), '(0.1)\n', (7087, 7092), False, 'from lasagne.init import Orthogonal, Constant\n'), ((7294, 7307), 'lasagne.init.Constant', 'Constant', (['(0.1)'], {}), '(0.1)\n', (7302, 7307), False, 'from lasagne.init import Orthogonal, Constant\n'), ((7484, 7497), 'lasagne.init.Constant', 'Constant', (['(0.1)'], {}), '(0.1)\n', (7492, 7497), False, 'from lasagne.init import Orthogonal, Constant\n'), ((7674, 7687), 'lasagne.init.Constant', 'Constant', (['(0.1)'], {}), '(0.1)\n', (7682, 7687), False, 'from lasagne.init import Orthogonal, Constant\n'), ((7851, 7865), 'lasagne.init.Constant', 'Constant', (['(0.01)'], {}), '(0.01)\n', (7859, 7865), False, 'from lasagne.init import Orthogonal, Constant\n'), ((8024, 8038), 'lasagne.init.Constant', 'Constant', (['(0.01)'], {}), '(0.01)\n', (8032, 8038), False, 'from lasagne.init import Orthogonal, Constant\n'), ((10076, 10136), 'numpy.random.choice', 'np.random.choice', (['batch_size', '(batch_size // 2)'], {'replace': '(False)'}), '(batch_size, batch_size // 2, replace=False)\n', (10092, 10136), True, 'import numpy as np\n'), ((10222, 10327), 'numpy.zeros', 'np.zeros', (['(batch_size, nchannels, width - self.crop_size, height - self.crop_size)'], {'dtype': 'np.float32'}), '((batch_size, nchannels, width - self.crop_size, 
height - self.\n crop_size), dtype=np.float32)\n', (10230, 10327), True, 'import numpy as np\n'), ((3428, 3468), 'numpy.random.choice', 'np.random.choice', (['self.crop_size'], {'size': '(2)'}), '(self.crop_size, size=2)\n', (3444, 3468), True, 'import numpy as np\n'), ((3692, 3711), 'numpy.random.choice', 'np.random.choice', (['(4)'], {}), '(4)\n', (3708, 3711), True, 'import numpy as np\n'), ((3789, 3833), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(0.5)', 'size': '(5)'}), '(loc=0.0, scale=0.5, size=5)\n', (3805, 3833), True, 'import numpy as np\n'), ((4679, 4728), 'numpy.linspace', 'np.linspace', (['self.start', 'self.stop', 'nn.max_epochs'], {}), '(self.start, self.stop, nn.max_epochs)\n', (4690, 4728), True, 'import numpy as np\n'), ((6264, 6292), 'numpy.sqrt', 'np.sqrt', (['(2 / (1 + 0.01 ** 2))'], {}), '(2 / (1 + 0.01 ** 2))\n', (6271, 6292), True, 'import numpy as np\n'), ((6453, 6481), 'numpy.sqrt', 'np.sqrt', (['(2 / (1 + 0.01 ** 2))'], {}), '(2 / (1 + 0.01 ** 2))\n', (6460, 6481), True, 'import numpy as np\n'), ((6667, 6695), 'numpy.sqrt', 'np.sqrt', (['(2 / (1 + 0.01 ** 2))'], {}), '(2 / (1 + 0.01 ** 2))\n', (6674, 6695), True, 'import numpy as np\n'), ((6856, 6884), 'numpy.sqrt', 'np.sqrt', (['(2 / (1 + 0.01 ** 2))'], {}), '(2 / (1 + 0.01 ** 2))\n', (6863, 6884), True, 'import numpy as np\n'), ((7041, 7069), 'numpy.sqrt', 'np.sqrt', (['(2 / (1 + 0.01 ** 2))'], {}), '(2 / (1 + 0.01 ** 2))\n', (7048, 7069), True, 'import numpy as np\n'), ((7256, 7284), 'numpy.sqrt', 'np.sqrt', (['(2 / (1 + 0.01 ** 2))'], {}), '(2 / (1 + 0.01 ** 2))\n', (7263, 7284), True, 'import numpy as np\n'), ((7446, 7474), 'numpy.sqrt', 'np.sqrt', (['(2 / (1 + 0.01 ** 2))'], {}), '(2 / (1 + 0.01 ** 2))\n', (7453, 7474), True, 'import numpy as np\n'), ((7636, 7664), 'numpy.sqrt', 'np.sqrt', (['(2 / (1 + 0.01 ** 2))'], {}), '(2 / (1 + 0.01 ** 2))\n', (7643, 7664), True, 'import numpy as np\n'), ((7812, 7840), 'numpy.sqrt', 'np.sqrt', (['(2 / (1 + 0.01 
** 2))'], {}), '(2 / (1 + 0.01 ** 2))\n', (7819, 7840), True, 'import numpy as np\n'), ((7985, 8013), 'numpy.sqrt', 'np.sqrt', (['(2 / (1 + 0.01 ** 2))'], {}), '(2 / (1 + 0.01 ** 2))\n', (7992, 8013), True, 'import numpy as np\n'), ((8158, 8175), 'numpy.float32', 'np.float32', (['(0.003)'], {}), '(0.003)\n', (8168, 8175), True, 'import numpy as np\n'), ((3875, 3908), 'numpy.transpose', 'np.transpose', (['(alpha * eigenvalues)'], {}), '(alpha * eigenvalues)\n', (3887, 3908), True, 'import numpy as np\n'), ((3992, 4040), 'numpy.rot90', 'np.rot90', (['(Xb[i, j, sx, sy] + noise[j])'], {'k': 'nrotate'}), '(Xb[i, j, sx, sy] + noise[j], k=nrotate)\n', (4000, 4040), True, 'import numpy as np\n'), ((4402, 4431), 'cPickle.dump', 'pickle.dump', (['train_history', 'f'], {}), '(train_history, f)\n', (4413, 4431), True, 'import cPickle as pickle\n'), ((10573, 10613), 'numpy.random.choice', 'np.random.choice', (['self.crop_size'], {'size': '(2)'}), '(self.crop_size, size=2)\n', (10589, 10613), True, 'import numpy as np\n'), ((10878, 10897), 'numpy.zeros', 'np.zeros', (['nchannels'], {}), '(nchannels)\n', (10886, 10897), True, 'import numpy as np\n'), ((10942, 10961), 'numpy.random.choice', 'np.random.choice', (['(4)'], {}), '(4)\n', (10958, 10961), True, 'import numpy as np\n'), ((11034, 11078), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(0.5)', 'size': '(5)'}), '(loc=0.0, scale=0.5, size=5)\n', (11050, 11078), True, 'import numpy as np\n'), ((11229, 11277), 'numpy.rot90', 'np.rot90', (['(Xb[i, j, sx, sy] + noise[j])'], {'k': 'nrotate'}), '(Xb[i, j, sx, sy] + noise[j], k=nrotate)\n', (11237, 11277), True, 'import numpy as np\n'), ((11124, 11157), 'numpy.transpose', 'np.transpose', (['(alpha * eigenvalues)'], {}), '(alpha * eigenvalues)\n', (11136, 11157), True, 'import numpy as np\n')] |
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from __future__ import annotations
import logging
import math
import time
from dataclasses import dataclass, field
from typing import List, Optional, Sequence, Set
import SimpleITK as sitk
import numpy as np
import torch
import torch.nn.functional as F
from azureml.core import Run
from InnerEye.Azure.azure_util import get_run_context_or_default
from InnerEye.Common.metrics_constants import LoggingColumns, MetricType
from InnerEye.Common.type_annotations import DictStrFloat, TupleFloat3
from InnerEye.ML.common import ModelExecutionMode
from InnerEye.ML.config import BACKGROUND_CLASS_NAME
from InnerEye.ML.metrics_dict import DataframeLogger, INTERNAL_TO_LOGGING_COLUMN_NAMES, MetricsDict, \
ScalarMetricsDict
from InnerEye.ML.scalar_config import ScalarLoss
from InnerEye.ML.utils.image_util import binaries_from_multi_label_array, is_binary_array
from InnerEye.ML.utils.io_util import reverse_tuple_float3
from InnerEye.ML.utils.metrics_util import binary_classification_accuracy, mean_absolute_error, r2_score
from InnerEye.ML.utils.ml_util import check_size_matches
from InnerEye.ML.utils.sequence_utils import get_masked_model_outputs_and_labels
MAX_ITEM_LOAD_TIME_SEC = 0.5
MAX_LOAD_TIME_WARNINGS = 3
MAX_LOAD_TIME_EPOCHS = 5
@dataclass(frozen=True)
class InferenceMetrics:
    """
    Common base class for all inference metric containers. Carries no data itself;
    it only serves as a shared supertype.
    """
@dataclass(frozen=True)
class InferenceMetricsForClassification(InferenceMetrics):
    """
    Stores a dictionary mapping from epoch number to the metrics that were achieved in that epoch.
    """
    # The metrics collected during inference. Per the class docstring this maps
    # epoch number to per-epoch metrics; see MetricsDict for the container semantics.
    metrics: MetricsDict
@dataclass(frozen=True)
class InferenceMetricsForSegmentation(InferenceMetrics):
    """
    Stores metrics for segmentation models, per execution mode and epoch.
    """
    # The dataset split (train/val/test) on which the metrics were computed.
    data_split: ModelExecutionMode
    # The aggregate Dice score achieved on that split.
    metrics: float

    def get_metrics_log_key(self) -> str:
        """
        Gets a string name for logging the metrics specific to the execution mode (train, val, test).
        :return: A log key of the form "InferenceMetrics_<split>".
        """
        return "InferenceMetrics_" + self.data_split.value

    def log_metrics(self, run_context: Run = None) -> None:
        """
        Logs the Dice metric as a table to the provided run, or to the current run context if None is given.
        :param run_context: Run to which the metrics should be logged; defaults to the current run context.
        """
        run_context = get_run_context_or_default(run_context)
        run_context.log_table(name=self.get_metrics_log_key(), value={"Dice": self.metrics})
@dataclass
class EpochTimers:
    """
    Contains all information necessary to compute the IO metrics: Epoch times, batch times, loading times.
    """
    # Use default_factory so that each instance is stamped with the time of its own
    # construction. A plain `= time.time()` default is evaluated only once, when the
    # class is defined, so all instances created later would share the stale
    # module-import timestamp until reset() is called.
    epoch_start_time: float = field(default_factory=time.time)
    epoch_end_time: float = field(default_factory=time.time)
    batch_start_time: float = field(default_factory=time.time)
    # Number of load-time warnings printed so far in the current epoch.
    num_load_time_warnings: int = 0
    # Number of minibatches whose load time exceeded MAX_ITEM_LOAD_TIME_SEC.
    num_load_time_exceeded: int = 0
    # Total seconds of load time accumulated by batches that exceeded the threshold.
    total_extra_load_time: float = 0.0
    # Total seconds spent loading minibatches in the current epoch.
    total_load_time: float = 0.0
    # Number of minibatches processed in the current epoch.
    num_batches: int = 0
    # Epochs in which at least one load-time warning was issued. Deliberately never reset.
    load_time_warning_epochs: Set[int] = field(default_factory=set)

    def reset(self) -> None:
        """
        Resets all timers to the current time, and all counters to 0. The set of epochs for which warnings about
        load time were produced will not be reset.
        """
        current_time = time.time()
        self.epoch_start_time = current_time
        self.epoch_end_time = current_time
        self.batch_start_time = current_time
        self.num_load_time_warnings = 0
        self.num_load_time_exceeded = 0
        self.total_extra_load_time = 0.0
        self.total_load_time = 0.0
        self.num_batches = 0

    def epoch_end(self) -> None:
        """
        Stores the present time in the epoch_end_time field of the object.
        """
        self.epoch_end_time = time.time()

    @property
    def total_epoch_time(self) -> float:
        """
        Gets the time in seconds between epoch start and epoch end.
        """
        return self.epoch_end_time - self.epoch_start_time

    @property
    def should_warn_in_this_epoch(self) -> bool:
        """
        Returns True if warnings about loading time should be printed in the present epoch. Returns False if
        this warning has been printed already in more than MAX_LOAD_TIME_EPOCHS epochs.
        :return: Whether load-time warnings may still be emitted.
        """
        return len(self.load_time_warning_epochs) <= MAX_LOAD_TIME_EPOCHS

    def batch_start(self, batch_index: int, epoch: int, message_prefix: str) -> float:
        """
        Called when a minibatch of data has been loaded. This computes the time it took to load the minibatch,
        and adds it to the internal bookkeeping.
        :param batch_index: Index of the minibatch within the epoch (0 for the first batch).
        :param epoch: The current epoch number, used to track in which epochs warnings were printed.
        :param message_prefix: Prefix for any log messages written here.
        :return: The time it took to load the minibatch, in seconds.
        """
        item_finish_time = time.time()
        item_load_time = item_finish_time - self.batch_start_time
        self.total_load_time += item_load_time
        # Having slow minibatch loading is OK in the very first batch of the every epoch, where processes
        # are spawned. Later, the load time should be zero.
        if batch_index == 0:
            logging.info(f"{message_prefix}: Loaded the first minibatch of data in {item_load_time:0.2f} sec.")
        elif item_load_time > MAX_ITEM_LOAD_TIME_SEC:
            self.load_time_warning_epochs.add(epoch)
            self.num_load_time_exceeded += 1
            self.total_extra_load_time += item_load_time
            # Cap the number of warnings per epoch and the number of epochs that warn, to avoid log spam.
            if self.num_load_time_warnings < MAX_LOAD_TIME_WARNINGS and self.should_warn_in_this_epoch:
                logging.warning(f"{message_prefix}: Loading minibatch {batch_index} took {item_load_time:0.2f} sec. "
                                "This can mean that there are not enough data loader worker processes, or that there "
                                "is a performance problem in loading. This warning will be printed at most "
                                f"{MAX_LOAD_TIME_WARNINGS} times in at most {MAX_LOAD_TIME_EPOCHS} epochs.")
                self.num_load_time_warnings += 1
        return item_load_time

    def batch_end(self) -> float:
        """
        Called after a minibatch has been processed (training or validation step completed). Returns the time it took
        to process the current batch (including loading).
        :return: The time it took to process the current batch, in seconds.
        """
        current_time = time.time()
        elapsed = current_time - self.batch_start_time
        self.batch_start_time = current_time
        self.num_batches += 1
        return elapsed
def surface_distance(seg: sitk.Image, reference_segmentation: sitk.Image) -> float:
    """
    Symmetric surface distances taking into account the image spacing
    https://github.com/InsightSoftwareConsortium/SimpleITK-Notebooks/blob/master/Python/34_Segmentation_Evaluation.ipynb
    :param seg: mask 1
    :param reference_segmentation: mask 2
    :return: mean distance
    """
    statistics_image_filter = sitk.StatisticsImageFilter()
    # Get the number of pixels in the reference surface by counting all pixels that are 1.
    reference_surface = sitk.LabelContour(reference_segmentation)
    statistics_image_filter.Execute(reference_surface)
    num_reference_surface_pixels = int(statistics_image_filter.GetSum())
    # Unsigned distance-to-surface map of the reference mask. The original code recomputed
    # reference_surface a second time here; that identical call has been removed.
    reference_distance_map = sitk.Abs(
        sitk.SignedMaurerDistanceMap(reference_segmentation, squaredDistance=False, useImageSpacing=True))
    # Symmetric surface distance measures
    segmented_distance_map = sitk.Abs(sitk.SignedMaurerDistanceMap(seg, squaredDistance=False, useImageSpacing=True))
    segmented_surface = sitk.LabelContour(seg)
    # Multiply the binary surface segmentations with the distance maps. The resulting distance
    # maps contain non-zero values only on the surface (they can also contain zero on the surface)
    seg2ref_distance_map = reference_distance_map * sitk.Cast(segmented_surface, sitk.sitkFloat32)
    ref2seg_distance_map = segmented_distance_map * sitk.Cast(reference_surface, sitk.sitkFloat32)
    # Get the number of pixels in the segmented surface by counting all pixels that are 1.
    statistics_image_filter.Execute(segmented_surface)
    num_segmented_surface_pixels = int(statistics_image_filter.GetSum())
    seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map)
    seg2ref_distances = _add_zero_distances(num_segmented_surface_pixels, seg2ref_distance_map_arr)
    ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map)
    ref2seg_distances = _add_zero_distances(num_reference_surface_pixels, ref2seg_distance_map_arr)
    # Average over both directions to make the measure symmetric.
    all_surface_distances = seg2ref_distances + ref2seg_distances
    return np.mean(all_surface_distances).item()
def _add_zero_distances(num_segmented_surface_pixels: int, seg2ref_distance_map_arr: np.ndarray) -> List[float]:
    """
    Collects all non-zero distances from the distance map, and pads the result with zeros
    so that the returned list has one entry per surface pixel.
    :param num_segmented_surface_pixels: Total number of surface pixels; determines the output length.
    :param seg2ref_distance_map_arr: Distance map array; zero entries are off the surface.
    :return: List of distances, augmented with zeros.
    """
    nonzero_mask = seg2ref_distance_map_arr != 0
    distances = list(seg2ref_distance_map_arr[nonzero_mask])
    num_missing = num_segmented_surface_pixels - len(distances)
    distances.extend(np.zeros(num_missing))
    return distances
def calculate_metrics_per_class(segmentation: np.ndarray,
                                ground_truth: np.ndarray,
                                ground_truth_ids: List[str],
                                voxel_spacing: TupleFloat3,
                                patient_id: Optional[int] = None) -> MetricsDict:
    """
    Calculate the dice for all foreground structures (the background class is completely ignored).
    Returns a MetricsDict with metrics for each of the foreground
    structures. Metrics are NaN if both ground truth and prediction are all zero for a class.
    :param ground_truth_ids: The names of all foreground classes.
    :param segmentation: predictions multi-value array with dimensions: [Z x Y x X]
    :param ground_truth: ground truth binary array with dimensions: [C x Z x Y x X]
    :param voxel_spacing: voxel_spacing in 3D Z x Y x X
    :param patient_id: for logging
    :raises ValueError: If the number of class names does not match the label tensor, or if
        the ground truth or prediction masks are not binary.
    """
    number_of_classes = ground_truth.shape[0]
    # ground_truth_ids covers only foreground classes; channel 0 of ground_truth is background.
    if len(ground_truth_ids) != (number_of_classes - 1):
        raise ValueError(f"Received {len(ground_truth_ids)} foreground class names, but "
                         f"the label tensor indicates that there are {number_of_classes - 1} classes.")
    # Split the multi-value prediction into one binary mask per class (index 0 is background).
    binaries = binaries_from_multi_label_array(segmentation, number_of_classes)
    all_classes_are_binary = [is_binary_array(ground_truth[label_id]) for label_id in range(ground_truth.shape[0])]
    if not np.all(all_classes_are_binary):
        raise ValueError("Ground truth values should be 0 or 1")
    # The sitk filter objects are created once and re-used for every class.
    overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()
    hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
    metrics = MetricsDict(hues=ground_truth_ids)
    for i, prediction in enumerate(binaries):
        # Skip the background class: only foreground structures are reported.
        if i == 0:
            continue
        check_size_matches(prediction, ground_truth[i], arg1_name="prediction", arg2_name="ground_truth")
        if not is_binary_array(prediction):
            raise ValueError("Predictions values should be 0 or 1")
        # simpleitk returns a Dice score of 0 if both ground truth and prediction are all zeros.
        # We want to be able to fish out those cases, and treat them specially later.
        prediction_zero = np.all(prediction == 0)
        gt_zero = np.all(ground_truth[i] == 0)
        # All metrics stay NaN when both prediction and ground truth are empty for this class.
        dice = mean_surface_distance = hausdorff_distance = math.nan
        if not (prediction_zero and gt_zero):
            # The spacing tuple is reversed via reverse_tuple_float3 — presumably because
            # SimpleITK expects X x Y x Z ordering while the input is Z x Y x X; confirm against callers.
            prediction_image = sitk.GetImageFromArray(prediction.astype(np.uint8))
            prediction_image.SetSpacing(sitk.VectorDouble(reverse_tuple_float3(voxel_spacing)))
            ground_truth_image = sitk.GetImageFromArray(ground_truth[i].astype(np.uint8))
            ground_truth_image.SetSpacing(sitk.VectorDouble(reverse_tuple_float3(voxel_spacing)))
            overlap_measures_filter.Execute(prediction_image, ground_truth_image)
            dice = overlap_measures_filter.GetDiceCoefficient()
            if prediction_zero or gt_zero:
                # Exactly one of the two masks is empty: surface-based distances are undefined, use infinity.
                hausdorff_distance = mean_surface_distance = math.inf
            else:
                # Distance computations can fail inside sitk; log and keep NaN rather than aborting the patient.
                try:
                    hausdorff_distance_filter.Execute(prediction_image, ground_truth_image)
                    hausdorff_distance = hausdorff_distance_filter.GetHausdorffDistance()
                except Exception as e:
                    logging.warning("Cannot calculate Hausdorff distance for "
                                    f"structure {i} of patient {patient_id}: {e}")
                try:
                    mean_surface_distance = surface_distance(prediction_image, ground_truth_image)
                except Exception as e:
                    logging.warning(f"Cannot calculate mean distance for structure {i} of patient {patient_id}: {e}")
        logging.debug(f"Patient {patient_id}, class {i} has Dice score {dice}")
        def add_metric(metric_type: MetricType, value: float) -> None:
            # Hue index is i - 1 because ground_truth_ids has no entry for the background class.
            metrics.add_metric(metric_type, value, skip_nan_when_averaging=True, hue=ground_truth_ids[i - 1])
        add_metric(MetricType.DICE, dice)
        add_metric(MetricType.HAUSDORFF_mm, hausdorff_distance)
        add_metric(MetricType.MEAN_SURFACE_DIST_mm, mean_surface_distance)
    return metrics
def compute_dice_across_patches(segmentation: torch.Tensor,
                                ground_truth: torch.Tensor,
                                allow_multiple_classes_for_each_pixel: bool = False) -> torch.Tensor:
    """
    Computes the Dice scores for all classes across all patches in the arguments.
    :param segmentation: Tensor containing class ids predicted by a model.
    :param ground_truth: One-hot encoded torch tensor containing ground-truth label ids.
    :param allow_multiple_classes_for_each_pixel: If set to False, ground-truth tensor has
    to contain only one foreground label for each pixel.
    :return A torch tensor of size (Patches, Classes) with the Dice scores. Dice scores are computed for
    all classes including the background class at index 0.
    """
    check_size_matches(segmentation, ground_truth, 4, 5, [0, -3, -2, -1],
                       arg1_name="segmentation", arg2_name="ground_truth")
    if not allow_multiple_classes_for_each_pixel:
        # Each pixel's one-hot encoding must sum to exactly one across the class dimension.
        label_sums = torch.sum(ground_truth, dim=1).float()
        expected = torch.ones(segmentation.shape, device=ground_truth.device).float()
        if not torch.allclose(label_sums, expected):
            raise Exception("Ground-truth one-hot matrix does not sum up to one for all pixels")
    num_patches, num_classes = ground_truth.size()[:2]
    # One-hot encode the predicted class ids, then move the class axis next to the patch axis.
    prediction_one_hot = F.one_hot(segmentation, num_classes=num_classes).permute(0, 4, 1, 2, 3)
    # Flatten the spatial dimensions and work on boolean masks.
    prediction_flat = prediction_one_hot.bool().view(num_patches, num_classes, -1)
    ground_truth_flat = ground_truth.bool().view(num_patches, num_classes, -1)
    # Dice = 2 * |A & B| / (|A| + |B|); a small epsilon avoids division by zero.
    overlap = torch.sum(prediction_flat & ground_truth_flat, dim=-1).float()
    intersection = 2.0 * overlap
    union = torch.sum(prediction_flat, dim=-1) + torch.sum(ground_truth_flat, dim=-1).float() + 1.0e-6
    return intersection / union
def store_epoch_metrics(metrics: DictStrFloat,
                        epoch: int,
                        file_logger: DataframeLogger) -> None:
    """
    Writes all metrics (apart from ones that measure run time) into a CSV file,
    with an additional column for the epoch number.
    :param file_logger: An instance of DataframeLogger, for logging results to csv.
    :param epoch: The epoch corresponding to the results.
    :param metrics: The metrics of the specified epoch, averaged along its batches.
    """
    # Timing metrics are transient and deliberately excluded from the CSV output.
    excluded_keys = (MetricType.SECONDS_PER_BATCH.value, MetricType.SECONDS_PER_EPOCH.value)
    logger_row = {}
    for metric_name, metric_value in metrics.items():
        if metric_name in excluded_keys:
            continue
        # Translate internal metric names to canonical logging column names where a mapping exists.
        if metric_name in INTERNAL_TO_LOGGING_COLUMN_NAMES.keys():
            logger_row[INTERNAL_TO_LOGGING_COLUMN_NAMES[metric_name].value] = metric_value
        else:
            logger_row[metric_name] = metric_value
    logger_row[LoggingColumns.Epoch.value] = epoch
    file_logger.add_record(logger_row)
    file_logger.flush()
def compute_scalar_metrics(metrics_dict: ScalarMetricsDict,
                           subject_ids: Sequence[str],
                           model_output: torch.Tensor,
                           labels: torch.Tensor,
                           loss_type: ScalarLoss = ScalarLoss.BinaryCrossEntropyWithLogits) -> None:
    """
    Computes various metrics for a binary classification task from real-valued model output and a label vector,
    and stores them in the given `metrics_dict`.
    The model output is assumed to be in the range between 0 and 1, a value larger than 0.5 indicates a prediction
    of class 1. The label vector is expected to contain class indices 0 and 1 only.
    Metrics for each model output channel will be isolated, and a non-default hue for each model output channel is
    expected, and must exist in the provided metrics_dict. The Default hue is used for single model outputs.
    :param metrics_dict: An object that holds all metrics. It will be updated in-place.
    :param subject_ids: Subject ids for the model output and labels.
    :param model_output: A tensor containing model outputs.
    :param labels: A tensor containing class labels.
    :param loss_type: The type of loss that the model uses. This is required to optionally convert 2-dim model output
    to probabilities.
    :raises ValueError: If there are fewer hues than model output channels.
    """
    _model_output_channels = model_output.shape[1]
    # If no non-default hues were declared, fall back to the default hue (single-channel output case).
    model_output_hues = metrics_dict.get_hue_names(include_default=len(metrics_dict.hues_without_default) == 0)
    if len(model_output_hues) < _model_output_channels:
        raise ValueError("Hues must be provided for each model output channel, found "
                         f"{_model_output_channels} channels but only {len(model_output_hues)} hues")
    for i, hue in enumerate(model_output_hues):
        # mask the model outputs and labels if required
        masked_model_outputs_and_labels = get_masked_model_outputs_and_labels(
            model_output[:, i, ...], labels[:, i, ...], subject_ids)
        # compute metrics on valid masked tensors only
        if masked_model_outputs_and_labels is not None:
            _model_output, _labels, _subject_ids = \
                masked_model_outputs_and_labels.model_outputs.data, \
                masked_model_outputs_and_labels.labels.data, \
                masked_model_outputs_and_labels.subject_ids
            # Convert labels to the same datatype as the model outputs, necessary when running with AMP
            _labels = _labels.to(dtype=_model_output.dtype)
            if loss_type == ScalarLoss.MeanSquaredError:
                # Regression-style metrics for models trained with mean squared error.
                metrics = {
                    MetricType.MEAN_SQUARED_ERROR: F.mse_loss(_model_output, _labels, reduction='mean').item(),
                    MetricType.MEAN_ABSOLUTE_ERROR: mean_absolute_error(_model_output, _labels),
                    MetricType.EXPLAINED_VAR: r2_score(_model_output, _labels)
                }
            else:
                # Classification metrics: model outputs are treated as probabilities in [0, 1].
                metrics = {
                    MetricType.CROSS_ENTROPY: F.binary_cross_entropy(_model_output, _labels, reduction='mean').item(),
                    MetricType.ACCURACY_AT_THRESHOLD_05: binary_classification_accuracy(_model_output, _labels)
                }
            for key, value in metrics.items():
                if key == MetricType.EXPLAINED_VAR:
                    # For a batch size 1, R2 score can be nan. We need to ignore nans
                    # when average in case the last batch is of size 1.
                    metrics_dict.add_metric(key, value, skip_nan_when_averaging=True, hue=hue)
                else:
                    metrics_dict.add_metric(key, value, hue=hue)
            assert _subject_ids is not None
            metrics_dict.add_predictions(_subject_ids, _model_output.detach().cpu().numpy(),
                                         _labels.cpu().numpy(), hue=hue)
def add_average_foreground_dice(metrics: MetricsDict) -> None:
    """
    If the given metrics dictionary contains an entry for Dice score, and only one value for the Dice score per class,
    then add an average Dice score for all foreground classes to the metrics dictionary (modified in place).
    :param metrics: The object that holds metrics. The average Dice score will be written back into this object.
    """
    # Collect the per-structure Dice values, excluding the background class.
    foreground_dice = [metrics.get_single_metric(MetricType.DICE, hue=structure_name)
                       for structure_name in metrics.get_hue_names(include_default=False)
                       if structure_name != BACKGROUND_CLASS_NAME]
    # Average ignoring NaNs, so that empty structures do not drag the mean down.
    metrics.add_metric(MetricType.DICE, np.nanmean(foreground_dice).item())
| [
"SimpleITK.HausdorffDistanceImageFilter",
"torch.nn.functional.binary_cross_entropy",
"InnerEye.Azure.azure_util.get_run_context_or_default",
"InnerEye.ML.utils.sequence_utils.get_masked_model_outputs_and_labels",
"numpy.mean",
"SimpleITK.Cast",
"numpy.nanmean",
"SimpleITK.GetArrayViewFromImage",
"t... | [((1592, 1614), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (1601, 1614), False, 'from dataclasses import dataclass, field\n'), ((1712, 1734), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (1721, 1734), False, 'from dataclasses import dataclass, field\n'), ((1937, 1959), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (1946, 1959), False, 'from dataclasses import dataclass, field\n'), ((3086, 3097), 'time.time', 'time.time', ([], {}), '()\n', (3095, 3097), False, 'import time\n'), ((3126, 3137), 'time.time', 'time.time', ([], {}), '()\n', (3135, 3137), False, 'import time\n'), ((3168, 3179), 'time.time', 'time.time', ([], {}), '()\n', (3177, 3179), False, 'import time\n'), ((3390, 3416), 'dataclasses.field', 'field', ([], {'default_factory': 'set'}), '(default_factory=set)\n', (3395, 3416), False, 'from dataclasses import dataclass, field\n'), ((7307, 7335), 'SimpleITK.StatisticsImageFilter', 'sitk.StatisticsImageFilter', ([], {}), '()\n', (7333, 7335), True, 'import SimpleITK as sitk\n'), ((7451, 7492), 'SimpleITK.LabelContour', 'sitk.LabelContour', (['reference_segmentation'], {}), '(reference_segmentation)\n', (7468, 7492), True, 'import SimpleITK as sitk\n'), ((7792, 7833), 'SimpleITK.LabelContour', 'sitk.LabelContour', (['reference_segmentation'], {}), '(reference_segmentation)\n', (7809, 7833), True, 'import SimpleITK as sitk\n'), ((8019, 8041), 'SimpleITK.LabelContour', 'sitk.LabelContour', (['seg'], {}), '(seg)\n', (8036, 8041), True, 'import SimpleITK as sitk\n'), ((8687, 8735), 'SimpleITK.GetArrayViewFromImage', 'sitk.GetArrayViewFromImage', (['seg2ref_distance_map'], {}), '(seg2ref_distance_map)\n', (8713, 8735), True, 'import SimpleITK as sitk\n'), ((8867, 8915), 'SimpleITK.GetArrayViewFromImage', 'sitk.GetArrayViewFromImage', (['ref2seg_distance_map'], {}), '(ref2seg_distance_map)\n', (8893, 8915), True, 'import SimpleITK as 
sitk\n'), ((10931, 10995), 'InnerEye.ML.utils.image_util.binaries_from_multi_label_array', 'binaries_from_multi_label_array', (['segmentation', 'number_of_classes'], {}), '(segmentation, number_of_classes)\n', (10962, 10995), False, 'from InnerEye.ML.utils.image_util import binaries_from_multi_label_array, is_binary_array\n'), ((11251, 11289), 'SimpleITK.LabelOverlapMeasuresImageFilter', 'sitk.LabelOverlapMeasuresImageFilter', ([], {}), '()\n', (11287, 11289), True, 'import SimpleITK as sitk\n'), ((11322, 11357), 'SimpleITK.HausdorffDistanceImageFilter', 'sitk.HausdorffDistanceImageFilter', ([], {}), '()\n', (11355, 11357), True, 'import SimpleITK as sitk\n'), ((11372, 11406), 'InnerEye.ML.metrics_dict.MetricsDict', 'MetricsDict', ([], {'hues': 'ground_truth_ids'}), '(hues=ground_truth_ids)\n', (11383, 11406), False, 'from InnerEye.ML.metrics_dict import DataframeLogger, INTERNAL_TO_LOGGING_COLUMN_NAMES, MetricsDict, ScalarMetricsDict\n'), ((14700, 14825), 'InnerEye.ML.utils.ml_util.check_size_matches', 'check_size_matches', (['segmentation', 'ground_truth', '(4)', '(5)', '[0, -3, -2, -1]'], {'arg1_name': '"""segmentation"""', 'arg2_name': '"""ground_truth"""'}), "(segmentation, ground_truth, 4, 5, [0, -3, -2, -1],\n arg1_name='segmentation', arg2_name='ground_truth')\n", (14718, 14825), False, 'from InnerEye.ML.utils.ml_util import check_size_matches\n'), ((2745, 2784), 'InnerEye.Azure.azure_util.get_run_context_or_default', 'get_run_context_or_default', (['run_context'], {}), '(run_context)\n', (2771, 2784), False, 'from InnerEye.Azure.azure_util import get_run_context_or_default\n'), ((3658, 3669), 'time.time', 'time.time', ([], {}), '()\n', (3667, 3669), False, 'import time\n'), ((4151, 4162), 'time.time', 'time.time', ([], {}), '()\n', (4160, 4162), False, 'import time\n'), ((5114, 5125), 'time.time', 'time.time', ([], {}), '()\n', (5123, 5125), False, 'import time\n'), ((6727, 6738), 'time.time', 'time.time', ([], {}), '()\n', (6736, 6738), False, 'import 
time\n'), ((7669, 7770), 'SimpleITK.SignedMaurerDistanceMap', 'sitk.SignedMaurerDistanceMap', (['reference_segmentation'], {'squaredDistance': '(False)', 'useImageSpacing': '(True)'}), '(reference_segmentation, squaredDistance=False,\n useImageSpacing=True)\n', (7697, 7770), True, 'import SimpleITK as sitk\n'), ((7915, 7993), 'SimpleITK.SignedMaurerDistanceMap', 'sitk.SignedMaurerDistanceMap', (['seg'], {'squaredDistance': '(False)', 'useImageSpacing': '(True)'}), '(seg, squaredDistance=False, useImageSpacing=True)\n', (7943, 7993), True, 'import SimpleITK as sitk\n'), ((8289, 8335), 'SimpleITK.Cast', 'sitk.Cast', (['segmented_surface', 'sitk.sitkFloat32'], {}), '(segmented_surface, sitk.sitkFloat32)\n', (8298, 8335), True, 'import SimpleITK as sitk\n'), ((8388, 8434), 'SimpleITK.Cast', 'sitk.Cast', (['reference_surface', 'sitk.sitkFloat32'], {}), '(reference_surface, sitk.sitkFloat32)\n', (8397, 8434), True, 'import SimpleITK as sitk\n'), ((11027, 11066), 'InnerEye.ML.utils.image_util.is_binary_array', 'is_binary_array', (['ground_truth[label_id]'], {}), '(ground_truth[label_id])\n', (11042, 11066), False, 'from InnerEye.ML.utils.image_util import binaries_from_multi_label_array, is_binary_array\n'), ((11124, 11154), 'numpy.all', 'np.all', (['all_classes_are_binary'], {}), '(all_classes_are_binary)\n', (11130, 11154), True, 'import numpy as np\n'), ((11501, 11602), 'InnerEye.ML.utils.ml_util.check_size_matches', 'check_size_matches', (['prediction', 'ground_truth[i]'], {'arg1_name': '"""prediction"""', 'arg2_name': '"""ground_truth"""'}), "(prediction, ground_truth[i], arg1_name='prediction',\n arg2_name='ground_truth')\n", (11519, 11602), False, 'from InnerEye.ML.utils.ml_util import check_size_matches\n'), ((11920, 11943), 'numpy.all', 'np.all', (['(prediction == 0)'], {}), '(prediction == 0)\n', (11926, 11943), True, 'import numpy as np\n'), ((11962, 11990), 'numpy.all', 'np.all', (['(ground_truth[i] == 0)'], {}), '(ground_truth[i] == 0)\n', (11968, 11990), 
True, 'import numpy as np\n'), ((18912, 19008), 'InnerEye.ML.utils.sequence_utils.get_masked_model_outputs_and_labels', 'get_masked_model_outputs_and_labels', (['model_output[:, i, ...]', 'labels[:, i, ...]', 'subject_ids'], {}), '(model_output[:, i, ...], labels[:, i,\n ...], subject_ids)\n', (18947, 19008), False, 'from InnerEye.ML.utils.sequence_utils import get_masked_model_outputs_and_labels\n'), ((5446, 5555), 'logging.info', 'logging.info', (['f"""{message_prefix}: Loaded the first minibatch of data in {item_load_time:0.2f} sec."""'], {}), "(\n f'{message_prefix}: Loaded the first minibatch of data in {item_load_time:0.2f} sec.'\n )\n", (5458, 5555), False, 'import logging\n'), ((9094, 9124), 'numpy.mean', 'np.mean', (['all_surface_distances'], {}), '(all_surface_distances)\n', (9101, 9124), True, 'import numpy as np\n'), ((11614, 11641), 'InnerEye.ML.utils.image_util.is_binary_array', 'is_binary_array', (['prediction'], {}), '(prediction)\n', (11629, 11641), False, 'from InnerEye.ML.utils.image_util import binaries_from_multi_label_array, is_binary_array\n'), ((13443, 13514), 'logging.debug', 'logging.debug', (['f"""Patient {patient_id}, class {i} has Dice score {dice}"""'], {}), "(f'Patient {patient_id}, class {i} has Dice score {dice}')\n", (13456, 13514), False, 'import logging\n'), ((15376, 15424), 'torch.nn.functional.one_hot', 'F.one_hot', (['segmentation'], {'num_classes': 'num_classes'}), '(segmentation, num_classes=num_classes)\n', (15385, 15424), True, 'import torch.nn.functional as F\n'), ((15903, 15942), 'torch.sum', 'torch.sum', (['one_hot_segmentation'], {'dim': '(-1)'}), '(one_hot_segmentation, dim=-1)\n', (15912, 15942), False, 'import torch\n'), ((16743, 16782), 'InnerEye.ML.metrics_dict.INTERNAL_TO_LOGGING_COLUMN_NAMES.keys', 'INTERNAL_TO_LOGGING_COLUMN_NAMES.keys', ([], {}), '()\n', (16780, 16782), False, 'from InnerEye.ML.metrics_dict import DataframeLogger, INTERNAL_TO_LOGGING_COLUMN_NAMES, MetricsDict, ScalarMetricsDict\n'), ((15828, 
15882), 'torch.sum', 'torch.sum', (['(one_hot_segmentation & ground_truth)'], {'dim': '(-1)'}), '(one_hot_segmentation & ground_truth, dim=-1)\n', (15837, 15882), False, 'import torch\n'), ((21572, 21592), 'numpy.nanmean', 'np.nanmean', (['all_dice'], {}), '(all_dice)\n', (21582, 21592), True, 'import numpy as np\n'), ((5875, 6217), 'logging.warning', 'logging.warning', (['f"""{message_prefix}: Loading minibatch {batch_index} took {item_load_time:0.2f} sec. This can mean that there are not enough data loader worker processes, or that there is a performance problem in loading. This warning will be printed at most {MAX_LOAD_TIME_WARNINGS} times in at most {MAX_LOAD_TIME_EPOCHS} epochs."""'], {}), "(\n f'{message_prefix}: Loading minibatch {batch_index} took {item_load_time:0.2f} sec. This can mean that there are not enough data loader worker processes, or that there is a performance problem in loading. This warning will be printed at most {MAX_LOAD_TIME_WARNINGS} times in at most {MAX_LOAD_TIME_EPOCHS} epochs.'\n )\n", (5890, 6217), False, 'import logging\n'), ((12247, 12282), 'InnerEye.ML.utils.io_util.reverse_tuple_float3', 'reverse_tuple_float3', (['voxel_spacing'], {}), '(voxel_spacing)\n', (12267, 12282), False, 'from InnerEye.ML.utils.io_util import reverse_tuple_float3\n'), ((12435, 12470), 'InnerEye.ML.utils.io_util.reverse_tuple_float3', 'reverse_tuple_float3', (['voxel_spacing'], {}), '(voxel_spacing)\n', (12455, 12470), False, 'from InnerEye.ML.utils.io_util import reverse_tuple_float3\n'), ((15945, 15976), 'torch.sum', 'torch.sum', (['ground_truth'], {'dim': '(-1)'}), '(ground_truth, dim=-1)\n', (15954, 15976), False, 'import torch\n'), ((19789, 19832), 'InnerEye.ML.utils.metrics_util.mean_absolute_error', 'mean_absolute_error', (['_model_output', '_labels'], {}), '(_model_output, _labels)\n', (19808, 19832), False, 'from InnerEye.ML.utils.metrics_util import binary_classification_accuracy, mean_absolute_error, r2_score\n'), ((19880, 19912), 
'InnerEye.ML.utils.metrics_util.r2_score', 'r2_score', (['_model_output', '_labels'], {}), '(_model_output, _labels)\n', (19888, 19912), False, 'from InnerEye.ML.utils.metrics_util import binary_classification_accuracy, mean_absolute_error, r2_score\n'), ((20153, 20207), 'InnerEye.ML.utils.metrics_util.binary_classification_accuracy', 'binary_classification_accuracy', (['_model_output', '_labels'], {}), '(_model_output, _labels)\n', (20183, 20207), False, 'from InnerEye.ML.utils.metrics_util import binary_classification_accuracy, mean_absolute_error, r2_score\n'), ((13012, 13124), 'logging.warning', 'logging.warning', (['f"""Cannot calculate Hausdorff distance for structure {i} of patient {patient_id}: {e}"""'], {}), "(\n f'Cannot calculate Hausdorff distance for structure {i} of patient {patient_id}: {e}'\n )\n", (13027, 13124), False, 'import logging\n'), ((13333, 13440), 'logging.warning', 'logging.warning', (['f"""Cannot calculate mean distance for structure {i} of patient {patient_id}: {e}"""'], {}), "(\n f'Cannot calculate mean distance for structure {i} of patient {patient_id}: {e}'\n )\n", (13348, 13440), False, 'import logging\n'), ((15004, 15034), 'torch.sum', 'torch.sum', (['ground_truth'], {'dim': '(1)'}), '(ground_truth, dim=1)\n', (15013, 15034), False, 'import torch\n'), ((15074, 15132), 'torch.ones', 'torch.ones', (['segmentation.shape'], {'device': 'ground_truth.device'}), '(segmentation.shape, device=ground_truth.device)\n', (15084, 15132), False, 'import torch\n'), ((19676, 19728), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['_model_output', '_labels'], {'reduction': '"""mean"""'}), "(_model_output, _labels, reduction='mean')\n", (19686, 19728), True, 'import torch.nn.functional as F\n'), ((20023, 20087), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['_model_output', '_labels'], {'reduction': '"""mean"""'}), "(_model_output, _labels, reduction='mean')\n", (20045, 20087), True, 'import torch.nn.functional as F\n')] |
import numpy as np
import numpy.typing as npt
import iterations_lib.python_inspectors_diag.utils as utils
from typing import Tuple
def TwoSGD_solver(complex_matrix: np.ndarray,
                  f_vector: np.ndarray,
                  u0_vector: np.ndarray = None,
                  eps: float = 10e-7,
                  n_iter: int = 10000) -> Tuple[np.ndarray, np.ndarray, np.ndarray,
                                                np.ndarray, np.ndarray, np.ndarray]:
    """
    Two-step gradient-descent (TwoSGD) iterative solver for the system
    ``complex_matrix @ u = f_vector`` (products are taken through the
    project's diagonal-product helpers in ``utils``).

    param:
        complex_matrix: system matrix (cast to complex internally)
        f_vector: right-hand side vector (cast to complex internally)
        u0_vector: starting guess; defaults to a vector of ones
        eps: stopping tolerance on ||u_{k+1} - u_k|| / ||f||
        n_iter: maximum number of iterations
    return (u_vector iterate history, it_space cost counter history,
            r residual history, delta_r last residual difference,
            alpha step-coefficient history, gamma step-coefficient history)
    """
    # Initialize the starting variable
    if u0_vector is None:
        u0_vector = np.ones(f_vector.shape[0], dtype=complex)
    it_space = np.zeros((1,), dtype=float)
    # Build the iteration-history matrix (one row per iterate)
    u_vector = np.zeros((1, len(u0_vector)), dtype=complex)
    u_vector[0] = u0_vector.copy()
    # Explicitly cast the matrix and right-hand side to complex
    complex_matrix = np.array(complex_matrix, dtype=complex)
    f_vector = np.array(f_vector, dtype=complex)
    r = np.zeros((1, complex_matrix.shape[0]), dtype=complex)
    delta_r = np.zeros((1, complex_matrix.shape[0]), dtype=complex)
    alpha = np.zeros((2,), dtype=float)
    gamma = np.zeros((2,), dtype=float)
    # H* = conjugate transpose of the system matrix
    H_star = np.transpose(np.conj(complex_matrix))
    # the very first step is a plain steepest-descent step
    r[0] = utils.matrix_diag_prod(complex_matrix, u_vector[0]) - f_vector
    H_s_r = utils.matrix_diag_prod(H_star, r[0])
    H_H_s_r = utils.matrix_diag_prod(complex_matrix, H_s_r)
    new_u = u_vector[0] - \
        utils.vec_dot_complex_prod(H_s_r, H_s_r) / utils.vec_dot_complex_prod(H_H_s_r, H_H_s_r) * H_s_r
    u_vector = np.concatenate((u_vector, new_u.reshape((1, -1))), axis=0)
    # it_space grows by 3 per step -- presumably counting matrix products per
    # iteration; TODO confirm the intended unit
    it_space = np.concatenate((it_space, np.array(3).reshape((1, ))), axis=0)
    for iter_index in range(1, n_iter):
        new_r = utils.matrix_diag_prod(complex_matrix, u_vector[iter_index]) - f_vector
        r = np.concatenate((r, new_r.reshape((1, -1))), axis=0)
        delta_r = r[iter_index] - r[iter_index - 1]
        delta_u = u_vector[iter_index] - u_vector[iter_index - 1]
        H_s_r = utils.matrix_diag_prod(H_star, r[iter_index])
        H_H_s_r = utils.matrix_diag_prod(complex_matrix, H_s_r)
        # scalar products used by the two-step coefficient formulas
        a = utils.vec_dot_complex_prod(delta_r, delta_r)
        b = utils.vec_dot_complex_prod(H_s_r, H_s_r)
        c = utils.vec_dot_complex_prod(H_H_s_r, H_H_s_r)
        new_alpha = -b**2 / (a * c - b**2)
        alpha = np.concatenate((alpha, new_alpha.reshape((1,))), axis=0)
        new_gamma = a * b / (a * c - b**2)
        gamma = np.concatenate((gamma, new_gamma.reshape((1,))), axis=0)
        # two-step update: combine the previous displacement and the gradient
        new_u = u_vector[iter_index] - alpha[iter_index + 1] * delta_u - gamma[iter_index + 1] * H_s_r
        u_vector = np.concatenate((u_vector, new_u.reshape((1, -1))), axis=0)
        it_space = np.concatenate((it_space, np.array(it_space[iter_index] + 3).reshape((1,))), axis=0)
        # stop once the relative change of the iterate falls below eps
        difference = utils.l2_norm(u_vector[iter_index + 1] - u_vector[iter_index]) / utils.l2_norm(f_vector)
        if difference < eps:
            break
    return u_vector, it_space, r, delta_r, alpha, gamma
def _main():
    """Demo run: solve a diagonal system with TwoSGD and compare to the exact answer."""
    diagonal = np.array((0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 50, 500, 5000))
    rhs = np.array((1, 2, 3, 4, 5, 6, 7, 8, 100, 1000, 10000))
    solution, iter_space, _, _, alpha_hist, gamma_hist = TwoSGD_solver(diagonal, rhs)
    # for a diagonal matrix the exact solution is the element-wise quotient
    print("Real_Solve")
    print(rhs / diagonal)
    for label, value in (("\nIterations Solve", solution[-1]),
                         ("\nIterations Space", iter_space),
                         ("\nalpha", alpha_hist),
                         ("\ngamma", gamma_hist)):
        print(label)
        print(value)
    return 0
if __name__ == "__main__":
_main() | [
"numpy.conj",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"iterations_lib.python_inspectors_diag.utils.l2_norm",
"iterations_lib.python_inspectors_diag.utils.matrix_diag_prod",
"iterations_lib.python_inspectors_diag.utils.vec_dot_complex_prod"
] | [((630, 657), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'float'}), '((1,), dtype=float)\n', (638, 657), True, 'import numpy as np\n'), ((863, 902), 'numpy.array', 'np.array', (['complex_matrix'], {'dtype': 'complex'}), '(complex_matrix, dtype=complex)\n', (871, 902), True, 'import numpy as np\n'), ((918, 951), 'numpy.array', 'np.array', (['f_vector'], {'dtype': 'complex'}), '(f_vector, dtype=complex)\n', (926, 951), True, 'import numpy as np\n'), ((961, 1014), 'numpy.zeros', 'np.zeros', (['(1, complex_matrix.shape[0])'], {'dtype': 'complex'}), '((1, complex_matrix.shape[0]), dtype=complex)\n', (969, 1014), True, 'import numpy as np\n'), ((1029, 1082), 'numpy.zeros', 'np.zeros', (['(1, complex_matrix.shape[0])'], {'dtype': 'complex'}), '((1, complex_matrix.shape[0]), dtype=complex)\n', (1037, 1082), True, 'import numpy as np\n'), ((1096, 1123), 'numpy.zeros', 'np.zeros', (['(2,)'], {'dtype': 'float'}), '((2,), dtype=float)\n', (1104, 1123), True, 'import numpy as np\n'), ((1136, 1163), 'numpy.zeros', 'np.zeros', (['(2,)'], {'dtype': 'float'}), '((2,), dtype=float)\n', (1144, 1163), True, 'import numpy as np\n'), ((1304, 1340), 'iterations_lib.python_inspectors_diag.utils.matrix_diag_prod', 'utils.matrix_diag_prod', (['H_star', 'r[0]'], {}), '(H_star, r[0])\n', (1326, 1340), True, 'import iterations_lib.python_inspectors_diag.utils as utils\n'), ((1355, 1400), 'iterations_lib.python_inspectors_diag.utils.matrix_diag_prod', 'utils.matrix_diag_prod', (['complex_matrix', 'H_s_r'], {}), '(complex_matrix, H_s_r)\n', (1377, 1400), True, 'import iterations_lib.python_inspectors_diag.utils as utils\n'), ((3064, 3121), 'numpy.array', 'np.array', (['(0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 50, 500, 5000)'], {}), '((0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 50, 500, 5000))\n', (3072, 3121), True, 'import numpy as np\n'), ((3137, 3189), 'numpy.array', 'np.array', (['(1, 2, 3, 4, 5, 6, 7, 8, 100, 1000, 10000)'], {}), '((1, 2, 3, 4, 5, 6, 7, 8, 100, 1000, 10000))\n', (3145, 3189), True, 
'import numpy as np\n'), ((572, 613), 'numpy.ones', 'np.ones', (['f_vector.shape[0]'], {'dtype': 'complex'}), '(f_vector.shape[0], dtype=complex)\n', (579, 613), True, 'import numpy as np\n'), ((1191, 1214), 'numpy.conj', 'np.conj', (['complex_matrix'], {}), '(complex_matrix)\n', (1198, 1214), True, 'import numpy as np\n'), ((1228, 1279), 'iterations_lib.python_inspectors_diag.utils.matrix_diag_prod', 'utils.matrix_diag_prod', (['complex_matrix', 'u_vector[0]'], {}), '(complex_matrix, u_vector[0])\n', (1250, 1279), True, 'import iterations_lib.python_inspectors_diag.utils as utils\n'), ((2020, 2065), 'iterations_lib.python_inspectors_diag.utils.matrix_diag_prod', 'utils.matrix_diag_prod', (['H_star', 'r[iter_index]'], {}), '(H_star, r[iter_index])\n', (2042, 2065), True, 'import iterations_lib.python_inspectors_diag.utils as utils\n'), ((2084, 2129), 'iterations_lib.python_inspectors_diag.utils.matrix_diag_prod', 'utils.matrix_diag_prod', (['complex_matrix', 'H_s_r'], {}), '(complex_matrix, H_s_r)\n', (2106, 2129), True, 'import iterations_lib.python_inspectors_diag.utils as utils\n'), ((2143, 2187), 'iterations_lib.python_inspectors_diag.utils.vec_dot_complex_prod', 'utils.vec_dot_complex_prod', (['delta_r', 'delta_r'], {}), '(delta_r, delta_r)\n', (2169, 2187), True, 'import iterations_lib.python_inspectors_diag.utils as utils\n'), ((2200, 2240), 'iterations_lib.python_inspectors_diag.utils.vec_dot_complex_prod', 'utils.vec_dot_complex_prod', (['H_s_r', 'H_s_r'], {}), '(H_s_r, H_s_r)\n', (2226, 2240), True, 'import iterations_lib.python_inspectors_diag.utils as utils\n'), ((2253, 2297), 'iterations_lib.python_inspectors_diag.utils.vec_dot_complex_prod', 'utils.vec_dot_complex_prod', (['H_H_s_r', 'H_H_s_r'], {}), '(H_H_s_r, H_H_s_r)\n', (2279, 2297), True, 'import iterations_lib.python_inspectors_diag.utils as utils\n'), ((1748, 1808), 'iterations_lib.python_inspectors_diag.utils.matrix_diag_prod', 'utils.matrix_diag_prod', (['complex_matrix', 
'u_vector[iter_index]'], {}), '(complex_matrix, u_vector[iter_index])\n', (1770, 1808), True, 'import iterations_lib.python_inspectors_diag.utils as utils\n'), ((2841, 2903), 'iterations_lib.python_inspectors_diag.utils.l2_norm', 'utils.l2_norm', (['(u_vector[iter_index + 1] - u_vector[iter_index])'], {}), '(u_vector[iter_index + 1] - u_vector[iter_index])\n', (2854, 2903), True, 'import iterations_lib.python_inspectors_diag.utils as utils\n'), ((2906, 2929), 'iterations_lib.python_inspectors_diag.utils.l2_norm', 'utils.l2_norm', (['f_vector'], {}), '(f_vector)\n', (2919, 2929), True, 'import iterations_lib.python_inspectors_diag.utils as utils\n'), ((1442, 1482), 'iterations_lib.python_inspectors_diag.utils.vec_dot_complex_prod', 'utils.vec_dot_complex_prod', (['H_s_r', 'H_s_r'], {}), '(H_s_r, H_s_r)\n', (1468, 1482), True, 'import iterations_lib.python_inspectors_diag.utils as utils\n'), ((1485, 1529), 'iterations_lib.python_inspectors_diag.utils.vec_dot_complex_prod', 'utils.vec_dot_complex_prod', (['H_H_s_r', 'H_H_s_r'], {}), '(H_H_s_r, H_H_s_r)\n', (1511, 1529), True, 'import iterations_lib.python_inspectors_diag.utils as utils\n'), ((1654, 1665), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (1662, 1665), True, 'import numpy as np\n'), ((2760, 2794), 'numpy.array', 'np.array', (['(it_space[iter_index] + 3)'], {}), '(it_space[iter_index] + 3)\n', (2768, 2794), True, 'import numpy as np\n')] |
#-*-: coding:utf-8 -*-
from __future__ import absolute_import
import numpy
def load_data(trainset='lcg/DL_train.csv',validset='lcg/DL_valid.csv',testset='lcg/DL_test.csv'):
    """
    Load train/validation/test CSV files into (features, labels) pairs.

    Each file is expected to have one header row (skipped); in every
    remaining row the last column is the label and the rest are features.
    return [(train_x, train_y), (valid_x, valid_y), (test_x, test_y)]
    """
    # read the three files separately; features = all columns but the last
    data=numpy.loadtxt(validset, delimiter=',', dtype=float, skiprows=1)
    valid_set_x, valid_set_y =(data[:,:-1],data[:,-1])
    data=numpy.loadtxt(testset, delimiter=',', dtype=float, skiprows=1)
    test_set_x, test_set_y =(data[:,:-1],data[:,-1])
    data=numpy.loadtxt(trainset, delimiter=',', dtype=float, skiprows=1)
    train_set_x, train_set_y=(data[:,:-1],data[:,-1])
    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
return rval | [
"numpy.loadtxt"
] | [((206, 269), 'numpy.loadtxt', 'numpy.loadtxt', (['validset'], {'delimiter': '""","""', 'dtype': 'float', 'skiprows': '(1)'}), "(validset, delimiter=',', dtype=float, skiprows=1)\n", (219, 269), False, 'import numpy\n'), ((335, 397), 'numpy.loadtxt', 'numpy.loadtxt', (['testset'], {'delimiter': '""","""', 'dtype': 'float', 'skiprows': '(1)'}), "(testset, delimiter=',', dtype=float, skiprows=1)\n", (348, 397), False, 'import numpy\n'), ((461, 524), 'numpy.loadtxt', 'numpy.loadtxt', (['trainset'], {'delimiter': '""","""', 'dtype': 'float', 'skiprows': '(1)'}), "(trainset, delimiter=',', dtype=float, skiprows=1)\n", (474, 524), False, 'import numpy\n')] |
import math
import numpy as np
def eval_state(probs, labels, thr):
    """
    Binarize `probs` at threshold `thr` and count confusion-matrix entries
    against the binary array `labels`.

    param:
        probs: numpy array of scores
        labels: numpy array of 0/1 ground-truth labels
        thr: decision threshold (prediction is positive when prob >= thr)
    return (TN, FN, FP, TP)
    """
    predict = probs >= thr
    # use `~predict` / `predict` instead of `predict == False/True` (PEP 8
    # E712) -- identical for boolean numpy arrays
    TN = np.sum((labels == 0) & ~predict)
    FN = np.sum((labels == 1) & ~predict)
    FP = np.sum((labels == 0) & predict)
    TP = np.sum((labels == 1) & predict)
    return TN, FN, FP, TP
def calculate(probs, labels):
    """
    Anti-spoofing metrics at the fixed 0.5 threshold: APCER, NPCER, their
    mean (ACER) and overall accuracy. A class that is absent from `labels`
    contributes an error rate of 1.0 by convention.
    """
    TN, FN, FP, TP = eval_state(probs, labels, 0.5)
    negatives = FP + TN
    positives = FN + TP
    APCER = FP / float(negatives) if negatives else 1.0
    NPCER = FN / float(positives) if positives else 1.0
    return APCER, NPCER, (APCER + NPCER) / 2.0, (TP + TN) / labels.shape[0]
def calculate_threshold(probs, labels, threshold):
    """Classification accuracy when `probs` is binarized at `threshold`."""
    TN, _, _, TP = eval_state(probs, labels, threshold)
    return (TP + TN) / labels.shape[0]
def get_threshold(probs, grid_density):
    """
    Build an evenly spaced threshold grid: `grid_density + 1` points on
    [0, 1] plus a final 1.1 sentinel so the last bin rejects everything.

    `probs` is unused (kept for interface compatibility); the grid depends
    only on `grid_density`.
    """
    # the original also computed min(probs)/max(probs) but never used them
    # (and crashed on empty `probs`); the dead code is removed here
    thresholds = [i * 1.0 / float(grid_density) for i in range(grid_density + 1)]
    thresholds.append(1.1)
    return thresholds
def get_EER_states(probs, labels, grid_density=10000):
    """
    Sweep a threshold grid and locate the Equal Error Rate operating point,
    i.e. the threshold where |FRR - FAR| is smallest.

    param:
        probs: numpy array of scores
        labels: numpy array of 0/1 ground-truth labels
        grid_density: number of grid steps on [0, 1]
    return (EER, best threshold, FRR list over the grid, FAR list over the grid)
    """
    thresholds = get_threshold(probs, grid_density)
    min_dist = 1.0
    min_dist_states = []
    FRR_list = []
    FAR_list = []
    for thr in thresholds:
        TN, FN, FP, TP = eval_state(probs, labels, thr)
        # degenerate cases: a class missing from `labels` gets error rate 1.0;
        # the unused TPR/TNR assignments of the original were removed
        if FN + TP == 0:
            FRR = 1.0
            FAR = FP / float(FP + TN)
        elif FP + TN == 0:
            FAR = 1.0
            FRR = FN / float(FN + TP)
        else:
            FAR = FP / float(FP + TN)
            FRR = FN / float(FN + TP)
        dist = math.fabs(FRR - FAR)
        FAR_list.append(FAR)
        FRR_list.append(FRR)
        # `<=` keeps the LAST (largest) threshold among equally good ones
        if dist <= min_dist:
            min_dist = dist
            min_dist_states = [FAR, FRR, thr]
    EER = (min_dist_states[0] + min_dist_states[1]) / 2.0
    thr = min_dist_states[2]
    return EER, thr, FRR_list, FAR_list
def get_HTER_at_thr(probs, labels, thr):
    """
    Half Total Error Rate at a fixed threshold: the mean of the false
    acceptance rate (FAR) and the false rejection rate (FRR).
    """
    TN, FN, FP, TP = eval_state(probs, labels, thr)
    if FN + TP == 0:
        # no positive samples: rejection rate defaults to 1.0
        false_rejection = 1.0
        false_acceptance = FP / float(FP + TN)
    elif FP + TN == 0:
        # no negative samples: acceptance rate defaults to 1.0
        false_acceptance = 1.0
        false_rejection = FN / float(FN + TP)
    else:
        false_acceptance = FP / float(FP + TN)
        false_rejection = FN / float(FN + TP)
    return (false_acceptance + false_rejection) / 2.0
| [
"numpy.sum",
"math.fabs"
] | [((105, 147), 'numpy.sum', 'np.sum', (['((labels == 0) & (predict == False))'], {}), '((labels == 0) & (predict == False))\n', (111, 147), True, 'import numpy as np\n'), ((157, 199), 'numpy.sum', 'np.sum', (['((labels == 1) & (predict == False))'], {}), '((labels == 1) & (predict == False))\n', (163, 199), True, 'import numpy as np\n'), ((209, 250), 'numpy.sum', 'np.sum', (['((labels == 0) & (predict == True))'], {}), '((labels == 0) & (predict == True))\n', (215, 250), True, 'import numpy as np\n'), ((260, 301), 'numpy.sum', 'np.sum', (['((labels == 1) & (predict == True))'], {}), '((labels == 1) & (predict == True))\n', (266, 301), True, 'import numpy as np\n'), ((1765, 1785), 'math.fabs', 'math.fabs', (['(FRR - FAR)'], {}), '(FRR - FAR)\n', (1774, 1785), False, 'import math\n')] |
# --------------
# PageRank demo on an 8-page web graph, solved three ways:
# eigen-decomposition, plain power iteration, and the damped Google matrix.
import numpy as np
# Column-stochastic adjacency (link) matrix: entry [i, j] is the probability
# of moving from page j+1 to page i+1 (every column sums to 1)
adj_mat = np.array([[0,0,0,0,0,0,1/3,0],
                [1/2,0,1/2,1/3,0,0,0,0],
                [1/2,0,0,0,0,0,0,0],
                [0,1,0,0,0,0,0,0],
                [0,0,1/2,1/3,0,0,1/3,0],
                [0,0,0,1/3,1/3,0,0,1/2],
                [0,0,0,0,1/3,0,0,1/2],
                [0,0,0,0,1/3,1,1/3,0]])
# Compute eigenvalues and eigenvectors
eigenvalues, eigenvectors = np.linalg.eig(adj_mat)
# Eigenvector of eigenvalue 1, L1-normalized to a probability vector.
# NOTE(review): this assumes np.linalg.eig puts the eigenvalue-1 eigenvector
# in column 0 -- the ordering is not guaranteed in general; confirm.
eigen_1 = abs(eigenvectors[:,0])/np.linalg.norm(eigenvectors[:,0],1)
# most important page = 1-based index of the largest stationary component
page = np.where(eigen_1 == eigen_1.max())[0][0]+1
print(page)
# --------------
# Power method: start with all probability mass on page 1
init_I = [1,0,0,0,0,0,0,0]
# 10 iterations of repeated multiplication by the link matrix
for i in range(10):
    init_I = np.dot(adj_mat,init_I)
power_page = np.argmax(init_I)+1
power_page
# --------------
# Modified adjacency matrix. NOTE(review): its first row is all zeros, i.e.
# page 1 no longer receives any inbound links in this variant.
new_adj_mat = np.array([[0,0,0,0,0,0,0,0],
                [1/2,0,1/2,1/3,0,0,0,0],
                [1/2,0,0,0,0,0,0,0],
                [0,1,0,0,0,0,0,0],
                [0,0,1/2,1/3,0,0,1/2,0],
                [0,0,0,1/3,1/3,0,0,1/2],
                [0,0,0,0,1/3,0,0,1/2],
                [0,0,0,0,1/3,1,1/2,0]])
# Initialize stationary vector I
new_init_I = [1,0,0,0,0,0,0,0]
# 10 iterations of the power method on the modified graph
for i in range(10):
    new_init_I = np.dot(new_adj_mat, new_init_I)
new_init_I
# --------------
# Damping factor for the Google matrix
alpha = 0.85
# Google matrix: damped adjacency plus a uniform teleportation term
n = len(new_adj_mat)
G = (alpha * new_adj_mat) + (((1 - alpha) * (1/n)) * np.ones(new_adj_mat.shape))
# Initialize stationary vector I
final_init_I = [1,0,0,0,0,0,0,0]
# Power method with L1 renormalization after each step
for i in range(1000):
    final_init_I = np.dot(G,final_init_I)
    final_init_I /= np.linalg.norm(final_init_I,1)
final_init_I
# Code ends here
| [
"numpy.argmax",
"numpy.ones",
"numpy.linalg.eig",
"numpy.linalg.norm",
"numpy.array",
"numpy.dot"
] | [((106, 397), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 1 / 3, 0], [1 / 2, 0, 1 / 2, 1 / 3, 0, 0, 0, 0], [1 / 2,\n 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1 / 2, 1 / 3, 0,\n 0, 1 / 3, 0], [0, 0, 0, 1 / 3, 1 / 3, 0, 0, 1 / 2], [0, 0, 0, 0, 1 / 3,\n 0, 0, 1 / 2], [0, 0, 0, 0, 1 / 3, 1, 1 / 3, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 1 / 3, 0], [1 / 2, 0, 1 / 2, 1 / 3, 0, 0, 0, 0\n ], [1 / 2, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1 / 2,\n 1 / 3, 0, 0, 1 / 3, 0], [0, 0, 0, 1 / 3, 1 / 3, 0, 0, 1 / 2], [0, 0, 0,\n 0, 1 / 3, 0, 0, 1 / 2], [0, 0, 0, 0, 1 / 3, 1, 1 / 3, 0]])\n', (114, 397), True, 'import numpy as np\n'), ((500, 522), 'numpy.linalg.eig', 'np.linalg.eig', (['adj_mat'], {}), '(adj_mat)\n', (513, 522), True, 'import numpy as np\n'), ((1087, 1375), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0], [1 / 2, 0, 1 / 2, 1 / 3, 0, 0, 0, 0], [1 / 2, 0,\n 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1 / 2, 1 / 3, 0, 0,\n 1 / 2, 0], [0, 0, 0, 1 / 3, 1 / 3, 0, 0, 1 / 2], [0, 0, 0, 0, 1 / 3, 0,\n 0, 1 / 2], [0, 0, 0, 0, 1 / 3, 1, 1 / 2, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0], [1 / 2, 0, 1 / 2, 1 / 3, 0, 0, 0, 0], [\n 1 / 2, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1 / 2, 1 /\n 3, 0, 0, 1 / 2, 0], [0, 0, 0, 1 / 3, 1 / 3, 0, 0, 1 / 2], [0, 0, 0, 0, \n 1 / 3, 0, 0, 1 / 2], [0, 0, 0, 0, 1 / 3, 1, 1 / 2, 0]])\n', (1095, 1375), True, 'import numpy as np\n'), ((591, 628), 'numpy.linalg.norm', 'np.linalg.norm', (['eigenvectors[:, 0]', '(1)'], {}), '(eigenvectors[:, 0], 1)\n', (605, 628), True, 'import numpy as np\n'), ((901, 924), 'numpy.dot', 'np.dot', (['adj_mat', 'init_I'], {}), '(adj_mat, init_I)\n', (907, 924), True, 'import numpy as np\n'), ((937, 954), 'numpy.argmax', 'np.argmax', (['init_I'], {}), '(init_I)\n', (946, 954), True, 'import numpy as np\n'), ((1551, 1582), 'numpy.dot', 'np.dot', (['new_adj_mat', 'new_init_I'], {}), '(new_adj_mat, new_init_I)\n', (1557, 1582), True, 'import numpy as np\n'), 
((1957, 1980), 'numpy.dot', 'np.dot', (['G', 'final_init_I'], {}), '(G, final_init_I)\n', (1963, 1980), True, 'import numpy as np\n'), ((2000, 2031), 'numpy.linalg.norm', 'np.linalg.norm', (['final_init_I', '(1)'], {}), '(final_init_I, 1)\n', (2014, 2031), True, 'import numpy as np\n'), ((1782, 1808), 'numpy.ones', 'np.ones', (['new_adj_mat.shape'], {}), '(new_adj_mat.shape)\n', (1789, 1808), True, 'import numpy as np\n')] |
from distutils.version import LooseVersion
import pytest
import numpy as np
from opensfm.synthetic_data import synthetic_examples
def pytest_configure(config):
    """Pytest hook: switch numpy to its legacy print format before tests run."""
    use_legacy_numpy_printoptions()
def use_legacy_numpy_printoptions():
    """Ensure numpy uses the legacy (pre-1.14) print format."""
    # Parse the major/minor version directly instead of relying on
    # distutils.version.LooseVersion: distutils was deprecated by PEP 632 and
    # removed in Python 3.12, so the old code breaks on modern interpreters.
    parts = np.__version__.split(".")
    try:
        major_minor = (int(parts[0]), int(parts[1]))
    except (IndexError, ValueError):
        # unparseable dev/rc version string: leave print options untouched
        return
    if major_minor > (1, 13):
        np.set_printoptions(legacy='1.13')
@pytest.fixture(scope='module')
def scene_synthetic():
    """Module-scoped fixture: a synthetic ellipse scene plus noisy derived data."""
    np.random.seed(42)  # deterministic scene generation across test runs
    scene = synthetic_examples.synthetic_ellipse_scene()
    exifs = scene.get_scene_exifs(5.0)  # gps noise
    # track data with bounded depth (40) and 1px projection noise
    features, desc, colors, graph = scene.get_tracks_data(40, 1.0)
    return scene, exifs, features, desc, colors, graph
"numpy.set_printoptions",
"numpy.random.seed",
"distutils.version.LooseVersion",
"pytest.fixture",
"opensfm.synthetic_data.synthetic_examples.synthetic_ellipse_scene"
] | [((393, 423), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (407, 423), False, 'import pytest\n'), ((451, 469), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (465, 469), True, 'import numpy as np\n'), ((481, 525), 'opensfm.synthetic_data.synthetic_examples.synthetic_ellipse_scene', 'synthetic_examples.synthetic_ellipse_scene', ([], {}), '()\n', (523, 525), False, 'from opensfm.synthetic_data import synthetic_examples\n'), ((355, 389), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'legacy': '"""1.13"""'}), "(legacy='1.13')\n", (374, 389), True, 'import numpy as np\n'), ((295, 323), 'distutils.version.LooseVersion', 'LooseVersion', (['np.__version__'], {}), '(np.__version__)\n', (307, 323), False, 'from distutils.version import LooseVersion\n')] |
##################################################################
# <NAME> #
# https://www.linkedin.com/in/haosleeper/ #
##################################################################
import logging
import sys
import copy
import numpy as np
from numpy.lib.financial import rate
import pandas as pd
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
logger = logging.getLogger()
class CollaborativeFiltering(object):
"""
Base class for User-based and Item-based collaborative filtering
methods: compute user mean rating, Pearson correlation between 2 array,
consine similarity between 2 arrays, centering the ratings accross user,
"""
def __init__(self,data:pd.DataFrame, k_neighbors:int= 10, rating_matrix_row:str=None,
rating_matrix_column:str = None, rating_matrix_value:str = None, movies_data = None ) -> None:
"""
Base class for Collaborative Filtering Algorithms
"""
assert isinstance(data, pd.DataFrame), 'data must be a pandas dataframe'
assert rating_matrix_row in data.columns, 'row must be a column in data'
assert rating_matrix_column in data.columns, 'column must be a column in data'
assert rating_matrix_value in data.columns, 'value must be a column in data'
#create rating matrix from data
self.rating_matrix = data.pivot( rating_matrix_row, rating_matrix_column, rating_matrix_value)
self.k_neighbors = k_neighbors
self.movies_data = movies_data
# compute mean rating for each user
self.user_mean_ratings = self.compute_user_mean_ratings()
# a dict to save all the computed similarity score for all users
self.all_similarity_score = dict.fromkeys(self.rating_matrix.columns, None)
if movies_data is not None:
self.movies_data = self.save_movies(movies_data)
# replace nan with 0
self.rating_matrix = self.rating_matrix.fillna(0)
def predict_rating(self, ) -> float:
raise NotImplementedError
def recommend(self,) -> list:
raise NotImplementedError
@classmethod
def save_movies(cls, movie_data:pd.DataFrame ) -> None:
"""
Read all movies and its id
"""
return {id:title for i, (id, title, genres) in movie_data.iterrows()}
def get_movie_names(self, ids:list) -> list:
"""
Get the movie names from the previous saved movies
param:
ids: a list contains the id of the movies
return a list of movie names of the provided ids
"""
def compute_user_mean_ratings(self) -> dict:
"""
compute and save the mean rating for each user in the rating matrix
"""
assert self.rating_matrix is not None, 'Rating matrix is None'
user_mean_ratings =self.rating_matrix.mean(axis =1)
assert len(user_mean_ratings) == self.rating_matrix.shape[0], 'Some thing went wrong'
return {userid:user_mean_ratings[userid] for userid in self.rating_matrix.index}
def pearson_correlation(self, a: list, b: list ,mean_a:float = None, mean_b:float = None) -> float:
"""
Compute the pearson correlation coefficient between a and b
a: a list of rating of user/item a
b: similar to a
mean_a, mean_b: mean of a and b, if not provided, then it will be computed using a and b
return the Pearson(a, b)
"""
if a is None:
mean_a = np.mean(a)
if b is None:
mean_b = np.mean(b)
numerator = np.sum([(a_i - mean_a)*(b_i - mean_b) for a_i, b_i in zip(a, b)])
denominator = np.sqrt(np.sum([(a_i - mean_a)**2 for a_i in a])) * np.sqrt(np.sum([(b_i - mean_b)**2 for b_i in b]))
return numerator/denominator
def cosine_similarity(self, a:list, b:list, mean_a = None, mean_b = None) -> float:
"""
compute cosine similarity between a and b
return: cosine(a, b)
mean_a and mean_b is just for compatibility in other method.
"""
return np.sum([ai*bi for ai, bi in zip(a, b)])/(np.sqrt(np.sum([ai**2 for ai in a])) * np.sqrt(np.sum([bi**2 for bi in b])))
def get_rated_items(self, userid:int) -> list:
"""
get all rated items by userid
return a list of items (column name)
"""
user_row = self.rating_matrix.loc[userid, :]
# user_row is a series, can only be indexed by its column name
items = [x for x in self.rating_matrix.columns if user_row.loc[x] > 0]
return items
class UserBasedCF(CollaborativeFiltering):
    def __init__(self, data:pd.DataFrame, k_neighbors:int = 10,
                rating_matrix_row:str = None, rating_matrix_column:str = None,
                rating_matrix_value:str = None, movies_data:dict = None) -> None:
        """
        User-based collaborative filtering algorithm.
        params:
            data: pandas dataframe containing the ratings
            k_neighbors: number of most similar neighbors to average the ratings over
            rating_matrix_row/column/value: the column names in data used to build the rating matrix
            movies_data: movie metadata forwarded to the base class
        """
        super().__init__(data, k_neighbors, rating_matrix_row, rating_matrix_column,
                    rating_matrix_value, movies_data)
    def get_mutually_rated_items(self, user1: int, user2: int) -> dict:
        """
        Find the set of items rated by both user1 and user2.
        """
        user1_rated_items = self.get_rated_items(user1)
        user2_rated_items = self.get_rated_items(user2)
        mutually_rated_items = np.intersect1d(user1_rated_items, user2_rated_items, assume_unique=True )
        return mutually_rated_items
    def compute_similarity_score(self, target_user: int, similarity_metric:str) -> dict:
        """
        Compute the similarity score between target_user and all other users.
        param:
            target_user: id of the target user
            similarity_metric: 'Pearson' or 'Cosine'
        return: a dict, its keys are user ids, its values are the metric scores
        """
        score_function = self.pearson_correlation if similarity_metric == 'Pearson' else self.cosine_similarity
        scores = dict.fromkeys(self.rating_matrix.index, 0)
        items_rated_by_target_user = self.get_rated_items(target_user)
        for user in scores.keys():
            if target_user == user:
                scores[user] = 1
                continue
            items_rated_by_this_user = self.get_rated_items(user)
            mutually_rated_items = np.intersect1d(items_rated_by_target_user, items_rated_by_this_user)
            # if there is no commonly rated item between the 2 users, the similarity stays 0
            if len(mutually_rated_items) == 0:
                continue
            else:
                # similarity is computed only over the mutually rated items
                scores[user] = score_function(self.rating_matrix.loc[target_user, mutually_rated_items],
                                self.rating_matrix.loc[user, mutually_rated_items],
                                self.user_mean_ratings[target_user], self.user_mean_ratings[user] )
        return scores
    def predict_rating(
        self, target_user:int, target_item:int, k_neighbors:int = None,
        similarity_threshold: float = 0.5, mean_centered:bool= True, similarity_metric:str = 'Pearson') -> float:
        """
        Predict the rating of target_item for target_user, using the k most
        similar users that have rated the target item and whose similarity
        score exceeds `similarity_threshold`. If fewer than k users qualify,
        fewer are used; with no qualifying neighbor the prediction is 0.
        param:
            target_user: id of target user
            target_item: id of target item
            k_neighbors: number of neighbors used to predict the rating
            similarity_threshold: minimum score for a user to count as a neighbor
            mean_centered: use the mean-centered prediction formula if True,
                otherwise the raw weighted average
            similarity_metric: 'Pearson' or 'Cosine'
        return the predicted rating of the target item by the target user
        """
        if k_neighbors is None:
            k_neighbors = self.k_neighbors
        # retrieve the similarity scores from the cache when available,
        # otherwise compute and store them
        if self.all_similarity_score[target_user] is not None:
            if similarity_metric in self.all_similarity_score[target_user]:
                scores = self.all_similarity_score[target_user][similarity_metric]
            else:
                logger.info(f'Computing {similarity_metric} similarity of the target user and other users...')
                scores = self.compute_similarity_score(target_user = target_user, similarity_metric = similarity_metric)
                # save the score to memory
                self.all_similarity_score[target_user][similarity_metric] = scores
                logger.info('Done.')
        else:
            logger.info(f'Computing {similarity_metric} similarity of the target user and other users...')
            self.all_similarity_score[target_user] = {}
            scores = self.compute_similarity_score(target_user = target_user, similarity_metric = similarity_metric)
            self.all_similarity_score[target_user][similarity_metric] = scores
            logger.info('Done.')
        # sort the scores by value, descending
        sorted_scores = {k: v for k, v in sorted(scores.items(), key=lambda item: item[1], reverse=True)}
        # filter out users whose similarity score is below the threshold
        sorted_scores = {user:score for user, score in sorted_scores.items() if score > similarity_threshold }
        neighbors = []
        # collect up to k neighbors that actually rated the target item
        for key in sorted_scores.keys():
            if key != target_user and self.rating_matrix.loc[key, target_item] > 0:
                neighbors.append(key)
            if len(neighbors) >= k_neighbors:
                break
        if len(neighbors) == 0:
            return 0
        # NOTE(review): single-neighbor formula divides by |score| -- confirm
        # this is the intended normalization
        elif len(neighbors) == 1:
            return self.user_mean_ratings[target_user] + \
                (sorted_scores[neighbors[0]]*(self.rating_matrix.loc[neighbors[0], target_item] - self.user_mean_ratings[neighbors[0]]))/np.abs(sorted_scores[neighbors[0]])
        if mean_centered:
            predicted = self.user_mean_ratings[target_user] + \
                np.sum([ sorted_scores[user]*(self.rating_matrix.loc[user, target_item] - self.user_mean_ratings[user]) for user in neighbors ] ) / \
                np.sum([np.abs(sorted_scores[user]) for user in neighbors])
        else:
            # weighted average over the raw ratings of the neighbors.
            # BUG FIX: the original used `self.rating_matrix[user, target_item]`,
            # which is tuple indexing on a DataFrame and raises KeyError
            predicted = np.sum([sorted_scores[user]*self.rating_matrix.loc[user, target_item] for user in neighbors]) / \
                np.sum([sorted_scores[user] for user in neighbors])
        return predicted
    def recommend(self, target_user: int, num_items: int, similarity_metric: str = 'Pearson',
        k_neighbors: int = None, similarity_threshold: float = 0.5, mean_centered: bool = True, rating_threshold: int = 3) -> list:
        """
        Recommend up to num_items items to target_user.
        param:
            target_user: the id of the target user
            num_items: maximum number of items to recommend
            similarity_metric: 'Pearson' or 'Cosine'
            k_neighbors: number of neighbors used to predict each rating
            similarity_threshold: minimum similarity for a neighbor
            mean_centered: use the mean-centered prediction formula if True
            rating_threshold: recommend only items whose predicted rating exceeds this
        return a dict {item_id_or_title: predicted_rating} of the recommended items
            (titles are used as keys when movies_data was provided)
        """
        assert similarity_metric in ['Pearson', 'Cosine'], "similarity_metric must be 'Pearson' or 'Cosine'"
        predicted_rating = self.predict_ratings(target_user, similarity_metric, k_neighbors, similarity_threshold, mean_centered)
        logger.info('Predict rating done. Recommending promising items')
        if len(predicted_rating) > 1:
            predicted_rating = {k: v for k, v in sorted(predicted_rating.items(), key=lambda item: item[1], reverse=True)}
        # filter out the items whose predicted rating is below rating_threshold
        recommending_items = {item:predicted_rating[item] for item in predicted_rating.keys() if predicted_rating[item] > rating_threshold}
        if len(recommending_items) > num_items:
            recommending_items = {item:predicted_rating[item] for item in list(recommending_items.keys())[:num_items] }
        logger.info(f'These are {num_items} promising items for the target user {target_user} ')
        if self.movies_data is not None:
            return {self.movies_data[item]:score for item, score in recommending_items.items()}
        else:
            return recommending_items
    def predict_ratings(self, target_user:int, similarity_metric:str = 'Pearson',
        k_neighbors:int = None, similarity_threshold:float = 0.5, mean_centered:bool = True, ) -> dict:
        """
        Predict ratings of the target user for all the items she has not rated.
        return: a dict {item: predicted_rating}, or [] when every item is rated
        """
        if k_neighbors is None:
            k_neighbors = self.k_neighbors
        rated_items = self.get_rated_items(target_user)
        not_rated_items = list(set(self.rating_matrix.columns) - set(rated_items))
        # the user may have rated every item already
        if len(not_rated_items) == 0:
            print('There is nothing left for this user')
            return []
        logger.info('Start predict rating...')
        rating_predicted = {}
        for item in not_rated_items:
            rating_predicted[item] = self.predict_rating(target_user = target_user, target_item = item, k_neighbors = k_neighbors,
                                    similarity_threshold = similarity_threshold, mean_centered = mean_centered,
                                    similarity_metric=similarity_metric)
        return rating_predicted
class ItemBasedCF(CollaborativeFiltering):
    """
    Item-based collaborative filtering.

    Two items are compared with either the Pearson correlation or the
    Adjusted Cosine similarity.  The rating matrix kept by this class is
    still (m, n): m users by n items.
    """

    def __init__(self, data:pd.DataFrame, k_neighbors:int= 10, rating_matrix_row:str=None,
                 rating_matrix_column:str = None, rating_matrix_value:str = None,
                 movies_data:pd.DataFrame = None ) -> None:
        """
        params:
            data: pandas dataframe containing the ratings
            k_neighbors: number of most similar neighbors to average the ratings over
            rating_matrix_row/column/value: names of the columns in `data`
                used to create the rating matrix
            movies_data: a dict {movieid: movie_name} of all movies (optional)
        """
        # The super class __init__ builds the rating matrix and the per-user mean ratings.
        logger.info('Creating rating matrix')
        super().__init__(data, k_neighbors, rating_matrix_row, rating_matrix_column,
                         rating_matrix_value, movies_data)
        # User-mean-centered copy of the raw rating matrix (needed by Adjusted Cosine).
        logger.info('creating centered version of the rating matrix')
        self.centered_rating_matrix = self.get_centered_rating_matrix()

    def get_centered_rating_matrix(self) -> pd.DataFrame:
        """Return a copy of the rating matrix with each row centered by the user mean."""
        assert 'user_mean_ratings' in dir(self), 'Not found the user_mean_ratings dict'
        centered_rating_matrix = copy.deepcopy(self.rating_matrix)
        for user in self.user_mean_ratings:
            centered_rating_matrix.loc[user, :] -= self.user_mean_ratings[user]
        return centered_rating_matrix

    def adjusted_cosine(self, a:list, b:list, ) -> float:
        """
        Compute the Adjusted Cosine between a and b.
        a and b are expected to be already centered by the user mean rating.
        """
        numerator = np.sum([(ai*bi) for ai, bi in zip(a, b)])
        denominator = np.sqrt(np.sum([ai**2 for ai in a])) * np.sqrt(np.sum([bi**2 for bi in b]))
        return numerator / denominator

    def get_user_rated_item(self, item:int) -> list:
        """Return the list of users who rated `item` (i.e. whose rating is > 0)."""
        item_col = self.rating_matrix.loc[:, item]
        # item_col is a Series indexed by user id.
        return [x for x in self.rating_matrix.index if item_col.loc[x] > 0]

    def compute_item_mean_ratings(self, ) -> dict:
        """
        Compute the average rating for each item.

        NOTE(review): this replaces 0 with NaN *in place* on
        self.rating_matrix so unrated entries do not drag the means down.
        Downstream `> 0` tests treat NaN like 0, but the mutation is kept
        deliberately since other (inherited) code may depend on it.
        """
        self.rating_matrix = self.rating_matrix.replace(0, np.nan)
        means = self.rating_matrix.mean(axis = 0)
        assert len(means) == self.rating_matrix.shape[1], 'Some thing went wrong'
        return {item: means[item] for item in self.rating_matrix.columns}

    def compute_similarity_score(self, target_item:int, similarity_metric:str = 'AdjustedCosine' ) -> dict:
        """
        Compute the similarity score between target_item and all other items.
        param:
            target_item: id of the target item
            similarity_metric: 'Pearson' or 'AdjustedCosine'
        return: a dict {item_id: similarity score}
        """
        scores = dict.fromkeys(self.rating_matrix.columns, 0)
        users_rated_target_item = self.get_user_rated_item(target_item)
        for item in scores.keys():
            if target_item == item:
                scores[item] = 1  # an item is maximally similar to itself
                continue
            users_rated_this_item = self.get_user_rated_item(item)
            # Only users who rated BOTH items contribute to the similarity.
            mutually_rated_users = np.intersect1d(users_rated_target_item,
                                                 users_rated_this_item, assume_unique=True)
            if len(mutually_rated_users) == 0:
                continue  # no common raters -> the score stays 0
            if similarity_metric == 'AdjustedCosine':
                scores[item] = self.adjusted_cosine(
                    self.centered_rating_matrix.loc[mutually_rated_users, target_item],
                    self.centered_rating_matrix.loc[mutually_rated_users, item])
            elif similarity_metric == 'Pearson':
                scores[item] = self.pearson_correlation(
                    self.rating_matrix.loc[mutually_rated_users, target_item],
                    self.rating_matrix.loc[mutually_rated_users, item],
                    self.item_mean_ratings[target_item], self.item_mean_ratings[item])
        return scores

    def _get_similarity_scores(self, target_item:int, similarity_metric:str) -> dict:
        """Return cached similarity scores for target_item, computing and caching on a miss."""
        cached = self.all_similarity_score[target_item]
        if cached is not None and similarity_metric in cached:
            # logger.info(f'Retrieving similarity score {len(cached[similarity_metric])}')
            return cached[similarity_metric]
        logger.info(f'Computing {similarity_metric} similarity of the target item and other items...')
        if cached is None:
            self.all_similarity_score[target_item] = {}
        scores = self.compute_similarity_score(target_item = target_item,
                                               similarity_metric = similarity_metric)
        # save the score to memory
        self.all_similarity_score[target_item][similarity_metric] = scores
        logger.info('Done.')
        return scores

    def predict_rating(
            self, target_user:int, target_item:int, k_neighbors:int = None,
            similarity_threshold:float = 0.5, similarity_metric:str = 'AdjustedCosine') -> float:
        """
        Predict the rating of the target user for the target item.
        param:
            target_user: id of target user
            target_item: id of target item
            k_neighbors: number of neighbors used to predict the rating
            similarity_threshold: only consider an item as a neighbor if its
                similarity score with the target item > this threshold
            similarity_metric: either 'AdjustedCosine' or 'Pearson'
        return: the predicted rating, or -1 when no neighbor item qualifies.
        Either way the prediction formula is the raw weighted average, not
        the mean-centered formula used by UserBasedCF.
        """
        if k_neighbors is None:
            k_neighbors = self.k_neighbors
        scores = self._get_similarity_scores(target_item, similarity_metric)
        # An item qualifies as a neighbor only if the target user rated it AND
        # its similarity score with the target item exceeds the threshold.
        rated_items_by_target_user = self.get_rated_items(target_user)
        neighbors = {item: scores[item] for item in rated_items_by_target_user
                     if scores[item] > similarity_threshold}
        if len(neighbors) == 0:
            return -1
        if len(neighbors) > k_neighbors:
            # More acceptable neighbors than needed: keep the k most similar.
            top = sorted(neighbors.items(), key=lambda kv: kv[1], reverse=True)[:k_neighbors]
            neighbors = dict(top)
        # Weighted average over the raw ratings.  With a single neighbor this
        # reduces to the target user's rating of that neighbor item.
        # (Bug fix: the original returned the target user's rating of the
        # *target* item when there was exactly one neighbor, which is 0/NaN
        # by construction for users who have not rated the target item.)
        return np.sum([score * self.rating_matrix.loc[target_user, item]
                       for item, score in neighbors.items()]) / np.sum(list(neighbors.values()))

    def predict_ratings(self, target_item:int, similarity_metric:str = 'AdjustedCosine',
                        k_neighbors:int = 10, similarity_threshold:float = 0.5, ) -> dict:
        """
        Predict, for every user who did not rate the target item, the rating
        that user would give it.
        return: a dict {user: predicted_rating}, sorted by predicted rating
            in descending order
        """
        if k_neighbors is None:
            k_neighbors = self.k_neighbors
        assert similarity_metric in ['AdjustedCosine', 'Pearson'], "similarity_metric can only be 'AdjustedCosine' or 'Pearson'"
        # Item mean ratings are only needed by the Pearson correlation.
        if similarity_metric == 'Pearson':
            self.item_mean_ratings = self.compute_item_mean_ratings()
        # We only consider the users who did not rate the target item.
        users_not_rated_target_item = list(set(self.rating_matrix.index)
                                           - set(self.get_user_rated_item(target_item)))
        logger.info('Start predict rating...')
        predicted_rating = {
            user: self.predict_rating(user, target_item, k_neighbors,
                                      similarity_threshold, similarity_metric)
            for user in users_not_rated_target_item
        }
        logger.info('Predict rating done. Recommending promising users')
        # Sort by predicted rating, highest first.
        return {k: v for k, v in sorted(predicted_rating.items(),
                                        key=lambda item: item[1], reverse=True)}

    def recommend(self, target_item:int, num_users:int, similarity_metric:str = 'AdjustedCosine',
                  k_neighbors:int = 10, similarity_threshold:float = 0.5,
                  rating_threshold:int = 4 ) -> dict:
        """
        Find the most promising users for the target item.
        param:
            target_item: id of target item
            num_users: maximum number of users to recommend
            similarity_metric: either 'AdjustedCosine' or 'Pearson'
            k_neighbors: number of neighbors used to predict each rating
            similarity_threshold: only consider an item as a neighbor if its
                similarity score with the target item > this threshold
            rating_threshold: only recommend a user if her predicted rating
                for the target item > this threshold
        return: a dict {user: predicted_rating} with at most num_users entries
        The steps for Item-Based CF:
            1. Given a target item, compute the similarity score between the
               target item and all other items.
            2. For each user in the database that did not rate the target item:
               find k rated items of that user most similar to the target item,
               predict the rating via the weighted average formula, and save it.
            3. Sort the predictions and take the num_users highest ones.
        """
        predicted_rating = self.predict_ratings(target_item, similarity_metric,
                                                k_neighbors, similarity_threshold)
        # Keep only users whose predicted rating clears the threshold.
        predicted_rating = {user: rating for user, rating in predicted_rating.items()
                            if rating > rating_threshold}
        # predict_ratings returns the dict already sorted by rating, so slicing
        # the first num_users keys keeps the highest-rated users.
        if len(predicted_rating) > num_users:
            predicted_rating = {user: predicted_rating[user]
                                for user in list(predicted_rating.keys())[:num_users]}
        logger.info(f'These are {num_users} promising users for the target item {target_item}')
        # (The original rebuilt the same truncated dict when movies_data was
        # set; the keys here are user ids, so no movie-name mapping applies.)
        return predicted_rating
if __name__ == '__main__':
    # these are my test data: 4 users x 5 movies, np.nan marks a missing rating
    data_dict = {'userID': [1,1,1,1,1, 2,2,2,2,2, 3,3,3,3,3, 4,4,4,4,4],
                 'movieID': [1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5],
                 'rating': [np.nan ,4,1,1, np.nan, 1,2, 4,np.nan, 1, 5, 5, 3,4,np.nan, 5,5,1, np.nan, 1]}
    data = pd.DataFrame.from_dict(data_dict)
    # Item-based recommender averaging over 2 neighbors.
    recommender = ItemBasedCF(data, 2, 'userID', 'movieID', 'rating')
    # print(recommender.rating_matrix)
print(recommender.recommend(4, 1, 'Pearson', 1)) | [
"copy.deepcopy",
"numpy.sum",
"pandas.DataFrame.from_dict",
"logging.basicConfig",
"numpy.abs",
"numpy.mean",
"numpy.intersect1d",
"logging.getLogger"
] | [((430, 541), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO', 'format': 'log_format', 'datefmt': '"""%m/%d %I:%M:%S %p"""'}), "(stream=sys.stdout, level=logging.INFO, format=\n log_format, datefmt='%m/%d %I:%M:%S %p')\n", (449, 541), False, 'import logging\n'), ((568, 587), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (585, 587), False, 'import logging\n'), ((29044, 29077), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data_dict'], {}), '(data_dict)\n', (29066, 29077), True, 'import pandas as pd\n'), ((6070, 6142), 'numpy.intersect1d', 'np.intersect1d', (['user1_rated_items', 'user2_rated_items'], {'assume_unique': '(True)'}), '(user1_rated_items, user2_rated_items, assume_unique=True)\n', (6084, 6142), True, 'import numpy as np\n'), ((17221, 17254), 'copy.deepcopy', 'copy.deepcopy', (['self.rating_matrix'], {}), '(self.rating_matrix)\n', (17234, 17254), False, 'import copy\n'), ((3783, 3793), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (3790, 3793), True, 'import numpy as np\n'), ((3839, 3849), 'numpy.mean', 'np.mean', (['b'], {}), '(b)\n', (3846, 3849), True, 'import numpy as np\n'), ((7123, 7191), 'numpy.intersect1d', 'np.intersect1d', (['items_rated_by_target_user', 'items_rated_by_this_user'], {}), '(items_rated_by_target_user, items_rated_by_this_user)\n', (7137, 7191), True, 'import numpy as np\n'), ((19625, 19711), 'numpy.intersect1d', 'np.intersect1d', (['users_rated_target_item', 'users_rated_this_item'], {'assume_unique': '(True)'}), '(users_rated_target_item, users_rated_this_item,\n assume_unique=True)\n', (19639, 19711), True, 'import numpy as np\n'), ((3970, 4014), 'numpy.sum', 'np.sum', (['[((a_i - mean_a) ** 2) for a_i in a]'], {}), '([((a_i - mean_a) ** 2) for a_i in a])\n', (3976, 4014), True, 'import numpy as np\n'), ((4022, 4066), 'numpy.sum', 'np.sum', (['[((b_i - mean_b) ** 2) for b_i in b]'], {}), '([((b_i - mean_b) ** 2) for b_i in b])\n', (4028, 4066), 
True, 'import numpy as np\n'), ((11927, 12024), 'numpy.sum', 'np.sum', (['[(sorted_scores[user] * self.rating_matrix[user, target_item]) for user in\n neighbors]'], {}), '([(sorted_scores[user] * self.rating_matrix[user, target_item]) for\n user in neighbors])\n', (11933, 12024), True, 'import numpy as np\n'), ((12038, 12089), 'numpy.sum', 'np.sum', (['[sorted_scores[user] for user in neighbors]'], {}), '([sorted_scores[user] for user in neighbors])\n', (12044, 12089), True, 'import numpy as np\n'), ((4435, 4466), 'numpy.sum', 'np.sum', (['[(ai ** 2) for ai in a]'], {}), '([(ai ** 2) for ai in a])\n', (4441, 4466), True, 'import numpy as np\n'), ((4474, 4505), 'numpy.sum', 'np.sum', (['[(bi ** 2) for bi in b]'], {}), '([(bi ** 2) for bi in b])\n', (4480, 4505), True, 'import numpy as np\n'), ((11607, 11741), 'numpy.sum', 'np.sum', (['[(sorted_scores[user] * (self.rating_matrix.loc[user, target_item] - self.\n user_mean_ratings[user])) for user in neighbors]'], {}), '([(sorted_scores[user] * (self.rating_matrix.loc[user, target_item] -\n self.user_mean_ratings[user])) for user in neighbors])\n', (11613, 11741), True, 'import numpy as np\n'), ((17700, 17731), 'numpy.sum', 'np.sum', (['[(ai ** 2) for ai in a]'], {}), '([(ai ** 2) for ai in a])\n', (17706, 17731), True, 'import numpy as np\n'), ((17739, 17770), 'numpy.sum', 'np.sum', (['[(bi ** 2) for bi in b]'], {}), '([(bi ** 2) for bi in b])\n', (17745, 17770), True, 'import numpy as np\n'), ((11460, 11495), 'numpy.abs', 'np.abs', (['sorted_scores[neighbors[0]]'], {}), '(sorted_scores[neighbors[0]])\n', (11466, 11495), True, 'import numpy as np\n'), ((11770, 11797), 'numpy.abs', 'np.abs', (['sorted_scores[user]'], {}), '(sorted_scores[user])\n', (11776, 11797), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
import random

# Global command-line state; overwritten below while parsing sys.argv.
cmd = None        # 'encode' or 'decode'
debug = False     # when True, show intermediate images with matplotlib
seed = 20160930   # random seed for the watermark shuffle; must match between encode and decode
oldseed = False   # use the python2-compatible random algorithm
alpha = 3.0       # embedding strength of the watermark in frequency space
if __name__ == '__main__':
    # Print usage and exit when help was requested or no command was given.
    if '-h' in sys.argv or '--help' in sys.argv or len(sys.argv) < 2:
        print ('Usage: python bwm.py <cmd> [arg...] [opts...]')
        print (' cmds:')
        print (' encode <image> <watermark> <image(encoded)>')
        print (' image + watermark -> image(encoded)')
        print (' decode <image> <image(encoded)> <watermark>')
        print (' image + image(encoded) -> watermark')
        print (' opts:')
        print (' --debug, Show debug')
        print (' --seed <int>, Manual setting random seed (default is 20160930)')
        print (' --oldseed Use python2 random algorithm.')
        print (' --alpha <float>, Manual setting alpha (default is 3.0)')
        sys.exit(1)
    cmd = sys.argv[1]
    if cmd != 'encode' and cmd != 'decode':
        print ('Wrong cmd %s' % cmd)
        sys.exit(1)
    # Each recognized option (and its value) is deleted from sys.argv so that
    # only the positional file arguments remain afterwards.
    if '--debug' in sys.argv:
        debug = True
        del sys.argv[sys.argv.index('--debug')]
    if '--seed' in sys.argv:
        p = sys.argv.index('--seed')
        if len(sys.argv) <= p+1:
            print ('Missing <int> for --seed')
            sys.exit(1)
        seed = int(sys.argv[p+1])
        # Delete the value first, then the flag (indices shift after each del).
        del sys.argv[p+1]
        del sys.argv[p]
    if '--oldseed' in sys.argv:
        oldseed = True
        del sys.argv[sys.argv.index('--oldseed')]
    if '--alpha' in sys.argv:
        p = sys.argv.index('--alpha')
        if len(sys.argv) <= p+1:
            print ('Missing <float> for --alpha')
            sys.exit(1)
        alpha = float(sys.argv[p+1])
        # Delete the value first, then the flag (indices shift after each del).
        del sys.argv[p+1]
        del sys.argv[p]
    # After option removal, argv[2..4] must hold the three positional file names.
    if len(sys.argv) < 5:
        print ('Missing arg...')
        sys.exit(1)
    fn1 = sys.argv[2]
    fn2 = sys.argv[3]
    fn3 = sys.argv[4]
import cv2
import numpy as np
import matplotlib.pyplot as plt
# OpenCV stores image data in (B, G, R) channel order,
# while Matplotlib displays images in (R, G, B) order.
def bgr_to_rgb(img):
    """Reorder a 3-channel BGR image to RGB (for display with matplotlib)."""
    blue, green, red = cv2.split(img)
    return cv2.merge([red, green, blue])
if cmd == 'encode':
    print ('image<%s> + watermark<%s> -> image(encoded)<%s>' % (fn1, fn2, fn3))
    img = cv2.imread(fn1)
    wm = cv2.imread(fn2)
    if debug:
        plt.subplot(231), plt.imshow(bgr_to_rgb(img)), plt.title('image')
        plt.xticks([]), plt.yticks([])
        plt.subplot(234), plt.imshow(bgr_to_rgb(wm)), plt.title('watermark')
        plt.xticks([]), plt.yticks([])
    # print img.shape  # height, width, channels
    h, w = img.shape[0], img.shape[1]
    # The watermark must fit into the top half of the image: the bottom half
    # will carry a mirrored copy (see rwm below).
    hwm = np.zeros((int(h * 0.5), w, img.shape[2]))
    assert hwm.shape[0] > wm.shape[0]
    assert hwm.shape[1] > wm.shape[1]
    hwm2 = np.copy(hwm)
    # Place the watermark in the top-left corner of the half-height canvas.
    for i in range(wm.shape[0]):
        for j in range(wm.shape[1]):
            hwm2[i][j] = wm[i][j]
    # Seed the RNG so that decode can reproduce the same row/column permutation.
    if oldseed: random.seed(seed,version=1)
    else: random.seed(seed)
    m, n = list(range(hwm.shape[0])), list(range(hwm.shape[1]))
    if oldseed:
        # python2-compatible shuffle variant
        random.shuffle(m,random=random.random)
        random.shuffle(n,random=random.random)
    else:
        random.shuffle(m)
        random.shuffle(n)
    # Scramble (encrypt) the watermark by permuting its rows and columns.
    for i in range(hwm.shape[0]):
        for j in range(hwm.shape[1]):
            hwm[i][j] = hwm2[m[i]][n[j]]
    # Build the full-size watermark: scrambled copy in the top half, and a
    # point-mirrored copy in the bottom half.
    rwm = np.zeros(img.shape)
    for i in range(hwm.shape[0]):
        for j in range(hwm.shape[1]):
            rwm[i][j] = hwm[i][j]
            rwm[rwm.shape[0] - i - 1][rwm.shape[1] - j - 1] = hwm[i][j]
    if debug:
        plt.subplot(235), plt.imshow(bgr_to_rgb(rwm)), \
            plt.title('encrypted(watermark)')
        plt.xticks([]), plt.yticks([])
    # Embed: add the scrambled watermark, scaled by alpha, onto the image
    # spectrum, then transform back to the spatial domain.
    f1 = np.fft.fft2(img)
    f2 = f1 + alpha * rwm
    _img = np.fft.ifft2(f2)
    if debug:
        plt.subplot(232), plt.imshow(bgr_to_rgb(np.real(f1))), \
            plt.title('fft(image)')
        plt.xticks([]), plt.yticks([])
    img_wm = np.real(_img)
    assert cv2.imwrite(fn3, img_wm, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
    # Measure the (overflow/rounding) error introduced by saving the file.
    img_wm2 = cv2.imread(fn3)
    sum = 0
    for i in range(img_wm.shape[0]):
        for j in range(img_wm.shape[1]):
            for k in range(img_wm.shape[2]):
                sum += np.power(img_wm[i][j][k] - img_wm2[i][j][k], 2)
    miss = np.sqrt(sum) / (img_wm.shape[0] * img_wm.shape[1] * img_wm.shape[2]) * 100
    print ('Miss %s%% in save' % miss)
    if debug:
        plt.subplot(233), plt.imshow(bgr_to_rgb(np.uint8(img_wm))), \
            plt.title('image(encoded)')
        plt.xticks([]), plt.yticks([])
    # Sanity check: immediately extract the watermark back out of the
    # freshly-encoded (in-memory) image.
    f2 = np.fft.fft2(img_wm)
    rwm = (f2 - f1) / alpha
    rwm = np.real(rwm)
    wm = np.zeros(rwm.shape)
    # Undo the permutation over the top half, then mirror into the bottom half.
    for i in range(int(rwm.shape[0] * 0.5)):
        for j in range(rwm.shape[1]):
            wm[m[i]][n[j]] = np.uint8(rwm[i][j])
    for i in range(int(rwm.shape[0] * 0.5)):
        for j in range(rwm.shape[1]):
            wm[rwm.shape[0] - i - 1][rwm.shape[1] - j - 1] = wm[i][j]
    if debug:
        assert cv2.imwrite('_bwm.debug.wm.jpg', wm)
        plt.subplot(236), plt.imshow(bgr_to_rgb(wm)), plt.title(u'watermark')
        plt.xticks([]), plt.yticks([])
    if debug:
        plt.show()
elif cmd == 'decode':
print ('image<%s> + image(encoded)<%s> -> watermark<%s>' % (fn1, fn2, fn3))
img = cv2.imread(fn1)
img_wm = cv2.imread(fn2)
if debug:
plt.subplot(231), plt.imshow(bgr_to_rgb(img)), plt.title('image')
plt.xticks([]), plt.yticks([])
plt.subplot(234), plt.imshow(bgr_to_rgb(img_wm)), plt.title('image(encoded)')
plt.xticks([]), plt.yticks([])
if oldseed: random.seed(seed,version=1)
else: random.seed(seed)
m, n = list(range(int(img.shape[0] * 0.5))), list(range(img.shape[1]))
if oldseed:
random.shuffle(m,random=random.random)
random.shuffle(n,random=random.random)
else:
random.shuffle(m)
random.shuffle(n)
f1 = np.fft.fft2(img)
f2 = np.fft.fft2(img_wm)
if debug:
plt.subplot(232), plt.imshow(bgr_to_rgb(np.real(f1))), \
plt.title('fft(image)')
plt.xticks([]), plt.yticks([])
plt.subplot(235), plt.imshow(bgr_to_rgb(np.real(f1))), \
plt.title('fft(image(encoded))')
plt.xticks([]), plt.yticks([])
rwm = (f2 - f1) / alpha
rwm = np.real(rwm)
if debug:
plt.subplot(233), plt.imshow(bgr_to_rgb(rwm)), \
plt.title('encrypted(watermark)')
plt.xticks([]), plt.yticks([])
wm = np.zeros(rwm.shape)
for i in range(int(rwm.shape[0] * 0.5)):
for j in range(rwm.shape[1]):
wm[m[i]][n[j]] = np.uint8(rwm[i][j])
for i in range(int(rwm.shape[0] * 0.5)):
for j in range(rwm.shape[1]):
wm[rwm.shape[0] - i - 1][rwm.shape[1] - j - 1] = wm[i][j]
assert cv2.imwrite(fn3, wm)
if debug:
plt.subplot(236), plt.imshow(bgr_to_rgb(wm)), plt.title(u'watermark')
plt.xticks([]), plt.yticks([])
if debug:
plt.show()
| [
"matplotlib.pyplot.title",
"random.shuffle",
"numpy.fft.ifft2",
"numpy.copy",
"cv2.imwrite",
"numpy.power",
"matplotlib.pyplot.yticks",
"cv2.split",
"random.seed",
"sys.argv.index",
"numpy.real",
"matplotlib.pyplot.xticks",
"numpy.uint8",
"matplotlib.pyplot.show",
"numpy.fft.fft2",
"cv... | [((2067, 2081), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (2076, 2081), False, 'import cv2\n'), ((2093, 2113), 'cv2.merge', 'cv2.merge', (['[r, g, b]'], {}), '([r, g, b])\n', (2102, 2113), False, 'import cv2\n'), ((2225, 2240), 'cv2.imread', 'cv2.imread', (['fn1'], {}), '(fn1)\n', (2235, 2240), False, 'import cv2\n'), ((2250, 2265), 'cv2.imread', 'cv2.imread', (['fn2'], {}), '(fn2)\n', (2260, 2265), False, 'import cv2\n'), ((2721, 2733), 'numpy.copy', 'np.copy', (['hwm'], {}), '(hwm)\n', (2728, 2733), True, 'import numpy as np\n'), ((3272, 3291), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (3280, 3291), True, 'import numpy as np\n'), ((3637, 3653), 'numpy.fft.fft2', 'np.fft.fft2', (['img'], {}), '(img)\n', (3648, 3653), True, 'import numpy as np\n'), ((3691, 3707), 'numpy.fft.ifft2', 'np.fft.ifft2', (['f2'], {}), '(f2)\n', (3703, 3707), True, 'import numpy as np\n'), ((3877, 3890), 'numpy.real', 'np.real', (['_img'], {}), '(_img)\n', (3884, 3890), True, 'import numpy as np\n'), ((4004, 4019), 'cv2.imread', 'cv2.imread', (['fn3'], {}), '(fn3)\n', (4014, 4019), False, 'import cv2\n'), ((4525, 4544), 'numpy.fft.fft2', 'np.fft.fft2', (['img_wm'], {}), '(img_wm)\n', (4536, 4544), True, 'import numpy as np\n'), ((4583, 4595), 'numpy.real', 'np.real', (['rwm'], {}), '(rwm)\n', (4590, 4595), True, 'import numpy as np\n'), ((4606, 4625), 'numpy.zeros', 'np.zeros', (['rwm.shape'], {}), '(rwm.shape)\n', (4614, 4625), True, 'import numpy as np\n'), ((913, 924), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (921, 924), False, 'import sys\n'), ((1036, 1047), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1044, 1047), False, 'import sys\n'), ((1188, 1212), 'sys.argv.index', 'sys.argv.index', (['"""--seed"""'], {}), "('--seed')\n", (1202, 1212), False, 'import sys\n'), ((1548, 1573), 'sys.argv.index', 'sys.argv.index', (['"""--alpha"""'], {}), "('--alpha')\n", (1562, 1573), False, 'import sys\n'), ((1835, 1846), 'sys.exit', 'sys.exit', 
(['(1)'], {}), '(1)\n', (1843, 1846), False, 'import sys\n'), ((2855, 2883), 'random.seed', 'random.seed', (['seed'], {'version': '(1)'}), '(seed, version=1)\n', (2866, 2883), False, 'import random\n'), ((2893, 2910), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2904, 2910), False, 'import random\n'), ((2999, 3038), 'random.shuffle', 'random.shuffle', (['m'], {'random': 'random.random'}), '(m, random=random.random)\n', (3013, 3038), False, 'import random\n'), ((3046, 3085), 'random.shuffle', 'random.shuffle', (['n'], {'random': 'random.random'}), '(n, random=random.random)\n', (3060, 3085), False, 'import random\n'), ((3103, 3120), 'random.shuffle', 'random.shuffle', (['m'], {}), '(m)\n', (3117, 3120), False, 'import random\n'), ((3129, 3146), 'random.shuffle', 'random.shuffle', (['n'], {}), '(n)\n', (3143, 3146), False, 'import random\n'), ((4941, 4977), 'cv2.imwrite', 'cv2.imwrite', (['"""_bwm.debug.wm.jpg"""', 'wm'], {}), "('_bwm.debug.wm.jpg', wm)\n", (4952, 4977), False, 'import cv2\n'), ((5118, 5128), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5126, 5128), True, 'import matplotlib.pyplot as plt\n'), ((5242, 5257), 'cv2.imread', 'cv2.imread', (['fn1'], {}), '(fn1)\n', (5252, 5257), False, 'import cv2\n'), ((5271, 5286), 'cv2.imread', 'cv2.imread', (['fn2'], {}), '(fn2)\n', (5281, 5286), False, 'import cv2\n'), ((5870, 5886), 'numpy.fft.fft2', 'np.fft.fft2', (['img'], {}), '(img)\n', (5881, 5886), True, 'import numpy as np\n'), ((5896, 5915), 'numpy.fft.fft2', 'np.fft.fft2', (['img_wm'], {}), '(img_wm)\n', (5907, 5915), True, 'import numpy as np\n'), ((6259, 6271), 'numpy.real', 'np.real', (['rwm'], {}), '(rwm)\n', (6266, 6271), True, 'import numpy as np\n'), ((6439, 6458), 'numpy.zeros', 'np.zeros', (['rwm.shape'], {}), '(rwm.shape)\n', (6447, 6458), True, 'import numpy as np\n'), ((6755, 6775), 'cv2.imwrite', 'cv2.imwrite', (['fn3', 'wm'], {}), '(fn3, wm)\n', (6766, 6775), False, 'import cv2\n'), ((1120, 1145), 'sys.argv.index', 
'sys.argv.index', (['"""--debug"""'], {}), "('--debug')\n", (1134, 1145), False, 'import sys\n'), ((1305, 1316), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1313, 1316), False, 'import sys\n'), ((1477, 1504), 'sys.argv.index', 'sys.argv.index', (['"""--oldseed"""'], {}), "('--oldseed')\n", (1491, 1504), False, 'import sys\n'), ((1669, 1680), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1677, 1680), False, 'import sys\n'), ((2289, 2305), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (2300, 2305), True, 'import matplotlib.pyplot as plt\n'), ((2336, 2354), 'matplotlib.pyplot.title', 'plt.title', (['"""image"""'], {}), "('image')\n", (2345, 2354), True, 'import matplotlib.pyplot as plt\n'), ((2363, 2377), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2373, 2377), True, 'import matplotlib.pyplot as plt\n'), ((2379, 2393), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2389, 2393), True, 'import matplotlib.pyplot as plt\n'), ((2402, 2418), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(234)'], {}), '(234)\n', (2413, 2418), True, 'import matplotlib.pyplot as plt\n'), ((2448, 2470), 'matplotlib.pyplot.title', 'plt.title', (['"""watermark"""'], {}), "('watermark')\n", (2457, 2470), True, 'import matplotlib.pyplot as plt\n'), ((2479, 2493), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2489, 2493), True, 'import matplotlib.pyplot as plt\n'), ((2495, 2509), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2505, 2509), True, 'import matplotlib.pyplot as plt\n'), ((3493, 3509), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (3504, 3509), True, 'import matplotlib.pyplot as plt\n'), ((3554, 3587), 'matplotlib.pyplot.title', 'plt.title', (['"""encrypted(watermark)"""'], {}), "('encrypted(watermark)')\n", (3563, 3587), True, 'import matplotlib.pyplot as plt\n'), ((3596, 3610), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', 
(3606, 3610), True, 'import matplotlib.pyplot as plt\n'), ((3612, 3626), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (3622, 3626), True, 'import matplotlib.pyplot as plt\n'), ((3731, 3747), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (3742, 3747), True, 'import matplotlib.pyplot as plt\n'), ((3800, 3823), 'matplotlib.pyplot.title', 'plt.title', (['"""fft(image)"""'], {}), "('fft(image)')\n", (3809, 3823), True, 'import matplotlib.pyplot as plt\n'), ((3832, 3846), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (3842, 3846), True, 'import matplotlib.pyplot as plt\n'), ((3848, 3862), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (3858, 3862), True, 'import matplotlib.pyplot as plt\n'), ((4237, 4249), 'numpy.sqrt', 'np.sqrt', (['sum'], {}), '(sum)\n', (4244, 4249), True, 'import numpy as np\n'), ((4374, 4390), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(233)'], {}), '(233)\n', (4385, 4390), True, 'import matplotlib.pyplot as plt\n'), ((4448, 4475), 'matplotlib.pyplot.title', 'plt.title', (['"""image(encoded)"""'], {}), "('image(encoded)')\n", (4457, 4475), True, 'import matplotlib.pyplot as plt\n'), ((4484, 4498), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4494, 4498), True, 'import matplotlib.pyplot as plt\n'), ((4500, 4514), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4510, 4514), True, 'import matplotlib.pyplot as plt\n'), ((4738, 4757), 'numpy.uint8', 'np.uint8', (['rwm[i][j]'], {}), '(rwm[i][j])\n', (4746, 4757), True, 'import numpy as np\n'), ((4986, 5002), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(236)'], {}), '(236)\n', (4997, 5002), True, 'import matplotlib.pyplot as plt\n'), ((5032, 5055), 'matplotlib.pyplot.title', 'plt.title', (['u"""watermark"""'], {}), "(u'watermark')\n", (5041, 5055), True, 'import matplotlib.pyplot as plt\n'), ((5064, 5078), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', 
(5074, 5078), True, 'import matplotlib.pyplot as plt\n'), ((5080, 5094), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (5090, 5094), True, 'import matplotlib.pyplot as plt\n'), ((5557, 5585), 'random.seed', 'random.seed', (['seed'], {'version': '(1)'}), '(seed, version=1)\n', (5568, 5585), False, 'import random\n'), ((5595, 5612), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (5606, 5612), False, 'import random\n'), ((5712, 5751), 'random.shuffle', 'random.shuffle', (['m'], {'random': 'random.random'}), '(m, random=random.random)\n', (5726, 5751), False, 'import random\n'), ((5759, 5798), 'random.shuffle', 'random.shuffle', (['n'], {'random': 'random.random'}), '(n, random=random.random)\n', (5773, 5798), False, 'import random\n'), ((5816, 5833), 'random.shuffle', 'random.shuffle', (['m'], {}), '(m)\n', (5830, 5833), False, 'import random\n'), ((5842, 5859), 'random.shuffle', 'random.shuffle', (['n'], {}), '(n)\n', (5856, 5859), False, 'import random\n'), ((6931, 6941), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6939, 6941), True, 'import matplotlib.pyplot as plt\n'), ((4178, 4225), 'numpy.power', 'np.power', (['(img_wm[i][j][k] - img_wm2[i][j][k])', '(2)'], {}), '(img_wm[i][j][k] - img_wm2[i][j][k], 2)\n', (4186, 4225), True, 'import numpy as np\n'), ((5310, 5326), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (5321, 5326), True, 'import matplotlib.pyplot as plt\n'), ((5357, 5375), 'matplotlib.pyplot.title', 'plt.title', (['"""image"""'], {}), "('image')\n", (5366, 5375), True, 'import matplotlib.pyplot as plt\n'), ((5384, 5398), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (5394, 5398), True, 'import matplotlib.pyplot as plt\n'), ((5400, 5414), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (5410, 5414), True, 'import matplotlib.pyplot as plt\n'), ((5423, 5439), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(234)'], {}), '(234)\n', (5434, 5439), True, 
'import matplotlib.pyplot as plt\n'), ((5473, 5500), 'matplotlib.pyplot.title', 'plt.title', (['"""image(encoded)"""'], {}), "('image(encoded)')\n", (5482, 5500), True, 'import matplotlib.pyplot as plt\n'), ((5509, 5523), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (5519, 5523), True, 'import matplotlib.pyplot as plt\n'), ((5525, 5539), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (5535, 5539), True, 'import matplotlib.pyplot as plt\n'), ((5939, 5955), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (5950, 5955), True, 'import matplotlib.pyplot as plt\n'), ((6008, 6031), 'matplotlib.pyplot.title', 'plt.title', (['"""fft(image)"""'], {}), "('fft(image)')\n", (6017, 6031), True, 'import matplotlib.pyplot as plt\n'), ((6040, 6054), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6050, 6054), True, 'import matplotlib.pyplot as plt\n'), ((6056, 6070), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (6066, 6070), True, 'import matplotlib.pyplot as plt\n'), ((6079, 6095), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (6090, 6095), True, 'import matplotlib.pyplot as plt\n'), ((6148, 6180), 'matplotlib.pyplot.title', 'plt.title', (['"""fft(image(encoded))"""'], {}), "('fft(image(encoded))')\n", (6157, 6180), True, 'import matplotlib.pyplot as plt\n'), ((6189, 6203), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6199, 6203), True, 'import matplotlib.pyplot as plt\n'), ((6205, 6219), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (6215, 6219), True, 'import matplotlib.pyplot as plt\n'), ((6295, 6311), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(233)'], {}), '(233)\n', (6306, 6311), True, 'import matplotlib.pyplot as plt\n'), ((6356, 6389), 'matplotlib.pyplot.title', 'plt.title', (['"""encrypted(watermark)"""'], {}), "('encrypted(watermark)')\n", (6365, 6389), True, 'import matplotlib.pyplot as plt\n'), 
((6398, 6412), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6408, 6412), True, 'import matplotlib.pyplot as plt\n'), ((6414, 6428), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (6424, 6428), True, 'import matplotlib.pyplot as plt\n'), ((6571, 6590), 'numpy.uint8', 'np.uint8', (['rwm[i][j]'], {}), '(rwm[i][j])\n', (6579, 6590), True, 'import numpy as np\n'), ((6799, 6815), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(236)'], {}), '(236)\n', (6810, 6815), True, 'import matplotlib.pyplot as plt\n'), ((6845, 6868), 'matplotlib.pyplot.title', 'plt.title', (['u"""watermark"""'], {}), "(u'watermark')\n", (6854, 6868), True, 'import matplotlib.pyplot as plt\n'), ((6877, 6891), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6887, 6891), True, 'import matplotlib.pyplot as plt\n'), ((6893, 6907), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (6903, 6907), True, 'import matplotlib.pyplot as plt\n'), ((3771, 3782), 'numpy.real', 'np.real', (['f1'], {}), '(f1)\n', (3778, 3782), True, 'import numpy as np\n'), ((4414, 4430), 'numpy.uint8', 'np.uint8', (['img_wm'], {}), '(img_wm)\n', (4422, 4430), True, 'import numpy as np\n'), ((5979, 5990), 'numpy.real', 'np.real', (['f1'], {}), '(f1)\n', (5986, 5990), True, 'import numpy as np\n'), ((6119, 6130), 'numpy.real', 'np.real', (['f1'], {}), '(f1)\n', (6126, 6130), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import pickle
import multiprocessing
from joblib import Parallel, delayed
from run_analysis import load_metabric, illumina2ensembl_dictionary, get_reactome_illumina, return_pathway
import porch
def permute_set(set_size):
    """Survival z-score for one random gene set of `set_size` genes.

    Draws `set_size` genes at random from the module-level `genes`,
    estimates pathway activity with porch and fits a Cox survival model
    (duration column 'T', event column 'E').

    Returns:
        The z statistic of the fit, or the string 'error' when any step
        fails; callers filter these failed permutations out later.
    """
    try:
        set_genes = np.random.choice(genes, set_size)
        # NOTE: porch_proc rebinds `set_size` here (third return value);
        # the column name below intentionally uses that returned value.
        setname, set_annot, set_size, activity, eigen_sample_dict = porch.porch_proc('setname', 'annot', set_genes, expression_df)
        activity_df = pd.DataFrame(data=activity, index=expression_df.columns, columns= ['set' + str(set_size)]).T
        rowresult = porch.survival(activity_df.iloc[0], metadata_df, duration_col = 'T', event_col = 'E')
        return rowresult['z']
    except Exception:
        # BUG FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # catch Exception and keep the best-effort 'error' sentinel.
        return 'error'
if __name__ == "__main__":
    metabric_path = '../../data/metabric'
    illumina2ensembl_path = 'data/illumina2ensembl.txt'
    data = load_metabric(metabric_path)
    # First 8 rows hold clinical metadata, the remainder expression values.
    expression_df = data.iloc[8:,:]
    metadata_df = data.iloc[:8,:]
    # Event indicator E: death from disease-specific cause.
    metadata_df.loc['E'] = (metadata_df.loc['last_follow_up_status'] == 'd-d.s.')*1
    genes = expression_df.index
    # BUG FIX: open pickle files via `with` so the handles are closed.
    with open('results/metabric_path_activities.p', 'rb') as fh:
        activities = pickle.load(fh)
    set_sizes = np.unique(activities['set_size'])
    num_cores = multiprocessing.cpu_count() - 1
    num_perm = 10**4
    results = pd.DataFrame(columns=np.arange(num_perm))
    i = 0
    for set_size in set_sizes:
        i += 1
        print('Permutations for set size ' + str(set_size) + ', ' + str(i) + '/' + str(len(set_sizes)))
        # BUG FIX: local variable was misspelled 'permutaions'.
        permutations = Parallel(n_jobs = num_cores, prefer="threads")(delayed(permute_set)(set_size) for i in range(num_perm))
        results.loc[set_size] = permutations
    with open('results/set_permutation_results.p', 'wb') as fh:
        pickle.dump(results, fh)
    ###
    with open('results/metabric_path_survival.p', 'rb') as fh:
        survival = pickle.load(fh)
    survival.index = [x.replace('_','-') for x in survival.index]
    # BUG FIX: 'activity' was undefined here (it only exists inside
    # permute_set); the loaded frame is 'activities'.
    survival['ngenes'] = activities['set_size']
    for pathway, row in survival.iterrows():
        z = np.abs(row['z'])
        # BUG FIX: 'perm_results' was undefined; the permutation z-scores
        # computed above live in 'results'.
        perms = results.loc[row['ngenes']]
        perms = np.abs(perms[perms != 'error'])
        num_higher = sum(x > z for x in perms)
        p = num_higher/len(perms)
        survival.loc[pathway, 'p_perms'] = p
    survival['p_perms'] = np.round(survival['p_perms'], decimals=4)
    # BUG FIX: same undefined-name fix as above ('activity' -> 'activities');
    # presumably 'annotation' is a column of the activities frame -- verify.
    survival['annotation'] = activities['annotation']
    survival[['annotation', 'p_perms']].sort_values('p_perms').to_csv('permutation_p_results.csv')
| [
"numpy.abs",
"porch.survival",
"porch.porch_proc",
"joblib.Parallel",
"numpy.arange",
"numpy.random.choice",
"run_analysis.load_metabric",
"joblib.delayed",
"numpy.round",
"numpy.unique",
"multiprocessing.cpu_count"
] | [((882, 910), 'run_analysis.load_metabric', 'load_metabric', (['metabric_path'], {}), '(metabric_path)\n', (895, 910), False, 'from run_analysis import load_metabric, illumina2ensembl_dictionary, get_reactome_illumina, return_pathway\n'), ((1195, 1228), 'numpy.unique', 'np.unique', (["activities['set_size']"], {}), "(activities['set_size'])\n", (1204, 1228), True, 'import numpy as np\n'), ((2290, 2331), 'numpy.round', 'np.round', (["survival['p_perms']"], {'decimals': '(4)'}), "(survival['p_perms'], decimals=4)\n", (2298, 2331), True, 'import numpy as np\n'), ((291, 324), 'numpy.random.choice', 'np.random.choice', (['genes', 'set_size'], {}), '(genes, set_size)\n', (307, 324), True, 'import numpy as np\n'), ((394, 456), 'porch.porch_proc', 'porch.porch_proc', (['"""setname"""', '"""annot"""', 'set_genes', 'expression_df'], {}), "('setname', 'annot', set_genes, expression_df)\n", (410, 456), False, 'import porch\n'), ((592, 677), 'porch.survival', 'porch.survival', (['activity_df.iloc[0]', 'metadata_df'], {'duration_col': '"""T"""', 'event_col': '"""E"""'}), "(activity_df.iloc[0], metadata_df, duration_col='T',\n event_col='E')\n", (606, 677), False, 'import porch\n'), ((1246, 1273), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1271, 1273), False, 'import multiprocessing\n'), ((2024, 2040), 'numpy.abs', 'np.abs', (["row['z']"], {}), "(row['z'])\n", (2030, 2040), True, 'import numpy as np\n'), ((2105, 2136), 'numpy.abs', 'np.abs', (["perms[perms != 'error']"], {}), "(perms[perms != 'error'])\n", (2111, 2136), True, 'import numpy as np\n'), ((1335, 1354), 'numpy.arange', 'np.arange', (['num_perm'], {}), '(num_perm)\n', (1344, 1354), True, 'import numpy as np\n'), ((1541, 1585), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_cores', 'prefer': '"""threads"""'}), "(n_jobs=num_cores, prefer='threads')\n", (1549, 1585), False, 'from joblib import Parallel, delayed\n'), ((1588, 1608), 'joblib.delayed', 'delayed', (['permute_set'], 
{}), '(permute_set)\n', (1595, 1608), False, 'from joblib import Parallel, delayed\n')] |
"""
Builder for Distiller
Author: <NAME> (https://github.com/vectominist)
"""
import sys
import copy
import math
from distutils.util import strtobool
import yaml
import numpy as np
import torch
from torch import nn
from torch.nn.utils.rnn import pad_sequence
from .model import DistillerConfig, DistillerModel
import s3prl.optimizers
class DistillerBuilder(nn.Module):
    """
    A builder class for all pre-trained Distiller.
    Child classes only need to implement the __init__() and forward() method.
    """

    def __init__(self, options, config, verbose=False):
        """Load config from a YAML file, or from the checkpoint when config is None.

        Args:
            options: dict with keys 'ckpt_file', 'load_pretrain', 'no_grad',
                'permute_input' (the boolean ones as strtobool-parsable strings).
            config: path to a YAML config file, or None to read the config
                embedded in the checkpoint.
            verbose: print extra information when True.
        """
        super().__init__()

        # read config
        if config is not None:
            self.config = yaml.load(open(config, "r"), Loader=yaml.FullLoader)
        else:
            # Some old checkpoints contain a pickled scheduler that expects an
            # 'optimizers' module, which has since moved into the s3prl package;
            # alias it temporarily while un-pickling, then restore.
            original_optimizer = sys.modules.get("optimizers")
            sys.modules["optimizers"] = s3prl.optimizers
            self.all_states = torch.load(options["ckpt_file"], map_location="cpu")
            self.config = self.all_states["Config"]
            del sys.modules["optimizers"]
            if original_optimizer is not None:
                sys.modules["optimizers"] = original_optimizer

        # parse the options dict
        self.load = bool(strtobool(options["load_pretrain"]))
        self.no_grad = bool(strtobool(options["no_grad"]))
        self.permute_input = bool(strtobool(options["permute_input"]))

        # Set model config
        self.model_config = DistillerConfig(self.config["distiller"])
        self.hidden_size = self.model_config.encoder_embed_dim
        self.max_input_length = 0  # 0 disables the length limit

        if self.max_input_length > 0 and verbose:
            print("[DistillerBuilder] - Maximum input length: ", self.max_input_length)

    def load_model(self, model, state_dict, verbose=False):
        """Load pre-trained weights into `model` and return it.

        Raises:
            RuntimeError: if the state dict does not match the model; the
                original load error is chained for diagnosis.
        """
        try:
            model.load_state_dict(state_dict)
        except Exception as err:
            # BUG FIX: a bare `except:` discarded the underlying error, making
            # checkpoint/architecture mismatches impossible to diagnose.
            raise RuntimeError(
                "[DistillerBuilder] - Pre-trained weights NOT loaded!"
            ) from err
        if verbose:
            print("[DistillerBuilder] - Pre-trained weights loaded!")
        return model

    def process_input_data(self, wave, wave_len):
        """Process input data for the model.

        Args:
            wave: waveform tensor of shape (T,) or (batch_size, T).
            wave_len: valid (unpadded) length of each batch item.

        Returns:
            (wave, pad_mask): float32 waveform of shape (batch_size, T) and a
            float32 mask of the same shape, 1 over valid samples, 0 over padding.
        """
        # add arbitrary batch axis B if input `wave` has shape of T
        if wave.dim() == 1:
            wave = wave.unsqueeze(0)
        elif wave.dim() > 2:
            raise ValueError("wave must have 1 or 2 dimensions, got %d" % wave.dim())

        batch_size = wave.shape[0]
        seq_len = wave.shape[1]

        pad_mask = np.ones((batch_size, seq_len))  # (batch_size, seq_len)
        # zero vectors for padding dimension
        for idx in range(wave.shape[0]):
            pad_mask[idx, wave_len[idx] :] = 0

        wave = wave.to(dtype=torch.float32)  # (batch_size, seq_len)
        pad_mask = torch.FloatTensor(pad_mask).to(
            device=wave.device, dtype=torch.float32
        )  # (batch_size, seq_len)

        return wave, pad_mask  # (x, pad_mask)

    def _forward(self, x, x_len, get_hidden=False, no_pred=False):
        """Run the underlying model on raw waveforms.

        Returns the model output tuple: (feat, feat_final, pred, pad_mask).
        """
        wave, pad_mask = self.process_input_data(x, x_len)
        x = self.model(wave, pad_mask, get_hidden=get_hidden, no_pred=no_pred)
        return x
class PretrainedDistiller(DistillerBuilder):
    """
    Use this class to extract features from the Distiller model,
    or to finetune the pre-trained Distiller with any downstream tasks.
    """

    def __init__(self, options, config=None, verbose=False):
        super().__init__(options, config, verbose)

        # Build model
        self.model = DistillerModel(self.model_config)
        # IDIOM FIX: the original used a conditional *expression* purely for
        # its side effect; a plain statement makes the intent explicit.
        if self.no_grad:
            self.model.eval()
        else:
            self.model.train()
        self.out_dim = self.hidden_size

        # Load from a PyTorch state_dict
        if self.load:
            self.model = self.load_model(
                self.model, self.all_states["Distiller"], verbose
            )
            if verbose:
                num_params = sum(
                    p.numel() for p in self.model.parameters() if p.requires_grad
                )
                print(
                    "[PretrainedDistiller] - Number of parameters: " + str(num_params)
                )

    def forward(self, wave_inputs, get_hidden=False, no_pred=False):
        """Extract features from a list of variable-length waveform tensors.

        Args:
            wave_inputs: list of 1-D waveform tensors (possibly different lengths).
            get_hidden: also return hidden states when True.
            no_pred: skip the prediction heads when True.

        Returns:
            The model output tuple (feat, feat_final, pred, pad_mask).
        """
        wave_len = [len(wave) for wave in wave_inputs]
        wave_inputs = pad_sequence(wave_inputs, batch_first=True)
        # (batch_size, audio_len)

        # Wrap in no_grad for pure feature extraction; gradients flow only in
        # fine-tuning mode.
        if self.no_grad:
            with torch.no_grad():
                x = self._forward(
                    wave_inputs, wave_len, get_hidden=get_hidden, no_pred=no_pred
                )
        else:
            x = self._forward(
                wave_inputs, wave_len, get_hidden=get_hidden, no_pred=no_pred
            )
        return x
| [
"distutils.util.strtobool",
"torch.load",
"torch.FloatTensor",
"numpy.ones",
"torch.nn.utils.rnn.pad_sequence",
"torch.no_grad",
"sys.modules.get"
] | [((2602, 2632), 'numpy.ones', 'np.ones', (['(batch_size, seq_len)'], {}), '((batch_size, seq_len))\n', (2609, 2632), True, 'import numpy as np\n'), ((4536, 4579), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['wave_inputs'], {'batch_first': '(True)'}), '(wave_inputs, batch_first=True)\n', (4548, 4579), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((943, 972), 'sys.modules.get', 'sys.modules.get', (['"""optimizers"""'], {}), "('optimizers')\n", (958, 972), False, 'import sys\n'), ((1061, 1113), 'torch.load', 'torch.load', (["options['ckpt_file']"], {'map_location': '"""cpu"""'}), "(options['ckpt_file'], map_location='cpu')\n", (1071, 1113), False, 'import torch\n'), ((1378, 1413), 'distutils.util.strtobool', 'strtobool', (["options['load_pretrain']"], {}), "(options['load_pretrain'])\n", (1387, 1413), False, 'from distutils.util import strtobool\n'), ((1443, 1472), 'distutils.util.strtobool', 'strtobool', (["options['no_grad']"], {}), "(options['no_grad'])\n", (1452, 1472), False, 'from distutils.util import strtobool\n'), ((1508, 1543), 'distutils.util.strtobool', 'strtobool', (["options['permute_input']"], {}), "(options['permute_input'])\n", (1517, 1543), False, 'from distutils.util import strtobool\n'), ((2884, 2911), 'torch.FloatTensor', 'torch.FloatTensor', (['pad_mask'], {}), '(pad_mask)\n', (2901, 2911), False, 'import torch\n'), ((4657, 4672), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4670, 4672), False, 'import torch\n')] |
# ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
import ipywidgets as widgets
from traitlets import Unicode, Float, List, Instance
from IPython.display import display
import open3d as o3
import numpy as np
def geometry_to_json(geometry):
    """Serialize an Open3D geometry into a JSON-ready dict (PointCloud only)."""
    if not isinstance(geometry, o3.PointCloud):
        raise NotImplementedError(
            "Only supporting geometry_to_json for PointCloud")

    # TODO: do not flatten
    def flat(values):
        return np.asarray(values, dtype=np.float32).reshape(-1).tolist()

    return {
        'type': 'PointCloud',
        'points': flat(geometry.points),
        'colors': flat(geometry.colors),
    }
@widgets.register
class JVisualizer(widgets.DOMWidget):
    """Jupyter widget that renders Open3D geometries in the notebook frontend."""

    _view_name = Unicode('JVisualizerView').tag(sync=True)
    _view_module = Unicode('open3d').tag(sync=True)
    _view_module_version = Unicode('~@PROJECT_VERSION_THREE_NUMBER@').tag(sync=True)
    _model_name = Unicode('JVisualizerModel').tag(sync=True)
    _model_module = Unicode('open3d').tag(sync=True)
    _model_module_version = Unicode('~@PROJECT_VERSION_THREE_NUMBER@').tag(sync=True)

    # Traitlets requires synced attributes to be declared at class level.
    geometry_jsons = List(Instance(dict)).tag(sync=True)

    def __init__(self):
        super(JVisualizer, self).__init__()
        self.geometry_jsons = []
        self.geometries = []

    def __repr__(self):
        return f"JVisualizer with {len(self.geometry_jsons)} geometries"

    def add_geometry(self, geometry):
        """Track `geometry` and re-sync its JSON form to the frontend."""
        # TODO: see if self.send(content=...) could push geometry data instead.
        # Traitlets only syncs on assignment, so geometry_jsons must be
        # reassigned wholesale; we therefore track both lists.
        self.geometries.append(geometry)
        self.geometry_jsons = [geometry_to_json(g) for g in self.geometries]

    def clear(self):
        """Drop every tracked geometry and its synced JSON."""
        self.geometries = []
        self.geometry_jsons = []

    def show(self):
        """Render the widget in the current output cell."""
        display(self)
| [
"traitlets.Unicode",
"numpy.asarray",
"traitlets.Instance",
"IPython.display.display"
] | [((3691, 3704), 'IPython.display.display', 'display', (['self'], {}), '(self)\n', (3698, 3704), False, 'from IPython.display import display\n'), ((2214, 2240), 'traitlets.Unicode', 'Unicode', (['"""JVisualizerView"""'], {}), "('JVisualizerView')\n", (2221, 2240), False, 'from traitlets import Unicode, Float, List, Instance\n'), ((2275, 2292), 'traitlets.Unicode', 'Unicode', (['"""open3d"""'], {}), "('open3d')\n", (2282, 2292), False, 'from traitlets import Unicode, Float, List, Instance\n'), ((2335, 2377), 'traitlets.Unicode', 'Unicode', (['"""~@PROJECT_VERSION_THREE_NUMBER@"""'], {}), "('~@PROJECT_VERSION_THREE_NUMBER@')\n", (2342, 2377), False, 'from traitlets import Unicode, Float, List, Instance\n'), ((2411, 2438), 'traitlets.Unicode', 'Unicode', (['"""JVisualizerModel"""'], {}), "('JVisualizerModel')\n", (2418, 2438), False, 'from traitlets import Unicode, Float, List, Instance\n'), ((2474, 2491), 'traitlets.Unicode', 'Unicode', (['"""open3d"""'], {}), "('open3d')\n", (2481, 2491), False, 'from traitlets import Unicode, Float, List, Instance\n'), ((2535, 2577), 'traitlets.Unicode', 'Unicode', (['"""~@PROJECT_VERSION_THREE_NUMBER@"""'], {}), "('~@PROJECT_VERSION_THREE_NUMBER@')\n", (2542, 2577), False, 'from traitlets import Unicode, Float, List, Instance\n'), ((2684, 2698), 'traitlets.Instance', 'Instance', (['dict'], {}), '(dict)\n', (2692, 2698), False, 'from traitlets import Unicode, Float, List, Instance\n'), ((1830, 1875), 'numpy.asarray', 'np.asarray', (['geometry.points'], {'dtype': 'np.float32'}), '(geometry.points, dtype=np.float32)\n', (1840, 1875), True, 'import numpy as np\n'), ((1935, 1980), 'numpy.asarray', 'np.asarray', (['geometry.colors'], {'dtype': 'np.float32'}), '(geometry.colors, dtype=np.float32)\n', (1945, 1980), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# # NVIDIA TensorRT MNIST Example with Triton Inference Server
#
# 
#
# This example shows how you can deploy a TensorRT model with NVIDIA Triton Server. In this case we use a prebuilt TensorRT model for NVIDIA v100 GPUs.
#
# Note this example requires some advanced setup and is directed for those with tensorRT experience.
#
# ## Prerequisites
#
# * Install requirements in `requirements.txt`
# * An authorized kubernetes cluster with V100 GPUs installed and configured.
# * For GKE see [GKE GPU Documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/gpus)
# * [Install Seldon Core](file:///home/clive/work/seldon-core/fork-seldon-core/doc/_build/html/examples/seldon_core_setup.html) and install Ambassador and port-forward to Ambassador on localhost:8003
#
#
# This example uses the [KFServing protocol supported by Triton Infernence Server](https://github.com/triton-inference-server/server/tree/master/docs/protocol) which Seldon also supports.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
import json
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from matplotlib import pyplot as plt
def gen_image(arr):
    """Render a flat 784-value vector as a 28x28 grayscale image; return pyplot."""
    pixels = np.reshape(arr, (28, 28)) * 255
    plt.imshow(pixels.astype(np.uint8), cmap=plt.cm.gray_r, interpolation="nearest")
    return plt
# In[2]:
# Download/load MNIST. `as_supervised` yields (image, label) tuples and
# `with_info` additionally returns the dataset metadata object.
(ds_train, ds_test), ds_info = tfds.load(
    "mnist",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)
# In[3]:
def normalize_img(image, label):
    """Cast the image to float32 scaled by 255; the label passes through."""
    # NOTE(review): despite the original "normalizes uint8 -> float32" wording,
    # this multiplies by 255 rather than dividing -- confirm the intended scale
    # against the MEANS preprocessing below.
    scaled = tf.cast(image, tf.float32) * 255
    return scaled, label
# Apply the scaling map in parallel, then expose the pipeline as a plain
# numpy iterator of (image, label) pairs.
ds_train = ds_train.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
npX = tfds.as_numpy(ds_train, graph=None)
# In[4]:
MEANS = np.array(
[
255.0,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
254,
254,
254,
253,
252,
252,
251,
251,
252,
252,
253,
254,
254,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
254,
254,
253,
251,
249,
248,
245,
243,
242,
242,
243,
246,
248,
251,
253,
254,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
254,
253,
250,
247,
242,
235,
228,
220,
213,
210,
211,
216,
224,
232,
240,
246,
251,
253,
254,
255,
255,
255,
255,
255,
255,
255,
255,
254,
251,
248,
242,
234,
223,
211,
196,
181,
170,
164,
166,
175,
189,
205,
221,
233,
243,
248,
252,
254,
255,
255,
255,
255,
255,
255,
254,
252,
248,
241,
231,
217,
202,
184,
166,
149,
136,
131,
134,
143,
159,
180,
201,
220,
234,
243,
249,
253,
255,
255,
255,
255,
255,
254,
253,
249,
243,
233,
219,
201,
181,
161,
143,
130,
122,
120,
122,
129,
141,
161,
185,
208,
227,
240,
248,
252,
254,
255,
255,
255,
255,
254,
251,
246,
238,
226,
208,
187,
164,
146,
135,
131,
132,
133,
132,
133,
139,
154,
178,
202,
223,
239,
248,
252,
255,
255,
255,
255,
254,
253,
251,
245,
236,
221,
200,
177,
156,
144,
144,
150,
156,
156,
151,
144,
144,
156,
178,
202,
224,
240,
249,
253,
255,
255,
255,
255,
254,
253,
251,
245,
235,
218,
195,
172,
155,
152,
161,
172,
176,
170,
161,
150,
149,
161,
183,
207,
227,
242,
250,
254,
255,
255,
255,
255,
255,
254,
251,
246,
234,
215,
191,
168,
156,
160,
173,
182,
179,
169,
157,
147,
149,
166,
190,
213,
230,
243,
251,
254,
255,
255,
255,
255,
255,
254,
252,
246,
233,
212,
186,
165,
157,
164,
175,
176,
165,
153,
142,
137,
147,
170,
196,
217,
231,
242,
251,
255,
255,
255,
255,
255,
255,
254,
252,
245,
230,
207,
182,
163,
158,
164,
168,
158,
143,
131,
125,
128,
146,
174,
200,
218,
231,
241,
250,
254,
255,
255,
255,
255,
255,
255,
252,
243,
227,
205,
181,
164,
159,
161,
157,
139,
124,
115,
118,
127,
148,
176,
199,
216,
230,
240,
249,
254,
255,
255,
255,
255,
255,
254,
251,
241,
224,
204,
184,
169,
163,
160,
150,
132,
119,
116,
123,
133,
153,
177,
197,
214,
228,
240,
249,
254,
255,
255,
255,
255,
255,
254,
251,
239,
222,
205,
189,
177,
171,
166,
154,
139,
129,
128,
134,
144,
159,
177,
195,
213,
228,
241,
249,
254,
255,
255,
255,
255,
255,
254,
249,
237,
222,
207,
195,
186,
180,
175,
166,
153,
143,
140,
142,
150,
162,
178,
195,
214,
230,
242,
250,
254,
255,
255,
255,
255,
255,
253,
247,
235,
220,
207,
197,
189,
183,
179,
172,
160,
148,
142,
143,
150,
161,
178,
198,
217,
233,
244,
250,
254,
255,
255,
255,
255,
255,
253,
246,
233,
218,
204,
192,
184,
177,
172,
165,
153,
142,
137,
139,
148,
163,
183,
204,
222,
236,
246,
251,
254,
255,
255,
255,
255,
255,
253,
247,
234,
218,
201,
186,
174,
165,
157,
148,
137,
130,
129,
137,
151,
171,
194,
214,
230,
242,
248,
252,
254,
255,
255,
255,
255,
255,
253,
249,
238,
222,
203,
184,
168,
154,
143,
132,
124,
123,
130,
145,
165,
188,
209,
227,
239,
247,
251,
253,
255,
255,
255,
255,
255,
255,
254,
251,
244,
232,
214,
194,
174,
156,
142,
132,
130,
134,
148,
167,
189,
210,
226,
238,
246,
250,
253,
254,
255,
255,
255,
255,
255,
255,
255,
253,
250,
243,
231,
215,
196,
178,
163,
155,
156,
164,
179,
197,
215,
230,
240,
247,
251,
253,
254,
255,
255,
255,
255,
255,
255,
255,
255,
254,
253,
251,
246,
238,
228,
217,
208,
203,
204,
210,
218,
228,
236,
243,
248,
251,
253,
254,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
254,
252,
249,
245,
241,
238,
237,
237,
239,
242,
245,
247,
250,
252,
253,
254,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
254,
254,
253,
252,
250,
249,
248,
249,
249,
250,
252,
253,
253,
254,
254,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
254,
254,
254,
254,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
]
)
# In[5]:
get_ipython().run_cell_magic('writefile', 'model.yaml', 'apiVersion: machinelearning.seldon.io/v1alpha2\nkind: SeldonDeployment\nmetadata:\n name: mnist\nspec:\n protocol: kfserving\n transport: rest\n predictors:\n - graph:\n children: []\n implementation: TRITON_SERVER\n modelUri: gs://seldon-models/tensorrt/v100_mnist\n name: mnist\n componentSpecs:\n - spec:\n containers:\n - name: mnist\n resources:\n limits:\n nvidia.com/gpu: 1\n name: tensorrt\n replicas: 1')
# In[6]:
get_ipython().system('kubectl apply -f model.yaml')
# In[7]:
get_ipython().system("kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=mnist -o jsonpath='{.items[0].metadata.name}')")
# Check metadata of model
# In[8]:
get_ipython().system('curl http://0.0.0.0:8003/seldon/default/mnist/v2/models/mnist')
# Test prediction on random digit.
# In[9]:
# Pull the next (image, label) pair from the numpy iterator.
x,y = next(npX)
# Invert the image and center by the per-pixel MEANS vector.
# NOTE(review): normalize_img above scales by 255, so confirm x is in the
# 0-255 range this preprocessing assumes.
X = 255 - x
X = (X.reshape(784) - MEANS)
gen_image(x)
# Build the V2-protocol request body with the image as a flat FP32 tensor.
values = np.expand_dims(X, axis=0).reshape((1,1,28,28)).flatten().tolist()
cmd = '{"inputs":[{"name":"data","data":'+str(values)+',"datatype":"FP32","shape":[1,1,28,28]}]}'
with open("input.json","w") as f:
    f.write(cmd)
# POST the request through Ambassador to the Triton inference endpoint.
res = get_ipython().getoutput('curl -s -d @./input.json -X POST http://0.0.0.0:8003/seldon/default/mnist/v2/models/mnist/infer -H "Content-Type: application/json"')
d=json.loads(res[0])
print(d)
# The first output tensor holds the class scores; argmax is the digit.
predicted = np.array(d["outputs"][0]["data"]).argmax()
print("Truth",y,"predicted",predicted)
# In[ ]:
| [
"tensorflow_datasets.load",
"json.loads",
"tensorflow_datasets.as_numpy",
"matplotlib.pyplot.imshow",
"numpy.expand_dims",
"tensorflow.cast",
"numpy.array",
"numpy.reshape"
] | [((1451, 1554), 'tensorflow_datasets.load', 'tfds.load', (['"""mnist"""'], {'split': "['train', 'test']", 'shuffle_files': '(True)', 'as_supervised': '(True)', 'with_info': '(True)'}), "('mnist', split=['train', 'test'], shuffle_files=True,\n as_supervised=True, with_info=True)\n", (1460, 1554), True, 'import tensorflow_datasets as tfds\n'), ((1820, 1855), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', (['ds_train'], {'graph': 'None'}), '(ds_train, graph=None)\n', (1833, 1855), True, 'import tensorflow_datasets as tfds\n'), ((1877, 6083), 'numpy.array', 'np.array', (['[255.0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 254, 254, 253, 252, \n 252, 251, 251, 252, 252, 253, 254, 254, 255, 255, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 255, 254, 254, 253, 251, 249, 248, 245, \n 243, 242, 242, 243, 246, 248, 251, 253, 254, 255, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 254, 253, 250, 247, 242, 235, 228, 220, \n 213, 210, 211, 216, 224, 232, 240, 246, 251, 253, 254, 255, 255, 255, \n 255, 255, 255, 255, 255, 254, 251, 248, 242, 234, 223, 211, 196, 181, \n 170, 164, 166, 175, 189, 205, 221, 233, 243, 248, 252, 254, 255, 255, \n 255, 255, 255, 255, 254, 252, 248, 241, 231, 217, 202, 184, 166, 149, \n 136, 131, 134, 143, 159, 180, 201, 220, 234, 243, 249, 253, 255, 255, \n 255, 255, 255, 254, 253, 249, 243, 233, 219, 201, 181, 161, 143, 130, \n 122, 120, 122, 129, 141, 161, 185, 208, 227, 240, 248, 252, 254, 255, \n 255, 255, 255, 254, 251, 246, 238, 226, 208, 187, 164, 146, 135, 131, \n 132, 133, 132, 133, 139, 154, 178, 202, 223, 239, 248, 252, 255, 255, \n 255, 255, 254, 253, 251, 245, 236, 221, 200, 177, 156, 144, 144, 150, \n 156, 156, 151, 144, 144, 
156, 178, 202, 224, 240, 249, 253, 255, 255, \n 255, 255, 254, 253, 251, 245, 235, 218, 195, 172, 155, 152, 161, 172, \n 176, 170, 161, 150, 149, 161, 183, 207, 227, 242, 250, 254, 255, 255, \n 255, 255, 255, 254, 251, 246, 234, 215, 191, 168, 156, 160, 173, 182, \n 179, 169, 157, 147, 149, 166, 190, 213, 230, 243, 251, 254, 255, 255, \n 255, 255, 255, 254, 252, 246, 233, 212, 186, 165, 157, 164, 175, 176, \n 165, 153, 142, 137, 147, 170, 196, 217, 231, 242, 251, 255, 255, 255, \n 255, 255, 255, 254, 252, 245, 230, 207, 182, 163, 158, 164, 168, 158, \n 143, 131, 125, 128, 146, 174, 200, 218, 231, 241, 250, 254, 255, 255, \n 255, 255, 255, 255, 252, 243, 227, 205, 181, 164, 159, 161, 157, 139, \n 124, 115, 118, 127, 148, 176, 199, 216, 230, 240, 249, 254, 255, 255, \n 255, 255, 255, 254, 251, 241, 224, 204, 184, 169, 163, 160, 150, 132, \n 119, 116, 123, 133, 153, 177, 197, 214, 228, 240, 249, 254, 255, 255, \n 255, 255, 255, 254, 251, 239, 222, 205, 189, 177, 171, 166, 154, 139, \n 129, 128, 134, 144, 159, 177, 195, 213, 228, 241, 249, 254, 255, 255, \n 255, 255, 255, 254, 249, 237, 222, 207, 195, 186, 180, 175, 166, 153, \n 143, 140, 142, 150, 162, 178, 195, 214, 230, 242, 250, 254, 255, 255, \n 255, 255, 255, 253, 247, 235, 220, 207, 197, 189, 183, 179, 172, 160, \n 148, 142, 143, 150, 161, 178, 198, 217, 233, 244, 250, 254, 255, 255, \n 255, 255, 255, 253, 246, 233, 218, 204, 192, 184, 177, 172, 165, 153, \n 142, 137, 139, 148, 163, 183, 204, 222, 236, 246, 251, 254, 255, 255, \n 255, 255, 255, 253, 247, 234, 218, 201, 186, 174, 165, 157, 148, 137, \n 130, 129, 137, 151, 171, 194, 214, 230, 242, 248, 252, 254, 255, 255, \n 255, 255, 255, 253, 249, 238, 222, 203, 184, 168, 154, 143, 132, 124, \n 123, 130, 145, 165, 188, 209, 227, 239, 247, 251, 253, 255, 255, 255, \n 255, 255, 255, 254, 251, 244, 232, 214, 194, 174, 156, 142, 132, 130, \n 134, 148, 167, 189, 210, 226, 238, 246, 250, 253, 254, 255, 255, 255, \n 255, 255, 255, 255, 253, 250, 243, 231, 215, 196, 
178, 163, 155, 156, \n 164, 179, 197, 215, 230, 240, 247, 251, 253, 254, 255, 255, 255, 255, \n 255, 255, 255, 255, 254, 253, 251, 246, 238, 228, 217, 208, 203, 204, \n 210, 218, 228, 236, 243, 248, 251, 253, 254, 255, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 254, 252, 249, 245, 241, 238, 237, 237, \n 239, 242, 245, 247, 250, 252, 253, 254, 255, 255, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 254, 254, 253, 252, 250, 249, 248, 249, \n 249, 250, 252, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 254, \n 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255]'], {}), '([255.0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 254, 254, 253, \n 252, 252, 251, 251, 252, 252, 253, 254, 254, 255, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 255, 255, 254, 254, 253, 251, 249, 248, \n 245, 243, 242, 242, 243, 246, 248, 251, 253, 254, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 255, 254, 253, 250, 247, 242, 235, 228, \n 220, 213, 210, 211, 216, 224, 232, 240, 246, 251, 253, 254, 255, 255, \n 255, 255, 255, 255, 255, 255, 254, 251, 248, 242, 234, 223, 211, 196, \n 181, 170, 164, 166, 175, 189, 205, 221, 233, 243, 248, 252, 254, 255, \n 255, 255, 255, 255, 255, 254, 252, 248, 241, 231, 217, 202, 184, 166, \n 149, 136, 131, 134, 143, 159, 180, 201, 220, 234, 243, 249, 253, 255, \n 255, 255, 255, 255, 254, 253, 249, 243, 233, 219, 201, 181, 161, 143, \n 130, 122, 120, 122, 129, 141, 161, 185, 208, 227, 240, 248, 252, 254, \n 255, 255, 255, 255, 254, 251, 246, 238, 226, 208, 187, 164, 146, 135, \n 131, 132, 133, 132, 133, 139, 154, 178, 202, 223, 239, 248, 252, 255, \n 
255, 255, 255, 254, 253, 251, 245, 236, 221, 200, 177, 156, 144, 144, \n 150, 156, 156, 151, 144, 144, 156, 178, 202, 224, 240, 249, 253, 255, \n 255, 255, 255, 254, 253, 251, 245, 235, 218, 195, 172, 155, 152, 161, \n 172, 176, 170, 161, 150, 149, 161, 183, 207, 227, 242, 250, 254, 255, \n 255, 255, 255, 255, 254, 251, 246, 234, 215, 191, 168, 156, 160, 173, \n 182, 179, 169, 157, 147, 149, 166, 190, 213, 230, 243, 251, 254, 255, \n 255, 255, 255, 255, 254, 252, 246, 233, 212, 186, 165, 157, 164, 175, \n 176, 165, 153, 142, 137, 147, 170, 196, 217, 231, 242, 251, 255, 255, \n 255, 255, 255, 255, 254, 252, 245, 230, 207, 182, 163, 158, 164, 168, \n 158, 143, 131, 125, 128, 146, 174, 200, 218, 231, 241, 250, 254, 255, \n 255, 255, 255, 255, 255, 252, 243, 227, 205, 181, 164, 159, 161, 157, \n 139, 124, 115, 118, 127, 148, 176, 199, 216, 230, 240, 249, 254, 255, \n 255, 255, 255, 255, 254, 251, 241, 224, 204, 184, 169, 163, 160, 150, \n 132, 119, 116, 123, 133, 153, 177, 197, 214, 228, 240, 249, 254, 255, \n 255, 255, 255, 255, 254, 251, 239, 222, 205, 189, 177, 171, 166, 154, \n 139, 129, 128, 134, 144, 159, 177, 195, 213, 228, 241, 249, 254, 255, \n 255, 255, 255, 255, 254, 249, 237, 222, 207, 195, 186, 180, 175, 166, \n 153, 143, 140, 142, 150, 162, 178, 195, 214, 230, 242, 250, 254, 255, \n 255, 255, 255, 255, 253, 247, 235, 220, 207, 197, 189, 183, 179, 172, \n 160, 148, 142, 143, 150, 161, 178, 198, 217, 233, 244, 250, 254, 255, \n 255, 255, 255, 255, 253, 246, 233, 218, 204, 192, 184, 177, 172, 165, \n 153, 142, 137, 139, 148, 163, 183, 204, 222, 236, 246, 251, 254, 255, \n 255, 255, 255, 255, 253, 247, 234, 218, 201, 186, 174, 165, 157, 148, \n 137, 130, 129, 137, 151, 171, 194, 214, 230, 242, 248, 252, 254, 255, \n 255, 255, 255, 255, 253, 249, 238, 222, 203, 184, 168, 154, 143, 132, \n 124, 123, 130, 145, 165, 188, 209, 227, 239, 247, 251, 253, 255, 255, \n 255, 255, 255, 255, 254, 251, 244, 232, 214, 194, 174, 156, 142, 132, \n 130, 134, 148, 167, 189, 
210, 226, 238, 246, 250, 253, 254, 255, 255, \n 255, 255, 255, 255, 255, 253, 250, 243, 231, 215, 196, 178, 163, 155, \n 156, 164, 179, 197, 215, 230, 240, 247, 251, 253, 254, 255, 255, 255, \n 255, 255, 255, 255, 255, 254, 253, 251, 246, 238, 228, 217, 208, 203, \n 204, 210, 218, 228, 236, 243, 248, 251, 253, 254, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 255, 254, 252, 249, 245, 241, 238, 237, \n 237, 239, 242, 245, 247, 250, 252, 253, 254, 255, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 255, 254, 254, 253, 252, 250, 249, 248, \n 249, 249, 250, 252, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255, \n 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, \n 254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255])\n', (1885, 6083), True, 'import numpy as np\n'), ((13541, 13559), 'json.loads', 'json.loads', (['res[0]'], {}), '(res[0])\n', (13551, 13559), False, 'import json\n'), ((1329, 1391), 'matplotlib.pyplot.imshow', 'plt.imshow', (['two_d'], {'cmap': 'plt.cm.gray_r', 'interpolation': '"""nearest"""'}), "(two_d, cmap=plt.cm.gray_r, interpolation='nearest')\n", (1339, 1391), True, 'from matplotlib import pyplot as plt\n'), ((13581, 13614), 'numpy.array', 'np.array', (["d['outputs'][0]['data']"], {}), "(d['outputs'][0]['data'])\n", (13589, 13614), True, 'import numpy as np\n'), ((1682, 1708), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (1689, 1708), True, 'import tensorflow as tf\n'), ((1275, 1300), 'numpy.reshape', 'np.reshape', (['arr', '(28, 28)'], {}), '(arr, (28, 28))\n', (1285, 1300), True, 'import numpy as np\n'), ((13142, 13167), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (13156, 13167), True, 'import numpy as np\n')] |
from PIL import Image
from filters.emboss_filter import EmbossFilter
from filters.mean_filter import MeanFilter, MeanFilterY
from filters.median_filter import MedianFilter, MedianFilterY
from filters.prewitt_filter import PrewittFilter
from filters.negative_filter import NegativeFilterRGB, NegativeFilterYIQ
from tarefas.tarefa6 import reproduce_example_6
import numpy as np
import argparse as ap
import cv2
# Command-line interface: two positional arguments select the input image and
# the mask/filter description file; the flags toggle optional pipeline stages.
parser = ap.ArgumentParser(description = "System made for the lecture Processamento Digital de Imagens in UFPB.")
parser.add_argument('image_path', help="Path to the image", type=str)
parser.add_argument('mask_path', help="Path to the mask and filter", type=str)
# NOTE(review): the help text says "RGB to YIQ conversion" but this flag is
# named --verbose and is only used for extra printing below -- confirm intent.
parser.add_argument('-v','--verbose', help="Enable RGB to YIQ conversion.", action="store_true", default = 0)
parser.add_argument('-s','--stretching', help="Enable histogram stretching.", action="store_true")
parser.add_argument('-y','--yiq_rgb', help="Enable YIQ to RGB conversion.", action="store_true")
# NOTE(review): --rgb_yiq defaults to 1 but is never read below -- confirm.
parser.add_argument('-r','--rgb_yiq', help="Enable RGB to YIQ conversion.", action="store_true", default = 1)
parser.add_argument('-4', '--test4', help="Run test 4.", action="store_true")
parser.add_argument('-6','--test6', help="Run test 6.", action="store_true")
args = parser.parse_args()
def from_rgb_to_yiq(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image of shape (H, W, 3) to the YIQ color space.

    Replaces the original per-pixel double loop with a single vectorized
    matrix product (same coefficients, same float64 result, far faster).

    :param rgb: image array of shape (H, W, 3) with channels R, G, B
    :return: float array of the same shape with channels Y, I, Q
    """
    # Rows are the Y, I and Q coefficient vectors applied to (R, G, B).
    matrix = np.array([
        [0.299, 0.587, 0.114],
        [0.596, -0.274, -0.322],
        [0.211, -0.523, 0.312],
    ])
    yiq = np.empty(np.shape(rgb), dtype=float)
    yiq[..., :3] = rgb[..., :3] @ matrix.T
    return yiq
def from_yiq_to_rgb(yiq: np.ndarray) -> np.ndarray:
    """Convert a YIQ image of shape (H, W, 3) back to the RGB color space.

    Vectorized replacement for the original per-pixel double loop: applies
    the inverse NTSC matrix, rounds to the nearest integer and clamps to the
    displayable [0, 255] range. Both ``round`` and ``np.round`` round half
    to even, so results match the original pixel-wise computation.

    :param yiq: image array of shape (H, W, 3) with channels Y, I, Q
    :return: float array of the same shape with channels R, G, B in [0, 255]
    """
    # Rows are the R, G and B coefficient vectors applied to (Y, I, Q).
    inverse = np.array([
        [1.0, 0.956, 0.621],
        [1.0, -0.272, -0.647],
        [1.0, -1.106, 1.703],
    ])
    rgb = np.empty(np.shape(yiq), dtype=float)
    rgb[..., :3] = np.clip(np.round(yiq[..., :3] @ inverse.T), 0, 255)
    return rgb
def read_mask_from_file(file):
    """Read a convolution mask description from a text file.

    The file must contain a single line ``"<mode> <m> <n>"`` where mode is
    one of "mean", "emboss", "prewitt", "median" or "negative" and m x n is
    the mask size.

    :param file: path to the mask description file
    :return: tuple ``(mask, mode)`` -- the mask ndarray and the mode string
    :raises AssertionError: for unsupported mode/size combinations
    """
    with open(file, mode='r') as f:
        line = f.read().split(" ")
        mode = line[0]
        m = int(line[1])
        n = int(line[2])
    if mode == "mean":
        # Box filter: every coefficient is 1/(m*n).
        mask = np.full((m, n), 1.0 / (m * n))
    elif mode == "emboss":
        if n == 3:
            mask = np.array(((-2, -1, 0), (-1, 1, 1), (0, 1, 2)))
        elif n == 5:
            # BUG FIX: the five rows were previously passed as separate
            # positional arguments to np.array (so the second row was
            # interpreted as the dtype argument); they must be wrapped in a
            # single tuple of rows.
            mask = np.array(((-2, 0, -1, 0, 0),
                             (0, -2, -1, 0, 0),
                             (-1, -1, 1, 1, 1),
                             (0, 0, 1, 2, 0),
                             (0, 0, 1, 0, 2)))
        else:
            assert False, f"{m} x {n} emboss filter not implemented yet"
    elif mode == "prewitt":
        if n == 3:
            # Two stacked kernels: channel 0 detects horizontal edges,
            # channel 1 detects vertical edges.
            mask = np.zeros((m, n, 2))
            mask[0, :, 0] = 1
            mask[2, :, 0] = -1
            mask[:, 0, 1] = 1
            mask[:, 2, 1] = -1
        else:
            assert False, f"{m} x {n} prewitt filter not implemented yet"
    elif mode == "median":
        # The median filter only needs the window shape; values are ignored.
        mask = np.ones((m, n))
    elif mode == "negative":
        # Point operation: a dummy 1x1 mask is enough.
        mask = np.ones((1,1))
    else:
        assert False, f"{m} x {n} {mode} filter not implemented yet (maybe a typo in {mode}?) modes supported:\nprewitt, emboss, mean, median\n"
    return mask, mode
def histogram_stretching_y(image : np.ndarray) -> np.ndarray:
    """Stretch the luminance (Y) channel of a YIQ image to the full [0, 255] range.

    Vectorized replacement for the original per-pixel loop; the I and Q
    channels are copied through unchanged and the input is not mutated.

    :param image: YIQ image array of shape (H, W, 3)
    :return: copy of ``image`` with channel 0 linearly rescaled to [0, 255]
    """
    min_value = image[:, :, 0].min()
    max_value = image[:, :, 0].max()
    new_image = np.copy(image)
    # Linear rescale of channel 0 only (np.round matches Python round()).
    new_image[:, :, 0] = np.round((image[:, :, 0] - min_value) / (max_value - min_value) * 255)
    return new_image
def histogram_stretching(image : np.ndarray) -> np.ndarray:
    """Stretch all three channels of an image to the full [0, 255] range.

    The minimum and maximum are taken over the whole image (all channels
    together), then each of the first three channels is rescaled linearly.
    Vectorized replacement for the original per-pixel loop; the input is
    not mutated.

    :param image: image array of shape (H, W, 3)
    :return: copy of ``image`` with channels 0..2 rescaled to [0, 255]
    """
    min_value_r = image.min()
    max_value_r = image.max()
    new_image = np.copy(image)
    # Rescale channels 0..2 with the global min/max (np.round matches round()).
    new_image[:, :, :3] = np.round((image[:, :, :3] - min_value_r) / (max_value_r - min_value_r) * 255)
    return new_image
# Load the input image and force an RGB pixel layout.
image = Image.open(args.image_path).convert("RGB")
pixels = np.array(image)
if args.verbose:
    print("Image:\n", image)
if args.test4:
    # Test 4: separable mean filtering in YIQ space (1x21 pass then 21x1 pass).
    pixels_yiq = from_rgb_to_yiq(pixels)
    mask1, _ = read_mask_from_file("masks/mean_1x21.txt")
    mask2, _ = read_mask_from_file("masks/mean_21x1.txt")
    filter = MeanFilterY(mask1)
    filter.set_image(pixels_yiq)
    image_after_filter = filter.apply_filter_on_image()
    pixels_new_image = image_after_filter
    # NOTE(review): `filter2` is built from mask2 but never used -- the second
    # pass below reuses `filter` (built from mask1); confirm intent.
    filter2 = MeanFilterY(mask2)
    filter.set_image(pixels_new_image)
    image_after_filter2 = filter.apply_filter_on_image()
    PIL_image = Image.fromarray(image_after_filter2.astype(np.uint8))
    PIL_image.show()
    exit(3)
if args.test6:
    # Test 6: OpenCV-based reproduction of example 6 (mask is read as an image).
    pixels = cv2.imread(args.image_path)
    mask = cv2.imread(args.mask_path)
    if args.verbose:
        print(mask)
    reproduce_example_6(pixels, mask)
    exit(2)
# Standard pipeline: read the mask description and pick the matching filter.
mask, mode = read_mask_from_file(args.mask_path)
if args.verbose:
    print("Using mask: ", mask)
filter = None
image_after_filter = None
if(args.yiq_rgb):
    # Work in YIQ space when requested (Y-suffixed filters expect YIQ input).
    pixels = from_rgb_to_yiq(pixels)
if mode == "mean":
    if(args.yiq_rgb):
        filter = MeanFilterY(mask)
    else:
        filter = MeanFilter(mask)
elif mode == "emboss":
    filter = EmbossFilter(mask)
elif mode == "prewitt":
    filter = PrewittFilter(mask)
elif mode == "median":
    filter = MedianFilterY(mask)
elif mode == "negative":
    if(args.yiq_rgb):
        filter = NegativeFilterYIQ(mask)
    else:
        filter = NegativeFilterRGB(mask)
filter.set_image(pixels)
image_after_filter = filter.apply_filter_on_image()
if(args.stretching):
    # Contrast stretch either the Y channel (YIQ mode) or all RGB channels.
    if(args.yiq_rgb):
        image_after_filter = histogram_stretching_y(image_after_filter)
    else:
        image_after_filter = histogram_stretching(image_after_filter)
if(args.yiq_rgb):
    # Convert back to RGB before building the PIL image.
    image_after_filter = from_yiq_to_rgb(image_after_filter)
PIL_image = Image.fromarray(image_after_filter.astype(np.uint8))
PIL_image.show()
| [
"argparse.ArgumentParser",
"numpy.copy",
"filters.emboss_filter.EmbossFilter",
"tarefas.tarefa6.reproduce_example_6",
"filters.mean_filter.MeanFilter",
"filters.prewitt_filter.PrewittFilter",
"filters.negative_filter.NegativeFilterRGB",
"numpy.zeros",
"numpy.ones",
"filters.mean_filter.MeanFilterY... | [((419, 526), 'argparse.ArgumentParser', 'ap.ArgumentParser', ([], {'description': '"""System made for the lecture Processamento Digital de Imagens in UFPB."""'}), "(description=\n 'System made for the lecture Processamento Digital de Imagens in UFPB.')\n", (436, 526), True, 'import argparse as ap\n'), ((4773, 4788), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (4781, 4788), True, 'import numpy as np\n'), ((3932, 3946), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (3939, 3946), True, 'import numpy as np\n'), ((4279, 4293), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (4286, 4293), True, 'import numpy as np\n'), ((5023, 5041), 'filters.mean_filter.MeanFilterY', 'MeanFilterY', (['mask1'], {}), '(mask1)\n', (5034, 5041), False, 'from filters.mean_filter import MeanFilter, MeanFilterY\n'), ((5189, 5207), 'filters.mean_filter.MeanFilterY', 'MeanFilterY', (['mask2'], {}), '(mask2)\n', (5200, 5207), False, 'from filters.mean_filter import MeanFilter, MeanFilterY\n'), ((5441, 5468), 'cv2.imread', 'cv2.imread', (['args.image_path'], {}), '(args.image_path)\n', (5451, 5468), False, 'import cv2\n'), ((5480, 5506), 'cv2.imread', 'cv2.imread', (['args.mask_path'], {}), '(args.mask_path)\n', (5490, 5506), False, 'import cv2\n'), ((5553, 5586), 'tarefas.tarefa6.reproduce_example_6', 'reproduce_example_6', (['pixels', 'mask'], {}), '(pixels, mask)\n', (5572, 5586), False, 'from tarefas.tarefa6 import reproduce_example_6\n'), ((1331, 1344), 'numpy.shape', 'np.shape', (['rgb'], {}), '(rgb)\n', (1339, 1344), True, 'import numpy as np\n'), ((1793, 1806), 'numpy.shape', 'np.shape', (['yiq'], {}), '(yiq)\n', (1801, 1806), True, 'import numpy as np\n'), ((2440, 2456), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (2448, 2456), True, 'import numpy as np\n'), ((4720, 4747), 'PIL.Image.open', 'Image.open', (['args.image_path'], {}), '(args.image_path)\n', (4730, 4747), False, 'from PIL import Image\n'), 
((5855, 5872), 'filters.mean_filter.MeanFilterY', 'MeanFilterY', (['mask'], {}), '(mask)\n', (5866, 5872), False, 'from filters.mean_filter import MeanFilter, MeanFilterY\n'), ((5900, 5916), 'filters.mean_filter.MeanFilter', 'MeanFilter', (['mask'], {}), '(mask)\n', (5910, 5916), False, 'from filters.mean_filter import MeanFilter, MeanFilterY\n'), ((5953, 5971), 'filters.emboss_filter.EmbossFilter', 'EmbossFilter', (['mask'], {}), '(mask)\n', (5965, 5971), False, 'from filters.emboss_filter import EmbossFilter\n'), ((1366, 1379), 'numpy.shape', 'np.shape', (['yiq'], {}), '(yiq)\n', (1374, 1379), True, 'import numpy as np\n'), ((1828, 1841), 'numpy.shape', 'np.shape', (['yiq'], {}), '(yiq)\n', (1836, 1841), True, 'import numpy as np\n'), ((3967, 3982), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (3975, 3982), True, 'import numpy as np\n'), ((4314, 4329), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (4322, 4329), True, 'import numpy as np\n'), ((6009, 6028), 'filters.prewitt_filter.PrewittFilter', 'PrewittFilter', (['mask'], {}), '(mask)\n', (6022, 6028), False, 'from filters.prewitt_filter import PrewittFilter\n'), ((1408, 1421), 'numpy.shape', 'np.shape', (['yiq'], {}), '(yiq)\n', (1416, 1421), True, 'import numpy as np\n'), ((1870, 1883), 'numpy.shape', 'np.shape', (['yiq'], {}), '(yiq)\n', (1878, 1883), True, 'import numpy as np\n'), ((4011, 4026), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (4019, 4026), True, 'import numpy as np\n'), ((4358, 4373), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (4366, 4373), True, 'import numpy as np\n'), ((6065, 6084), 'filters.median_filter.MedianFilterY', 'MedianFilterY', (['mask'], {}), '(mask)\n', (6078, 6084), False, 'from filters.median_filter import MedianFilter, MedianFilterY\n'), ((2605, 2651), 'numpy.array', 'np.array', (['((-2, -1, 0), (-1, 1, 1), (0, 1, 2))'], {}), '(((-2, -1, 0), (-1, 1, 1), (0, 1, 2)))\n', (2613, 2651), True, 'import numpy as np\n'), ((2692, 2795), 
'numpy.array', 'np.array', (['(-2, 0, -1, 0, 0)', '(0, -2, -1, 0, 0)', '(-1, -1, 1, 1, 1)', '(0, 0, 1, 2, 0)', '(0, 0, 1, 0, 2)'], {}), '((-2, 0, -1, 0, 0), (0, -2, -1, 0, 0), (-1, -1, 1, 1, 1), (0, 0, 1,\n 2, 0), (0, 0, 1, 0, 2))\n', (2700, 2795), True, 'import numpy as np\n'), ((2941, 2960), 'numpy.zeros', 'np.zeros', (['(m, n, 2)'], {}), '((m, n, 2))\n', (2949, 2960), True, 'import numpy as np\n'), ((3524, 3539), 'numpy.ones', 'np.ones', (['(m, n)'], {}), '((m, n))\n', (3531, 3539), True, 'import numpy as np\n'), ((6149, 6172), 'filters.negative_filter.NegativeFilterYIQ', 'NegativeFilterYIQ', (['mask'], {}), '(mask)\n', (6166, 6172), False, 'from filters.negative_filter import NegativeFilterRGB, NegativeFilterYIQ\n'), ((6200, 6223), 'filters.negative_filter.NegativeFilterRGB', 'NegativeFilterRGB', (['mask'], {}), '(mask)\n', (6217, 6223), False, 'from filters.negative_filter import NegativeFilterRGB, NegativeFilterYIQ\n'), ((3594, 3609), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (3601, 3609), True, 'import numpy as np\n')] |
# work on increasing efficiency
import numpy as np
import pandas as pd
# Silence pandas' SettingWithCopyWarning: kml2slu intentionally writes through
# chained indexing like df["lon 0"].iloc[i] = ...
pd.options.mode.chained_assignment = None  # default='warn'
def kml2slu(file, write=False):
    """Converts Polygons drawn in Google Earth to slu used by GeoClaw flag_regions

    For every ``<Placemark>`` containing a ``<Polygon>``, the line following
    the ``<coordinates>`` tag is parsed as space-separated "lon,lat,alt"
    triples, and for each latitude the pair of polygon-boundary longitudes
    is computed by intersecting the horizontal line at that latitude with
    the polygon's segments.

    :param str file: path to .kml file downloaded from Google Earth
        - there must not be any three points on the polygon with the same latitude
    :param bool write: will write slus to a .txt file named slu_outputs
    :return: a dictionary of polygon names with their slus in ndarray format
    :raises TypeError: if any polygon has three points sharing a latitude
    """
    class Segment:
        # Stores info needed for each line segment making up the polygon
        def __init__(self, x0, x1, y0, y1):
            self.x = x0
            self.y = y0
            self.ylower = np.min([y0, y1])
            self.yupper = np.max([y0, y1])
            # NOTE(review): a vertical segment (x0 == x1) divides by zero
            # here, and a horizontal one (slope 0) divides by zero in the
            # intersection below -- confirm input polygons exclude both.
            self.slope = (y1 - y0) / (x1 - x0)
    polygons = {}
    placemark = False
    name = None
    polygon = False
    grab_next = False
    threats = []
    with open(file, "r") as kml_file:
        for num, line in enumerate(kml_file, 1):
            if "<Placemark>" in line:
                placemark = True
            elif placemark:
                if "<name>" in line:
                    name = line[line.find(">") + 1:line.find("</")]
                elif "<Polygon>" in line:
                    polygon = True
                elif ("<coordinates>" in line) and polygon:
                    # The coordinates themselves are on the next line.
                    grab_next = True
                elif grab_next:
                    tmp_threats = ["\n" + name]
                    # Grabs coordinates of points and puts them in dataframe with [lat, lon]
                    s = pd.Series(np.array(line.strip().split(" "))).str.split(",")
                    df = pd.concat([s.str.get(0).astype(float), s.str.get(1).astype(float)], axis=1)
                    df.columns = ["lon", "lat"]
                    # Identify polygon segments (consecutive point pairs).
                    segments = [
                        Segment(df["lon"].iloc[i], df["lon"].iloc[i + 1], df["lat"].iloc[i], df["lat"].iloc[i + 1])
                        for i in df.index.to_list() if i != len(df) - 1]
                    # Get longitudes for each latitude
                    df["lon 0"] = None
                    df["lon 1"] = None
                    for i in df.index.to_list():
                        lat = df["lat"].iloc[i]
                        lon_orig = df["lon"].iloc[i]
                        for segment in segments:
                            if segment.ylower <= lat <= segment.yupper:
                                # Longitude where the segment crosses this latitude.
                                lon = ((lat - segment.y) / segment.slope) + segment.x
                                if lon != lon_orig:
                                    if df["lon 0"].iloc[i] is not None:
                                        # Found three points with same latitude
                                        tmp_threats.append(str(lat))
                                    elif lon > lon_orig:
                                        df["lon 1"].iloc[i] = lon
                                        df["lon 0"].iloc[i] = lon_orig
                                    else:
                                        df["lon 1"].iloc[i] = lon_orig
                                        df["lon 0"].iloc[i] = lon
                        # For top and bottom verticies
                        if df["lon 0"].iloc[i] is None and df["lon 1"].iloc[i] is None:
                            df["lon 0"].iloc[i] = lon_orig
                            df["lon 1"].iloc[i] = lon_orig
                    polygons[name] = df[["lat", "lon 0", "lon 1"]].drop_duplicates(subset=['lat']).sort_values(
                        by=['lat']).reset_index(drop=True).to_numpy()
                    if len(tmp_threats) > 1:
                        threats += tmp_threats
                    polygon = False
                    grab_next = False
                    placemark = False
    if threats:
        # NOTE(review): TypeError is an odd choice for invalid data; callers
        # may expect ValueError -- left unchanged to preserve the interface.
        raise TypeError("The following polygons have three points with the same latitude. Each latitude "
                        "can belong to at most two points on a polygon." + '\n'.join(threats))
    if write:
        # BUG FIX: filename was misspelled "slu_ouputs.txt", contradicting
        # the documented output name "slu_outputs".
        with open("slu_outputs.txt", "w") as slu_file:
            [slu_file.writelines([p, ":\n", str(polygons.get(p)), "\n\n"]) for p in polygons]
    return polygons
| [
"numpy.min",
"numpy.max"
] | [((773, 789), 'numpy.min', 'np.min', (['[y0, y1]'], {}), '([y0, y1])\n', (779, 789), True, 'import numpy as np\n'), ((816, 832), 'numpy.max', 'np.max', (['[y0, y1]'], {}), '([y0, y1])\n', (822, 832), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 10:59:49 2018
@author: joseb
"""
# min x1^2 + x2^2
# s.t. x1^2 +x1*x2 + x2^2 - 3 <= 0
# var x1, x2
# %%
from scipy.optimize import minimize
import numpy as np
from cvxpy import *
# First solve the NLP numerically with SciPy (three SLSQP variants below).
print('\nSOLVING USING SCIPY\n')
# Jacobian
def jacobian(x):
    """Analytic gradient of f(x1, x2) = x1^2 + x2^2."""
    return np.array((2 * x[0], 2 * x[1]))
# Hessian (constant, since f is quadratic)
def hessian():
    """Analytic Hessian of f(x1, x2) = x1^2 + x2^2."""
    return np.array(((2., 0.), (0., 2.)))
# Objective function
def obj_fun():
    """Return the objective f(x) = x1^2 + x2^2 as a callable."""
    def objective(x):
        return x[0] ** 2 + x[1] ** 2
    return objective
# Minimize objective function
def min_func(x):
    """Run SLSQP from start point ``x``; gradients are estimated numerically."""
    solver_options = {'ftol': 1e-9, 'disp': True}
    return minimize(obj_fun(), x, method='SLSQP', bounds=bounds,
                    constraints=cons, options=solver_options)
# Minimize objective function with gradient
def min_func_jacobian(x):
    """Run SLSQP from ``x`` using the analytic gradient ``jacobian``."""
    solver_options = {'ftol': 1e-9, 'disp': True}
    return minimize(obj_fun(), x, method='SLSQP', bounds=bounds,
                    constraints=cons, jac=jacobian, options=solver_options)
# Minimize objective function with hessian
def min_func_hessian(x):
    """Run the constrained minimizer from ``x`` with analytic gradient and
    Hessian (no explicit method is selected, unlike the two variants above)."""
    solver_options = {'ftol': 1e-9, 'disp': True}
    return minimize(obj_fun(), x, bounds=bounds, constraints=cons,
                    jac=jacobian, hess=hessian, options=solver_options)
# constraints functions
# SciPy 'ineq' constraints require fun(x) >= 0:
#   C1: x1^2 + x1*x2 + x2^2 <= 3  ->  3 - (x1^2 + x1*x2 + x2^2) >= 0
#   C2: 3*x1 + 2*x2 >= 3
cons = ({'type': 'ineq', 'fun': lambda x: -x[0] ** 2 - x[0] * x[1] - x[1] ** 2 + 3},
        {'type': 'ineq', 'fun': lambda x: 3 * x[0] + 2 * x[1] - 3})
# bounds, if any, e.g. x1 and x2 have to be positive
bounds = ((None, None), (None, None))
# initial guess
x0 = np.asarray((10, 10))
# Method SLSQP uses Sequential Least SQuares Programming to minimize a function
# of several variables with any combination of bounds, equality and inequality constraints.
res = min_func(x0)
print(res)
res2 = min_func_jacobian(x0)
print('\n', res2)
# print 'C1',res2.x[0]**2+res2.x[1]**2+res2.x[0]*res2.x[1],'C2',3*res2.x[0]+2*res2.x[1]
res3 = min_func_hessian(x0)
print('\n', res3)
print('\n SOLVING USING CVXPY\n')
# Create two scalar optimization variables.
x = Variable(2, name='x')
# Constraints
# quad_form(x, P1) = x^T P1 x = x1^2 + x1*x2 + x2^2 (same C1 as above).
P1 = np.array(np.mat('1. 0.5; 0.5 1.'))
f1 = quad_form(x, P1)
f2 = 3. * x[0] + 2. * x[1]
constraints = [f1 <= 3., f2 >= 3.]
# Form objective.
# quad_form(x, identity) = x1^2 + x2^2, the same objective solved above.
P0 = np.array(np.mat('1. 0.; 0. 1.'))
f0 = quad_form(x, P0)
obj = Minimize(f0)
# Form and solve problem.
prob = Problem(obj, constraints)
print("solve", prob.solve())  # Returns the optimal value.
print("status:", prob.status)
print("optimal value p* = ", prob.value)
print("optimal var: x1 = ", x[0].value, " x2 = ", x[1].value)
print("optimal dual variables lambda1 = ", constraints[0].dual_value)
print("optimal dual variables lambda2 = ", constraints[1].dual_value)
| [
"numpy.asarray",
"numpy.array",
"numpy.mat"
] | [((1439, 1459), 'numpy.asarray', 'np.asarray', (['(10, 10)'], {}), '((10, 10))\n', (1449, 1459), True, 'import numpy as np\n'), ((359, 377), 'numpy.array', 'np.array', (['(dx, dy)'], {}), '((dx, dy))\n', (367, 377), True, 'import numpy as np\n'), ((456, 490), 'numpy.array', 'np.array', (['((d11, d12), (d12, d22))'], {}), '(((d11, d12), (d12, d22)))\n', (464, 490), True, 'import numpy as np\n'), ((1985, 2009), 'numpy.mat', 'np.mat', (['"""1. 0.5; 0.5 1."""'], {}), "('1. 0.5; 0.5 1.')\n", (1991, 2009), True, 'import numpy as np\n'), ((2128, 2150), 'numpy.mat', 'np.mat', (['"""1. 0.; 0. 1."""'], {}), "('1. 0.; 0. 1.')\n", (2134, 2150), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class Momentum:
    """Gradient-descent optimizer with classical momentum.

    Maintains an exponential moving average of past gradients and steps
    the weights against that average.
    """
    def __init__(self, num_features=5, alpha=0.7, beta=0.9):
        self.m = np.zeros((num_features, 1))  # running gradient average
        self.alpha = alpha  # learning rate
        self.beta = beta    # momentum decay factor
    def optimize_weights(self, weights, grads):
        """Fold ``grads`` into the moving average and return updated weights."""
        self.m = self.beta * self.m + (1 - self.beta) * grads
        return weights - self.alpha * self.m
class Model:
    """Logistic-regression-style model with L1 regularization terms.

    NOTE(review): as written, ``fit`` never updates ``self.weights`` (the
    optimizer call inside ``optimize_once`` is commented out), so the model
    stays at its initial weights -- confirm whether this is intentional.
    """
    def __init__(self, num_features=5, alpha=0.03):
        # Column vector of weights, all initialized to 5.
        self.weights = np.zeros((num_features, 1)) + 5
        self.optimizer = Momentum()
        self.num_features = num_features
        self.alpha = alpha  # regularization strength
    def predict(self, x):
        """Return sigmoid(x @ weights)."""
        return self.sigmoid(x @ self.weights)
    @staticmethod
    def sigmoid(z):
        """Elementwise logistic function 1 / (1 + e^-z)."""
        y = 1 / (1 + np.exp(-z))
        return y
    def optimize_once(self, x, y):
        """Single evaluation pass over (x, y).

        Returns ``(loss_train, grads, loss_reg)``.
        NOTE(review): ``grads`` is the scalar cross-entropy value, not a
        gradient vector, and ``grad_reg`` is computed but never used.
        """
        loss_reg = np.sum(self.alpha * np.abs(self.weights))
        grad_reg = np.sum(self.alpha * np.abs(self.weights))
        ones = np.ones(y.shape)
        hypothesis = self.predict(x)
        grads = (-y @ np.log(hypothesis) - (ones - y) @ np.log(ones - hypothesis)) / x.shape[0] # + grad_reg
        # self.weights = self.optimizer.optimize_weights(self.weights, grads)
        loss_train = (y - hypothesis).T @ (y - hypothesis) / x.shape[0]
        return loss_train, grads, loss_reg
    def fit(self, x, y, batch_size=32, grad_tol=0.001, epochs=50):
        """Split (x, y) into train/test at index ``batch_size`` and iterate
        until the loss norm drops below ``grad_tol`` or ``epochs`` is hit.

        Returns arrays of (train losses, test losses, regularization losses).
        """
        self.x_train = x[:batch_size]
        self.y_train = y[:batch_size]
        self.x_test = x[batch_size:]
        self.y_test = y[batch_size:]
        grad_norm = np.inf
        n_iter = 0
        losses_train, losses_test, losses_reg = [], [], []
        while (grad_norm > grad_tol) and (n_iter < epochs):
            loss_train, grads, loss_reg = self.optimize_once(self.x_train, self.y_train)
            grad_norm = np.linalg.norm(grads)
            n_iter += 1
            loss_test = (self.y_test - self.predict(self.x_test)).T @ (self.y_test - self.predict(self.x_test)) / self.x_test.shape[0]
            losses_train.append(loss_train[0][0])
            losses_test.append(loss_test)
            losses_reg.append(loss_reg)
        return np.array(losses_train), np.array(losses_test), np.array(losses_reg)
    def predict_proba(self, x):
        """Print (does not return) the predicted probabilities for ``x``."""
        print(self.predict(x))
def normalize(features):
    """Min-max scale every feature column of ``features`` into [0, 1]."""
    columns = np.asarray(features).T        # one row per feature
    lo = np.amin(columns, axis=1)
    hi = np.amax(columns, axis=1)
    # Broadcast the per-feature range back over the samples.
    return (columns.T - lo) / (hi - lo)
if __name__ == "__main__":
df = pd.read_csv("heart_failure_clinical_records_dataset.csv")
# print(dataset.head())
features = ['age', 'ejection_fraction', 'serum_creatinine', 'serum_sodium', 'time']
X_init = df[features]
Y_init = df["DEATH_EVENT"]
X = normalize(X_init)
Y = np.asarray(Y_init).copy()
model = Model()
model.fit(X, Y)
model.predict_proba(X[100:120])
print(Y[100:120])
| [
"numpy.abs",
"numpy.amin",
"numpy.log",
"pandas.read_csv",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"numpy.amax",
"numpy.linalg.norm",
"numpy.array",
"numpy.exp"
] | [((2403, 2432), 'numpy.amin', 'np.amin', (['new_features'], {'axis': '(1)'}), '(new_features, axis=1)\n', (2410, 2432), True, 'import numpy as np\n'), ((2445, 2474), 'numpy.amax', 'np.amax', (['new_features'], {'axis': '(1)'}), '(new_features, axis=1)\n', (2452, 2474), True, 'import numpy as np\n'), ((2601, 2658), 'pandas.read_csv', 'pd.read_csv', (['"""heart_failure_clinical_records_dataset.csv"""'], {}), "('heart_failure_clinical_records_dataset.csv')\n", (2612, 2658), True, 'import pandas as pd\n'), ((167, 194), 'numpy.zeros', 'np.zeros', (['(num_features, 1)'], {}), '((num_features, 1))\n', (175, 194), True, 'import numpy as np\n'), ((1002, 1018), 'numpy.ones', 'np.ones', (['y.shape'], {}), '(y.shape)\n', (1009, 1018), True, 'import numpy as np\n'), ((2368, 2388), 'numpy.asarray', 'np.asarray', (['features'], {}), '(features)\n', (2378, 2388), True, 'import numpy as np\n'), ((530, 557), 'numpy.zeros', 'np.zeros', (['(num_features, 1)'], {}), '((num_features, 1))\n', (538, 557), True, 'import numpy as np\n'), ((1860, 1881), 'numpy.linalg.norm', 'np.linalg.norm', (['grads'], {}), '(grads)\n', (1874, 1881), True, 'import numpy as np\n'), ((2190, 2212), 'numpy.array', 'np.array', (['losses_train'], {}), '(losses_train)\n', (2198, 2212), True, 'import numpy as np\n'), ((2214, 2235), 'numpy.array', 'np.array', (['losses_test'], {}), '(losses_test)\n', (2222, 2235), True, 'import numpy as np\n'), ((2237, 2257), 'numpy.array', 'np.array', (['losses_reg'], {}), '(losses_reg)\n', (2245, 2257), True, 'import numpy as np\n'), ((2868, 2886), 'numpy.asarray', 'np.asarray', (['Y_init'], {}), '(Y_init)\n', (2878, 2886), True, 'import numpy as np\n'), ((799, 809), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (805, 809), True, 'import numpy as np\n'), ((903, 923), 'numpy.abs', 'np.abs', (['self.weights'], {}), '(self.weights)\n', (909, 923), True, 'import numpy as np\n'), ((964, 984), 'numpy.abs', 'np.abs', (['self.weights'], {}), '(self.weights)\n', (970, 984), True, 
'import numpy as np\n'), ((1078, 1096), 'numpy.log', 'np.log', (['hypothesis'], {}), '(hypothesis)\n', (1084, 1096), True, 'import numpy as np\n'), ((1112, 1137), 'numpy.log', 'np.log', (['(ones - hypothesis)'], {}), '(ones - hypothesis)\n', (1118, 1137), True, 'import numpy as np\n')] |
# External Modules
import torch
from torch import cuda, FloatTensor, LongTensor
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import NearestNeighbors
from typing import Union
from time import time
# comes from PointCNN.Pytorch repository
# https://github.com/hxdengBerkeley/PointCNN.Pytorch.git
# Types to allow for both CPU and GPU models.
UFloatTensor = Union[FloatTensor, cuda.FloatTensor]
ULongTensor = Union[LongTensor, cuda.LongTensor]
def knn_indices_func_cpu(rep_pts: FloatTensor,  # (N, pts, dim)
                         pts: FloatTensor,      # (N, x, dim)
                         K: int, D=1
                         ) -> LongTensor:       # (N, pts, K)
    """
    CPU-based Indexing function based on K-Nearest Neighbors search.
    :param rep_pts: Representative points.
    :param pts: Point cloud to get indices from.
    :param K: Number of nearest neighbors to collect.
    :param D: "Spread" of neighboring points (every D-th neighbor is kept).
    :return: Array of indices, P_idx, into pts such that pts[n][P_idx[n],:]
    is the set k-nearest neighbors for the representative points in pts[n].
    """
    time1 = time()
    rep_pts = rep_pts.data.numpy()
    pts = pts.data.numpy()
    region_idx = []
    for n, p in enumerate(rep_pts):
        P_particular = pts[n]
        # D*K + 1 neighbors are queried so the query point itself (index 0,
        # distance 0) can be dropped and every D-th remaining neighbor kept.
        # BUG FIX: scikit-learn >= 1.0 requires estimator parameters to be
        # passed by keyword; the old positional call raises a TypeError.
        nbrs = NearestNeighbors(
            n_neighbors=D*K + 1, algorithm="ball_tree").fit(P_particular)
        indices = nbrs.kneighbors(p)[1]
        region_idx.append(indices[:, 1::D])
    region_idx = torch.from_numpy(np.stack(region_idx, axis=0))
    print("using cpu,time:{}s".format(time()-time1))
    return region_idx
def knn_indices_func_gpu(seed: cuda.FloatTensor,  # (B,C,npoint)
                         pts: cuda.FloatTensor,   # (B,C,N)
                         k: int
                         ) -> cuda.LongTensor:    # (N,npoint,K)
    """Brute-force k-nearest-neighbour index lookup on tensors.

    Args:
        seed (cuda.FloatTensor): clustering seeds, shape (B, C, npoint)
        pts (cuda.FloatTensor): point cloud to search, shape (B, C, N)
        k (int): number of neighbours to return per seed
    Returns:
        cuda.LongTensor: neighbour indices, shape (B, npoint, k)
    """
    num_seeds = seed.shape[-1]
    num_pts = pts.shape[-1]
    # Broadcast both tensors to (B, C, num_seeds, num_pts) and reduce over
    # the channel axis to obtain pairwise squared distances.
    diff = pts.unsqueeze(-2).expand(-1, -1, num_seeds, -1) \
        - seed.unsqueeze(-1).expand(-1, -1, -1, num_pts)
    sq_dist = (diff ** 2).sum(dim=1)
    # Take the k+1 smallest distances; slot 0 is the seed itself, drop it.
    _, nbr_idx = sq_dist.topk(k + 1, largest=False)
    return nbr_idx[:, :, 1:]
| [
"numpy.stack",
"torch.topk",
"time.time",
"sklearn.neighbors.NearestNeighbors",
"torch.sum"
] | [((1141, 1147), 'time.time', 'time', ([], {}), '()\n', (1145, 1147), False, 'from time import time\n'), ((2319, 2356), 'torch.sum', 'torch.sum', (['((mpts - mseed) ** 2)'], {'dim': '(1)'}), '((mpts - mseed) ** 2, dim=1)\n', (2328, 2356), False, 'import torch\n'), ((2461, 2502), 'torch.topk', 'torch.topk', (['mdist'], {'k': '(k + 1)', 'largest': '(False)'}), '(mdist, k=k + 1, largest=False)\n', (2471, 2502), False, 'import torch\n'), ((1511, 1539), 'numpy.stack', 'np.stack', (['region_idx'], {'axis': '(0)'}), '(region_idx, axis=0)\n', (1519, 1539), True, 'import numpy as np\n'), ((1312, 1362), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', (['(D * K + 1)'], {'algorithm': '"""ball_tree"""'}), "(D * K + 1, algorithm='ball_tree')\n", (1328, 1362), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((1579, 1585), 'time.time', 'time', ([], {}), '()\n', (1583, 1585), False, 'from time import time\n')] |
#!/usr/bin/env python
from decimal import Decimal
import numpy as np
from numpy.testing import assert_almost_equal
from tune.db_workers.utils import (
TimeControl,
compute_probabilities,
compute_probabilities_for_bias,
draw_rate_to_elo,
elo_to_bayeselo,
ldw_probabilities,
penta,
penta_to_score,
)
def test_penta():
    """penta() combines two LDW vectors into pentanomial pair probabilities."""
    first_game = np.array([0.1, 0.2, 0.7])
    second_game = np.array([0.2, 0.2, 0.6])
    expected = np.array([0.02, 0.06, 0.24, 0.26, 0.42])
    assert_almost_equal(penta(first_game, second_game), expected, decimal=3)
def test_ldw_probabilities():
    """Loss/draw/win probabilities for a fixed elo, draw_elo and bias."""
    actual = ldw_probabilities(elo=50, draw_elo=200, bias=200)
    assert_almost_equal(
        actual,
        np.array([0.06975828735890623, 0.35877859523271227, 0.5714631174083815]),
    )
def test_draw_rate_to_elo():
    """A 50% draw rate maps to the documented draw_elo value."""
    assert_almost_equal(draw_rate_to_elo(0.5), np.array(190.84850188786498))
def test_compute_probabilities_for_bias():
    """Pentanomial probabilities for a single opening bias."""
    actual = compute_probabilities_for_bias(elo=50, draw_elo=200, bias=200)
    assert_almost_equal(
        actual,
        np.array([0.029894, 0.18540627, 0.41591514, 0.30154527, 0.06723932]),
    )
def test_compute_probabilities():
    """Pentanomial probabilities averaged over several opening biases."""
    expected = np.array([
        0.033318056828286285,
        0.19078749500997028,
        0.3957332350793835,
        0.30255132321508954,
        0.07760988986727044,
    ])
    actual = compute_probabilities(elo=50, draw_elo=200, biases=(0, 200))
    assert_almost_equal(actual, expected)
def test_elo_to_bayeselo():
    """Elo -> BayesElo conversion for a known parameter set."""
    bayes_elo = elo_to_bayeselo(elo=50, draw_elo=200, biases=(0, 200))
    assert_almost_equal(bayes_elo, 71.513929, decimal=5)
def test_penta_to_score():
    """Score estimate from pentanomial counts with a flat prior."""
    observed = penta_to_score(
        draw_rate=0.5, counts=np.array([1, 2, 3, 4, 5]), prior_games=10, prior_elo=0
    )
    assert_almost_equal(observed, 0.4016368226279837)
def test_timecontrol():
    """Round-trip parsing of "base+increment" time-control strings."""
    strings = ("3.0+0.03", "7.0+0.0")
    tc = TimeControl.from_strings(*strings)
    assert tc == (Decimal("3.0"), Decimal("0.03"), Decimal(7), Decimal(0))
    assert tc.to_strings() == strings
| [
"tune.db_workers.utils.compute_probabilities",
"decimal.Decimal",
"numpy.testing.assert_almost_equal",
"tune.db_workers.utils.compute_probabilities_for_bias",
"tune.db_workers.utils.penta_to_score",
"tune.db_workers.utils.penta",
"numpy.array",
"tune.db_workers.utils.elo_to_bayeselo",
"tune.db_worke... | [((364, 389), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.7]'], {}), '([0.1, 0.2, 0.7])\n', (372, 389), True, 'import numpy as np\n'), ((401, 426), 'numpy.array', 'np.array', (['[0.2, 0.2, 0.6]'], {}), '([0.2, 0.2, 0.6])\n', (409, 426), True, 'import numpy as np\n'), ((441, 458), 'tune.db_workers.utils.penta', 'penta', (['ldw1', 'ldw2'], {}), '(ldw1, ldw2)\n', (446, 458), False, 'from tune.db_workers.utils import TimeControl, compute_probabilities, compute_probabilities_for_bias, draw_rate_to_elo, elo_to_bayeselo, ldw_probabilities, penta, penta_to_score\n'), ((474, 514), 'numpy.array', 'np.array', (['[0.02, 0.06, 0.24, 0.26, 0.42]'], {}), '([0.02, 0.06, 0.24, 0.26, 0.42])\n', (482, 514), True, 'import numpy as np\n'), ((519, 567), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', 'expected'], {'decimal': '(3)'}), '(result, expected, decimal=3)\n', (538, 567), False, 'from numpy.testing import assert_almost_equal\n'), ((613, 662), 'tune.db_workers.utils.ldw_probabilities', 'ldw_probabilities', ([], {'elo': '(50)', 'draw_elo': '(200)', 'bias': '(200)'}), '(elo=50, draw_elo=200, bias=200)\n', (630, 662), False, 'from tune.db_workers.utils import TimeControl, compute_probabilities, compute_probabilities_for_bias, draw_rate_to_elo, elo_to_bayeselo, ldw_probabilities, penta, penta_to_score\n'), ((678, 750), 'numpy.array', 'np.array', (['[0.06975828735890623, 0.35877859523271227, 0.5714631174083815]'], {}), '([0.06975828735890623, 0.35877859523271227, 0.5714631174083815])\n', (686, 750), True, 'import numpy as np\n'), ((755, 792), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (774, 792), False, 'from numpy.testing import assert_almost_equal\n'), ((837, 858), 'tune.db_workers.utils.draw_rate_to_elo', 'draw_rate_to_elo', (['(0.5)'], {}), '(0.5)\n', (853, 858), False, 'from tune.db_workers.utils import TimeControl, compute_probabilities, 
compute_probabilities_for_bias, draw_rate_to_elo, elo_to_bayeselo, ldw_probabilities, penta, penta_to_score\n'), ((874, 902), 'numpy.array', 'np.array', (['(190.84850188786498)'], {}), '(190.84850188786498)\n', (882, 902), True, 'import numpy as np\n'), ((907, 944), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (926, 944), False, 'from numpy.testing import assert_almost_equal\n'), ((1003, 1065), 'tune.db_workers.utils.compute_probabilities_for_bias', 'compute_probabilities_for_bias', ([], {'elo': '(50)', 'draw_elo': '(200)', 'bias': '(200)'}), '(elo=50, draw_elo=200, bias=200)\n', (1033, 1065), False, 'from tune.db_workers.utils import TimeControl, compute_probabilities, compute_probabilities_for_bias, draw_rate_to_elo, elo_to_bayeselo, ldw_probabilities, penta, penta_to_score\n'), ((1081, 1149), 'numpy.array', 'np.array', (['[0.029894, 0.18540627, 0.41591514, 0.30154527, 0.06723932]'], {}), '([0.029894, 0.18540627, 0.41591514, 0.30154527, 0.06723932])\n', (1089, 1149), True, 'import numpy as np\n'), ((1154, 1191), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (1173, 1191), False, 'from numpy.testing import assert_almost_equal\n'), ((1241, 1301), 'tune.db_workers.utils.compute_probabilities', 'compute_probabilities', ([], {'elo': '(50)', 'draw_elo': '(200)', 'biases': '(0, 200)'}), '(elo=50, draw_elo=200, biases=(0, 200))\n', (1262, 1301), False, 'from tune.db_workers.utils import TimeControl, compute_probabilities, compute_probabilities_for_bias, draw_rate_to_elo, elo_to_bayeselo, ldw_probabilities, penta, penta_to_score\n'), ((1317, 1437), 'numpy.array', 'np.array', (['[0.033318056828286285, 0.19078749500997028, 0.3957332350793835, \n 0.30255132321508954, 0.07760988986727044]'], {}), '([0.033318056828286285, 0.19078749500997028, 0.3957332350793835, \n 0.30255132321508954, 0.07760988986727044])\n', (1325, 1437), True, 'import 
numpy as np\n'), ((1522, 1559), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (1541, 1559), False, 'from numpy.testing import assert_almost_equal\n'), ((1603, 1657), 'tune.db_workers.utils.elo_to_bayeselo', 'elo_to_bayeselo', ([], {'elo': '(50)', 'draw_elo': '(200)', 'biases': '(0, 200)'}), '(elo=50, draw_elo=200, biases=(0, 200))\n', (1618, 1657), False, 'from tune.db_workers.utils import TimeControl, compute_probabilities, compute_probabilities_for_bias, draw_rate_to_elo, elo_to_bayeselo, ldw_probabilities, penta, penta_to_score\n'), ((1687, 1735), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', 'expected'], {'decimal': '(5)'}), '(result, expected, decimal=5)\n', (1706, 1735), False, 'from numpy.testing import assert_almost_equal\n'), ((1778, 1803), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (1786, 1803), True, 'import numpy as np\n'), ((1817, 1890), 'tune.db_workers.utils.penta_to_score', 'penta_to_score', ([], {'draw_rate': '(0.5)', 'counts': 'counts', 'prior_games': '(10)', 'prior_elo': '(0)'}), '(draw_rate=0.5, counts=counts, prior_games=10, prior_elo=0)\n', (1831, 1890), False, 'from tune.db_workers.utils import TimeControl, compute_probabilities, compute_probabilities_for_bias, draw_rate_to_elo, elo_to_bayeselo, ldw_probabilities, penta, penta_to_score\n'), ((1929, 1966), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (1948, 1966), False, 'from numpy.testing import assert_almost_equal\n'), ((2044, 2078), 'tune.db_workers.utils.TimeControl.from_strings', 'TimeControl.from_strings', (['*strings'], {}), '(*strings)\n', (2068, 2078), False, 'from tune.db_workers.utils import TimeControl, compute_probabilities, compute_probabilities_for_bias, draw_rate_to_elo, elo_to_bayeselo, ldw_probabilities, penta, penta_to_score\n'), ((2095, 2109), 'decimal.Decimal', 'Decimal', 
(['"""3.0"""'], {}), "('3.0')\n", (2102, 2109), False, 'from decimal import Decimal\n'), ((2111, 2126), 'decimal.Decimal', 'Decimal', (['"""0.03"""'], {}), "('0.03')\n", (2118, 2126), False, 'from decimal import Decimal\n'), ((2128, 2138), 'decimal.Decimal', 'Decimal', (['(7)'], {}), '(7)\n', (2135, 2138), False, 'from decimal import Decimal\n'), ((2140, 2150), 'decimal.Decimal', 'Decimal', (['(0)'], {}), '(0)\n', (2147, 2150), False, 'from decimal import Decimal\n')] |
"""
This module provides utilities for creating custom data viewers. The
goal of this module is to make it easy for users to make new
data viewers by focusing on matplotlib visualization logic,
and not UI or event processing logic.
The end user typically interacts with this code via
:func:`glue.custom_viewer`
"""
from __future__ import print_function, division
"""
Implementation notes:
Here's a high-level summary of how this code works right now:
The user creates a custom viewer using either of the following
syntaxes:
from glue import custom_viewer
my_viewer = custom_viewer('my viewer', checked=True, x='att', ...)
@my_viewer.plot_data
def plot_data(x, checked, axes):
if checked:
axes.plot(x)
...
or
from glue.qt.custom_viewer import CustomViewer
class MyViewer(CustomViewer):
checked = True
x = 'att'
def plot_data(self, x, checked, axes):
if checked:
axes.plot(x)
This code has two "magic" features:
1. Attributes like 'checked' and 'x', passed as kwargs to custom_viewer
or set as class-level attributes in the subclass, are turned
into widgets based on their value
2. Functions like plot_data can take these settings as input (as well
as some general purpose arguments like axes). Glue takes care of
passing the proper arguments to these functions by introspecting
their call signature. Furthermore, it extracts the current
value of each setting (i.e. checked is set to True or False depending
on whether the box is checked).
The intention of all of this magic is to let a user write "simple" functions
to draw custom plots, without having to use Glue or Qt logic directly.
Internally, Glue accomplishes this magic as follows:
`FormElement`s are created for each attribute in (1). They build the widget
and have a method of extracting the current value of the widget
Functions like `plot_data` that are designed to be overriden by users
are defined as custom descriptors -- when called at the class level,
they become decorators that wrap and register the user-defined function.
When called at the instance level, they become dispatch functions which
deal with the logic in (2). The metaclass deals with registering
UDFs when they are overridden in a subclass.
"""
from inspect import getmodule, getargspec
from types import FunctionType, MethodType
from copy import copy
import numpy as np
from ..clients import LayerArtist, GenericMplClient
from ..core import Data
from ..core.edit_subset_mode import EditSubsetMode
from ..core.util import (nonpartial, as_list,
all_artists, new_artists, remove_artists)
from .. import core
from .widgets.data_viewer import DataViewer
from . import widget_properties as wp
from ..external import six
from ..external.qt import QtGui
from ..external.qt.QtCore import Qt
from .widgets import MplWidget
from .glue_toolbar import GlueToolbar
from .mouse_mode import PolyMode, RectangleMode
CUSTOM_WIDGETS = []
class AttributeInfo(np.ndarray):
    """
    An array subclass wrapping a Component of a dataset.

    It is an array with the following additional attributes:

    * ``id`` contains the ComponentID or string name of the Component
    * ``categories`` is an array or None. For categorical Components,
      contains the distinct categories which are integer-encoded
      in the AttributeInfo
    """

    @classmethod
    def make(cls, id, values, categories=None):
        """Build an AttributeInfo from raw values.

        :param id: ComponentID or string name of the component
        :param values: array-like numerical data
        :param categories: distinct category labels, or None
        """
        data = np.asarray(values)
        # view() shares the underlying buffer -- no copy is made
        result = data.view(AttributeInfo)
        result.id = id
        result.values = data
        result.categories = categories
        return result

    @classmethod
    def from_layer(cls, layer, cid, view=None):
        """
        Build an AttributeInfo out of a subset or dataset

        Parameters
        ----------
        layer : Data or Subset
            The data to use
        cid : ComponentID
            The ComponentID to use
        view : numpy-style view (optional)
            What slice into the data to use
        """
        values = layer[cid, view]
        comp = layer.data.get_component(cid)
        # categorical components carry their category labels along
        if isinstance(comp, core.data.CategoricalComponent):
            cats = comp._categories
        else:
            cats = None
        return cls.make(cid, values, cats)

    def __gluestate__(self, context):
        # only the component id is persisted; values are rebuilt on restore
        return dict(cid=context.id(self.id))

    @classmethod
    def __setgluestate__(cls, rec, context):
        return cls.make(context.object(rec['cid']), [])
class ViewerState(object):
    """
    General-purpose container that custom viewers can stash data in.

    Attribute values set on an instance are persisted via
    ``__gluestate__`` / ``__setgluestate__`` when a session is
    saved and restored.
    """

    def __gluestate__(self, context):
        # persist every attribute as a (name, serialized-value) pair
        return dict(data=[(k, context.id(v)) for k, v in self.__dict__.items()])

    @classmethod
    def __setgluestate__(cls, rec, context):
        """Rebuild a ViewerState from a saved record.

        BUG FIX: ``__gluestate__`` stores ``data`` as a *list* of
        (key, value) pairs, but this method previously indexed it as if
        it were a dict (``rec[k]`` with ``k`` a tuple), which broke
        round-tripping. Both list-of-pairs and dict forms are now accepted.
        """
        result = cls()
        data = rec['data']
        items = data.items() if isinstance(data, dict) else data
        for k, v in items:
            setattr(result, k, context.object(v))
        return result
from functools import partial
class UserDefinedFunction(object):
    """
    Descriptor giving user-defined functions (UDFs) such as
    ``plot_data`` a dual class/instance behavior.

    Accessed on the *class*, the attribute acts as a decorator that
    registers a new implementation::

        cv = custom_viewer(...)

        @cv.plot_data          # becomes a decorator
        def plot_data_implementation(...):
            ...

    Accessed on an *instance*, it acts as a dispatcher that calls the
    registered implementation with the proper arguments.

    Implementations may alternatively be supplied by overriding the
    method in a subclass; the metaclass registers those automatically,
    so they can be written as plain methods.
    """

    def __init__(self, name):
        # key under which implementations are stored in
        # the owner's _custom_functions dict
        self.name = name

    def __get__(self, instance, owner=None):
        if instance is not None:
            # instance access -> dispatch to the registered UDF
            return partial(instance._call_udf, self.name)
        # class access -> decorator that registers an implementation
        return partial(owner._register_override_method, self.name)
def introspect_and_call(func, settings):
    """
    Introspect a function for its arguments, extract values for those
    arguments from a settings oracle, and call the function

    Parameters
    ----------
    func : function
        A function to call. It should not define any keywords
    settings : SettingsOracle
        An oracle to extract values for the arguments func expects

    Returns
    -------
    The result of calling func with the proper arguments

    Raises
    ------
    AttributeError
        If ``func`` requests a variable the oracle does not recognize.

    *Example*

    def a(x, y):
        return x, y

    introspect_and_call(a, settings) will return

    a(settings('x'), settings('y'))
    """
    # BUG FIX: inspect.getargspec was removed in Python 3.11. Use the
    # modern equivalent when available, falling back for Python 2.
    # (The old code also mis-unpacked the *varargs* name as **keywords;
    # UDFs must not use varargs, so positional args alone are passed.)
    try:
        from inspect import getfullargspec
        names = getfullargspec(func).args
    except ImportError:  # Python 2
        from inspect import getargspec
        names = getargspec(func)[0]
    try:
        # get the current values of each input to the UDF
        a = [settings(item) for item in names]
    except AttributeError as exc:
        # the UDF expects an argument that we don't know how to provide
        # try to give a helpful error message
        missing = exc.args[0]
        setting_list = "\n -".join(settings.setting_names())
        raise AttributeError("This custom viewer is trying to use an "
                             "unrecognized variable named %s\n. Valid "
                             "variable names are\n -%s" %
                             (missing, setting_list))
    return func(*a)
class SettingsOracleInterface(object):
    """
    Interface for objects that resolve setting names to current values.

    Implementations must be callable with a setting name, and must be
    able to enumerate the names they can resolve.
    """

    def __call__(self, key):
        """Return the current value of the setting named ``key``."""
        raise NotImplementedError()

    def setting_names(self):
        """Return the list of setting names this oracle can resolve.

        BUG FIX: this previously *returned* a NotImplementedError
        instance instead of raising it, silently handing callers an
        exception object.
        """
        raise NotImplementedError()
class SettingsOracle(SettingsOracleInterface):
    """Resolve setting names against UI elements plus per-call overrides.

    ``layer`` and ``view`` are special override keywords: they are
    removed from the override dict and made available to each
    FormElement's ``value(layer, view)`` call.
    """

    def __init__(self, settings, **override):
        self.settings = settings  # dict-like, items have a value() method
        self.override = override  # look for settings here first
        # layer and view are special keywords
        self.layer = override.pop('layer', None)
        self.view = override.pop('view', None)

    def __call__(self, key):
        # unresolvable keys surface uniformly as AttributeError,
        # so introspect_and_call can report them helpfully
        try:
            return self._lookup(key)
        except (KeyError, AttributeError):
            raise AttributeError(key)

    def _lookup(self, key):
        # resolution order matters: overrides win over special keys,
        # which win over the settings dict
        if key == 'self':
            return self.override['_self']
        if key in self.override:
            return self.override[key]
        if key == 'style':
            return self.layer.style
        if key == 'layer':
            return self.layer
        return self.settings[key].value(self.layer, self.view)

    def setting_names(self):
        # the special keys are always available
        return list(set(list(self.settings.keys()) + ['style', 'layer']))
class CustomViewerMeta(type):
    """
    Metaclass to construct CustomViewer and subclasses

    The metaclass does two things when constructing new
    classes:

    - it finds the class-level attributes that describe
      ui elements (eg ``checked=False``). It bundles these
      into a ``ui`` dict attribute, later used to construct
      the FormElements and widgets to represent each setting

    - It creates the qt DataViewer widget class associated with this class.

    - It looks for overridden user-defined methods like ``plot_subset``,
      and registers them for later use.
    """
    def __new__(cls, name, bases, attrs):
        # don't muck with the base class
        if name == 'CustomViewer':
            return type.__new__(cls, name, bases, attrs)
        # Build UI Form: every public, non-callable class attribute that
        # does not shadow something on CustomViewer is treated as the
        # shorthand description of a UI element
        ui = {}
        for key, value in list(attrs.items()):
            if key.startswith('_') or key in CustomViewer.__dict__:
                continue
            if not isinstance(value, (MethodType, FunctionType)):
                ui[key] = attrs.pop(key)
        attrs['ui'] = ui
        attrs.setdefault('name', name)
        # collect the UDFs: methods that shadow a UserDefinedFunction
        # descriptor on CustomViewer (e.g. plot_data) are pulled out here
        udfs = {}
        for nm, value in list(attrs.items()):
            dscr = CustomViewer.__dict__.get(nm, None)
            if isinstance(dscr, UserDefinedFunction):
                # remove them as class method
                # register them below instead
                udfs[nm] = attrs.pop(nm)
        result = type.__new__(cls, name, bases, attrs)
        # now wrap the custom UDFs using the descriptors
        for k, v in udfs.items():
            # register UDF by mimicking the decorator syntax
            udf_decorator = getattr(result, k)
            udf_decorator(v)
        # build and register the Qt DataViewer subclass for this viewer
        result._build_widget_subclass()
        return result
class CustomSubsetState(core.subset.SubsetState):
    """
    A SubsetState subclass that uses a CustomViewer's "select" function
    """
    def __init__(self, viewer_cls, roi, settings):
        # viewer_cls: the CustomViewer subclass whose 'select' UDF to call
        # roi: the ROI the user drew
        # settings: FrozenSettings snapshot taken when the ROI was drawn
        super(CustomSubsetState, self).__init__()
        self._viewer_cls = viewer_cls
        self._settings = settings
        self._roi = roi
    def to_mask(self, data, view=None):
        # call the user-defined 'select' function with the frozen
        # settings, the stored ROI, and the layer being masked
        settings = SettingsOracle(self._settings,
                                  layer=data, roi=self._roi, view=view)
        return introspect_and_call(self._viewer_cls._custom_functions['select'],
                                   settings)
    def copy(self):
        # the settings snapshot is shallow-copied to decouple future edits
        return CustomSubsetState(self._viewer_cls, self._roi.copy(), copy(self._settings))
    def __gluestate__(self, context):
        # the viewer class is persisted by *name* only; see
        # __setgluestate__ for how it is resolved on restore
        result = {}
        result['viewer_cls'] = self._viewer_cls.__name__
        result['settings'] = context.do(self._settings)
        result['roi'] = context.id(self._roi)
        return result
    @classmethod
    def __setgluestate__(cls, rec, context):
        # viewer classes are looked up by name in this module's namespace
        # (they are registered there by _build_widget_subclass)
        viewer = getattr(getmodule(ViewerState), rec['viewer_cls'])
        settings = context.object(rec['settings'])
        roi = context.object(rec['roi'])
        return cls(viewer, roi, settings)
class FrozenSettings(object):
    """
    An immutable snapshot of a CustomViewer's settings.
    """

    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def value(self, key, layer=None, view=None):
        """Look up the frozen value for ``key``.

        AttributeInfo values are re-extracted from ``layer`` when one is
        supplied, so data-dependent settings follow the layer.

        :raises AttributeError: if ``key`` is unknown
        """
        try:
            result = self.kwargs[key]
        except KeyError:
            raise AttributeError(key)
        if layer is not None and isinstance(result, AttributeInfo):
            return AttributeInfo.from_layer(layer, result.id, view)
        return result

    def __getitem__(self, key):
        # mimic the FormElement interface: return an object with .value()
        outer = self

        class _Proxy(object):
            @staticmethod
            def value(layer=None, view=None):
                return outer.value(key, layer, view)

        return _Proxy

    def keys(self):
        return self.kwargs.keys()

    def __gluestate__(self, context):
        return dict(data=[(k, context.do(v)) for k, v in self.kwargs.items()])

    @classmethod
    def __setgluestate__(cls, rec, context):
        kwargs = dict((k, context.object(v)) for k, v in rec['data'])
        return cls(**kwargs)
@six.add_metaclass(CustomViewerMeta)
class CustomViewer(object):
    """
    Base class for custom data viewers.

    Users can either subclass this class and override
    one or more custom methods listed below, or use the
    :func:`glue.custom_viewer` function and decorate custom
    plot functions.

    *Custom Plot Methods*

    The following methods can be overridden:

    - :meth:`CustomViewer.setup`
    - :meth:`CustomViewer.plot_data`
    - :meth:`CustomViewer.plot_subset`
    - :meth:`CustomViewer.settings_changed`
    - :meth:`CustomViewer.make_selector`
    - :meth:`CustomViewer.select`

    *Method Signatures*

    Custom methods should use argument names from the following list:

    - The name of a UI element (e.g. keywords passed to :func:`glue.custom_viewer`,
      or class-level variables in subclasses). The value assigned to this
      argument will be the current UI setting (e.g. bools for checkboxes).
    - ``axes`` will contain a matplotlib Axes object
    - ``roi`` will contain the ROI a user has drawn (only available for ``make_selector``)
    - ``state`` will contain a general-purpose object to store other data
    - ``style`` contains the :class:`~glue.core.visual.VisualAttributes` describing
      a subset or dataset. Only available for ``plot_data`` and ``plot_subset``
    - ``subset`` will contain the relevant :class:`~glue.core.subset.Subset` object.
      Only available for ``plot_subset``

    *Defining the UI*

    Simple widget-based UIs can be specified by providing keywords to
    :func:`~glue.custom_viewer` or class-level variables to subsets. The kind
    of widget to associate with each UI element is determined from its type.

    *Example decorator*

    ::

        v = custom_viewer('Example', checkbox=False)

        @v.plot_data
        def plot(checkbox, axes):
            axes.plot([1, 2, 3])

    *Example subclass*

    ::

        class CustomViewerSubset(CustomViewer):
            checkbox = False

            def plot_data(self, checkbox, axes):
                axes.plot([1, 2, 3])

    The order of arguments can be listed in any order.
    """
    redraw_on_settings_change = True  #: redraw all layers when UI state changes?
    remove_artists = True  #: auto-delete artists?
    name = ''  #: Label to give this widget in the GUI
    # hold user descriptions of desired FormElements to create
    ui = {}
    # map, e.g., 'plot_data' -> user defined function
    # subclasses must override this dict!
    _custom_functions = {}
    def __init__(self, widget_instance):
        self.widget = widget_instance
        self.state = ViewerState()
        self._settings = {}
        # tracks artists created by each custom function
        self._created_artists = {}
    @property
    def selections_enabled(self):
        # ROI selection modes are only offered when a selection UDF exists
        return 'make_selector' in self._custom_functions or 'select' in self._custom_functions
    @classmethod
    def create_new_subclass(cls, name, **kwargs):
        """
        Convenience method to build a new CustomViewer subclass

        :param name: Name of the new viewer
        :param kwargs: UI elements in the subclass
        """
        kwargs = kwargs.copy()
        kwargs['name'] = name
        # each subclass needs its own dict
        kwargs['_custom_functions'] = {}
        name = name.replace(' ', '')
        return CustomViewerMeta(name, (CustomViewer,), kwargs)
    @classmethod
    def _build_widget_subclass(cls):
        """
        Build the DataViewer subclass for this viewer
        """
        props = CustomWidgetBase._property_set + list(cls.ui.keys())
        widget_dict = {'LABEL': cls.name,
                       'ui': cls.ui,
                       'coordinator_cls': cls,
                       '_property_set': props}
        # each UI setting gets a FormDescriptor on the widget class,
        # so settings can be read/written as plain widget attributes
        widget_dict.update(**dict((k, FormDescriptor(k))
                                  for k in cls.ui))
        widget_cls = type('%sWidget' % cls.__name__,
                          (CustomWidgetBase,),
                          widget_dict)
        cls._widget_cls = widget_cls
        CUSTOM_WIDGETS.append(widget_cls)
        # add new classes to module namespace
        # needed for proper state saving/restoring
        for c in [widget_cls, cls]:
            w = getattr(getmodule(ViewerState), c.__name__, None)
            if w is not None:
                raise RuntimeError("Duplicate custom viewer detected %s" % c)
            setattr(getmodule(ViewerState), c.__name__, c)
    @classmethod
    def _register_override_method(cls, name, func):
        """
        Register a new custom method like "plot_data"

        Users need not call this directly -- it is
        called when a method is overridden or decorated
        """
        cls._custom_functions[name] = func
    def _add_data(self, data):
        # let every FormElement know about the new dataset
        for w in self._settings.values():
            w.add_data(data)
    def register_to_hub(self, hub):
        for w in self._settings.values():
            w.register_to_hub(hub)
    def unregister(self, hub):
        for w in self._settings.values():
            hub.unsubscribe_all(w)
    def _build_ui(self, callback):
        # lay out one FormElement widget per UI setting, alphabetically;
        # callback is invoked whenever any element's state changes
        result = QtGui.QWidget()
        layout = QtGui.QFormLayout()
        layout.setFieldGrowthPolicy(layout.AllNonFixedFieldsGrow)
        result.setLayout(layout)
        for k in sorted(self.ui):
            v = self.ui[k]
            w = FormElement.auto(v)
            w.container = self.widget._container
            w.add_callback(callback)
            self._settings[k] = w
            if w.ui is not None:
                layout.addRow(k.title().replace('_', ' '), w.ui)
        return result
    def value(self, key, layer=None, view=None):
        # current value of one setting, resolved like a UDF argument
        return SettingsOracle(self._settings, layer=layer, view=view)(key)
    def create_axes(self, figure):
        """
        Build a new axes object

        Override for custom axes
        """
        return figure.add_subplot(1, 1, 1)
    def _build_subset_state(self, roi):
        # 'make_selector' takes precedence over 'select' if both exist
        if 'make_selector' in self._custom_functions:
            return self.make_selector(roi=roi)
        if 'select' in self._custom_functions:
            return CustomSubsetState(type(self), roi, self.settings())
        raise RuntimeError("Selection not supported for this viewer.")
    def __copy__(self):
        """
        Copying a CustomViewer freezes custom settings at their current value,
        decoupling them from future changes to the main viewer
        """
        result = type(self)(self.widget)
        result.state = copy(self.state)
        # share public attributes
        for k, v in self.__dict__.items():
            if not k.startswith('_'):
                result.__dict__[k] = v
        # copy settings
        for k in self._settings:
            result._settings[k] = copy(self._settings[k])
        return result
    def settings(self):
        """
        Return a frozen copy of the current settings of the viewer
        """
        result = {'state': copy(self.state)}
        for k in self._settings:
            result[k] = self.value(k)
        return FrozenSettings(**result)
    # List of user-defined functions.
    # Users can either use these as decorators to
    # wrap custom functions, or override them in subclasses.
    setup = UserDefinedFunction('setup')
    """
    Custom method called when plot is created
    """
    plot_subset = UserDefinedFunction('plot_subset')
    """
    Custom method called to show a subset
    """
    plot_data = UserDefinedFunction('plot_data')
    """
    Custom method called to show a dataset
    """
    make_selector = UserDefinedFunction('make_selector')
    """
    Custom method called to build a :class:`~glue.core.subset.SubsetState` from an ROI.

    See :meth:`~CustomViewer.select` for an alternative way to define selections,
    by returning Boolean arrays instead of SubsetStates.

    Functions have access to the roi by accepting an ``roi``
    argument to this function
    """
    settings_changed = UserDefinedFunction('settings_changed')
    """
    Custom method called when UI settings change.
    """
    select = UserDefinedFunction('select')
    """
    Custom method called to filter data using an ROI.

    This is an alternative function to :meth:`~CustomViewer.make_selector`,
    which returns a numpy boolean array instead of a SubsetState.

    Functions have access to the roi by accepting an ``roi``
    argument to this function
    """
    """
    End of UDF list.
    """
    def _call_udf(self, method_name, **kwargs):
        """
        Call a user-defined function stored in the _custom_functions dict

        Parameters
        ----------
        method_name : str
            The name of the user-defined method to setup a dispatch for
        **kwargs : dict
            Custom settings to pass to the UDF if they are requested by name
            as input arguments

        Returns
        -------
        The result of the UDF

        Notes
        -----
        This function builds the necessary arguments to the
        user-defined function. It also attempts to monitor
        the state of the matplotlib plot, removing stale
        artists and re-rendering the canvas as needed.
        """
        # get the custom function
        try:
            func = self._custom_functions[method_name]
        except KeyError:
            # no implementation registered for this hook -> no-op
            return []
        # clear any MPL artists created on last call
        if self.remove_artists:
            layer = kwargs.get('layer', None)
            key = (layer, method_name)
            old = self._created_artists.get(key, set())
            remove_artists(old)
            current = all_artists(self.axes.figure)
        # add some extra information that the user might want
        kwargs.setdefault('_self', self)
        kwargs.setdefault('axes', self.axes)
        kwargs.setdefault('figure', self.axes.figure)
        kwargs.setdefault('state', self.state)
        # call method, keep track of newly-added artists
        settings = SettingsOracle(self._settings, **kwargs)
        result = introspect_and_call(func, settings)
        if self.remove_artists:
            new = new_artists(self.axes.figure, current)
            self._created_artists[key] = new
            if new:
                self.axes.figure.canvas.draw()
        else:
            self.axes.figure.canvas.draw()
        return result
class CustomArtist(LayerArtist):
    """
    LayerArtist for custom viewers
    """
    def __init__(self, layer, axes, coordinator):
        """
        :param layer: Data or Subset object to draw
        :param axes: Matplotlib axes to use
        :param coordinator: CustomViewer instance owning the user-defined
                            plot functions and UI settings
        """
        super(CustomArtist, self).__init__(layer, axes)
        self._coordinator = coordinator
    def update(self, view=None):
        """
        Redraw the layer
        """
        if not self._visible:
            return
        self.clear()
        # snapshot existing artists so newly-created ones can be identified
        if self._coordinator.remove_artists:
            old = all_artists(self._axes.figure)
        if isinstance(self._layer, Data):
            a = self._coordinator.plot_data(layer=self._layer)
        else:
            a = self._coordinator.plot_subset(layer=self._layer, subset=self._layer)
        # if user explicitly returns the newly-created artists,
        # then use them. Otherwise, introspect to find the new artists
        if a is None:
            if self._coordinator.remove_artists:
                self.artists = list(new_artists(self._axes.figure, old))
            else:
                self.artists = []
        else:
            self.artists = as_list(a)
        for a in self.artists:
            a.set_zorder(self.zorder)
class CustomClient(GenericMplClient):
    """Matplotlib client that delegates drawing to a CustomViewer coordinator."""
    def __init__(self, *args, **kwargs):
        # the coordinator (a CustomViewer) supplies axes creation and
        # the user-defined plot/selection functions
        self._coordinator = kwargs.pop('coordinator')
        kwargs.setdefault('axes_factory', self._coordinator.create_axes)
        super(CustomClient, self).__init__(*args, **kwargs)
        self._coordinator.axes = self.axes
        self._coordinator.setup()
    def new_layer_artist(self, layer):
        # each layer gets a CustomArtist wired to the same coordinator
        return CustomArtist(layer, self.axes, self._coordinator)
    def apply_roi(self, roi):
        # pick a dataset to focus the edit on: prefer the first drawn
        # layer, fall back to the first collected dataset
        if len(self.artists) > 0:
            focus = self.artists[0].layer.data
        elif len(self.collect) > 0:
            focus = self.collect[0]
        else:
            return
        s = self._coordinator._build_subset_state(roi=roi)
        if s:
            EditSubsetMode().update(self.collect, s, focus_data=focus)
    def _update_layer(self, layer):
        for artist in self.artists[layer]:
            artist.update()
        self._redraw()
class CustomWidgetBase(DataViewer):
    """Base Qt widget class for custom viewers"""
    # Widget name
    LABEL = ''
    coordinator_cls = None  # CustomViewer subclass; set by _build_widget_subclass
    def __init__(self, session, parent=None):
        super(CustomWidgetBase, self).__init__(session, parent)
        self.central_widget = MplWidget()
        self.setCentralWidget(self.central_widget)
        self._build_coordinator()
        self.option_widget = self._build_ui()
        self.client = CustomClient(self._data,
                                   self.central_widget.canvas.fig,
                                   artist_container=self._container,
                                   coordinator=self._coordinator)
        self.make_toolbar()
        self.statusBar().setSizeGripEnabled(False)
        self._update_artists = []
        # trigger an initial draw with the default settings
        self.settings_changed()
    def options_widget(self):
        return self.option_widget
    def _build_coordinator(self):
        # one coordinator (CustomViewer) instance per widget
        self._coordinator = self.coordinator_cls(self)
    def _build_ui(self):
        return self._coordinator._build_ui(self.settings_changed)
    def settings_changed(self):
        """
        Called when UI settings change
        """
        if self._coordinator.redraw_on_settings_change:
            self.client._update_all()
        self.client._redraw()
        # notify the user-defined settings_changed hook, if any
        self._coordinator.settings_changed()
    def make_toolbar(self):
        result = GlueToolbar(self.central_widget.canvas, self, name=self.LABEL)
        for mode in self._mouse_modes():
            result.add_mode(mode)
        self.addToolBar(result)
        return result
    def _mouse_modes(self):
        # selection modes are only offered when the coordinator defines
        # a 'make_selector' or 'select' UDF
        if not self._coordinator.selections_enabled:
            return []
        axes = self.client.axes
        def apply_mode(mode):
            self.client.apply_roi(mode.roi())
        return [RectangleMode(axes, roi_callback=apply_mode),
                PolyMode(axes, roi_callback=apply_mode)]
    def add_data(self, data):
        """Add a new data set to the widget

        :returns: True if the addition was expected, False otherwise
        """
        if data in self.client:
            return
        self.client.add_layer(data)
        self._coordinator._add_data(data)
        return True
    def add_subset(self, subset):
        """Add a subset to the widget

        :returns: True if the addition was accepted, False otherwise
        """
        self.add_data(subset.data)
        if subset.data in self.client:
            self.client.add_layer(subset)
        return True
    def register_to_hub(self, hub):
        super(CustomWidgetBase, self).register_to_hub(hub)
        self.client.register_to_hub(hub)
        self._coordinator.register_to_hub(hub)
    def unregister(self, hub):
        super(CustomWidgetBase, self).unregister(hub)
        hub.unsubscribe_all(self.client)
        hub.unsubscribe_all(self)
        self._coordinator.unregister(hub)
class FormDescriptor(object):
    """Expose one coordinator setting as a plain widget attribute.

    Reads and writes proxy to
    ``inst._coordinator._settings[name].state``.
    """

    def __init__(self, name):
        self.name = name

    def _element(self, inst):
        # the FormElement backing this setting
        return inst._coordinator._settings[self.name]

    def __get__(self, inst, owner=None):
        return self._element(inst).state

    def __set__(self, inst, value):
        self._element(inst).state = value
class FormElement(object):
    """
    Base class for user-defined settings in a custom widget.

    Each form element has a value() and a widget. Subclasses
    must override _build_ui, value, and recognizes. They
    may override register_to_hub and add_data.
    """

    def __init__(self, params):
        # params is the shorthand description (e.g. True, (0, 1), 'att')
        self.params = params
        self._callbacks = []
        self.ui = self._build_ui()
        self.container = None  # layer container

    def _build_ui(self):
        """
        Build and return a widget to represent this setting.

        The widget should automatically call the changed()
        method when its state changes
        """
        raise NotImplementedError()

    def value(self, layer=None, view=None):
        """
        Extract the value of this element

        :param layer: The Data or Subset object to use,
                      if extracting numerical data
        :param view: numpy-style view into the data (optional)
        """
        raise NotImplementedError()

    @property
    def state(self):
        """Serializable snapshot of the current UI state."""
        raise NotImplementedError()

    @state.setter
    def state(self, value):
        raise NotImplementedError()

    def __copy__(self):
        result = type(self)(self.params)
        result.state = self.state
        return result

    def changed(self):
        """Invoke all registered callbacks (called when UI state changes)."""
        for cb in self._callbacks:
            cb()

    def add_callback(self, cb):
        """
        Register a new callback function to be invoked
        when the form state changes
        """
        self._callbacks.append(cb)

    @classmethod
    def recognizes(cls, params):
        """
        Returns whether or not a shorthand "params" object
        can be passed to __init__ to construct an element
        """
        raise NotImplementedError

    @staticmethod
    def auto(params):
        """
        Construct the appropriate FormElement subclass,
        given a shorthand object. For example,
        FormElement.auto((0., 1.)) returns a NumberElement

        :raises ValueError: if no subclass recognizes ``params``
        """
        for cls in FormElement.__subclasses__():
            if cls.recognizes(params):
                return cls(params)
        # BUG FIX: user-facing message previously read "Unrecognzied"
        raise ValueError("Unrecognized UI Component: %s" % (params,))

    @staticmethod
    def dereference(elements, layer=None):
        """
        Given a dict of elements, extract their current settings
        into a dict

        :param elements: dict mapping labels -> FormElements
        :param layer: Subset or Data object as reference
        :returns: dict mapping labels -> setting value
        """
        return dict((k, v.value(layer)) for k, v in elements.items())

    def register_to_hub(self, hub):
        """
        Register the element to the hub
        """
        pass

    def add_data(self, data):
        """
        Add data to the element
        """
        pass
class NumberElement(FormElement):
    """
    A form element representing a number

    The shorthand is a tuple of 2 or 3 numbers:
    (min, max) or (min, max, default)::

        e = FormElement.auto((0., 1.))
    """
    # proxies the slider's current value for save/restore
    state = wp.ValueProperty('ui')

    @classmethod
    def recognizes(cls, params):
        """Shorthand is a length-2 or -3 sequence of ints/floats."""
        try:
            if len(params) not in [2, 3]:
                return False
            return all(isinstance(p, six.integer_types + (float,)) for p in params)
        except TypeError:
            # params has no len() -> not a number shorthand
            return False

    def _build_ui(self):
        # BUG FIX: a throwaway QtGui.QSlider was previously instantiated
        # here and immediately discarded, leaking a widget.
        w = LabeledSlider(*self.params[:3])
        w.valueChanged.connect(nonpartial(self.changed))
        return w

    def value(self, layer=None, view=None):
        return self.ui.value()
class LabeledSlider(QtGui.QWidget):
    """
    A labeled slider widget, that handles floats and integers
    """
    def __init__(self, min, max, default=None, parent=None):
        """
        :param min: Minimum slider value
        :param max: Maximum slider value
        :param default: Initial value
        :param parent: Widget parent
        """
        super(LabeledSlider, self).__init__(parent)
        # the underlying QSlider always works on a fixed 0-100 scale;
        # value()/set_value() translate to/from the [min, max] range
        self._slider = QtGui.QSlider()
        self._slider.setMinimum(0)
        self._slider.setMaximum(100)
        self._slider.setOrientation(Qt.Horizontal)
        self._min = min
        self._ptp = (max - min)
        # only report integer values if all inputs are integers
        self._isint = (isinstance(min, int) and
                       isinstance(max, int) and
                       isinstance(default, (int, type(None))))
        if default is None:
            default = (min + max) / 2
        self.set_value(default)
        # setup layout
        self._lbl = QtGui.QLabel(str(self.value()))
        self._l = QtGui.QHBoxLayout()
        self._l.setContentsMargins(2, 2, 2, 2)
        self._l.addWidget(self._slider)
        self._l.addWidget(self._lbl)
        self.setLayout(self._l)
        # connect signals: keep the label in sync with the slider
        self._slider.valueChanged.connect(lambda x: self._lbl.setText(str(self.value())))
    @property
    def valueChanged(self):
        """
        Pointer to valueChanged signal.

        WARNING: the value emitted by this signal is unscaled,
        and shouldn't be used directly. Use .value() instead
        """
        return self._slider.valueChanged
    def value(self, layer=None, view=None):
        """
        Return the numerical value of the slider
        """
        # rescale from the internal 0-100 range back to [min, max]
        v = self._slider.value() / 100. * self._ptp + self._min
        if self._isint:
            v = int(v)
        return v
    def set_value(self, val):
        """
        Set the numerical value of the slider
        """
        # rescale into the internal 0-100 range, clamping at the ends
        v = (1. * (val - self._min)) / self._ptp * 100
        v = min(max(int(v), 0), 100)
        self._slider.setValue(v)
    setValue = set_value
class BoolElement(FormElement):
    """
    A checkbox representing a boolean setting

    The shorthand notation is True or False::

        e = FormElement.auto(False)
    """
    # proxies the checkbox state for save/restore
    state = wp.ButtonProperty('ui')
    @classmethod
    def recognizes(cls, params):
        return isinstance(params, bool)
    def _build_ui(self):
        w = QtGui.QCheckBox()
        # the shorthand value doubles as the initial checked state
        w.setChecked(self.params)
        w.toggled.connect(nonpartial(self.changed))
        return w
    def value(self, layer=None, view=None):
        return self.ui.isChecked()
class FixedComponent(FormElement):
    """
    An element for a Data Component. Does not have a widget

    The shorthand notation is 'att(comp_name)'::

        e = FormElement.auto('att(foo)')
    """

    @classmethod
    def recognizes(cls, params):
        # only objects with a startswith method (strings) can match
        starts = getattr(params, 'startswith', None)
        return starts is not None and starts('att(')

    def _build_ui(self):
        # fixed components have no widget to show
        pass

    def value(self, layer=None, view=None):
        """
        Extract the component value as an AttributeInfo object
        """
        # 'att(foo)' -> 'foo'
        name = self.params.split('(')[-1][:-1]
        if layer is None:
            return AttributeInfo.make(name, [])
        cid = layer.data.id[name]
        return AttributeInfo.from_layer(layer, cid, view)

    @property
    def state(self):
        # the full 'att(...)' shorthand string is the state
        return self.params

    @state.setter
    def state(self, value):
        self.params = value
class ComponenentElement(FormElement, core.hub.HubListener):
    """
    A dropdown selector to choose a component

    The shorthand notation is 'att'::

        e = FormElement.auto('att')

    .. note:: the class name is misspelled ("Componenent") but is kept
       as-is for backward compatibility.
    """
    _component = wp.CurrentComboProperty('ui')
    @property
    def state(self):
        return self._component
    @state.setter
    def state(self, value):
        # refresh the combo contents first, so the saved component
        # is present in the list before selecting it
        self._update_components()
        if value is None:
            return
        self._component = value
    @classmethod
    def recognizes(cls, params):
        return params == 'att'
    def _build_ui(self):
        result = QtGui.QComboBox()
        result.currentIndexChanged.connect(nonpartial(self.changed))
        return result
    def value(self, layer=None, view=None):
        cid = self._component
        if layer is None or cid is None:
            return AttributeInfo.make(cid, [])
        return AttributeInfo.from_layer(layer, cid, view)
    def _update_components(self):
        # rebuild the combo from all non-hidden components of all layers,
        # preserving the current selection when possible
        combo = self.ui
        old = self._component
        combo.blockSignals(True)
        combo.clear()
        comps = list(set([c for l in self.container.layers
                          for c in l.data.components if not c._hidden]))
        comps = sorted(comps, key=lambda x: x.label)
        for c in comps:
            combo.addItem(c.label, userData=c)
        try:
            combo.setCurrentIndex(comps.index(old))
        except ValueError:
            # previous component no longer present; select the first entry
            combo.setCurrentIndex(0)
        combo.blockSignals(False)
    def register_to_hub(self, hub):
        # stay in sync when the data's component list changes
        hub.subscribe(self, core.message.ComponentsChangedMessage,
                      nonpartial(self._update_components))
    def add_data(self, data):
        self._update_components()
class ChoiceElement(FormElement):
    """
    A dropdown selector to choose between a set of items

    Shorthand notation is a sequence of strings or a dict::

        e = FormElement.auto({'a':1, 'b':2})
        e = FormElement.auto(['a', 'b', 'c'])
    """
    state = wp.CurrentComboProperty('ui')
    @classmethod
    def recognizes(cls, params):
        # a bare string is handled by other elements ('att', 'att(...)')
        if isinstance(params, six.string_types):
            return False
        try:
            return all(isinstance(p, six.string_types) for p in params)
        except TypeError:
            # params is not iterable
            return False
    def _build_ui(self):
        w = QtGui.QComboBox()
        for p in sorted(self.params):
            w.addItem(p)
        # normalize a list shorthand to a label -> value dict
        if isinstance(self.params, list):
            self.params = dict((p, p) for p in self.params)
        w.currentIndexChanged.connect(nonpartial(self.changed))
        return w
    def value(self, layer=None, view=None):
        # map the displayed label back to its value
        return self.params[self.ui.currentText()]
| [
"functools.partial",
"numpy.asarray",
"copy.copy",
"inspect.getmodule",
"inspect.getargspec"
] | [((6956, 6972), 'inspect.getargspec', 'getargspec', (['func'], {}), '(func)\n', (6966, 6972), False, 'from inspect import getmodule, getargspec\n'), ((3463, 3481), 'numpy.asarray', 'np.asarray', (['values'], {}), '(values)\n', (3473, 3481), True, 'import numpy as np\n'), ((6281, 6319), 'functools.partial', 'partial', (['instance._call_udf', 'self.name'], {}), '(instance._call_udf, self.name)\n', (6288, 6319), False, 'from functools import partial\n'), ((19454, 19470), 'copy.copy', 'copy', (['self.state'], {}), '(self.state)\n', (19458, 19470), False, 'from copy import copy\n'), ((6131, 6180), 'functools.partial', 'partial', (['cls._register_override_method', 'self.name'], {}), '(cls._register_override_method, self.name)\n', (6138, 6180), False, 'from functools import partial\n'), ((11289, 11309), 'copy.copy', 'copy', (['self._settings'], {}), '(self._settings)\n', (11293, 11309), False, 'from copy import copy\n'), ((11639, 11661), 'inspect.getmodule', 'getmodule', (['ViewerState'], {}), '(ViewerState)\n', (11648, 11661), False, 'from inspect import getmodule, getargspec\n'), ((19718, 19741), 'copy.copy', 'copy', (['self._settings[k]'], {}), '(self._settings[k])\n', (19722, 19741), False, 'from copy import copy\n'), ((19908, 19924), 'copy.copy', 'copy', (['self.state'], {}), '(self.state)\n', (19912, 19924), False, 'from copy import copy\n'), ((17193, 17215), 'inspect.getmodule', 'getmodule', (['ViewerState'], {}), '(ViewerState)\n', (17202, 17215), False, 'from inspect import getmodule, getargspec\n'), ((17364, 17386), 'inspect.getmodule', 'getmodule', (['ViewerState'], {}), '(ViewerState)\n', (17373, 17386), False, 'from inspect import getmodule, getargspec\n')] |
"""
Goal - pass argument to make figure with and without data
Date - Mar 15 2021
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import pickle
from matplotlib import cm
import argparse
import seaborn as sns
#argparse
def boolean_string(s):
    """Parse a command-line boolean flag.

    argparse's ``type=bool`` treats any non-empty string as True, so this
    converter accepts only the literal strings ``'True'``/``'False'`` and
    raises ``ValueError`` for anything else.
    """
    if s in ('True', 'False'):
        return s == 'True'
    raise ValueError('Not a valid boolean string')
# create the parser object
parser = argparse.ArgumentParser()
# NOTE: argparse will throw an error if:
# - a flag is given with no value
# - the value does not match the type
# and if a flag is not given it will be filled with the default.
# -a selects which predictions CSV to load and therefore which plotting branch below runs
parser.add_argument('-a', '--a_string', default='annd_after_loom_predictions.csv', type=str)
#parser.add_argument('-s', '--a_string', default='annd_std', type=str)
parser.add_argument('-b', '--integer_b', default=3, type=int)
parser.add_argument('-c', '--float_c', default=1.5, type=float)
# -v True -> overlay raw observations on the model predictions; False -> predictions only
parser.add_argument('-v', '--verbose', default=True, type=boolean_string)
# Note that you assign a short name and a long name to each argument.
# You can use either when you call the program, but you have to use the
# long name when getting the values back from "args".
# get the arguments
args = parser.parse_args()
#################################################################################
# data1: model predictions (file chosen by -a); data2: observed per-trial parameters
data1 = pd.read_csv('../../data/temp_collective/roi/'+args.a_string)
data2 = pd.read_csv('../../data/temp_collective/roi/all_params_w_loom.csv')
gs = [1,2,4,8,16]  # group sizes present in the experiment
colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+2))  # one shade per group size (extra entries skip the palette extremes)
#Plotting
lw=2   # line width for prediction curves
fs=30  # font size for labels/ticks
fig = plt.figure(figsize=(16,10))
ax = fig.add_subplot(111)
count = 2  # running index into `colors`; NOTE: shared and mutated by the branches below
dpi = 100
text = '_low_res'  # NOTE(review): unused in the visible code -- appears to be a leftover filename suffix
# ANND (average nearest-neighbour distance) after loom vs temperature.
# Predictions are exponentiated (np.exp), so the fitted model is presumably on the
# log scale -- consistent with the other log-model sections below; confirm in the R script.
if args.a_string=='annd_after_loom_predictions.csv':
    data1 = pd.read_csv('../../data/temp_collective/roi/annd_after_loom_predictions.csv')
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_w_loom.csv')
    y_label = 'ANND (Body Length)'  # NOTE(review): unused -- labels below are hard-coded
    gs = [2,4,8,16]
    colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+2))
    #Plotting
    if args.verbose==True:
        # Overlay raw observations, then one prediction curve + 95% band per group size.
        # date == 18106 and trial == 10 presumably fix the remaining covariates at one
        # reference trial so a single curve is drawn per group size -- TODO confirm.
        for i in gs:
            ax.scatter(data2.Temperature[data2.Groupsize == i],
                data2["annd"][data2.Groupsize == i], alpha = 0.5, color = colors[count], s =10)
            ax.plot(
                data1.temp[data1.gs == i][data1.date == 18106][data1.trial == 10],
                np.exp(data1.annd[data1.gs==i][data1.date == 18106][data1.trial == 10]), color = colors[count],
                lw = lw)
            ax.fill_between(
                data1.temp[data1.gs == i][data1.date == 18106][data1.trial == 10],
                np.exp(data1.annd025[data1.gs==i][data1.date == 18106][data1.trial == 10]),
                np.exp(data1.annd975[data1.gs==i][data1.date == 18106][data1.trial == 10]), alpha = 0.3, color = colors[count], label = str(i),lw = 0)
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel('ANND (BL)', size = fs)
        #ax.set_title('Groupsize = 16', fontsize = fs)
        legend = plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        plt.setp(legend.get_title(),fontsize='xx-large')
        ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/annd_after_loom_predictions_w_data_all_low_res.png'
        fig.savefig(out_dir, dpi = 100)
        plt.show()
    else:
        # Predictions only, for a reduced set of group sizes.
        gs = [2,16]
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+2))
        for i in gs:
            ax.plot(
                data1.temp[data1.gs == i][data1.date == 18106][data1.trial == 10],
                np.exp(data1.annd[data1.gs==i][data1.date == 18106][data1.trial == 10]), color = colors[count],
                lw = lw)
            ax.fill_between(
                data1.temp[data1.gs == i][data1.date == 18106][data1.trial == 10],
                np.exp(data1.annd025[data1.gs==i][data1.date == 18106][data1.trial == 10]),
                np.exp(data1.annd975[data1.gs==i][data1.date == 18106][data1.trial == 10]), alpha = 0.3, color = colors[count],label = str(i), lw = 0)
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel('ANND (BL)', size = fs)
        legend = plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        plt.setp(legend.get_title(),fontsize='xx-large')
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/annd_after_loom_predictions_wo_data_all_low_res.png'
        fig.savefig(out_dir, dpi = 100)
        plt.show()
#model_glm_7 <- glm(startles_during_loom ~ I(Temperature^2) + Temperature + Groupsize + I(Groupsize^2) + Loom, family = poisson, data1)
# Number of startles during the first loom vs temperature; predictions are used
# directly (no back-transform), so the CSV presumably holds response-scale values.
if args.a_string=='number_startles_predictions.csv':
    data1 = pd.read_csv('../../data/temp_collective/roi/number_startles_predictions.csv')
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_w_loom.csv')
    gs = [1,2,4,8,16]
    loom = [1,5]  # NOTE(review): unused in this section -- plots are fixed at Loom == 1
    colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+2))
    #Plotting
    if args.verbose==True:
        # Raw counts plus prediction curve + 95% band for every group size, loom 1.
        for i in gs:
            ax.scatter(data2.Temperature[data2.Groupsize == i][data2.Loom == 1],
                data2["number_startles"][data2.Groupsize == i][data2.Loom == 1], alpha = 0.5, color = colors[count], s =10)
            ax.plot(
                data1.Temperature[data1.Groupsize == i][data1.Loom == 1],
                (data1.startles[data1.Groupsize ==i][data1.Loom == 1]), color = colors[count],
                label = str(i), lw = lw)
            ax.fill_between(
                data1.Temperature[data1.Groupsize == i][data1.Loom == 1],
                (data1.startles025[data1.Groupsize==i][data1.Loom == 1]),
                (data1.startles975[data1.Groupsize==i][data1.Loom == 1]), alpha = 0.3, color = colors[count])
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel('Number of startles', size = fs)
        ax.set_title('Loom = 1', fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29])
        out_dir = '../../output/temp_collective/roi_figures/predictions/startles_w_data_all.png'
        fig.savefig(out_dir, dpi = dpi)
        plt.show()
    else:
        # Predictions only, largest group size.
        gs = [16]
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+2))
        for i in gs:
            ax.plot(
                data1.Temperature[data1.Groupsize == i][data1.Loom == 1],
                (data1.startles[data1.Groupsize ==i][data1.Loom == 1]), color = colors[count],
                label = str(i), lw = lw)
            ax.fill_between(
                data1.Temperature[data1.Groupsize == i][data1.Loom == 1],
                (data1.startles025[data1.Groupsize ==i][data1.Loom == 1]),
                (data1.startles975[data1.Groupsize ==i][data1.Loom == 1]), alpha = 0.3, color = colors[count])
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel('Number of startles', size = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        ax.set_title('Loom = 1', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29])
        out_dir = '../../output/temp_collective/roi_figures/predictions/startles_predictions_wo_data.png'
        fig.savefig(out_dir, dpi = dpi)
        plt.show()
#model_pois6 <- glm(latency ~ temp*gs + I(temp^2) + loom, family = quasipoisson, my_new_data2)
# Latency to respond to the first loom vs temperature; `pred`/`lcb`/`ucb` columns hold
# the fitted value and confidence bounds (used as-is, no back-transform).
if args.a_string=='latency_predictions.csv':
    data1 = pd.read_csv('../../data/temp_collective/roi/'+args.a_string)
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_w_loom.csv')
    gs = [1,2,4,8,16]
    colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+2))
    #Plotting
    if args.verbose==True:
        # Raw latencies plus prediction curve + CI band for every group size, loom 1.
        for i in gs:
            ax.scatter(data2.Temperature[data2.Groupsize == i][data2.Loom == 1],
                data2.latency[data2.Groupsize == i][data2.Loom == 1], s = 10, alpha = 0.5, color = colors[count])
            ax.plot(
                data1.temp[data1.gs== i][data1.loom == 1],
                (data1.pred[data1.gs==i][data1.loom == 1]), color = colors[count],
                lw = lw)
            ax.fill_between(
                data1.temp[data1.gs== i][data1.loom == 1],
                (data1.lcb[data1.gs==i][data1.loom == 1]),
                (data1.ucb[data1.gs==i][data1.loom == 1]), alpha = 0.3, color = colors[count], label = str(i),lw = 0)
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel('Latency', size = fs)
        #ax.set_title('Loom = 1', fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.yticks(ticks = [580,585,590,595], labels = [580,585,590,595],fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/latency_w_data.png'
        fig.savefig(out_dir, dpi = 100)
        plt.show()
    else:
        # Predictions only, smallest and largest group sizes.
        gs = [1,16]
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+2))
        for i in gs:
            ax.plot(
                data1.temp[data1.gs== i][data1.loom == 1],
                (data1.pred[data1.gs==i][data1.loom == 1]), color = colors[count],
                lw = lw)
            ax.fill_between(
                data1.temp[data1.gs== i][data1.loom == 1],
                (data1.lcb[data1.gs==i][data1.loom == 1]),
                (data1.ucb[data1.gs==i][data1.loom == 1]), alpha = 0.3, color = colors[count], label = str(i),lw = 0)
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel('Latency', size = fs)
        #ax.set_title('Loom = 1', fontsize = fs)
        legend = plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        plt.setp(legend.get_title(),fontsize='xx-large')
        plt.yticks(ticks = [580,585,590,595], labels = [580,585,590,595],fontsize = fs)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/latency_wo_data.png'
        fig.savefig(out_dir, dpi = 100)
        plt.show()
#model_glm <- glm(hull ~ gs + temp*loom + I(temp^2) + date, my_data, family = "Gamma")
# Ratio of convex hull area during loom to convex hull area after loom, vs temperature.
# Uses `data1` loaded at the top of the script (selected by -a); predictions are plotted
# as-is. date == 18106 presumably fixes the date covariate at a reference value -- confirm.
if args.a_string=='hull_ratio_600_650_predictions.csv':
    data2 = pd.read_csv('../../data/temp_collective/roi/convex_hull_ratio_600_650w_loom.csv')
    data_hull = data2.convex_hull_area_600_650
    gs = [16]
    loom = [1,5]
    colors = plt.cm.bone_r(np.linspace(0,1,len(loom)+2))
    if args.verbose==True:
        # Raw ratios plus one prediction curve + CI band per loom number (group size 16).
        for i in gs:
            for j in loom:
                ax.scatter(data2.Temperature[data2.Groupsize == i][data2.Loom == j],
                    data_hull[data2.Groupsize == i][data2.Loom == j], s = 10, alpha = 0.5,
                    color = colors[count])
                ax.plot(
                    data1.temp[data1.gs== i][data1.loom == j][data1.date == 18106],
                    (data1.hull[data1.gs==i][data1.loom == j][data1.date == 18106]),
                    color = colors[count], label = str(j), lw = lw)
                ax.fill_between(
                    data1.temp[data1.gs== i][data1.loom == j][data1.date == 18106],
                    (data1.hull025[data1.gs==i][data1.loom == j][data1.date == 18106]),
                    (data1.hull975[data1.gs==i][data1.loom == j][data1.date == 18106]), alpha = 0.3,
                    color = colors[count], lw = 0)
                count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Ratio of convex hull area during loom to \n convex hull area after loom', size = fs)
        ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29])
        out_dir = '../../output/temp_collective/roi_figures/predictions/convex_hull_ratio_w_data_loom_gs16.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
    else:
        # Predictions only.
        for i in gs:
            for j in loom:
                ax.plot(
                    data1.temp[data1.gs== i][data1.loom == j][data1.date == 18106],
                    (data1.hull[data1.gs==i][data1.loom == j][data1.date == 18106]),
                    color = colors[count], label = str(j), lw = lw)
                ax.fill_between(
                    data1.temp[data1.gs== i][data1.loom == j][data1.date == 18106],
                    (data1.hull025[data1.gs==i][data1.loom == j][data1.date == 18106]),
                    (data1.hull975[data1.gs==i][data1.loom == j][data1.date == 18106]), alpha = 0.3,
                    color = colors[count], lw = 0)
                count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        # FIX: label previously read "convex hull before after loom" (garbled); made it
        # match the verbose branch, which plots the identical quantity.
        plt.ylabel(
            'Ratio of convex hull area during loom to \n convex hull area after loom', size = fs)
        ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29])
        out_dir = '../../output/temp_collective/roi_figures/predictions/convex_hull_ratio_wo_data_loom_gs16.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
#hull ratio - during/before
#model_lm <- lm(log(hull)~ log(gs,2) + I(temp^2) + loom + date, my_data)
# Ratio of convex hull area during loom to convex hull area BEFORE loom, vs temperature.
# The model is on the log scale (see R formula above), hence np.exp() on predictions/CIs.
if args.a_string=='hull_ratio_600_650_predictions2.csv':
    data2 = pd.read_csv('../../data/temp_collective/roi/convex_hull_ratio_600_650w_loom.csv')
    data_hull = data2.convex_hull_area_ratio_loom
    gs = [4,8,16]
    loom = [1]
    colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
    count = 1  # reset the shared color index for this palette
    if args.verbose==True:
        # Raw ratios plus prediction curve + CI band per group size (loom 1).
        for i in gs:
            for j in loom:
                ax.scatter(data2.Temperature[data2.Groupsize == i][data2.Loom == j],
                    data_hull[data2.Groupsize == i][data2.Loom == j], s = 10, alpha = 0.5,
                    color = colors[count])
                ax.plot(
                    data1.temp[data1.gs== i][data1.loom == j][data1.date == 18106],
                    np.exp(data1.hull[data1.gs==i][data1.loom == j][data1.date == 18106]),
                    color = colors[count], label = str(i), lw = lw)
                ax.fill_between(
                    data1.temp[data1.gs== i][data1.loom == j][data1.date == 18106],
                    np.exp(data1.hull025[data1.gs==i][data1.loom == j][data1.date == 18106]),
                    np.exp(data1.hull975[data1.gs==i][data1.loom == j][data1.date == 18106]), alpha = 0.3,
                    color = colors[count], lw = 0)
                count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Ratio of convex hull area during loom to \n convex hull before loom', size = fs)
        ax.set_title('Loom = '+str(loom[0]), fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29])
        out_dir = '../../output/temp_collective/roi_figures/predictions/convex_hull_ratio_loom_w_data_loom1_gs_all.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
    else:
        # Predictions only, largest group size.
        gs = [16]
        loom = [1]
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
        count = 1
        for i in gs:
            for j in loom:
                ax.plot(
                    data1.temp[data1.gs== i][data1.loom == j][data1.date == 18106],
                    np.exp(data1.hull[data1.gs==i][data1.loom == j][data1.date == 18106]),
                    color = colors[count], label = str(j), lw = lw)
                ax.fill_between(
                    data1.temp[data1.gs== i][data1.loom == j][data1.date == 18106],
                    np.exp(data1.hull025[data1.gs==i][data1.loom == j][data1.date == 18106]),
                    np.exp(data1.hull975[data1.gs==i][data1.loom == j][data1.date == 18106]), alpha = 0.3,
                    color = colors[count], lw = 0)
                count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        # FIX: label previously said "after loom", but this section plots the
        # during/before ratio (see section header and the verbose branch's label).
        plt.ylabel(
            'Ratio of convex hull area during loom to \n convex hull before loom', size = fs)
        ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29])
        out_dir = '../../output/temp_collective/roi_figures/predictions/convex_hull_ratio_loom_wo_data_loom1_gs16.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
#model_lm <- lm(log(speed+1) ~ temp,my_data)
# 99th percentile of speed before the loom vs temperature. Model is on the
# log(speed+1) scale (see formula above), hence np.exp(...) - 1 on predictions/CIs.
if args.a_string=='speed99_before_loom_predictions.csv':
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_wo_loom.csv')
    data_hull = data2.speed_percentile99
    colors = plt.cm.bone_r(np.linspace(0,1,3))
    if args.verbose==True:
        # Raw percentiles plus a single prediction curve + CI band (no group-size split).
        ax.scatter(data2.Temperature,
            data_hull, s = 10, alpha = 0.5,
            color = colors[count])
        ax.plot(
            data1.temp,
            np.exp(data1.speed99)-1,
            color = colors[count], lw = lw)
        ax.fill_between(
            data1.temp,
            np.exp(data1.speed99_025)-1,
            np.exp(data1.speed99_975)-1, alpha = 0.3,
            color = colors[count], lw = 0)
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            '99th percentile of speed \n before loom (BL/s)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/speed_percentile99_w_data.png'
        fig.savefig(out_dir, dpi = dpi)
        plt.show()
    else:
        # Predictions only.
        ax.plot(
            data1.temp,
            np.exp(data1.speed99)-1,
            color = colors[count], lw = lw)
        ax.fill_between(
            data1.temp,
            np.exp(data1.speed99_025)-1,
            np.exp(data1.speed99_975)-1, alpha = 0.3,
            color = colors[count], lw = 0)
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            '99th percentile of speed \n before loom (BL/s)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/speed_percentile99_wo_data.png'
        fig.savefig(out_dir, dpi = dpi)
        plt.show()
"""
#model_lm <- lm(log(speed+1) ~ temp + temp^2,my_data)
if args.a_string=='speed99_before_loom_predictions_new.csv':
data2 = pd.read_csv('../../data/temp_collective/roi/all_params_wo_loom.csv')
data_hull = data2.speed_percentile99
colors = plt.cm.bone_r(np.linspace(0,1,3))
if args.verbose==True:
ax.scatter(data2.Temperature,
data_hull, s = 10, alpha = 0.5,
color = colors[count])
ax.plot(
data1.temp,
np.exp(data1.speed99)-1,
color = colors[count], lw = lw)
ax.fill_between(
data1.temp,
np.exp(data1.speed99_025)-1,
np.exp(data1.speed99_975)-1, alpha = 0.3,
color = colors[count], lw = 0)
plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
plt.ylabel(
'99th percentile of speed (BL/s)', size = fs)
#ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
#plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
#ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
plt.yticks(fontsize = fs)
out_dir = '../../output/temp_collective/roi_figures/predictions/speed_percentile99_new_w_data.png'
fig.savefig(out_dir, dpi = 300)
plt.show()
else:
ax.plot(
data1.temp,
np.exp(data1.speed99)-1,
color = colors[count], lw = lw)
ax.fill_between(
data1.temp,
np.exp(data1.speed99_025)-1,
np.exp(data1.speed99_975)-1, alpha = 0.3,
color = colors[count], lw = 0)
plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
plt.ylabel(
'99th percentile of speed (BL/s)', size = fs)
#ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
#plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
#ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
plt.yticks(fontsize = fs)
out_dir = '../../output/temp_collective/roi_figures/predictions/speed_percentile99_new_wo_data.png'
fig.savefig(out_dir, dpi = 300)
plt.show()
"""
#median speed during unperturbed swimming
#model_lm <- lm(log(speed+1) ~ temp ,my_data)
# Median (50th percentile) speed vs temperature; log(speed+1) model, so
# predictions/CIs are back-transformed with np.exp(...) - 1.
# NOTE(review): the prediction columns are named speed99/speed99_025/speed99_975 even
# though this is the median-speed file -- presumably the column names were reused when
# the predictions CSV was generated; confirm against the R script.
if args.a_string=='speed50_before_loom_predictions_new.csv':
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_wo_loom.csv')
    data_hull = data2.speed_percentile50
    colors = plt.cm.bone_r(np.linspace(0,1,3))
    if args.verbose==True:
        # Raw medians plus a single prediction curve + CI band.
        ax.scatter(data2.Temperature,
            data_hull, s = 10, alpha = 0.5,
            color = colors[count])
        ax.plot(
            data1.temp,
            np.exp(data1.speed99)-1,
            color = colors[count], lw = lw)
        ax.fill_between(
            data1.temp,
            np.exp(data1.speed99_025)-1,
            np.exp(data1.speed99_975)-1, alpha = 0.3,
            color = colors[count], lw = 0)
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Median speed (BL/s)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/speed_percentile50_new_w_data.png'
        fig.savefig(out_dir, dpi = dpi)
        plt.show()
    else:
        # Predictions only.
        ax.plot(
            data1.temp,
            np.exp(data1.speed99)-1,
            color = colors[count], lw = lw)
        ax.fill_between(
            data1.temp,
            np.exp(data1.speed99_025)-1,
            np.exp(data1.speed99_975)-1, alpha = 0.3,
            color = colors[count], lw = 0)
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Median speed (BL/s)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/speed_percentile50_new_wo_data.png'
        fig.savefig(out_dir, dpi = dpi)
        plt.show()
#average speed during unperturbed swimming
#model_lm <- lm(log(speed+1) ~ temp ,my_data)
# Mean speed vs temperature; same log(speed+1) back-transform as the sections above.
# NOTE(review): prediction columns are again named speed99/speed99_025/speed99_975
# despite this being the mean-speed file -- presumably reused column names; confirm.
if args.a_string=='speed_avg_before_loom_predictions_new.csv':
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_wo_loom.csv')
    data_hull = data2.avg_speed
    colors = plt.cm.bone_r(np.linspace(0,1,3))
    if args.verbose==True:
        # Raw means plus a single prediction curve + CI band.
        ax.scatter(data2.Temperature,
            data_hull, s = 10, alpha = 0.5,
            color = colors[count])
        ax.plot(
            data1.temp,
            np.exp(data1.speed99)-1,
            color = colors[count], lw = lw)
        ax.fill_between(
            data1.temp,
            np.exp(data1.speed99_025)-1,
            np.exp(data1.speed99_975)-1, alpha = 0.3,
            color = colors[count], lw = 0)
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Mean speed (BL/s)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/speed_avg_new_w_data.png'
        fig.savefig(out_dir, dpi = dpi)
        plt.show()
    else:
        # Predictions only.
        ax.plot(
            data1.temp,
            np.exp(data1.speed99)-1,
            color = colors[count], lw = lw)
        ax.fill_between(
            data1.temp,
            np.exp(data1.speed99_025)-1,
            np.exp(data1.speed99_975)-1, alpha = 0.3,
            color = colors[count], lw = 0)
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Mean speed (BL/s)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/speed_avg_new_wo_data.png'
        fig.savefig(out_dir, dpi = dpi)
        plt.show()
## loom speed predictions
# 99th percentile of speed during the loom vs temperature. Predictions/CIs are
# squared (**2), so the fitted model is presumably on the sqrt(speed) scale -- confirm
# (cf. the explicit (speed)^0.5 model in the next section).
if args.a_string=='loom_speed_predictions.csv':
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_w_loom.csv')
    data_hull = data2.speed_percentile99  # NOTE(review): unused here -- scatter indexes data2 directly
    if args.verbose==True:
        # Raw percentiles plus prediction curve + CI band per group size (loom 1).
        gs = [1,2,4,8,16]
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+2))
        for i in gs:
            ax.scatter(data2.Temperature[data2.Groupsize == i][data2.Loom == 1],
                data2.speed_percentile99[data2.Groupsize == i][data2.Loom == 1], alpha = 0.5, color = colors[count], s =10)
            ax.plot(
                data1.Temperature[data1.Groupsize == i][data1.loom == 1],
                (data1.loom_speed[data1.Groupsize==i][data1.loom == 1])**2, color = colors[count], label = str(i), lw = lw)
            ax.fill_between(
                data1.Temperature[data1.Groupsize == i][data1.loom == 1],
                (data1.loom_speed025[data1.Groupsize==i][data1.loom == 1])**2,
                (data1.loom_speed975[data1.Groupsize==i][data1.loom == 1])**2, alpha = 0.3, color = colors[count], lw = 0)
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel('99th percentile of speed \n during loom (BL/s)', size = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        #ax.set_title('Loom = 1', fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/loom_speed_predictions_w_data_all.png'
        fig.savefig(out_dir, dpi = 100)
        plt.show()
    else:
        # Predictions only, largest group size.
        gs = [16]
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+2))
        for i in gs:
            ax.plot(
                data1.Temperature[data1.Groupsize == i][data1.loom == 1],
                (data1.loom_speed[data1.Groupsize==i][data1.loom == 1])**2, color = colors[count], lw = lw)
            ax.fill_between(
                data1.Temperature[data1.Groupsize == i][data1.loom == 1],
                (data1.loom_speed025[data1.Groupsize==i][data1.loom == 1])**2,
                (data1.loom_speed975[data1.Groupsize==i][data1.loom == 1])**2, alpha = 0.3, color = colors[count], lw = 0)
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            '99th percentile of speed \n during loom (BL/s)', size = fs)
        #ax.set_title('Groupsize = 16, Loom = 1', fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/loom_speed_predictions_wo_data.png'
        fig.savefig(out_dir, dpi = 100)
        plt.show()
## 99th percentile of loom speed predictions - 2 (after including t1)
# model_lm <- lm((speed)^0.5 ~ temp + I(temp^2) + log(gs,2) + loom + I(log(gs,2)^2) + t1,my_data)
# rsq 0.1872
# Same response as the previous section but from a model that includes the trial
# start time t1; the model is on the sqrt scale (see formula), hence **2 on
# predictions/CIs. t1 == 1620244800 fixes the time covariate at one reference
# value (a Unix-epoch timestamp, presumably) -- confirm.
if args.a_string=='99th_percentile_speed_during_loom_with_t1_predictions2.csv':
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_w_loom.csv')
    data_hull = data2.speed_percentile99  # NOTE(review): unused here -- scatter indexes data2 directly
    if args.verbose==True:
        # Raw percentiles plus prediction curve + CI band per group size (loom 1).
        gs = [1,2,4,8,16]
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
        count = 1  # reset the shared color index for this palette
        for i in gs:
            ax.scatter(data2.Temperature[data2.Groupsize == i][data2.Loom == 1],
                data2.speed_percentile99[data2.Groupsize == i][data2.Loom == 1], alpha = 0.5, color = colors[count], s =10)
            ax.plot(
                data1.temp[data1.gs == i][data1.loom == 1][data1.t1 == 1620244800],
                (data1.speed99[data1.gs==i][data1.loom == 1][data1.t1 == 1620244800])**2, color = colors[count], label = str(i), lw = lw)
            ax.fill_between(
                data1.temp[data1.gs == i][data1.loom == 1][data1.t1 == 1620244800],
                (data1.speed99_025[data1.gs==i][data1.loom == 1][data1.t1 == 1620244800])**2,
                (data1.speed99_975[data1.gs==i][data1.loom == 1][data1.t1 == 1620244800])**2, alpha = 0.3, color = colors[count], lw = 0)
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel('99th percentile of speed \n during loom (BL/s)', size = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        ax.set_title('Loom = 1', fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/loom_speed_predictions2_w_data_all.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
    else:
        # Predictions only, largest group size.
        gs = [16]
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
        count = 1
        for i in gs:
            ax.plot(
                data1.temp[data1.gs == i][data1.loom == 1][data1.t1 == 1620244800],
                (data1.speed99[data1.gs==i][data1.loom == 1][data1.t1 == 1620244800])**2, color = colors[count], label = str(i), lw = lw)
            ax.fill_between(
                data1.temp[data1.gs == i][data1.loom == 1][data1.t1 == 1620244800],
                (data1.speed99_025[data1.gs==i][data1.loom == 1][data1.t1 == 1620244800])**2,
                (data1.speed99_975[data1.gs==i][data1.loom == 1][data1.t1 == 1620244800])**2, alpha = 0.3, color = colors[count], lw = 0)
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            '99th percentile of speed \n during loom (BL/s)', size = fs)
        ax.set_title('Groupsize = 16, Loom = 1', fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/loom_speed_predictions2_wo_data.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
#model_glm <- glm(prop_startles ~ temp + I(temp^2) + loom + t+date, family = binomial,my_data)
# Proportion of individuals that startle vs temperature; a single curve with the
# covariates fixed at loom == 1, date == 18106, t == 1200 (reference values,
# presumably) -- confirm against the R script.
if args.a_string=='prop_startles_predictions.csv':
    if args.verbose==True:
        colors = plt.cm.bone_r(np.linspace(0,1,3))
        ax.scatter(data2.Temperature,
            data2.prop_startles, s = 10, alpha = 0.5,
            color = colors[count])
        ax.plot(
            data1.temp[data1.loom == 1][data1.date == 18106][data1.t == 1200],
            (data1.prop_startles[data1.loom == 1][data1.date == 18106][data1.t == 1200]),
            color = colors[count], lw = lw)
        ax.fill_between(
            data1.temp[data1.loom == 1][data1.date == 18106][data1.t == 1200],
            (data1.prop_startles025[data1.loom == 1][data1.date == 18106][data1.t == 1200]),
            (data1.prop_startles975[data1.loom == 1][data1.date == 18106][data1.t == 1200]), alpha = 0.3,
            color = colors[count], lw = 0)
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Proportion of individuals that startle', size = fs)
        #ax.set_title('Loom = 1', fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/prop_startles_w_data_loom_1_all.png'
        fig.savefig(out_dir, dpi = dpi)
        plt.show()
    else:
        # Predictions only.
        colors = plt.cm.bone_r(np.linspace(0,1,3))
        ax.plot(
            data1.temp[data1.loom == 1][data1.date == 18106][data1.t == 1200],
            (data1.prop_startles[data1.loom == 1][data1.date == 18106][data1.t == 1200]),
            color = colors[count], lw = lw)
        ax.fill_between(
            data1.temp[data1.loom == 1][data1.date == 18106][data1.t == 1200],
            (data1.prop_startles025[data1.loom == 1][data1.date == 18106][data1.t == 1200]),
            (data1.prop_startles975[data1.loom == 1][data1.date == 18106][data1.t == 1200]), alpha = 0.3,
            color = colors[count], lw = 0)
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Proportion of individuals that startle', size = fs)
        #ax.set_title('Loom = 1', fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        #plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29])
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/prop_startles_wo_data_loom_1_all.png'
        fig.savefig(out_dir, dpi = dpi)
        plt.show()
#model_glm <- glm(prop_startles ~ temp + I(temp^2) + loom +date, family = binomial,my_data)
# Same proportion-startling plot from a model without the time-of-day term `t`;
# covariates fixed at loom == 1, date == 18106 (reference values, presumably).
if args.a_string=='prop_startles_predictions2.csv':
    if args.verbose==True:
        colors = plt.cm.bone_r(np.linspace(0,1,3))
        ax.scatter(data2.Temperature,
            data2.prop_startles, s = 10, alpha = 0.5,
            color = colors[count])
        ax.plot(
            data1.temp[data1.loom == 1][data1.date == 18106],
            (data1.prop_startles[data1.loom == 1][data1.date == 18106]),
            color = colors[count], lw = lw)
        ax.fill_between(
            data1.temp[data1.loom == 1][data1.date == 18106],
            (data1.prop_startles025[data1.loom == 1][data1.date == 18106]),
            (data1.prop_startles975[data1.loom == 1][data1.date == 18106]), alpha = 0.3,
            color = colors[count], lw = 0)
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Proportion of individuals that startle', size = fs)
        #ax.set_title('Loom = 1', fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29],fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/prop_startles_w_data_loom_1_all_new.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
    else:
        # Predictions only.
        colors = plt.cm.bone_r(np.linspace(0,1,3))
        ax.plot(
            data1.temp[data1.loom == 1][data1.date == 18106],
            (data1.prop_startles[data1.loom == 1][data1.date == 18106]),
            color = colors[count], lw = lw)
        ax.fill_between(
            data1.temp[data1.loom == 1][data1.date == 18106],
            (data1.prop_startles025[data1.loom == 1][data1.date == 18106]),
            (data1.prop_startles975[data1.loom == 1][data1.date == 18106]), alpha = 0.3,
            color = colors[count], lw = 0)
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Proportion of individuals that startle', size = fs)
        #ax.set_title('Loom = 1', fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29],fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/prop_startles_wo_data_loom_1_all_new.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
## loom acceleration
#model_lm <- lm(log(acc+1) ~ temp + I(temp^2)*log(gs,2)*loom + date + t,my_data)
# Plot predicted 99th-percentile acceleration during the loom vs. temperature,
# one curve per group size (R model, per comment above:
# lm(log(acc+1) ~ temp + I(temp^2)*log(gs,2)*loom + date + t)).
# Predictions are back-transformed with np.exp(...)-1 to undo the log(acc+1)
# link, and held at loom==1, date==18106, t==1200 (t's meaning not shown in
# this chunk — presumably a time covariate; confirm upstream).
if args.a_string=='loom_acc_99_predictions.csv':
    if args.verbose==True:
        # Verbose: one scatter + curve + 95% band per group size.
        gs = [1,2,4,8,16]
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+2))
        for i in gs:
            ax.scatter(data2.Temperature[data2.Groupsize==i],
                data2.acc_percentile99[data2.Groupsize ==i], s = 10, alpha = 0.5,
                color = colors[count])
            ax.plot(
                data1.temp[data1.loom == 1][data1.date == 18106][data1.t == 1200][data1.gs == i],
                np.exp(data1.acc99[data1.loom == 1][data1.date == 18106][data1.t == 1200][data1.gs == i])-1,
                color = colors[count], lw = lw, label = str(i))
            ax.fill_between(
                data1.temp[data1.loom == 1][data1.date == 18106][data1.t == 1200][data1.gs == i],
                np.exp(data1.acc99_025[data1.loom == 1][data1.date == 18106][data1.t == 1200][data1.gs == i])-1,
                np.exp(data1.acc99_975[data1.loom == 1][data1.date == 18106][data1.t == 1200][data1.gs == i])-1,
                alpha = 0.3, color = colors[count], lw = 0)
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            '99th percentile of acceleration \n during loom (BL/s'+r'$^2$)', size = fs)
        #ax.set_title('Loom = 1', fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29],fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/acc_w_data_loom_1_all.png'
        fig.savefig(out_dir, dpi = 100)
        plt.show()
    else:
        # Non-verbose: largest group size only, no raw data.
        gs = [16]
        colors = plt.cm.bone_r(np.linspace(0,1,3))
        for i in gs:
            ax.plot(
                data1.temp[data1.loom == 1][data1.date == 18106][data1.t == 1200][data1.gs == i],
                np.exp(data1.acc99[data1.loom == 1][data1.date == 18106][data1.t == 1200][data1.gs == i])-1,
                color = colors[count], lw = lw, label = str(i))
            ax.fill_between(
                data1.temp[data1.loom == 1][data1.date == 18106][data1.t == 1200][data1.gs == i],
                np.exp(data1.acc99_025[data1.loom == 1][data1.date == 18106][data1.t == 1200][data1.gs == i])-1,
                np.exp(data1.acc99_975[data1.loom == 1][data1.date == 18106][data1.t == 1200][data1.gs == i])-1,
                alpha = 0.3, color = colors[count], lw = 0)
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            '99th percentile of acceleration \n during loom (BL/s'+r'$^2$)', size = fs)
        #ax.set_title('Groupsize = 16, Loom = 1', fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29],fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/acc_wo_data_loom_1.png'
        fig.savefig(out_dir, dpi = 100)
        plt.show()
#startle distance
#model_lm <-
#lm(log(distance) ~ temp + gs + temp*gs + loom + I(temp^2)*gs + loom*I(temp^2) + loom*gs + date, my_data)
# Plot predicted startle distance vs. temperature (R model, per comment above:
# lm(log(distance) ~ temp + gs + temp*gs + loom + I(temp^2)*gs +
# loom*I(temp^2) + loom*gs + date)).  Predictions back-transformed with
# np.exp(...) to undo log(distance); values divided by 60 — presumably a
# pixels/frames -> body-lengths conversion factor; confirm against upstream.
if args.a_string=='startle_distance_predictions2.csv':
    # This block reloads its own prediction and observation frames.
    data1 = pd.read_csv('../../data/temp_collective/roi/'+args.a_string)
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_w_loom_startle.csv')
    gs = [1,2,4,8,16]
    colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+2))
    #Plotting
    if args.verbose==True:
        # Verbose: raw data + one curve/band per group size, loom 1 only.
        gs = [1,2,4,8,16]
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
        count = 1
        for i in gs:
            ax.scatter(data2.Temperature[data2.Groupsize == i][data2.Loom == 1],
                data2.distance[data2.Groupsize == i][data2.Loom == 1]/60, s = 10, alpha = 0.5, color = colors[count])
            ax.plot(
                data1.temp[data1.gs== i][data1.loom == 1][data1.date == 18106],
                np.exp(data1.distance[data1.gs==i][data1.loom == 1][data1.date == 18106])/60,
                color = colors[count],
                lw = lw)
            ax.fill_between(
                data1.temp[data1.gs== i][data1.loom == 1][data1.date == 18106],
                np.exp(data1.distance_025[data1.gs==i][data1.loom == 1][data1.date == 18106])/60,
                np.exp(data1.distance_975[data1.gs==i][data1.loom == 1][data1.date == 18106])/60,
                label = str(i), alpha = 0.3, color = colors[count], lw = 0)
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel('Distance (BL)', size = fs)
        ax.set_title('Loom = 1', fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/distance_w_data_loom1.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
    else:
        # Non-verbose: group size 16, comparing looms 1 and 5 (legend = loom).
        gs = [16]
        loom = [1,5]
        colors = plt.cm.bone_r(np.linspace(0,1,len(loom)+1))
        count = 1
        for i in gs:
            for j in loom:
                ax.plot(
                    data1.temp[data1.gs== i][data1.loom == j][data1.date == 18106],
                    np.exp(data1.distance[data1.gs==i][data1.loom == j][data1.date == 18106])/60, color = colors[count],
                    lw = lw)
                ax.fill_between(
                    data1.temp[data1.gs== i][data1.loom == j][data1.date == 18106],
                    np.exp(data1.distance_025[data1.gs==i][data1.loom == j][data1.date == 18106])/60,
                    np.exp(data1.distance_975[data1.gs==i][data1.loom == j][data1.date == 18106])/60,
                    alpha = 0.3, label = str(j), color = colors[count], lw = 0)
                count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel('Distance (BL)', size = fs)
        ax.set_title('Groupsize = 16', fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/distance_wo_data_gs16.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
### pre loom acceleration
#model_lm <- lm(log(acc+1) ~ temp + log(gs,2) + I(log(gs,2)^2),my_new_data)
# r sq 0.1936
# Plot predicted 99th-percentile acceleration BEFORE the loom vs. temperature,
# one curve per group size (R model, per comment above:
# lm(log(acc+1) ~ temp + log(gs,2) + I(log(gs,2)^2)), r^2 = 0.1936).
# Predictions are back-transformed with np.exp(...)-1 to undo log(acc+1).
# FIX: the verbose branch's y-label read '(BL/s)' while the non-verbose
# branch correctly labels acceleration as '(BL/s^2)'; the unit is now
# consistent (acceleration, body lengths per second squared).
if args.a_string=='acc_99_predictions.csv':
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_wo_loom.csv')
    # Row 127 dropped — presumably a known outlier/bad trial; confirm upstream.
    data2 = data2.drop(labels = 127)
    data_hull = data2.acc_percentile99
    gs = [1,2,4,8,16]
    count = 1
    colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
    if args.verbose==True:
        # Verbose: raw observations + one prediction curve/band per group size.
        for i in gs:
            ax.scatter(data2.Temperature[data2.Groupsize == i],
                data_hull[data2.Groupsize == i], s = 10, alpha = 0.5,
                color = colors[count])
            ax.plot(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc99[data1.gs ==i])-1,
                color = colors[count], lw = lw)
            ax.fill_between(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc99_025[data1.gs ==i])-1,
                np.exp(data1.acc99_975[data1.gs ==i])-1, alpha = 0.3,
                color = colors[count], lw = 0,label = str(i))
            count +=1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        # Acceleration units: BL/s^2 (was mislabeled BL/s).
        plt.ylabel(
            '99th percentile of acceleration \n before loom (BL/s'+r'$^2$)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        out_dir = '../../output/temp_collective/roi_figures/predictions/acc_percentile99_w_data.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
    else:
        # Non-verbose: group size 16 only, prediction curve and band.
        gs = [16]
        count = 1
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
        for i in gs:
            ax.plot(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc99[data1.gs ==i])-1,
                color = colors[count], lw = lw)
            ax.fill_between(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc99_025[data1.gs ==i])-1,
                np.exp(data1.acc99_975[data1.gs ==i])-1, alpha = 0.3,
                color = colors[count], lw = 0, label = str(i))
            count +=1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            '99th percentile of acceleration \n before loom (BL/s'+r'$^2$)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        #ax.set_title('Groupsize = 16', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        out_dir = '../../output/temp_collective/roi_figures/predictions/acc_percentile99_wo_data.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
"""
#model_lm <- lm(log(acc+1) ~ temp + I(temp^2) + log(gs,2) + I(log(gs,2)^2),my_new_data)
# r sq 0.1962
if args.a_string=='acc_99_predictions_new_squared.csv':
data2 = pd.read_csv('../../data/temp_collective/roi/all_params_wo_loom.csv')
data2 = data2.drop(labels = 127)
data_hull = data2.acc_percentile99
gs = [1,2,4,8,16]
count = 1
colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
if args.verbose==True:
for i in gs:
ax.scatter(data2.Temperature[data2.Groupsize == i],
data_hull[data2.Groupsize == i], s = 10, alpha = 0.5,
color = colors[count])
ax.plot(
data1.temp[data1.gs ==i],
np.exp(data1.acc99[data1.gs ==i])-1,
color = colors[count], lw = lw)
ax.fill_between(
data1.temp[data1.gs ==i],
np.exp(data1.acc99_025[data1.gs ==i])-1,
np.exp(data1.acc99_975[data1.gs ==i])-1, alpha = 0.3,
color = colors[count], lw = 0,label = str(i))
count +=1
plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
plt.ylabel(
'99th percentile of acceleration (BL/s'+r'$^2$)', size = fs)
#ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
#plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
#ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
plt.yticks(fontsize = fs)
plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
out_dir = '../../output/temp_collective/roi_figures/predictions/acc_percentile99_w_data_new_squared.png'
fig.savefig(out_dir, dpi = 300)
plt.show()
else:
gs = [16]
count = 1
colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
for i in gs:
ax.plot(
data1.temp[data1.gs ==i],
np.exp(data1.acc99[data1.gs ==i])-1,
color = colors[count], lw = lw)
ax.fill_between(
data1.temp[data1.gs ==i],
np.exp(data1.acc99_025[data1.gs ==i])-1,
np.exp(data1.acc99_975[data1.gs ==i])-1, alpha = 0.3,
color = colors[count], lw = 0, label = str(i))
count +=1
plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
plt.ylabel(
'99th percentile of acceleration (BL/s'+r'$^2$)', size = fs)
#ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
#plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
#ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
#ax.set_title('Groupsize = 16', fontsize = fs)
plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
plt.yticks(fontsize = fs)
#plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
out_dir = '../../output/temp_collective/roi_figures/predictions/acc_percentile99_wo_data_new_squared.png'
fig.savefig(out_dir, dpi = 300)
plt.show()
"""
#startle distance corrected
#model_lm <-
#log(distance) ~ temp + log(gs,2) + temp*log(gs,2) + loom*temp + I(temp^2)*log(gs,2) + loom*I(temp^2)
#+date + loom*log(gs,2), my_data
# Plot corrected startle-distance predictions vs. temperature (R model, per
# comment above: lm(log(distance) ~ temp + log(gs,2) + temp*log(gs,2) +
# loom*temp + I(temp^2)*log(gs,2) + loom*I(temp^2) + date + loom*log(gs,2))).
# Back-transformed with np.exp(...); divided by 60 — presumably a
# pixels/frames -> body-lengths conversion; confirm upstream.
if args.a_string=='startle_distance_predictions3.csv':
    # This block reloads its own prediction and (corrected) observation frames.
    data1 = pd.read_csv('../../data/temp_collective/roi/'+args.a_string)
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_w_loom_startle_corrected.csv')
    gs = [1,2,4,8,16]
    colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+2))
    #Plotting
    if args.verbose==True:
        # Verbose: raw data + one curve/band per group size, loom 1 only.
        gs = [1,2,4,8,16]
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
        count = 1
        for i in gs:
            ax.scatter(data2.Temperature[data2.Groupsize == i][data2.Loom == 1],
                data2.distance[data2.Groupsize == i][data2.Loom == 1]/60, s = 10, alpha = 0.5, color = colors[count])
            ax.plot(
                data1.temp[data1.gs== i][data1.loom == 1][data1.date == 18106],
                np.exp(data1.distance[data1.gs==i][data1.loom == 1][data1.date == 18106])/60,
                color = colors[count],
                lw = lw)
            ax.fill_between(
                data1.temp[data1.gs== i][data1.loom == 1][data1.date == 18106],
                np.exp(data1.distance_025[data1.gs==i][data1.loom == 1][data1.date == 18106])/60,
                np.exp(data1.distance_975[data1.gs==i][data1.loom == 1][data1.date == 18106])/60,
                label = str(i), alpha = 0.3, color = colors[count], lw = 0)
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel('Distance (BL)', size = fs)
        ax.set_title('Loom = 1', fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        # NOTE(review): same output path as the startle_distance_predictions2
        # block — running both overwrites this figure.
        out_dir = '../../output/temp_collective/roi_figures/predictions/distance_w_data_loom1.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
    else:
        # Non-verbose: compare group sizes 1 and 16 at loom 1 (legend = group size).
        gs = [1,16]
        loom = [1]
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
        count = 1
        for i in gs:
            for j in loom:
                ax.plot(
                    data1.temp[data1.gs== i][data1.loom == j][data1.date == 18106],
                    np.exp(data1.distance[data1.gs==i][data1.loom == j][data1.date == 18106])/60, color = colors[count],
                    lw = lw)
                ax.fill_between(
                    data1.temp[data1.gs== i][data1.loom == j][data1.date == 18106],
                    np.exp(data1.distance_025[data1.gs==i][data1.loom == j][data1.date == 18106])/60,
                    np.exp(data1.distance_975[data1.gs==i][data1.loom == j][data1.date == 18106])/60,
                    alpha = 0.3, label = str(i), color = colors[count], lw = 0)
                count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel('Distance (BL)', size = fs)
        ax.set_title('Loom = 1', fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/distance_wo_data_loom1.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
### pre loom annd
#model_lm <- lm(log(annd) ~ temp + log(gs,2), my_data)
#r sq = 0.76 #residuals not good
# Plot predicted pre-loom average nearest-neighbour distance (ANND) vs.
# temperature, per group size (R model, per comment above:
# lm(log(annd) ~ temp + log(gs,2)), r^2 = 0.76, residuals flagged as poor).
# Back-transformed with np.exp(...) to undo log(annd).  Group size 1 is
# excluded — ANND is undefined for a single fish.
if args.a_string=='annd_before_loom_predictions.csv':
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_wo_loom.csv')
    #data2 = data2.drop(labels = 127)
    data_hull = data2.annd
    gs = [2,4,8,16]
    count = 1
    colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
    if args.verbose==True:
        # Verbose: raw observations + one curve/band per group size.
        for i in gs:
            ax.scatter(data2.Temperature[data2.Groupsize == i],
                data_hull[data2.Groupsize == i], s = 10, alpha = 0.5,
                color = colors[count])
            ax.plot(
                data1.temp[data1.gs ==i],
                np.exp(data1.annd[data1.gs ==i]),
                color = colors[count], lw = lw)
            ax.fill_between(
                data1.temp[data1.gs ==i],
                np.exp(data1.annd025[data1.gs ==i]),
                np.exp(data1.annd975[data1.gs ==i]), alpha = 0.3,
                color = colors[count], lw = 0,label = str(i))
            count +=1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Average Nearest Neighbor Distance \n before loom (BL)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        out_dir = '../../output/temp_collective/roi_figures/predictions/annd_before_loom_w_data.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
    else:
        # Non-verbose: smallest and largest group sizes only, no raw data.
        gs = [2,16]
        count = 1
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
        for i in gs:
            ax.plot(
                data1.temp[data1.gs ==i],
                np.exp(data1.annd[data1.gs ==i]),
                color = colors[count], lw = lw)
            ax.fill_between(
                data1.temp[data1.gs ==i],
                np.exp(data1.annd025[data1.gs ==i]),
                np.exp(data1.annd975[data1.gs ==i]), alpha = 0.3,
                color = colors[count], lw = 0,label = str(i))
            count +=1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Average Nearest Neighbor Distance \n before loom (BL)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        #ax.set_title('Groupsize = 16', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        out_dir = '../../output/temp_collective/roi_figures/predictions/annd_before_loom_wo_data.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
#local polarization
#lm(pol ~ temp + loom + t1, my_data)
#r sq 0.03
# Plot predicted local polarization during the loom vs. temperature, one
# curve per loom number (R model, per comment above:
# lm(pol ~ temp + loom + t1), r^2 = 0.03).  Predictions held at the fixed
# timestamp t1 == 1618600800 (epoch seconds — presumably the recording time
# covariate; confirm upstream).
# FIX: in the verbose branch the scatter's y-values were masked with
# data2.Loom == 1 while the x-values used data2.Loom == i, so for looms 2-5
# the loom-1 polarization values were plotted against loom-i temperatures.
# Both masks now use the loop variable i.
if args.a_string=='pol1_during_loom_predictions.csv':
    data1 = pd.read_csv('../../data/temp_collective/roi/'+args.a_string)
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_w_loom_pol.csv')
    loom = [1,2,3,4,5]
    colors = plt.cm.bone_r(np.linspace(0,1,len(loom)+1))
    count = 1
    #Plotting
    if args.verbose==True:
        # Verbose: raw observations + one curve/band per loom.
        loom = [1,2,3,4,5]
        colors = plt.cm.bone_r(np.linspace(0,1,len(loom)+1))
        count = 1
        for i in loom:
            ax.scatter(data2.Temperature[data2.Loom == i],
                data2.polarization_1[data2.Loom == i], s = 10, alpha = 0.5, color = colors[count])
            ax.plot(
                data1.temp[data1.loom == i][data1.t1 == 1618600800],
                data1.pol_1[data1.loom == i][data1.t1 == 1618600800],
                color = colors[count],
                lw = lw)
            ax.fill_between(
                data1.temp[data1.loom == i][data1.t1== 1618600800],
                data1.pol1_025[data1.loom == i][data1.t1 == 1618600800],
                data1.pol1_975[data1.loom == i][data1.t1 == 1618600800],
                label = str(i), alpha = 0.3, color = colors[count], lw = 0)
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel('Local Polarization', size = fs)
        #ax.set_title('Loom = 1', fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/local_pol1_w_data.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
    else:
        # Non-verbose: prediction curves and bands only, one per loom.
        loom = [1,2,3,4,5]
        colors = plt.cm.bone_r(np.linspace(0,1,len(loom)+1))
        count = 1
        for i in loom:
            ax.plot(
                data1.temp[data1.loom == i][data1.t1 == 1618600800],
                data1.pol_1[data1.loom == i][data1.t1 == 1618600800],
                color = colors[count],
                lw = lw)
            ax.fill_between(
                data1.temp[data1.loom == i][data1.t1== 1618600800],
                data1.pol1_025[data1.loom == i][data1.t1 == 1618600800],
                data1.pol1_975[data1.loom == i][data1.t1 == 1618600800],
                label = str(i), alpha = 0.3, color = colors[count], lw = 0)
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel('Local Polarization', size = fs)
        #ax.set_title('Loom = 1', fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/local_pol1_wo_data.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
#hull after loom
#model_lm <- lm(hull ~ gs + temp , my_data)
# Plot predicted convex-hull area AFTER the loom (frames 700-900, per the CSV
# name) vs. temperature, one curve per group size (R model, per comment above:
# lm(hull ~ gs + temp)).  No back-transform — the model is on the raw scale.
# Group sizes 1 and 2 are excluded — a hull needs at least 3 points.
if args.a_string=='hull_after_loom_predictions_700_900.csv':
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_w_loom.csv')
    data_hull = data2.convex_hull_area
    gs = [4,8,16]
    loom = [1]
    colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
    count = 1
    if args.verbose==True:
        # Verbose: raw observations + one curve/band per group size.
        # NOTE(review): the inner `for j in loom` loop never uses j; with
        # loom == [1] it is a no-op wrapper — possibly left over from a
        # multi-loom version.
        for i in gs:
            for j in loom:
                ax.scatter(data2.Temperature[data2.Groupsize == i],
                    data_hull[data2.Groupsize == i], s = 10, alpha = 0.5,
                    color = colors[count])
                ax.plot(
                    data1.temp[data1.gs== i],
                    (data1.hull[data1.gs==i]),
                    color = colors[count], lw = lw)
                ax.fill_between(
                    data1.temp[data1.gs== i],
                    (data1.hull025[data1.gs==i]),
                    (data1.hull975[data1.gs==i]), alpha = 0.3, label = str(i),
                    color = colors[count], lw = 0)
                count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Convex hull area after loom', size = fs)
        #ax.set_title('Loom = '+str(loom[0]), fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/convex_hull_after_loom_w_data_gs_all.png'
        fig.savefig(out_dir, dpi = dpi)
        plt.show()
    else:
        # Non-verbose: group size 16 only, no raw data, no legend label.
        gs = [16]
        loom = [1]
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
        count = 1
        for i in gs:
            for j in loom:
                ax.plot(
                    data1.temp[data1.gs== i],
                    (data1.hull[data1.gs==i]),
                    color = colors[count], lw = lw)
                ax.fill_between(
                    data1.temp[data1.gs== i],
                    (data1.hull025[data1.gs==i]),
                    (data1.hull975[data1.gs==i]), alpha = 0.3,
                    color = colors[count], lw = 0)
                count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Convex hull area after loom', size = fs)
        #ax.set_title('Loom = '+str(loom[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/convex_hull_after_loom_wo_data_gs_16.png'
        fig.savefig(out_dir, dpi = dpi)
        plt.show()
#hull during loom
#model_lm <- lm(hull ~ gs + temp , my_data)
# Plot predicted convex-hull area DURING the loom (frames 500-700, per the
# CSV name) vs. temperature, one curve per group size (R model, per comment
# above: lm(hull ~ gs + temp)).  Structure mirrors the after-loom block.
if args.a_string=='hull_during_loom_predictions_500_700.csv':
    data2 = pd.read_csv('../../data/temp_collective/roi/convex_hull_during_loom.csv')
    data_hull = data2.convex_hull_area_500_700
    gs = [4,8,16]
    loom = [1]
    colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
    count = 1
    if args.verbose==True:
        # Verbose: raw observations + one curve/band per group size.
        # NOTE(review): `for j in loom` never uses j (loom == [1]) — no-op wrapper.
        for i in gs:
            for j in loom:
                ax.scatter(data2.Temperature[data2.Groupsize == i],
                    data_hull[data2.Groupsize == i], s = 10, alpha = 0.5,
                    color = colors[count])
                ax.plot(
                    data1.temp[data1.gs== i],
                    (data1.hull[data1.gs==i]),
                    color = colors[count], lw = lw)
                ax.fill_between(
                    data1.temp[data1.gs== i],
                    (data1.hull025[data1.gs==i]),
                    (data1.hull975[data1.gs==i]), alpha = 0.3, label = str(i),
                    color = colors[count], lw = 0)
                count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Convex hull area during loom', size = fs)
        #ax.set_title('Loom = '+str(loom[0]), fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/convex_hull_during_loom_w_data_gs_all.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
    else:
        # Non-verbose: group size 16 only (legend still drawn here, unlike
        # the after-loom block's else branch).
        gs = [16]
        loom = [1]
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
        count = 1
        for i in gs:
            for j in loom:
                ax.plot(
                    data1.temp[data1.gs== i],
                    (data1.hull[data1.gs==i]),
                    color = colors[count], lw = lw)
                ax.fill_between(
                    data1.temp[data1.gs== i],
                    (data1.hull025[data1.gs==i]),
                    (data1.hull975[data1.gs==i]), alpha = 0.3, label = str(i),
                    color = colors[count], lw = 0)
                count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Convex hull area during loom', size = fs)
        #ax.set_title('Loom = '+str(loom[0]), fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/convex_hull_during_loom_wo_data_gs_16.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
### pre loom avg acceleration
#log(acc+1) ~ temp + I(temp^2) + log(gs,2) + I(log(gs,2)^2)
# r sq 0.2786
# Plot predicted pre-loom AVERAGE acceleration vs. temperature, per group
# size (R model, per comment above:
# lm(log(acc+1) ~ temp + I(temp^2) + log(gs,2) + I(log(gs,2)^2)), r^2 = 0.2786).
# Back-transformed with np.exp(...)-1.
# NOTE(review): a second block with this exact same condition appears later
# in the file (differing only in the y-label text) and saves to the same
# output paths, overwriting these figures — likely an accidental duplicate.
if args.a_string=='acc_avg_predictions_new.csv':
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_wo_loom.csv')
    #data2 = data2.drop(labels = 127)
    data_hull = data2.avg_acc
    gs = [1,2,4,8,16]
    count = 1
    colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
    if args.verbose==True:
        # Verbose: raw observations + one curve/band per group size.
        for i in gs:
            ax.scatter(data2.Temperature[data2.Groupsize == i],
                data_hull[data2.Groupsize == i], s = 10, alpha = 0.5,
                color = colors[count])
            ax.plot(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc[data1.gs ==i])-1,
                color = colors[count], lw = lw)
            ax.fill_between(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc_025[data1.gs ==i])-1,
                np.exp(data1.acc_975[data1.gs ==i])-1, alpha = 0.3,
                color = colors[count], lw = 0,label = str(i))
            count +=1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Average acceleration (BL/s'+r'$^2$)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        out_dir = '../../output/temp_collective/roi_figures/predictions/pre_loom_avg_acc_w_data.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
    else:
        # Non-verbose: group size 16 only, no raw data.
        gs = [16]
        count = 1
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
        for i in gs:
            ax.plot(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc[data1.gs ==i])-1,
                color = colors[count], lw = lw)
            ax.fill_between(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc_025[data1.gs ==i])-1,
                np.exp(data1.acc_975[data1.gs ==i])-1, alpha = 0.3,
                color = colors[count], lw = 0, label = str(i))
            count +=1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Average acceleration (BL/s'+r'$^2$)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        #ax.set_title('Groupsize = 16', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        out_dir = '../../output/temp_collective/roi_figures/predictions/pre_loom_avg_acc_wo_data.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
### pre loom median acceleration
#log(acc+1) ~ temp + I(temp^2) + log(gs,2) + I(log(gs,2)^2)
# r sq 0.301
# Plot predicted pre-loom MEDIAN (50th percentile) acceleration vs.
# temperature, per group size (R model, per comment above:
# lm(log(acc+1) ~ temp + I(temp^2) + log(gs,2) + I(log(gs,2)^2)), r^2 = 0.301).
# Back-transformed with np.exp(...)-1 to undo log(acc+1).
if args.a_string=='acc_50_predictions_new.csv':
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_wo_loom.csv')
    #data2 = data2.drop(labels = 127)
    data_hull = data2.acc_percentile50
    gs = [1,2,4,8,16]
    count = 1
    colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
    if args.verbose==True:
        # Verbose: raw observations + one curve/band per group size.
        for i in gs:
            ax.scatter(data2.Temperature[data2.Groupsize == i],
                data_hull[data2.Groupsize == i], s = 10, alpha = 0.5,
                color = colors[count])
            ax.plot(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc50[data1.gs ==i])-1,
                color = colors[count], lw = lw)
            ax.fill_between(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc50_025[data1.gs ==i])-1,
                np.exp(data1.acc50_975[data1.gs ==i])-1, alpha = 0.3,
                color = colors[count], lw = 0,label = str(i))
            count +=1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Median acceleration (BL/s'+r'$^2$)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        out_dir = '../../output/temp_collective/roi_figures/predictions/pre_loom_50_acc_w_data.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
    else:
        # Non-verbose: group size 16 only, no raw data.
        gs = [16]
        count = 1
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
        for i in gs:
            ax.plot(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc50[data1.gs ==i])-1,
                color = colors[count], lw = lw)
            ax.fill_between(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc50_025[data1.gs ==i])-1,
                np.exp(data1.acc50_975[data1.gs ==i])-1, alpha = 0.3,
                color = colors[count], lw = 0, label = str(i))
            count +=1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Median acceleration (BL/s'+r'$^2$)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        #ax.set_title('Groupsize = 16', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        out_dir = '../../output/temp_collective/roi_figures/predictions/pre_loom_50_acc_wo_data.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
### pre loom avg acceleration
#log(acc+1) ~ temp + I(temp^2) + log(gs,2) + I(log(gs,2)^2)
# r sq 0.2786
# NOTE(review): this block is a near-verbatim DUPLICATE of the earlier
# 'acc_avg_predictions_new.csv' block above — same condition, same inputs,
# same output file paths; only the y-label differs ('Mean' here vs 'Average'
# there).  When the condition matches, both blocks run and this second
# savefig overwrites the first figure.  Left in place (removing it would
# change which label ends up in the saved figure); one of the two should be
# deleted once the intended label is confirmed.
if args.a_string=='acc_avg_predictions_new.csv':
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_wo_loom.csv')
    #data2 = data2.drop(labels = 127)
    data_hull = data2.avg_acc
    gs = [1,2,4,8,16]
    count = 1
    colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
    if args.verbose==True:
        # Verbose: raw observations + one curve/band per group size.
        for i in gs:
            ax.scatter(data2.Temperature[data2.Groupsize == i],
                data_hull[data2.Groupsize == i], s = 10, alpha = 0.5,
                color = colors[count])
            ax.plot(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc[data1.gs ==i])-1,
                color = colors[count], lw = lw)
            ax.fill_between(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc_025[data1.gs ==i])-1,
                np.exp(data1.acc_975[data1.gs ==i])-1, alpha = 0.3,
                color = colors[count], lw = 0,label = str(i))
            count +=1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Mean acceleration (BL/s'+r'$^2$)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        out_dir = '../../output/temp_collective/roi_figures/predictions/pre_loom_avg_acc_w_data.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
    else:
        # Non-verbose: group size 16 only, no raw data.
        gs = [16]
        count = 1
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
        for i in gs:
            ax.plot(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc[data1.gs ==i])-1,
                color = colors[count], lw = lw)
            ax.fill_between(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc_025[data1.gs ==i])-1,
                np.exp(data1.acc_975[data1.gs ==i])-1, alpha = 0.3,
                color = colors[count], lw = 0, label = str(i))
            count +=1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Mean acceleration (BL/s'+r'$^2$)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        #ax.set_title('Groupsize = 16', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        out_dir = '../../output/temp_collective/roi_figures/predictions/pre_loom_avg_acc_wo_data.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
### pca coeff - bernoulli
# model_glm <- glm(pca ~ temp + I(temp^2) + log(gs,2) + I(log(gs,2)^2) + date, family = binomial, data = my_data)
# summary(model_glm)
#aic = 834
# Plot predicted probability that the PCA coefficient is >= 0 vs. temperature
# (R model, per comment above: glm(pca ~ temp + I(temp^2) + log(gs,2) +
# I(log(gs,2)^2) + date, family = binomial), AIC = 834), held at date 18106.
# NOTE(review): unlike sibling blocks, only the verbose==False branch is
# implemented — with --verbose this block loads data and then does nothing.
if args.a_string=='pca_predictions.csv':
    data2 = pd.read_csv('../../data/temp_collective/roi/pca_coeff_bernoulli.csv')
    #data2 = data2.drop(labels = 127)
    data_hull = data2.pca_coeff
    gs = [4,8,16]
    count = 1
    colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
    if args.verbose==False:
        # Non-verbose: group size 16 only, prediction curve and 95% band.
        gs = [16]
        count = 1
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
        for i in gs:
            ax.plot(
                data1.temp[data1.gs ==i][data1.date == 18106],
                (data1.pca[data1.gs ==i][data1.date == 18106]),
                color = colors[count], lw = lw)
            ax.fill_between(
                data1.temp[data1.gs ==i][data1.date == 18106],
                (data1.pca_025[data1.gs ==i][data1.date == 18106]),
                (data1.pca_975[data1.gs ==i][data1.date == 18106]), alpha = 0.3,
                color = colors[count], lw = 0, label = str(i))
            count +=1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            'Probability for pca coeff \n to be >= 0', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        ax.set_title('Groupsize = 16', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        out_dir = '../../output/temp_collective/roi_figures/predictions/pca_bernoulli_wo_data.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
# acc before each loom
#model_lm <- lm(log(acc+1) ~ temp + I(temp^2) + log(gs,2) + I(log(gs,2)^2),my_new_data)
# r sq 0.1962
if args.a_string=='acc_before_loom_99_predictions_new_squared.csv':
    # Pre-loom 99th-percentile acceleration vs temperature, one curve per
    # group size. Model: log(acc+1) ~ temp + temp^2 + log2(gs) + log2(gs)^2,
    # so predictions are back-transformed with exp(.) - 1 below.
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_wo_loom.csv')
    data2 = data2.drop(labels = 127)
    data_hull = data2.acc_percentile99
    gs = [1,2,4,8,16]
    count = 1
    colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
    tick_vals = [9,13,17,21,25,29]
    if args.verbose==True:
        # Verbose: scatter the raw observations underneath each fitted curve.
        for size in gs:
            obs = data2.Groupsize == size
            pred = data1.gs == size
            ax.scatter(data2.Temperature[obs],
                data_hull[obs], s = 10, alpha = 0.5,
                color = colors[count])
            ax.plot(
                data1.temp[pred],
                np.exp(data1.acc99[pred])-1,
                color = colors[count], lw = lw)
            # 95% prediction band, back-transformed from the log scale.
            ax.fill_between(
                data1.temp[pred],
                np.exp(data1.acc99_025[pred])-1,
                np.exp(data1.acc99_975[pred])-1, alpha = 0.3,
                color = colors[count], lw = 0,label = str(size))
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            '99th percentile of acceleration \n before loom (BL/s)', size = fs)
        plt.xticks(ticks = tick_vals, labels = tick_vals, fontsize = fs)
        plt.yticks(fontsize = fs)
        plt.legend(fontsize=fs, loc='upper right', title = 'Groupsize', framealpha = 0.5)
        out_dir = '../../output/temp_collective/roi_figures/predictions/acc_before_loom_percentile99_w_data_new_squared.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
    else:
        # Prediction-only figure restricted to group size 16.
        gs = [16]
        count = 1
        colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
        for size in gs:
            pred = data1.gs == size
            ax.plot(
                data1.temp[pred],
                np.exp(data1.acc99[pred])-1,
                color = colors[count], lw = lw)
            ax.fill_between(
                data1.temp[pred],
                np.exp(data1.acc99_025[pred])-1,
                np.exp(data1.acc99_975[pred])-1, alpha = 0.3,
                color = colors[count], lw = 0, label = str(size))
            count += 1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            '99th percentile of acceleration \n before loom (BL/s'+r'$^2$)', size = fs)
        plt.xticks(ticks = tick_vals, labels = tick_vals, fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/acc_before_loom_percentile99_wo_data_new_squared.png'
        fig.savefig(out_dir, dpi = 300)
        plt.show()
#figure with both unperturbed swimming speed predictions - linear and quadratic
#model_lm <- lm(log(speed+1) ~ temp + temp^2,my_data)
if args.a_string=='speed99_before_loom_predictions_new.csv':
    # Unperturbed (pre-loom) 99th-percentile swimming speed vs temperature,
    # overlaying the quadratic model (data1) and the linear model (data3).
    # Predictions were fit on a log(speed+1) scale, hence the exp(.)-1 below.
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_wo_loom.csv')
    data_hull = data2.speed_percentile99
    data3 = pd.read_csv('../../data/temp_collective/roi/speed99_before_loom_predictions.csv')
    colors = plt.cm.bone_r(np.linspace(0,1,3))
    # NOTE(review): `count` is read below but never assigned in this branch —
    # it relies on whatever value earlier code left behind. `colors` has only
    # 3 entries, so colors[count] raises IndexError when count > 2; confirm
    # the upstream initialisation before relying on this path.
    if args.verbose==True:
        # Verbose: raw observations plus the quadratic-model curve and band.
        ax.scatter(data2.Temperature,
            data_hull, s = 10, alpha = 0.5,
            color = colors[count])
        ax.plot(
            data1.temp,
            np.exp(data1.speed99)-1,
            color = colors[count], lw = lw)
        # 95% prediction band (2.5th-97.5th percentile), back-transformed.
        ax.fill_between(
            data1.temp,
            np.exp(data1.speed99_025)-1,
            np.exp(data1.speed99_975)-1, alpha = 0.3,
            color = colors[count], lw = 0)
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            '99th percentile of speed (BL/s)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/speed_percentile99_new_w_data.png'
        fig.savefig(out_dir, dpi = dpi)
        plt.show()
    else:
        # Model-comparison figure: quadratic (darker) vs linear (lighter),
        # each with its own back-transformed prediction band.
        colors = plt.cm.bone_r(np.linspace(0,1,3))
        ax.plot(
            data1.temp,
            np.exp(data1.speed99)-1,
            color = colors[count], lw = lw)
        ax.plot(
            data3.temp,
            np.exp(data3.speed99)-1,
            color = colors[count-1], lw = lw)
        ax.fill_between(
            data1.temp,
            np.exp(data1.speed99_025)-1,
            np.exp(data1.speed99_975)-1, alpha = 0.3,
            color = colors[count], lw = 0, label = 'quadratic')
        ax.fill_between(
            data3.temp,
            np.exp(data3.speed99_025)-1,
            np.exp(data3.speed99_975)-1, alpha = 0.3,
            color = colors[count - 1], lw = 0, label = 'linear')
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            '99th percentile of speed (BL/s)', size = fs)
        #ax.set_title('Groupsize = '+str(gs[0]), fontsize = fs)
        #plt.legend(fontsize=fs, loc='upper right', title = 'Loom', framealpha = 0.5)
        # Enlarge the legend title to match the axis-label font sizing.
        legend = plt.legend(fontsize=fs, loc='lower right', title = 'Model', framealpha = 0.5)
        plt.setp(legend.get_title(),fontsize='xx-large')
        #ax.set_title('Interaction of temperature and groupsize', fontsize = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        out_dir = '../../output/temp_collective/roi_figures/predictions/speed_percentile99_together_wo_data.png'
        fig.savefig(out_dir, dpi = dpi)
        plt.show()
#model_lm <- lm(log(acc+1) ~ temp + I(temp^2) + log(gs,2) + I(log(gs,2)^2),my_new_data)
# r sq 0.1962
if args.a_string=='acc_99_predictions_new_squared.csv':
    # 99th-percentile acceleration: quadratic temperature model (data1)
    # compared against the linear model (data3).
    # Model: log(acc+1) ~ temp + I(temp^2) + log(gs,2) + I(log(gs,2)^2),
    # so predictions are back-transformed with exp(.) - 1 below.
    data2 = pd.read_csv('../../data/temp_collective/roi/all_params_wo_loom.csv')
    data2 = data2.drop(labels = 127)
    data_hull = data2.acc_percentile99
    data3 = pd.read_csv('../../data/temp_collective/roi/acc_99_predictions.csv')
    gs = [1,2,4,8,16]
    count = 1
    colors = plt.cm.bone_r(np.linspace(0,1,len(gs)+1))
    if args.verbose==True:
        # Verbose: raw observations plus the quadratic-model curves,
        # one colour per group size.
        for i in gs:
            ax.scatter(data2.Temperature[data2.Groupsize == i],
                data_hull[data2.Groupsize == i], s = 10, alpha = 0.5,
                color = colors[count])
            ax.plot(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc99[data1.gs ==i])-1,
                color = colors[count], lw = lw)
            # 95% prediction band, back-transformed from the log scale.
            ax.fill_between(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc99_025[data1.gs ==i])-1,
                np.exp(data1.acc99_975[data1.gs ==i])-1, alpha = 0.3,
                color = colors[count], lw = 0,label = str(i))
            count +=1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            '99th percentile of acceleration (BL/s'+r'$^2$)', size = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        plt.legend(fontsize=fs, loc='lower right', title = 'Groupsize', framealpha = 0.5)
        out_dir = '../../output/temp_collective/roi_figures/predictions/acc_percentile99_w_data_new_squared.png'
        fig.savefig(out_dir, dpi = dpi)
        plt.show()
    else:
        # Model-comparison figure for group size 16 only, no raw data.
        gs = [16]
        count = 2
        colors = plt.cm.bone_r(np.linspace(0,1,3))
        for i in gs:
            ax.plot(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc99[data1.gs ==i])-1,
                color = colors[count], lw = lw)
            ax.fill_between(
                data1.temp[data1.gs ==i],
                np.exp(data1.acc99_025[data1.gs ==i])-1,
                np.exp(data1.acc99_975[data1.gs ==i])-1, alpha = 0.3,
                color = colors[count], lw = 0, label = 'quadratic')
            # BUGFIX: select the linear-model rows with data3's own groupsize
            # column. The original indexed data3.temp with data1.gs, which
            # misaligns (or raises) whenever the two frames differ in rows.
            ax.plot(
                data3.temp[data3.gs ==i],
                np.exp(data3.acc99[data3.gs ==i])-1,
                color = colors[count-1], lw = lw)
            ax.fill_between(
                data3.temp[data3.gs ==i],
                np.exp(data3.acc99_025[data3.gs ==i])-1,
                np.exp(data3.acc99_975[data3.gs ==i])-1, alpha = 0.3,
                color = colors[count-1], lw = 0, label = 'linear')
            count +=1
        plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs)
        plt.ylabel(
            '99th percentile of acceleration (BL/s'+r'$^2$)', size = fs)
        plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29], fontsize = fs)
        plt.yticks(fontsize = fs)
        # Enlarge the legend title to match the axis-label font sizing.
        legend = plt.legend(fontsize=fs, loc='lower right', title = 'Model', framealpha = 0.5)
        plt.setp(legend.get_title(),fontsize='xx-large')
        out_dir = '../../output/temp_collective/roi_figures/predictions/acc_percentile99_wo_data_together.png'
        fig.savefig(out_dir, dpi = dpi)
        plt.show()
| [
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xticks",
"numpy.linspace",
"numpy.exp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((491, 516), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (514, 516), False, 'import argparse\n'), ((1404, 1466), 'pandas.read_csv', 'pd.read_csv', (["('../../data/temp_collective/roi/' + args.a_string)"], {}), "('../../data/temp_collective/roi/' + args.a_string)\n", (1415, 1466), True, 'import pandas as pd\n'), ((1474, 1541), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_w_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_w_loom.csv')\n", (1485, 1541), True, 'import pandas as pd\n'), ((1639, 1667), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 10)'}), '(figsize=(16, 10))\n', (1649, 1667), True, 'import matplotlib.pyplot as plt\n'), ((1798, 1875), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/annd_after_loom_predictions.csv"""'], {}), "('../../data/temp_collective/roi/annd_after_loom_predictions.csv')\n", (1809, 1875), True, 'import pandas as pd\n'), ((1889, 1956), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_w_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_w_loom.csv')\n", (1900, 1956), True, 'import pandas as pd\n'), ((5269, 5346), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/number_startles_predictions.csv"""'], {}), "('../../data/temp_collective/roi/number_startles_predictions.csv')\n", (5280, 5346), True, 'import pandas as pd\n'), ((5360, 5427), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_w_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_w_loom.csv')\n", (5371, 5427), True, 'import pandas as pd\n'), ((8263, 8325), 'pandas.read_csv', 'pd.read_csv', (["('../../data/temp_collective/roi/' + args.a_string)"], {}), "('../../data/temp_collective/roi/' + args.a_string)\n", (8274, 8325), True, 'import pandas as pd\n'), ((8337, 8404), 'pandas.read_csv', 'pd.read_csv', 
(['"""../../data/temp_collective/roi/all_params_w_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_w_loom.csv')\n", (8348, 8404), True, 'import pandas as pd\n'), ((11406, 11492), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/convex_hull_ratio_600_650w_loom.csv"""'], {}), "(\n '../../data/temp_collective/roi/convex_hull_ratio_600_650w_loom.csv')\n", (11417, 11492), True, 'import pandas as pd\n'), ((14825, 14911), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/convex_hull_ratio_600_650w_loom.csv"""'], {}), "(\n '../../data/temp_collective/roi/convex_hull_ratio_600_650w_loom.csv')\n", (14836, 14911), True, 'import pandas as pd\n'), ((18365, 18433), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_wo_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_wo_loom.csv')\n", (18376, 18433), True, 'import pandas as pd\n'), ((23358, 23426), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_wo_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_wo_loom.csv')\n", (23369, 23426), True, 'import pandas as pd\n'), ((25838, 25906), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_wo_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_wo_loom.csv')\n", (25849, 25906), True, 'import pandas as pd\n'), ((28208, 28275), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_w_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_w_loom.csv')\n", (28219, 28275), True, 'import pandas as pd\n'), ((31402, 31469), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_w_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_w_loom.csv')\n", (31413, 31469), True, 'import pandas as pd\n'), ((44239, 44301), 'pandas.read_csv', 'pd.read_csv', (["('../../data/temp_collective/roi/' + args.a_string)"], {}), 
"('../../data/temp_collective/roi/' + args.a_string)\n", (44250, 44301), True, 'import pandas as pd\n'), ((44313, 44388), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_w_loom_startle.csv"""'], {}), "('../../data/temp_collective/roi/all_params_w_loom_startle.csv')\n", (44324, 44388), True, 'import pandas as pd\n'), ((47812, 47880), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_wo_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_wo_loom.csv')\n", (47823, 47880), True, 'import pandas as pd\n'), ((54583, 54645), 'pandas.read_csv', 'pd.read_csv', (["('../../data/temp_collective/roi/' + args.a_string)"], {}), "('../../data/temp_collective/roi/' + args.a_string)\n", (54594, 54645), True, 'import pandas as pd\n'), ((54657, 54747), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_w_loom_startle_corrected.csv"""'], {}), "(\n '../../data/temp_collective/roi/all_params_w_loom_startle_corrected.csv')\n", (54668, 54747), True, 'import pandas as pd\n'), ((58162, 58230), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_wo_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_wo_loom.csv')\n", (58173, 58230), True, 'import pandas as pd\n'), ((61441, 61503), 'pandas.read_csv', 'pd.read_csv', (["('../../data/temp_collective/roi/' + args.a_string)"], {}), "('../../data/temp_collective/roi/' + args.a_string)\n", (61452, 61503), True, 'import pandas as pd\n'), ((61515, 61586), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_w_loom_pol.csv"""'], {}), "('../../data/temp_collective/roi/all_params_w_loom_pol.csv')\n", (61526, 61586), True, 'import pandas as pd\n'), ((64726, 64793), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_w_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_w_loom.csv')\n", (64737, 64793), True, 'import pandas as pd\n'), 
((67802, 67875), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/convex_hull_during_loom.csv"""'], {}), "('../../data/temp_collective/roi/convex_hull_during_loom.csv')\n", (67813, 67875), True, 'import pandas as pd\n'), ((70945, 71013), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_wo_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_wo_loom.csv')\n", (70956, 71013), True, 'import pandas as pd\n'), ((74219, 74287), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_wo_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_wo_loom.csv')\n", (74230, 74287), True, 'import pandas as pd\n'), ((77511, 77579), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_wo_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_wo_loom.csv')\n", (77522, 77579), True, 'import pandas as pd\n'), ((80838, 80907), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/pca_coeff_bernoulli.csv"""'], {}), "('../../data/temp_collective/roi/pca_coeff_bernoulli.csv')\n", (80849, 80907), True, 'import pandas as pd\n'), ((82804, 82872), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_wo_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_wo_loom.csv')\n", (82815, 82872), True, 'import pandas as pd\n'), ((86232, 86300), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/all_params_wo_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_wo_loom.csv')\n", (86243, 86300), True, 'import pandas as pd\n'), ((86354, 86440), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/speed99_before_loom_predictions.csv"""'], {}), "(\n '../../data/temp_collective/roi/speed99_before_loom_predictions.csv')\n", (86365, 86440), True, 'import pandas as pd\n'), ((89390, 89458), 'pandas.read_csv', 'pd.read_csv', 
(['"""../../data/temp_collective/roi/all_params_wo_loom.csv"""'], {}), "('../../data/temp_collective/roi/all_params_wo_loom.csv')\n", (89401, 89458), True, 'import pandas as pd\n'), ((89547, 89615), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/acc_99_predictions.csv"""'], {}), "('../../data/temp_collective/roi/acc_99_predictions.csv')\n", (89558, 89615), True, 'import pandas as pd\n'), ((2963, 3017), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (2973, 3017), True, 'import matplotlib.pyplot as plt\n'), ((3026, 3058), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ANND (BL)"""'], {'size': 'fs'}), "('ANND (BL)', size=fs)\n", (3036, 3058), True, 'import matplotlib.pyplot as plt\n'), ((3133, 3210), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (3143, 3210), True, 'import matplotlib.pyplot as plt\n'), ((3360, 3450), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (3370, 3450), True, 'import matplotlib.pyplot as plt\n'), ((3451, 3474), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (3461, 3474), True, 'import matplotlib.pyplot as plt\n'), ((3649, 3659), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3657, 3659), True, 'import matplotlib.pyplot as plt\n'), ((4412, 4466), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (4422, 4466), True, 'import matplotlib.pyplot as plt\n'), ((4475, 4507), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""ANND (BL)"""'], {'size': 'fs'}), "('ANND (BL)', size=fs)\n", (4485, 4507), True, 'import matplotlib.pyplot as plt\n'), ((4536, 4613), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (4546, 4613), True, 'import matplotlib.pyplot as plt\n'), ((4764, 4854), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (4774, 4854), True, 'import matplotlib.pyplot as plt\n'), ((4855, 4878), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (4865, 4878), True, 'import matplotlib.pyplot as plt\n'), ((5054, 5064), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5062, 5064), True, 'import matplotlib.pyplot as plt\n'), ((6389, 6443), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (6399, 6443), True, 'import matplotlib.pyplot as plt\n'), ((6452, 6493), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of startles"""'], {'size': 'fs'}), "('Number of startles', size=fs)\n", (6462, 6493), True, 'import matplotlib.pyplot as plt\n'), ((6552, 6629), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (6562, 6629), True, 'import matplotlib.pyplot as plt\n'), ((6723, 6796), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29])\n', (6733, 6796), True, 'import matplotlib.pyplot as plt\n'), ((6936, 
6946), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6944, 6946), True, 'import matplotlib.pyplot as plt\n'), ((7624, 7678), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (7634, 7678), True, 'import matplotlib.pyplot as plt\n'), ((7687, 7728), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of startles"""'], {'size': 'fs'}), "('Number of startles', size=fs)\n", (7697, 7728), True, 'import matplotlib.pyplot as plt\n'), ((7739, 7816), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (7749, 7816), True, 'import matplotlib.pyplot as plt\n'), ((7877, 7950), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29])\n', (7887, 7950), True, 'import matplotlib.pyplot as plt\n'), ((8099, 8109), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8107, 8109), True, 'import matplotlib.pyplot as plt\n'), ((9279, 9333), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (9289, 9333), True, 'import matplotlib.pyplot as plt\n'), ((9342, 9372), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latency"""'], {'size': 'fs'}), "('Latency', size=fs)\n", (9352, 9372), True, 'import matplotlib.pyplot as plt\n'), ((9432, 9509), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (9442, 9509), True, 'import matplotlib.pyplot as plt\n'), ((9603, 9688), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], 
{'ticks': '[580, 585, 590, 595]', 'labels': '[580, 585, 590, 595]', 'fontsize': 'fs'}), '(ticks=[580, 585, 590, 595], labels=[580, 585, 590, 595], fontsize=fs\n )\n', (9613, 9688), True, 'import matplotlib.pyplot as plt\n'), ((9691, 9781), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (9701, 9781), True, 'import matplotlib.pyplot as plt\n'), ((9914, 9924), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9922, 9924), True, 'import matplotlib.pyplot as plt\n'), ((10533, 10587), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (10543, 10587), True, 'import matplotlib.pyplot as plt\n'), ((10596, 10626), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latency"""'], {'size': 'fs'}), "('Latency', size=fs)\n", (10606, 10626), True, 'import matplotlib.pyplot as plt\n'), ((10695, 10772), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (10705, 10772), True, 'import matplotlib.pyplot as plt\n'), ((10842, 10927), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'ticks': '[580, 585, 590, 595]', 'labels': '[580, 585, 590, 595]', 'fontsize': 'fs'}), '(ticks=[580, 585, 590, 595], labels=[580, 585, 590, 595], fontsize=fs\n )\n', (10852, 10927), True, 'import matplotlib.pyplot as plt\n'), ((11011, 11101), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (11021, 11101), True, 'import matplotlib.pyplot as plt\n'), ((11235, 11245), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (11243, 11245), True, 'import matplotlib.pyplot as plt\n'), ((12605, 12659), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (12615, 12659), True, 'import matplotlib.pyplot as plt\n'), ((12668, 12775), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ratio of convex hull area during loom to \n convex hull area after loom"""'], {'size': 'fs'}), '(\n """Ratio of convex hull area during loom to \n convex hull area after loom"""\n , size=fs)\n', (12678, 12775), True, 'import matplotlib.pyplot as plt\n'), ((12849, 12921), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Loom"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Loom', framealpha=0.5)\n", (12859, 12921), True, 'import matplotlib.pyplot as plt\n'), ((13015, 13088), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29])\n', (13025, 13088), True, 'import matplotlib.pyplot as plt\n'), ((13243, 13253), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13251, 13253), True, 'import matplotlib.pyplot as plt\n'), ((14000, 14054), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (14010, 14054), True, 'import matplotlib.pyplot as plt\n'), ((14063, 14172), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ratio of convex hull area during loom to \n convex hull before after loom"""'], {'size': 'fs'}), '(\n """Ratio of convex hull area during loom to \n convex hull before after loom"""\n , size=fs)\n', (14073, 14172), True, 'import matplotlib.pyplot as plt\n'), ((14246, 14318), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 
'title': '"""Loom"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Loom', framealpha=0.5)\n", (14256, 14318), True, 'import matplotlib.pyplot as plt\n'), ((14412, 14485), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29])\n', (14422, 14485), True, 'import matplotlib.pyplot as plt\n'), ((14641, 14651), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14649, 14651), True, 'import matplotlib.pyplot as plt\n'), ((16059, 16113), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (16069, 16113), True, 'import matplotlib.pyplot as plt\n'), ((16122, 16224), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ratio of convex hull area during loom to \n convex hull before loom"""'], {'size': 'fs'}), '(\n """Ratio of convex hull area during loom to \n convex hull before loom""",\n size=fs)\n', (16132, 16224), True, 'import matplotlib.pyplot as plt\n'), ((16296, 16373), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (16306, 16373), True, 'import matplotlib.pyplot as plt\n'), ((16467, 16540), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29])\n', (16477, 16540), True, 'import matplotlib.pyplot as plt\n'), ((16703, 16713), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16711, 16713), True, 'import matplotlib.pyplot as plt\n'), ((17591, 17645), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (17601, 
17645), True, 'import matplotlib.pyplot as plt\n'), ((17654, 17761), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ratio of convex hull area during loom to \n convex hull area after loom"""'], {'size': 'fs'}), '(\n """Ratio of convex hull area during loom to \n convex hull area after loom"""\n , size=fs)\n', (17664, 17761), True, 'import matplotlib.pyplot as plt\n'), ((17835, 17907), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Loom"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Loom', framealpha=0.5)\n", (17845, 17907), True, 'import matplotlib.pyplot as plt\n'), ((18001, 18074), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29])\n', (18011, 18074), True, 'import matplotlib.pyplot as plt\n'), ((18236, 18246), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18244, 18246), True, 'import matplotlib.pyplot as plt\n'), ((18512, 18532), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (18523, 18532), True, 'import numpy as np\n'), ((19019, 19073), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (19029, 19073), True, 'import matplotlib.pyplot as plt\n'), ((19082, 19154), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""99th percentile of speed \n before loom (BL/s)"""'], {'size': 'fs'}), '("""99th percentile of speed \n before loom (BL/s)""", size=fs)\n', (19092, 19154), True, 'import matplotlib.pyplot as plt\n'), ((19415, 19505), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (19425, 19505), True, 'import matplotlib.pyplot as plt\n'), 
((19506, 19529), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (19516, 19529), True, 'import matplotlib.pyplot as plt\n'), ((19683, 19693), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19691, 19693), True, 'import matplotlib.pyplot as plt\n'), ((20056, 20110), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (20066, 20110), True, 'import matplotlib.pyplot as plt\n'), ((20119, 20191), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""99th percentile of speed \n before loom (BL/s)"""'], {'size': 'fs'}), '("""99th percentile of speed \n before loom (BL/s)""", size=fs)\n', (20129, 20191), True, 'import matplotlib.pyplot as plt\n'), ((20443, 20533), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (20453, 20533), True, 'import matplotlib.pyplot as plt\n'), ((20534, 20557), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (20544, 20557), True, 'import matplotlib.pyplot as plt\n'), ((20712, 20722), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20720, 20722), True, 'import matplotlib.pyplot as plt\n'), ((23505, 23525), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (23516, 23525), True, 'import numpy as np\n'), ((24012, 24066), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (24022, 24066), True, 'import matplotlib.pyplot as plt\n'), ((24075, 24117), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Median speed (BL/s)"""'], {'size': 'fs'}), "('Median speed (BL/s)', size=fs)\n", (24085, 24117), True, 'import matplotlib.pyplot as plt\n'), ((24381, 24471), 
'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (24391, 24471), True, 'import matplotlib.pyplot as plt\n'), ((24472, 24495), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (24482, 24495), True, 'import matplotlib.pyplot as plt\n'), ((24653, 24663), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24661, 24663), True, 'import matplotlib.pyplot as plt\n'), ((25026, 25080), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (25036, 25080), True, 'import matplotlib.pyplot as plt\n'), ((25089, 25131), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Median speed (BL/s)"""'], {'size': 'fs'}), "('Median speed (BL/s)', size=fs)\n", (25099, 25131), True, 'import matplotlib.pyplot as plt\n'), ((25386, 25476), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (25396, 25476), True, 'import matplotlib.pyplot as plt\n'), ((25477, 25500), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (25487, 25500), True, 'import matplotlib.pyplot as plt\n'), ((25659, 25669), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25667, 25669), True, 'import matplotlib.pyplot as plt\n'), ((25976, 25996), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (25987, 25996), True, 'import numpy as np\n'), ((26483, 26537), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (26493, 26537), True, 'import matplotlib.pyplot as plt\n'), 
((26546, 26586), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean speed (BL/s)"""'], {'size': 'fs'}), "('Mean speed (BL/s)', size=fs)\n", (26556, 26586), True, 'import matplotlib.pyplot as plt\n'), ((26850, 26940), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (26860, 26940), True, 'import matplotlib.pyplot as plt\n'), ((26941, 26964), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (26951, 26964), True, 'import matplotlib.pyplot as plt\n'), ((27113, 27123), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27121, 27123), True, 'import matplotlib.pyplot as plt\n'), ((27486, 27540), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (27496, 27540), True, 'import matplotlib.pyplot as plt\n'), ((27549, 27589), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean speed (BL/s)"""'], {'size': 'fs'}), "('Mean speed (BL/s)', size=fs)\n", (27559, 27589), True, 'import matplotlib.pyplot as plt\n'), ((27844, 27934), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (27854, 27934), True, 'import matplotlib.pyplot as plt\n'), ((27935, 27958), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (27945, 27958), True, 'import matplotlib.pyplot as plt\n'), ((28108, 28118), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (28116, 28118), True, 'import matplotlib.pyplot as plt\n'), ((29225, 29279), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', 
size=fs)\n", (29235, 29279), True, 'import matplotlib.pyplot as plt\n'), ((29288, 29360), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""99th percentile of speed \n during loom (BL/s)"""'], {'size': 'fs'}), '("""99th percentile of speed \n during loom (BL/s)""", size=fs)\n', (29298, 29360), True, 'import matplotlib.pyplot as plt\n'), ((29368, 29445), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (29378, 29445), True, 'import matplotlib.pyplot as plt\n'), ((29458, 29548), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (29468, 29548), True, 'import matplotlib.pyplot as plt\n'), ((29549, 29572), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (29559, 29572), True, 'import matplotlib.pyplot as plt\n'), ((29783, 29793), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29791, 29793), True, 'import matplotlib.pyplot as plt\n'), ((30455, 30509), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (30465, 30509), True, 'import matplotlib.pyplot as plt\n'), ((30518, 30590), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""99th percentile of speed \n during loom (BL/s)"""'], {'size': 'fs'}), '("""99th percentile of speed \n during loom (BL/s)""", size=fs)\n', (30528, 30590), True, 'import matplotlib.pyplot as plt\n'), ((30843, 30933), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (30853, 30933), True, 
'import matplotlib.pyplot as plt\n'), ((30934, 30957), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (30944, 30957), True, 'import matplotlib.pyplot as plt\n'), ((31116, 31126), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31124, 31126), True, 'import matplotlib.pyplot as plt\n'), ((32501, 32555), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (32511, 32555), True, 'import matplotlib.pyplot as plt\n'), ((32564, 32636), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""99th percentile of speed \n during loom (BL/s)"""'], {'size': 'fs'}), '("""99th percentile of speed \n during loom (BL/s)""", size=fs)\n', (32574, 32636), True, 'import matplotlib.pyplot as plt\n'), ((32644, 32721), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (32654, 32721), True, 'import matplotlib.pyplot as plt\n'), ((32734, 32824), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (32744, 32824), True, 'import matplotlib.pyplot as plt\n'), ((32825, 32848), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (32835, 32848), True, 'import matplotlib.pyplot as plt\n'), ((33059, 33069), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (33067, 33069), True, 'import matplotlib.pyplot as plt\n'), ((33829, 33883), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (33839, 33883), True, 'import matplotlib.pyplot as plt\n'), ((33892, 33964), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""99th percentile of speed \n during loom (BL/s)"""'], {'size': 'fs'}), '("""99th percentile of speed \n during loom (BL/s)""", size=fs)\n', (33902, 33964), True, 'import matplotlib.pyplot as plt\n'), ((34216, 34306), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (34226, 34306), True, 'import matplotlib.pyplot as plt\n'), ((34307, 34330), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (34317, 34330), True, 'import matplotlib.pyplot as plt\n'), ((34490, 34500), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (34498, 34500), True, 'import matplotlib.pyplot as plt\n'), ((35522, 35576), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (35532, 35576), True, 'import matplotlib.pyplot as plt\n'), ((35585, 35646), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Proportion of individuals that startle"""'], {'size': 'fs'}), "('Proportion of individuals that startle', size=fs)\n", (35595, 35646), True, 'import matplotlib.pyplot as plt\n'), ((35891, 35981), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (35901, 35981), True, 'import matplotlib.pyplot as plt\n'), ((35982, 36005), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (35992, 36005), True, 'import matplotlib.pyplot as plt\n'), ((36165, 36175), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (36173, 36175), True, 'import matplotlib.pyplot as plt\n'), ((36881, 36935), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' 
+ '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (36891, 36935), True, 'import matplotlib.pyplot as plt\n'), ((36944, 37005), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Proportion of individuals that startle"""'], {'size': 'fs'}), "('Proportion of individuals that startle', size=fs)\n", (36954, 37005), True, 'import matplotlib.pyplot as plt\n'), ((37327, 37417), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (37337, 37417), True, 'import matplotlib.pyplot as plt\n'), ((37418, 37441), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (37428, 37441), True, 'import matplotlib.pyplot as plt\n'), ((37602, 37612), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (37610, 37612), True, 'import matplotlib.pyplot as plt\n'), ((38547, 38601), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (38557, 38601), True, 'import matplotlib.pyplot as plt\n'), ((38610, 38671), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Proportion of individuals that startle"""'], {'size': 'fs'}), "('Proportion of individuals that startle', size=fs)\n", (38620, 38671), True, 'import matplotlib.pyplot as plt\n'), ((38916, 39006), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (38926, 39006), True, 'import matplotlib.pyplot as plt\n'), ((39006, 39029), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (39016, 39029), True, 'import matplotlib.pyplot as plt\n'), ((39193, 39203), 'matplotlib.pyplot.show', 'plt.show', ([], 
{}), '()\n', (39201, 39203), True, 'import matplotlib.pyplot as plt\n'), ((39824, 39878), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (39834, 39878), True, 'import matplotlib.pyplot as plt\n'), ((39887, 39948), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Proportion of individuals that startle"""'], {'size': 'fs'}), "('Proportion of individuals that startle', size=fs)\n", (39897, 39948), True, 'import matplotlib.pyplot as plt\n'), ((40193, 40283), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (40203, 40283), True, 'import matplotlib.pyplot as plt\n'), ((40283, 40306), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (40293, 40306), True, 'import matplotlib.pyplot as plt\n'), ((40471, 40481), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (40479, 40481), True, 'import matplotlib.pyplot as plt\n'), ((41752, 41806), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (41762, 41806), True, 'import matplotlib.pyplot as plt\n'), ((41815, 41907), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['("""99th percentile of acceleration \n during loom (BL/s""" + \'$^2$)\')'], {'size': 'fs'}), '("""99th percentile of acceleration \n during loom (BL/s""" +\n \'$^2$)\', size=fs)\n', (41825, 41907), True, 'import matplotlib.pyplot as plt\n'), ((41972, 42049), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (41982, 42049), True, 'import matplotlib.pyplot as plt\n'), ((42143, 42233), 
'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (42153, 42233), True, 'import matplotlib.pyplot as plt\n'), ((42233, 42256), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (42243, 42256), True, 'import matplotlib.pyplot as plt\n'), ((42406, 42416), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (42414, 42416), True, 'import matplotlib.pyplot as plt\n'), ((43352, 43406), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (43362, 43406), True, 'import matplotlib.pyplot as plt\n'), ((43415, 43507), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['("""99th percentile of acceleration \n during loom (BL/s""" + \'$^2$)\')'], {'size': 'fs'}), '("""99th percentile of acceleration \n during loom (BL/s""" +\n \'$^2$)\', size=fs)\n', (43425, 43507), True, 'import matplotlib.pyplot as plt\n'), ((43760, 43850), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (43770, 43850), True, 'import matplotlib.pyplot as plt\n'), ((43850, 43873), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (43860, 43873), True, 'import matplotlib.pyplot as plt\n'), ((44020, 44030), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (44028, 44030), True, 'import matplotlib.pyplot as plt\n'), ((45550, 45604), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (45560, 45604), True, 'import matplotlib.pyplot as plt\n'), ((45613, 45649), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""Distance (BL)"""'], {'size': 'fs'}), "('Distance (BL)', size=fs)\n", (45623, 45649), True, 'import matplotlib.pyplot as plt\n'), ((45708, 45785), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (45718, 45785), True, 'import matplotlib.pyplot as plt\n'), ((45879, 45969), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (45889, 45969), True, 'import matplotlib.pyplot as plt\n'), ((45970, 45993), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (45980, 45993), True, 'import matplotlib.pyplot as plt\n'), ((46143, 46153), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (46151, 46153), True, 'import matplotlib.pyplot as plt\n'), ((47031, 47085), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (47041, 47085), True, 'import matplotlib.pyplot as plt\n'), ((47094, 47130), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Distance (BL)"""'], {'size': 'fs'}), "('Distance (BL)', size=fs)\n", (47104, 47130), True, 'import matplotlib.pyplot as plt\n'), ((47195, 47267), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Loom"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Loom', framealpha=0.5)\n", (47205, 47267), True, 'import matplotlib.pyplot as plt\n'), ((47361, 47451), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', 
(47371, 47451), True, 'import matplotlib.pyplot as plt\n'), ((47452, 47475), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (47462, 47475), True, 'import matplotlib.pyplot as plt\n'), ((47625, 47635), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (47633, 47635), True, 'import matplotlib.pyplot as plt\n'), ((48753, 48807), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (48763, 48807), True, 'import matplotlib.pyplot as plt\n'), ((48816, 48895), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""99th percentile of acceleration \n before loom (BL/s)"""'], {'size': 'fs'}), '("""99th percentile of acceleration \n before loom (BL/s)""", size=fs)\n', (48826, 48895), True, 'import matplotlib.pyplot as plt\n'), ((49156, 49246), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (49166, 49246), True, 'import matplotlib.pyplot as plt\n'), ((49247, 49270), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (49257, 49270), True, 'import matplotlib.pyplot as plt\n'), ((49281, 49358), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (49291, 49358), True, 'import matplotlib.pyplot as plt\n'), ((49512, 49522), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (49520, 49522), True, 'import matplotlib.pyplot as plt\n'), ((50156, 50210), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (50166, 50210), True, 'import matplotlib.pyplot as plt\n'), 
((50219, 50311), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['("""99th percentile of acceleration \n before loom (BL/s""" + \'$^2$)\')'], {'size': 'fs'}), '("""99th percentile of acceleration \n before loom (BL/s""" +\n \'$^2$)\', size=fs)\n', (50229, 50311), True, 'import matplotlib.pyplot as plt\n'), ((50613, 50703), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (50623, 50703), True, 'import matplotlib.pyplot as plt\n'), ((50704, 50727), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (50714, 50727), True, 'import matplotlib.pyplot as plt\n'), ((50971, 50981), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (50979, 50981), True, 'import matplotlib.pyplot as plt\n'), ((55904, 55958), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (55914, 55958), True, 'import matplotlib.pyplot as plt\n'), ((55967, 56003), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Distance (BL)"""'], {'size': 'fs'}), "('Distance (BL)', size=fs)\n", (55977, 56003), True, 'import matplotlib.pyplot as plt\n'), ((56062, 56139), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (56072, 56139), True, 'import matplotlib.pyplot as plt\n'), ((56233, 56323), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (56243, 56323), True, 'import matplotlib.pyplot as plt\n'), ((56324, 56347), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], 
{'fontsize': 'fs'}), '(fontsize=fs)\n', (56334, 56347), True, 'import matplotlib.pyplot as plt\n'), ((56497, 56507), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (56505, 56507), True, 'import matplotlib.pyplot as plt\n'), ((57383, 57437), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (57393, 57437), True, 'import matplotlib.pyplot as plt\n'), ((57446, 57482), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Distance (BL)"""'], {'size': 'fs'}), "('Distance (BL)', size=fs)\n", (57456, 57482), True, 'import matplotlib.pyplot as plt\n'), ((57541, 57618), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (57551, 57618), True, 'import matplotlib.pyplot as plt\n'), ((57712, 57802), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (57722, 57802), True, 'import matplotlib.pyplot as plt\n'), ((57803, 57826), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (57813, 57826), True, 'import matplotlib.pyplot as plt\n'), ((57977, 57987), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (57985, 57987), True, 'import matplotlib.pyplot as plt\n'), ((59079, 59133), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (59089, 59133), True, 'import matplotlib.pyplot as plt\n'), ((59142, 59221), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Average Nearest Neighbor Distance \n before loom (BL)"""'], {'size': 'fs'}), '("""Average Nearest Neighbor Distance \n before loom (BL)""", 
size=fs)\n', (59152, 59221), True, 'import matplotlib.pyplot as plt\n'), ((59482, 59572), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (59492, 59572), True, 'import matplotlib.pyplot as plt\n'), ((59573, 59596), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (59583, 59596), True, 'import matplotlib.pyplot as plt\n'), ((59607, 59684), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (59617, 59684), True, 'import matplotlib.pyplot as plt\n'), ((59838, 59848), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (59846, 59848), True, 'import matplotlib.pyplot as plt\n'), ((60485, 60539), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (60495, 60539), True, 'import matplotlib.pyplot as plt\n'), ((60548, 60627), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Average Nearest Neighbor Distance \n before loom (BL)"""'], {'size': 'fs'}), '("""Average Nearest Neighbor Distance \n before loom (BL)""", size=fs)\n', (60558, 60627), True, 'import matplotlib.pyplot as plt\n'), ((60934, 61024), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (60944, 61024), True, 'import matplotlib.pyplot as plt\n'), ((61025, 61048), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (61035, 61048), True, 'import matplotlib.pyplot as plt\n'), ((61292, 61302), 'matplotlib.pyplot.show', 'plt.show', 
([], {}), '()\n', (61300, 61302), True, 'import matplotlib.pyplot as plt\n'), ((62632, 62686), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (62642, 62686), True, 'import matplotlib.pyplot as plt\n'), ((62695, 62736), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Local Polarization"""'], {'size': 'fs'}), "('Local Polarization', size=fs)\n", (62705, 62736), True, 'import matplotlib.pyplot as plt\n'), ((62796, 62868), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Loom"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Loom', framealpha=0.5)\n", (62806, 62868), True, 'import matplotlib.pyplot as plt\n'), ((62962, 63052), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (62972, 63052), True, 'import matplotlib.pyplot as plt\n'), ((63053, 63076), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (63063, 63076), True, 'import matplotlib.pyplot as plt\n'), ((63222, 63232), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (63230, 63232), True, 'import matplotlib.pyplot as plt\n'), ((63988, 64042), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (63998, 64042), True, 'import matplotlib.pyplot as plt\n'), ((64051, 64092), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Local Polarization"""'], {'size': 'fs'}), "('Local Polarization', size=fs)\n", (64061, 64092), True, 'import matplotlib.pyplot as plt\n'), ((64152, 64224), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Loom"""', 'framealpha': '(0.5)'}), 
"(fontsize=fs, loc='upper right', title='Loom', framealpha=0.5)\n", (64162, 64224), True, 'import matplotlib.pyplot as plt\n'), ((64318, 64408), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (64328, 64408), True, 'import matplotlib.pyplot as plt\n'), ((64409, 64432), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (64419, 64432), True, 'import matplotlib.pyplot as plt\n'), ((64579, 64589), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (64587, 64589), True, 'import matplotlib.pyplot as plt\n'), ((65693, 65747), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (65703, 65747), True, 'import matplotlib.pyplot as plt\n'), ((65756, 65806), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Convex hull area after loom"""'], {'size': 'fs'}), "('Convex hull area after loom', size=fs)\n", (65766, 65806), True, 'import matplotlib.pyplot as plt\n'), ((65891, 65968), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (65901, 65968), True, 'import matplotlib.pyplot as plt\n'), ((66062, 66152), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (66072, 66152), True, 'import matplotlib.pyplot as plt\n'), ((66153, 66176), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (66163, 66176), True, 'import matplotlib.pyplot as plt\n'), ((66341, 66351), 'matplotlib.pyplot.show', 'plt.show', 
([], {}), '()\n', (66349, 66351), True, 'import matplotlib.pyplot as plt\n'), ((67005, 67059), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (67015, 67059), True, 'import matplotlib.pyplot as plt\n'), ((67068, 67118), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Convex hull area after loom"""'], {'size': 'fs'}), "('Convex hull area after loom', size=fs)\n", (67078, 67118), True, 'import matplotlib.pyplot as plt\n'), ((67375, 67465), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (67385, 67465), True, 'import matplotlib.pyplot as plt\n'), ((67466, 67489), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (67476, 67489), True, 'import matplotlib.pyplot as plt\n'), ((67654, 67664), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (67662, 67664), True, 'import matplotlib.pyplot as plt\n'), ((68783, 68837), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (68793, 68837), True, 'import matplotlib.pyplot as plt\n'), ((68846, 68897), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Convex hull area during loom"""'], {'size': 'fs'}), "('Convex hull area during loom', size=fs)\n", (68856, 68897), True, 'import matplotlib.pyplot as plt\n'), ((68982, 69059), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (68992, 69059), True, 'import matplotlib.pyplot as plt\n'), ((69153, 69243), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 
13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (69163, 69243), True, 'import matplotlib.pyplot as plt\n'), ((69244, 69267), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (69254, 69267), True, 'import matplotlib.pyplot as plt\n'), ((69433, 69443), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (69441, 69443), True, 'import matplotlib.pyplot as plt\n'), ((70113, 70167), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (70123, 70167), True, 'import matplotlib.pyplot as plt\n'), ((70176, 70227), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Convex hull area during loom"""'], {'size': 'fs'}), "('Convex hull area during loom', size=fs)\n", (70186, 70227), True, 'import matplotlib.pyplot as plt\n'), ((70312, 70389), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (70322, 70389), True, 'import matplotlib.pyplot as plt\n'), ((70483, 70573), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (70493, 70573), True, 'import matplotlib.pyplot as plt\n'), ((70574, 70597), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (70584, 70597), True, 'import matplotlib.pyplot as plt\n'), ((70763, 70773), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (70771, 70773), True, 'import matplotlib.pyplot as plt\n'), ((71872, 71926), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', 
size=fs)\n", (71882, 71926), True, 'import matplotlib.pyplot as plt\n'), ((71935, 71994), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('Average acceleration (BL/s' + '$^2$)')"], {'size': 'fs'}), "('Average acceleration (BL/s' + '$^2$)', size=fs)\n", (71945, 71994), True, 'import matplotlib.pyplot as plt\n'), ((72257, 72347), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (72267, 72347), True, 'import matplotlib.pyplot as plt\n'), ((72348, 72371), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (72358, 72371), True, 'import matplotlib.pyplot as plt\n'), ((72382, 72459), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (72392, 72459), True, 'import matplotlib.pyplot as plt\n'), ((72613, 72623), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (72621, 72623), True, 'import matplotlib.pyplot as plt\n'), ((73251, 73305), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (73261, 73305), True, 'import matplotlib.pyplot as plt\n'), ((73314, 73373), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('Average acceleration (BL/s' + '$^2$)')"], {'size': 'fs'}), "('Average acceleration (BL/s' + '$^2$)', size=fs)\n", (73324, 73373), True, 'import matplotlib.pyplot as plt\n'), ((73682, 73772), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (73692, 73772), True, 'import matplotlib.pyplot as plt\n'), ((73773, 73796), 
'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (73783, 73796), True, 'import matplotlib.pyplot as plt\n'), ((74040, 74050), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (74048, 74050), True, 'import matplotlib.pyplot as plt\n'), ((75161, 75215), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (75171, 75215), True, 'import matplotlib.pyplot as plt\n'), ((75224, 75282), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('Median acceleration (BL/s' + '$^2$)')"], {'size': 'fs'}), "('Median acceleration (BL/s' + '$^2$)', size=fs)\n", (75234, 75282), True, 'import matplotlib.pyplot as plt\n'), ((75546, 75636), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (75556, 75636), True, 'import matplotlib.pyplot as plt\n'), ((75637, 75660), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (75647, 75660), True, 'import matplotlib.pyplot as plt\n'), ((75671, 75748), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (75681, 75748), True, 'import matplotlib.pyplot as plt\n'), ((75901, 75911), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (75909, 75911), True, 'import matplotlib.pyplot as plt\n'), ((76545, 76599), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (76555, 76599), True, 'import matplotlib.pyplot as plt\n'), ((76608, 76666), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('Median acceleration (BL/s' + '$^2$)')"], {'size': 'fs'}), 
"('Median acceleration (BL/s' + '$^2$)', size=fs)\n", (76618, 76666), True, 'import matplotlib.pyplot as plt\n'), ((76975, 77065), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (76985, 77065), True, 'import matplotlib.pyplot as plt\n'), ((77066, 77089), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (77076, 77089), True, 'import matplotlib.pyplot as plt\n'), ((77332, 77342), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (77340, 77342), True, 'import matplotlib.pyplot as plt\n'), ((78438, 78492), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (78448, 78492), True, 'import matplotlib.pyplot as plt\n'), ((78501, 78557), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('Mean acceleration (BL/s' + '$^2$)')"], {'size': 'fs'}), "('Mean acceleration (BL/s' + '$^2$)', size=fs)\n", (78511, 78557), True, 'import matplotlib.pyplot as plt\n'), ((78820, 78910), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (78830, 78910), True, 'import matplotlib.pyplot as plt\n'), ((78911, 78934), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (78921, 78934), True, 'import matplotlib.pyplot as plt\n'), ((78945, 79022), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (78955, 79022), True, 'import matplotlib.pyplot as plt\n'), ((79176, 79186), 'matplotlib.pyplot.show', 'plt.show', ([], 
{}), '()\n', (79184, 79186), True, 'import matplotlib.pyplot as plt\n'), ((79814, 79868), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (79824, 79868), True, 'import matplotlib.pyplot as plt\n'), ((79877, 79933), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('Mean acceleration (BL/s' + '$^2$)')"], {'size': 'fs'}), "('Mean acceleration (BL/s' + '$^2$)', size=fs)\n", (79887, 79933), True, 'import matplotlib.pyplot as plt\n'), ((80242, 80332), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (80252, 80332), True, 'import matplotlib.pyplot as plt\n'), ((80333, 80356), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (80343, 80356), True, 'import matplotlib.pyplot as plt\n'), ((80600, 80610), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (80608, 80610), True, 'import matplotlib.pyplot as plt\n'), ((81790, 81844), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (81800, 81844), True, 'import matplotlib.pyplot as plt\n'), ((81853, 81918), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability for pca coeff \n to be >= 0"""'], {'size': 'fs'}), '("""Probability for pca coeff \n to be >= 0""", size=fs)\n', (81863, 81918), True, 'import matplotlib.pyplot as plt\n'), ((82224, 82314), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (82234, 82314), True, 'import matplotlib.pyplot as plt\n'), ((82315, 82338), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 
'fs'}), '(fontsize=fs)\n', (82325, 82338), True, 'import matplotlib.pyplot as plt\n'), ((82579, 82589), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (82587, 82589), True, 'import matplotlib.pyplot as plt\n'), ((83745, 83799), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (83755, 83799), True, 'import matplotlib.pyplot as plt\n'), ((83808, 83887), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""99th percentile of acceleration \n before loom (BL/s)"""'], {'size': 'fs'}), '("""99th percentile of acceleration \n before loom (BL/s)""", size=fs)\n', (83818, 83887), True, 'import matplotlib.pyplot as plt\n'), ((84148, 84238), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (84158, 84238), True, 'import matplotlib.pyplot as plt\n'), ((84239, 84262), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (84249, 84262), True, 'import matplotlib.pyplot as plt\n'), ((84273, 84350), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='upper right', title='Groupsize', framealpha=0.5)\n", (84283, 84350), True, 'import matplotlib.pyplot as plt\n'), ((84528, 84538), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (84536, 84538), True, 'import matplotlib.pyplot as plt\n'), ((85172, 85226), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (85182, 85226), True, 'import matplotlib.pyplot as plt\n'), ((85235, 85327), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['("""99th percentile of acceleration \n before loom (BL/s""" + \'$^2$)\')'], 
{'size': 'fs'}), '("""99th percentile of acceleration \n before loom (BL/s""" +\n \'$^2$)\', size=fs)\n', (85245, 85327), True, 'import matplotlib.pyplot as plt\n'), ((85629, 85719), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (85639, 85719), True, 'import matplotlib.pyplot as plt\n'), ((85720, 85743), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (85730, 85743), True, 'import matplotlib.pyplot as plt\n'), ((86011, 86021), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (86019, 86021), True, 'import matplotlib.pyplot as plt\n'), ((86468, 86488), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (86479, 86488), True, 'import numpy as np\n'), ((86975, 87029), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (86985, 87029), True, 'import matplotlib.pyplot as plt\n'), ((87038, 87092), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""99th percentile of speed (BL/s)"""'], {'size': 'fs'}), "('99th percentile of speed (BL/s)', size=fs)\n", (87048, 87092), True, 'import matplotlib.pyplot as plt\n'), ((87356, 87446), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (87366, 87446), True, 'import matplotlib.pyplot as plt\n'), ((87447, 87470), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (87457, 87470), True, 'import matplotlib.pyplot as plt\n'), ((87628, 87638), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (87636, 87638), True, 'import matplotlib.pyplot as plt\n'), ((88405, 88459), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (88415, 88459), True, 'import matplotlib.pyplot as plt\n'), ((88468, 88522), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""99th percentile of speed (BL/s)"""'], {'size': 'fs'}), "('99th percentile of speed (BL/s)', size=fs)\n", (88478, 88522), True, 'import matplotlib.pyplot as plt\n'), ((88705, 88778), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""lower right"""', 'title': '"""Model"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='lower right', title='Model', framealpha=0.5)\n", (88715, 88778), True, 'import matplotlib.pyplot as plt\n'), ((88929, 89019), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (88939, 89019), True, 'import matplotlib.pyplot as plt\n'), ((89020, 89043), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (89030, 89043), True, 'import matplotlib.pyplot as plt\n'), ((89207, 89217), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (89215, 89217), True, 'import matplotlib.pyplot as plt\n'), ((90412, 90466), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (90422, 90466), True, 'import matplotlib.pyplot as plt\n'), ((90475, 90545), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('99th percentile of acceleration (BL/s' + '$^2$)')"], {'size': 'fs'}), "('99th percentile of acceleration (BL/s' + '$^2$)', size=fs)\n", (90485, 90545), True, 'import matplotlib.pyplot as plt\n'), ((90808, 90898), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 
17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (90818, 90898), True, 'import matplotlib.pyplot as plt\n'), ((90899, 90922), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (90909, 90922), True, 'import matplotlib.pyplot as plt\n'), ((90933, 91010), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""lower right"""', 'title': '"""Groupsize"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='lower right', title='Groupsize', framealpha=0.5)\n", (90943, 91010), True, 'import matplotlib.pyplot as plt\n'), ((91176, 91186), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (91184, 91186), True, 'import matplotlib.pyplot as plt\n'), ((92256, 92310), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': 'fs'}), "('Temperature ' + '($^{\\\\circ}$C)', size=fs)\n", (92266, 92310), True, 'import matplotlib.pyplot as plt\n'), ((92319, 92389), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('99th percentile of acceleration (BL/s' + '$^2$)')"], {'size': 'fs'}), "('99th percentile of acceleration (BL/s' + '$^2$)', size=fs)\n", (92329, 92389), True, 'import matplotlib.pyplot as plt\n'), ((92698, 92788), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': '[9, 13, 17, 21, 25, 29]', 'labels': '[9, 13, 17, 21, 25, 29]', 'fontsize': 'fs'}), '(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29],\n fontsize=fs)\n', (92708, 92788), True, 'import matplotlib.pyplot as plt\n'), ((92789, 92812), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fs'}), '(fontsize=fs)\n', (92799, 92812), True, 'import matplotlib.pyplot as plt\n'), ((92832, 92905), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""lower right"""', 'title': '"""Model"""', 'framealpha': '(0.5)'}), "(fontsize=fs, loc='lower right', title='Model', framealpha=0.5)\n", (92842, 92905), True, 'import matplotlib.pyplot as plt\n'), ((93217, 93227), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (93225, 93227), True, 'import matplotlib.pyplot as plt\n'), ((34731, 34751), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (34742, 34751), True, 'import numpy as np\n'), ((36218, 36238), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (36229, 36238), True, 'import numpy as np\n'), ((37841, 37861), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (37852, 37861), True, 'import numpy as np\n'), ((39246, 39266), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (39257, 39266), True, 'import numpy as np\n'), ((42477, 42497), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (42488, 42497), True, 'import numpy as np\n'), ((87690, 87710), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (87701, 87710), True, 'import numpy as np\n'), ((91274, 91294), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (91285, 91294), True, 'import numpy as np\n'), ((2451, 2524), 'numpy.exp', 'np.exp', (['data1.annd[data1.gs == i][data1.date == 18106][data1.trial == 10]'], {}), '(data1.annd[data1.gs == i][data1.date == 18106][data1.trial == 10])\n', (2457, 2524), True, 'import numpy as np\n'), ((2703, 2779), 'numpy.exp', 'np.exp', (['data1.annd025[data1.gs == i][data1.date == 18106][data1.trial == 10]'], {}), '(data1.annd025[data1.gs == i][data1.date == 18106][data1.trial == 10])\n', (2709, 2779), True, 'import numpy as np\n'), ((2797, 2873), 'numpy.exp', 'np.exp', (['data1.annd975[data1.gs == i][data1.date == 18106][data1.trial == 10]'], {}), '(data1.annd975[data1.gs == i][data1.date == 18106][data1.trial == 10])\n', (2803, 2873), True, 'import numpy as np\n'), ((3900, 3973), 'numpy.exp', 'np.exp', (['data1.annd[data1.gs == i][data1.date == 18106][data1.trial == 10]'], {}), '(data1.annd[data1.gs == i][data1.date == 18106][data1.trial == 
10])\n', (3906, 3973), True, 'import numpy as np\n'), ((4152, 4228), 'numpy.exp', 'np.exp', (['data1.annd025[data1.gs == i][data1.date == 18106][data1.trial == 10]'], {}), '(data1.annd025[data1.gs == i][data1.date == 18106][data1.trial == 10])\n', (4158, 4228), True, 'import numpy as np\n'), ((4246, 4322), 'numpy.exp', 'np.exp', (['data1.annd975[data1.gs == i][data1.date == 18106][data1.trial == 10]'], {}), '(data1.annd975[data1.gs == i][data1.date == 18106][data1.trial == 10])\n', (4252, 4322), True, 'import numpy as np\n'), ((18740, 18761), 'numpy.exp', 'np.exp', (['data1.speed99'], {}), '(data1.speed99)\n', (18746, 18761), True, 'import numpy as np\n'), ((18873, 18898), 'numpy.exp', 'np.exp', (['data1.speed99_025'], {}), '(data1.speed99_025)\n', (18879, 18898), True, 'import numpy as np\n'), ((18916, 18941), 'numpy.exp', 'np.exp', (['data1.speed99_975'], {}), '(data1.speed99_975)\n', (18922, 18941), True, 'import numpy as np\n'), ((19777, 19798), 'numpy.exp', 'np.exp', (['data1.speed99'], {}), '(data1.speed99)\n', (19783, 19798), True, 'import numpy as np\n'), ((19910, 19935), 'numpy.exp', 'np.exp', (['data1.speed99_025'], {}), '(data1.speed99_025)\n', (19916, 19935), True, 'import numpy as np\n'), ((19953, 19978), 'numpy.exp', 'np.exp', (['data1.speed99_975'], {}), '(data1.speed99_975)\n', (19959, 19978), True, 'import numpy as np\n'), ((23733, 23754), 'numpy.exp', 'np.exp', (['data1.speed99'], {}), '(data1.speed99)\n', (23739, 23754), True, 'import numpy as np\n'), ((23866, 23891), 'numpy.exp', 'np.exp', (['data1.speed99_025'], {}), '(data1.speed99_025)\n', (23872, 23891), True, 'import numpy as np\n'), ((23909, 23934), 'numpy.exp', 'np.exp', (['data1.speed99_975'], {}), '(data1.speed99_975)\n', (23915, 23934), True, 'import numpy as np\n'), ((24747, 24768), 'numpy.exp', 'np.exp', (['data1.speed99'], {}), '(data1.speed99)\n', (24753, 24768), True, 'import numpy as np\n'), ((24880, 24905), 'numpy.exp', 'np.exp', (['data1.speed99_025'], {}), 
'(data1.speed99_025)\n', (24886, 24905), True, 'import numpy as np\n'), ((24923, 24948), 'numpy.exp', 'np.exp', (['data1.speed99_975'], {}), '(data1.speed99_975)\n', (24929, 24948), True, 'import numpy as np\n'), ((26204, 26225), 'numpy.exp', 'np.exp', (['data1.speed99'], {}), '(data1.speed99)\n', (26210, 26225), True, 'import numpy as np\n'), ((26337, 26362), 'numpy.exp', 'np.exp', (['data1.speed99_025'], {}), '(data1.speed99_025)\n', (26343, 26362), True, 'import numpy as np\n'), ((26380, 26405), 'numpy.exp', 'np.exp', (['data1.speed99_975'], {}), '(data1.speed99_975)\n', (26386, 26405), True, 'import numpy as np\n'), ((27207, 27228), 'numpy.exp', 'np.exp', (['data1.speed99'], {}), '(data1.speed99)\n', (27213, 27228), True, 'import numpy as np\n'), ((27340, 27365), 'numpy.exp', 'np.exp', (['data1.speed99_025'], {}), '(data1.speed99_025)\n', (27346, 27365), True, 'import numpy as np\n'), ((27383, 27408), 'numpy.exp', 'np.exp', (['data1.speed99_975'], {}), '(data1.speed99_975)\n', (27389, 27408), True, 'import numpy as np\n'), ((58700, 58733), 'numpy.exp', 'np.exp', (['data1.annd[data1.gs == i]'], {}), '(data1.annd[data1.gs == i])\n', (58706, 58733), True, 'import numpy as np\n'), ((58872, 58908), 'numpy.exp', 'np.exp', (['data1.annd025[data1.gs == i]'], {}), '(data1.annd025[data1.gs == i])\n', (58878, 58908), True, 'import numpy as np\n'), ((58927, 58963), 'numpy.exp', 'np.exp', (['data1.annd975[data1.gs == i]'], {}), '(data1.annd975[data1.gs == i])\n', (58933, 58963), True, 'import numpy as np\n'), ((60093, 60126), 'numpy.exp', 'np.exp', (['data1.annd[data1.gs == i]'], {}), '(data1.annd[data1.gs == i])\n', (60099, 60126), True, 'import numpy as np\n'), ((60265, 60301), 'numpy.exp', 'np.exp', (['data1.annd025[data1.gs == i]'], {}), '(data1.annd025[data1.gs == i])\n', (60271, 60301), True, 'import numpy as np\n'), ((60320, 60356), 'numpy.exp', 'np.exp', (['data1.annd975[data1.gs == i]'], {}), '(data1.annd975[data1.gs == i])\n', (60326, 60356), True, 'import numpy 
as np\n'), ((86696, 86717), 'numpy.exp', 'np.exp', (['data1.speed99'], {}), '(data1.speed99)\n', (86702, 86717), True, 'import numpy as np\n'), ((86829, 86854), 'numpy.exp', 'np.exp', (['data1.speed99_025'], {}), '(data1.speed99_025)\n', (86835, 86854), True, 'import numpy as np\n'), ((86872, 86897), 'numpy.exp', 'np.exp', (['data1.speed99_975'], {}), '(data1.speed99_975)\n', (86878, 86897), True, 'import numpy as np\n'), ((87764, 87785), 'numpy.exp', 'np.exp', (['data1.speed99'], {}), '(data1.speed99)\n', (87770, 87785), True, 'import numpy as np\n'), ((87889, 87910), 'numpy.exp', 'np.exp', (['data3.speed99'], {}), '(data3.speed99)\n', (87895, 87910), True, 'import numpy as np\n'), ((88024, 88049), 'numpy.exp', 'np.exp', (['data1.speed99_025'], {}), '(data1.speed99_025)\n', (88030, 88049), True, 'import numpy as np\n'), ((88067, 88092), 'numpy.exp', 'np.exp', (['data1.speed99_975'], {}), '(data1.speed99_975)\n', (88073, 88092), True, 'import numpy as np\n'), ((88237, 88262), 'numpy.exp', 'np.exp', (['data3.speed99_025'], {}), '(data3.speed99_025)\n', (88243, 88262), True, 'import numpy as np\n'), ((88280, 88305), 'numpy.exp', 'np.exp', (['data3.speed99_975'], {}), '(data3.speed99_975)\n', (88286, 88305), True, 'import numpy as np\n'), ((15510, 15581), 'numpy.exp', 'np.exp', (['data1.hull[data1.gs == i][data1.loom == j][data1.date == 18106]'], {}), '(data1.hull[data1.gs == i][data1.loom == j][data1.date == 18106])\n', (15516, 15581), True, 'import numpy as np\n'), ((15789, 15863), 'numpy.exp', 'np.exp', (['data1.hull025[data1.gs == i][data1.loom == j][data1.date == 18106]'], {}), '(data1.hull025[data1.gs == i][data1.loom == j][data1.date == 18106])\n', (15795, 15863), True, 'import numpy as np\n'), ((15885, 15959), 'numpy.exp', 'np.exp', (['data1.hull975[data1.gs == i][data1.loom == j][data1.date == 18106]'], {}), '(data1.hull975[data1.gs == i][data1.loom == j][data1.date == 18106])\n', (15891, 15959), True, 'import numpy as np\n'), ((17042, 17113), 'numpy.exp', 
'np.exp', (['data1.hull[data1.gs == i][data1.loom == j][data1.date == 18106]'], {}), '(data1.hull[data1.gs == i][data1.loom == j][data1.date == 18106])\n', (17048, 17113), True, 'import numpy as np\n'), ((17321, 17395), 'numpy.exp', 'np.exp', (['data1.hull025[data1.gs == i][data1.loom == j][data1.date == 18106]'], {}), '(data1.hull025[data1.gs == i][data1.loom == j][data1.date == 18106])\n', (17327, 17395), True, 'import numpy as np\n'), ((17417, 17491), 'numpy.exp', 'np.exp', (['data1.hull975[data1.gs == i][data1.loom == j][data1.date == 18106]'], {}), '(data1.hull975[data1.gs == i][data1.loom == j][data1.date == 18106])\n', (17423, 17491), True, 'import numpy as np\n'), ((41116, 41210), 'numpy.exp', 'np.exp', (['data1.acc99[data1.loom == 1][data1.date == 18106][data1.t == 1200][data1.gs ==\n i]'], {}), '(data1.acc99[data1.loom == 1][data1.date == 18106][data1.t == 1200][\n data1.gs == i])\n', (41122, 41210), True, 'import numpy as np\n'), ((41419, 41517), 'numpy.exp', 'np.exp', (['data1.acc99_025[data1.loom == 1][data1.date == 18106][data1.t == 1200][\n data1.gs == i]'], {}), '(data1.acc99_025[data1.loom == 1][data1.date == 18106][data1.t == \n 1200][data1.gs == i])\n', (41425, 41517), True, 'import numpy as np\n'), ((41534, 41632), 'numpy.exp', 'np.exp', (['data1.acc99_975[data1.loom == 1][data1.date == 18106][data1.t == 1200][\n data1.gs == i]'], {}), '(data1.acc99_975[data1.loom == 1][data1.date == 18106][data1.t == \n 1200][data1.gs == i])\n', (41540, 41632), True, 'import numpy as np\n'), ((42690, 42784), 'numpy.exp', 'np.exp', (['data1.acc99[data1.loom == 1][data1.date == 18106][data1.t == 1200][data1.gs ==\n i]'], {}), '(data1.acc99[data1.loom == 1][data1.date == 18106][data1.t == 1200][\n data1.gs == i])\n', (42696, 42784), True, 'import numpy as np\n'), ((42993, 43091), 'numpy.exp', 'np.exp', (['data1.acc99_025[data1.loom == 1][data1.date == 18106][data1.t == 1200][\n data1.gs == i]'], {}), '(data1.acc99_025[data1.loom == 1][data1.date == 18106][data1.t 
== \n 1200][data1.gs == i])\n', (42999, 43091), True, 'import numpy as np\n'), ((43108, 43206), 'numpy.exp', 'np.exp', (['data1.acc99_975[data1.loom == 1][data1.date == 18106][data1.t == 1200][\n data1.gs == i]'], {}), '(data1.acc99_975[data1.loom == 1][data1.date == 18106][data1.t == \n 1200][data1.gs == i])\n', (43114, 43206), True, 'import numpy as np\n'), ((44990, 45065), 'numpy.exp', 'np.exp', (['data1.distance[data1.gs == i][data1.loom == 1][data1.date == 18106]'], {}), '(data1.distance[data1.gs == i][data1.loom == 1][data1.date == 18106])\n', (44996, 45065), True, 'import numpy as np\n'), ((45260, 45339), 'numpy.exp', 'np.exp', (['data1.distance_025[data1.gs == i][data1.loom == 1][data1.date == 18106]'], {}), '(data1.distance_025[data1.gs == i][data1.loom == 1][data1.date == 18106])\n', (45266, 45339), True, 'import numpy as np\n'), ((45360, 45439), 'numpy.exp', 'np.exp', (['data1.distance_975[data1.gs == i][data1.loom == 1][data1.date == 18106]'], {}), '(data1.distance_975[data1.gs == i][data1.loom == 1][data1.date == 18106])\n', (45366, 45439), True, 'import numpy as np\n'), ((48363, 48397), 'numpy.exp', 'np.exp', (['data1.acc99[data1.gs == i]'], {}), '(data1.acc99[data1.gs == i])\n', (48369, 48397), True, 'import numpy as np\n'), ((48538, 48576), 'numpy.exp', 'np.exp', (['data1.acc99_025[data1.gs == i]'], {}), '(data1.acc99_025[data1.gs == i])\n', (48544, 48576), True, 'import numpy as np\n'), ((48597, 48635), 'numpy.exp', 'np.exp', (['data1.acc99_975[data1.gs == i]'], {}), '(data1.acc99_975[data1.gs == i])\n', (48603, 48635), True, 'import numpy as np\n'), ((49765, 49799), 'numpy.exp', 'np.exp', (['data1.acc99[data1.gs == i]'], {}), '(data1.acc99[data1.gs == i])\n', (49771, 49799), True, 'import numpy as np\n'), ((49940, 49978), 'numpy.exp', 'np.exp', (['data1.acc99_025[data1.gs == i]'], {}), '(data1.acc99_025[data1.gs == i])\n', (49946, 49978), True, 'import numpy as np\n'), ((49999, 50037), 'numpy.exp', 'np.exp', (['data1.acc99_975[data1.gs == i]'], 
{}), '(data1.acc99_975[data1.gs == i])\n', (50005, 50037), True, 'import numpy as np\n'), ((55344, 55419), 'numpy.exp', 'np.exp', (['data1.distance[data1.gs == i][data1.loom == 1][data1.date == 18106]'], {}), '(data1.distance[data1.gs == i][data1.loom == 1][data1.date == 18106])\n', (55350, 55419), True, 'import numpy as np\n'), ((55614, 55693), 'numpy.exp', 'np.exp', (['data1.distance_025[data1.gs == i][data1.loom == 1][data1.date == 18106]'], {}), '(data1.distance_025[data1.gs == i][data1.loom == 1][data1.date == 18106])\n', (55620, 55693), True, 'import numpy as np\n'), ((55714, 55793), 'numpy.exp', 'np.exp', (['data1.distance_975[data1.gs == i][data1.loom == 1][data1.date == 18106]'], {}), '(data1.distance_975[data1.gs == i][data1.loom == 1][data1.date == 18106])\n', (55720, 55793), True, 'import numpy as np\n'), ((71488, 71520), 'numpy.exp', 'np.exp', (['data1.acc[data1.gs == i]'], {}), '(data1.acc[data1.gs == i])\n', (71494, 71520), True, 'import numpy as np\n'), ((71661, 71697), 'numpy.exp', 'np.exp', (['data1.acc_025[data1.gs == i]'], {}), '(data1.acc_025[data1.gs == i])\n', (71667, 71697), True, 'import numpy as np\n'), ((71718, 71754), 'numpy.exp', 'np.exp', (['data1.acc_975[data1.gs == i]'], {}), '(data1.acc_975[data1.gs == i])\n', (71724, 71754), True, 'import numpy as np\n'), ((72866, 72898), 'numpy.exp', 'np.exp', (['data1.acc[data1.gs == i]'], {}), '(data1.acc[data1.gs == i])\n', (72872, 72898), True, 'import numpy as np\n'), ((73039, 73075), 'numpy.exp', 'np.exp', (['data1.acc_025[data1.gs == i]'], {}), '(data1.acc_025[data1.gs == i])\n', (73045, 73075), True, 'import numpy as np\n'), ((73096, 73132), 'numpy.exp', 'np.exp', (['data1.acc_975[data1.gs == i]'], {}), '(data1.acc_975[data1.gs == i])\n', (73102, 73132), True, 'import numpy as np\n'), ((74771, 74805), 'numpy.exp', 'np.exp', (['data1.acc50[data1.gs == i]'], {}), '(data1.acc50[data1.gs == i])\n', (74777, 74805), True, 'import numpy as np\n'), ((74946, 74984), 'numpy.exp', 'np.exp', 
(['data1.acc50_025[data1.gs == i]'], {}), '(data1.acc50_025[data1.gs == i])\n', (74952, 74984), True, 'import numpy as np\n'), ((75005, 75043), 'numpy.exp', 'np.exp', (['data1.acc50_975[data1.gs == i]'], {}), '(data1.acc50_975[data1.gs == i])\n', (75011, 75043), True, 'import numpy as np\n'), ((76154, 76188), 'numpy.exp', 'np.exp', (['data1.acc50[data1.gs == i]'], {}), '(data1.acc50[data1.gs == i])\n', (76160, 76188), True, 'import numpy as np\n'), ((76329, 76367), 'numpy.exp', 'np.exp', (['data1.acc50_025[data1.gs == i]'], {}), '(data1.acc50_025[data1.gs == i])\n', (76335, 76367), True, 'import numpy as np\n'), ((76388, 76426), 'numpy.exp', 'np.exp', (['data1.acc50_975[data1.gs == i]'], {}), '(data1.acc50_975[data1.gs == i])\n', (76394, 76426), True, 'import numpy as np\n'), ((78054, 78086), 'numpy.exp', 'np.exp', (['data1.acc[data1.gs == i]'], {}), '(data1.acc[data1.gs == i])\n', (78060, 78086), True, 'import numpy as np\n'), ((78227, 78263), 'numpy.exp', 'np.exp', (['data1.acc_025[data1.gs == i]'], {}), '(data1.acc_025[data1.gs == i])\n', (78233, 78263), True, 'import numpy as np\n'), ((78284, 78320), 'numpy.exp', 'np.exp', (['data1.acc_975[data1.gs == i]'], {}), '(data1.acc_975[data1.gs == i])\n', (78290, 78320), True, 'import numpy as np\n'), ((79429, 79461), 'numpy.exp', 'np.exp', (['data1.acc[data1.gs == i]'], {}), '(data1.acc[data1.gs == i])\n', (79435, 79461), True, 'import numpy as np\n'), ((79602, 79638), 'numpy.exp', 'np.exp', (['data1.acc_025[data1.gs == i]'], {}), '(data1.acc_025[data1.gs == i])\n', (79608, 79638), True, 'import numpy as np\n'), ((79659, 79695), 'numpy.exp', 'np.exp', (['data1.acc_975[data1.gs == i]'], {}), '(data1.acc_975[data1.gs == i])\n', (79665, 79695), True, 'import numpy as np\n'), ((83355, 83389), 'numpy.exp', 'np.exp', (['data1.acc99[data1.gs == i]'], {}), '(data1.acc99[data1.gs == i])\n', (83361, 83389), True, 'import numpy as np\n'), ((83530, 83568), 'numpy.exp', 'np.exp', (['data1.acc99_025[data1.gs == i]'], {}), 
'(data1.acc99_025[data1.gs == i])\n', (83536, 83568), True, 'import numpy as np\n'), ((83589, 83627), 'numpy.exp', 'np.exp', (['data1.acc99_975[data1.gs == i]'], {}), '(data1.acc99_975[data1.gs == i])\n', (83595, 83627), True, 'import numpy as np\n'), ((84781, 84815), 'numpy.exp', 'np.exp', (['data1.acc99[data1.gs == i]'], {}), '(data1.acc99[data1.gs == i])\n', (84787, 84815), True, 'import numpy as np\n'), ((84956, 84994), 'numpy.exp', 'np.exp', (['data1.acc99_025[data1.gs == i]'], {}), '(data1.acc99_025[data1.gs == i])\n', (84962, 84994), True, 'import numpy as np\n'), ((85015, 85053), 'numpy.exp', 'np.exp', (['data1.acc99_975[data1.gs == i]'], {}), '(data1.acc99_975[data1.gs == i])\n', (85021, 85053), True, 'import numpy as np\n'), ((90022, 90056), 'numpy.exp', 'np.exp', (['data1.acc99[data1.gs == i]'], {}), '(data1.acc99[data1.gs == i])\n', (90028, 90056), True, 'import numpy as np\n'), ((90197, 90235), 'numpy.exp', 'np.exp', (['data1.acc99_025[data1.gs == i]'], {}), '(data1.acc99_025[data1.gs == i])\n', (90203, 90235), True, 'import numpy as np\n'), ((90256, 90294), 'numpy.exp', 'np.exp', (['data1.acc99_975[data1.gs == i]'], {}), '(data1.acc99_975[data1.gs == i])\n', (90262, 90294), True, 'import numpy as np\n'), ((91421, 91455), 'numpy.exp', 'np.exp', (['data1.acc99[data1.gs == i]'], {}), '(data1.acc99[data1.gs == i])\n', (91427, 91455), True, 'import numpy as np\n'), ((91596, 91634), 'numpy.exp', 'np.exp', (['data1.acc99_025[data1.gs == i]'], {}), '(data1.acc99_025[data1.gs == i])\n', (91602, 91634), True, 'import numpy as np\n'), ((91655, 91693), 'numpy.exp', 'np.exp', (['data1.acc99_975[data1.gs == i]'], {}), '(data1.acc99_975[data1.gs == i])\n', (91661, 91693), True, 'import numpy as np\n'), ((91858, 91892), 'numpy.exp', 'np.exp', (['data3.acc99[data3.gs == i]'], {}), '(data3.acc99[data3.gs == i])\n', (91864, 91892), True, 'import numpy as np\n'), ((92035, 92073), 'numpy.exp', 'np.exp', (['data3.acc99_025[data3.gs == i]'], {}), '(data3.acc99_025[data3.gs 
== i])\n', (92041, 92073), True, 'import numpy as np\n'), ((92094, 92132), 'numpy.exp', 'np.exp', (['data3.acc99_975[data3.gs == i]'], {}), '(data3.acc99_975[data3.gs == i])\n', (92100, 92132), True, 'import numpy as np\n'), ((46460, 46535), 'numpy.exp', 'np.exp', (['data1.distance[data1.gs == i][data1.loom == j][data1.date == 18106]'], {}), '(data1.distance[data1.gs == i][data1.loom == j][data1.date == 18106])\n', (46466, 46535), True, 'import numpy as np\n'), ((46729, 46808), 'numpy.exp', 'np.exp', (['data1.distance_025[data1.gs == i][data1.loom == j][data1.date == 18106]'], {}), '(data1.distance_025[data1.gs == i][data1.loom == j][data1.date == 18106])\n', (46735, 46808), True, 'import numpy as np\n'), ((46833, 46912), 'numpy.exp', 'np.exp', (['data1.distance_975[data1.gs == i][data1.loom == j][data1.date == 18106]'], {}), '(data1.distance_975[data1.gs == i][data1.loom == j][data1.date == 18106])\n', (46839, 46912), True, 'import numpy as np\n'), ((56812, 56887), 'numpy.exp', 'np.exp', (['data1.distance[data1.gs == i][data1.loom == j][data1.date == 18106]'], {}), '(data1.distance[data1.gs == i][data1.loom == j][data1.date == 18106])\n', (56818, 56887), True, 'import numpy as np\n'), ((57081, 57160), 'numpy.exp', 'np.exp', (['data1.distance_025[data1.gs == i][data1.loom == j][data1.date == 18106]'], {}), '(data1.distance_025[data1.gs == i][data1.loom == j][data1.date == 18106])\n', (57087, 57160), True, 'import numpy as np\n'), ((57185, 57264), 'numpy.exp', 'np.exp', (['data1.distance_975[data1.gs == i][data1.loom == j][data1.date == 18106]'], {}), '(data1.distance_975[data1.gs == i][data1.loom == j][data1.date == 18106])\n', (57191, 57264), True, 'import numpy as np\n')] |
#import useful libraries
import numpy as np
import myml.discriminant as mydsc
import myml.data_separation as mydata
import myml.images as myimg
import myml.factorizations as myfac
import matplotlib.pyplot as plot
import sklearn.decomposition as skd
import time
from mpl_toolkits.mplot3d import Axes3D
def gda_model_accuracy_pca(Xraw, L, num_features, percent_train=0.5):
    """Train and test a Gaussian-discriminant classifier on PCA-reduced data.

    Parameters
    ----------
    Xraw : ndarray
        Raw data matrix; assumed (features x samples) -- TODO confirm
        against ``myfac.projrep``'s expected layout.
    L : ndarray, shape (n,)
        Class label (0 or 1) for each sample.
    num_features : int
        Number of PCA components to keep.
    percent_train : float, optional
        Fraction of samples used for training (default 0.5).

    Returns
    -------
    float
        Test-set classification accuracy as a fraction in [0, 1].
    """
    # Reduce dimensionality with PCA; the projection basis is not needed here.
    (_, X) = myfac.projrep(Xraw, k_or_tol=num_features)
    # get shape information for labels
    (nl,) = L.shape
    # break data into training and testing datasets
    (Dtr, Dtt) = mydata.separateClassData(X, L.reshape(1, nl), numdata_or_percent_for_training=percent_train)
    # time how long training the discriminant set takes
    start = time.time()
    dlist = mydsc.constructDistriminantSet(X=Dtr['net'], L=Dtr['nlbl'])
    end = time.time()
    print('({0}) Time elapsed to train is: '.format(num_features), end-start)
    # classify the held-out test samples with the trained discriminants
    Ltest = mydsc.evalDiscriminantSet(X=Dtt['net'], discriminant_list=dlist)
    # Compute accuracy and confusion matrix for the two classes
    classes = np.array([0, 1])
    (C, accuracy) = mydsc.computeConfusionMatrix(Xtest=Dtt['net'], Ltest=Dtt['nlbl'], Leval=Ltest, classes=classes)
    # plot and save the confusion matrix
    f0 = plot.figure()
    (f0, ax0) = myimg.plotDataset(f0, C, delta=(-1, -1))
    ax0.set_xlabel('Classified Classes')
    ax0.set_ylabel('True Classes')
    f0.savefig('{0}/confusion_mat_{1}.png'.format('../plots/GDA', num_features))
    # close the figure so repeated calls (one per feature count in the
    # __main__ sweep) do not accumulate open figures and leak memory
    plot.close(f0)
    # print output message
    print('Accuracy for {0} features is : '.format(num_features), accuracy)
    # computeConfusionMatrix reports percent; return a fraction
    return accuracy/100.0
def gda_model_accuracy_nmf(Xraw, L, num_features, percent_train=0.5):
    """Train and test a Gaussian-discriminant classifier on NMF-reduced data.

    Parameters
    ----------
    Xraw : ndarray
        Raw data matrix; assumed (features x samples), transposed for
        sklearn's (samples x features) convention -- TODO confirm.
    L : ndarray, shape (n,)
        Class label (0 or 1) for each sample.
    num_features : int
        Number of NMF components to keep.
    percent_train : float, optional
        Fraction of samples used for training (default 0.5).

    Returns
    -------
    float
        Test-set classification accuracy as a fraction in [0, 1].
    """
    # Reduce dimensionality with NMF (fixed seed for reproducibility).
    # The factor basis (components_) is not used, so it is not retained.
    nmf_obj = skd.NMF(n_components=num_features, random_state=17)
    X = nmf_obj.fit_transform(Xraw.T).T
    # get shape information for labels
    (nl,) = L.shape
    # break data into training and testing datasets
    (Dtr, Dtt) = mydata.separateClassData(X, L.reshape(1, nl), numdata_or_percent_for_training=percent_train)
    # time how long training the discriminant set takes
    start = time.time()
    dlist = mydsc.constructDistriminantSet(X=Dtr['net'], L=Dtr['nlbl'])
    end = time.time()
    print('({0}) Time elapsed to train is: '.format(num_features), end-start)
    # classify the held-out test samples with the trained discriminants
    Ltest = mydsc.evalDiscriminantSet(X=Dtt['net'], discriminant_list=dlist)
    # Compute accuracy and confusion matrix for the two classes
    classes = np.array([0, 1])
    (C, accuracy) = mydsc.computeConfusionMatrix(Xtest=Dtt['net'], Ltest=Dtt['nlbl'], Leval=Ltest, classes=classes)
    # plot and save the confusion matrix; filename carries an "nmf" tag so
    # it no longer overwrites the PCA plot saved under the same feature count
    f0 = plot.figure()
    (f0, ax0) = myimg.plotDataset(f0, C, delta=(-1, -1))
    ax0.set_xlabel('Classified Classes')
    ax0.set_ylabel('True Classes')
    f0.savefig('{0}/confusion_mat_nmf_{1}.png'.format('../plots/GDA', num_features))
    # close the figure so repeated calls (one per feature count in the
    # __main__ sweep) do not accumulate open figures and leak memory
    plot.close(f0)
    # print output message
    print('Accuracy for {0} features is : '.format(num_features), accuracy)
    # computeConfusionMatrix reports percent; return a fraction
    return accuracy/100.0
if __name__ == '__main__':
    # Load the raw feature matrix (transposed to features-by-samples)
    # and the corresponding binary control labels.
    Xraw0 = np.load('../data/raw_features.npy').T
    L = np.load('../data/control_lbls.npy')
    # Optionally mean-center the data before factorization (disabled).
    doWhiten = False
    if doWhiten:
        (d, nd) = Xraw0.shape
        Xraw = Xraw0 - np.mean(Xraw0, axis=1).reshape(d, 1)
    else:
        Xraw = Xraw0
    # Sweep the number of retained features from 1 to 20 and record the
    # test accuracy of the GDA classifier under PCA and NMF reduction.
    klist = np.arange(1, 21, 1)
    alist_pca = np.zeros(klist.shape)
    alist_nmf = np.zeros(klist.shape)
    for idx, k in enumerate(klist):
        alist_pca[idx] = gda_model_accuracy_pca(Xraw, L, k)
        alist_nmf[idx] = gda_model_accuracy_nmf(Xraw, L, k)
    # Persist the accuracy curves for later plotting/analysis.
    np.save(file="../data/gpca.npy", arr=alist_pca)
    np.save(file="../data/gnmf.npy", arr=alist_nmf)
| [
"sklearn.decomposition.NMF",
"numpy.load",
"numpy.save",
"myml.discriminant.constructDistriminantSet",
"numpy.zeros",
"myml.factorizations.projrep",
"time.time",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"numpy.arange",
"myml.images.plotDataset",
"myml.discriminant.evalDiscrim... | [((492, 534), 'myml.factorizations.projrep', 'myfac.projrep', (['Xraw'], {'k_or_tol': 'num_features'}), '(Xraw, k_or_tol=num_features)\n', (505, 534), True, 'import myml.factorizations as myfac\n'), ((817, 828), 'time.time', 'time.time', ([], {}), '()\n', (826, 828), False, 'import time\n'), ((876, 935), 'myml.discriminant.constructDistriminantSet', 'mydsc.constructDistriminantSet', ([], {'X': "Dtr['net']", 'L': "Dtr['nlbl']"}), "(X=Dtr['net'], L=Dtr['nlbl'])\n", (906, 935), True, 'import myml.discriminant as mydsc\n'), ((985, 996), 'time.time', 'time.time', ([], {}), '()\n', (994, 996), False, 'import time\n'), ((1159, 1223), 'myml.discriminant.evalDiscriminantSet', 'mydsc.evalDiscriminantSet', ([], {'X': "Dtt['net']", 'discriminant_list': 'dlist'}), "(X=Dtt['net'], discriminant_list=dlist)\n", (1184, 1223), True, 'import myml.discriminant as mydsc\n'), ((1283, 1299), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1291, 1299), True, 'import numpy as np\n'), ((1320, 1420), 'myml.discriminant.computeConfusionMatrix', 'mydsc.computeConfusionMatrix', ([], {'Xtest': "Dtt['net']", 'Ltest': "Dtt['nlbl']", 'Leval': 'Ltest', 'classes': 'classes'}), "(Xtest=Dtt['net'], Ltest=Dtt['nlbl'], Leval=\n Ltest, classes=classes)\n", (1348, 1420), True, 'import myml.discriminant as mydsc\n'), ((1458, 1471), 'matplotlib.pyplot.figure', 'plot.figure', ([], {}), '()\n', (1469, 1471), True, 'import matplotlib.pyplot as plot\n'), ((1488, 1528), 'myml.images.plotDataset', 'myimg.plotDataset', (['f0', 'C'], {'delta': '(-1, -1)'}), '(f0, C, delta=(-1, -1))\n', (1505, 1528), True, 'import myml.images as myimg\n'), ((1974, 2025), 'sklearn.decomposition.NMF', 'skd.NMF', ([], {'n_components': 'num_features', 'random_state': '(17)'}), '(n_components=num_features, random_state=17)\n', (1981, 2025), True, 'import sklearn.decomposition as skd\n'), ((2378, 2389), 'time.time', 'time.time', ([], {}), '()\n', (2387, 2389), False, 'import time\n'), ((2437, 
2496), 'myml.discriminant.constructDistriminantSet', 'mydsc.constructDistriminantSet', ([], {'X': "Dtr['net']", 'L': "Dtr['nlbl']"}), "(X=Dtr['net'], L=Dtr['nlbl'])\n", (2467, 2496), True, 'import myml.discriminant as mydsc\n'), ((2546, 2557), 'time.time', 'time.time', ([], {}), '()\n', (2555, 2557), False, 'import time\n'), ((2720, 2784), 'myml.discriminant.evalDiscriminantSet', 'mydsc.evalDiscriminantSet', ([], {'X': "Dtt['net']", 'discriminant_list': 'dlist'}), "(X=Dtt['net'], discriminant_list=dlist)\n", (2745, 2784), True, 'import myml.discriminant as mydsc\n'), ((2844, 2860), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2852, 2860), True, 'import numpy as np\n'), ((2881, 2981), 'myml.discriminant.computeConfusionMatrix', 'mydsc.computeConfusionMatrix', ([], {'Xtest': "Dtt['net']", 'Ltest': "Dtt['nlbl']", 'Leval': 'Ltest', 'classes': 'classes'}), "(Xtest=Dtt['net'], Ltest=Dtt['nlbl'], Leval=\n Ltest, classes=classes)\n", (2909, 2981), True, 'import myml.discriminant as mydsc\n'), ((3019, 3032), 'matplotlib.pyplot.figure', 'plot.figure', ([], {}), '()\n', (3030, 3032), True, 'import matplotlib.pyplot as plot\n'), ((3049, 3089), 'myml.images.plotDataset', 'myimg.plotDataset', (['f0', 'C'], {'delta': '(-1, -1)'}), '(f0, C, delta=(-1, -1))\n', (3066, 3089), True, 'import myml.images as myimg\n'), ((3527, 3562), 'numpy.load', 'np.load', (['"""../data/control_lbls.npy"""'], {}), "('../data/control_lbls.npy')\n", (3534, 3562), True, 'import numpy as np\n'), ((3938, 3957), 'numpy.arange', 'np.arange', (['(1)', '(21)', '(1)'], {}), '(1, 21, 1)\n', (3947, 3957), True, 'import numpy as np\n'), ((3974, 3995), 'numpy.zeros', 'np.zeros', (['klist.shape'], {}), '(klist.shape)\n', (3982, 3995), True, 'import numpy as np\n'), ((4014, 4035), 'numpy.zeros', 'np.zeros', (['klist.shape'], {}), '(klist.shape)\n', (4022, 4035), True, 'import numpy as np\n'), ((4229, 4276), 'numpy.save', 'np.save', ([], {'file': '"""../data/gpca.npy"""', 'arr': 'alist_pca'}), 
"(file='../data/gpca.npy', arr=alist_pca)\n", (4236, 4276), True, 'import numpy as np\n'), ((4280, 4327), 'numpy.save', 'np.save', ([], {'file': '"""../data/gnmf.npy"""', 'arr': 'alist_nmf'}), "(file='../data/gnmf.npy', arr=alist_nmf)\n", (4287, 4327), True, 'import numpy as np\n'), ((3481, 3516), 'numpy.load', 'np.load', (['"""../data/raw_features.npy"""'], {}), "('../data/raw_features.npy')\n", (3488, 3516), True, 'import numpy as np\n'), ((3693, 3715), 'numpy.mean', 'np.mean', (['Xraw0'], {'axis': '(1)'}), '(Xraw0, axis=1)\n', (3700, 3715), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file belong to https://github.com/snolfi/evorobotpy
and has been written by <NAME> and <NAME>, <EMAIL>, <EMAIL>
evoalgo.py contains methods for showing, saving and loading data
"""
import numpy as np
import time
class EvoAlgo(object):
    """Base class for evolutionary algorithms.

    Tracks the best (and best post-evaluated) individual found so far and
    provides utilities to post-evaluate, save, and reset the search state.
    """

    def __init__(self, env, policy, seed, fileini, filedir):
        self.env = env              # the environment
        self.policy = policy        # the policy
        self.seed = seed            # the seed of the experiment
        self.fileini = fileini      # the name of the file with the hyperparameters
        self.filedir = filedir      # the directory used to save/load files
        self.reset()                # initialize best-so-far data and statistics

    def reset(self):
        """Clear best-so-far data and progress statistics (also used by __init__)."""
        self.bestfit = -999999999.0   # fitness of the best agent so far
        self.bestsol = None           # genotype of the best agent so far
        self.bestgfit = -999999999.0  # performance of the best post-evaluated agent so far
        self.bestgsol = None          # genotype of the best post-evaluated agent so far
        self.stat = np.arange(0, dtype=np.float64)  # progress data across generations
        self.avgfit = 0.0             # average fitness of the population
        self.last_save_time = time.time()  # last time data have been saved

    def run(self, nevals):
        """Run the evolutionary process; must be implemented by subclasses."""
        raise NotImplementedError

    def test(self, testfile):  # postevaluate an agent
        """Post-evaluate an agent, optionally loading its genotype from *testfile*."""
        if (self.policy.test == 1 and "Bullet" in self.policy.environment):
            self.env.render(mode="human")  # Pybullet render require this initialization
        if testfile is not None:
            if self.filedir.endswith("/"):
                fname = self.filedir + testfile
            else:
                fname = self.filedir + "/" + testfile
            if (self.policy.normalize == 0):
                bestgeno = np.load(fname)
            else:
                # stored genotype also carries the input-normalization vector at the end
                geno = np.load(fname)
                for i in range(self.policy.ninputs * 2):
                    self.policy.normvector[i] = geno[self.policy.nparams + i]
                bestgeno = geno[0:self.policy.nparams]
                self.policy.nn.setNormalizationVectors()
            self.policy.set_trainable_flat(bestgeno)
        else:
            self.policy.reset()
        if (self.policy.nttrials > 0):
            ntrials = self.policy.nttrials
        else:
            ntrials = self.policy.ntrials
        eval_rews, eval_length = self.policy.rollout(ntrials, render=True, seed=self.policy.get_seed + 100000)
        # fixed typo in the message: "Postevauation" -> "Postevaluation"
        print("Postevaluation: Average Fitness %.2f Total Steps %d" % (eval_rews, eval_length))

    def updateBest(self, fit, ind):
        """Store *ind* as the best agent so far when *fit* beats the record."""
        if fit > self.bestfit:
            self.bestfit = fit
            if (self.policy.normalize == 0):
                self.bestsol = np.copy(ind)
            else:
                # append the normalization vector so it can be restored on reload
                self.bestsol = np.append(ind, self.policy.normvector)

    def updateBestg(self, fit, ind):
        """Store *ind* as the best post-evaluated agent so far when *fit* beats the record."""
        if fit > self.bestgfit:
            self.bestgfit = fit
            if (self.policy.normalize == 0):
                self.bestgsol = np.copy(ind)
            else:
                self.bestgsol = np.append(ind, self.policy.normvector)

    def save(self):
        """Save the best agent, the best post-evaluated agent, and the statistics."""
        print('save data')
        fname = self.filedir + "/bestS" + str(self.seed)
        np.save(fname, self.bestsol)
        fname = self.filedir + "/bestgS" + str(self.seed)
        np.save(fname, self.bestgsol)
        fname = self.filedir + "/statS" + str(self.seed)
        np.save(fname, self.stat)
| [
"numpy.load",
"numpy.save",
"numpy.copy",
"time.time",
"numpy.append",
"numpy.arange"
] | [((1127, 1157), 'numpy.arange', 'np.arange', (['(0)'], {'dtype': 'np.float64'}), '(0, dtype=np.float64)\n', (1136, 1157), True, 'import numpy as np\n'), ((1328, 1339), 'time.time', 'time.time', ([], {}), '()\n', (1337, 1339), False, 'import time\n'), ((1569, 1599), 'numpy.arange', 'np.arange', (['(0)'], {'dtype': 'np.float64'}), '(0, dtype=np.float64)\n', (1578, 1599), True, 'import numpy as np\n'), ((1656, 1667), 'time.time', 'time.time', ([], {}), '()\n', (1665, 1667), False, 'import time\n'), ((3995, 4023), 'numpy.save', 'np.save', (['fname', 'self.bestsol'], {}), '(fname, self.bestsol)\n', (4002, 4023), True, 'import numpy as np\n'), ((4098, 4127), 'numpy.save', 'np.save', (['fname', 'self.bestgsol'], {}), '(fname, self.bestgsol)\n', (4105, 4127), True, 'import numpy as np\n'), ((4214, 4239), 'numpy.save', 'np.save', (['fname', 'self.stat'], {}), '(fname, self.stat)\n', (4221, 4239), True, 'import numpy as np\n'), ((2268, 2282), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (2275, 2282), True, 'import numpy as np\n'), ((2324, 2338), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (2331, 2338), True, 'import numpy as np\n'), ((3273, 3285), 'numpy.copy', 'np.copy', (['ind'], {}), '(ind)\n', (3280, 3285), True, 'import numpy as np\n'), ((3335, 3373), 'numpy.append', 'np.append', (['ind', 'self.policy.normvector'], {}), '(ind, self.policy.normvector)\n', (3344, 3373), True, 'import numpy as np\n'), ((3637, 3649), 'numpy.copy', 'np.copy', (['ind'], {}), '(ind)\n', (3644, 3649), True, 'import numpy as np\n'), ((3700, 3738), 'numpy.append', 'np.append', (['ind', 'self.policy.normvector'], {}), '(ind, self.policy.normvector)\n', (3709, 3738), True, 'import numpy as np\n')] |
import math
import ctre
import numpy as np
from pyswervedrive.swervemodule import SwerveModule
from utilities.functions import constrain_angle
class PhysicsEngine:
    """pyfrc physics-simulation hooks for a two-module swerve drivetrain.

    Reads simulated talon targets from ``hal_data``, reconstructs per-module
    steer angle and wheel speed, solves for chassis velocity, and feeds it to
    the simulator controller.
    """

    X_WHEELBASE = 0.50  # module separation along x, metres
    Y_WHEELBASE = 0.62  # module separation along y, metres
    GRAVITY = 9.8  # not referenced anywhere in this class

    def __init__(self, controller):
        self.controller = controller
        # encoder counts per wheel revolution (magnetic encoder * gear reduction)
        self.drive_counts_per_rev = (
            SwerveModule.SRX_MAG_COUNTS_PER_REV
            * SwerveModule.DRIVE_ENCODER_GEAR_REDUCTION
        )
        self.drive_counts_per_meter = self.drive_counts_per_rev / (
            math.pi * SwerveModule.WHEEL_DIAMETER
        )
        # factor by which to scale velocities in m/s to give to our drive talon.
        # 0.1 is because SRX velocities are measured in ticks/100ms
        self.drive_velocity_to_native_units = self.drive_counts_per_meter * 0.1
        # for modules [a, b, c, d]. used to iterate over them
        # self.module_steer_can_ids = [48, 46, 44, 42]
        # self.module_drive_can_ids = [49, 47, 45, 43]
        self.module_steer_can_ids = [42, 58]
        self.module_drive_can_ids = [48, 2]
        self.module_steer_offsets = [0] * 2
        x_off = self.X_WHEELBASE / 2
        y_off = self.Y_WHEELBASE / 2
        self.module_x_offsets = [x_off, -x_off]
        self.module_y_offsets = [-y_off, y_off]
        self.controller.add_device_gyro_channel("navxmxp_spi_4_angle")

    def initialize(self, hal_data):
        # nothing to set up before the simulation starts
        pass

    def update_sim(self, hal_data, now, tm_diff):
        """Update pyfrc simulator.
        Args:
            hal_data: Data about motors and other components
            now: Current time in ms
            tm_diff: Difference between current time and time when last checked
        """
        # only simulate things if the robot is enabled
        if not hal_data["control"]["enabled"]:
            return

        steer_positions = []
        for can_id, offset in zip(self.module_steer_can_ids, self.module_steer_offsets):
            # echo the commanded steer target back as the measured encoder position
            value = hal_data["CAN"][can_id]["pid0_target"]
            hal_data["CAN"][can_id]["pulse_width_position"] = int(value)
            position = constrain_angle(
                (hal_data["CAN"][can_id]["pulse_width_position"] - offset)
                / SwerveModule.STEER_COUNTS_PER_RADIAN
            )
            steer_positions.append(position)

        motor_speeds = []
        for i, can_id in enumerate(self.module_drive_can_ids):
            talon = hal_data["CAN"][can_id]
            if talon["control_mode"] == ctre.ControlMode.Velocity:
                enc_speed = talon["pid0_target"]
            else:
                enc_speed = 0
            # NOTE(review): this reads SwerveModule.drive_velocity_to_native_units
            # as a *class* attribute, while __init__ above computes an *instance*
            # attribute of the same name -- confirm the class attribute exists on
            # SwerveModule, otherwise this raises AttributeError.
            speed = enc_speed / SwerveModule.drive_velocity_to_native_units
            # integrate the commanded velocity into the simulated encoder position
            talon["quad_position"] += int(enc_speed * tm_diff * 10)
            talon["quad_velocity"] = int(enc_speed)
            motor_speeds.append(speed)
        # NOTE(review): the four unpacked names below are never used afterwards
        lr_speed, rf_speed = motor_speeds
        lr_angle, rf_angle = steer_positions
        vx, vy, vw = better_four_motor_swerve_drivetrain(
            motor_speeds, steer_positions, self.module_x_offsets, self.module_y_offsets
        )
        # convert meters to ft. (cause america)
        vx /= 0.3048
        vy /= 0.3048
        self.controller.vector_drive(-vy, vx, -vw, tm_diff)
def better_four_motor_swerve_drivetrain(
    module_speeds, module_angles, module_x_offsets, module_y_offsets
):
    """Least-squares chassis velocity from per-module swerve measurements.

    Uses the first two modules of the provided lists (two rows of the design
    matrix per module) and solves for the robot-frame twist.

    Note:
        Standard ROS coordinates: +x forward, +y left, +z rotation CCW.

    Args:
        module_speeds: wheel speeds of each module (m/s)
        module_angles: steer angles of each module (radians)
        module_x_offsets: module positions along the x axis
        module_y_offsets: module positions along the y axis

    Returns:
        (vx, vy, vz): robot-frame velocities in m/s, m/s and rad/s.
    """
    # Two rows per module: one x-equation, one y-equation; the third column
    # (rotation coupling) is filled in below.
    design = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]] * 2)
    observed = np.zeros((4, 1), dtype=float)

    for idx in range(2):
        dx = module_x_offsets[idx]
        dy = module_y_offsets[idx]
        lever = math.hypot(dx, dy)
        bearing = math.atan2(dy, dx)
        # rotation contributes -r*sin(theta) to x and r*cos(theta) to y
        design[2 * idx, 2] = -lever * math.sin(bearing)
        design[2 * idx + 1, 2] = lever * math.cos(bearing)
        # decompose the measured wheel velocity into robot-frame components
        observed[2 * idx, 0] = module_speeds[idx] * math.cos(module_angles[idx])
        observed[2 * idx + 1, 0] = module_speeds[idx] * math.sin(module_angles[idx])

    solution, *_ = np.linalg.lstsq(design, observed, rcond=None)
    vx, vy, vz = solution.reshape(3)
    return vx, vy, vz
| [
"math.hypot",
"utilities.functions.constrain_angle",
"numpy.linalg.lstsq",
"math.atan2",
"numpy.zeros",
"math.sin",
"numpy.array",
"math.cos"
] | [((4289, 4356), 'numpy.array', 'np.array', (['[[1, 0, 1], [0, 1, 1], [1, 0, 1], [0, 1, 1]]'], {'dtype': 'float'}), '([[1, 0, 1], [0, 1, 1], [1, 0, 1], [0, 1, 1]], dtype=float)\n', (4297, 4356), True, 'import numpy as np\n'), ((4378, 4407), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {'dtype': 'float'}), '((4, 1), dtype=float)\n', (4386, 4407), True, 'import numpy as np\n'), ((4943, 4988), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'module_states'], {'rcond': 'None'}), '(A, module_states, rcond=None)\n', (4958, 4988), True, 'import numpy as np\n'), ((4455, 4507), 'math.hypot', 'math.hypot', (['module_x_offsets[i]', 'module_y_offsets[i]'], {}), '(module_x_offsets[i], module_y_offsets[i])\n', (4465, 4507), False, 'import math\n'), ((4532, 4584), 'math.atan2', 'math.atan2', (['module_y_offsets[i]', 'module_x_offsets[i]'], {}), '(module_y_offsets[i], module_x_offsets[i])\n', (4542, 4584), False, 'import math\n'), ((2163, 2281), 'utilities.functions.constrain_angle', 'constrain_angle', (["((hal_data['CAN'][can_id]['pulse_width_position'] - offset) / SwerveModule.\n STEER_COUNTS_PER_RADIAN)"], {}), "((hal_data['CAN'][can_id]['pulse_width_position'] - offset) /\n SwerveModule.STEER_COUNTS_PER_RADIAN)\n", (2178, 2281), False, 'from utilities.functions import constrain_angle\n'), ((4623, 4645), 'math.sin', 'math.sin', (['module_angle'], {}), '(module_angle)\n', (4631, 4645), False, 'import math\n'), ((4687, 4709), 'math.cos', 'math.cos', (['module_angle'], {}), '(module_angle)\n', (4695, 4709), False, 'import math\n'), ((4748, 4774), 'math.cos', 'math.cos', (['module_angles[i]'], {}), '(module_angles[i])\n', (4756, 4774), False, 'import math\n'), ((4811, 4837), 'math.sin', 'math.sin', (['module_angles[i]'], {}), '(module_angles[i])\n', (4819, 4837), False, 'import math\n')] |
import torch
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import numpy as np
import os
import librosa
import json
# from scipy.io.wavfile import read
class MP3Audio(Dataset):
    """FMA mp3 dataset used during training.

    Walks the ``fma_<type>`` directory tree once at construction time and
    serves random ``input_length``-sample crops of each track.
    See https://nbviewer.org/github/mdeff/fma/blob/outputs/usage.ipynb
    """

    def __init__(self, input_length=48000, type='small'):
        if type not in ('small', 'medium'):
            # fixed message typo ("samll")
            raise ValueError('small or medium')
        self.input_length = input_length
        self.dir = 'D:/Siamusic/dataset/fma_' + type
        self.folders = next(os.walk(self.dir))[1]
        # Collect all paths first and build the frame once:
        # DataFrame.append in a loop is O(n^2) and was removed in pandas 2.0.
        paths = []
        for folder in self.folders:
            base = self.dir + '/' + folder
            for music in next(os.walk(base))[2]:
                paths.append(base + '/' + music)
        self.df = pd.DataFrame({'path': paths})

    def __len__(self):
        return len(self.df)

    def get_waveform(self, data_path):  # 22050
        """Load up to 30 s of audio at 22.05 kHz and return a random
        input_length-sample crop shaped (1, input_length)."""
        waveform, _ = librosa.load(data_path, sr=22050, duration=30)
        waveform = np.array(waveform, dtype=float)
        if waveform.shape[0] > self.input_length:
            random_idx = np.random.randint(low=0, high=waveform.shape[0] - self.input_length)
            waveform = waveform[random_idx:random_idx + self.input_length]
        else:
            # clips shorter than input_length used to crash np.random.randint;
            # zero-pad them at the end instead
            padded = np.zeros(self.input_length)
            padded[:waveform.shape[0]] = waveform
            waveform = padded
        return np.expand_dims(waveform, axis=0)  # expand to [1, input_length]

    def __getitem__(self, idx):
        data_path = self.df['path'][idx]
        waveform = self.get_waveform(data_path)
        return waveform.astype(np.float32)
class JsonAudio(Dataset):
    """Dataset that serves fixed-length crops of waveforms stored as JSON.

    Each file in *data_dir* must be a JSON object whose 'audio' key holds the
    raw samples, either as a flat list or as a list of channels (in which case
    only the first channel is used).
    """

    def __init__(self, data_dir, input_length=48000):
        self.data_dir = data_dir
        self.data_list = os.listdir(data_dir)
        self.input_length = input_length

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, idx):
        """Return a (1, input_length) float array: a random crop of the stored
        waveform, zero-padded at the end when the recording is too short."""
        with open(self.data_dir + '/' + self.data_list[idx], 'r') as f:
            waveform = np.array(json.load(f)['audio'], dtype=float)
        if len(waveform.shape) == 2:
            waveform = waveform[0]  # multi-channel input: keep the first channel
        elif len(waveform.shape) != 1:
            raise ValueError('Shape이 이상한데요?')
        num_samples = waveform.shape[-1]
        if num_samples > self.input_length:
            random_idx = np.random.randint(low=0, high=num_samples - self.input_length)
            waveform = waveform[random_idx:random_idx + self.input_length]
        else:
            # was np.zeros(48000): honor self.input_length instead of a
            # hard-coded size, and avoid the bare except that hid other errors
            padded = np.zeros(self.input_length)
            padded[:num_samples] = waveform
            waveform = padded
        return np.expand_dims(waveform, axis=0)  # expand to [1, input_length]
class TestJsonAudio(Dataset):
    """JSON-audio dataset used in the test stage of a trained model.

    Looks up each track as ``<data_dir>/<artist>-<track>.mp4.json`` after
    replacing filesystem-unsafe characters with ``^``.
    """

    def __init__(self, df, data_dir, input_length=48000):
        self.df = df
        # was hard-coded to './data/json_audio', silently ignoring the argument
        self.data_dir = data_dir
        self.input_length = input_length
        # characters that are illegal in file names; replaced by '^' below
        self.error_term = ['/', '\"', '<', '>', '\\', '|', ':', '*', '?']

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        """Return a (1, input_length) float array for the track at *idx*."""
        pid, artist, track, url = self.df.iloc[idx]
        song = artist + '-' + track
        for item in self.error_term:
            if item in song:
                song = song.replace(item, '^')
        data = self.data_dir + '/' + song + '.mp4.json'
        with open(data, 'r') as f:
            waveform = np.array(json.load(f)['audio'], dtype=float)
        if len(waveform.shape) == 2:
            waveform = waveform[0]  # multi-channel input: keep the first channel
        elif len(waveform.shape) != 1:
            raise ValueError('Shape이 이상한데요?')
        num_samples = waveform.shape[-1]
        if num_samples > self.input_length:
            random_idx = np.random.randint(low=0, high=num_samples - self.input_length)
            waveform = waveform[random_idx:random_idx + self.input_length]
        else:
            # was np.zeros(48000): honor self.input_length instead of a
            # hard-coded size, and avoid the bare except that hid other errors
            padded = np.zeros(self.input_length)
            padded[:num_samples] = waveform
            waveform = padded
        return np.expand_dims(waveform, axis=0)  # expand to [1, input_length]
if __name__ == '__main__':
    # Smoke-test the mp3 dataset pipeline: load one batch and report its shape.
    mp3_dataset = MP3Audio()
    mp3_loader = DataLoader(mp3_dataset, batch_size=8, drop_last=True)
    mp3_x = next(iter(mp3_loader))
    print(f'mp3_x : {mp3_x.shape}')
    # The mp4 variant is kept below for reference but disabled.
    # mp4_data = MP4Audio()
    # mp4_dataloader = DataLoader(mp4_data, batch_size=16, drop_last=True)
    # mp4_x, mp4_y = next(iter(mp4_dataloader))
    # print(f'mp4_x : {mp4_x.shape} | mp4_y : {mp4_y.shape}')
"pandas.DataFrame",
"json.load",
"torch.utils.data.DataLoader",
"os.walk",
"numpy.zeros",
"numpy.expand_dims",
"librosa.load",
"numpy.array",
"os.listdir"
] | [((4262, 4312), 'torch.utils.data.DataLoader', 'DataLoader', (['mp3_data'], {'batch_size': '(8)', 'drop_last': '(True)'}), '(mp3_data, batch_size=8, drop_last=True)\n', (4272, 4312), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((636, 666), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['path']"}), "(columns=['path'])\n", (648, 666), True, 'import pandas as pd\n'), ((999, 1045), 'librosa.load', 'librosa.load', (['data_path'], {'sr': '(22050)', 'duration': '(30)'}), '(data_path, sr=22050, duration=30)\n', (1011, 1045), False, 'import librosa\n'), ((1063, 1094), 'numpy.array', 'np.array', (['waveform'], {'dtype': 'float'}), '(waveform, dtype=float)\n', (1071, 1094), True, 'import numpy as np\n'), ((1299, 1331), 'numpy.expand_dims', 'np.expand_dims', (['waveform'], {'axis': '(0)'}), '(waveform, axis=0)\n', (1313, 1331), True, 'import numpy as np\n'), ((1731, 1751), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (1741, 1751), False, 'import os\n'), ((2470, 2502), 'numpy.expand_dims', 'np.expand_dims', (['waveform'], {'axis': '(0)'}), '(waveform, axis=0)\n', (2484, 2502), True, 'import numpy as np\n'), ((3915, 3947), 'numpy.expand_dims', 'np.expand_dims', (['waveform'], {'axis': '(0)'}), '(waveform, axis=0)\n', (3929, 3947), True, 'import numpy as np\n'), ((596, 613), 'os.walk', 'os.walk', (['self.dir'], {}), '(self.dir)\n', (603, 613), False, 'import os\n'), ((2562, 2577), 'numpy.zeros', 'np.zeros', (['(48000)'], {}), '(48000)\n', (2570, 2577), True, 'import numpy as np\n'), ((2655, 2683), 'numpy.expand_dims', 'np.expand_dims', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (2669, 2683), True, 'import numpy as np\n'), ((4007, 4022), 'numpy.zeros', 'np.zeros', (['(48000)'], {}), '(48000)\n', (4015, 4022), True, 'import numpy as np\n'), ((4100, 4128), 'numpy.expand_dims', 'np.expand_dims', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (4114, 4128), True, 'import numpy as np\n'), ((776, 789), 'os.walk', 'os.walk', 
(['PATH'], {}), '(PATH)\n', (783, 789), False, 'import os\n'), ((1993, 2005), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2002, 2005), False, 'import json\n'), ((3438, 3450), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3447, 3450), False, 'import json\n')] |
import os
import time
import pickle
import torch as t
import numpy as np
from torch.utils import data
import gzip
from time import time
from config import DefaultConfig
import torch
import dgl
import threading
class dataSet(data.Dataset):
    """Protein dataset for PPI-site prediction.

    Serves, per protein: ProtBert residue features (zero-padded to the model's
    max sequence length) and a k-nearest-neighbour graph whose edges carry
    standardized [distance, angle] features.
    """

    def __init__(self, root_dir, protein_list_file):
        super(dataSet, self).__init__()
        # edge-feature normalization statistics (calculated from trainset only)
        self.edge_feat_mean = [31.83509173, 1.56021911]
        self.edge_feat_std = [16.79204272, 0.69076342]
        self.all_protBert_feature = pickle.load(gzip.open(root_dir+'/inputs/ProtBert_features.pkl.gz', "rb"))['ProtBert_features']
        self.all_dist_matrix = pickle.load(gzip.open(root_dir+'/inputs/ppisp_dist_matrix_map.pkl.gz', 'rb'))
        self.all_angle_matrix = pickle.load(gzip.open(root_dir+'/inputs/ppisp_angle_matrix_map.pkl.gz', 'rb'))
        print('protein_list_file:', protein_list_file)
        with open(protein_list_file, "r") as f:
            protein_list = f.readlines()
        self.protein_list = [x.strip() for x in protein_list]
        self.config = DefaultConfig()
        self.max_seq_len = self.config.max_sequence_length
        self.neighbourhood_size = 21
        self.protein_list_len = len(self.protein_list)
        # graphs are precomputed once; __getitem__ only looks them up
        self.all_graphs = self.generate_all_graphs()
        print('All graphs generated.')

    def __getitem__(self, index):
        """Return (features, graph, info) for the protein at *index*.

        Features are zero-padded to max_seq_len and shaped
        (1, max_seq_len, feature_dim).
        """
        protein_name = self.protein_list[index]
        id_idx = index
        _all_protBert_feature_ = self.all_protBert_feature[id_idx][:self.max_seq_len]
        seq_len = _all_protBert_feature_.shape[0]
        protein_info = {
            'protein_name': protein_name,
            'protein_idx': id_idx,
            'seq_length': seq_len
        }
        if seq_len < self.max_seq_len:
            # zero-pad short sequences up to the fixed model length
            temp = np.zeros([self.max_seq_len, _all_protBert_feature_.shape[1]])
            temp[:seq_len, :] = _all_protBert_feature_
            _all_protBert_feature_ = temp
        _all_protBert_feature_ = _all_protBert_feature_[np.newaxis, :, :]
        G = self.all_graphs[id_idx]
        return torch.from_numpy(_all_protBert_feature_).type(torch.FloatTensor), \
               G, \
               protein_info

    def __len__(self):
        return self.protein_list_len

    def generate_all_graphs(self):
        """Build one k-NN graph (k = neighbourhood_size - 1) per protein from
        the pairwise distance matrices."""
        graph_list = {}
        for id_idx in self.all_dist_matrix:
            G = dgl.DGLGraph()
            G.add_nodes(self.max_seq_len)
            # nearest neighbours by distance, skipping each residue itself (col 0)
            neighborhood_indices = self.all_dist_matrix[id_idx]['dist_matrix'][:self.max_seq_len, :self.max_seq_len, 0] \
                .argsort()[:, 1:self.neighbourhood_size]
            if neighborhood_indices.max() > self.max_seq_len-1 or neighborhood_indices.min() < 0:
                # was a bare `raise` outside an except block (RuntimeError);
                # raise a descriptive error instead
                raise ValueError('neighborhood indices out of range: max %s min %s'
                                 % (neighborhood_indices.max(), neighborhood_indices.min()))
            edge_feat = np.array([
                self.all_dist_matrix[id_idx]['dist_matrix'][:self.max_seq_len, :self.max_seq_len, 0],
                self.all_angle_matrix[id_idx]['angle_matrix'][:self.max_seq_len, :self.max_seq_len]
            ])
            edge_feat = np.transpose(edge_feat, (1, 2, 0))
            edge_feat = (edge_feat - self.edge_feat_mean) / self.edge_feat_std  # standardize features
            self.add_edges_custom(G,
                                  neighborhood_indices,
                                  edge_feat
                                  )
            graph_list[id_idx] = G
        return graph_list

    def add_edges_custom(self, G, neighborhood_indices, edge_features):
        """Add directed neighbour->centre edges to *G*; per-edge features
        |edge_features[centre, neighbour]| are stored in G.edata['ex']."""
        size = neighborhood_indices.shape[0]
        neighborhood_indices = neighborhood_indices.tolist()
        src = []
        dst = []
        temp_edge_features = []
        for center in range(size):
            src += neighborhood_indices[center]
            dst += [center] * (self.neighbourhood_size - 1)
            for nbr in neighborhood_indices[center]:
                temp_edge_features += [np.abs(edge_features[center, nbr])]
        if len(src) != len(dst):
            # was `prit(...)` (NameError) followed by a bare `raise Exception`
            raise Exception('source and destination array should have been of '
                            'the same length: src and dst: %d %d' % (len(src), len(dst)))
        G.add_edges(src, dst)
        G.edata['ex'] = torch.tensor(temp_edge_features)
def graph_collate(samples):
    """Collate (ProtBert tensor, DGLGraph, info-dict) samples into a batch:
    a stacked feature tensor, one batched graph, and the list of info dicts."""
    feature_tensors = [sample[0] for sample in samples]
    graphs = [sample[1] for sample in samples]
    infos = [sample[2] for sample in samples]
    return torch.cat(feature_tensors), dgl.batch(graphs), infos
| [
"gzip.open",
"numpy.abs",
"dgl.batch",
"numpy.zeros",
"torch.cat",
"dgl.DGLGraph",
"time.time",
"numpy.transpose",
"numpy.array",
"config.DefaultConfig",
"torch.tensor",
"torch.from_numpy"
] | [((4432, 4454), 'dgl.batch', 'dgl.batch', (['graph_batch'], {}), '(graph_batch)\n', (4441, 4454), False, 'import dgl\n'), ((4475, 4499), 'torch.cat', 'torch.cat', (['protbert_data'], {}), '(protbert_data)\n', (4484, 4499), False, 'import torch\n'), ((1094, 1109), 'config.DefaultConfig', 'DefaultConfig', ([], {}), '()\n', (1107, 1109), False, 'from config import DefaultConfig\n'), ((1401, 1407), 'time.time', 'time', ([], {}), '()\n', (1405, 1407), False, 'from time import time\n'), ((3594, 3600), 'time.time', 'time', ([], {}), '()\n', (3598, 3600), False, 'from time import time\n'), ((4278, 4310), 'torch.tensor', 'torch.tensor', (['temp_edge_features'], {}), '(temp_edge_features)\n', (4290, 4310), False, 'import torch\n'), ((683, 749), 'gzip.open', 'gzip.open', (["(root_dir + '/inputs/ppisp_dist_matrix_map.pkl.gz')", '"""rb"""'], {}), "(root_dir + '/inputs/ppisp_dist_matrix_map.pkl.gz', 'rb')\n", (692, 749), False, 'import gzip\n'), ((793, 860), 'gzip.open', 'gzip.open', (["(root_dir + '/inputs/ppisp_angle_matrix_map.pkl.gz')", '"""rb"""'], {}), "(root_dir + '/inputs/ppisp_angle_matrix_map.pkl.gz', 'rb')\n", (802, 860), False, 'import gzip\n'), ((1820, 1881), 'numpy.zeros', 'np.zeros', (['[self.max_seq_len, _all_protBert_feature_.shape[1]]'], {}), '([self.max_seq_len, _all_protBert_feature_.shape[1]])\n', (1828, 1881), True, 'import numpy as np\n'), ((2402, 2416), 'dgl.DGLGraph', 'dgl.DGLGraph', ([], {}), '()\n', (2414, 2416), False, 'import dgl\n'), ((2883, 3074), 'numpy.array', 'np.array', (["[self.all_dist_matrix[id_idx]['dist_matrix'][:self.max_seq_len, :self.\n max_seq_len, 0], self.all_angle_matrix[id_idx]['angle_matrix'][:self.\n max_seq_len, :self.max_seq_len]]"], {}), "([self.all_dist_matrix[id_idx]['dist_matrix'][:self.max_seq_len, :\n self.max_seq_len, 0], self.all_angle_matrix[id_idx]['angle_matrix'][:\n self.max_seq_len, :self.max_seq_len]])\n", (2891, 3074), True, 'import numpy as np\n'), ((3135, 3169), 'numpy.transpose', 'np.transpose', 
(['edge_feat', '(1, 2, 0)'], {}), '(edge_feat, (1, 2, 0))\n', (3147, 3169), True, 'import numpy as np\n'), ((557, 619), 'gzip.open', 'gzip.open', (["(root_dir + '/inputs/ProtBert_features.pkl.gz')", '"""rb"""'], {}), "(root_dir + '/inputs/ProtBert_features.pkl.gz', 'rb')\n", (566, 619), False, 'import gzip\n'), ((2105, 2145), 'torch.from_numpy', 'torch.from_numpy', (['_all_protBert_feature_'], {}), '(_all_protBert_feature_)\n', (2121, 2145), False, 'import torch\n'), ((4008, 4042), 'numpy.abs', 'np.abs', (['edge_features[center, nbr]'], {}), '(edge_features[center, nbr])\n', (4014, 4042), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from Lab1.problem3_env import Env
class QLearning():
    """Tabular Q-learning for the bank-robber grid problem.

    Learns action values for every (robber, police) state of the Env model
    using an epsilon-greedy behaviour policy and a 1/n^(2/3) step size.
    """

    def __init__(self):
        self.num_moves = 10000000
        self.environment = Env() # Environment class with the model
        self.QValues = np.zeros(( self.environment.NUM_STATES, self.environment.NUM_ACTIONS))
        self.num_updates = np.zeros(( self.environment.NUM_STATES, self.environment.NUM_ACTIONS)) #keep the number of updates of each value (Q(s,a))
        #PARAMETERS
        self.epsilon = 0.1 #egreedy policy
        self.alpha = 0.5 #learning rate -- NOTE(review): never used; the 1/n^(2/3) step size in apply_qlearning is used instead
        self.sum_rewards = 0
        self.sum_rewards_list = []
        self.step_size = 0
        # keep the value function for the initial state
        self.init_state_indx = self.environment.states_mapping[((0,0), (3,3))]
        # one float per training step (~80 MB for 10M moves)
        self.ValueF_init = np.zeros(self.num_moves)
        self.optimal_policy = np.zeros(self.environment.NUM_STATES) # keep best action for each state
        self.permitted_actions = dict()

    def egreedy_policy(self, state_indx):
        '''
        Choose an action based on a epsilon greedy policy.
        A random action is selected with epsilon probability, else select the best action.
        '''
        # first find possible actions(robber's movements)
        robber_moves_permitted = self.environment.check_wall_constraint('robber')
        if np.random.random() < self.epsilon:
            # explore: uniform random among the legal moves
            return np.random.choice(robber_moves_permitted)
        else:
            # exploit: best legal move according to the current Q-values
            QValues_permitted = self.QValues[state_indx, robber_moves_permitted] #robber_moves_permitted
            indx = np.argmax(QValues_permitted) #index in QValues permitted
            return robber_moves_permitted[indx]

    def myplot(self, arg, num_steps_done):
        '''
        Plot either the initial-state value function ('valueF') or the
        cumulative reward ('sumRewards') over the first num_steps_done moves.
        '''
        moves = range(num_steps_done)
        if arg == 'valueF':
            plt.plot(moves, self.ValueF_init[0:num_steps_done])
            plt.title('Value function for the initial state v/s time, obtained with Q-Learning')
            # NOTE(review): stray ')' in the label text below
            plt.ylabel("Value fn at initial state)")
            plt.xlabel("time")
            plt.savefig('Figures/problem3_%s%i.png' % (arg,num_steps_done))
            plt.show()
        elif arg == 'sumRewards':
            plt.plot(moves, self.sum_rewards_list[0:num_steps_done])
            plt.title('%s' % arg)
            plt.savefig('Figures/problem3_%s%i.png' % (arg, num_steps_done))
            plt.show()
        #elif arg == 'policy':
        #    plt.plot(moves, self.)

    ###NOTE: all states mentions refer to indexes, not the actual positions
    def apply_qlearning(self):
        '''
        Implement Q-Learning Algorithm
        '''
        # initialize position of robber and police
        cur_state_indx = self.environment.reset_game()
        for i in range(self.num_moves):
            # choose action
            action = self.egreedy_policy(cur_state_indx)
            # perform action --> move to new state and get reward
            new_state_indx = self.environment.next_step(action)
            self.sum_rewards += self.environment.reward
            self.sum_rewards_list.append(self.sum_rewards)
            # find state value in next_state (TD target)
            new_value = self.environment.reward + self.environment.lamda * np.max(self.QValues[new_state_indx])
            # compare it with action value in cur_state and action (TD error)
            difference = new_value - self.QValues[cur_state_indx, action]
            # define step size: decays as 1/n^(2/3) with the per-pair visit count
            self.num_updates[cur_state_indx, action] += 1
            self.step_size = float(1/pow(self.num_updates[cur_state_indx,action], float(2/3)))
            # update QValue in cur_state
            self.QValues[cur_state_indx,action] += self.step_size * difference
            #print('state: %d' %cur_state_indx, 'action: %d'%action, 'QValue:%f'% self.QValues[cur_state_indx,action])
            #update cur_state
            cur_state_indx = new_state_indx
            #update valueF for the initial state
            self.ValueF_init[i] = np.amax(self.QValues[self.init_state_indx])
            if i % 1000000 == 0:
                # periodic progress report with a convergence plot
                print('---------%d------------'%i)
                self.myplot('valueF', i)

    def find_policy(self):
        '''
        find optimal policy from the QValues matrix
        '''
        for state_indx in range(self.environment.NUM_STATES):
            # optimal policy action should belong to the permitted actions!
            state = self.environment.all_states[state_indx]
            self.environment.robber = state[0]
            self.environment.police = state[1]
            permitted_actions = self.environment.check_wall_constraint('robber')
            action_indx = np.argmax(self.QValues[state_indx, permitted_actions])
            self.optimal_policy[ state_indx] = permitted_actions[action_indx]

    def simulate_game(self, num_rounds):
        '''
        simulate a game using the optimal policy found
        '''
        game_stats = {'win':0, 'caught':0, 'almost_caught':0}
        cur_state_indx = self.environment.reset_game()
        for t in range(num_rounds):
            action = self.optimal_policy[cur_state_indx]
            print('action chosen:', action)
            new_state_indx = self.environment.next_step(action)
            new_state = self.environment.all_states[new_state_indx]
            print('Move to state:',new_state)
            # reward semantics (1 = robbed bank, -10 = caught, -5 = near police)
            # -- presumably defined by Env; confirm against problem3_env
            if self.environment.reward == 1:
                game_stats['win'] += 1
            elif self.environment.reward == -10:
                game_stats['caught'] += 1
            elif self.environment.reward == -5:
                game_stats['almost_caught'] += 1
            cur_state_indx = new_state_indx
        print('---- GAME STATISTICS (%d moves) ----' %num_rounds)
        print('Managed to rob the bank: %d times' %game_stats['win'])
        print('Got caught by the police: %d times' %game_stats['caught'])
        print('Got very close to the police: %d times' % game_stats['almost_caught'])
def test():
    """Train a Q-learning agent, extract its greedy policy, and simulate it."""
    agent = QLearning()
    agent.apply_qlearning()
    agent.find_policy()
    agent.simulate_game(1000)


test()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"Lab1.problem3_env.Env",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.zeros",
"numpy.amax",
"numpy.max",
"numpy.random.random",
"numpy.random.choice",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
... | [((192, 197), 'Lab1.problem3_env.Env', 'Env', ([], {}), '()\n', (195, 197), False, 'from Lab1.problem3_env import Env\n'), ((258, 327), 'numpy.zeros', 'np.zeros', (['(self.environment.NUM_STATES, self.environment.NUM_ACTIONS)'], {}), '((self.environment.NUM_STATES, self.environment.NUM_ACTIONS))\n', (266, 327), True, 'import numpy as np\n'), ((357, 426), 'numpy.zeros', 'np.zeros', (['(self.environment.NUM_STATES, self.environment.NUM_ACTIONS)'], {}), '((self.environment.NUM_STATES, self.environment.NUM_ACTIONS))\n', (365, 426), True, 'import numpy as np\n'), ((846, 870), 'numpy.zeros', 'np.zeros', (['self.num_moves'], {}), '(self.num_moves)\n', (854, 870), True, 'import numpy as np\n'), ((902, 939), 'numpy.zeros', 'np.zeros', (['self.environment.NUM_STATES'], {}), '(self.environment.NUM_STATES)\n', (910, 939), True, 'import numpy as np\n'), ((1387, 1405), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1403, 1405), True, 'import numpy as np\n'), ((1441, 1481), 'numpy.random.choice', 'np.random.choice', (['robber_moves_permitted'], {}), '(robber_moves_permitted)\n', (1457, 1481), True, 'import numpy as np\n'), ((1623, 1651), 'numpy.argmax', 'np.argmax', (['QValues_permitted'], {}), '(QValues_permitted)\n', (1632, 1651), True, 'import numpy as np\n'), ((1911, 1962), 'matplotlib.pyplot.plot', 'plt.plot', (['moves', 'self.ValueF_init[0:num_steps_done]'], {}), '(moves, self.ValueF_init[0:num_steps_done])\n', (1919, 1962), True, 'import matplotlib.pyplot as plt\n'), ((1975, 2064), 'matplotlib.pyplot.title', 'plt.title', (['"""Value function for the initial state v/s time, obtained with Q-Learning"""'], {}), "(\n 'Value function for the initial state v/s time, obtained with Q-Learning')\n", (1984, 2064), True, 'import matplotlib.pyplot as plt\n'), ((2072, 2112), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Value fn at initial state)"""'], {}), "('Value fn at initial state)')\n", (2082, 2112), True, 'import matplotlib.pyplot as plt\n'), ((2125, 
2143), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (2135, 2143), True, 'import matplotlib.pyplot as plt\n'), ((2156, 2220), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Figures/problem3_%s%i.png' % (arg, num_steps_done))"], {}), "('Figures/problem3_%s%i.png' % (arg, num_steps_done))\n", (2167, 2220), True, 'import matplotlib.pyplot as plt\n'), ((2232, 2242), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2240, 2242), True, 'import matplotlib.pyplot as plt\n'), ((4080, 4123), 'numpy.amax', 'np.amax', (['self.QValues[self.init_state_indx]'], {}), '(self.QValues[self.init_state_indx])\n', (4087, 4123), True, 'import numpy as np\n'), ((4748, 4802), 'numpy.argmax', 'np.argmax', (['self.QValues[state_indx, permitted_actions]'], {}), '(self.QValues[state_indx, permitted_actions])\n', (4757, 4802), True, 'import numpy as np\n'), ((2290, 2346), 'matplotlib.pyplot.plot', 'plt.plot', (['moves', 'self.sum_rewards_list[0:num_steps_done]'], {}), '(moves, self.sum_rewards_list[0:num_steps_done])\n', (2298, 2346), True, 'import matplotlib.pyplot as plt\n'), ((2359, 2380), 'matplotlib.pyplot.title', 'plt.title', (["('%s' % arg)"], {}), "('%s' % arg)\n", (2368, 2380), True, 'import matplotlib.pyplot as plt\n'), ((2393, 2457), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Figures/problem3_%s%i.png' % (arg, num_steps_done))"], {}), "('Figures/problem3_%s%i.png' % (arg, num_steps_done))\n", (2404, 2457), True, 'import matplotlib.pyplot as plt\n'), ((2470, 2480), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2478, 2480), True, 'import matplotlib.pyplot as plt\n'), ((3321, 3357), 'numpy.max', 'np.max', (['self.QValues[new_state_indx]'], {}), '(self.QValues[new_state_indx])\n', (3327, 3357), True, 'import numpy as np\n')] |
# Real-time object detection demo: SSD MobileNet v3 (COCO) via OpenCV's dnn module.
import cv2
import numpy as np
# A0. Set-up
print("[INFO] Setting up video capture...")
device_cap = 1  # camera index; 1 = second attached camera
video = cv2.VideoCapture(device_cap, cv2.CAP_DSHOW)  # CAP_DSHOW = DirectShow backend (Windows)
video.set(cv2.CAP_PROP_FRAME_WIDTH, 480)
video.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
class_names = []
class_file = 'coco.names'  # text file with one COCO class label per line
with open(class_file, 'rt') as f:
    class_names = f.read().rstrip('\n').split('\n')
config_path = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weights_path = 'frozen_inference_graph.pb'
# A1. Open model
net = cv2.dnn_DetectionModel(weights_path, config_path)
net.setInputSize(320, 320)    # network input resolution
net.setInputScale(1.0/127.5)  # scale pixels after mean subtraction
net.setInputMean(127.5)       # mean subtracted from every channel
net.setInputSwapRB(True)      # OpenCV frames are BGR; swap to RGB for the model
if not video.isOpened():
    video.open(device_cap)
# B0. Loop
while True:
    ret, frame = video.read()
    class_id, conf, bbox = net.detect(frame, confThreshold=0.5)
    if len(class_id) != 0:
        # NOTE(review): the loop variable shadows the `class_id` array — works, but fragile.
        for class_id, confidence, box in zip(class_id.flatten(), conf.flatten(), bbox):
            # detection ids are offset by 1 relative to the names list (id-1 indexing)
            display_text = class_names[class_id-1] + f" {str(np.floor(confidence * 100))}%"
            cv2.rectangle(frame, box, color=(0, 255, 255),
                          thickness=3)
            cv2.putText(frame, display_text.upper(),
                        (box[0]+10, box[1]+30),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 0), 1)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == ord('w'):  # press 'w' to quit
        break
video.release()
cv2.destroyAllWindows()
| [
"cv2.dnn_DetectionModel",
"cv2.waitKey",
"numpy.floor",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.rectangle",
"cv2.destroyAllWindows"
] | [((121, 164), 'cv2.VideoCapture', 'cv2.VideoCapture', (['device_cap', 'cv2.CAP_DSHOW'], {}), '(device_cap, cv2.CAP_DSHOW)\n', (137, 164), False, 'import cv2\n'), ((520, 569), 'cv2.dnn_DetectionModel', 'cv2.dnn_DetectionModel', (['weights_path', 'config_path'], {}), '(weights_path, config_path)\n', (542, 569), False, 'import cv2\n'), ((1454, 1477), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1475, 1477), False, 'import cv2\n'), ((1354, 1380), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (1364, 1380), False, 'import cv2\n'), ((1391, 1405), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1402, 1405), False, 'import cv2\n'), ((1085, 1144), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'box'], {'color': '(0, 255, 255)', 'thickness': '(3)'}), '(frame, box, color=(0, 255, 255), thickness=3)\n', (1098, 1144), False, 'import cv2\n'), ((1041, 1067), 'numpy.floor', 'np.floor', (['(confidence * 100)'], {}), '(confidence * 100)\n', (1049, 1067), True, 'import numpy as np\n')] |
import pandas as pd
from importlib import reload # allows reloading of modules
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import ipywidgets as widgets
from IPython.display import display, clear_output
from importlib import reload
import asyncio
import pickle
import pmagpy.pmag as pmag
import pmagpy.ipmag as ipmag
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from pmagpy import contribution_builder as cb
import scipy as scipy
import pickle
from sklearn.decomposition import PCA
from scipy.optimize import curve_fit
#The sortarai function from pmagpy, this will soon be modified so that additivity checks work.
# Pre-fitted circle models loaded from disk; they expose a pystan-style
# .sampling(...) API (used in BiCEP_fit, which picks the 'slow' model for
# fewer than 7 specimens).
# NOTE(review): pickle.load executes arbitrary code when unpickling — only
# load .pkl files you generated yourself.
model_circle_fast=pickle.load(open('model_circle_fast.pkl','rb'))
model_circle_slow=pickle.load(open('model_circle_slow.pkl','rb'))
def get_mad(IZZI):
    """Free Maximum Angle of Deviation (MAD; Kirschvink, 1980).

    Fits a 3-component PCA to the zero-field NRM vectors (columns
    NRM_x..NRM_z) and converts the ratio of the two minor explained
    variances to the principal one into an angle in degrees.
    """
    directions = IZZI.loc[:, 'NRM_x':'NRM_z'].values
    variances = PCA(n_components=3).fit(directions).explained_variance_
    angle_ratio = np.sqrt((variances[2] + variances[1]) / variances[0])
    return np.degrees(np.arctan(angle_ratio))
def TaubinSVD(x, y):
    """
    Algebraic circle fit by Taubin (SVD formulation), adapted from PmagPy.

    G. Taubin, "Estimation Of Planar Curves, Surfaces And Nonplanar
    Space Curves Defined By Implicit Equations, With Applications To
    Edge And Range Image Segmentation", IEEE Trans. PAMI, Vol. 13,
    pages 1115-1138, (1991)

    Parameters
    ----------
    x, y : sequences of floats — the point coordinates.

    Returns
    -------
    a, b : center of the fitted circle
    r : radius of the fitted circle
    sigma : RMS radial misfit of the points to the fitted circle.
        Fix: the original residual loop iterated range(0, n-1) and so
        silently dropped the last point while still dividing by n-1;
        all n points are now included.
    """
    X0 = np.asarray(list(map(float, x)))
    Y0 = np.asarray(list(map(float, y)))
    centroid = [np.mean(X0), np.mean(Y0)]
    # center the data on the centroid
    X = X0 - centroid[0]
    Y = Y0 - centroid[1]
    Z = X * X + Y * Y
    Zmean = np.mean(Z)
    Z0 = (Z - Zmean) / (2. * np.sqrt(Zmean))
    ZXY = np.array([Z0, X, Y]).T
    U, S, V = np.linalg.svd(ZXY, full_matrices=False)
    V = V.transpose()
    A = V[:, 2]  # right singular vector of the smallest singular value
    A[0] = A[0] / (2. * np.sqrt(Zmean))
    A = np.concatenate([A, [(-1. * Zmean * A[0])]], axis=0)
    a, b = (-1 * A[1:3]) / A[0] / 2 + centroid
    r = np.sqrt(A[1] * A[1] + A[2] * A[2] - 4 * A[0] * A[3]) / abs(A[0]) / 2
    # squared radial residuals of ALL n points (bug fix: was n-1 points)
    residuals = (np.sqrt((X0 - a) ** 2 + (Y0 - b) ** 2) - r) ** 2
    sigma = np.sqrt(residuals.sum() / (len(X0) - 1))
    return a, b, r, sigma
def get_drat(IZZI,IZZI_trunc,P):
"""Calculates the difference ratio (DRAT) of pTRM checks
(Selkin and Tauxe, 2000) to check for alteration"""
IZZI_reduced=IZZI[IZZI.temp_step.isin(P.temp_step)]
a=np.sum((IZZI_trunc.PTRM-np.mean(IZZI_trunc.PTRM))*(IZZI_trunc.NRM-np.mean(IZZI_trunc.NRM)))
b=a/np.abs(a)*np.sqrt(np.sum((IZZI_trunc.NRM-np.mean(IZZI_trunc.NRM))**2)/np.sum((IZZI_trunc.PTRM-np.mean(IZZI_trunc.PTRM))**2))
yint=np.mean(IZZI_trunc.NRM)-b*np.mean(IZZI_trunc.PTRM)
line={'slope':b,'intercept':yint}
xprime=0.5*(IZZI_trunc.PTRM+(IZZI_trunc.NRM-line['intercept'])/line['slope'])
yprime=0.5*(IZZI_trunc.NRM+line['slope']*IZZI_trunc.PTRM+line['intercept'])
scalefactor=np.sqrt((min(xprime)-max(xprime))**2+(min(yprime)-max(yprime))**2)
absdiff=max(np.abs(P.PTRM.values-IZZI_reduced.PTRM.values)/scalefactor)*100
return(absdiff)
def calculate_anisotropy_correction(IZZI):
    """Anisotropy correction factor for a paleointensity interpretation.

    Parses the ':'-separated six-element s tensor stored in the specimen's
    's_tensor' column, finds the principal NRM direction with a PCA of the
    zero-field vectors, and returns the ratio of the TRM magnitude acquired
    along the lab-field axis (-z) to that acquired along the unit vector
    mapped by the inverse tensor from the principal NRM direction
    (see the Standard Paleointensity Definitions).
    """
    # Rebuild the symmetric 3x3 tensor from its six stored elements.
    elements = [float(part.strip()) for part in IZZI['s_tensor'].iloc[0].split(':')]
    s1, s2, s3, s4, s5, s6 = elements
    stensor = np.array([[s1, s4, s6],
                        [s4, s2, s5],
                        [s6, s5, s3]])
    # Principal direction of the zero-field (NRM) vectors.
    principal = PCA(n_components=3).fit(IZZI.loc[:, 'NRM_x':'NRM_z']).components_[0]
    principal = principal / np.sqrt(np.sum(principal ** 2))
    # Unit vector whose image under the tensor lies along the NRM direction.
    anc_unit = np.matmul(np.linalg.inv(stensor), principal)
    anc_unit = anc_unit / np.sqrt(np.sum(anc_unit ** 2))
    lab_trm = np.matmul(stensor, np.array([0, 0, -1]))  # lab field applied along -z
    anc_trm = np.matmul(stensor, anc_unit)
    return np.sqrt(np.sum(lab_trm ** 2)) / np.sqrt(np.sum(anc_trm ** 2))
def calculate_NLT_correction(IZZI, c):
    """Total correction including non-linear TRM acquisition.

    Combines the anisotropy/cooling-rate correction `c` (times the
    specimen's stored 'correction' column) with the tanh non-linearity
    parameterized by 'NLT_beta', using the maximum-likelihood Arai slope.
    Returns the combined multiplicative correction factor.
    """
    # Maximum-likelihood slope of the Arai points (sign from the covariance).
    ptrm_dev = IZZI.PTRM - np.mean(IZZI.PTRM)
    nrm_dev = IZZI.NRM - np.mean(IZZI.NRM)
    covar = np.sum(ptrm_dev * nrm_dev)
    slope = (covar / np.abs(covar)) * np.sqrt(np.sum(nrm_dev ** 2) / np.sum(ptrm_dev ** 2))
    beta = IZZI['NLT_beta'].iloc[0]
    prior_correction = c * IZZI.correction.iloc[0]
    B_lab = IZZI.B_lab.iloc[0] * 1e6  # lab field in microtesla
    numerator = np.arctanh(prior_correction * np.abs(slope) * np.tanh(beta * B_lab))
    return numerator / (np.abs(slope) * beta * B_lab)
def prep_data_for_fitting(IZZI_filtered,IZZI_original):
    """Returns the needed data for a paleointensity interpretation to perform the BiCEP method (Cych et al, in prep.), calculates all corrections for a specimen.

    Parameters
    ----------
    IZZI_filtered : DataFrame of the IZ/ZI steps inside the chosen temperature window.
    IZZI_original : DataFrame of all IZ/ZI steps for the specimen (used for
        normalization and for the correction metadata columns).

    Returns
    -------
    (scale, minPTRM, minNRM, PTRMmax, k, phi, dist_to_edge, sigma,
     PTRMS, NRMS, B_lab, methcodes, extracolumnsdict) — the circle-fit
    initialization values plus MagIC export metadata.
    """
    specimen=IZZI_original.specimen.iloc[0] #Specimen name
    methcodes='' #String For Method Codes
    extracolumnsdict={} #Extra Column Information for MagIC export (corrections)
    #Calculate Anisotropy Correction:
    if len(IZZI_original.dropna(subset=['s_tensor']))>0:
        c=calculate_anisotropy_correction(IZZI_filtered)
        extracolumnsdict['int_corr_aniso']=c
        #Get method code depending on anisotropy type (AARM or ATRM)
        methcodes+=IZZI_original['aniso_type'].iloc[0]
    else:
        c=1
    #Get Cooling Rate Correction
    if IZZI_original.correction.iloc[0]!=1:
        methcodes+=':LP-CR-TRM' #method code for cooling rate correction
        extracolumnsdict['int_corr_cooling_rate']=IZZI_original.correction.iloc[0]
    #Calculate nonlinear TRM Correction
    if len(IZZI_original.dropna(subset=['NLT_beta']))>0:
        methcodes+=':DA-NL' #method code for nonlinear TRM correction
        total_correction=calculate_NLT_correction(IZZI_filtered,c) #total correction (combination of all three corrections)
        extracolumnsdict['int_corr_nlt']=total_correction/(c*IZZI_original.correction.iloc[0]) #NLT correction is total correction/original correction.
    else:
        total_correction=c*IZZI_original.correction.iloc[0]
    #Converting Arai plot data to useable form
    NRM0=IZZI_original.NRM.iloc[0]
    NRMS=IZZI_filtered.NRM.values/NRM0
    PTRMS=IZZI_filtered.PTRM.values/NRM0/total_correction #We divide our pTRMs by the total correction, because we scale the pTRM values so that the maximum pTRM is one, this doesn't affect the fit and just gets scaled back when converting the circle tangent slopes back to intensities as would be expected, but it's easier to apply this here.
    PTRMmax=max(IZZI_original.PTRM/NRM0/total_correction) #We scale by our maximum pTRM to perform the circle fit.
    line=bestfit_line(IZZI_original.PTRM/NRM0/total_correction,IZZI_original.NRM/NRM0) #best fitting line to the pTRMs
    scale=np.sqrt((line['intercept']/line['slope'])**2+(line['intercept'])**2) #Flag- is this ever used?
    PTRMS=PTRMS/PTRMmax #Scales the pTRMs so the maximum pTRM is one
    #We subtract the minimum pTRM and NRM to maintain aspect ratio and make circle fitting easier.
    minPTRM=min(PTRMS)
    minNRM=min(NRMS)
    PTRMS=PTRMS-minPTRM
    NRMS=NRMS-minNRM
    #We perform the Taubin least squares circle fit to get values close to the Bayesian maximum likelihood to initialize our MCMC sampler at, this makes sampling a lot easier than initializing at a random point (which may have infinitely low probability).
    x_c,y_c,R,sigma=TaubinSVD(PTRMS,NRMS) #Calculate x_c,y_c and R
    dist_to_edge=abs(np.sqrt(x_c**2+y_c**2)-R) #Calculate D (dist_to_edge)
    phi=np.radians(np.degrees(np.arctan(y_c/x_c))%180)
    #Calculate (and ensure the sign of) k
    if y_c<0:
        k=-1/R
    else:
        k=1/R
    B_lab=IZZI_filtered.B_lab.unique()[0]*1e6
    return(scale,minPTRM,minNRM,PTRMmax,k,phi,dist_to_edge,sigma,PTRMS,NRMS,B_lab,methcodes,extracolumnsdict)
def BiCEP_fit(specimenlist,temperatures=None,n_samples=30000,priorstd=20,model=None,**kwargs):
    """Run the BiCEP circle-fit MCMC over a list of specimens.

    Parameters
    ----------
    specimenlist : iterable of specimen names. Repeated names select
        successive rows of that specimen's `temperatures` array.
    temperatures : dict mapping specimen -> array whose rows are
        (min temp, max temp) in Kelvin, or None to use every IZ/ZI step.
    n_samples : total MCMC iterations per chain (half are warmup).
    priorstd : prior standard deviation passed into the model data.
    model : pre-compiled sampling model; when None one of the module-level
        model_circle_slow / model_circle_fast pickles is used
        (slow for fewer than 7 specimens).
    **kwargs : forwarded to model.sampling().

    Returns
    -------
    (fit_circle, newmethcodes, newcolumns) : the sampling fit object plus
        per-specimen MagIC method-code strings and extra-column dicts.

    NOTE(review): reads the module-level `temps` table — it must be loaded
    (e.g. via maketempsfile) before calling.
    """
    minPTRMs=[]
    minNRMs=[]
    IZZI_list=[]
    B_lab_list=[]
    klist=[]
    NRM0s=[]
    pTRMsList=np.array([])
    NRMsList=np.array([])
    lengths=[]
    philist=[]
    dist_to_edgelist=[]
    B_ancs=[]
    dmaxlist=[]
    PTRMmaxlist=[]
    centroidlist=[]
    spec_old=''
    newmethcodes={}
    newcolumns={}
    i=0
    for specimen in specimenlist:
        # i counts repeats of the same specimen so each repeat uses the
        # next row of its `temperatures` array.
        if spec_old==specimen:
            i+=1
        else:
            i=0
        spec_old=specimen
        IZZI_original=temps[(temps.specimen==specimen)&((temps.steptype=='IZ')|(temps.steptype=="ZI"))]
        if temperatures==None:
            IZZI_filtered=IZZI_original
        else:
            IZZI_filtered=IZZI_original[(IZZI_original.temp_step>=temperatures[specimen][i,0])&(IZZI_original.temp_step<=temperatures[specimen][i,1])]
        scale,minPTRM,minNRM,PTRMmax,k,phi,dist_to_edge,sigma,PTRMS,NRMS,B_lab,methcodestr,extracolumnsdict=prep_data_for_fitting(IZZI_filtered,IZZI_original)
        newcolumns[specimen]=extracolumnsdict
        newmethcodes[specimen]=methcodestr
        if len(IZZI_filtered)<=3:
            print('Specimen Rejected- Too Few Points to make an interpretation')
        NRM0=IZZI_filtered.NRM.iloc[0]
        minPTRMs.append(minPTRM)
        minNRMs.append(minNRM)
        # Classical best-fit-line intensity estimate, used to initialize the sampler.
        line=bestfit_line(IZZI_filtered.PTRM,IZZI_filtered.NRM)
        B_anc=-line['slope']*B_lab*IZZI_filtered.correction.iloc[0]
        B_ancs.append(B_anc)
        # dmax = largest pairwise distance between Arai points;
        # centroid = distance of the mean point from the origin.
        Pi,Pj=np.meshgrid(PTRMS,PTRMS)
        Ni,Nj=np.meshgrid(NRMS,NRMS)
        dmax=np.amax(np.sqrt((Pi-Pj)**2+(Ni-Nj)**2))
        centroid=np.sqrt(np.mean(PTRMS)**2+np.mean(NRMS)**2)
        IZZI_list.append(IZZI_filtered)
        B_lab_list.append(B_lab)
        klist.append(k)
        philist.append(phi)
        dist_to_edgelist.append(dist_to_edge)
        NRM0s.append(NRM0)
        pTRMsList=np.append(pTRMsList,PTRMS)
        NRMsList=np.append(NRMsList,NRMS)
        lengths.append(int(len(PTRMS)))
        dmaxlist.append(dmax)
        PTRMmaxlist.append(PTRMmax)
        centroidlist.append(centroid)
    if model==None:
        if len(specimenlist)<7:
            model_circle=model_circle_slow
        else:
            model_circle=model_circle_fast
    else:
        model_circle=model
    # pystan-style sampling call; the 4 chains share the same init point.
    fit_circle=model_circle.sampling (
    data={'I':len(pTRMsList),'M':len(lengths),'PTRM':pTRMsList,'NRM':NRMsList,'N':lengths,'PTRMmax':PTRMmaxlist,'B_labs':B_lab_list,'dmax':np.sqrt(dmaxlist),'centroid':centroidlist,'priorstd':priorstd},iter=n_samples,warmup=int(n_samples/2),
    init=[{'k_scale':np.array(klist)*np.array(dist_to_edgelist),'phi':philist,'dist_to_edge':dist_to_edgelist,'int_real':B_ancs}]*4,**kwargs)
    return(fit_circle,newmethcodes,newcolumns)
def sufficient_statistics(ptrm, nrm):
    """Sample means and (co)variances of paired pTRM/NRM values.

    Returns a dict with keys 'xbar', 'ybar' (means) and 'S2xx', 'S2yy',
    'S2xy' (variance/covariance; np.cov's default ddof=1 normalization).
    """
    covariance = np.cov(np.stack((ptrm, nrm), axis=0))
    stats = {'xbar': np.mean(ptrm), 'ybar': np.mean(nrm)}
    stats['S2xx'] = covariance[0, 0]
    stats['S2yy'] = covariance[1, 1]
    stats['S2xy'] = covariance[0, 1]
    return stats
def bestfit_line(ptrm, nrm):
    """Bayesian maximum-likelihood best-fit line through (pTRM, NRM) points.

    Returns {'slope': m, 'intercept': b} with m = -w - sqrt(w^2 + 1),
    where w = (S2xx - S2yy) / (2 * S2xy) from the sample covariance
    (the helper's computation is inlined here).
    """
    covariance = np.cov(np.stack((ptrm, nrm), axis=0))
    s2xx, s2yy, s2xy = covariance[0, 0], covariance[1, 1], covariance[0, 1]
    w = 0.5 * (s2xx - s2yy) / s2xy
    slope = -w - np.sqrt(w ** 2 + 1)
    intercept = np.mean(nrm) - slope * np.mean(ptrm)
    return {'slope': slope, 'intercept': intercept}
def sortarai(datablock, s, Zdiff, **kwargs):
    """
    sorts data block in to first_Z, first_I, etc.

    Parameters
    _________
    datablock : Pandas DataFrame with Thellier-Tellier type data
    s : specimen name
    Zdiff : if True, take difference in Z values instead of vector difference
            NB: this should always be False
    **kwargs :
        version : data model.  if not 3, assume data model = 2.5

    Returns
    _______
    araiblock : [first_Z, first_I, ptrm_check,
                 ptrm_tail, zptrm_check, GammaChecks]
    field : lab field (in tesla)

    Notes
    -----
    Each first_Z/first_I entry is [temp, dec, inc, intensity, sigma, ZI];
    sigma is an uncertainty derived from the csd (circular standard
    deviation) column when present, otherwise from a default 2-degree csd.
    """
    # Column names differ between MagIC data model 3 and 2.5.
    if 'version' in list(kwargs.keys()) and kwargs['version'] == 3:
        dec_key, inc_key, csd_key = 'dir_dec', 'dir_inc', 'dir_csd'
        Mkeys = ['magn_moment', 'magn_volume', 'magn_mass', 'magnitude','dir_csd']
        meth_key = 'method_codes'
        temp_key, dc_key = 'treat_temp', 'treat_dc_field'
        dc_theta_key, dc_phi_key = 'treat_dc_field_theta', 'treat_dc_field_phi'
        # convert dataframe to list of dictionaries
        datablock = datablock.to_dict('records')
    else:
        dec_key, inc_key, csd_key = 'measurement_dec', 'measurement_inc','measurement_csd'
        Mkeys = ['measurement_magn_moment', 'measurement_magn_volume',
                 'measurement_magn_mass', 'measurement_magnitude']
        meth_key = 'magic_method_codes'
        temp_key, dc_key = 'treatment_temp', 'treatment_dc_field'
        dc_theta_key, dc_phi_key = 'treatment_dc_field_theta', 'treatment_dc_field_phi'
    first_Z, first_I, zptrm_check, ptrm_check, ptrm_tail = [], [], [], [], []
    field, phi, theta = "", "", ""
    starthere = 0
    Treat_I, Treat_Z, Treat_PZ, Treat_PI, Treat_M = [], [], [], [], []
    ISteps, ZSteps, PISteps, PZSteps, MSteps = [], [], [], [], []
    GammaChecks = []  # comparison of pTRM direction acquired and lab field
    rec = datablock[0]
    # Pick the first populated moment column as the intensity key.
    for key in Mkeys:
        if key in list(rec.keys()) and rec[key] != "":
            momkey = key
            break
    # first find all the steps
    for k in range(len(datablock)):
        rec = datablock[k]
        temp = float(rec[temp_key])
        methcodes = []
        tmp = rec[meth_key].split(":")
        for meth in tmp:
            methcodes.append(meth.strip())
        # In-field TRM acquisition step of a Thellier experiment.
        if 'LT-T-I' in methcodes and 'LP-TRM' not in methcodes and 'LP-PI-TRM' in methcodes:
            Treat_I.append(temp)
            ISteps.append(k)
            if field == "":
                field = float(rec[dc_key])
            if phi == "":
                phi = float(rec[dc_phi_key])
                theta = float(rec[dc_theta_key])
        # stick first zero field stuff into first_Z
        if 'LT-NO' in methcodes:
            Treat_Z.append(temp)
            ZSteps.append(k)
        if 'LT-T-Z' in methcodes:
            Treat_Z.append(temp)
            ZSteps.append(k)
        if 'LT-PTRM-Z' in methcodes:
            Treat_PZ.append(temp)
            PZSteps.append(k)
        if 'LT-PTRM-I' in methcodes:
            Treat_PI.append(temp)
            PISteps.append(k)
        if 'LT-PTRM-MD' in methcodes:
            Treat_M.append(temp)
            MSteps.append(k)
        if 'LT-NO' in methcodes:
            # NRM (room-temperature, no treatment) step.
            dec = float(rec[dec_key])
            inc = float(rec[inc_key])
            str = float(rec[momkey])
            # sigma from csd when available; 2 degrees assumed otherwise.
            if csd_key not in rec.keys():
                sig= np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
            elif rec[csd_key]!=None:
                sig = np.radians(float(rec[csd_key]))*np.sqrt(3)/np.sqrt(2)*str
            else:
                sig= np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
            first_I.append([273, 0., 0., 0., 0., 1])
            first_Z.append([273, dec, inc, str, sig, 1])  # NRM step
    for temp in Treat_I:  # look through infield steps and find matching Z step
        if temp in Treat_Z:  # found a match
            istep = ISteps[Treat_I.index(temp)]
            irec = datablock[istep]
            methcodes = []
            tmp = irec[meth_key].split(":")
            for meth in tmp:
                methcodes.append(meth.strip())
            # take last record as baseline to subtract
            brec = datablock[istep - 1]
            zstep = ZSteps[Treat_Z.index(temp)]
            zrec = datablock[zstep]
            # sort out first_Z records
            if "LP-PI-TRM-IZ" in methcodes:
                ZI = 0
            else:
                ZI = 1
            dec = float(zrec[dec_key])
            inc = float(zrec[inc_key])
            str = float(zrec[momkey])
            # NOTE(review): uses `rec` (the last record of the scan loop) for
            # the csd lookup rather than `zrec` — confirm intended.
            if csd_key not in rec.keys():
                sig= np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
            elif rec[csd_key]!=None:
                sig = np.radians(float(rec[csd_key]))*np.sqrt(3)/np.sqrt(2)*str
            else:
                sig= np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
            first_Z.append([temp, dec, inc, str, sig, ZI])
            # sort out first_I records: pTRM = in-field vector minus zero-field vector
            idec = float(irec[dec_key])
            iinc = float(irec[inc_key])
            istr = float(irec[momkey])
            X = pmag.dir2cart([idec, iinc, istr])
            BL = pmag.dir2cart([dec, inc, str])
            I = []
            for c in range(3):
                I.append((X[c] - BL[c]))
            if I[2] != 0:
                iDir = pmag.cart2dir(I)
                if csd_key not in rec.keys():
                    isig= np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
                elif rec[csd_key]!=None:
                    isig = np.radians(float(rec[csd_key]))*np.sqrt(3)/np.sqrt(2)*istr
                else:
                    isig = np.radians(2)*np.sqrt(3)/np.sqrt(2)*istr
                isig = np.sqrt(isig**2+sig**2)
                if Zdiff == 0:
                    first_I.append([temp, iDir[0], iDir[1], iDir[2], isig, ZI])
                else:
                    first_I.append([temp, 0., 0., I[2], 0., isig, ZI])
                gamma = pmag.angle([iDir[0], iDir[1]], [phi, theta])
            else:
                first_I.append([temp, 0., 0., 0., 0., ZI])
                gamma = 0.0
            # put in Gamma check (infield trm versus lab field)
            if 180. - gamma < gamma:
                gamma = 180. - gamma
            GammaChecks.append([temp - 273., gamma])
    for temp in Treat_PI:  # look through infield steps and find matching Z step
        step = PISteps[Treat_PI.index(temp)]
        rec = datablock[step]
        dec = float(rec[dec_key])
        inc = float(rec[inc_key])
        str = float(rec[momkey])
        brec = datablock[step - 1]  # take last record as baseline to subtract
        pdec = float(brec[dec_key])
        pinc = float(brec[inc_key])
        pint = float(brec[momkey])
        X = pmag.dir2cart([dec, inc, str])
        prevX = pmag.dir2cart([pdec, pinc, pint])
        I = []
        for c in range(3):
            I.append(X[c] - prevX[c])
        dir1 = pmag.cart2dir(I)
        if csd_key not in rec.keys():
            sig= np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
            psig = np.radians(2)*np.sqrt(3)/np.sqrt(2)*dir1[2]
        elif rec[csd_key]!=None:
            sig = np.radians(float(rec[csd_key]))*np.sqrt(3)/np.sqrt(2)*str
            psig=np.radians(float(brec[csd_key]))*np.sqrt(3)/np.sqrt(2)*dir1[2]
        else:
            sig = np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
            psig = np.radians(2)*np.sqrt(3)/np.sqrt(2)*dir1[2]
        psig=np.sqrt(sig**2+psig**2)
        if Zdiff == 0:
            ptrm_check.append([temp, dir1[0], dir1[1], dir1[2], sig])
        else:
            # NOTE(review): list.append takes one argument — this two-argument
            # call raises TypeError if ever reached (Zdiff should always be False).
            ptrm_check.append([temp, 0., 0., I[2]], sig)
    # in case there are zero-field pTRM checks (not the SIO way)
    for temp in Treat_PZ:
        step = PZSteps[Treat_PZ.index(temp)]
        rec = datablock[step]
        dec = float(rec[dec_key])
        inc = float(rec[inc_key])
        str = float(rec[momkey])
        brec = datablock[step - 1]
        pdec = float(brec[dec_key])
        pinc = float(brec[inc_key])
        pint = float(brec[momkey])
        X = pmag.dir2cart([dec, inc, str])
        prevX = pmag.dir2cart([pdec, pinc, pint])
        I = []
        for c in range(3):
            I.append(X[c] - prevX[c])
        dir2 = pmag.cart2dir(I)
        # NOTE(review): two branches below reference dir1 (left over from the
        # previous loop) instead of dir2 — confirm intended.
        if csd_key not in rec.keys():
            sig= np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
            psig = np.radians(2)*np.sqrt(3)/np.sqrt(2)*dir1[2]
        elif rec[csd_key]!=None:
            sig = np.radians(float(rec[csd_key]))*np.sqrt(3)/np.sqrt(2)*str
            psig= np.radians(float(brec[csd_key]))*np.sqrt(3)/np.sqrt(2)*dir2[2]
        else:
            sig = np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
            psig = np.radians(2)*np.sqrt(3)/np.sqrt(2)*dir1[2]
        psig=np.sqrt(sig**2+psig**2)
        zptrm_check.append([temp, dir2[0], dir2[1], dir2[2],psig])
    # get pTRM tail checks together -
    for temp in Treat_M:
        # tail check step - just do a difference in magnitude!
        step = MSteps[Treat_M.index(temp)]
        rec = datablock[step]
        dec = float(rec[dec_key])
        inc = float(rec[inc_key])
        str = float(rec[momkey])
        if csd_key not in rec.keys():
            sig= np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
        elif rec[csd_key]!=None:
            sig = np.radians(float(rec[csd_key]))*np.sqrt(3)/np.sqrt(2)*str
        else:
            sig = np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
        if temp in Treat_Z:
            step = ZSteps[Treat_Z.index(temp)]
            brec = datablock[step]
            pint = float(brec[momkey])
            # X=dir2cart([dec,inc,str])
            # prevX=dir2cart([pdec,pinc,pint])
            # I=[]
            # for c in range(3):I.append(X[c]-prevX[c])
            # d=cart2dir(I)
            # ptrm_tail.append([temp,d[0],d[1],d[2]])
            # difference - if negative, negative tail!
            ptrm_tail.append([temp, dec, inc, str, sig])
        else:
            print(
                s, ' has a tail check with no first zero field step - check input file! for step', temp - 273.)
    #
    # final check
    #
    if len(first_Z) != len(first_I):
        print(len(first_Z), len(first_I))
        print(" Something wrong with this specimen! Better fix it or delete it ")
        input(" press return to acknowledge message")
    araiblock = (first_Z, first_I, ptrm_check,
                 ptrm_tail, zptrm_check, GammaChecks)
    return araiblock, field
def NLTsolver(fields, a, b):
    """Saturating (hyperbolic tangent) TRM acquisition model: a * tanh(b * fields)."""
    saturation = np.tanh(b * fields)
    return a * saturation
def convert_intensity_measurements(measurements):
    """Converts a measurements table with only intensity experiments into the internal data format used by the BiCEP method.

    For every specimen the Thellier steps are sorted with sortarai, the
    IZ/ZI, pTRM-check ('P') and pTRM-tail ('T') steps are converted to
    cartesian NRM/pTRM vectors, and everything is stacked column-wise into
    a single 16-row array which is returned as the 'temps' DataFrame.
    Specimens whose demagnetization did not reach 25% of NRM0, or that
    have fewer than 2 steps, are skipped with a console message.
    """
    specimens=list(measurements.specimen.unique())#This function constructs the 'temps' dataframe (used to plot Arai plots)
    #this may take a while to run depending on the number of specimens.
    #Constructs initial empty 'temps' dataframe
    data_array=np.empty(shape=(16,0))
    for specimen in specimens:
        print('Working on:',specimen)
        araiblock,field=sortarai(measurements[measurements.specimen==specimen],specimen, Zdiff=False,version=3) #Get arai data
        sitename=measurements[measurements.specimen==specimen].site.unique()
        first_Z,first_I,ptrm_check,ptrm_tail,zptrm_check,GammaChecks=araiblock #Split NRM and PTRM values into step types
        B_lab=np.full(len(first_Z),field) #Lab field used
        m=len(first_Z)
        # Final (highest-temperature) NRM and pTRM vectors.
        NRM_dec_max=first_Z[m-1][1]
        NRM_inc_max=first_Z[m-1][2]
        NRM_int_max=first_Z[m-1][3]
        PTRM_dec_max=first_I[m-1][1]
        PTRM_inc_max=first_I[m-1][2]
        PTRM_int_max=first_I[m-1][3]
        NRM_vector_max=pmag.dir2cart([NRM_dec_max,NRM_inc_max,NRM_int_max])
        PTRM_vector_max=pmag.dir2cart([PTRM_dec_max,PTRM_inc_max,PTRM_int_max])
        PTRM_vector_max=PTRM_vector_max-NRM_vector_max
        NRMS=[first_Z[i][3] for i in list(range(len(first_Z)))]
        first_Z=np.array(first_Z)
        first_I=np.array(first_I)
        # Require demagnetization to reach below 25% of the initial NRM.
        if min(NRMS)/NRMS[0]<0.25:
            if(len(first_Z))>1:
                sample=np.full(len(first_Z),measurements[measurements.specimen==specimen]['sample'].unique()[0]) #Get sample name
                site=np.full(len(first_Z),measurements[measurements.specimen==specimen].site.unique()[0]) #Get site name
                specarray=np.full(len(first_Z),specimen)
                temp_step=first_Z[:,0] #Gets the temperature in kelvin we use
                NRM=first_Z[:,3] #NRM value (in first_Z dataframe)
                zbinary=first_Z[:,5]#Is it a ZI or an IZ step?
                zbinary=zbinary.astype('object')
                zbinary[zbinary==1]='ZI'
                zbinary[zbinary==0]='IZ'
                steptype=zbinary
                PTRM=first_I[:,3] #PTRM value (in first_I dataframe)
                PTRM_sigma=first_I[:,4]
                NRM_dec=first_Z[:,1]
                NRM_inc=first_Z[:,2]
                NRM_int=NRM
                NRM_sigma=first_Z[:,4]
                NRM_vector=pmag.dir2cart(np.array([NRM_dec,NRM_inc,NRM_int]).T)
                PTRM_vector=pmag.dir2cart(np.array([first_I[:,1],first_I[:,2],first_I[:,3]]).T)
                NRM_x=NRM_vector[:,0]
                NRM_y=NRM_vector[:,1]
                NRM_z=NRM_vector[:,2]
                PTRM_x=PTRM_vector[:,0]
                PTRM_y=PTRM_vector[:,1]
                PTRM_z=PTRM_vector[:,2]
                # Row order here defines the 16 columns of the output frame.
                newarray=np.array([specarray,sample,site,NRM,PTRM,NRM_x,NRM_y,NRM_z,PTRM_x,PTRM_y,PTRM_z,NRM_sigma,PTRM_sigma,B_lab,steptype,temp_step])
                data_array=np.concatenate((data_array,newarray),axis=1)
                #Doing PTRM Checks Part
                ptrm_check=np.array(ptrm_check)
                temp_step=ptrm_check[:,0]
                smallarray=data_array
                sample=np.full(len(ptrm_check),measurements[measurements.specimen==specimen]['sample'].unique()[0]) #Get sample name
                site=np.full(len(ptrm_check),measurements[measurements.specimen==specimen].site.unique()[0]) #Get site name
                specarray=np.full(len(ptrm_check),specimen)
                B_lab=np.full(len(ptrm_check),field)
                PTRM=ptrm_check[:,3]
                PTRM_sigma=ptrm_check[:,4]
                # Rows of this specimen's IZ/ZI data matching the check temperatures.
                intersect=data_array[:,(data_array[0]==specimen)&(np.in1d(data_array[-1].astype('float'),temp_step.astype('float')))]
                NRM_vector=np.array([intersect[5],intersect[6],intersect[7]])
                NRM_sigma=intersect[11]
                PTRM_vector=pmag.dir2cart(np.array([ptrm_check[:,1],ptrm_check[:,2],ptrm_check[:,3]]).T)
                NRM_x=NRM_vector[0]
                NRM_y=NRM_vector[1]
                NRM_z=NRM_vector[2]
                PTRM_x=PTRM_vector[:,0]
                PTRM_y=PTRM_vector[:,1]
                PTRM_z=PTRM_vector[:,2]
                NRM=intersect[3]
                steptype=np.full(len(ptrm_check),'P')
                if len(NRM)==len(PTRM):
                    newarray=np.array([specarray,sample,site,NRM,PTRM,NRM_x,NRM_y,NRM_z,PTRM_x,PTRM_y,PTRM_z,NRM_sigma,PTRM_sigma,B_lab,steptype,temp_step])
                    data_array=np.concatenate((data_array,newarray),axis=1)
                else:
                    # Drop checks whose temperature has no matching in-field step.
                    # NOTE(review): `temp_step!=diff` broadcasts against an array
                    # `diff`; confirm it behaves as intended when len(diff)>1.
                    diff=np.setdiff1d(temp_step,intersect[-1])
                    for i in diff:
                        print('PTRM check at '+str(i)+'K has no corresponding infield measurement, ignoring')
                    newarray=np.array([specarray[temp_step!=diff],sample[temp_step!=diff],site[temp_step!=diff],NRM,PTRM[temp_step!=diff],NRM_x,NRM_y,NRM_z,PTRM_x[temp_step!=diff],PTRM_y[temp_step!=diff],PTRM_z[temp_step!=diff],NRM_sigma,PTRM_sigma[temp_step!=diff],B_lab[temp_step!=diff],steptype[temp_step!=diff],temp_step[temp_step!=diff]])
                    data_array=np.concatenate((data_array,newarray),axis=1)
                #Add PTRM tail checks
                ptrm_tail=np.array(ptrm_tail)
                if len(ptrm_tail)>1:
                    temp_step=ptrm_tail[:,0]
                    sample=np.full(len(ptrm_tail),measurements[measurements.specimen==specimen]['sample'].unique()[0]) #Get sample name
                    site=np.full(len(ptrm_tail),measurements[measurements.specimen==specimen].site.unique()[0]) #Get site name
                    specarray=np.full(len(ptrm_tail),specimen)
                    B_lab=np.full(len(ptrm_tail),field)
                    intersect=data_array[:,(data_array[0]==specimen)&(np.in1d(data_array[-1].astype('float'),temp_step.astype('float')))&(data_array[-2]!='P')]
                    NRM=ptrm_tail[:,3]
                    NRM_sigma=ptrm_tail[:,4]
                    NRM_vector=pmag.dir2cart(np.array([ptrm_tail[:,1],ptrm_tail[:,2],ptrm_tail[:,3]]).T)
                    PTRM_vector=np.array([intersect[8],intersect[9],intersect[10]])
                    PTRM_sigma=intersect[12]
                    PTRM_x=PTRM_vector[0]
                    PTRM_y=PTRM_vector[1]
                    PTRM_z=PTRM_vector[2]
                    NRM_x=NRM_vector[:,0]
                    NRM_y=NRM_vector[:,1]
                    NRM_z=NRM_vector[:,2]
                    PTRM=intersect[4]
                    steptype=np.full(len(ptrm_tail),'T')
                    if len(PTRM)==len(NRM):
                        newarray=np.array([specarray,sample,site,NRM,PTRM,NRM_x,NRM_y,NRM_z,PTRM_x,PTRM_y,PTRM_z,NRM_sigma,PTRM_sigma,B_lab,steptype,temp_step])
                        data_array=np.concatenate((data_array,newarray),axis=1)
                    else:
                        diff=np.setdiff1d(temp_step,intersect[-1])
                        for i in diff:
                            print('PTRM tail check at '+str(i)+'K has no corresponding zero field measurement, ignoring')
                        newarray=np.array([specarray[temp_step!=diff],sample[temp_step!=diff],site[temp_step!=diff],NRM[temp_step!=diff],PTRM,NRM_x[temp_step!=diff],NRM_y[temp_step!=diff],NRM_z[temp_step!=diff],PTRM_x,PTRM_y,PTRM_z,NRM_sigma[temp_step!=diff],PTRM_sigma,B_lab[temp_step!=diff],steptype[temp_step!=diff],temp_step[temp_step!=diff]])
                        data_array=np.concatenate((data_array,newarray),axis=1)
            else:
                print(specimen,'in site',sitename[0],'Not included, not a thellier experiment')
        else:
            print(specimen,'in site',sitename[0],'Not included, demagnetization not completed')
    temps=pd.DataFrame(data_array.T,columns=['specimen','sample','site','NRM','PTRM','NRM_x','NRM_y','NRM_z','PTRM_x','PTRM_y','PTRM_z','NRM_sigma','PTRM_sigma','B_lab','steptype','temp_step'])
    return(temps)
def generate_arai_plot_table(outputname):
    """
    Generates a DataFrame with points on an Araiplot. Inputs: outputname (must be string)

    Reads the MagIC tables in the current directory, converts all
    Thellier (LP-PI-TRM) measurements to the internal 'temps' format,
    attaches anisotropy tensors (ATRM/AARM), non-linear-TRM tanh fits and
    cooling-rate corrections, and writes the result to <outputname>.csv.
    Side effects: rewrites 'specimens.txt' and creates
    'specimens_atrm.txt'/'specimens_aarm.txt' on disk.
    """
    #This cell constructs the 'measurements' dataframe with samples and sites added
    status,measurements=cb.add_sites_to_meas_table('./')
    measurements=measurements[measurements.specimen.str.contains('#')==False]
    measurements_old=measurements
    measurements=measurements[measurements.experiment.str.contains('LP-PI-TRM')]
    temps=convert_intensity_measurements(measurements)
    temps['correction']=1
    temps['s_tensor']=np.nan
    temps['aniso_type']=np.nan
    spec=pd.read_csv('specimens.txt',skiprows=1,sep='\t')
    #Create the anisotropy tensors if they don't already exist.
    print("Couldn't find Anisotropy Tensors, Generating...")
    #Tensor for ATRM
    ipmag.atrm_magic('measurements.txt',output_spec_file='specimens_atrm.txt')
    spec_atrm=pd.read_csv('specimens_atrm.txt',sep='\t',skiprows=1)
    for specimen in spec_atrm.specimen.unique():
        temps.loc[temps.specimen==specimen,'s_tensor']=spec_atrm.loc[spec_atrm.specimen==specimen,'aniso_s'].iloc[0]
        temps.loc[temps.specimen==specimen,'aniso_type']=':LP-AN-TRM'
    #Tensor for AARM
    ipmag.aarm_magic('measurements.txt',output_spec_file='specimens_aarm.txt')
    spec_aarm=pd.read_csv('specimens_aarm.txt',sep='\t',skiprows=1)
    for specimen in spec_aarm.specimen.unique():
        temps.loc[temps.specimen==specimen,'s_tensor']=spec_aarm.loc[spec_aarm.specimen==specimen,'aniso_s'].iloc[0]
        temps.loc[temps.specimen==specimen,'aniso_type']=':LP-AN-ARM'
    #Add Anisotropy tensors to specimen tables.
    if len(spec_atrm.specimen.unique())>0:
        cols = spec.columns.difference(spec_atrm.columns)
        cols=np.append(cols.values,'specimen')
        spec_1=pd.merge(spec.loc[:,cols],spec_atrm,how='right',left_on='specimen',right_on='specimen')
        if len(spec_aarm.specimen.unique())>0:
            spec_2=pd.merge(spec.loc[:,cols],spec_aarm,how='right',left_on='specimen',right_on='specimen')
            spec=pd.concat([spec_2,spec_1])
        else:
            spec=spec_1
    elif len(spec_aarm.specimen.unique())>0:
        cols = spec.columns.difference(spec_aarm.columns)
        cols=np.append(cols.values,'specimen')
        spec=pd.merge(spec.loc[:,cols],spec_aarm,how='right',left_on='specimen',right_on='specimen')
    spec=spec.drop_duplicates(subset=['specimen'])
    spec=spec.fillna('')
    specdict=spec.to_dict('records')
    pmag.magic_write('specimens.txt',specdict,'specimens')
    #Get the best fitting hyperbolic tangent for the NLT correction.
    temps['NLT_beta']=np.nan
    NLTcorrs=measurements_old[measurements_old['method_codes']=='LP-TRM:LT-T-I']
    for specimen in NLTcorrs.specimen.unique():
        meas_val=NLTcorrs[NLTcorrs['specimen']==specimen]
        try:
            # Fit moment = a*tanh(beta*B) to the TRM acquisition steps,
            # normalized to the last (highest-field) measurement.
            ab,cov = curve_fit(NLTsolver, meas_val['treat_dc_field'].values*1e6, meas_val['magn_moment'].values/meas_val['magn_moment'].iloc[-1], p0=(max(meas_val['magn_moment']/meas_val['magn_moment'].iloc[-1]),1e-2))
            temps.loc[temps.specimen==specimen,'NLT_beta']=ab[1]
        except RuntimeError:
            print("-W- WARNING: Can't fit tanh function to NLT data for "+specimen)
    #Get the cooling rate correction
    meas_cool=measurements_old[measurements_old.method_codes.str.contains('LP-CR-TRM')].dropna(subset=['description'])
    for specimen in meas_cool.specimen.unique():
        specframe=meas_cool[meas_cool.specimen==specimen]
        # 'description' holds "label:cooling_rate" pairs for each CR step.
        vals=specframe.description.str.split(':').values
        crs=np.array([])
        for val in vals:
            crs=np.append(crs,float(val[1]))
        magn_moments=specframe['magn_moment'].values
        avg_moment=np.mean(magn_moments[crs==max(crs)])
        norm_moments=magn_moments/avg_moment
        croven=max(crs)
        # Linear fit of normalized moment vs log(cooling-rate ratio).
        crlog=np.log(croven/crs)
        m,c=np.polyfit(crlog,norm_moments,1)
        sample=specframe['sample'].iloc[0]
        # NOTE(review): `samples` is not defined in this module's visible
        # scope — presumably a module-level table of sample metadata with a
        # 'cooling_rate' column; confirm it is loaded before calling.
        cr_real=samples[samples['sample']==sample].cooling_rate.values/5.256e+11
        cr_reallog=np.log(croven/cr_real)
        cfactor=1/(c+m*cr_reallog)[0]
        temps.loc[temps.specimen==specimen,'correction']=temps.loc[temps.specimen==specimen,'correction']*cfactor
    #Save the dataframe to output.
    temps=temps.dropna(subset=['site'])
    temps.to_csv(outputname+'.csv',index=False)
def maketempsfile(fname):
    """Load an Arai-plot data table (csv written by generate_arai_plot_table).

    Parameters
    ----------
    fname : path to the csv file.

    Returns
    -------
    pandas.DataFrame of the table, re-indexed 0..len-1 so row labels are
    unique.

    Fixes: the original discarded the result of set_index (a no-op, since
    set_index returns a new frame) and computed an unused `specimens` list.
    """
    temps = pd.read_csv(fname)
    # Keep the re-indexed frame (the original dropped it on the floor).
    temps = temps.set_index([list(range(0, len(temps)))])
    return temps
def convert(a):
    """Converts data from MagIC format into BiCEP GUI format"""
    # ipywidgets button callback: `a` is the Button instance (unused).
    convert_button.description='Converting..'
    generate_arai_plot_table('arai_data')
    temps=maketempsfile('arai_data.csv')
    # NOTE(review): `temps` is a local variable here, so the module-level
    # `temps` used by the plotting helpers is NOT updated by this callback —
    # confirm whether that is intended.
    convert_button.description='Convert MagIC data'
def plot_line_base(ax,specimen,min_temp,max_temp,GUI=False):
    """Plots data onto the Arai plot. Does not fit a line to this data.

    Parameters
    ----------
    ax : matplotlib axes to draw on
    specimen : str, row selector into the module-level `temps` table
    min_temp, max_temp : temperature bounds in Celsius (+273 -> Kelvin)
    GUI : bool, if True also update the DRAT read-out widget

    Returns
    -------
    tuple of the matplotlib artist lists created (line, unused dots,
    pTRM checks, MD checks, I-step dots, Z-step dots).
    """
    specdf=temps[temps.specimen==specimen]
    IZZI=specdf[(specdf.steptype=='IZ')|(specdf.steptype=='ZI')]
    IZZI_trunc=IZZI[(IZZI.temp_step>=min_temp+273)&(IZZI.temp_step<=max_temp+273)]
    P=specdf[(specdf.steptype=='P')]
    T=specdf[(specdf.steptype=='T')]
    if GUI==True:
        try:
            P_trunc=P[(P.temp_step<=max_temp+273)].iloc[:-1]
            drat=get_drat(IZZI,IZZI_trunc,P_trunc)
            dratbox.description='DRAT: %2.1f'%drat
        except Exception:
            # FIX: was a bare `except:` (also swallowed KeyboardInterrupt).
            # DRAT cannot be computed for this selection; blank the box.
            dratbox.description='DRAT: '
    # Normalize everything by the initial NRM
    NRM0=specdf.iloc[0].NRM
    PTRMmax=max(specdf.PTRM)/NRM0
    lines=ax.plot(IZZI.PTRM/NRM0,IZZI.NRM/NRM0,'k',linewidth=1)
    emptydots=ax.plot(IZZI.PTRM/NRM0,IZZI.NRM/NRM0,'o',markerfacecolor='None',markeredgecolor='black',label='Not Used')
    ptrm_check=ax.plot(P.PTRM/NRM0,P.NRM/NRM0,'^',markerfacecolor='None',markeredgecolor='black',markersize=10,label='PTRM Check')
    md_check=ax.plot(T.PTRM/NRM0,T.NRM/NRM0,'s',markerfacecolor='None',markeredgecolor='black',markersize=10)
    ax.set_ylim(0,max(IZZI.NRM/NRM0)*1.1)
    ax.set_xlim(0,PTRMmax*1.1)
    # Filled symbols only for the steps inside the chosen interpretation
    IZ=IZZI_trunc[IZZI_trunc.steptype=='IZ']
    ZI=IZZI_trunc[IZZI_trunc.steptype=='ZI']
    iz_plot=ax.plot(IZ.PTRM/NRM0,IZ.NRM/NRM0,'o',markerfacecolor='b',markeredgecolor='black',label='I step')
    zi_plot=ax.plot(ZI.PTRM/NRM0,ZI.NRM/NRM0,'o',markerfacecolor='r',markeredgecolor='black',label='Z step')
    ax.set_ylabel('NRM/NRM$_0$')
    ax.set_xlabel('PTRM/NRM$_0$')
    return(lines,emptydots,ptrm_check,md_check,iz_plot,zi_plot)
def plot_zijd(ax,specimen,min_temp,max_temp):
    """Plots Zijderveld plot of zero field steps for a paleointensity experiment.

    Draws x-y (black) and x-z (red) projections of the NRM vector,
    highlights the currently selected temperature interval, fits a PCA
    line to the selected steps, and updates the MAD/DANG read-outs.
    """
    #Get the NRM data for the specimen
    IZZI=temps.loc[(temps.specimen==specimen)&((temps.steptype=='IZ')|(temps.steptype=='ZI'))]
    # FIX: the truncation mask was previously built from the full `temps`
    # table, which triggers pandas' "Boolean Series will be reindexed"
    # warning; building it from IZZI selects the same rows warning-free.
    IZZI_trunc=IZZI[(IZZI.temp_step>=min_temp+273)&(IZZI.temp_step<=max_temp+273)]
    NRM_dirs=IZZI.loc[:,'NRM_x':'NRM_z'].values
    NRM_trunc_dirs=IZZI_trunc.loc[:,'NRM_x':'NRM_z'].values
    try:
        mad=get_mad(IZZI_trunc)
        madbox.description='MAD: %2.1f'%mad
    except Exception:
        # FIX: was a bare `except:`. MAD not computable for this range.
        madbox.description='MAD: '
    #Plot axis
    ax.axvline(0,color='k',linewidth=1)
    ax.axhline(0,color='k',linewidth=1)
    #Plot NRM directions
    ax.plot(NRM_dirs[:,0],NRM_dirs[:,1],'k')
    ax.plot(NRM_dirs[:,0],NRM_dirs[:,2],'k')
    #Plot NRM directions in currently selected temperature range as closed symbols
    ax.plot(NRM_trunc_dirs[:,0],NRM_trunc_dirs[:,1],'ko')
    ax.plot(NRM_trunc_dirs[:,0],NRM_trunc_dirs[:,2],'rs')
    #Plot open circles for all NRM directions
    ax.plot(NRM_dirs[:,0],NRM_dirs[:,1],'o',markerfacecolor='None',markeredgecolor='k')
    ax.plot(NRM_dirs[:,0],NRM_dirs[:,2],'s',markerfacecolor='None',markeredgecolor='k')
    #Perform PCA fit to data (needs at least 3 selected steps)
    if len(IZZI_trunc)>2:
        pca=PCA(n_components=3)
        pca=pca.fit(NRM_trunc_dirs)
        length, vector=pca.explained_variance_[0], pca.components_[0]
        vals=pca.transform(NRM_trunc_dirs)[:,0]
        v = np.outer(vals,vector)
        #Plot PCA line fit
        ax.plot(pca.mean_[0]+v[:,0],pca.mean_[1]+v[:,1],'g')
        ax.plot(pca.mean_[0]+v[:,0], pca.mean_[2]+v[:,2],'g')
        # Deviation angle (DANG) between the mean NRM and the PCA direction
        NRM_vect=np.mean(NRM_trunc_dirs,axis=0)
        NRM_mean_magn=np.sqrt(sum(NRM_vect**2))
        vector_magn=np.sqrt(sum(vector**2))
        dang=np.degrees(np.arccos(np.abs(np.dot(NRM_vect,vector)/(NRM_mean_magn*vector_magn))))
        dangbox.description='DANG: %2.1f'%dang
    else:
        dangbox.description='DANG: '
    ax.set_xlabel('x, $Am^2$')
    ax.set_ylabel('y,z, $Am^2$')
    ax.axis('equal')
    ax.relim()
def circleplot(site,fit,i,ax,temperatures,legend=False,linewidth=2,title=None,tangent=False):
    """Plots Circle fits sampled from the posterior distribution
    (using the BiCEP method) to the Arai plot data. Plots tangent
    to the circle as a slope if tangent=True.

    Parameters
    ----------
    site : str, site name; i indexes the specimen within that site
    fit : posterior samples with 'R', 'x_c', 'y_c' (and 'phi',
        'dist_to_edge' when tangent=True), one column per specimen
    temperatures : dict of specimen -> [[min, max]] Kelvin bounds, or
        None to use every IZZI step
    """
    #Get information on maximum pTRM for rescaling of circle
    specimenlist=temps[temps.site==site].specimen.unique()
    specimen=specimenlist[i]
    specdf=temps[temps.specimen==specimen]
    IZZI=specdf[(specdf.steptype=='IZ')|(specdf.steptype=='ZI')]
    NRM0=specdf.NRM.iloc[0]
    PTRMmax=max(IZZI.PTRM)/NRM0
    # IDIOM FIX: compare against None with `is not`, not `!=`
    if temperatures is not None:
        IZZI_trunc=IZZI[(IZZI.temp_step>=temperatures[specimen][0,0])&(IZZI.temp_step<=temperatures[specimen][0,1])]
    else:
        IZZI_trunc=IZZI
    minNRM=min(IZZI_trunc.NRM/NRM0)
    minPTRM=min(IZZI_trunc.PTRM/NRM0)
    #Parameters for the circle fit: 100 random posterior draws, full sweep
    c=np.random.choice(range(len(fit['R'][:,i])),100)
    thetas=np.linspace(0,2*np.pi,1000)
    NRM0=temps[temps.specimen==specimen].iloc[0].NRM
    #Circle x and y values for circle plot.
    xs=(fit['x_c'][c,i][:,np.newaxis])*PTRMmax+minPTRM+fit['R'][c,i][:,np.newaxis]*np.cos(thetas)*PTRMmax
    ys=fit['y_c'][c,i][:,np.newaxis]+minNRM+fit['R'][c,i][:,np.newaxis]*np.sin(thetas)
    #Plot Circles
    ax.plot(xs.T,ys.T,'-',color='lightgreen',alpha=0.2,linewidth=linewidth,zorder=-1);
    ax.plot(100,100,'-',color='lightgreen',label='Circle Fits');
    #Find tangents to the circle:
    if tangent:
        slope_ideal=-1/np.tan(np.median(fit['phi'][:,i]))/PTRMmax
        x_i=np.median(fit['dist_to_edge'][:,i])*np.cos(np.median(fit['phi'][:,i]))*PTRMmax+minPTRM
        y_i=np.median(fit['dist_to_edge'][:,i])*np.sin(np.median(fit['phi'][:,i]))+minNRM
        ax.plot(x_i,y_i,'ko')
        c=y_i-slope_ideal*x_i
        d=-c/slope_ideal
        ax.plot([0,d],[c,0],'k',linestyle='--')
    #Add legend and title to plot
    if legend:
        ax.legend(fontsize=10);
    if title is not None:
        ax.set_title(title,fontsize=20,loc='left')
def regplot(fit,ax,specimenlist,legend=False,title=None):
    """Plots B vs k for all specimens in a site given a BiCEP or unpooled fit.

    For a pooled (BiCEP) fit, 100 posterior regression lines are drawn;
    an unpooled fit falls back to slope * B_lab per specimen. Medians and
    2.5-97.5 percentile bars are drawn for each specimen either way.
    """
    # Lab field for each specimen, converted to microtesla
    B_lab_list=[temps[temps.specimen==specimen].B_lab.unique()*1e6 for specimen in specimenlist]
    try:
        Bs=fit['int_real']
        mink,maxk=np.amin(fit['k']),np.amax(fit['k'])
        minB,maxB=fit['c']*mink+fit['int_site'],fit['c']*maxk+fit['int_site']
        c=np.random.choice(range(len(minB)),100)
        ax.plot([mink,maxk],[minB[c],maxB[c]],color='skyblue',alpha=0.12)
    except Exception:
        # FIX: was a bare `except:`. Unpooled fits lack the pooled
        # parameters ('int_real'/'c'/'int_site'); use slope * B_lab.
        Bs=fit['slope']*np.array(B_lab_list).T
    ax.set_xlabel(r'$\vec{k}$');
    ax.plot(np.percentile(fit['k'],(2.5,97.5),axis=0),[np.median(Bs,axis=0),np.median(Bs,axis=0)],'k')
    ax.plot([np.median(fit['k'],axis=0),np.median(fit['k'],axis=0)],np.percentile(Bs,(2.5,97.5),axis=0),'k')
    ax.plot(np.median(fit['k'],axis=0),np.median(Bs,axis=0),'o',markerfacecolor='lightgreen',markeredgecolor='k')
    ax.axvline(0,color='k',linewidth=1)
    if title is not None:
        ax.set_title(title,fontsize=20,loc='left')
def display_gui():
    """Displays the specimen plots (Arai + Zijderveld) for BiCEP GUI,
    overlaying posterior circle fits when a fit exists for the site."""
    for axis in ax:
        axis.cla()
    plot_line_base(ax[0],specimen_wid.value,lower_temp_wid.value,upper_temp_wid.value,GUI=True) #Base Arai plot
    plot_zijd(ax[1],specimen_wid.value,lower_temp_wid.value,upper_temp_wid.value) #Zijderveld plot
    try:
        fit=fits[site_wid.value]
        specimenlist=np.array(specimen_wid.options)
        specimen=specimen_wid.value
        i=np.where(np.array(specimenlist)==specimen)[0][0]
        circleplot(site_wid.value,fit,i,ax[0],ktemp)
    except Exception:
        # FIX: was a bare `except:`. No fit for this site yet (KeyError)
        # or specimen not in the fitted list (IndexError): skip overlay.
        pass
    fig.set_tight_layout(True)
    fig_2.set_tight_layout(True)
def plot_site_plot(fit):
    """Draw the site-level k vs B_anc panel and the B_anc posterior
    histogram on the BiCEP GUI site axes."""
    kax, hax = ax_2[0], ax_2[1]
    # Posterior median site intensity, marked on both panels
    site_median = np.median(fit['int_site'])
    kax.axhline(site_median, color='k')
    hax.axhline(site_median, color='k')
    hax.hist(fit['int_site'], color='skyblue', bins=100, density=True, orientation='horizontal')
    specimenlist = specimen_wid.options
    regplot(fit, kax, specimenlist)
    kax.yaxis.tick_right()
    hax.yaxis.tick_right()
    hax.yaxis.set_label_position('right')
    # Axis limits padded around the 2.5-97.5 percentile envelopes
    b_lo = np.percentile(fit['int_real'], 2.5, axis=0)
    b_hi = np.percentile(fit['int_real'], 97.5, axis=0)
    kax.set_ylim(min(b_lo) * 0.9, max(b_hi) * 1.1)
    k_lo = np.percentile(fit['k'], 2.5, axis=0)
    k_hi = np.percentile(fit['k'], 97.5, axis=0)
    kax.set_xlim(min(min(k_lo) * 1.1, min(k_lo) * 0.9),
                 max(max(k_hi) * 1.1, max(k_hi) * 0.9))
    # Highlight the currently selected specimen with a filled red marker
    specindex = np.where(specimenlist == specimen_wid.value)[0]
    kax.plot(np.median(fit['k'][:, specindex]),
             np.median(fit['int_real'][:, specindex]),
             'o', markeredgecolor='r', markerfacecolor='r')
    hax.set_ylabel('$B_{anc}$')
    hax.set_xlabel('Probability Density')
def display_site_plot():
    """Updates everything needed once the BiCEP method has been applied
    to a site: redraws the site plot, sampler diagnostics and the
    selected-specimen highlight. Falls back to blank panels when the
    site has not been fit yet."""
    try:
        fit=fits[site_wid.value]
        ax_2[0].cla()
        ax_2[1].cla()
        plot_site_plot(fit)
        display_sampler_diags(fit)
        display_specimen_ring()
    except Exception:
        # FIX: was a bare `except:`. No fit for this site yet (KeyError
        # on fits): clear the panels and reset the diagnostic labels.
        ax_2[0].cla()
        ax_2[1].cla()
        rhatlabel.description='R_hat:'
        nefflabel.description='n_eff:'
        banclabel.description='B_anc:'
def display_specimen_ring():
    """Displays a red circle around the currently selected
    specimen in the site plot of BiCEP GUI."""
    try:
        fit=fits[site_wid.value]
        # Remove any previously drawn highlight, identified by its red
        # marker edge.
        # NOTE(review): this removes elements from ax_2[0].lines while
        # iterating it; works when at most one ring exists, but verify
        # if multiple red-edged artists can ever be present.
        for line in ax_2[0].lines:
            if line.properties()['markeredgecolor']=='r':
                line.remove()
        specimenlist=specimen_wid.options
        currspec=specimen_wid.value
        specindex=np.where(np.array(specimenlist)==currspec)
        ax_2[0].plot(np.median(fit['k'][:,specindex]),np.median(fit['int_real'][:,specindex]),'o',markeredgecolor='r',markerfacecolor='None')
        circleplot(site_wid.value,fit,specindex,ax[0],temperatures)
    except Exception:
        # FIX: was a bare `except:`. No fit for this site yet: nothing
        # to highlight.
        pass
def on_change(change):
    """Update GUI on changing one of our site, specimen, temperature dropdowns."""
    ax[0].cla()
    # Changing the site: replot the site-level plots and refresh the
    # specimen options for that site.
    if (change.owner==site_wid):
        specimen_wid.options=temps[temps.site==site_wid.value].specimen.unique()
        display_site_plot()
    # Changing site or specimen: refresh the temperature-step dropdowns
    # (stored in Kelvin, shown in Celsius) and restore the saved bounds.
    # IDIOM FIX: use short-circuiting `or` rather than bitwise `|` on bools.
    if (change.owner==site_wid) or (change.owner==specimen_wid):
        lower_temp_wid.options=temps[temps.specimen==specimen_wid.value].temp_step.unique()-273
        upper_temp_wid.options=temps[temps.specimen==specimen_wid.value].temp_step.unique()-273
        lower_temp_wid.value=temperatures[specimen_wid.value][0,0]
        upper_temp_wid.value=temperatures[specimen_wid.value][0,1]
    # Changing the specimen: highlight it on the site plot.
    if (change.owner==specimen_wid):
        display_specimen_ring()
    # We always need to redraw the specimen plots.
    display_gui()
def on_button_clicked(a):
    """Persist the currently selected temperature bounds for the active
    specimen, then pickle the whole temperatures dict to disk."""
    bounds = np.array([[lower_temp_wid.value, upper_temp_wid.value]])
    temperatures[specimen_wid.value] = bounds
    with open('specimen-temperatures.pickle', 'wb') as tempdict:
        pickle.dump(temperatures, tempdict)
def get_sampler_diags(fit):
    """Return (worst R_hat, n_eff of the site intensity) for a pystan MCMC fit.

    The "worst" R_hat is the value farthest from 1 in either direction;
    n_eff is read from the first summary row (the site intensity).
    """
    summary = fit.summary()['summary']
    rhat = summary[:, -1]
    rhat_worst = rhat[np.argmax(np.abs(1 - rhat))]
    n_eff_int_site = int(summary[0, -2])
    return rhat_worst, n_eff_int_site
def display_sampler_diags(fit):
    """Displays the worst R_hat and n_eff for B_anc for the MCMC fit for
    a site in BiCEP_GUI, colour-coding the buttons by convergence."""
    rhat_worst,n_eff_int_site=get_sampler_diags(fit)
    # R_hat far from 1 indicates the chains have not converged.
    # IDIOM FIX: short-circuiting `or` instead of bitwise `|` on booleans.
    if (rhat_worst>1.1) or (rhat_worst<0.9):
        rhatlabel.button_style='danger'
    else:
        rhatlabel.button_style='success'
    # Fewer than 1000 effective samples: flag possible sampling noise.
    if n_eff_int_site<1000:
        nefflabel.button_style='warning'
    else:
        nefflabel.button_style='success'
    rhatlabel.description='R_hat: %1.2f'%rhat_worst
    nefflabel.description='n_eff:'+str(n_eff_int_site)
    minB,maxB=np.percentile(fit['int_site'],(2.5,97.5),axis=0)
    banclabel.description='B_anc %3.1f'%minB+'- %3.1f'%maxB+' μT'
def get_site_dist(a):
    """Run the MCMC sampler for the selected site and refresh the GUI
    with the resulting fit, diagnostics and method codes."""
    process_wid.description='Processing..'
    # Clear both site-level axes before redrawing.
    # (Loop variable renamed from `ax`, which shadowed the module-level
    # specimen-plot axes.)
    for axis in ax_2:
        axis.cla()
    speclist=np.array(specimen_wid.options)
    methods={'Slow, more accurate':model_circle_slow,'Fast, less accurate':model_circle_fast}
    # Record the temperature bounds (converted to Kelvin) used for this fit
    for key in temperatures:
        ktemp[key]=temperatures[key]+273
    fit,newmethcodes,newcolumns=BiCEP_fit(speclist,ktemp,model=methods[method_wid.value],priorstd=5,n_samples=n_samples_wid.value)
    plot_site_plot(fit)
    display_sampler_diags(fit)
    fits[site_wid.value]=fit
    # Accumulate method codes / extra MagIC columns produced by the fit
    for specimen in newmethcodes.keys():
        spec_method_codes[specimen]+=newmethcodes[specimen]
        spec_extra_columns[specimen]=newcolumns[specimen]
    display_specimen_ring()
    display_gui()
    process_wid.description='Process Site Data'
def newfile(a):
    """Set up the GUI with a new file converted from arai data.

    Loads 'arai_data.csv', restores previously saved interpretation
    temperatures from 'specimen-temperatures.pickle' (falling back to
    each specimen's full temperature range), populates the dropdowns
    and wires the widget callbacks. Guarded by the been_pressed flag so
    a dataset is only loaded once per session.
    """
    global been_pressed
    if not been_pressed:
        global temps,site_wid,specimen_wid,lower_temp_wid,upper_temp_wid,save_wid,temperatures,fig,ax,spec_method_codes,site_method_codes
        been_pressed=True
        temps=pd.read_csv('arai_data.csv')
        spec_method_codes={specimen:'IE-BICEP' for specimen in temps.specimen.unique()}
        site_method_codes={site:'IE-BICEP' for site in temps.site.unique()}
        try:
            with open("specimen-temperatures.pickle",'rb') as tempdict:
                temperatures=pickle.load(tempdict)
            # BUG FIX: the original called .keys() on the open file handle
            # (tempdict), which always raised AttributeError and silently
            # discarded the freshly loaded temperatures via the bare
            # except below. Compare the loaded dict's keys instead.
            if len(np.intersect1d(list(temperatures.keys()),temps.specimen.unique()))==0:
                # Saved temperatures belong to a different dataset; use
                # each specimen's full range (in Celsius) instead.
                temperatures={specimen:np.array([[temps[temps.specimen==specimen].temp_step.unique()[0]-273,temps[temps.specimen==specimen].temp_step.unique()[-1]-273]]) for specimen in temps.specimen.unique()}
        except Exception:
            # No saved pickle (or it was unreadable): use the full range.
            temperatures={specimen:np.array([[temps[temps.specimen==specimen].temp_step.unique()[0]-273,temps[temps.specimen==specimen].temp_step.unique()[-1]-273]]) for specimen in temps.specimen.unique()}
        site_wid.options=temps.site.unique()
        specimen_wid.options=temps[temps.site==site_wid.value].specimen.unique()
        lower_temp_wid.options=temps[temps.specimen==specimen_wid.value].temp_step.unique()-273
        upper_temp_wid.options=temps[temps.specimen==specimen_wid.value].temp_step.unique()-273
        site_wid.observe(on_change)
        specimen_wid.observe(on_change)
        lower_temp_wid.observe(on_change)
        upper_temp_wid.observe(on_change)
        save_wid.on_click(on_button_clicked)
        display_gui()
def examplefile(a):
    """Sets up the GUI with the example dataset of Cych et al (in prep).

    Same as newfile(), but loads 'arai_data_example.csv'. Guarded by the
    been_pressed flag so a dataset is only loaded once per session.
    """
    global been_pressed
    if not been_pressed:
        global temps,site_wid,specimen_wid,lower_temp_wid,upper_temp_wid,save_wid,temperatures,fig,ax,spec_method_codes,site_method_codes
        temps=pd.read_csv('arai_data_example.csv')
        been_pressed=True
        spec_method_codes={specimen:'IE-BICEP' for specimen in temps.specimen.unique()}
        site_method_codes={site:'IE-BICEP' for site in temps.site.unique()}
        try:
            with open("specimen-temperatures.pickle",'rb') as tempdict:
                temperatures=pickle.load(tempdict)
            # BUG FIX: the original called .keys() on the open file handle
            # (tempdict), which always raised AttributeError and silently
            # discarded the freshly loaded temperatures via the bare
            # except below. Compare the loaded dict's keys instead.
            if len(np.intersect1d(list(temperatures.keys()),temps.specimen.unique()))==0:
                # Saved temperatures belong to a different dataset; use
                # each specimen's full range (in Celsius) instead.
                temperatures={specimen:np.array([[temps[temps.specimen==specimen].temp_step.unique()[0]-273,temps[temps.specimen==specimen].temp_step.unique()[-1]-273]]) for specimen in temps.specimen.unique()}
        except Exception:
            # No saved pickle (or it was unreadable): use the full range.
            temperatures={specimen:np.array([[temps[temps.specimen==specimen].temp_step.unique()[0]-273,temps[temps.specimen==specimen].temp_step.unique()[-1]-273]]) for specimen in temps.specimen.unique()}
        site_wid.options=temps.site.unique()
        specimen_wid.options=temps[temps.site==site_wid.value].specimen.unique()
        lower_temp_wid.options=temps[temps.specimen==specimen_wid.value].temp_step.unique()-273
        upper_temp_wid.options=temps[temps.specimen==specimen_wid.value].temp_step.unique()-273
        site_wid.observe(on_change)
        specimen_wid.observe(on_change)
        lower_temp_wid.observe(on_change)
        upper_temp_wid.observe(on_change)
        save_wid.on_click(on_button_clicked)
        display_gui()
# Flexbox layout for a 60%-width row of buttons with children spread evenly.
row_layout_buttons = widgets.Layout(
    width='60%',
    display='flex',
    flex_flow='row',
    justify_content='space-around',
    # NOTE(review): '1000px left' is not a valid CSS margin value —
    # presumably meant to offset the row; confirm the intended styling.
    margin='1000px left'
    )
# Flexbox layout for a full-width row with children spread evenly.
row_layout = widgets.Layout(
    width='100%',
    display='flex',
    flex_flow='row',
    justify_content='space-around'
    )
def save_magic_tables(a):
    """Saves data from the currently displayed site to the MagIC tables.

    Writes the BiCEP posterior summaries back to 'sites.txt' and
    'specimens.txt': 2.5/50/97.5 percentiles of site and specimen
    intensity, specimen curvature (k), the temperature interval used,
    and the method codes / extra columns recorded during fitting.
    """
    fit=fits[site_wid.value]
    sitestable=pd.read_csv('sites.txt',skiprows=1,sep='\t')
    sitestable.loc[sitestable.site==site_wid.value,'int_abs_min']=round(np.percentile(fit['int_site'],2.5),1)
    sitestable.loc[sitestable.site==site_wid.value,'int_abs_max']=round(np.percentile(fit['int_site'],97.5),1)
    sitestable.loc[sitestable.site==site_wid.value,'int_abs']=round(np.percentile(fit['int_site'],50),1)
    specimenstable=pd.read_csv('specimens.txt',skiprows=1,sep='\t')
    speclist=specimen_wid.options
    # IDIOM FIX: enumerate instead of range(len(...)); i indexes the
    # specimen's column in the posterior sample arrays.
    for i, specimen in enumerate(speclist):
        specimenstable.loc[specimenstable.specimen==specimen,'int_abs_min']=round(np.percentile(fit['int_real'][:,i],2.5),1)
        specimenstable.loc[specimenstable.specimen==specimen,'int_abs_max']=round(np.percentile(fit['int_real'][:,i],97.5),1)
        specimenstable.loc[specimenstable.specimen==specimen,'int_abs']=round(np.percentile(fit['int_real'][:,i],50),1)
        specimenstable.loc[specimenstable.specimen==specimen,'int_k_min']=round(np.percentile(fit['k'][:,i],2.5),3)
        specimenstable.loc[specimenstable.specimen==specimen,'int_k_max']=round(np.percentile(fit['k'][:,i],97.5),3)
        specimenstable.loc[specimenstable.specimen==specimen,'int_k']=round(np.percentile(fit['k'][:,i],50),3)
        specimenstable.loc[specimenstable.specimen==specimen,'meas_step_min']=ktemp[specimen][0,0]
        specimenstable.loc[specimenstable.specimen==specimen,'meas_step_max']=ktemp[specimen][0,1]
        specimenstable.loc[specimenstable.specimen==specimen,'method_codes']=spec_method_codes[specimen]
        extra_columns=spec_extra_columns[specimen]
        for col in extra_columns.keys():
            specimenstable.loc[specimenstable.specimen==specimen,col]=extra_columns[col]
    sitestable.loc[sitestable.site==site_wid.value,'method_codes']=site_method_codes[site_wid.value]
    specimenstable['meas_step_unit']='Kelvin'
    # MagIC writer expects blanks, not NaNs
    sitestable=sitestable.fillna('')
    specimenstable=specimenstable.fillna('')
    specimenstable=specimenstable.drop_duplicates(subset=['specimen'])
    sitesdict=sitestable.to_dict('records')
    specimensdict=specimenstable.to_dict('records')
    pmag.magic_write('sites.txt',sitesdict,'sites')
    pmag.magic_write('specimens.txt',specimensdict,'specimens')
def save_figures(a):
    """Save the figure selected in the figchoice dropdown, named after
    the current specimen/site, in the format chosen in figformats."""
    figures = {'Specimen Plot': fig, 'Site Plot': fig_2}
    names = {'Specimen Plot': specimen_wid.value, 'Site Plot': site_wid.value}
    choice = figchoice.value
    figures[choice].savefig(names[choice] + '_BiCEP_fit.' + figformats.value)
#Objects global to all the functions in this module
fits={} #Fits for various sites are stored here- this can use a lot of memory!
ktemp={} #Temperatures (In Kelvin) used for a specimen at the time a site fit has been performed. Only done once site fit is performed.
spec_extra_columns={} #Additional columns for the MagIC tables, e.g. if corrections were applied.
#GUI widgets- these widgets are what are used in the BiCEP_GUI notebook/voila page, which allows you to display them.
#Their interaction is linked to the functions in this module.
#File picker widget- probably not necessary as sites, specimens etc already there.
convert_button = widgets.Button(description='Convert MagIC Data')
convert_button.on_click(convert)
#Been_pressed flag stops you from loading a new dataset when one is already loaded (this breaks the GUI because the dropdown boxes try and update their options before they know what those options are).
#NOTE(review): a `global` statement at module level is a no-op; the assignment below is already module-level.
global been_pressed
been_pressed=False
#Specimen/Interpretation Selection, Arai plot etc.
# Dropdowns for choosing the site/specimen and the interpretation's
# temperature bounds (options are populated by newfile()/examplefile()).
site_wid= widgets.Dropdown(
    description='Site:')
specimen_wid= widgets.Dropdown(
    options=[],
    description='Specimen:')
lower_temp_wid=widgets.Dropdown(
    options=[],
    description='Temperatures (Low):',
    style={"description_width":"initial"}
    )
upper_temp_wid=widgets.Dropdown(
    options=[],
    description='(High):',
    style={"description_width":"initial"})
save_wid=widgets.Button(description='Save Temperatures')
newfile_wid=widgets.Button(description='Use New File',
    style={"description_width":"initial"})
examplefile_wid=widgets.Button(description='Use Example File',
    style={"description_width":"initial"})
# Figure-export controls: which figure, and in what format, to save.
figsave=widgets.Button(description='Save Figures')
figchoice=widgets.Dropdown(options=['Specimen Plot','Site Plot'])
figformats=widgets.Dropdown(description='Format:',options=['pdf','png','jpg','svg','tiff'])
figsave.on_click(save_figures)
newfile_wid.on_click(newfile)
examplefile_wid.on_click(examplefile)
# Read-only buttons used as labels for the MAD/DANG/DRAT statistics.
madbox=widgets.Button(description='MAD:',disabled=True)
dangbox=widgets.Button(description='DANG:',disabled=True)
dratbox=widgets.Button(description='DRAT:',disabled=True)
# Container boxes composing the specimen-processing panel.
filebox=widgets.HBox([newfile_wid,examplefile_wid],grid_area="filebox")
tempbox=widgets.VBox([lower_temp_wid,upper_temp_wid],grid_area="tempbox")
specbox=widgets.VBox([site_wid,specimen_wid],grid_area="specbox")
savebox=widgets.HBox([save_wid,figsave,figchoice,figformats])
dirbox=widgets.HBox([madbox,dangbox])
critbox=widgets.VBox([dirbox,dratbox])
specplots=widgets.Output(grid_area="specplots")
dropdowns=widgets.HBox([specbox,tempbox,critbox],grid_area="dropdowns")
#fullbox gives the entire specimen processing box
fullbox=widgets.Box(children=[filebox,dropdowns,specplots,savebox],title='Specimen Processing',
    layout=widgets.Layout(
    width='100%',
    flex_flow='column',
    align_content='space-around',
    align_items='flex-start')
    )
#Make a plot in the specplots box
with specplots:
    fig,ax=plt.subplots(1,2,figsize=(9,3))
    fig.canvas.header_visible = False
    plt.tight_layout()
dangbox.button_style='info'
madbox.button_style='info'
dratbox.button_style='info'
#GUI widgets for the site processing box
# Sampler controls: number of MCMC samples and which circle model to use.
n_samples_wid=widgets.IntSlider(min=3000,max=100000,value=30000,step=1000,description='n samples')
method_wid=widgets.Dropdown(options=['Slow, more accurate','Fast, less accurate'],description='Sampler:')
process_wid=widgets.Button(description='Process Site Data')
process_wid.on_click(get_site_dist)
# Read-only buttons used as labels for the sampler diagnostics.
rhatlabel=widgets.Button(description='R_hat:',disabled=True)
nefflabel=widgets.Button(description='n_eff:',disabled=True)
banclabel=widgets.Button(description='B_anc:',disabled=True,display='flex',flex_flow='column',align_items='stretch',layout=widgets.Layout(width='auto', height=rhatlabel.layout.height))
# Container boxes composing the site-processing panel.
sampler_diag=widgets.HBox([rhatlabel,nefflabel])
sampler_buttons=widgets.VBox([sampler_diag,banclabel])
sampler_pars=widgets.VBox([n_samples_wid,method_wid])
sampler_line=widgets.HBox([sampler_pars,sampler_buttons])
banclabel.button_style='info'
nefflabel.button_style='info'
rhatlabel.button_style='info'
# Output area holding the site-level figure (k vs B_anc + histogram).
siteplots=widgets.Output()
with siteplots:
    fig_2,ax_2=plt.subplots(1,2,figsize=(6.4,3),sharey=True)
    fig_2.canvas.header_visible = False
savetables=widgets.Button(description='Save to MagIC tables')
savetables.on_click(save_magic_tables)
sitesave=widgets.HBox([savetables])
fullbox2=widgets.VBox([process_wid,sampler_line,siteplots,sitesave],title='Site Processing')
# Two collapsible pages: specimen processing and site processing.
specpage=widgets.Accordion([fullbox])
sitepage=widgets.Accordion([fullbox2])
specpage.set_title(0,'Specimen Processing')
sitepage.set_title(0,'Site Processing')
gui=widgets.VBox([specpage,sitepage])
| [
"pmagpy.pmag.magic_write",
"pmagpy.pmag.dir2cart",
"pickle.dump",
"numpy.abs",
"numpy.sum",
"numpy.amin",
"numpy.polyfit",
"pandas.read_csv",
"numpy.empty",
"pmagpy.pmag.cart2dir",
"ipywidgets.Output",
"numpy.mean",
"numpy.linalg.svd",
"pmagpy.ipmag.atrm_magic",
"numpy.sin",
"pickle.lo... | [((52760, 52878), 'ipywidgets.Layout', 'widgets.Layout', ([], {'width': '"""60%"""', 'display': '"""flex"""', 'flex_flow': '"""row"""', 'justify_content': '"""space-around"""', 'margin': '"""1000px left"""'}), "(width='60%', display='flex', flex_flow='row',\n justify_content='space-around', margin='1000px left')\n", (52774, 52878), True, 'import ipywidgets as widgets\n'), ((52910, 53007), 'ipywidgets.Layout', 'widgets.Layout', ([], {'width': '"""100%"""', 'display': '"""flex"""', 'flex_flow': '"""row"""', 'justify_content': '"""space-around"""'}), "(width='100%', display='flex', flex_flow='row',\n justify_content='space-around')\n", (52924, 53007), True, 'import ipywidgets as widgets\n'), ((56359, 56407), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Convert MagIC Data"""'}), "(description='Convert MagIC Data')\n", (56373, 56407), True, 'import ipywidgets as widgets\n'), ((56748, 56785), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'description': '"""Site:"""'}), "(description='Site:')\n", (56764, 56785), True, 'import ipywidgets as widgets\n'), ((56806, 56859), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': '[]', 'description': '"""Specimen:"""'}), "(options=[], description='Specimen:')\n", (56822, 56859), True, 'import ipywidgets as widgets\n'), ((56885, 56993), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': '[]', 'description': '"""Temperatures (Low):"""', 'style': "{'description_width': 'initial'}"}), "(options=[], description='Temperatures (Low):', style={\n 'description_width': 'initial'})\n", (56901, 56993), True, 'import ipywidgets as widgets\n'), ((57022, 57118), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': '[]', 'description': '"""(High):"""', 'style': "{'description_width': 'initial'}"}), "(options=[], description='(High):', style={\n 'description_width': 'initial'})\n", (57038, 57118), True, 'import ipywidgets as widgets\n'), ((57136, 57183), 'ipywidgets.Button', 
'widgets.Button', ([], {'description': '"""Save Temperatures"""'}), "(description='Save Temperatures')\n", (57150, 57183), True, 'import ipywidgets as widgets\n'), ((57198, 57284), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Use New File"""', 'style': "{'description_width': 'initial'}"}), "(description='Use New File', style={'description_width':\n 'initial'})\n", (57212, 57284), True, 'import ipywidgets as widgets\n'), ((57322, 57412), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Use Example File"""', 'style': "{'description_width': 'initial'}"}), "(description='Use Example File', style={'description_width':\n 'initial'})\n", (57336, 57412), True, 'import ipywidgets as widgets\n'), ((57446, 57488), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Save Figures"""'}), "(description='Save Figures')\n", (57460, 57488), True, 'import ipywidgets as widgets\n'), ((57499, 57555), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': "['Specimen Plot', 'Site Plot']"}), "(options=['Specimen Plot', 'Site Plot'])\n", (57515, 57555), True, 'import ipywidgets as widgets\n'), ((57566, 57655), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'description': '"""Format:"""', 'options': "['pdf', 'png', 'jpg', 'svg', 'tiff']"}), "(description='Format:', options=['pdf', 'png', 'jpg', 'svg',\n 'tiff'])\n", (57582, 57655), True, 'import ipywidgets as widgets\n'), ((57754, 57803), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""MAD:"""', 'disabled': '(True)'}), "(description='MAD:', disabled=True)\n", (57768, 57803), True, 'import ipywidgets as widgets\n'), ((57811, 57861), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""DANG:"""', 'disabled': '(True)'}), "(description='DANG:', disabled=True)\n", (57825, 57861), True, 'import ipywidgets as widgets\n'), ((57869, 57919), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""DRAT:"""', 'disabled': '(True)'}), "(description='DRAT:', 
disabled=True)\n", (57883, 57919), True, 'import ipywidgets as widgets\n'), ((57927, 57992), 'ipywidgets.HBox', 'widgets.HBox', (['[newfile_wid, examplefile_wid]'], {'grid_area': '"""filebox"""'}), "([newfile_wid, examplefile_wid], grid_area='filebox')\n", (57939, 57992), True, 'import ipywidgets as widgets\n'), ((57999, 58066), 'ipywidgets.VBox', 'widgets.VBox', (['[lower_temp_wid, upper_temp_wid]'], {'grid_area': '"""tempbox"""'}), "([lower_temp_wid, upper_temp_wid], grid_area='tempbox')\n", (58011, 58066), True, 'import ipywidgets as widgets\n'), ((58073, 58132), 'ipywidgets.VBox', 'widgets.VBox', (['[site_wid, specimen_wid]'], {'grid_area': '"""specbox"""'}), "([site_wid, specimen_wid], grid_area='specbox')\n", (58085, 58132), True, 'import ipywidgets as widgets\n'), ((58140, 58196), 'ipywidgets.HBox', 'widgets.HBox', (['[save_wid, figsave, figchoice, figformats]'], {}), '([save_wid, figsave, figchoice, figformats])\n', (58152, 58196), True, 'import ipywidgets as widgets\n'), ((58201, 58232), 'ipywidgets.HBox', 'widgets.HBox', (['[madbox, dangbox]'], {}), '([madbox, dangbox])\n', (58213, 58232), True, 'import ipywidgets as widgets\n'), ((58240, 58271), 'ipywidgets.VBox', 'widgets.VBox', (['[dirbox, dratbox]'], {}), '([dirbox, dratbox])\n', (58252, 58271), True, 'import ipywidgets as widgets\n'), ((58281, 58318), 'ipywidgets.Output', 'widgets.Output', ([], {'grid_area': '"""specplots"""'}), "(grid_area='specplots')\n", (58295, 58318), True, 'import ipywidgets as widgets\n'), ((58329, 58393), 'ipywidgets.HBox', 'widgets.HBox', (['[specbox, tempbox, critbox]'], {'grid_area': '"""dropdowns"""'}), "([specbox, tempbox, critbox], grid_area='dropdowns')\n", (58341, 58393), True, 'import ipywidgets as widgets\n'), ((59009, 59102), 'ipywidgets.IntSlider', 'widgets.IntSlider', ([], {'min': '(3000)', 'max': '(100000)', 'value': '(30000)', 'step': '(1000)', 'description': '"""n samples"""'}), "(min=3000, max=100000, value=30000, step=1000, description\n ='n samples')\n", 
(59026, 59102), True, 'import ipywidgets as widgets\n'), ((59105, 59205), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': "['Slow, more accurate', 'Fast, less accurate']", 'description': '"""Sampler:"""'}), "(options=['Slow, more accurate', 'Fast, less accurate'],\n description='Sampler:')\n", (59121, 59205), True, 'import ipywidgets as widgets\n'), ((59212, 59259), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Process Site Data"""'}), "(description='Process Site Data')\n", (59226, 59259), True, 'import ipywidgets as widgets\n'), ((59306, 59357), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""R_hat:"""', 'disabled': '(True)'}), "(description='R_hat:', disabled=True)\n", (59320, 59357), True, 'import ipywidgets as widgets\n'), ((59367, 59418), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""n_eff:"""', 'disabled': '(True)'}), "(description='n_eff:', disabled=True)\n", (59381, 59418), True, 'import ipywidgets as widgets\n'), ((59616, 59652), 'ipywidgets.HBox', 'widgets.HBox', (['[rhatlabel, nefflabel]'], {}), '([rhatlabel, nefflabel])\n', (59628, 59652), True, 'import ipywidgets as widgets\n'), ((59668, 59707), 'ipywidgets.VBox', 'widgets.VBox', (['[sampler_diag, banclabel]'], {}), '([sampler_diag, banclabel])\n', (59680, 59707), True, 'import ipywidgets as widgets\n'), ((59720, 59761), 'ipywidgets.VBox', 'widgets.VBox', (['[n_samples_wid, method_wid]'], {}), '([n_samples_wid, method_wid])\n', (59732, 59761), True, 'import ipywidgets as widgets\n'), ((59774, 59819), 'ipywidgets.HBox', 'widgets.HBox', (['[sampler_pars, sampler_buttons]'], {}), '([sampler_pars, sampler_buttons])\n', (59786, 59819), True, 'import ipywidgets as widgets\n'), ((59919, 59935), 'ipywidgets.Output', 'widgets.Output', ([], {}), '()\n', (59933, 59935), True, 'import ipywidgets as widgets\n'), ((60067, 60117), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Save to MagIC tables"""'}), "(description='Save to MagIC 
tables')\n", (60081, 60117), True, 'import ipywidgets as widgets\n'), ((60166, 60192), 'ipywidgets.HBox', 'widgets.HBox', (['[savetables]'], {}), '([savetables])\n', (60178, 60192), True, 'import ipywidgets as widgets\n'), ((60203, 60295), 'ipywidgets.VBox', 'widgets.VBox', (['[process_wid, sampler_line, siteplots, sitesave]'], {'title': '"""Site Processing"""'}), "([process_wid, sampler_line, siteplots, sitesave], title=\n 'Site Processing')\n", (60215, 60295), True, 'import ipywidgets as widgets\n'), ((60296, 60324), 'ipywidgets.Accordion', 'widgets.Accordion', (['[fullbox]'], {}), '([fullbox])\n', (60313, 60324), True, 'import ipywidgets as widgets\n'), ((60334, 60363), 'ipywidgets.Accordion', 'widgets.Accordion', (['[fullbox2]'], {}), '([fullbox2])\n', (60351, 60363), True, 'import ipywidgets as widgets\n'), ((60452, 60486), 'ipywidgets.VBox', 'widgets.VBox', (['[specpage, sitepage]'], {}), '([specpage, sitepage])\n', (60464, 60486), True, 'import ipywidgets as widgets\n'), ((960, 979), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(3)'}), '(n_components=3)\n', (963, 979), False, 'from sklearn.decomposition import PCA\n'), ((1798, 1810), 'numpy.array', 'np.array', (['XY'], {}), '(XY)\n', (1806, 1810), True, 'import numpy as np\n'), ((2019, 2029), 'numpy.mean', 'np.mean', (['Z'], {}), '(Z)\n', (2026, 2029), True, 'import numpy as np\n'), ((2120, 2159), 'numpy.linalg.svd', 'np.linalg.svd', (['ZXY'], {'full_matrices': '(False)'}), '(ZXY, full_matrices=False)\n', (2133, 2159), True, 'import numpy as np\n'), ((2245, 2295), 'numpy.concatenate', 'np.concatenate', (['[A, [-1.0 * Zmean * A[0]]]'], {'axis': '(0)'}), '([A, [-1.0 * Zmean * A[0]]], axis=0)\n', (2259, 2295), True, 'import numpy as np\n'), ((3836, 3947), 'numpy.array', 'np.array', (['[[slist[0], slist[3], slist[5]], [slist[3], slist[1], slist[4]], [slist[5],\n slist[4], slist[2]]]'], {}), '([[slist[0], slist[3], slist[5]], [slist[3], slist[1], slist[4]], [\n slist[5], slist[4], slist[2]]])\n', 
(3844, 3947), True, 'import numpy as np\n'), ((4029, 4048), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(3)'}), '(n_components=3)\n', (4032, 4048), False, 'from sklearn.decomposition import PCA\n'), ((4416, 4445), 'numpy.matmul', 'np.matmul', (['stensor', 'ancvector'], {}), '(stensor, ancvector)\n', (4425, 4445), True, 'import numpy as np\n'), ((7327, 7401), 'numpy.sqrt', 'np.sqrt', (["((line['intercept'] / line['slope']) ** 2 + line['intercept'] ** 2)"], {}), "((line['intercept'] / line['slope']) ** 2 + line['intercept'] ** 2)\n", (7334, 7401), True, 'import numpy as np\n'), ((8593, 8605), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8601, 8605), True, 'import numpy as np\n'), ((8619, 8631), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8627, 8631), True, 'import numpy as np\n'), ((22753, 22776), 'numpy.empty', 'np.empty', ([], {'shape': '(16, 0)'}), '(shape=(16, 0))\n', (22761, 22776), True, 'import numpy as np\n'), ((30696, 30903), 'pandas.DataFrame', 'pd.DataFrame', (['data_array.T'], {'columns': "['specimen', 'sample', 'site', 'NRM', 'PTRM', 'NRM_x', 'NRM_y', 'NRM_z',\n 'PTRM_x', 'PTRM_y', 'PTRM_z', 'NRM_sigma', 'PTRM_sigma', 'B_lab',\n 'steptype', 'temp_step']"}), "(data_array.T, columns=['specimen', 'sample', 'site', 'NRM',\n 'PTRM', 'NRM_x', 'NRM_y', 'NRM_z', 'PTRM_x', 'PTRM_y', 'PTRM_z',\n 'NRM_sigma', 'PTRM_sigma', 'B_lab', 'steptype', 'temp_step'])\n", (30708, 30903), True, 'import pandas as pd\n'), ((31156, 31188), 'pmagpy.contribution_builder.add_sites_to_meas_table', 'cb.add_sites_to_meas_table', (['"""./"""'], {}), "('./')\n", (31182, 31188), True, 'from pmagpy import contribution_builder as cb\n'), ((31534, 31584), 'pandas.read_csv', 'pd.read_csv', (['"""specimens.txt"""'], {'skiprows': '(1)', 'sep': '"""\t"""'}), "('specimens.txt', skiprows=1, sep='\\t')\n", (31545, 31584), True, 'import pandas as pd\n'), ((31735, 31810), 'pmagpy.ipmag.atrm_magic', 'ipmag.atrm_magic', (['"""measurements.txt"""'], {'output_spec_file': 
'"""specimens_atrm.txt"""'}), "('measurements.txt', output_spec_file='specimens_atrm.txt')\n", (31751, 31810), True, 'import pmagpy.ipmag as ipmag\n'), ((31824, 31879), 'pandas.read_csv', 'pd.read_csv', (['"""specimens_atrm.txt"""'], {'sep': '"""\t"""', 'skiprows': '(1)'}), "('specimens_atrm.txt', sep='\\t', skiprows=1)\n", (31835, 31879), True, 'import pandas as pd\n'), ((32140, 32215), 'pmagpy.ipmag.aarm_magic', 'ipmag.aarm_magic', (['"""measurements.txt"""'], {'output_spec_file': '"""specimens_aarm.txt"""'}), "('measurements.txt', output_spec_file='specimens_aarm.txt')\n", (32156, 32215), True, 'import pmagpy.ipmag as ipmag\n'), ((32229, 32284), 'pandas.read_csv', 'pd.read_csv', (['"""specimens_aarm.txt"""'], {'sep': '"""\t"""', 'skiprows': '(1)'}), "('specimens_aarm.txt', sep='\\t', skiprows=1)\n", (32240, 32284), True, 'import pandas as pd\n'), ((33426, 33482), 'pmagpy.pmag.magic_write', 'pmag.magic_write', (['"""specimens.txt"""', 'specdict', '"""specimens"""'], {}), "('specimens.txt', specdict, 'specimens')\n", (33442, 33482), True, 'import pmagpy.pmag as pmag\n'), ((35383, 35401), 'pandas.read_csv', 'pd.read_csv', (['fname'], {}), '(fname)\n', (35394, 35401), True, 'import pandas as pd\n'), ((40533, 40564), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(1000)'], {}), '(0, 2 * np.pi, 1000)\n', (40544, 40564), True, 'import numpy as np\n'), ((44264, 44298), 'numpy.where', 'np.where', (['(specimenlist == currspec)'], {}), '(specimenlist == currspec)\n', (44272, 44298), True, 'import numpy as np\n'), ((47188, 47244), 'numpy.array', 'np.array', (['[[lower_temp_wid.value, upper_temp_wid.value]]'], {}), '([[lower_temp_wid.value, upper_temp_wid.value]])\n', (47196, 47244), True, 'import numpy as np\n'), ((48226, 48277), 'numpy.percentile', 'np.percentile', (["fit['int_site']", '(2.5, 97.5)'], {'axis': '(0)'}), "(fit['int_site'], (2.5, 97.5), axis=0)\n", (48239, 48277), True, 'import numpy as np\n'), ((48513, 48543), 'numpy.array', 'np.array', 
(['specimen_wid.options'], {}), '(specimen_wid.options)\n', (48521, 48543), True, 'import numpy as np\n'), ((53159, 53205), 'pandas.read_csv', 'pd.read_csv', (['"""sites.txt"""'], {'skiprows': '(1)', 'sep': '"""\t"""'}), "('sites.txt', skiprows=1, sep='\\t')\n", (53170, 53205), True, 'import pandas as pd\n'), ((53549, 53599), 'pandas.read_csv', 'pd.read_csv', (['"""specimens.txt"""'], {'skiprows': '(1)', 'sep': '"""\t"""'}), "('specimens.txt', skiprows=1, sep='\\t')\n", (53560, 53599), True, 'import pandas as pd\n'), ((55296, 55345), 'pmagpy.pmag.magic_write', 'pmag.magic_write', (['"""sites.txt"""', 'sitesdict', '"""sites"""'], {}), "('sites.txt', sitesdict, 'sites')\n", (55312, 55345), True, 'import pmagpy.pmag as pmag\n'), ((55348, 55409), 'pmagpy.pmag.magic_write', 'pmag.magic_write', (['"""specimens.txt"""', 'specimensdict', '"""specimens"""'], {}), "('specimens.txt', specimensdict, 'specimens')\n", (55364, 55409), True, 'import pmagpy.pmag as pmag\n'), ((58777, 58811), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(9, 3)'}), '(1, 2, figsize=(9, 3))\n', (58789, 58811), True, 'import matplotlib.pyplot as plt\n'), ((58851, 58869), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (58867, 58869), True, 'import matplotlib.pyplot as plt\n'), ((59967, 60016), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(6.4, 3)', 'sharey': '(True)'}), '(1, 2, figsize=(6.4, 3), sharey=True)\n', (59979, 60016), True, 'import matplotlib.pyplot as plt\n'), ((1829, 1846), 'numpy.mean', 'np.mean', (['XY[:, 0]'], {}), '(XY[:, 0])\n', (1836, 1846), True, 'import numpy as np\n'), ((1890, 1907), 'numpy.mean', 'np.mean', (['XY[:, 1]'], {}), '(XY[:, 1])\n', (1897, 1907), True, 'import numpy as np\n'), ((1949, 1966), 'numpy.mean', 'np.mean', (['XY[:, 0]'], {}), '(XY[:, 0])\n', (1956, 1966), True, 'import numpy as np\n'), ((1967, 1984), 'numpy.mean', 'np.mean', (['XY[:, 1]'], {}), '(XY[:, 1])\n', (1974, 1984), 
True, 'import numpy as np\n'), ((2083, 2103), 'numpy.array', 'np.array', (['[Z0, X, Y]'], {}), '([Z0, X, Y])\n', (2091, 2103), True, 'import numpy as np\n'), ((3056, 3079), 'numpy.mean', 'np.mean', (['IZZI_trunc.NRM'], {}), '(IZZI_trunc.NRM)\n', (3063, 3079), True, 'import numpy as np\n'), ((4271, 4293), 'numpy.linalg.inv', 'np.linalg.inv', (['stensor'], {}), '(stensor)\n', (4284, 4293), True, 'import numpy as np\n'), ((4385, 4405), 'numpy.array', 'np.array', (['[0, 0, -1]'], {}), '([0, 0, -1])\n', (4393, 4405), True, 'import numpy as np\n'), ((9939, 9964), 'numpy.meshgrid', 'np.meshgrid', (['PTRMS', 'PTRMS'], {}), '(PTRMS, PTRMS)\n', (9950, 9964), True, 'import numpy as np\n'), ((9978, 10001), 'numpy.meshgrid', 'np.meshgrid', (['NRMS', 'NRMS'], {}), '(NRMS, NRMS)\n', (9989, 10001), True, 'import numpy as np\n'), ((10331, 10358), 'numpy.append', 'np.append', (['pTRMsList', 'PTRMS'], {}), '(pTRMsList, PTRMS)\n', (10340, 10358), True, 'import numpy as np\n'), ((10375, 10400), 'numpy.append', 'np.append', (['NRMsList', 'NRMS'], {}), '(NRMsList, NRMS)\n', (10384, 10400), True, 'import numpy as np\n'), ((11385, 11414), 'numpy.stack', 'np.stack', (['(ptrm, nrm)'], {'axis': '(0)'}), '((ptrm, nrm), axis=0)\n', (11393, 11414), True, 'import numpy as np\n'), ((11438, 11451), 'numpy.mean', 'np.mean', (['ptrm'], {}), '(ptrm)\n', (11445, 11451), True, 'import numpy as np\n'), ((11461, 11473), 'numpy.mean', 'np.mean', (['nrm'], {}), '(nrm)\n', (11468, 11473), True, 'import numpy as np\n'), ((11826, 11845), 'numpy.sqrt', 'np.sqrt', (['(w ** 2 + 1)'], {}), '(w ** 2 + 1)\n', (11833, 11845), True, 'import numpy as np\n'), ((18578, 18608), 'pmagpy.pmag.dir2cart', 'pmag.dir2cart', (['[dec, inc, str]'], {}), '([dec, inc, str])\n', (18591, 18608), True, 'import pmagpy.pmag as pmag\n'), ((18625, 18658), 'pmagpy.pmag.dir2cart', 'pmag.dir2cart', (['[pdec, pinc, pint]'], {}), '([pdec, pinc, pint])\n', (18638, 18658), True, 'import pmagpy.pmag as pmag\n'), ((18754, 18770), 
'pmagpy.pmag.cart2dir', 'pmag.cart2dir', (['I'], {}), '(I)\n', (18767, 18770), True, 'import pmagpy.pmag as pmag\n'), ((19266, 19295), 'numpy.sqrt', 'np.sqrt', (['(sig ** 2 + psig ** 2)'], {}), '(sig ** 2 + psig ** 2)\n', (19273, 19295), True, 'import numpy as np\n'), ((19871, 19901), 'pmagpy.pmag.dir2cart', 'pmag.dir2cart', (['[dec, inc, str]'], {}), '([dec, inc, str])\n', (19884, 19901), True, 'import pmagpy.pmag as pmag\n'), ((19918, 19951), 'pmagpy.pmag.dir2cart', 'pmag.dir2cart', (['[pdec, pinc, pint]'], {}), '([pdec, pinc, pint])\n', (19931, 19951), True, 'import pmagpy.pmag as pmag\n'), ((20047, 20063), 'pmagpy.pmag.cart2dir', 'pmag.cart2dir', (['I'], {}), '(I)\n', (20060, 20063), True, 'import pmagpy.pmag as pmag\n'), ((20560, 20589), 'numpy.sqrt', 'np.sqrt', (['(sig ** 2 + psig ** 2)'], {}), '(sig ** 2 + psig ** 2)\n', (20567, 20589), True, 'import numpy as np\n'), ((22298, 22317), 'numpy.tanh', 'np.tanh', (['(b * fields)'], {}), '(b * fields)\n', (22305, 22317), True, 'import numpy as np\n'), ((23546, 23600), 'pmagpy.pmag.dir2cart', 'pmag.dir2cart', (['[NRM_dec_max, NRM_inc_max, NRM_int_max]'], {}), '([NRM_dec_max, NRM_inc_max, NRM_int_max])\n', (23559, 23600), True, 'import pmagpy.pmag as pmag\n'), ((23627, 23684), 'pmagpy.pmag.dir2cart', 'pmag.dir2cart', (['[PTRM_dec_max, PTRM_inc_max, PTRM_int_max]'], {}), '([PTRM_dec_max, PTRM_inc_max, PTRM_int_max])\n', (23640, 23684), True, 'import pmagpy.pmag as pmag\n'), ((23830, 23847), 'numpy.array', 'np.array', (['first_Z'], {}), '(first_Z)\n', (23838, 23847), True, 'import numpy as np\n'), ((23868, 23885), 'numpy.array', 'np.array', (['first_I'], {}), '(first_I)\n', (23876, 23885), True, 'import numpy as np\n'), ((32682, 32716), 'numpy.append', 'np.append', (['cols.values', '"""specimen"""'], {}), "(cols.values, 'specimen')\n", (32691, 32716), True, 'import numpy as np\n'), ((32731, 32827), 'pandas.merge', 'pd.merge', (['spec.loc[:, cols]', 'spec_atrm'], {'how': '"""right"""', 'left_on': '"""specimen"""', 
'right_on': '"""specimen"""'}), "(spec.loc[:, cols], spec_atrm, how='right', left_on='specimen',\n right_on='specimen')\n", (32739, 32827), True, 'import pandas as pd\n'), ((34512, 34524), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (34520, 34524), True, 'import numpy as np\n'), ((34787, 34807), 'numpy.log', 'np.log', (['(croven / crs)'], {}), '(croven / crs)\n', (34793, 34807), True, 'import numpy as np\n'), ((34818, 34852), 'numpy.polyfit', 'np.polyfit', (['crlog', 'norm_moments', '(1)'], {}), '(crlog, norm_moments, 1)\n', (34828, 34852), True, 'import numpy as np\n'), ((34994, 35018), 'numpy.log', 'np.log', (['(croven / cr_real)'], {}), '(croven / cr_real)\n', (35000, 35018), True, 'import numpy as np\n'), ((38785, 38804), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(3)'}), '(n_components=3)\n', (38788, 38804), False, 'from sklearn.decomposition import PCA\n'), ((38971, 38993), 'numpy.outer', 'np.outer', (['vals', 'vector'], {}), '(vals, vector)\n', (38979, 38993), True, 'import numpy as np\n'), ((39172, 39203), 'numpy.mean', 'np.mean', (['NRM_trunc_dirs'], {'axis': '(0)'}), '(NRM_trunc_dirs, axis=0)\n', (39179, 39203), True, 'import numpy as np\n'), ((42293, 42337), 'numpy.percentile', 'np.percentile', (["fit['k']", '(2.5, 97.5)'], {'axis': '(0)'}), "(fit['k'], (2.5, 97.5), axis=0)\n", (42306, 42337), True, 'import numpy as np\n'), ((42452, 42490), 'numpy.percentile', 'np.percentile', (['Bs', '(2.5, 97.5)'], {'axis': '(0)'}), '(Bs, (2.5, 97.5), axis=0)\n', (42465, 42490), True, 'import numpy as np\n'), ((42505, 42532), 'numpy.median', 'np.median', (["fit['k']"], {'axis': '(0)'}), "(fit['k'], axis=0)\n", (42514, 42532), True, 'import numpy as np\n'), ((42532, 42553), 'numpy.median', 'np.median', (['Bs'], {'axis': '(0)'}), '(Bs, axis=0)\n', (42541, 42553), True, 'import numpy as np\n'), ((43104, 43134), 'numpy.array', 'np.array', (['specimen_wid.options'], {}), '(specimen_wid.options)\n', (43112, 43134), True, 'import numpy as np\n'), 
((43506, 43532), 'numpy.median', 'np.median', (["fit['int_site']"], {}), "(fit['int_site'])\n", (43515, 43532), True, 'import numpy as np\n'), ((43564, 43590), 'numpy.median', 'np.median', (["fit['int_site']"], {}), "(fit['int_site'])\n", (43573, 43590), True, 'import numpy as np\n'), ((44341, 44374), 'numpy.median', 'np.median', (["fit['k'][:, specindex]"], {}), "(fit['k'][:, specindex])\n", (44350, 44374), True, 'import numpy as np\n'), ((44374, 44414), 'numpy.median', 'np.median', (["fit['int_real'][:, specindex]"], {}), "(fit['int_real'][:, specindex])\n", (44383, 44414), True, 'import numpy as np\n'), ((47317, 47352), 'pickle.dump', 'pickle.dump', (['temperatures', 'tempdict'], {}), '(temperatures, tempdict)\n', (47328, 47352), False, 'import pickle\n'), ((49494, 49522), 'pandas.read_csv', 'pd.read_csv', (['"""arai_data.csv"""'], {}), "('arai_data.csv')\n", (49505, 49522), True, 'import pandas as pd\n'), ((51248, 51284), 'pandas.read_csv', 'pd.read_csv', (['"""arai_data_example.csv"""'], {}), "('arai_data_example.csv')\n", (51259, 51284), True, 'import pandas as pd\n'), ((53276, 53311), 'numpy.percentile', 'np.percentile', (["fit['int_site']", '(2.5)'], {}), "(fit['int_site'], 2.5)\n", (53289, 53311), True, 'import numpy as np\n'), ((53386, 53422), 'numpy.percentile', 'np.percentile', (["fit['int_site']", '(97.5)'], {}), "(fit['int_site'], 97.5)\n", (53399, 53422), True, 'import numpy as np\n'), ((53493, 53527), 'numpy.percentile', 'np.percentile', (["fit['int_site']", '(50)'], {}), "(fit['int_site'], 50)\n", (53506, 53527), True, 'import numpy as np\n'), ((58553, 58662), 'ipywidgets.Layout', 'widgets.Layout', ([], {'width': '"""100%"""', 'flex_flow': '"""column"""', 'align_content': '"""space-around"""', 'align_items': '"""flex-start"""'}), "(width='100%', flex_flow='column', align_content=\n 'space-around', align_items='flex-start')\n", (58567, 58662), True, 'import ipywidgets as widgets\n'), ((59541, 59601), 'ipywidgets.Layout', 'widgets.Layout', ([], 
{'width': '"""auto"""', 'height': 'rhatlabel.layout.height'}), "(width='auto', height=rhatlabel.layout.height)\n", (59555, 59601), True, 'import ipywidgets as widgets\n'), ((1081, 1116), 'numpy.sqrt', 'np.sqrt', (['((fit[2] + fit[1]) / fit[0])'], {}), '((fit[2] + fit[1]) / fit[0])\n', (1088, 1116), True, 'import numpy as np\n'), ((2057, 2071), 'numpy.sqrt', 'np.sqrt', (['Zmean'], {}), '(Zmean)\n', (2064, 2071), True, 'import numpy as np\n'), ((2221, 2235), 'numpy.sqrt', 'np.sqrt', (['Zmean'], {}), '(Zmean)\n', (2228, 2235), True, 'import numpy as np\n'), ((2352, 2404), 'numpy.sqrt', 'np.sqrt', (['(A[1] * A[1] + A[2] * A[2] - 4 * A[0] * A[3])'], {}), '(A[1] * A[1] + A[2] * A[2] - 4 * A[0] * A[3])\n', (2359, 2404), True, 'import numpy as np\n'), ((2922, 2931), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (2928, 2931), True, 'import numpy as np\n'), ((3082, 3106), 'numpy.mean', 'np.mean', (['IZZI_trunc.PTRM'], {}), '(IZZI_trunc.PTRM)\n', (3089, 3106), True, 'import numpy as np\n'), ((4228, 4247), 'numpy.sum', 'np.sum', (['(vector ** 2)'], {}), '(vector ** 2)\n', (4234, 4247), True, 'import numpy as np\n'), ((4334, 4356), 'numpy.sum', 'np.sum', (['(ancvector ** 2)'], {}), '(ancvector ** 2)\n', (4340, 4356), True, 'import numpy as np\n'), ((4459, 4478), 'numpy.sum', 'np.sum', (['(labmag ** 2)'], {}), '(labmag ** 2)\n', (4465, 4478), True, 'import numpy as np\n'), ((4486, 4505), 'numpy.sum', 'np.sum', (['(ancmag ** 2)'], {}), '(ancmag ** 2)\n', (4492, 4505), True, 'import numpy as np\n'), ((4782, 4791), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (4788, 4791), True, 'import numpy as np\n'), ((8028, 8056), 'numpy.sqrt', 'np.sqrt', (['(x_c ** 2 + y_c ** 2)'], {}), '(x_c ** 2 + y_c ** 2)\n', (8035, 8056), True, 'import numpy as np\n'), ((10022, 10062), 'numpy.sqrt', 'np.sqrt', (['((Pi - Pj) ** 2 + (Ni - Nj) ** 2)'], {}), '((Pi - Pj) ** 2 + (Ni - Nj) ** 2)\n', (10029, 10062), True, 'import numpy as np\n'), ((16947, 16980), 'pmagpy.pmag.dir2cart', 'pmag.dir2cart', (['[idec, 
iinc, istr]'], {}), '([idec, iinc, istr])\n', (16960, 16980), True, 'import pmagpy.pmag as pmag\n'), ((16998, 17028), 'pmagpy.pmag.dir2cart', 'pmag.dir2cart', (['[dec, inc, str]'], {}), '([dec, inc, str])\n', (17011, 17028), True, 'import pmagpy.pmag as pmag\n'), ((32885, 32981), 'pandas.merge', 'pd.merge', (['spec.loc[:, cols]', 'spec_aarm'], {'how': '"""right"""', 'left_on': '"""specimen"""', 'right_on': '"""specimen"""'}), "(spec.loc[:, cols], spec_aarm, how='right', left_on='specimen',\n right_on='specimen')\n", (32893, 32981), True, 'import pandas as pd\n'), ((32990, 33017), 'pandas.concat', 'pd.concat', (['[spec_2, spec_1]'], {}), '([spec_2, spec_1])\n', (32999, 33017), True, 'import pandas as pd\n'), ((33172, 33206), 'numpy.append', 'np.append', (['cols.values', '"""specimen"""'], {}), "(cols.values, 'specimen')\n", (33181, 33206), True, 'import numpy as np\n'), ((33219, 33315), 'pandas.merge', 'pd.merge', (['spec.loc[:, cols]', 'spec_aarm'], {'how': '"""right"""', 'left_on': '"""specimen"""', 'right_on': '"""specimen"""'}), "(spec.loc[:, cols], spec_aarm, how='right', left_on='specimen',\n right_on='specimen')\n", (33227, 33315), True, 'import pandas as pd\n'), ((40837, 40851), 'numpy.sin', 'np.sin', (['thetas'], {}), '(thetas)\n', (40843, 40851), True, 'import numpy as np\n'), ((41952, 41969), 'numpy.amin', 'np.amin', (["fit['k']"], {}), "(fit['k'])\n", (41959, 41969), True, 'import numpy as np\n'), ((41970, 41987), 'numpy.amax', 'np.amax', (["fit['k']"], {}), "(fit['k'])\n", (41977, 41987), True, 'import numpy as np\n'), ((42336, 42357), 'numpy.median', 'np.median', (['Bs'], {'axis': '(0)'}), '(Bs, axis=0)\n', (42345, 42357), True, 'import numpy as np\n'), ((42357, 42378), 'numpy.median', 'np.median', (['Bs'], {'axis': '(0)'}), '(Bs, axis=0)\n', (42366, 42378), True, 'import numpy as np\n'), ((42397, 42424), 'numpy.median', 'np.median', (["fit['k']"], {'axis': '(0)'}), "(fit['k'], axis=0)\n", (42406, 42424), True, 'import numpy as np\n'), ((42424, 42451), 
'numpy.median', 'np.median', (["fit['k']"], {'axis': '(0)'}), "(fit['k'], axis=0)\n", (42433, 42451), True, 'import numpy as np\n'), ((45705, 45738), 'numpy.median', 'np.median', (["fit['k'][:, specindex]"], {}), "(fit['k'][:, specindex])\n", (45714, 45738), True, 'import numpy as np\n'), ((45738, 45778), 'numpy.median', 'np.median', (["fit['int_real'][:, specindex]"], {}), "(fit['int_real'][:, specindex])\n", (45747, 45778), True, 'import numpy as np\n'), ((53778, 53819), 'numpy.percentile', 'np.percentile', (["fit['int_real'][:, i]", '(2.5)'], {}), "(fit['int_real'][:, i], 2.5)\n", (53791, 53819), True, 'import numpy as np\n'), ((53903, 53945), 'numpy.percentile', 'np.percentile', (["fit['int_real'][:, i]", '(97.5)'], {}), "(fit['int_real'][:, i], 97.5)\n", (53916, 53945), True, 'import numpy as np\n'), ((54025, 54065), 'numpy.percentile', 'np.percentile', (["fit['int_real'][:, i]", '(50)'], {}), "(fit['int_real'][:, i], 50)\n", (54038, 54065), True, 'import numpy as np\n'), ((54147, 54181), 'numpy.percentile', 'np.percentile', (["fit['k'][:, i]", '(2.5)'], {}), "(fit['k'][:, i], 2.5)\n", (54160, 54181), True, 'import numpy as np\n'), ((54263, 54298), 'numpy.percentile', 'np.percentile', (["fit['k'][:, i]", '(97.5)'], {}), "(fit['k'][:, i], 97.5)\n", (54276, 54298), True, 'import numpy as np\n'), ((54376, 54409), 'numpy.percentile', 'np.percentile', (["fit['k'][:, i]", '(50)'], {}), "(fit['k'][:, i], 50)\n", (54389, 54409), True, 'import numpy as np\n'), ((2846, 2870), 'numpy.mean', 'np.mean', (['IZZI_trunc.PTRM'], {}), '(IZZI_trunc.PTRM)\n', (2853, 2870), True, 'import numpy as np\n'), ((2888, 2911), 'numpy.mean', 'np.mean', (['IZZI_trunc.NRM'], {}), '(IZZI_trunc.NRM)\n', (2895, 2911), True, 'import numpy as np\n'), ((3407, 3455), 'numpy.abs', 'np.abs', (['(P.PTRM.values - IZZI_reduced.PTRM.values)'], {}), '(P.PTRM.values - IZZI_reduced.PTRM.values)\n', (3413, 3455), True, 'import numpy as np\n'), ((4724, 4742), 'numpy.mean', 'np.mean', (['IZZI.PTRM'], {}), 
'(IZZI.PTRM)\n', (4731, 4742), True, 'import numpy as np\n'), ((4754, 4771), 'numpy.mean', 'np.mean', (['IZZI.NRM'], {}), '(IZZI.NRM)\n', (4761, 4771), True, 'import numpy as np\n'), ((5045, 5066), 'numpy.tanh', 'np.tanh', (['(beta * B_lab)'], {}), '(beta * B_lab)\n', (5052, 5066), True, 'import numpy as np\n'), ((5068, 5077), 'numpy.abs', 'np.abs', (['b'], {}), '(b)\n', (5074, 5077), True, 'import numpy as np\n'), ((8112, 8132), 'numpy.arctan', 'np.arctan', (['(y_c / x_c)'], {}), '(y_c / x_c)\n', (8121, 8132), True, 'import numpy as np\n'), ((10915, 10932), 'numpy.sqrt', 'np.sqrt', (['dmaxlist'], {}), '(dmaxlist)\n', (10922, 10932), True, 'import numpy as np\n'), ((17169, 17185), 'pmagpy.pmag.cart2dir', 'pmag.cart2dir', (['I'], {}), '(I)\n', (17182, 17185), True, 'import pmagpy.pmag as pmag\n'), ((17538, 17567), 'numpy.sqrt', 'np.sqrt', (['(isig ** 2 + sig ** 2)'], {}), '(isig ** 2 + sig ** 2)\n', (17545, 17567), True, 'import numpy as np\n'), ((17792, 17836), 'pmagpy.pmag.angle', 'pmag.angle', (['[iDir[0], iDir[1]]', '[phi, theta]'], {}), '([iDir[0], iDir[1]], [phi, theta])\n', (17802, 17836), True, 'import pmagpy.pmag as pmag\n'), ((25433, 25579), 'numpy.array', 'np.array', (['[specarray, sample, site, NRM, PTRM, NRM_x, NRM_y, NRM_z, PTRM_x, PTRM_y,\n PTRM_z, NRM_sigma, PTRM_sigma, B_lab, steptype, temp_step]'], {}), '([specarray, sample, site, NRM, PTRM, NRM_x, NRM_y, NRM_z, PTRM_x,\n PTRM_y, PTRM_z, NRM_sigma, PTRM_sigma, B_lab, steptype, temp_step])\n', (25441, 25579), True, 'import numpy as np\n'), ((25592, 25638), 'numpy.concatenate', 'np.concatenate', (['(data_array, newarray)'], {'axis': '(1)'}), '((data_array, newarray), axis=1)\n', (25606, 25638), True, 'import numpy as np\n'), ((25713, 25733), 'numpy.array', 'np.array', (['ptrm_check'], {}), '(ptrm_check)\n', (25721, 25733), True, 'import numpy as np\n'), ((26465, 26517), 'numpy.array', 'np.array', (['[intersect[5], intersect[6], intersect[7]]'], {}), '([intersect[5], intersect[6], intersect[7]])\n', 
(26473, 26517), True, 'import numpy as np\n'), ((28051, 28070), 'numpy.array', 'np.array', (['ptrm_tail'], {}), '(ptrm_tail)\n', (28059, 28070), True, 'import numpy as np\n'), ((40742, 40756), 'numpy.cos', 'np.cos', (['thetas'], {}), '(thetas)\n', (40748, 40756), True, 'import numpy as np\n'), ((41257, 41293), 'numpy.median', 'np.median', (["fit['dist_to_edge'][:, i]"], {}), "(fit['dist_to_edge'][:, i])\n", (41266, 41293), True, 'import numpy as np\n'), ((43908, 43951), 'numpy.percentile', 'np.percentile', (["fit['int_real']", '(2.5)'], {'axis': '(0)'}), "(fit['int_real'], 2.5, axis=0)\n", (43921, 43951), True, 'import numpy as np\n'), ((43959, 44003), 'numpy.percentile', 'np.percentile', (["fit['int_real']", '(97.5)'], {'axis': '(0)'}), "(fit['int_real'], 97.5, axis=0)\n", (43972, 44003), True, 'import numpy as np\n'), ((45650, 45672), 'numpy.array', 'np.array', (['specimenlist'], {}), '(specimenlist)\n', (45658, 45672), True, 'import numpy as np\n'), ((47525, 47541), 'numpy.abs', 'np.abs', (['(1 - rhat)'], {}), '(1 - rhat)\n', (47531, 47541), True, 'import numpy as np\n'), ((49801, 49822), 'pickle.load', 'pickle.load', (['tempdict'], {}), '(tempdict)\n', (49812, 49822), False, 'import pickle\n'), ((51589, 51610), 'pickle.load', 'pickle.load', (['tempdict'], {}), '(tempdict)\n', (51600, 51610), False, 'import pickle\n'), ((2485, 2537), 'numpy.sqrt', 'np.sqrt', (['((Xprime[i] - a) ** 2 + (Yprime[i] - b) ** 2)'], {}), '((Xprime[i] - a) ** 2 + (Yprime[i] - b) ** 2)\n', (2492, 2537), True, 'import numpy as np\n'), ((5035, 5044), 'numpy.abs', 'np.abs', (['b'], {}), '(b)\n', (5041, 5044), True, 'import numpy as np\n'), ((10079, 10093), 'numpy.mean', 'np.mean', (['PTRMS'], {}), '(PTRMS)\n', (10086, 10093), True, 'import numpy as np\n'), ((10097, 10110), 'numpy.mean', 'np.mean', (['NRMS'], {}), '(NRMS)\n', (10104, 10110), True, 'import numpy as np\n'), ((18851, 18861), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (18858, 18861), True, 'import numpy as np\n'), ((18910, 
18920), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (18917, 18920), True, 'import numpy as np\n'), ((20144, 20154), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20151, 20154), True, 'import numpy as np\n'), ((20203, 20213), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20210, 20213), True, 'import numpy as np\n'), ((21031, 21041), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (21038, 21041), True, 'import numpy as np\n'), ((27096, 27242), 'numpy.array', 'np.array', (['[specarray, sample, site, NRM, PTRM, NRM_x, NRM_y, NRM_z, PTRM_x, PTRM_y,\n PTRM_z, NRM_sigma, PTRM_sigma, B_lab, steptype, temp_step]'], {}), '([specarray, sample, site, NRM, PTRM, NRM_x, NRM_y, NRM_z, PTRM_x,\n PTRM_y, PTRM_z, NRM_sigma, PTRM_sigma, B_lab, steptype, temp_step])\n', (27104, 27242), True, 'import numpy as np\n'), ((27259, 27305), 'numpy.concatenate', 'np.concatenate', (['(data_array, newarray)'], {'axis': '(1)'}), '((data_array, newarray), axis=1)\n', (27273, 27305), True, 'import numpy as np\n'), ((27359, 27397), 'numpy.setdiff1d', 'np.setdiff1d', (['temp_step', 'intersect[-1]'], {}), '(temp_step, intersect[-1])\n', (27371, 27397), True, 'import numpy as np\n'), ((27583, 27951), 'numpy.array', 'np.array', (['[specarray[temp_step != diff], sample[temp_step != diff], site[temp_step !=\n diff], NRM, PTRM[temp_step != diff], NRM_x, NRM_y, NRM_z, PTRM_x[\n temp_step != diff], PTRM_y[temp_step != diff], PTRM_z[temp_step != diff\n ], NRM_sigma, PTRM_sigma[temp_step != diff], B_lab[temp_step != diff],\n steptype[temp_step != diff], temp_step[temp_step != diff]]'], {}), '([specarray[temp_step != diff], sample[temp_step != diff], site[\n temp_step != diff], NRM, PTRM[temp_step != diff], NRM_x, NRM_y, NRM_z,\n PTRM_x[temp_step != diff], PTRM_y[temp_step != diff], PTRM_z[temp_step !=\n diff], NRM_sigma, PTRM_sigma[temp_step != diff], B_lab[temp_step !=\n diff], steptype[temp_step != diff], temp_step[temp_step != diff]])\n', (27591, 27951), True, 'import numpy as np\n'), ((27933, 
27979), 'numpy.concatenate', 'np.concatenate', (['(data_array, newarray)'], {'axis': '(1)'}), '((data_array, newarray), axis=1)\n', (27947, 27979), True, 'import numpy as np\n'), ((28961, 29014), 'numpy.array', 'np.array', (['[intersect[8], intersect[9], intersect[10]]'], {}), '([intersect[8], intersect[9], intersect[10]])\n', (28969, 29014), True, 'import numpy as np\n'), ((41110, 41137), 'numpy.median', 'np.median', (["fit['phi'][:, i]"], {}), "(fit['phi'][:, i])\n", (41119, 41137), True, 'import numpy as np\n'), ((41158, 41194), 'numpy.median', 'np.median', (["fit['dist_to_edge'][:, i]"], {}), "(fit['dist_to_edge'][:, i])\n", (41167, 41194), True, 'import numpy as np\n'), ((41300, 41327), 'numpy.median', 'np.median', (["fit['phi'][:, i]"], {}), "(fit['phi'][:, i])\n", (41309, 41327), True, 'import numpy as np\n'), ((42225, 42245), 'numpy.array', 'np.array', (['B_lab_list'], {}), '(B_lab_list)\n', (42233, 42245), True, 'import numpy as np\n'), ((44037, 44073), 'numpy.percentile', 'np.percentile', (["fit['k']", '(2.5)'], {'axis': '(0)'}), "(fit['k'], 2.5, axis=0)\n", (44050, 44073), True, 'import numpy as np\n'), ((44081, 44117), 'numpy.percentile', 'np.percentile', (["fit['k']", '(2.5)'], {'axis': '(0)'}), "(fit['k'], 2.5, axis=0)\n", (44094, 44117), True, 'import numpy as np\n'), ((44130, 44167), 'numpy.percentile', 'np.percentile', (["fit['k']", '(97.5)'], {'axis': '(0)'}), "(fit['k'], 97.5, axis=0)\n", (44143, 44167), True, 'import numpy as np\n'), ((44175, 44212), 'numpy.percentile', 'np.percentile', (["fit['k']", '(97.5)'], {'axis': '(0)'}), "(fit['k'], 97.5, axis=0)\n", (44188, 44212), True, 'import numpy as np\n'), ((47545, 47561), 'numpy.abs', 'np.abs', (['(1 - rhat)'], {}), '(1 - rhat)\n', (47551, 47561), True, 'import numpy as np\n'), ((15298, 15308), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15305, 15308), True, 'import numpy as np\n'), ((16509, 16519), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16516, 16519), True, 'import numpy as 
np\n'), ((18826, 18839), 'numpy.radians', 'np.radians', (['(2)'], {}), '(2)\n', (18836, 18839), True, 'import numpy as np\n'), ((18840, 18850), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (18847, 18850), True, 'import numpy as np\n'), ((18885, 18898), 'numpy.radians', 'np.radians', (['(2)'], {}), '(2)\n', (18895, 18898), True, 'import numpy as np\n'), ((18899, 18909), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (18906, 18909), True, 'import numpy as np\n'), ((19023, 19033), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19030, 19033), True, 'import numpy as np\n'), ((19099, 19109), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19106, 19109), True, 'import numpy as np\n'), ((19175, 19185), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19182, 19185), True, 'import numpy as np\n'), ((19234, 19244), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19241, 19244), True, 'import numpy as np\n'), ((20119, 20132), 'numpy.radians', 'np.radians', (['(2)'], {}), '(2)\n', (20129, 20132), True, 'import numpy as np\n'), ((20133, 20143), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (20140, 20143), True, 'import numpy as np\n'), ((20178, 20191), 'numpy.radians', 'np.radians', (['(2)'], {}), '(2)\n', (20188, 20191), True, 'import numpy as np\n'), ((20192, 20202), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (20199, 20202), True, 'import numpy as np\n'), ((20316, 20326), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20323, 20326), True, 'import numpy as np\n'), ((20393, 20403), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20400, 20403), True, 'import numpy as np\n'), ((20469, 20479), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20476, 20479), True, 'import numpy as np\n'), ((20528, 20538), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20535, 20538), True, 'import numpy as np\n'), ((21006, 21019), 'numpy.radians', 'np.radians', (['(2)'], {}), '(2)\n', (21016, 21019), True, 'import numpy as np\n'), ((21020, 21030), 'numpy.sqrt', 
'np.sqrt', (['(3)'], {}), '(3)\n', (21027, 21030), True, 'import numpy as np\n'), ((21140, 21150), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (21147, 21150), True, 'import numpy as np\n'), ((21212, 21222), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (21219, 21222), True, 'import numpy as np\n'), ((25004, 25041), 'numpy.array', 'np.array', (['[NRM_dec, NRM_inc, NRM_int]'], {}), '([NRM_dec, NRM_inc, NRM_int])\n', (25012, 25041), True, 'import numpy as np\n'), ((25089, 25144), 'numpy.array', 'np.array', (['[first_I[:, 1], first_I[:, 2], first_I[:, 3]]'], {}), '([first_I[:, 1], first_I[:, 2], first_I[:, 3]])\n', (25097, 25144), True, 'import numpy as np\n'), ((26606, 26670), 'numpy.array', 'np.array', (['[ptrm_check[:, 1], ptrm_check[:, 2], ptrm_check[:, 3]]'], {}), '([ptrm_check[:, 1], ptrm_check[:, 2], ptrm_check[:, 3]])\n', (26614, 26670), True, 'import numpy as np\n'), ((29528, 29674), 'numpy.array', 'np.array', (['[specarray, sample, site, NRM, PTRM, NRM_x, NRM_y, NRM_z, PTRM_x, PTRM_y,\n PTRM_z, NRM_sigma, PTRM_sigma, B_lab, steptype, temp_step]'], {}), '([specarray, sample, site, NRM, PTRM, NRM_x, NRM_y, NRM_z, PTRM_x,\n PTRM_y, PTRM_z, NRM_sigma, PTRM_sigma, B_lab, steptype, temp_step])\n', (29536, 29674), True, 'import numpy as np\n'), ((29695, 29741), 'numpy.concatenate', 'np.concatenate', (['(data_array, newarray)'], {'axis': '(1)'}), '((data_array, newarray), axis=1)\n', (29709, 29741), True, 'import numpy as np\n'), ((29803, 29841), 'numpy.setdiff1d', 'np.setdiff1d', (['temp_step', 'intersect[-1]'], {}), '(temp_step, intersect[-1])\n', (29815, 29841), True, 'import numpy as np\n'), ((30047, 30420), 'numpy.array', 'np.array', (['[specarray[temp_step != diff], sample[temp_step != diff], site[temp_step !=\n diff], NRM[temp_step != diff], PTRM, NRM_x[temp_step != diff], NRM_y[\n temp_step != diff], NRM_z[temp_step != diff], PTRM_x, PTRM_y, PTRM_z,\n NRM_sigma[temp_step != diff], PTRM_sigma, B_lab[temp_step != diff],\n steptype[temp_step != diff], 
temp_step[temp_step != diff]]'], {}), '([specarray[temp_step != diff], sample[temp_step != diff], site[\n temp_step != diff], NRM[temp_step != diff], PTRM, NRM_x[temp_step !=\n diff], NRM_y[temp_step != diff], NRM_z[temp_step != diff], PTRM_x,\n PTRM_y, PTRM_z, NRM_sigma[temp_step != diff], PTRM_sigma, B_lab[\n temp_step != diff], steptype[temp_step != diff], temp_step[temp_step !=\n diff]])\n', (30055, 30420), True, 'import numpy as np\n'), ((30401, 30447), 'numpy.concatenate', 'np.concatenate', (['(data_array, newarray)'], {'axis': '(1)'}), '((data_array, newarray), axis=1)\n', (30415, 30447), True, 'import numpy as np\n'), ((39336, 39360), 'numpy.dot', 'np.dot', (['NRM_vect', 'vector'], {}), '(NRM_vect, vector)\n', (39342, 39360), True, 'import numpy as np\n'), ((41201, 41228), 'numpy.median', 'np.median', (["fit['phi'][:, i]"], {}), "(fit['phi'][:, i])\n", (41210, 41228), True, 'import numpy as np\n'), ((43190, 43212), 'numpy.array', 'np.array', (['specimenlist'], {}), '(specimenlist)\n', (43198, 43212), True, 'import numpy as np\n'), ((2963, 2986), 'numpy.mean', 'np.mean', (['IZZI_trunc.NRM'], {}), '(IZZI_trunc.NRM)\n', (2970, 2986), True, 'import numpy as np\n'), ((3016, 3040), 'numpy.mean', 'np.mean', (['IZZI_trunc.PTRM'], {}), '(IZZI_trunc.PTRM)\n', (3023, 3040), True, 'import numpy as np\n'), ((4817, 4834), 'numpy.mean', 'np.mean', (['IZZI.NRM'], {}), '(IZZI.NRM)\n', (4824, 4834), True, 'import numpy as np\n'), ((4858, 4876), 'numpy.mean', 'np.mean', (['IZZI.PTRM'], {}), '(IZZI.PTRM)\n', (4865, 4876), True, 'import numpy as np\n'), ((11043, 11058), 'numpy.array', 'np.array', (['klist'], {}), '(klist)\n', (11051, 11058), True, 'import numpy as np\n'), ((11059, 11085), 'numpy.array', 'np.array', (['dist_to_edgelist'], {}), '(dist_to_edgelist)\n', (11067, 11085), True, 'import numpy as np\n'), ((15273, 15286), 'numpy.radians', 'np.radians', (['(2)'], {}), '(2)\n', (15283, 15286), True, 'import numpy as np\n'), ((15287, 15297), 'numpy.sqrt', 'np.sqrt', 
(['(3)'], {}), '(3)\n', (15294, 15297), True, 'import numpy as np\n'), ((15415, 15425), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15422, 15425), True, 'import numpy as np\n'), ((15494, 15504), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15501, 15504), True, 'import numpy as np\n'), ((16484, 16497), 'numpy.radians', 'np.radians', (['(2)'], {}), '(2)\n', (16494, 16497), True, 'import numpy as np\n'), ((16498, 16508), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (16505, 16508), True, 'import numpy as np\n'), ((16627, 16637), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16634, 16637), True, 'import numpy as np\n'), ((16706, 16716), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16713, 16716), True, 'import numpy as np\n'), ((17283, 17293), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (17290, 17293), True, 'import numpy as np\n'), ((19012, 19022), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (19019, 19022), True, 'import numpy as np\n'), ((19088, 19098), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (19095, 19098), True, 'import numpy as np\n'), ((19150, 19163), 'numpy.radians', 'np.radians', (['(2)'], {}), '(2)\n', (19160, 19163), True, 'import numpy as np\n'), ((19164, 19174), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (19171, 19174), True, 'import numpy as np\n'), ((19209, 19222), 'numpy.radians', 'np.radians', (['(2)'], {}), '(2)\n', (19219, 19222), True, 'import numpy as np\n'), ((19223, 19233), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (19230, 19233), True, 'import numpy as np\n'), ((20305, 20315), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (20312, 20315), True, 'import numpy as np\n'), ((20382, 20392), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (20389, 20392), True, 'import numpy as np\n'), ((20444, 20457), 'numpy.radians', 'np.radians', (['(2)'], {}), '(2)\n', (20454, 20457), True, 'import numpy as np\n'), ((20458, 20468), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (20465, 20468), True, 'import 
numpy as np\n'), ((20503, 20516), 'numpy.radians', 'np.radians', (['(2)'], {}), '(2)\n', (20513, 20516), True, 'import numpy as np\n'), ((20517, 20527), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (20524, 20527), True, 'import numpy as np\n'), ((21129, 21139), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (21136, 21139), True, 'import numpy as np\n'), ((21187, 21200), 'numpy.radians', 'np.radians', (['(2)'], {}), '(2)\n', (21197, 21200), True, 'import numpy as np\n'), ((21201, 21211), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (21208, 21211), True, 'import numpy as np\n'), ((28865, 28926), 'numpy.array', 'np.array', (['[ptrm_tail[:, 1], ptrm_tail[:, 2], ptrm_tail[:, 3]]'], {}), '([ptrm_tail[:, 1], ptrm_tail[:, 2], ptrm_tail[:, 3]])\n', (28873, 28926), True, 'import numpy as np\n'), ((15404, 15414), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (15411, 15414), True, 'import numpy as np\n'), ((15469, 15482), 'numpy.radians', 'np.radians', (['(2)'], {}), '(2)\n', (15479, 15482), True, 'import numpy as np\n'), ((15483, 15493), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (15490, 15493), True, 'import numpy as np\n'), ((16616, 16626), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (16623, 16626), True, 'import numpy as np\n'), ((16681, 16694), 'numpy.radians', 'np.radians', (['(2)'], {}), '(2)\n', (16691, 16694), True, 'import numpy as np\n'), ((16695, 16705), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (16702, 16705), True, 'import numpy as np\n'), ((17258, 17271), 'numpy.radians', 'np.radians', (['(2)'], {}), '(2)\n', (17268, 17271), True, 'import numpy as np\n'), ((17272, 17282), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (17279, 17282), True, 'import numpy as np\n'), ((17409, 17419), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (17416, 17419), True, 'import numpy as np\n'), ((17499, 17509), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (17506, 17509), True, 'import numpy as np\n'), ((17398, 17408), 'numpy.sqrt', 'np.sqrt', 
(['(3)'], {}), '(3)\n', (17405, 17408), True, 'import numpy as np\n'), ((17474, 17487), 'numpy.radians', 'np.radians', (['(2)'], {}), '(2)\n', (17484, 17487), True, 'import numpy as np\n'), ((17488, 17498), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (17495, 17498), True, 'import numpy as np\n')] |
import warnings
import numpy as np
from scipy.sparse import csc_matrix
from numba import jit, float64, void
from compecon import Basis
__author__ = 'Randall'
# TODO: complete this class
# todo: compare performance of csr_matrix and csc_matrix to deal with sparse interpolation operators
class BasisChebyshev(Basis):
def __init__(self, n, a, b, **kwargs):
""" Create an instance of BasisChebyshev.
Args:
n: number of nodes per dimension
a: lower bounds
b: upper bounds
**kwargs: options passed to BasisOptions (see Keyword Args below)
The dimension of the basis is inferred from the number of elements in n, a, b. If all of them are scalars,
then d = 1. Otherwise, they are broadcast to a common size array.
Keyword Args:
nodetype (str): type of nodes to use, either 'gaussian', 'lobatto', 'endpoint', or 'uniform'.
method (str): method to combine basis dimensions (relevant only if d > 1). Valid options are 'tensor',
'smolyak', 'complete', 'cluster', and 'zcluster'.
qn (int or array of ints): if method is 'smolyak', qn controls depth of node selection. Isotropic grid if qn
is scalar, anisotropic grid if qn is an array on ints.
qp (int or array of ints): if method is 'smolyak', qp controls depth of polynomial selection. Isotropic grid
if qp is scalar, anisotropic grid if qp is an array on ints. If method is 'complete', then qp controls
maximum degree of interpolation polynomials.
labels (list of strings): Labels to identify basis dimensions.
f (callable): a function to compute value of interpolated function at nodes.
y (numpy array): value of interpolated function at nodes.
c (numpy array): interpolation coefficients.
s (list of scalars): number of function for each dimension.
l (list of strings): labels for each of the function dimensions.
Notice that only one of the keyword arguments f, y, c, s, l can be specified. If none is, then s=1.
Notes:
Methods 'cluster' and 'zcluster' have not been implemented yet.
Examples:
BasisChebyshev(15, -2, 3, labels=['wealth']) # a basis to interpolate a function of wealth.
income = BasisChebyshev(15, -2, 3, labels=['wealth'], l=['employed', 'unemployed') # a basis to interpolate
income as a function of wealth, for employed and unemployed workers.
BasisChebyshev(9, [0, 0], [2, 3]) # it uses 9 nodes in each dimension, uses tensor product (81 nodes total)
BasisChebyshev(9, [0, 0], [2, 3], method='smolyak', qn=3, qp=3) # Smolyak grid, 29 nodes (as opposed to 81)
Returns:
A BasisChebyshev instance.
"""
kwargs['basistype'] = 'chebyshev'
super().__init__(n, a, b, **kwargs)
self._set_nodes()
def _set_nodes(self):
nodetype = self.opts.nodetype
for i in range(self.d):
n = self.n[i]
a = self.a[i]
b = self.b[i]
if nodetype in ['gaussian', 'endpoint']:
x = np.array([-np.cos(np.pi * k / (2 * n)) for k in range(1, 2 * n, 2)])
elif nodetype == 'lobatto':
x = np.array([-np.cos(np.pi * k / (n - 1)) for k in range(n)])
elif nodetype == 'uniform':
x = np.linspace(-1, 1, n)
else:
raise Exception('Unknown node type')
if nodetype == 'endpoint':
x /= x[-1]
x *= (b - a) / 2
x += (b + a) / 2
self._nodes.append(x)
self._expand_nodes()
def _rescale201(self, i, x):
"""
Rescales nodes from [a, b] domain to [-1, 1] domain
:param x: nodes in [a, b] domain (array)
:return: nodes in [-1, 1] domain
"""
a = self.a[i]
b = self.b[i]
# if not(a <= min(x) <= max(x) <= b):
# warnings.warn('x values must be between a and b.')
return (2 / (b - a)) * (x - (a + b) / 2)
def _update_diff_operators(self, i, order):
keys = set(self._diff_operators[i].keys())
if (order in keys) or (order == 0):
return # Use previously stored values if available
n = self.n[i]
a = self.a[i]
b = self.b[i]
if order > 0:
if order > n - 2:
warnings.warn('order must be less or equal to n - 2; setting order = n - 2')
order = n - 2
missing_keys = set(range(1, order + 1)) - keys
if 1 in missing_keys:
hh = np.arange(n) + 1
jj, ii = np.meshgrid(hh, hh)
rc = np.logical_and(np.asarray((ii + jj) % 2, bool), jj > ii)
d = np.zeros([n, n])
d[rc] = (4 / (b - a)) * (jj[rc] - 1)
d[0, :] = d[0, :] / 2
# todo: convert d to sparse matrix
d = csc_matrix(d[:-1, :])
self._diff_operators[i][1] = d
missing_keys -= {1}
else:
d = self._diff_operators[i][1]
missing_keys = list(missing_keys)
missing_keys.sort(reverse=True)
while missing_keys:
k = missing_keys.pop()
self._diff_operators[i][k] = d[:n - k, :n - k + 1] * self._diff_operators[i][k - 1]
else:
nn = n - order
ii = np.array([(0.25 * (b - a)) / k for k in range(1, nn + 1)])
d = np.diag(ii) - np.diag(ii[:-2], 2)
# todo: make d sparse
d[0, 0] *= 2
d0 = np.array([(-1) ** k for k in range(nn)]) * sum(d)
d0.resize((1, d0.size)) # need to have 2 dimensions to concatenate with d
dd = np.mat(np.r_[d0, d])
missing_keys = set(range(order, 0)) - keys
if -1 in missing_keys:
self._diff_operators[i][-1] = dd[:n + 1, :n]
missing_keys -= {-1}
missing_keys = list(missing_keys)
missing_keys.sort(reverse=False)
while missing_keys:
k = missing_keys.pop()
self._diff_operators[i][k] = dd[:n - k, :n - k - 1] * self._diff_operators[i][k + 1]
"""
Interpolation methods
"""
def _phi1d(self, i, x=None, order=0):
if order is None:
order = 0
orderIsScalar = np.isscalar(order)
order = np.atleast_1d(order).flatten()
n = self.n[i]
nn = n + np.maximum(0, -np.min(order))
# Check for x argument
xIsProvided = (x is not None)
x = np.asarray(x).flatten() if xIsProvided else self._nodes[i]
nx = x.size
# Compute order 0 interpolation matrix
if xIsProvided:
bas = np.zeros([nx, nn])
z = self._rescale201(i, x)
cheby_polynomials(z, bas)
else:
z = np.atleast_2d(np.arange(n - 0.5, -0.5, -1)).T
bas = np.cos((np.pi / n) * z * np.arange(0, nn))
# Compute Phi
Phidict = dict()
for ii in set(order):
if ii == 0:
Phidict[ii] = bas
else:
Phidict[ii] = bas[:, :n - ii] * self._diff(i, ii) # as matrix multiplication, because diff is sparse
Phi = np.array([Phidict[k] for k in order])
return Phi
@jit(void(float64[:], float64[:, :]), nopython=True)
def cheby_polynomials(z, bas):
for node in range(z.size):
bas[node, 0] = 1
bas[node, 1] = z[node]
z[node] *= 2
for k in range(2, bas.shape[1]):
bas[node, k] = z[node] * bas[node, k - 1] - bas[node, k - 2]
return None
| [
"numba.void",
"numpy.meshgrid",
"numpy.isscalar",
"numpy.asarray",
"numpy.zeros",
"scipy.sparse.csc_matrix",
"numpy.min",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"numpy.cos",
"warnings.warn",
"numpy.atleast_1d",
"numpy.mat",
"numpy.diag"
] | [((7542, 7573), 'numba.void', 'void', (['float64[:]', 'float64[:, :]'], {}), '(float64[:], float64[:, :])\n', (7546, 7573), False, 'from numba import jit, float64, void\n'), ((6571, 6589), 'numpy.isscalar', 'np.isscalar', (['order'], {}), '(order)\n', (6582, 6589), True, 'import numpy as np\n'), ((7478, 7515), 'numpy.array', 'np.array', (['[Phidict[k] for k in order]'], {}), '([Phidict[k] for k in order])\n', (7486, 7515), True, 'import numpy as np\n'), ((5933, 5953), 'numpy.mat', 'np.mat', (['np.r_[d0, d]'], {}), '(np.r_[d0, d])\n', (5939, 5953), True, 'import numpy as np\n'), ((6958, 6976), 'numpy.zeros', 'np.zeros', (['[nx, nn]'], {}), '([nx, nn])\n', (6966, 6976), True, 'import numpy as np\n'), ((4541, 4617), 'warnings.warn', 'warnings.warn', (['"""order must be less or equal to n - 2; setting order = n - 2"""'], {}), "('order must be less or equal to n - 2; setting order = n - 2')\n", (4554, 4617), False, 'import warnings\n'), ((4806, 4825), 'numpy.meshgrid', 'np.meshgrid', (['hh', 'hh'], {}), '(hh, hh)\n', (4817, 4825), True, 'import numpy as np\n'), ((4924, 4940), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (4932, 4940), True, 'import numpy as np\n'), ((5103, 5124), 'scipy.sparse.csc_matrix', 'csc_matrix', (['d[:-1, :]'], {}), '(d[:-1, :])\n', (5113, 5124), False, 'from scipy.sparse import csc_matrix\n'), ((5669, 5680), 'numpy.diag', 'np.diag', (['ii'], {}), '(ii)\n', (5676, 5680), True, 'import numpy as np\n'), ((5683, 5702), 'numpy.diag', 'np.diag', (['ii[:-2]', '(2)'], {}), '(ii[:-2], 2)\n', (5690, 5702), True, 'import numpy as np\n'), ((6606, 6626), 'numpy.atleast_1d', 'np.atleast_1d', (['order'], {}), '(order)\n', (6619, 6626), True, 'import numpy as np\n'), ((4764, 4776), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (4773, 4776), True, 'import numpy as np\n'), ((4862, 4893), 'numpy.asarray', 'np.asarray', (['((ii + jj) % 2)', 'bool'], {}), '((ii + jj) % 2, bool)\n', (4872, 4893), True, 'import numpy as np\n'), ((6692, 6705), 
'numpy.min', 'np.min', (['order'], {}), '(order)\n', (6698, 6705), True, 'import numpy as np\n'), ((6789, 6802), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (6799, 6802), True, 'import numpy as np\n'), ((7098, 7126), 'numpy.arange', 'np.arange', (['(n - 0.5)', '(-0.5)', '(-1)'], {}), '(n - 0.5, -0.5, -1)\n', (7107, 7126), True, 'import numpy as np\n'), ((7173, 7189), 'numpy.arange', 'np.arange', (['(0)', 'nn'], {}), '(0, nn)\n', (7182, 7189), True, 'import numpy as np\n'), ((3503, 3524), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'n'], {}), '(-1, 1, n)\n', (3514, 3524), True, 'import numpy as np\n'), ((3266, 3293), 'numpy.cos', 'np.cos', (['(np.pi * k / (2 * n))'], {}), '(np.pi * k / (2 * n))\n', (3272, 3293), True, 'import numpy as np\n'), ((3395, 3422), 'numpy.cos', 'np.cos', (['(np.pi * k / (n - 1))'], {}), '(np.pi * k / (n - 1))\n', (3401, 3422), True, 'import numpy as np\n')] |
import sys
import numpy as np
import torch
from pytorch_fid.inception import InceptionV3
sys.path.insert(0, '/workspace')
from datasets.custom_subset import SingleClassSubset
from utils.stylegan import create_image
class PRCD:
def __init__(self, dataset_real, dataset_fake, device, crop_size=None, generator=None, batch_size=128, dims=2048, num_workers=16, gpu_devices=[]):
self.dataset_real = dataset_real
self.dataset_fake = dataset_fake
self.batch_size = batch_size
self.dims = dims
self.num_workers = num_workers
self.device = device
self.generator = generator
self.crop_size = crop_size
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[self.dims]
inception_model = InceptionV3([block_idx])
if len(gpu_devices) > 1:
self.inception_model = torch.nn.DataParallel(inception_model, device_ids=gpu_devices)
else:
self.inception_model = inception_model
self.inception_model.to(self.device)
def compute_metric(self, num_classes, k=3, rtpt=None):
precision_list = []
recall_list = []
density_list = []
coverage_list = []
for step, cls in enumerate(range(num_classes)):
with torch.no_grad():
embedding_fake = self.compute_embedding(self.dataset_fake, cls)
embedding_real = self.compute_embedding(self.dataset_real, cls)
pair_dist_real = torch.cdist(embedding_real, embedding_real, p=2)
pair_dist_real = torch.sort(pair_dist_real, dim=1, descending=False)[0]
pair_dist_fake = torch.cdist(embedding_fake, embedding_fake, p=2)
pair_dist_fake = torch.sort(pair_dist_fake, dim=1, descending=False)[0]
radius_real = pair_dist_real[:, k]
radius_fake = pair_dist_fake[:, k]
# Compute precision
distances_fake_to_real = torch.cdist(embedding_fake, embedding_real, p=2)
min_dist_fake_to_real, nn_real = distances_fake_to_real.min(dim=1)
precision = (min_dist_fake_to_real <= radius_real[nn_real]).float().mean()
precision_list.append(precision.cpu().item())
# Compute recall
distances_real_to_fake = torch.cdist(embedding_real, embedding_fake, p=2)
min_dist_real_to_fake, nn_fake = distances_real_to_fake.min(dim=1)
recall = (min_dist_real_to_fake <= radius_fake[nn_fake]).float().mean()
recall_list.append(recall.cpu().item())
# Compute density
num_samples = distances_fake_to_real.shape[0]
sphere_counter = (distances_fake_to_real <= radius_real.repeat(num_samples, 1)).float().sum(dim=0).mean()
density = sphere_counter / k
density_list.append(density.cpu().item())
# Compute coverage
num_neighbors = (distances_fake_to_real <= radius_real.repeat(num_samples, 1)).float().sum(dim=0)
coverage = (num_neighbors > 0).float().mean()
coverage_list.append(coverage.cpu().item())
# Update rtpt
if rtpt:
rtpt.step(
subtitle=f'PRCD Computation step {step} of {num_classes}')
# Compute mean over targets
precision = np.mean(precision_list)
recall = np.mean(recall_list)
density = np.mean(density_list)
coverage = np.mean(coverage_list)
return precision, recall, density, coverage
def compute_embedding(self, dataset, cls=None):
self.inception_model.eval()
if cls:
dataset = SingleClassSubset(dataset, cls)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=self.batch_size,
shuffle=False,
drop_last=False,
pin_memory=True,
num_workers=self.num_workers)
pred_arr = np.empty((len(dataset), self.dims))
start_idx = 0
max_iter = int(len(dataset) / self.batch_size)
for step, (x, y) in enumerate(dataloader):
with torch.no_grad():
if x.shape[1] != 3:
x = create_image(x, self.generator,
crop_size=self.crop_size, resize=299, batch_size=int(self.batch_size / 2))
x = x.to(self.device)
pred = self.inception_model(x)[0]
pred = pred.squeeze(3).squeeze(2).cpu().numpy()
pred_arr[start_idx:start_idx + pred.shape[0]] = pred
start_idx = start_idx + pred.shape[0]
return torch.from_numpy(pred_arr)
| [
"torch.utils.data.DataLoader",
"sys.path.insert",
"datasets.custom_subset.SingleClassSubset",
"torch.cdist",
"numpy.mean",
"pytorch_fid.inception.InceptionV3",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.sort",
"torch.from_numpy"
] | [((91, 123), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/workspace"""'], {}), "(0, '/workspace')\n", (106, 123), False, 'import sys\n'), ((753, 777), 'pytorch_fid.inception.InceptionV3', 'InceptionV3', (['[block_idx]'], {}), '([block_idx])\n', (764, 777), False, 'from pytorch_fid.inception import InceptionV3\n'), ((3419, 3442), 'numpy.mean', 'np.mean', (['precision_list'], {}), '(precision_list)\n', (3426, 3442), True, 'import numpy as np\n'), ((3460, 3480), 'numpy.mean', 'np.mean', (['recall_list'], {}), '(recall_list)\n', (3467, 3480), True, 'import numpy as np\n'), ((3499, 3520), 'numpy.mean', 'np.mean', (['density_list'], {}), '(density_list)\n', (3506, 3520), True, 'import numpy as np\n'), ((3540, 3562), 'numpy.mean', 'np.mean', (['coverage_list'], {}), '(coverage_list)\n', (3547, 3562), True, 'import numpy as np\n'), ((3795, 3943), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'drop_last': '(False)', 'pin_memory': '(True)', 'num_workers': 'self.num_workers'}), '(dataset, batch_size=self.batch_size, shuffle=\n False, drop_last=False, pin_memory=True, num_workers=self.num_workers)\n', (3822, 3943), False, 'import torch\n'), ((4885, 4911), 'torch.from_numpy', 'torch.from_numpy', (['pred_arr'], {}), '(pred_arr)\n', (4901, 4911), False, 'import torch\n'), ((846, 908), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['inception_model'], {'device_ids': 'gpu_devices'}), '(inception_model, device_ids=gpu_devices)\n', (867, 908), False, 'import torch\n'), ((3742, 3773), 'datasets.custom_subset.SingleClassSubset', 'SingleClassSubset', (['dataset', 'cls'], {}), '(dataset, cls)\n', (3759, 3773), False, 'from datasets.custom_subset import SingleClassSubset\n'), ((1266, 1281), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1279, 1281), False, 'import torch\n'), ((1476, 1524), 'torch.cdist', 'torch.cdist', (['embedding_real', 'embedding_real'], {'p': '(2)'}), 
'(embedding_real, embedding_real, p=2)\n', (1487, 1524), False, 'import torch\n'), ((1646, 1694), 'torch.cdist', 'torch.cdist', (['embedding_fake', 'embedding_fake'], {'p': '(2)'}), '(embedding_fake, embedding_fake, p=2)\n', (1657, 1694), False, 'import torch\n'), ((1963, 2011), 'torch.cdist', 'torch.cdist', (['embedding_fake', 'embedding_real'], {'p': '(2)'}), '(embedding_fake, embedding_real, p=2)\n', (1974, 2011), False, 'import torch\n'), ((2323, 2371), 'torch.cdist', 'torch.cdist', (['embedding_real', 'embedding_fake'], {'p': '(2)'}), '(embedding_real, embedding_fake, p=2)\n', (2334, 2371), False, 'import torch\n'), ((4384, 4399), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4397, 4399), False, 'import torch\n'), ((1558, 1609), 'torch.sort', 'torch.sort', (['pair_dist_real'], {'dim': '(1)', 'descending': '(False)'}), '(pair_dist_real, dim=1, descending=False)\n', (1568, 1609), False, 'import torch\n'), ((1728, 1779), 'torch.sort', 'torch.sort', (['pair_dist_fake'], {'dim': '(1)', 'descending': '(False)'}), '(pair_dist_fake, dim=1, descending=False)\n', (1738, 1779), False, 'import torch\n')] |
""" helper function
author baiyu
"""
import os
import sys
import re
import datetime
import numpy
import torch
from torch.optim.lr_scheduler import _LRScheduler
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
def get_network(args):
""" return given network
"""
if args.net == 'vgg16':
from models.vgg import vgg16_bn
net = vgg16_bn()
elif args.net == 'vgg13':
from models.vgg import vgg13_bn
net = vgg13_bn()
elif args.net == 'vgg11':
from models.vgg import vgg11_bn
net = vgg11_bn()
elif args.net == 'vgg19':
from models.vgg import vgg19_bn
net = vgg19_bn()
elif args.net == 'densenet121':
from models.densenet import densenet121
net = densenet121()
elif args.net == 'densenet161':
from models.densenet import densenet161
net = densenet161()
elif args.net == 'densenet169':
from models.densenet import densenet169
net = densenet169()
elif args.net == 'densenet201':
from models.densenet import densenet201
net = densenet201()
elif args.net == 'googlenet':
from models.googlenet import googlenet
net = googlenet()
elif args.net == 'inceptionv3':
from models.inceptionv3 import inceptionv3
net = inceptionv3()
elif args.net == 'inceptionv4':
from models.inceptionv4 import inceptionv4
net = inceptionv4()
elif args.net == 'inceptionresnetv2':
from models.inceptionv4 import inception_resnet_v2
net = inception_resnet_v2()
elif args.net == 'xception':
from models.xception import xception
net = xception()
elif args.net == 'resnet18':
from models.resnet import resnet18
net = resnet18()
elif args.net == 'resnet34':
from models.resnet import resnet34
net = resnet34()
elif args.net == 'resnet50':
from models.resnet import resnet50
net = resnet50()
elif args.net == 'resnet101':
from models.resnet import resnet101
net = resnet101()
elif args.net == 'resnet152':
from models.resnet import resnet152
net = resnet152()
elif args.net == 'preactresnet18':
from models.preactresnet import preactresnet18
net = preactresnet18()
elif args.net == 'preactresnet34':
from models.preactresnet import preactresnet34
net = preactresnet34()
elif args.net == 'preactresnet50':
from models.preactresnet import preactresnet50
net = preactresnet50()
elif args.net == 'preactresnet101':
from models.preactresnet import preactresnet101
net = preactresnet101()
elif args.net == 'preactresnet152':
from models.preactresnet import preactresnet152
net = preactresnet152()
elif args.net == 'resnext50':
from models.resnext import resnext50
net = resnext50()
elif args.net == 'resnext101':
from models.resnext import resnext101
net = resnext101()
elif args.net == 'resnext152':
from models.resnext import resnext152
net = resnext152()
elif args.net == 'shufflenet':
from models.shufflenet import shufflenet
net = shufflenet()
elif args.net == 'shufflenetv2':
from models.shufflenetv2 import shufflenetv2
net = shufflenetv2()
elif args.net == 'squeezenet':
from models.squeezenet import squeezenet
net = squeezenet()
elif args.net == 'mobilenet':
from models.mobilenet import mobilenet
net = mobilenet()
elif args.net == 'mobilenetv2':
from models.mobilenetv2 import mobilenetv2
net = mobilenetv2()
elif args.net == 'nasnet':
from models.nasnet import nasnet
net = nasnet()
elif args.net == 'attention56':
from models.attention import attention56
net = attention56()
elif args.net == 'attention92':
from models.attention import attention92
net = attention92()
elif args.net == 'seresnet18':
from models.senet import seresnet18
net = seresnet18()
elif args.net == 'seresnet34':
from models.senet import seresnet34
net = seresnet34()
elif args.net == 'seresnet50':
from models.senet import seresnet50
net = seresnet50()
elif args.net == 'seresnet101':
from models.senet import seresnet101
net = seresnet101()
elif args.net == 'seresnet152':
from models.senet import seresnet152
net = seresnet152()
elif args.net == 'wideresnet':
from models.wideresidual import wideresnet
net = wideresnet()
elif args.net == 'stochasticdepth18':
from models.stochasticdepth import stochastic_depth_resnet18
net = stochastic_depth_resnet18()
elif args.net == 'stochasticdepth34':
from models.stochasticdepth import stochastic_depth_resnet34
net = stochastic_depth_resnet34()
elif args.net == 'stochasticdepth50':
from models.stochasticdepth import stochastic_depth_resnet50
net = stochastic_depth_resnet50()
elif args.net == 'stochasticdepth101':
from models.stochasticdepth import stochastic_depth_resnet101
net = stochastic_depth_resnet101()
else:
print('the network name you have entered is not supported yet')
sys.exit()
if args.gpu: # use_gpu
net = net.cuda()
return net
def get_training_dataloader(mean, std, batch_size=128, num_workers=4, shuffle=True):
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize(mean, std)
])
# cifar100_training = CIFAR100Train(path, transform=transform_train)
cifar100_training = torchvision.datasets.CIFAR100(root='/home/lab265/lab265/datasets/CIFAR100', train=True,
download=True, transform=transform_train)
cifar100_training_loader = DataLoader(
cifar100_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
return cifar100_training_loader
def get_test_dataloader(mean, std, batch_size=100, num_workers=4, shuffle=True):
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean, std)
])
cifar100_test = torchvision.datasets.CIFAR100(root='/home/lab265/lab265/datasets/CIFAR100', train=False,
download=True, transform=transform_test)
cifar100_test_loader = DataLoader(
cifar100_test, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
return cifar100_test_loader
def compute_mean_std(cifar100_dataset):
"""compute the mean and std of cifar100 dataset
Args:
cifar100_training_dataset or cifar100_test_dataset
witch derived from class torch.utils.data
Returns:
a tuple contains mean, std value of entire dataset
"""
data_r = numpy.dstack([cifar100_dataset[i][1][:, :, 0] for i in range(len(cifar100_dataset))])
data_g = numpy.dstack([cifar100_dataset[i][1][:, :, 1] for i in range(len(cifar100_dataset))])
data_b = numpy.dstack([cifar100_dataset[i][1][:, :, 2] for i in range(len(cifar100_dataset))])
mean = numpy.mean(data_r), numpy.mean(data_g), numpy.mean(data_b)
std = numpy.std(data_r), numpy.std(data_g), numpy.std(data_b)
return mean, std
class WarmUpLR(_LRScheduler):
"""warmup_training learning rate scheduler
Args:
optimizer: optimzier(e.g. SGD)
total_iters: totoal_iters of warmup phase
"""
def __init__(self, optimizer, total_iters, last_epoch=-1):
self.total_iters = total_iters
super().__init__(optimizer, last_epoch)
def get_lr(self):
"""we will use the first m batches, and set the learning
rate to base_lr * m / total_iters
"""
return [base_lr * self.last_epoch / (self.total_iters + 1e-8) for base_lr in self.base_lrs]
def most_recent_folder(net_weights, fmt):
"""
return most recent created folder under net_weights
if no none-empty folder were found, return empty folder
"""
# get subfolders in net_weights
folders = os.listdir(net_weights)
# filter out empty folders
folders = [f for f in folders if len(os.listdir(os.path.join(net_weights, f)))]
if len(folders) == 0:
return ''
# sort folders by folder created time
folders = sorted(folders, key=lambda f: datetime.datetime.strptime(f, fmt))
return folders[-1]
def most_recent_weights(weights_folder):
"""
return most recent created weights file
if folder is empty return empty string
"""
weight_files = os.listdir(weights_folder)
if len(weights_folder) == 0:
return ''
regex_str = r'([A-Za-z0-9]+)-([0-9]+)-(regular|best)'
# sort files by epoch
weight_files = sorted(weight_files, key=lambda w: int(re.search(regex_str, w).groups()[1]))
return weight_files[-1]
def last_epoch(weights_folder):
weight_file = most_recent_weights(weights_folder)
if not weight_file:
raise Exception('no recent weights were found')
resume_epoch = int(weight_file.split('-')[1])
return resume_epoch
def best_acc_weights(weights_folder):
"""
return the best acc .pth file in given folder, if no
best acc weights file were found, return empty string
"""
files = os.listdir(weights_folder)
if len(files) == 0:
return ''
regex_str = r'([A-Za-z0-9]+)-([0-9]+)-(regular|best)'
best_files = [w for w in files if re.search(regex_str, w).groups()[2] == 'best']
if len(best_files) == 0:
return ''
best_files = sorted(best_files, key=lambda w: int(re.search(regex_str, w).groups()[1]))
return best_files[-1]
| [
"models.preactresnet.preactresnet18",
"models.resnext.resnext152",
"models.shufflenet.shufflenet",
"models.inceptionv4.inception_resnet_v2",
"models.resnet.resnet152",
"models.stochasticdepth.stochastic_depth_resnet18",
"numpy.mean",
"models.preactresnet.preactresnet152",
"models.googlenet.googlenet... | [((5949, 6082), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""/home/lab265/lab265/datasets/CIFAR100"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "(root='/home/lab265/lab265/datasets/CIFAR100',\n train=True, download=True, transform=transform_train)\n", (5978, 6082), False, 'import torchvision\n'), ((6164, 6262), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar100_training'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar100_training, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (6174, 6262), False, 'from torch.utils.data import DataLoader\n'), ((6530, 6663), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""/home/lab265/lab265/datasets/CIFAR100"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), "(root='/home/lab265/lab265/datasets/CIFAR100',\n train=False, download=True, transform=transform_test)\n", (6559, 6663), False, 'import torchvision\n'), ((6737, 6831), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar100_test'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar100_test, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (6747, 6831), False, 'from torch.utils.data import DataLoader\n'), ((8433, 8456), 'os.listdir', 'os.listdir', (['net_weights'], {}), '(net_weights)\n', (8443, 8456), False, 'import os\n'), ((8936, 8962), 'os.listdir', 'os.listdir', (['weights_folder'], {}), '(weights_folder)\n', (8946, 8962), False, 'import os\n'), ((9659, 9685), 'os.listdir', 'os.listdir', (['weights_folder'], {}), '(weights_folder)\n', (9669, 9685), False, 'import os\n'), ((411, 421), 'models.vgg.vgg16_bn', 'vgg16_bn', ([], {}), '()\n', (419, 421), False, 'from models.vgg import vgg16_bn\n'), ((7473, 7491), 'numpy.mean', 'numpy.mean', (['data_r'], {}), '(data_r)\n', 
(7483, 7491), False, 'import numpy\n'), ((7493, 7511), 'numpy.mean', 'numpy.mean', (['data_g'], {}), '(data_g)\n', (7503, 7511), False, 'import numpy\n'), ((7513, 7531), 'numpy.mean', 'numpy.mean', (['data_b'], {}), '(data_b)\n', (7523, 7531), False, 'import numpy\n'), ((7542, 7559), 'numpy.std', 'numpy.std', (['data_r'], {}), '(data_r)\n', (7551, 7559), False, 'import numpy\n'), ((7561, 7578), 'numpy.std', 'numpy.std', (['data_g'], {}), '(data_g)\n', (7570, 7578), False, 'import numpy\n'), ((7580, 7597), 'numpy.std', 'numpy.std', (['data_b'], {}), '(data_b)\n', (7589, 7597), False, 'import numpy\n'), ((506, 516), 'models.vgg.vgg13_bn', 'vgg13_bn', ([], {}), '()\n', (514, 516), False, 'from models.vgg import vgg13_bn\n'), ((5654, 5690), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (5675, 5690), True, 'import torchvision.transforms as transforms\n'), ((5700, 5733), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (5731, 5733), True, 'import torchvision.transforms as transforms\n'), ((5743, 5772), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(15)'], {}), '(15)\n', (5768, 5772), True, 'import torchvision.transforms as transforms\n'), ((5782, 5803), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5801, 5803), True, 'import torchvision.transforms as transforms\n'), ((5813, 5844), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mean', 'std'], {}), '(mean, std)\n', (5833, 5844), True, 'import torchvision.transforms as transforms\n'), ((6439, 6460), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6458, 6460), True, 'import torchvision.transforms as transforms\n'), ((6470, 6501), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mean', 'std'], {}), '(mean, std)\n', (6490, 6501), True, 'import torchvision.transforms as transforms\n'), ((601, 
611), 'models.vgg.vgg11_bn', 'vgg11_bn', ([], {}), '()\n', (609, 611), False, 'from models.vgg import vgg11_bn\n'), ((8704, 8738), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['f', 'fmt'], {}), '(f, fmt)\n', (8730, 8738), False, 'import datetime\n'), ((696, 706), 'models.vgg.vgg19_bn', 'vgg19_bn', ([], {}), '()\n', (704, 706), False, 'from models.vgg import vgg19_bn\n'), ((8541, 8569), 'os.path.join', 'os.path.join', (['net_weights', 'f'], {}), '(net_weights, f)\n', (8553, 8569), False, 'import os\n'), ((805, 818), 'models.densenet.densenet121', 'densenet121', ([], {}), '()\n', (816, 818), False, 'from models.densenet import densenet121\n'), ((917, 930), 'models.densenet.densenet161', 'densenet161', ([], {}), '()\n', (928, 930), False, 'from models.densenet import densenet161\n'), ((9825, 9848), 're.search', 're.search', (['regex_str', 'w'], {}), '(regex_str, w)\n', (9834, 9848), False, 'import re\n'), ((1029, 1042), 'models.densenet.densenet169', 'densenet169', ([], {}), '()\n', (1040, 1042), False, 'from models.densenet import densenet169\n'), ((9158, 9181), 're.search', 're.search', (['regex_str', 'w'], {}), '(regex_str, w)\n', (9167, 9181), False, 'import re\n'), ((9974, 9997), 're.search', 're.search', (['regex_str', 'w'], {}), '(regex_str, w)\n', (9983, 9997), False, 'import re\n'), ((1141, 1154), 'models.densenet.densenet201', 'densenet201', ([], {}), '()\n', (1152, 1154), False, 'from models.densenet import densenet201\n'), ((1250, 1261), 'models.googlenet.googlenet', 'googlenet', ([], {}), '()\n', (1259, 1261), False, 'from models.googlenet import googlenet\n'), ((1363, 1376), 'models.inceptionv3.inceptionv3', 'inceptionv3', ([], {}), '()\n', (1374, 1376), False, 'from models.inceptionv3 import inceptionv3\n'), ((1478, 1491), 'models.inceptionv4.inceptionv4', 'inceptionv4', ([], {}), '()\n', (1489, 1491), False, 'from models.inceptionv4 import inceptionv4\n'), ((1607, 1628), 'models.inceptionv4.inception_resnet_v2', 'inception_resnet_v2', 
([], {}), '()\n', (1626, 1628), False, 'from models.inceptionv4 import inception_resnet_v2\n'), ((1721, 1731), 'models.xception.xception', 'xception', ([], {}), '()\n', (1729, 1731), False, 'from models.xception import xception\n'), ((1822, 1832), 'models.resnet.resnet18', 'resnet18', ([], {}), '()\n', (1830, 1832), False, 'from models.resnet import resnet18\n'), ((1923, 1933), 'models.resnet.resnet34', 'resnet34', ([], {}), '()\n', (1931, 1933), False, 'from models.resnet import resnet34\n'), ((2024, 2034), 'models.resnet.resnet50', 'resnet50', ([], {}), '()\n', (2032, 2034), False, 'from models.resnet import resnet50\n'), ((2127, 2138), 'models.resnet.resnet101', 'resnet101', ([], {}), '()\n', (2136, 2138), False, 'from models.resnet import resnet101\n'), ((2231, 2242), 'models.resnet.resnet152', 'resnet152', ([], {}), '()\n', (2240, 2242), False, 'from models.resnet import resnet152\n'), ((2351, 2367), 'models.preactresnet.preactresnet18', 'preactresnet18', ([], {}), '()\n', (2365, 2367), False, 'from models.preactresnet import preactresnet18\n'), ((2476, 2492), 'models.preactresnet.preactresnet34', 'preactresnet34', ([], {}), '()\n', (2490, 2492), False, 'from models.preactresnet import preactresnet34\n'), ((2601, 2617), 'models.preactresnet.preactresnet50', 'preactresnet50', ([], {}), '()\n', (2615, 2617), False, 'from models.preactresnet import preactresnet50\n'), ((2728, 2745), 'models.preactresnet.preactresnet101', 'preactresnet101', ([], {}), '()\n', (2743, 2745), False, 'from models.preactresnet import preactresnet101\n'), ((2856, 2873), 'models.preactresnet.preactresnet152', 'preactresnet152', ([], {}), '()\n', (2871, 2873), False, 'from models.preactresnet import preactresnet152\n'), ((2967, 2978), 'models.resnext.resnext50', 'resnext50', ([], {}), '()\n', (2976, 2978), False, 'from models.resnext import resnext50\n'), ((3074, 3086), 'models.resnext.resnext101', 'resnext101', ([], {}), '()\n', (3084, 3086), False, 'from models.resnext import 
resnext101\n'), ((3182, 3194), 'models.resnext.resnext152', 'resnext152', ([], {}), '()\n', (3192, 3194), False, 'from models.resnext import resnext152\n'), ((3293, 3305), 'models.shufflenet.shufflenet', 'shufflenet', ([], {}), '()\n', (3303, 3305), False, 'from models.shufflenet import shufflenet\n'), ((3410, 3424), 'models.shufflenetv2.shufflenetv2', 'shufflenetv2', ([], {}), '()\n', (3422, 3424), False, 'from models.shufflenetv2 import shufflenetv2\n'), ((3523, 3535), 'models.squeezenet.squeezenet', 'squeezenet', ([], {}), '()\n', (3533, 3535), False, 'from models.squeezenet import squeezenet\n'), ((3631, 3642), 'models.mobilenet.mobilenet', 'mobilenet', ([], {}), '()\n', (3640, 3642), False, 'from models.mobilenet import mobilenet\n'), ((3744, 3757), 'models.mobilenetv2.mobilenetv2', 'mobilenetv2', ([], {}), '()\n', (3755, 3757), False, 'from models.mobilenetv2 import mobilenetv2\n'), ((3844, 3852), 'models.nasnet.nasnet', 'nasnet', ([], {}), '()\n', (3850, 3852), False, 'from models.nasnet import nasnet\n'), ((3952, 3965), 'models.attention.attention56', 'attention56', ([], {}), '()\n', (3963, 3965), False, 'from models.attention import attention56\n'), ((4065, 4078), 'models.attention.attention92', 'attention92', ([], {}), '()\n', (4076, 4078), False, 'from models.attention import attention92\n'), ((4172, 4184), 'models.senet.seresnet18', 'seresnet18', ([], {}), '()\n', (4182, 4184), False, 'from models.senet import seresnet18\n'), ((4278, 4290), 'models.senet.seresnet34', 'seresnet34', ([], {}), '()\n', (4288, 4290), False, 'from models.senet import seresnet34\n'), ((4384, 4396), 'models.senet.seresnet50', 'seresnet50', ([], {}), '()\n', (4394, 4396), False, 'from models.senet import seresnet50\n'), ((4492, 4505), 'models.senet.seresnet101', 'seresnet101', ([], {}), '()\n', (4503, 4505), False, 'from models.senet import seresnet101\n'), ((4601, 4614), 'models.senet.seresnet152', 'seresnet152', ([], {}), '()\n', (4612, 4614), False, 'from models.senet import 
seresnet152\n'), ((4715, 4727), 'models.wideresidual.wideresnet', 'wideresnet', ([], {}), '()\n', (4725, 4727), False, 'from models.wideresidual import wideresnet\n'), ((4853, 4880), 'models.stochasticdepth.stochastic_depth_resnet18', 'stochastic_depth_resnet18', ([], {}), '()\n', (4878, 4880), False, 'from models.stochasticdepth import stochastic_depth_resnet18\n'), ((5006, 5033), 'models.stochasticdepth.stochastic_depth_resnet34', 'stochastic_depth_resnet34', ([], {}), '()\n', (5031, 5033), False, 'from models.stochasticdepth import stochastic_depth_resnet34\n'), ((5159, 5186), 'models.stochasticdepth.stochastic_depth_resnet50', 'stochastic_depth_resnet50', ([], {}), '()\n', (5184, 5186), False, 'from models.stochasticdepth import stochastic_depth_resnet50\n'), ((5314, 5342), 'models.stochasticdepth.stochastic_depth_resnet101', 'stochastic_depth_resnet101', ([], {}), '()\n', (5340, 5342), False, 'from models.stochasticdepth import stochastic_depth_resnet101\n'), ((5434, 5444), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5442, 5444), False, 'import sys\n')] |
import os
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
def plot(file_name):
    """Plot a price-over-time step chart from a CSV file and save it as a PNG.

    The CSV is expected to have a header row followed by rows of the form
    ``<id>,<YYYY/MM/DD>,<price>,...``.  Rows dated before 2021 or with a
    negative price are skipped.  The chart is written to
    ``figures/<keyword>.png``, where ``keyword`` is the part of the file's
    basename before the first ``-``.
    """
    keyword = os.path.basename(file_name)
    keyword = keyword[:keyword.find("-")]
    data = []
    with open(file_name, "r") as file:
        for i, line in enumerate(file):
            if i == 0:  # skip header row
                continue
            fields = line.split(",")
            date = datetime.strptime(fields[1].strip(), "%Y/%m/%d")
            price = float(fields[2].strip())
            if date.year < 2021 or price < 0:  # drop old or invalid rows
                continue
            data.append([date, price])
    if not data:
        print(f"No valid rows in {file_name}; nothing to plot.")
        return
    data = np.array(data, dtype=object)
    # Sort whole rows by date.  (The previous np.sort(data, axis=0) sorted
    # each column independently, decoupling dates from their prices.)
    data = data[np.argsort(data[:, 0])]
    print(data)
    plt.step(data[:, 0], data[:, 1:])
    plt.title(keyword)
    plt.xlabel("Date")
    plt.ylabel("Price (CAD$)")
    os.makedirs("figures", exist_ok=True)  # ensure the output dir exists
    plt.savefig(f"figures/{keyword}.png")
    # plt.show()
    plt.clf()
if __name__ == "__main__":
    import tkinter as tk
    from tkinter import filedialog

    # Pop a file chooser (with the empty root window hidden) and plot
    # every CSV the user selects.
    hidden_root = tk.Tk()
    hidden_root.withdraw()
    selected = filedialog.askopenfilename(
        title="Choose input file",
        initialdir=os.getcwd() + "/raw_data/",
        initialfile="supplies.txt",
        multiple=True,
    )
    for chosen in selected:
        plot(chosen)
| [
"matplotlib.pyplot.title",
"os.path.basename",
"matplotlib.pyplot.clf",
"os.getcwd",
"numpy.sort",
"matplotlib.pyplot.step",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"tkinter.Tk",
"matplotlib.pyplot.savefig"
] | [((129, 156), 'os.path.basename', 'os.path.basename', (['file_name'], {}), '(file_name)\n', (145, 156), False, 'import os\n'), ((623, 651), 'numpy.array', 'np.array', (['data'], {'dtype': 'object'}), '(data, dtype=object)\n', (631, 651), True, 'import numpy as np\n'), ((663, 684), 'numpy.sort', 'np.sort', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (670, 684), True, 'import numpy as np\n'), ((706, 739), 'matplotlib.pyplot.step', 'plt.step', (['data[:, 0]', 'data[:, 1:]'], {}), '(data[:, 0], data[:, 1:])\n', (714, 739), True, 'import matplotlib.pyplot as plt\n'), ((744, 762), 'matplotlib.pyplot.title', 'plt.title', (['keyword'], {}), '(keyword)\n', (753, 762), True, 'import matplotlib.pyplot as plt\n'), ((767, 785), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (777, 785), True, 'import matplotlib.pyplot as plt\n'), ((790, 816), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price (CAD$)"""'], {}), "('Price (CAD$)')\n", (800, 816), True, 'import matplotlib.pyplot as plt\n'), ((821, 858), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""figures/{keyword}.png"""'], {}), "(f'figures/{keyword}.png')\n", (832, 858), True, 'import matplotlib.pyplot as plt\n'), ((880, 889), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (887, 889), True, 'import matplotlib.pyplot as plt\n'), ((1017, 1024), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (1022, 1024), True, 'import tkinter as tk\n'), ((1145, 1156), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1154, 1156), False, 'import os\n')] |
from __future__ import print_function
import os
import glob # may cause segmentation fault in (C+Python) environment
import numpy as np
import cv2
import csv
import faiss
import pandas as pd
import utm
### For dataset
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import get_streetview
from vps import vps
import unit_test_vps_config as config
import sys;sys.path.insert(0,'/home/ccsmm/workdir/ccsmmutils');import torch_img_utils as tim
from ipdb import set_trace as bp
# Maps a data-stream name to the filename suffix it is stored under.  All
# files of one recording session share a common "<date>_<time>_" prefix
# followed by one of these suffixes.
postfix_dict = {
        "ascen_fix":"ascen_fix.csv",
        "images":"images.avi",
        "imu_data":"imu_data.csv",
        "intersect":"intersect.csv",
        "novatel_fix":"novatel_fix.csv"}
def get_input_file_list(testset_dir, ext="*.avi", out_postfix='vps.csv'):
    """Collect matching (video, ascen-fix CSV, output CSV) path triples.

    Every file in *testset_dir* matching *ext* is paired with its
    companion ascen_fix CSV and an output filename; all three share the
    same 14-character "<date>_<time>_" filename prefix.
    """
    matched = sorted(glob.glob(os.path.join(testset_dir, ext)))
    images, ascen_fix, outputs = [], [], []
    for fn in matched:
        # e.g. "191115_151140_" taken from "191115_151140_images.avi"
        prefix = os.path.basename(fn)[:14]
        images.append(fn)
        ascen_fix.append(os.path.join(testset_dir, prefix + postfix_dict["ascen_fix"]))
        outputs.append(os.path.join(testset_dir, prefix + out_postfix))
    assert len(images) == len(ascen_fix), "Number of files are mis-matched."
    return images, ascen_fix, outputs
class dgDataset(Dataset):
    """Dataset pairing video frames with their nearest GPS (ascen) fixes.

    Frames are read *sequentially* from the VideoCapture inside
    ``__getitem__`` (the ``idx`` argument only serves as the frame-number
    label), so this dataset must be iterated in order — i.e. with
    ``shuffle=False`` and a single worker.
    """
    def __init__(self, avi, ascen):
        # avi: path to the recorded video; ascen: path to the ascen_fix CSV.
        self.cap = cv2.VideoCapture(avi)
        self.video_fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.video_frame_length = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)
        self.timestamp, self.lat, self.lon, self.alt = self.parse_ascen(ascen)
        self.starttime = self.timestamp[0]
        self.endtime = self.timestamp[-1]
        self.time_length = self.endtime - self.starttime # ascen's total seconds
        # Scale factor that maps frame numbers onto the GPS-log time axis
        # when the video duration and the GPS-log duration disagree.
        self.video_scale = self.video_fps * self.time_length / self.video_frame_length
        print("=====> Start reading {} and {}.".format(avi, ascen))
    def parse_ascen(self, ascen):
        '''
        Parse the ascen_fix CSV and return (timestamp, lat, lon, alt) arrays.

        Sample of the expected layout:
        ipdb> data.head()
        time .header.seq .header.stamp.secs .header.stamp.nsecs .header.frame_id ... .latitude .longitude .altitude .position_covariance .position_covariance_type
        0 2019/11/15/11:40:06.994837 24 1573785606 994514942 gps ... 36.382057 127.367646 90.2 (18.3184, 0.0, ...) 1
        1 2019/11/15/11:40:07.993330 25 1573785607 993129014 gps ... 36.382056 127.367646 90.5 (18.3184, 0.0, ...) 1
        '''
        data = pd.read_csv(ascen, sep=",")
        lat = np.array([ float(i) for i in data[".latitude"]])
        lon = np.array([ float(i) for i in data[".longitude"]])
        alt = np.array([ float(i) for i in data[".altitude"]])
        timestamp = np.array([float(i) for i in data[".header.stamp.secs"]])
        return timestamp, lat, lon, alt
    def get_image_timestamp(self, fnumber):
        """Map a frame number to an absolute timestamp on the GPS time axis."""
        timestamp = self.starttime + fnumber * self.video_scale / self.video_fps
        return timestamp
    def get_latlon_from_timestamp(self, q_timestamp):
        """Return (lat, lon, index) of the GPS fix closest in time to q_timestamp."""
        best_similar_idx = np.argmin(np.abs(self.timestamp - q_timestamp))
        return self.lat[best_similar_idx], self.lon[best_similar_idx], best_similar_idx
    def __len__(self):
        # Number of frames reported by the capture (float from OpenCV).
        return int(self.video_frame_length )
    def release(self):
        """Release the underlying VideoCapture handle."""
        self.cap.release()
    def __getitem__(self, idx):
        # NOTE(review): cap.read() always returns the *next* frame, so idx
        # is used only as the frame-number label; random access would yield
        # wrong frames — confirm the DataLoader never shuffles.
        fnumber = idx
        ret, qimg = self.cap.read()
        image_timestamp = self.get_image_timestamp(fnumber)
        lat, lon, tidx = self.get_latlon_from_timestamp(image_timestamp)
        return [qimg, fnumber, image_timestamp, lat, lon]
def get_utm_err(lat1, lon1, lat2, lon2):
    """Return the planar (UTM) L2 distance in metres between two GPS fixes.

    Returns -1 when any coordinate is NaN or falls outside the expected
    region (latitude in [36, 38], longitude in [127, 128]).
    """
    # Reject NaNs.  Fixed: the original tested lat1 twice and never lat2,
    # so a NaN lat2 slipped through to utm.from_latlon.
    if np.isnan(lat1) or np.isnan(lat2) or np.isnan(lon1) or np.isnan(lon2):
        return -1
    # Reject fixes outside the expected lat/lon window.
    if lat1 < 36 or lon1 < 127 or lat2 < 36 or lon2 < 127:
        return -1
    if lat1 > 38 or lon1 > 128 or lat2 > 38 or lon2 > 128:
        return -1
    p1 = np.array(utm.from_latlon(lat1, lon1)[0:2])
    p2 = np.array(utm.from_latlon(lat2, lon2)[0:2])
    err_l2norm = np.linalg.norm(p1-p2) # l2norm = np.sqrt(np.sum((p1-p2)**2))
    return err_l2norm
def do_vps(avi, ascen, output_filename, begin_frame=1000, server_ip="172.16.31.10"): # file names
    """Run visual place recognition over a video and log results to a CSV.

    For each frame (after skipping the first *begin_frame* frames) the
    current GPS fix is looked up, the frame is passed to the module-level
    ``mod_vps`` model, the predicted street-view location is fetched from
    the image server at *server_ip*, and one row is appended to
    *output_filename*.

    NOTE(review): relies on the global ``mod_vps`` created in ``__main__``.
    """
    print("=====> Start reading {}.".format(avi))
    dataset = dgDataset(avi, ascen)
    dataloader = DataLoader(dataset, batch_size=1, shuffle=False)
    # Line-buffered so each row lands on disk immediately.
    fout = open(output_filename, 'w', buffering=1)
    string='fnumber,timestamp,svid,svidx,pred_lat,pred_lon,distance,angle,confidence,curr_lat,curr_lon,utm_err'
    fout.write(string+'\n')
    print(string)
    for idx, [qimg, fnumber, timestamp, lat, lon] in enumerate(dataloader):
        # Unwrap the batch dimension added by the DataLoader (batch_size=1).
        qimg = qimg.numpy()[0]
        fnumber = fnumber.numpy()[0]
        timestamp = timestamp.numpy()[0]
        curr_lat = lat.numpy()[0]
        curr_lon = lon.numpy()[0]
        try:
            [h, w, c] = qimg.shape
            if (h < 480) or (w < 640) or c != 3:
                print("Invalid shape of query image :", h,w,c)
                continue
        except Exception:
            # Frame could not be unpacked as HxWxC (e.g. a broken read).
            # Fixed: the original printed the undefined name `fname` here,
            # which raised a NameError instead of skipping the frame.
            print("Broken query image :", fnumber)
            continue
        qimg = cv2.resize(qimg,(640,480))
        #vps_IDandConf = mod_vps.apply(qimg, 3, 36.381438, 127.378867, 0.8, 1.0, streetview_server_ipaddr) # k=5 for knn
        if idx < begin_frame: # Skip beginning videos
            print("Skip {}\r".format(idx), end='')
            continue
        #cv2.imshow('QImg', qimg)
        #cv2.waitKey(1)
        # NOTE(review): the batched tensors lat/lon (not the unwrapped
        # curr_lat/curr_lon) are passed to apply — confirm this is intended.
        vps_IDandConf = mod_vps.apply(image=qimg, K=1, gps_lat=lat, gps_lon=lon, gps_accuracy=0.8, timestamp=timestamp, ipaddr=server_ip) # k=5 for knn
        svid = vps_IDandConf[0][0] # street view id from map server
        svidx = "f" # cubic == "f" || cubic == "b" || cubic == "l" || cubic == "r" || cubic == "u" || cubic == "d")
        confidence = vps_IDandConf[1][0]  # 0 ~ 1, default 1
        distance = -1 # distance in the ground from camera to predicted point. default -1, meter
        angle = np.pi # Relative angle from camera to predicted point(CCW : +). default is pi, radian
        _, pred_lat, pred_lon = get_streetview.GetStreetView_fromID(svid, roi_radius=1, ipaddr=server_ip)
        utm_err = get_utm_err(curr_lat, curr_lon, pred_lat, pred_lon)
        string = '{0:04d},{1:10.3f},{2:11d},{3:},{4:2.8f},{5:3.7f},{6:3d},{7:1.3f},{8:1.3f},{9:2.8f},{10:3.7f},{11:3.1f}'.format(
            fnumber,timestamp,svid,svidx,pred_lat,pred_lon,distance,angle,confidence,curr_lat,curr_lon,utm_err)
        fout.write(string+'\n')
        print(string)
        if False:  # dead debug branch, kept for reference
            ## Display Result
            qImgs = mod_vps.get_qImgs() # [10,3,480,640]
            dbImgs = mod_vps.get_dbImgs() # [10,3,480,640]
            qdbImgs = torch.cat((qImgs,dbImgs),-1) # [10,3,480,1280]
    fout.close()
    dataset.release()
    #cv2.destroyAllWindows()
if __name__ == "__main__":
    ## Image server address
    server_ip = config.ip
    region = config.dataset_region
    ## Set the GPU number (which gpu will you use)
    gpu_num = config.which_gpu
    ## In/out directory and file information
    testset_dir = config.indir
    in_ext = config.input_reference_ext # "*.avi"
    out_postfix = config.out_postfix # "vps_lr.csv"
    date_idx = config.date_idx
    ## Skip frames for invalid video at the very beginning.
    begin_skip_frame = config.begin_skip_frame
    images, ascen_fix, outputs = get_input_file_list(testset_dir, ext=in_ext, out_postfix=out_postfix)
    # NOTE: mod_vps is a module-level global that do_vps() reads directly.
    mod_vps = vps(gpu_num, region)
    mod_vps.initialize()
    # Process only the session selected by date_idx.
    avi_filename = images[date_idx]
    ascen_filename = ascen_fix[date_idx]
    output_filename = outputs[date_idx]
    do_vps(avi_filename, ascen_filename, output_filename, begin_skip_frame, server_ip) # avi and novatel are filenames
    #for i, [avi, ascen] in enumerate(zip(images, ascen_fix)):
        #do_vps(avi_filename, ascen_filename, output_filename, 2500, server_ip) # avi and novatel are filenames
| [
"cv2.resize",
"utm.from_latlon",
"numpy.abs",
"torch.utils.data.DataLoader",
"os.path.basename",
"pandas.read_csv",
"sys.path.insert",
"numpy.isnan",
"torch.cat",
"cv2.VideoCapture",
"numpy.linalg.norm",
"get_streetview.GetStreetView_fromID",
"os.path.join",
"vps.vps"
] | [((452, 504), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/ccsmm/workdir/ccsmmutils"""'], {}), "(0, '/home/ccsmm/workdir/ccsmmutils')\n", (467, 504), False, 'import sys\n'), ((4729, 4752), 'numpy.linalg.norm', 'np.linalg.norm', (['(p1 - p2)'], {}), '(p1 - p2)\n', (4743, 4752), True, 'import numpy as np\n'), ((5023, 5071), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(dataset, batch_size=1, shuffle=False)\n', (5033, 5071), False, 'from torch.utils.data import DataLoader\n'), ((8487, 8507), 'vps.vps', 'vps', (['gpu_num', 'region'], {}), '(gpu_num, region)\n', (8490, 8507), False, 'from vps import vps\n'), ((875, 905), 'os.path.join', 'os.path.join', (['testset_dir', 'ext'], {}), '(testset_dir, ext)\n', (887, 905), False, 'import os\n'), ((1578, 1599), 'cv2.VideoCapture', 'cv2.VideoCapture', (['avi'], {}), '(avi)\n', (1594, 1599), False, 'import cv2\n'), ((3164, 3191), 'pandas.read_csv', 'pd.read_csv', (['ascen'], {'sep': '""","""'}), "(ascen, sep=',')\n", (3175, 3191), True, 'import pandas as pd\n'), ((4350, 4364), 'numpy.isnan', 'np.isnan', (['lat1'], {}), '(lat1)\n', (4358, 4364), True, 'import numpy as np\n'), ((4368, 4382), 'numpy.isnan', 'np.isnan', (['lat1'], {}), '(lat1)\n', (4376, 4382), True, 'import numpy as np\n'), ((4386, 4400), 'numpy.isnan', 'np.isnan', (['lon1'], {}), '(lon1)\n', (4394, 4400), True, 'import numpy as np\n'), ((4404, 4418), 'numpy.isnan', 'np.isnan', (['lon2'], {}), '(lon2)\n', (4412, 4418), True, 'import numpy as np\n'), ((5845, 5873), 'cv2.resize', 'cv2.resize', (['qimg', '(640, 480)'], {}), '(qimg, (640, 480))\n', (5855, 5873), False, 'import cv2\n'), ((6823, 6896), 'get_streetview.GetStreetView_fromID', 'get_streetview.GetStreetView_fromID', (['svid'], {'roi_radius': '(1)', 'ipaddr': 'server_ip'}), '(svid, roi_radius=1, ipaddr=server_ip)\n', (6858, 6896), False, 'import get_streetview\n'), ((1034, 1054), 'os.path.basename', 'os.path.basename', (['fn'], {}), 
'(fn)\n', (1050, 1054), False, 'import os\n'), ((1245, 1306), 'os.path.join', 'os.path.join', (['testset_dir', "(prefix + postfix_dict['ascen_fix'])"], {}), "(testset_dir, prefix + postfix_dict['ascen_fix'])\n", (1257, 1306), False, 'import os\n'), ((1330, 1377), 'os.path.join', 'os.path.join', (['testset_dir', '(prefix + out_postfix)'], {}), '(testset_dir, prefix + out_postfix)\n', (1342, 1377), False, 'import os\n'), ((3754, 3790), 'numpy.abs', 'np.abs', (['(self.timestamp - q_timestamp)'], {}), '(self.timestamp - q_timestamp)\n', (3760, 3790), True, 'import numpy as np\n'), ((4622, 4649), 'utm.from_latlon', 'utm.from_latlon', (['lat1', 'lon1'], {}), '(lat1, lon1)\n', (4637, 4649), False, 'import utm\n'), ((4676, 4703), 'utm.from_latlon', 'utm.from_latlon', (['lat2', 'lon2'], {}), '(lat2, lon2)\n', (4691, 4703), False, 'import utm\n'), ((7707, 7737), 'torch.cat', 'torch.cat', (['(qImgs, dbImgs)', '(-1)'], {}), '((qImgs, dbImgs), -1)\n', (7716, 7737), False, 'import torch\n')] |
import pandas as pd
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
import utils as utils
import numpy as np
import glob
def predict(X_test):
    """Load the saved LightGBM model from ./model.txt and score X_test.

    A Booster loaded from file can only predict with the iteration it was
    saved at.  (Training used feature columns [2,3,5,6,11,12,13].)
    """
    print('Loading model to predict...')
    booster = lgb.Booster(model_file='./model.txt')
    return booster.predict(X_test)
# X_train, Y_train, X_test, Y_test = utils.get_train_test()
# Run the saved model over every rush-hour CSV and write one prediction
# file per input under ./test_result/.
subdirname = './data/rush_hour/'
depthfiles = glob.glob(subdirname + '*.csv')
for depthfile in depthfiles:
    X_test = pd.read_csv(depthfile)
    Y_test = predict(X_test)
    # Keep only the input's basename for the output filename.
    file_name = depthfile.split("/")[-1]
    print(depthfile)
    output_name = "./test_result/predictions_" + file_name
    np.savetxt(output_name, Y_test, delimiter=",")
| [
"pandas.read_csv",
"lightgbm.Booster",
"numpy.savetxt",
"glob.glob"
] | [((534, 565), 'glob.glob', 'glob.glob', (["(subdirname + '*.csv')"], {}), "(subdirname + '*.csv')\n", (543, 565), False, 'import glob\n'), ((272, 309), 'lightgbm.Booster', 'lgb.Booster', ([], {'model_file': '"""./model.txt"""'}), "(model_file='./model.txt')\n", (283, 309), True, 'import lightgbm as lgb\n'), ((609, 635), 'pandas.read_csv', 'pd.read_csv', (['depthfiles[i]'], {}), '(depthfiles[i])\n', (620, 635), True, 'import pandas as pd\n'), ((828, 874), 'numpy.savetxt', 'np.savetxt', (['output_name', 'Y_test'], {'delimiter': '""","""'}), "(output_name, Y_test, delimiter=',')\n", (838, 874), True, 'import numpy as np\n')] |
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import os
import numpy as np
import cv2
from PIL import Image
class Node():
    """A contiguous [start_idx, end_idx] segment in the agglomerative tree.

    Leaf nodes sit at level 0; merged nodes record the indices of the two
    children they were built from.
    """

    def __init__(self, start_idx, end_idx, level, idx):
        self.start_idx = start_idx
        self.end_idx = end_idx
        self.level = level
        self.idx = idx
        self.parent_idx = None            # set once this node is merged into a parent
        self.children_node_indices = []   # empty for leaves
        self.cluster_label = None         # assigned during labeling

    @property
    def centroid_idx(self):
        """Index of the midpoint element of the segment."""
        return (self.start_idx + self.end_idx) // 2

    @property
    def len(self):
        """Length of the segment (end index minus start index)."""
        return self.end_idx - self.start_idx
class HierarchicalAgglomorativeTree():
    """Bottom-up (agglomerative) hierarchical tree over temporal segments.

    Leaf nodes are fixed-length windows over an embedding sequence; the
    tree is grown by repeatedly merging the pair of *adjacent* segments
    whose footprints are closest (see ``agglomoration``).
    """
    def __init__(self):
        self.nodes = []          # Node objects, positioned by Node.idx
        self.edges = []          # [parent_idx, child_idx] pairs
        self.node_indices = []   # indices already registered (dedup guard)
        self.level_indices = {}  # level -> list of node indices at that level
        self.graph = nx.Graph()
        self.root_node = None
    def add_nodes(self, nodes):
        """Register several nodes, skipping any index already present."""
        for node in nodes:
            if node.idx in self.node_indices:
                continue
            self.add_node(node)
    def add_node(self, node):
        """Register one node plus its parent->child edges and level entry."""
        self.nodes.append(node)
        if node.children_node_indices != []:
            for child_idx in node.children_node_indices:
                self.edges.append([node.idx, child_idx])
        self.node_indices.append(node.idx)
        if node.level not in self.level_indices.keys():
            self.level_indices[node.level] = []
        self.level_indices[node.level].append(node.idx)
    def to_nx_graph(self):
        """Mirror the tree's nodes and edges into self.graph and return it."""
        for node in self.nodes:
            self.graph.add_node(node.idx)
        for edge in self.edges:
            self.graph.add_edge(edge[0], edge[1])
        return self.graph
    def find_parent(self, node_idx):
        """Follow parent links upward and return the topmost ancestor Node."""
        while self.nodes[node_idx].parent_idx is not None:
            node_idx = self.nodes[node_idx].parent_idx
        return self.nodes[node_idx]
    def create_root_node(self):
        """Attach every parentless node under a new synthetic root."""
        root_node = Node(start_idx=0, end_idx=0, level=-1, idx=len(self.nodes))
        for node in self.nodes:
            if node.parent_idx is None:
                root_node.children_node_indices.append(node.idx)
                # Root level is one above the highest orphan's level.
                root_node.level = max(node.level + 1, root_node.level)
                node.parent_idx = root_node.idx
                self.edges.append([root_node.idx, node.idx])
        self.nodes.append(root_node)
        self.root_node = root_node
        self.level_indices[self.root_node.level] = [self.root_node.idx]
    @property
    def max_depth(self):
        """Number of levels in the tree (highest level index + 1)."""
        return max(self.level_indices.keys()) + 1
    def find_children_nodes(self, parent_node_idx, depth=0, no_leaf=False, min_len=20):
        """Collect descendant node indices down to *depth* levels.

        Recursion stops early at nodes shorter than *min_len*; with
        no_leaf=True, level-1 nodes are returned instead of leaves.
        """
        # Return when depth = 0
        node_list = []
        for node_idx in self.nodes[parent_node_idx].children_node_indices:
            if depth == 0 or self.nodes[node_idx].len < min_len:
                node_list.append(node_idx)
            else:
                if no_leaf:
                    if self.nodes[node_idx].level == 1:
                        node_list.append(node_idx)
                    else:
                        node_list += self.find_children_nodes(node_idx, depth-1)
                else:
                    if self.nodes[node_idx].level == 0 or self.nodes[node_idx].len < min_len:
                        node_list.append(node_idx)
                    else:
                        node_list += self.find_children_nodes(node_idx, depth-1)
        return node_list
    def find_midlevel_abstraction(self, parent_node_idx, depth=0, no_leaf=False, min_len=40):
        """Like find_children_nodes, but does not stop early on short nodes
        at the top call; recursion delegates to find_children_nodes."""
        # Return when depth = 0
        node_list = []
        for node_idx in self.nodes[parent_node_idx].children_node_indices:
            if depth == 0:
                node_list.append(node_idx)
            else:
                if no_leaf:
                    if self.nodes[node_idx].level == 1:
                        node_list.append(node_idx)
                    else:
                        node_list += self.find_children_nodes(node_idx, depth-1, min_len=min_len)
                else:
                    if self.nodes[node_idx].level == 0:
                        node_list.append(node_idx)
                    else:
                        node_list += self.find_children_nodes(node_idx, depth-1, min_len=min_len)
        return node_list
    def check_consistency(self, node_idx):
        """Return True if all direct children share this node's cluster label."""
        node = self.nodes[node_idx]
        if node.level == 0:
            return True
        else:
            children_nodes = self.find_children_nodes(node_idx, 0)
            for child_idx in children_nodes:
                if node.cluster_label != self.nodes[child_idx].cluster_label:
                    return False
            return True
    def assign_labels(self, node_idx, label):
        """Set the cluster label of a single node."""
        self.nodes[node_idx].cluster_label = label
    def unassign_labels(self, node_idx):
        """Clear the cluster label of a node and its direct children."""
        self.nodes[node_idx].cluster_label = None
        for child_idx in self.find_children_nodes(node_idx, 0):
            self.nodes[child_idx].cluster_label = None
    def compute_distance(self, e1, e2, mode="l2"):
        """Distance between two footprints; mode selects the metric.

        NOTE(review): mode "cos" returns cosine *similarity* (larger =
        closer), the opposite ordering of the other modes — confirm before
        using it with the min-distance merge in agglomoration.  An unknown
        mode falls through and returns None.
        """
        if mode == "l2":
            return np.linalg.norm(e1 - e2)
        elif mode == "l1":
            return np.linalg.norm((e1 - e2), ord=1)
        elif mode == "cos":
            return np.dot(e1, e2.transpose()) / (np.linalg.norm(e1) * np.linalg.norm(e2))
        elif mode == "js":
            # Symmetrized KL between diagonal Gaussians; e1/e2 are
            # concatenated [mu, var] footprints (see mode="gaussian").
            mu_e1, var_e1 = np.split(e1, 2, axis=-1)
            mu_e2, var_e2 = np.split(e2, 2, axis=-1)
            def kl_normal(qm, qv, pm, pv):
                element_wise = 0.5 * (np.log(pv) - np.log(qv) + qv / pv + np.power(qm - pm, 2) / pv - 1)
                return element_wise.sum(-1)
            js_dist = 0.5 * (kl_normal(mu_e1, var_e1, mu_e2, var_e2) + kl_normal(mu_e2, var_e2, mu_e1, var_e1))
            return js_dist
    def node_footprint(self, node: Node, embeddings, mode="mean"):
        """Summarize a node's segment of *embeddings* into one vector."""
        if mode == "centroid":
            return embeddings[node.centroid_idx]
        elif mode == "mean":
            # Mean of the segment's first, middle and last embeddings only.
            embedding = np.mean([embeddings[node.start_idx], embeddings[node.centroid_idx], embeddings[node.end_idx]], axis=0)
            return embedding
        elif mode == "head":
            return embeddings[node.start_idx]
        elif mode == "tail":
            return embeddings[node.end_idx]
        elif mode == "concat_1":
            return np.concatenate([embeddings[node.start_idx], embeddings[node.centroid_idx], embeddings[node.end_idx]], axis=1)
        elif mode == "gaussian":
            # Per-dimension mean/variance over the whole segment; the small
            # constant keeps variances strictly positive.
            mu = np.mean(embeddings[node.start_idx:node.end_idx+1], axis=0)
            var = np.mean(np.square(embeddings[node.start_idx:node.end_idx+1]), axis=0) - mu ** 2 + 1e-5
            assert(np.all(mu.shape == var.shape))
            return np.concatenate([mu, var], axis=1)
    def find_nn(self, embeddings, node_idx, before_idx, after_idx, footprint_mode="mean", dist_mode="l2"):
        """Return True if node_idx is closer to before_idx than to after_idx."""
        f1 = self.node_footprint(self.nodes[node_idx], embeddings, mode=footprint_mode)
        f2 = self.node_footprint(self.nodes[before_idx], embeddings, mode=footprint_mode)
        f3 = self.node_footprint(self.nodes[after_idx], embeddings, mode=footprint_mode)
        d1 = self.compute_distance(f1, f2, mode=dist_mode)
        d2 = self.compute_distance(f1, f3, mode=dist_mode)
        return d1 < d2
    def agglomoration(self, embeddings, step, footprint_mode="mean", dist_mode="l2", len_penalty=True):
        """Build the tree: cut *embeddings* into step-sized leaves, then
        repeatedly merge the closest adjacent pair until two nodes remain.

        len_penalty discourages merging already-long segments.
        """
        idx = 0
        nodes = []
        terminate = False
        # Phase 1: create level-0 leaves of length `step`; the last leaf
        # absorbs the remainder so no segment is shorter than `step`.
        for i in range(len(embeddings)-1):
            if i % step == 0:
                if (i + 2 * step >= len(embeddings)-1):
                    start_idx = i
                    end_idx = len(embeddings) - 1
                    terminate = True
                else:
                    start_idx = i
                    end_idx = min(i + step, len(embeddings) - 1)
                node = Node(start_idx=start_idx,
                            end_idx=end_idx,
                            level=0,
                            idx=idx)
                idx += 1
                nodes.append(node)
                if terminate:
                    break
        self.add_nodes(nodes)
        # Phase 2: greedy adjacent merging until only two top nodes remain
        # (a synthetic root can then be added via create_root_node).
        while len(nodes) > 2:
            i = 1
            dist_seq = []
            for i in range(len(nodes) - 1):
                dist = self.compute_distance(self.node_footprint(nodes[i], embeddings, mode=footprint_mode),
                                             self.node_footprint(nodes[i+1], embeddings, mode=footprint_mode), mode=dist_mode)
                if len_penalty:
                    # Very simple penalty
                    # dist += (nodes[i].len + nodes[i+1].len) * (1./
                    # 10.)
                    # Pentaly with respect to the whole length
                    dist += (nodes[i].len + nodes[i+1].len) / (5. * len(nodes))
                dist_seq.append(dist)
            target_idx = dist_seq.index(min(dist_seq))
            new_node = Node(start_idx=nodes[target_idx].start_idx,
                            end_idx=nodes[target_idx+1].end_idx,
                            level=max(nodes[target_idx].level, nodes[target_idx+1].level) + 1,
                            idx=idx)
            new_node.children_node_indices = [nodes[target_idx].idx, nodes[target_idx + 1].idx]
            nodes[target_idx].parent_idx = idx
            nodes[target_idx + 1].parent_idx = idx
            self.add_node(new_node)
            idx += 1
            # Rebuild the working list as the current set of top-level
            # ancestors, preserving order and deduplicating.
            new_nodes = []
            visited_nodes = []
            for node in nodes:
                parent_node = self.find_parent(node.idx)
                if parent_node.idx not in visited_nodes:
                    new_nodes.append(parent_node)
                    visited_nodes.append(parent_node.idx)
            nodes = new_nodes
def save_agglomorative_tree(agglomorative_tree, agentview_image_names_list, ep_idx, dataset_name, footprint_mode, dist_mode, modality_mode):
    """Render the agglomerative tree as a dendrogram and save it as a PNG.

    Leaves are spread along the x axis; each higher level is drawn `depth`
    units up, with every third leaf decorated by its centroid frame image.
    The figure is written under skill_classification/agglomoration_results/.
    """
    fig = plt.figure(figsize=(25, 10))
    width = 100   # horizontal spacing between leaves
    depth = 3     # vertical spacing between levels
    x = y = 0
    positions = {}
    # Place the leaves (level 0) and draw their upward stubs.
    for (i, node_idx) in enumerate(agglomorative_tree.level_indices[0]):
        positions[node_idx] = [x + i * width, y]
        plt.plot([x + i * width, x + i * width],
                 [y, y + depth / 2],
                 'k')
    # Each parent sits at the mean x of its children, one level up.
    for level in range(1, agglomorative_tree.max_depth):
        y += depth
        for (i, node_idx) in enumerate(agglomorative_tree.level_indices[level]):
            child_node_x_positions = []
            min_x = 10000
            max_x = 0
            for child_idx in agglomorative_tree.nodes[node_idx].children_node_indices:
                child_node_x_positions.append(positions[child_idx][0])
                if min_x > positions[child_idx][0]:
                    min_x = positions[child_idx][0]
                if max_x < positions[child_idx][0]:
                    max_x = positions[child_idx][0]
                plt.plot([positions[child_idx][0], positions[child_idx][0]],
                         [positions[child_idx][1], y - depth / 2], 'k')
            # Horizontal bar joining the children, then the parent's stub.
            plt.plot([min_x, max_x],
                     [y - depth / 2, y - depth/2], 'k')
            positions[node_idx] = [np.mean(child_node_x_positions), y]
            plt.plot([positions[node_idx][0], positions[node_idx][0]],
                     [y - depth / 2, y], 'k')
    # Mark every node and track the plot's bounding box.
    points = {}
    min_x = min_y = 10000
    max_x = max_y = 0
    for node in agglomorative_tree.nodes:
        point = plt.plot(positions[node.idx][0], positions[node.idx][1], 'ko')
        points[node.idx] = (point[0].get_data()[0][0], point[0].get_data()[1][0])
        min_x = min(min_x, points[node.idx][0])
        max_x = max(max_x, points[node.idx][0])
        min_y = min(min_y, points[node.idx][1])
        max_y = max(max_y, points[node.idx][1])
    plt.xlim([min_x - 50, max_x + 50])
    plt.ylim([min_y - 5, max_y + 5])
    plt.axis('off')
    ax=plt.gca()
    fig=plt.gcf()
    # Compose data -> display -> figure-fraction transforms so image axes
    # can be anchored below the leaf points.
    trans = ax.transData.transform
    trans2 = fig.transFigure.inverted().transform
    img_size = 0.08
    counter = 0
    for agglomorative_tree_node in agglomorative_tree.nodes:
        if agglomorative_tree_node.children_node_indices == []:
            if counter % 3 == 0:  # decorate every third leaf with its frame
                xa, ya = trans2(trans(points[agglomorative_tree_node.idx]))
                new_axis = plt.axes([xa - img_size / 2.0, ya - img_size * 1.1, img_size, img_size])
                img = np.array(Image.open(agentview_image_names_list[agglomorative_tree_node.centroid_idx]))
                new_axis.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
                new_axis.set_aspect('equal')
                new_axis.axis('off')
            counter += 1
    # Rasterize the figure and write it out with OpenCV.
    canvas = FigureCanvas(fig)
    canvas.draw()
    s, (width, height) = canvas.print_to_buffer()
    # np.frombuffer replaces the deprecated np.fromstring for binary data.
    image = np.frombuffer(canvas.tostring_rgb(), dtype='uint8').reshape((height, width, 3))
    os.makedirs(f"skill_classification/agglomoration_results/{dataset_name}/{footprint_mode}_{dist_mode}_{modality_mode}", exist_ok=True)
    cv2.imwrite(f"skill_classification/agglomoration_results/{dataset_name}/{footprint_mode}_{dist_mode}_{modality_mode}/{dataset_name}_{ep_idx}.png", image)
| [
"matplotlib.pyplot.axes",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.linalg.norm",
"matplotlib.pyplot.gca",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"cv2.imwrite",
"cv2.cvtColor",
"numpy.power",
"matplotlib.pyplot.ylim",
"numpy.square",
"matplotlib.pyplot.gcf",
"numpy.all",
... | [((10039, 10067), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(25, 10)'}), '(figsize=(25, 10))\n', (10049, 10067), True, 'import matplotlib.pyplot as plt\n'), ((12147, 12181), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[min_x - 50, max_x + 50]'], {}), '([min_x - 50, max_x + 50])\n', (12155, 12181), True, 'import matplotlib.pyplot as plt\n'), ((12190, 12222), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[min_y - 5, max_y + 5]'], {}), '([min_y - 5, max_y + 5])\n', (12198, 12222), True, 'import matplotlib.pyplot as plt\n'), ((12231, 12246), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (12239, 12246), True, 'import matplotlib.pyplot as plt\n'), ((12258, 12267), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12265, 12267), True, 'import matplotlib.pyplot as plt\n'), ((12280, 12289), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (12287, 12289), True, 'import matplotlib.pyplot as plt\n'), ((13173, 13190), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (13185, 13190), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((13380, 13523), 'os.makedirs', 'os.makedirs', (['f"""skill_classification/agglomoration_results/{dataset_name}/{footprint_mode}_{dist_mode}_{modality_mode}"""'], {'exist_ok': '(True)'}), "(\n f'skill_classification/agglomoration_results/{dataset_name}/{footprint_mode}_{dist_mode}_{modality_mode}'\n , exist_ok=True)\n", (13391, 13523), False, 'import os\n'), ((13522, 13685), 'cv2.imwrite', 'cv2.imwrite', (['f"""skill_classification/agglomoration_results/{dataset_name}/{footprint_mode}_{dist_mode}_{modality_mode}/{dataset_name}_{ep_idx}.png"""', 'image'], {}), "(\n f'skill_classification/agglomoration_results/{dataset_name}/{footprint_mode}_{dist_mode}_{modality_mode}/{dataset_name}_{ep_idx}.png'\n , image)\n", (13533, 13685), False, 'import cv2\n'), ((918, 928), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (926, 
928), True, 'import networkx as nx\n'), ((10293, 10358), 'matplotlib.pyplot.plot', 'plt.plot', (['[x + i * width, x + i * width]', '[y, y + depth / 2]', '"""k"""'], {}), "([x + i * width, x + i * width], [y, y + depth / 2], 'k')\n", (10301, 10358), True, 'import matplotlib.pyplot as plt\n'), ((11781, 11843), 'matplotlib.pyplot.plot', 'plt.plot', (['positions[node.idx][0]', 'positions[node.idx][1]', '"""ko"""'], {}), "(positions[node.idx][0], positions[node.idx][1], 'ko')\n", (11789, 11843), True, 'import matplotlib.pyplot as plt\n'), ((5197, 5220), 'numpy.linalg.norm', 'np.linalg.norm', (['(e1 - e2)'], {}), '(e1 - e2)\n', (5211, 5220), True, 'import numpy as np\n'), ((11266, 11327), 'matplotlib.pyplot.plot', 'plt.plot', (['[min_x, max_x]', '[y - depth / 2, y - depth / 2]', '"""k"""'], {}), "([min_x, max_x], [y - depth / 2, y - depth / 2], 'k')\n", (11274, 11327), True, 'import matplotlib.pyplot as plt\n'), ((11442, 11529), 'matplotlib.pyplot.plot', 'plt.plot', (['[positions[node_idx][0], positions[node_idx][0]]', '[y - depth / 2, y]', '"""k"""'], {}), "([positions[node_idx][0], positions[node_idx][0]], [y - depth / 2,\n y], 'k')\n", (11450, 11529), True, 'import matplotlib.pyplot as plt\n'), ((5267, 5297), 'numpy.linalg.norm', 'np.linalg.norm', (['(e1 - e2)'], {'ord': '(1)'}), '(e1 - e2, ord=1)\n', (5281, 5297), True, 'import numpy as np\n'), ((6084, 6190), 'numpy.mean', 'np.mean', (['[embeddings[node.start_idx], embeddings[node.centroid_idx], embeddings[node\n .end_idx]]'], {'axis': '(0)'}), '([embeddings[node.start_idx], embeddings[node.centroid_idx],\n embeddings[node.end_idx]], axis=0)\n', (6091, 6190), True, 'import numpy as np\n'), ((11112, 11224), 'matplotlib.pyplot.plot', 'plt.plot', (['[positions[child_idx][0], positions[child_idx][0]]', '[positions[child_idx][1], y - depth / 2]', '"""k"""'], {}), "([positions[child_idx][0], positions[child_idx][0]], [positions[\n child_idx][1], y - depth / 2], 'k')\n", (11120, 11224), True, 'import matplotlib.pyplot as 
plt\n'), ((11390, 11421), 'numpy.mean', 'np.mean', (['child_node_x_positions'], {}), '(child_node_x_positions)\n', (11397, 11421), True, 'import numpy as np\n'), ((12776, 12848), 'matplotlib.pyplot.axes', 'plt.axes', (['[xa - img_size / 2.0, ya - img_size * 1.1, img_size, img_size]'], {}), '([xa - img_size / 2.0, ya - img_size * 1.1, img_size, img_size])\n', (12784, 12848), True, 'import matplotlib.pyplot as plt\n'), ((12884, 12960), 'PIL.Image.open', 'Image.open', (['agentview_image_names_list[agglomorative_tree_node.centroid_idx]'], {}), '(agentview_image_names_list[agglomorative_tree_node.centroid_idx])\n', (12894, 12960), False, 'from PIL import Image\n'), ((12998, 13034), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (13010, 13034), False, 'import cv2\n'), ((5473, 5497), 'numpy.split', 'np.split', (['e1', '(2)'], {'axis': '(-1)'}), '(e1, 2, axis=-1)\n', (5481, 5497), True, 'import numpy as np\n'), ((5526, 5550), 'numpy.split', 'np.split', (['e2', '(2)'], {'axis': '(-1)'}), '(e2, 2, axis=-1)\n', (5534, 5550), True, 'import numpy as np\n'), ((5377, 5395), 'numpy.linalg.norm', 'np.linalg.norm', (['e1'], {}), '(e1)\n', (5391, 5395), True, 'import numpy as np\n'), ((5398, 5416), 'numpy.linalg.norm', 'np.linalg.norm', (['e2'], {}), '(e2)\n', (5412, 5416), True, 'import numpy as np\n'), ((6416, 6529), 'numpy.concatenate', 'np.concatenate', (['[embeddings[node.start_idx], embeddings[node.centroid_idx], embeddings[node\n .end_idx]]'], {'axis': '(1)'}), '([embeddings[node.start_idx], embeddings[node.centroid_idx],\n embeddings[node.end_idx]], axis=1)\n', (6430, 6529), True, 'import numpy as np\n'), ((6576, 6636), 'numpy.mean', 'np.mean', (['embeddings[node.start_idx:node.end_idx + 1]'], {'axis': '(0)'}), '(embeddings[node.start_idx:node.end_idx + 1], axis=0)\n', (6583, 6636), True, 'import numpy as np\n'), ((6759, 6788), 'numpy.all', 'np.all', (['(mu.shape == var.shape)'], {}), '(mu.shape == var.shape)\n', (6765, 
6788), True, 'import numpy as np\n'), ((6809, 6842), 'numpy.concatenate', 'np.concatenate', (['[mu, var]'], {'axis': '(1)'}), '([mu, var], axis=1)\n', (6823, 6842), True, 'import numpy as np\n'), ((5668, 5688), 'numpy.power', 'np.power', (['(qm - pm)', '(2)'], {}), '(qm - pm, 2)\n', (5676, 5688), True, 'import numpy as np\n'), ((6661, 6715), 'numpy.square', 'np.square', (['embeddings[node.start_idx:node.end_idx + 1]'], {}), '(embeddings[node.start_idx:node.end_idx + 1])\n', (6670, 6715), True, 'import numpy as np\n'), ((5632, 5642), 'numpy.log', 'np.log', (['pv'], {}), '(pv)\n', (5638, 5642), True, 'import numpy as np\n'), ((5645, 5655), 'numpy.log', 'np.log', (['qv'], {}), '(qv)\n', (5651, 5655), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# functions for working with raster data
# written by <NAME>, 2020
from osgeo import gdal, osr
import numpy as np
import subprocess as sp
import os, inspect
def get_nodata(raster_file, band = 1):
    """Return the NoData value of one band (default: band 1) of a raster."""
    dataset = gdal.Open(raster_file)
    value = dataset.GetRasterBand(band).GetNoDataValue()
    dataset = None  # release the GDAL handle
    return value
def get_gt_sr(raster_file):
    """Return [geotransform, projection WKT] of a raster."""
    dataset = gdal.Open(raster_file)
    geotransform = dataset.GetGeoTransform()
    projection = dataset.GetProjection()
    dataset = None  # release the GDAL handle
    return [geotransform, projection]
def get_proj4str(raster_file):
    """Return the PROJ4 string describing the raster's CRS."""
    dataset = gdal.Open(raster_file)
    srs = osr.SpatialReference(wkt = dataset.GetProjection())
    proj4 = srs.ExportToProj4().rstrip()
    dataset = None  # release the GDAL handle
    return proj4
def get_nbands(raster_file):
    """Return the number of bands in a raster."""
    dataset = gdal.Open(raster_file)
    band_count = dataset.RasterCount
    dataset = None  # release the GDAL handle
    return band_count
def get_dims(raster_file):
    """Return [columns, rows, bands] of a raster without reading its pixels."""
    dataset = gdal.Open(raster_file)
    dims = [dataset.RasterXSize, dataset.RasterYSize, dataset.RasterCount]
    dataset = None  # release the GDAL handle
    return dims
def get_xy_res(raster_file):
    """Return [x_res, y_res] cell sizes of a raster.

    The Y step is negated because GDAL geotransforms store a negative
    north-south pixel size.
    """
    dataset = gdal.Open(raster_file)
    geotransform = dataset.GetGeoTransform()
    dataset = None  # release the GDAL handle
    return [geotransform[1], -geotransform[5]]
def get_prj_units(raster_file):
    """Return the CRS unit name (e.g. 'm') parsed from the PROJ4 string."""
    proj4 = get_proj4str(raster_file)
    # pick the first '+...' token that mentions 'units', e.g. 'units=m '
    unit_tokens = [part for part in proj4.split('+') if 'units' in part]
    token = unit_tokens[0].strip()
    return token.split('=')[1]
def get_cell_area_ha(raster_file):
    """Return the grid-cell area in hectares; requires a metric CRS."""
    units = get_prj_units(raster_file)
    if units != 'm':  # guard clause: only meter-based CRSs are supported
        print('Error: CRS units are {} (must be meters).'.format(units), flush = True)
        return
    x_res, y_res = get_xy_res(raster_file)
    return x_res * y_res * 1e-4  # m^2 -> ha
def get_dtype(raster_file, band = 1):
    """Return the GDAL data-type name (e.g. 'Float32') of one band."""
    dataset = gdal.Open(raster_file)
    type_code = dataset.GetRasterBand(band).DataType
    dataset = None  # release the GDAL handle
    return gdal.GetDataTypeName(type_code)
def dtype_gdal(dtype_str):
    """Map a data-type name to the GDAL integer type code (0/GDT_Unknown on no match)."""
    codes = {
        "Unknown": gdal.GDT_Unknown,
        "Byte": gdal.GDT_Byte,
        "UInt16": gdal.GDT_UInt16,
        "Int16": gdal.GDT_Int16,
        "UInt32": gdal.GDT_UInt32,
        "Int32": gdal.GDT_Int32,
        "Float32": gdal.GDT_Float32,
        "Float64": gdal.GDT_Float64,
        "CInt16": gdal.GDT_CInt16,
        "CInt32": gdal.GDT_CInt32,
        "CFloat32": gdal.GDT_CFloat32,
        "CFloat64": gdal.GDT_CFloat64,
    }
    return codes.get(dtype_str, 0)
def dtype_bit_depth(dtype_str):
    """Return the per-pixel bit depth for a GDAL data-type name (0 if unknown)."""
    depths = {
        "Byte": 8,
        "UInt16": 16, "Int16": 16, "CInt16": 16,
        "UInt32": 32, "Int32": 32, "CInt32": 32,
        "Float32": 32, "CFloat32": 32,
        "Float64": 64, "CFloat64": 64,
    }
    return depths.get(dtype_str, 0)
def r2n(raster_file, band = 1):
    """Deprecated: load one raster band from disk into a 2D numpy array.

    Use raster() instead.
    """
    # BUG FIX: warning message said "depreciated" instead of "deprecated"
    print('WARNING: r2n() is deprecated, use raster() instead!', flush = True)
    file = gdal.Open(raster_file)
    img = file.GetRasterBand(band).ReadAsArray()
    file = None  # release the GDAL handle
    return img
def raster(raster_file, bands = None, verbose = False):
    """Load a single- or multi-band raster into a 2D or 3D numpy array.

    Parameters
    ----------
    raster_file : str
        Path to the raster on disk.
    bands : None, int or list of int, optional
        None reads every band; an integer reads that one band; a list of
        integers reads those bands stacked along axis 0.
        Band numbers are 1-based (there is no band 0).
    verbose : bool, optional
        Print progress messages.

    Returns
    -------
    numpy.ndarray or None
        Pixel data, or None when `bands` has an unsupported type.
    """
    if verbose: print('Reading {} ...'.format(raster_file), flush = True)
    file = gdal.Open(raster_file)
    tot_band_cnt = file.RasterCount
    if bands is None:  # FIX: 'is None' instead of '== None' (PEP 8)
        if (verbose) & (tot_band_cnt == 1): print('Raster has 1 band ...', flush = True)
        if (verbose) & (tot_band_cnt > 1): print('Reading all {} bands ...'.format(tot_band_cnt), flush = True)
        arr = file.ReadAsArray()
    elif isinstance(bands, int): # a single band number
        if verbose: print('Reading band {} of {} ...'.format(bands, tot_band_cnt), flush = True)
        arr = file.GetRasterBand(bands).ReadAsArray()
    elif isinstance(bands, list):
        arr_list = []
        for band in bands:
            if verbose: print('Reading band {} ...'.format(band), flush = True)
            arr_list.append(file.GetRasterBand(band).ReadAsArray())
        arr = np.stack(arr_list, axis = 0)
    else:
        print('Error: bands argument must be type INTEGER or LIST (of integers), e.g., [1, 3, 6] = Bands 1, 3 and 6. There is no Band 0.', flush = True)
        file = None  # FIX: release the GDAL handle on the error path too
        return
    file = None
    return arr
def write_gtiff(img_arr, out_tif, dtype, gt, sr, nodata = None, stats = True, msg = False):
    """Write a 2D numpy image array to a LZW-compressed GeoTIFF on disk.

    Parameters
    ----------
    img_arr : numpy.ndarray
        2D image array (rows x columns).
    out_tif : str
        Output GeoTIFF path.
    dtype : str
        GDAL data-type name, e.g. 'Float32' (see dtype_gdal).
    gt, sr
        Geotransform tuple and projection WKT string.
    nodata : int or float, optional
        NoData value to record on band 1 (strings are ignored).
    stats : bool, optional
        Compute raster statistics via gdal_edit.py after writing.
        (NOTE: this parameter shadows the module-level stats() function.)
    msg : bool, optional
        Print a progress message.
    """
    # check that output is a numpy array
    if type(img_arr) != np.ndarray:
        print('Error: numpy array invalid', flush = True)
        return
    # check gdal data type
    dtype_int = dtype_gdal(dtype)
    if dtype_int == 0:
        print('Error: output data type invalid', flush = True)
        return
    if msg: print('Writing {} ...'.format(out_tif), flush = True)
    # FIX: removed unused locals 'ndim' and 'cmd_chk'
    nband = 1
    nrow = img_arr.shape[0]
    ncol = img_arr.shape[1]
    driver = gdal.GetDriverByName('GTiff')
    out_dataset = driver.Create(out_tif, ncol, nrow, nband, dtype_int, options = [ 'COMPRESS=LZW' ])
    out_dataset.SetGeoTransform(gt)
    out_dataset.SetProjection(sr)
    out_dataset.GetRasterBand(1).WriteArray(img_arr)
    if (nodata is not None) and (type(nodata) != str):  # FIX: 'is not None' (PEP 8)
        out_dataset.GetRasterBand(1).SetNoDataValue(nodata)
    out_dataset = None  # flush and close
    if stats: sp.run(['gdal_edit.py', '-stats', out_tif])
    return
def stats(input, nodata = None):
"""Get descriptive statistics for either a raster on disk (input = filepath) or a numpy array stored in memory"""
if (type(input) != str) and (type(input) != np.ndarray):
print("Error: input must be either filepath to raster or numpy image array.", flush = True)
return
elif type(input) == str:
file = gdal.Open(input)
img = np.array(file.GetRasterBand(1).ReadAsArray())
nodata = file.GetRasterBand(1).GetNoDataValue()
img_data = img[img != nodata]
del img
img_min = np.min(img_data)
img_max = np.max(img_data)
img_mean = np.mean(img_data)
img_std = np.std(img_data)
file = None
else: # type(input) == np.ndarray
if nodata != None: input = input[input != nodata]
img_min = np.min(input)
img_max = np.max(input)
img_mean = np.mean(input)
img_std = np.std(input)
if nodata == None:
print("Min.\tMax.\tMean\tStd.", flush = True)
print("%2.2f\t%2.2f\t%2.2f\t%2.2f" % (img_min, img_max, img_mean, img_std), flush = True)
return
else:
print("Min.\tMax.\tMean\tStd.\tNoData", flush = True)
print("%2.2f\t%2.2f\t%2.2f\t%2.2f\t%i" % (img_min, img_max, img_mean, img_std, nodata), flush = True)
return
def compare_rasters(r1, r2):
    """Report the percentage of identical cells between two numpy arrays."""
    both_arrays = (type(r1) == np.ndarray) and (type(r2) == np.ndarray)
    if not both_arrays:
        print('Error: inputs must be numpy arrays.', flush = True)
        return
    if r1.shape != r2.shape:
        print('Error: inputs must have the same dimensions.', flush = True)
        return
    num_same = np.sum(r1 == r2)
    num_pxl = r1.size
    per_same = round(num_same / num_pxl * 100, 2)
    print('{}% of pixels are identical ({}/{} pixels)'.format(per_same, num_same, num_pxl), flush = True)
    return
# - - - - - - - - - - -
# additional misc tools
# - - - - - - - - - - -
def pcode(function):
    """Print the source code of *function*.

    Does not work for builtin functions/methods, for which
    inspect.isfunction() returns False.
    """
    # FIX: removed redundant '== True' comparison
    if inspect.isfunction(function):
        source_code_lines = inspect.getsourcelines(function)
        print(("".join(source_code_lines[0])), flush = True)
    else:
        print("Error: input is not a function", flush = True)
def check():
    """Announce that this module was imported successfully."""
    base = os.path.basename(os.path.abspath(__file__))
    modname = os.path.splitext(base)[0]
    print('{} loaded'.format(modname), flush = True)
| [
"numpy.stack",
"subprocess.run",
"os.path.abspath",
"inspect.getsourcelines",
"numpy.sum",
"numpy.std",
"numpy.min",
"numpy.max",
"numpy.mean",
"inspect.isfunction",
"osgeo.gdal.Open",
"osgeo.gdal.GetDriverByName",
"osgeo.gdal.GetDataTypeName"
] | [((261, 283), 'osgeo.gdal.Open', 'gdal.Open', (['raster_file'], {}), '(raster_file)\n', (270, 283), False, 'from osgeo import gdal, osr\n'), ((425, 447), 'osgeo.gdal.Open', 'gdal.Open', (['raster_file'], {}), '(raster_file)\n', (434, 447), False, 'from osgeo import gdal, osr\n'), ((598, 620), 'osgeo.gdal.Open', 'gdal.Open', (['raster_file'], {}), '(raster_file)\n', (607, 620), False, 'from osgeo import gdal, osr\n'), ((794, 816), 'osgeo.gdal.Open', 'gdal.Open', (['raster_file'], {}), '(raster_file)\n', (803, 816), False, 'from osgeo import gdal, osr\n'), ((983, 1005), 'osgeo.gdal.Open', 'gdal.Open', (['raster_file'], {}), '(raster_file)\n', (992, 1005), False, 'from osgeo import gdal, osr\n'), ((1262, 1284), 'osgeo.gdal.Open', 'gdal.Open', (['raster_file'], {}), '(raster_file)\n', (1271, 1284), False, 'from osgeo import gdal, osr\n'), ((2119, 2141), 'osgeo.gdal.Open', 'gdal.Open', (['raster_file'], {}), '(raster_file)\n', (2128, 2141), False, 'from osgeo import gdal, osr\n'), ((2202, 2233), 'osgeo.gdal.GetDataTypeName', 'gdal.GetDataTypeName', (['dtype_int'], {}), '(dtype_int)\n', (2222, 2233), False, 'from osgeo import gdal, osr\n'), ((3718, 3740), 'osgeo.gdal.Open', 'gdal.Open', (['raster_file'], {}), '(raster_file)\n', (3727, 3740), False, 'from osgeo import gdal, osr\n'), ((4155, 4177), 'osgeo.gdal.Open', 'gdal.Open', (['raster_file'], {}), '(raster_file)\n', (4164, 4177), False, 'from osgeo import gdal, osr\n'), ((5696, 5725), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (5716, 5725), False, 'from osgeo import gdal, osr\n'), ((6081, 6124), 'subprocess.run', 'sp.run', (["['gdal_edit.py', '-stats', out_tif]"], {}), "(['gdal_edit.py', '-stats', out_tif])\n", (6087, 6124), True, 'import subprocess as sp\n'), ((8060, 8088), 'inspect.isfunction', 'inspect.isfunction', (['function'], {}), '(function)\n', (8078, 8088), False, 'import os, inspect\n'), ((8120, 8152), 'inspect.getsourcelines', 'inspect.getsourcelines', 
(['function'], {}), '(function)\n', (8142, 8152), False, 'import os, inspect\n'), ((6478, 6494), 'osgeo.gdal.Open', 'gdal.Open', (['input'], {}), '(input)\n', (6487, 6494), False, 'from osgeo import gdal, osr\n'), ((6653, 6669), 'numpy.min', 'np.min', (['img_data'], {}), '(img_data)\n', (6659, 6669), True, 'import numpy as np\n'), ((6682, 6698), 'numpy.max', 'np.max', (['img_data'], {}), '(img_data)\n', (6688, 6698), True, 'import numpy as np\n'), ((6712, 6729), 'numpy.mean', 'np.mean', (['img_data'], {}), '(img_data)\n', (6719, 6729), True, 'import numpy as np\n'), ((6742, 6758), 'numpy.std', 'np.std', (['img_data'], {}), '(img_data)\n', (6748, 6758), True, 'import numpy as np\n'), ((6872, 6885), 'numpy.min', 'np.min', (['input'], {}), '(input)\n', (6878, 6885), True, 'import numpy as np\n'), ((6898, 6911), 'numpy.max', 'np.max', (['input'], {}), '(input)\n', (6904, 6911), True, 'import numpy as np\n'), ((6925, 6939), 'numpy.mean', 'np.mean', (['input'], {}), '(input)\n', (6932, 6939), True, 'import numpy as np\n'), ((6952, 6965), 'numpy.std', 'np.std', (['input'], {}), '(input)\n', (6958, 6965), True, 'import numpy as np\n'), ((7671, 7687), 'numpy.sum', 'np.sum', (['ind_same'], {}), '(ind_same)\n', (7677, 7687), True, 'import numpy as np\n'), ((4882, 4908), 'numpy.stack', 'np.stack', (['arr_list'], {'axis': '(0)'}), '(arr_list, axis=0)\n', (4890, 4908), True, 'import numpy as np\n'), ((8330, 8355), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (8345, 8355), False, 'import os, inspect\n')] |
import sys
import numpy as np
from scipy.spatial.distance import pdist
from gmr.utils import check_random_state
from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true
from nose.plugins.skip import SkipTest
from numpy.testing import assert_array_almost_equal
try:
# Python 2
from cStringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization
from test_mvn import AxisStub
# Shared test fixtures: a two-component Gaussian mixture and 100k samples
# drawn from it (50k per component).
random_state = check_random_state(0)
means = np.array([
    [0.0, 1.0],
    [2.0, -1.0],
])
covariances = np.array([
    [[0.5, -1.0], [-1.0, 5.0]],
    [[5.0, 1.0], [1.0, 0.5]],
])
X1 = random_state.multivariate_normal(means[0], covariances[0], size=(50000,))
X2 = random_state.multivariate_normal(means[1], covariances[1], size=(50000,))
X = np.vstack((X1, X2))
def test_kmeanspp_too_few_centers():
    """Requesting zero centers must raise ValueError."""
    data = np.array([[0.0, 1.0]])
    assert_raises(ValueError, kmeansplusplus_initialization, data, 0, 0)
def test_kmeanspp_too_many_centers():
    """Requesting more centers than samples must raise ValueError."""
    data = np.array([[0.0, 1.0]])
    assert_raises(ValueError, kmeansplusplus_initialization, data, 2, 0)
def test_kmeanspp_one_sample():
    """With a single sample, the single center equals that sample."""
    data = np.array([[0.0, 1.0]])
    assert_array_almost_equal(kmeansplusplus_initialization(data, 1, 0), data)
def test_kmeanspp_two_samples():
    """With two samples and one center, the center is one of the samples."""
    data = np.array([[0.0, 1.0], [1.0, 0.0]])
    chosen = kmeansplusplus_initialization(data, 1, 0)
    assert_in(chosen[0], data)
def test_kmeanspp_two_samples_two_centers():
    """Two centers chosen from two samples must be distinct samples."""
    data = np.array([[0.0, 1.0], [1.0, 0.0]])
    chosen = kmeansplusplus_initialization(data, 2, 0)
    for center in chosen:
        assert_in(center, data)
    assert_false(chosen[0, 0] == chosen[1, 0])
def test_kmeanspp_six_samples_three_centers():
    """The two distant outliers must be chosen; the third center comes from the cluster."""
    data = np.array([
        [0.0, 1.0],
        [1.0, 0.0],
        [0.0, 0.0],
        [1.0, 1.0],
        [100.0, 0.0],
        [0.0, 100.0]])
    centers = kmeansplusplus_initialization(data, 3, 0)
    assert_equal(len(centers), 3)
    assert_in(np.array([100.0, 0.0]), centers)
    assert_in(np.array([0.0, 100.0]), centers)
    assert_true(any(data[i] in centers for i in range(4)))
def test_initialize_no_covariance():
    """Requesting zero covariances must raise ValueError."""
    samples = np.array([[0, 1], [2, 3]])
    assert_raises(ValueError, covariance_initialization, samples, 0)
def test_initialize_one_covariance():
    """One covariance initialized from two 1D samples."""
    cov = covariance_initialization(np.array([[0], [1]]), 1)
    assert_equal(len(cov), 1)
    assert_array_almost_equal(cov, np.array([[[1.0]]]))
def test_initialize_two_covariances():
    """Two covariances initialized from three 1D samples."""
    cov = covariance_initialization(np.array([[0], [1], [2]]), 2)
    assert_equal(len(cov), 2)
    expected = np.array([[[2.0 / 3.0]], [[2.0 / 3.0]]]) ** 2
    assert_array_almost_equal(cov, expected)
def test_initialize_2d_covariance():
    """A single covariance from 2D samples is diagonal ([3^2, 4^2] here)."""
    cov = covariance_initialization(np.array([[0, 0], [3, 4]]), 1)
    assert_equal(len(cov), 1)
    assert_array_almost_equal(cov, np.array([[[9.0, 0.0], [0.0, 16.0]]]))
def test_estimate_moments():
    """Test moments estimated from samples and sampling from GMM."""
    global X
    global random_state
    gmm = GMM(n_components=2, random_state=random_state)
    gmm.from_samples(X)
    # fitted moments must be close to the generating parameters
    assert_less(np.linalg.norm(gmm.means[0] - means[0]), 0.005)
    assert_less(np.linalg.norm(gmm.covariances[0] - covariances[0]), 0.01)
    assert_less(np.linalg.norm(gmm.means[1] - means[1]), 0.01)
    assert_less(np.linalg.norm(gmm.covariances[1] - covariances[1]), 0.03)
    # NOTE(review): this reassigns the module-level X, so later tests see the
    # resampled data — test execution order matters here.
    X = gmm.sample(n_samples=100000)
    # refit on the resampled data; tolerances are slightly looser
    gmm = GMM(n_components=2, random_state=random_state)
    gmm.from_samples(X)
    assert_less(np.linalg.norm(gmm.means[0] - means[0]), 0.01)
    assert_less(np.linalg.norm(gmm.covariances[0] - covariances[0]), 0.03)
    assert_less(np.linalg.norm(gmm.means[1] - means[1]), 0.01)
    assert_less(np.linalg.norm(gmm.covariances[1] - covariances[1]), 0.04)
def test_estimation_from_previous_initialization():
    """EM started from the true parameters stays close to them after 2 iterations."""
    global X
    global random_state
    global means
    global covariances
    gmm = GMM(
        n_components=2,
        priors=0.5 * np.ones(2),
        means=np.copy(means),
        covariances=np.copy(covariances),
        random_state=check_random_state(2))
    gmm.from_samples(X, n_iter=2)
    for k, (tol_mean, tol_cov) in enumerate([(0.01, 0.03), (0.01, 0.04)]):
        assert_less(np.linalg.norm(gmm.means[k] - means[k]), tol_mean)
        assert_less(np.linalg.norm(gmm.covariances[k] - covariances[k]), tol_cov)
def test_probability_density():
    """Test PDF of GMM."""
    global X
    global random_state
    gmm = GMM(n_components=2, random_state=random_state)
    gmm.from_samples(X)
    # evaluate the density on a regular 201x201 grid over [-100, 100]^2
    x = np.linspace(-100, 100, 201)
    X_grid = np.vstack(list(map(np.ravel, np.meshgrid(x, x)))).T
    p = gmm.to_probability_density(X_grid)
    # Riemann-sum approximation of the integral; a PDF should integrate to ~1
    approx_int = np.sum(p) * ((x[-1] - x[0]) / 201) ** 2
    assert_less(np.abs(1.0 - approx_int), 0.01)
def test_conditional_distribution():
    """Conditioning the GMM on one dimension yields the expected moments."""
    rs = check_random_state(0)
    gmm = GMM(n_components=2, priors=np.array([0.5, 0.5]), means=means,
              covariances=covariances, random_state=rs)
    cond_on_y = gmm.condition(np.array([1]), np.array([1.0]))
    assert_array_almost_equal(cond_on_y.means[0], np.array([0.0]))
    assert_array_almost_equal(cond_on_y.covariances[0], np.array([[0.3]]))
    cond_on_x = gmm.condition(np.array([0]), np.array([2.0]))
    assert_array_almost_equal(cond_on_x.means[1], np.array([-1.0]))
    assert_array_almost_equal(cond_on_x.covariances[1], np.array([[0.3]]))
def test_sample_confidence_region():
    """Every sample drawn from a confidence region must lie inside it."""
    rs = check_random_state(0)
    component_means = np.array([[0.0, 1.0],
                                [2.0, -1.0]])
    component_covs = np.array([[[0.5, 0.0], [0.0, 5.0]],
                               [[5.0, 0.0], [0.0, 0.5]]])
    gmm = GMM(n_components=2, priors=np.array([0.5, 0.5]), means=component_means,
              covariances=component_covs, random_state=rs)
    for sample in gmm.sample_confidence_region(100, 0.7):
        assert_true(gmm.is_in_confidence_region(sample, 0.7))
def test_ellipses():
    """Equiprobable ellipses of axis-aligned components have the expected axes."""
    rs = check_random_state(0)
    component_means = np.array([[0.0, 1.0],
                                [2.0, -1.0]])
    component_covs = np.array([[[0.5, 0.0], [0.0, 5.0]],
                               [[5.0, 0.0], [0.0, 0.5]]])
    gmm = GMM(n_components=2, priors=np.array([0.5, 0.5]), means=component_means,
              covariances=component_covs, random_state=rs)
    ellipses = gmm.to_ellipses()
    expected_angles = [0.5 * np.pi, -np.pi]
    for k, (center, (angle, width, height)) in enumerate(ellipses):
        assert_array_almost_equal(component_means[k], center)
        assert_equal(angle, expected_angles[k])
        assert_equal(width, np.sqrt(5.0))
        assert_equal(height, np.sqrt(0.5))
def test_regression():
    """GMM regression recovers a noisy piecewise-linear function of x."""
    rs = check_random_state(0)
    n_samples = 200
    x = np.linspace(0, 2, n_samples)[:, np.newaxis]
    # rising line on the first half, falling line on the second half
    y = np.vstack((3 * x[:n_samples // 2] + 1,
                   -3 * x[n_samples // 2:] + 7))
    y = y + rs.randn(n_samples, 1) * 0.01
    samples = np.hstack((x, y))
    gmm = GMM(n_components=2, random_state=rs)
    gmm.from_samples(samples)
    assert_array_almost_equal(gmm.priors, 0.5 * np.ones(2), decimal=2)
    assert_array_almost_equal(gmm.means[0], np.array([0.5, 2.5]), decimal=2)
    assert_array_almost_equal(gmm.means[1], np.array([1.5, 2.5]), decimal=1)
    pred = gmm.predict(np.array([0]), x)
    mse = np.sum((y - pred) ** 2) / n_samples
    assert_less(mse, 0.01)
def test_regression_with_2d_input():
    """Test regression with GMM and two-dimensional input."""
    random_state = check_random_state(0)
    n_samples = 200
    x = np.linspace(0, 2, n_samples)[:, np.newaxis]
    y1 = 3 * x[:n_samples // 2] + 1
    y2 = -3 * x[n_samples // 2:] + 7
    noise = random_state.randn(n_samples, 1) * 0.01
    y = np.vstack((y1, y2)) + noise
    samples = np.hstack((x, x[::-1], y))
    gmm = GMM(n_components=2, random_state=random_state)
    gmm.from_samples(samples)
    pred = gmm.predict(np.array([0, 1]), np.hstack((x, x[::-1])))
    mse = np.sum((y - pred) ** 2) / n_samples
    # BUG FIX: mse was computed but never checked, so this test could not fail.
    assert_less(mse, 0.01)
def test_regression_without_noise():
    """Regression on noise-free piecewise-linear data is recovered closely."""
    rs = check_random_state(0)
    n_samples = 200
    x = np.linspace(0, 2, n_samples)[:, np.newaxis]
    y = np.vstack((3 * x[:n_samples // 2] + 1,
                   -3 * x[n_samples // 2:] + 7))
    samples = np.hstack((x, y))
    gmm = GMM(n_components=2, random_state=rs)
    gmm.from_samples(samples)
    assert_array_almost_equal(gmm.priors, 0.5 * np.ones(2), decimal=2)
    assert_array_almost_equal(gmm.means[0], np.array([1.5, 2.5]), decimal=2)
    assert_array_almost_equal(gmm.means[1], np.array([0.5, 2.5]), decimal=1)
    pred = gmm.predict(np.array([0]), x)
    mse = np.sum((y - pred) ** 2) / n_samples
    assert_less(mse, 0.01)
def test_plot():
    """plot_error_ellipses adds 16 artists to the axis, with or without colors."""
    gmm = GMM(n_components=2, priors=np.array([0.5, 0.5]), means=means,
              covariances=covariances, random_state=0)
    for extra_kwargs in (dict(), dict(colors=["r", "g"])):
        stub = AxisStub()
        plot_error_ellipses(stub, gmm, **extra_kwargs)
        assert_equal(stub.count, 16)
def test_verbose_from_samples():
    """Test verbose output."""
    global X
    random_state = check_random_state(0)
    # capture stdout while fitting with verbose=True; restore it afterwards
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        gmm = GMM(n_components=2, verbose=True, random_state=random_state)
        gmm.from_samples(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    assert("converged" in out)
def test_uninitialized():
    """Partially initialized GMMs reject every operation with ValueError."""
    rs = check_random_state(0)
    # progressively more parameters set — all still incomplete
    partial_configs = [
        dict(),
        dict(priors=np.ones(2)),
        dict(priors=np.ones(2), means=np.zeros((2, 2))),
    ]
    for extra in partial_configs:
        gmm = GMM(n_components=2, random_state=rs, **extra)
        assert_raises(ValueError, gmm.sample, 10)
        assert_raises(ValueError, gmm.to_probability_density, np.ones((1, 1)))
        assert_raises(ValueError, gmm.condition, np.zeros(0), np.zeros(0))
        assert_raises(ValueError, gmm.predict, np.zeros(0), np.zeros(0))
        assert_raises(ValueError, gmm.to_ellipses)
def test_float_precision_error():
    """Fitting a 10-component GMM on the Boston housing data must not crash."""
    try:
        from sklearn.datasets import load_boston
    except ImportError:
        raise SkipTest("sklearn is not available")
    # NOTE(review): load_boston was removed from scikit-learn (>=1.2); with a
    # recent sklearn the import above fails and this test is skipped.
    boston = load_boston()
    X, y = boston.data, boston.target
    gmm = GMM(n_components=10, random_state=2016)
    gmm.from_samples(X)
def test_kmeanspp_initialization():
    """Random init collapses on a badly scaled dataset; kmeans++ init survives."""
    rs = check_random_state(0)
    n_samples = 300
    n_features = 2
    data = np.ndarray((n_samples, n_features))
    third = n_samples // 3
    cluster_params = [
        (np.array([0.0, 1.0]), [[0.5, -1.0], [-1.0, 5.0]], slice(None, third)),
        (np.array([-2.0, -2.0]), [[3.0, 1.0], [1.0, 1.0]], slice(third, -third)),
        (np.array([3.0, 1.0]), [[3.0, -1.0], [-1.0, 1.0]], slice(-third, None)),
    ]
    for center, cov, rows in cluster_params:
        data[rows, :] = rs.multivariate_normal(center, cov, size=(third,))
    # artificial scaling, makes standard implementation fail
    # either the initial covariances have to be adjusted or we have
    # to normalize the dataset
    data[:, 1] *= 10000.0
    gmm = GMM(n_components=3, random_state=rs)
    gmm.from_samples(data, init_params="random")
    # random initialization fails: fitted covariances collapse to ~0
    for k in range(3):
        assert_less(gmm.covariances[k, 0, 0], np.finfo(float).eps)
    for k in range(3):
        assert_less(gmm.covariances[k, 1, 1], np.finfo(float).eps)
    gmm = GMM(n_components=3, random_state=rs)
    gmm.from_samples(data, init_params="kmeans++")
    mean_dists = pdist(gmm.means)
    assert_true(all(mean_dists > 1))
    assert_true(all(1e7 < gmm.covariances[:, 1, 1]))
    assert_true(all(gmm.covariances[:, 1, 1] < 1e9))
def test_unknown_initialization():
    """An unrecognized init_params value must raise ValueError."""
    gmm = GMM(n_components=3, random_state=0)
    assert_raises(ValueError, gmm.from_samples, X, init_params="unknown")
def test_mvn_to_mvn():
    """Collapsing a single-component GMM to an MVN keeps mean and covariance."""
    mean_arr = 123.0 * np.ones((1, 1))
    cov_arr = 4.0 * np.ones((1, 1, 1))
    gmm = GMM(n_components=1, priors=np.ones(1), means=mean_arr,
              covariances=cov_arr)
    collapsed = gmm.to_mvn()
    assert_array_almost_equal(collapsed.mean, mean_arr[0])
    assert_array_almost_equal(collapsed.covariance, cov_arr[0])
def test_2_components_to_mvn():
    """Collapsing a two-component GMM to an MVN gives the prior-weighted mean."""
    priors = np.array([0.25, 0.75])
    means = np.array([[1.0, 2.0], [3.0, 4.0]])
    covs = np.array([
        [[1.0, 0.0],
         [0.0, 1.0]],
        [[1.0, 0.0],
         [0.0, 1.0]],
    ])
    # BUG FIX: n_components was 1 although two components are supplied.
    gmm = GMM(n_components=2, priors=priors, means=means, covariances=covs)
    mvn = gmm.to_mvn()
    # expected: 0.25 * [1, 2] + 0.75 * [3, 4] = [2.5, 3.5]
    assert_array_almost_equal(mvn.mean, np.array([2.5, 3.5]))
def test_gmm_to_mvn_vs_mvn():
    """A GMM collapsed to an MVN matches an MVN fitted directly to the data."""
    rs = check_random_state(0)
    gmm = GMM(n_components=2, random_state=rs)
    gmm.from_samples(X)
    collapsed = gmm.to_mvn()
    direct = MVN(random_state=rs)
    direct.from_samples(X)
    assert_array_almost_equal(collapsed.mean, direct.mean)
    assert_array_almost_equal(collapsed.covariance, direct.covariance,
                              decimal=3)
def test_extract_mvn_negative_idx():
    """extract_mvn rejects a negative component index."""
    gmm = GMM(n_components=2, priors=0.5 * np.ones(2),
              means=np.zeros((2, 2)),
              covariances=[np.eye(2), np.eye(2)])
    assert_raises(ValueError, gmm.extract_mvn, -1)
def test_extract_mvn_idx_too_high():
    """extract_mvn rejects an index beyond the last component."""
    gmm = GMM(n_components=2, priors=0.5 * np.ones(2),
              means=np.zeros((2, 2)),
              covariances=[np.eye(2), np.eye(2)])
    assert_raises(ValueError, gmm.extract_mvn, 2)
def test_extract_mvns():
    """Each extracted MVN carries the mean of its own component."""
    component_means = np.array([[1, 2], [3, 4]])
    gmm = GMM(n_components=2, priors=0.5 * np.ones(2),
              means=component_means,
              covariances=[np.eye(2), np.eye(2)])
    for idx, expected_mean in enumerate(component_means):
        assert_array_almost_equal(gmm.extract_mvn(idx).mean, expected_mean)
| [
"numpy.sum",
"numpy.abs",
"nose.tools.assert_true",
"numpy.ones",
"sklearn.datasets.load_boston",
"scipy.spatial.distance.pdist",
"numpy.linalg.norm",
"numpy.testing.assert_array_almost_equal",
"gmr.GMM",
"numpy.ndarray",
"numpy.meshgrid",
"nose.tools.assert_less",
"numpy.copy",
"gmr.plot_... | [((574, 595), 'gmr.utils.check_random_state', 'check_random_state', (['(0)'], {}), '(0)\n', (592, 595), False, 'from gmr.utils import check_random_state\n'), ((605, 640), 'numpy.array', 'np.array', (['[[0.0, 1.0], [2.0, -1.0]]'], {}), '([[0.0, 1.0], [2.0, -1.0]])\n', (613, 640), True, 'import numpy as np\n'), ((673, 737), 'numpy.array', 'np.array', (['[[[0.5, -1.0], [-1.0, 5.0]], [[5.0, 1.0], [1.0, 0.5]]]'], {}), '([[[0.5, -1.0], [-1.0, 5.0]], [[5.0, 1.0], [1.0, 0.5]]])\n', (681, 737), True, 'import numpy as np\n'), ((924, 943), 'numpy.vstack', 'np.vstack', (['(X1, X2)'], {}), '((X1, X2))\n', (933, 943), True, 'import numpy as np\n'), ((991, 1013), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (999, 1013), True, 'import numpy as np\n'), ((1018, 1083), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'kmeansplusplus_initialization', 'X', '(0)', '(0)'], {}), '(ValueError, kmeansplusplus_initialization, X, 0, 0)\n', (1031, 1083), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((1132, 1154), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (1140, 1154), True, 'import numpy as np\n'), ((1159, 1224), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'kmeansplusplus_initialization', 'X', '(2)', '(0)'], {}), '(ValueError, kmeansplusplus_initialization, X, 2, 0)\n', (1172, 1224), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((1267, 1289), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (1275, 1289), True, 'import numpy as np\n'), ((1304, 1342), 'gmr.kmeansplusplus_initialization', 'kmeansplusplus_initialization', (['X', '(1)', '(0)'], {}), '(X, 1, 0)\n', (1333, 1342), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((1347, 1384), 
'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['X', 'centers'], {}), '(X, centers)\n', (1372, 1384), False, 'from numpy.testing import assert_array_almost_equal\n'), ((1428, 1462), 'numpy.array', 'np.array', (['[[0.0, 1.0], [1.0, 0.0]]'], {}), '([[0.0, 1.0], [1.0, 0.0]])\n', (1436, 1462), True, 'import numpy as np\n'), ((1477, 1515), 'gmr.kmeansplusplus_initialization', 'kmeansplusplus_initialization', (['X', '(1)', '(0)'], {}), '(X, 1, 0)\n', (1506, 1515), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((1520, 1544), 'nose.tools.assert_in', 'assert_in', (['centers[0]', 'X'], {}), '(centers[0], X)\n', (1529, 1544), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((1600, 1634), 'numpy.array', 'np.array', (['[[0.0, 1.0], [1.0, 0.0]]'], {}), '([[0.0, 1.0], [1.0, 0.0]])\n', (1608, 1634), True, 'import numpy as np\n'), ((1649, 1687), 'gmr.kmeansplusplus_initialization', 'kmeansplusplus_initialization', (['X', '(2)', '(0)'], {}), '(X, 2, 0)\n', (1678, 1687), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((1692, 1716), 'nose.tools.assert_in', 'assert_in', (['centers[0]', 'X'], {}), '(centers[0], X)\n', (1701, 1716), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((1721, 1745), 'nose.tools.assert_in', 'assert_in', (['centers[1]', 'X'], {}), '(centers[1], X)\n', (1730, 1745), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((1750, 1794), 'nose.tools.assert_false', 'assert_false', (['(centers[0, 0] == centers[1, 0])'], {}), '(centers[0, 0] == centers[1, 0])\n', (1762, 1794), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((1852, 1943), 
'numpy.array', 'np.array', (['[[0.0, 1.0], [1.0, 0.0], [0.0, 0.0], [1.0, 1.0], [100.0, 0.0], [0.0, 100.0]]'], {}), '([[0.0, 1.0], [1.0, 0.0], [0.0, 0.0], [1.0, 1.0], [100.0, 0.0], [\n 0.0, 100.0]])\n', (1860, 1943), True, 'import numpy as np\n'), ((2002, 2040), 'gmr.kmeansplusplus_initialization', 'kmeansplusplus_initialization', (['X', '(3)', '(0)'], {}), '(X, 3, 0)\n', (2031, 2040), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((2173, 2262), 'nose.tools.assert_true', 'assert_true', (['(X[0] in centers or X[1] in centers or X[2] in centers or X[3] in centers)'], {}), '(X[0] in centers or X[1] in centers or X[2] in centers or X[3] in\n centers)\n', (2184, 2262), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((3205, 3251), 'gmr.GMM', 'GMM', ([], {'n_components': '(2)', 'random_state': 'random_state'}), '(n_components=2, random_state=random_state)\n', (3208, 3251), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((3602, 3648), 'gmr.GMM', 'GMM', ([], {'n_components': '(2)', 'random_state': 'random_state'}), '(n_components=2, random_state=random_state)\n', (3605, 3648), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((4675, 4721), 'gmr.GMM', 'GMM', ([], {'n_components': '(2)', 'random_state': 'random_state'}), '(n_components=2, random_state=random_state)\n', (4678, 4721), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((4755, 4782), 'numpy.linspace', 'np.linspace', (['(-100)', '(100)', '(201)'], {}), '(-100, 100, 201)\n', (4766, 4782), True, 'import numpy as np\n'), ((5099, 5120), 'gmr.utils.check_random_state', 'check_random_state', (['(0)'], {}), '(0)\n', (5117, 5120), False, 'from gmr.utils import check_random_state\n'), 
((5788, 5809), 'gmr.utils.check_random_state', 'check_random_state', (['(0)'], {}), '(0)\n', (5806, 5809), False, 'from gmr.utils import check_random_state\n'), ((5823, 5858), 'numpy.array', 'np.array', (['[[0.0, 1.0], [2.0, -1.0]]'], {}), '([[0.0, 1.0], [2.0, -1.0]])\n', (5831, 5858), True, 'import numpy as np\n'), ((5899, 5961), 'numpy.array', 'np.array', (['[[[0.5, 0.0], [0.0, 5.0]], [[5.0, 0.0], [0.0, 0.5]]]'], {}), '([[[0.5, 0.0], [0.0, 5.0]], [[5.0, 0.0], [0.0, 0.5]]])\n', (5907, 5961), True, 'import numpy as np\n'), ((6351, 6372), 'gmr.utils.check_random_state', 'check_random_state', (['(0)'], {}), '(0)\n', (6369, 6372), False, 'from gmr.utils import check_random_state\n'), ((6386, 6421), 'numpy.array', 'np.array', (['[[0.0, 1.0], [2.0, -1.0]]'], {}), '([[0.0, 1.0], [2.0, -1.0]])\n', (6394, 6421), True, 'import numpy as np\n'), ((6462, 6524), 'numpy.array', 'np.array', (['[[[0.5, 0.0], [0.0, 5.0]], [[5.0, 0.0], [0.0, 0.5]]]'], {}), '([[[0.5, 0.0], [0.0, 5.0]], [[5.0, 0.0], [0.0, 0.5]]])\n', (6470, 6524), True, 'import numpy as np\n'), ((6777, 6818), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['means[0]', 'mean'], {}), '(means[0], mean)\n', (6802, 6818), False, 'from numpy.testing import assert_array_almost_equal\n'), ((6823, 6855), 'nose.tools.assert_equal', 'assert_equal', (['angle', '(0.5 * np.pi)'], {}), '(angle, 0.5 * np.pi)\n', (6835, 6855), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((6985, 7026), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['means[1]', 'mean'], {}), '(means[1], mean)\n', (7010, 7026), False, 'from numpy.testing import assert_array_almost_equal\n'), ((7031, 7058), 'nose.tools.assert_equal', 'assert_equal', (['angle', '(-np.pi)'], {}), '(angle, -np.pi)\n', (7043, 7058), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((7216, 7237), 
'gmr.utils.check_random_state', 'check_random_state', (['(0)'], {}), '(0)\n', (7234, 7237), False, 'from gmr.utils import check_random_state\n'), ((7486, 7503), 'numpy.hstack', 'np.hstack', (['(x, y)'], {}), '((x, y))\n', (7495, 7503), True, 'import numpy as np\n'), ((7515, 7561), 'gmr.GMM', 'GMM', ([], {'n_components': '(2)', 'random_state': 'random_state'}), '(n_components=2, random_state=random_state)\n', (7518, 7561), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((7909, 7931), 'nose.tools.assert_less', 'assert_less', (['mse', '(0.01)'], {}), '(mse, 0.01)\n', (7920, 7931), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((8052, 8073), 'gmr.utils.check_random_state', 'check_random_state', (['(0)'], {}), '(0)\n', (8070, 8073), False, 'from gmr.utils import check_random_state\n'), ((8322, 8348), 'numpy.hstack', 'np.hstack', (['(x, x[::-1], y)'], {}), '((x, x[::-1], y))\n', (8331, 8348), True, 'import numpy as np\n'), ((8360, 8406), 'gmr.GMM', 'GMM', ([], {'n_components': '(2)', 'random_state': 'random_state'}), '(n_components=2, random_state=random_state)\n', (8363, 8406), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((8649, 8670), 'gmr.utils.check_random_state', 'check_random_state', (['(0)'], {}), '(0)\n', (8667, 8670), False, 'from gmr.utils import check_random_state\n'), ((8825, 8844), 'numpy.vstack', 'np.vstack', (['(y1, y2)'], {}), '((y1, y2))\n', (8834, 8844), True, 'import numpy as np\n'), ((8859, 8876), 'numpy.hstack', 'np.hstack', (['(x, y)'], {}), '((x, y))\n', (8868, 8876), True, 'import numpy as np\n'), ((8888, 8934), 'gmr.GMM', 'GMM', ([], {'n_components': '(2)', 'random_state': 'random_state'}), '(n_components=2, random_state=random_state)\n', (8891, 8934), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, 
covariance_initialization\n'), ((9282, 9304), 'nose.tools.assert_less', 'assert_less', (['mse', '(0.01)'], {}), '(mse, 0.01)\n', (9293, 9304), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((9489, 9499), 'test_mvn.AxisStub', 'AxisStub', ([], {}), '()\n', (9497, 9499), False, 'from test_mvn import AxisStub\n'), ((9504, 9532), 'gmr.plot_error_ellipses', 'plot_error_ellipses', (['ax', 'gmm'], {}), '(ax, gmm)\n', (9523, 9532), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((9537, 9563), 'nose.tools.assert_equal', 'assert_equal', (['ax.count', '(16)'], {}), '(ax.count, 16)\n', (9549, 9563), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((9574, 9584), 'test_mvn.AxisStub', 'AxisStub', ([], {}), '()\n', (9582, 9584), False, 'from test_mvn import AxisStub\n'), ((9589, 9636), 'gmr.plot_error_ellipses', 'plot_error_ellipses', (['ax', 'gmm'], {'colors': "['r', 'g']"}), "(ax, gmm, colors=['r', 'g'])\n", (9608, 9636), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((9641, 9667), 'nose.tools.assert_equal', 'assert_equal', (['ax.count', '(16)'], {}), '(ax.count, 16)\n', (9653, 9667), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((9766, 9787), 'gmr.utils.check_random_state', 'check_random_state', (['(0)'], {}), '(0)\n', (9784, 9787), False, 'from gmr.utils import check_random_state\n'), ((9834, 9844), 'io.StringIO', 'StringIO', ([], {}), '()\n', (9842, 9844), False, 'from io import StringIO\n'), ((10190, 10211), 'gmr.utils.check_random_state', 'check_random_state', (['(0)'], {}), '(0)\n', (10208, 10211), False, 'from gmr.utils import check_random_state\n'), ((10222, 10268), 'gmr.GMM', 'GMM', ([], {'n_components': '(2)', 'random_state': 
'random_state'}), '(n_components=2, random_state=random_state)\n', (10225, 10268), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((10273, 10314), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'gmm.sample', '(10)'], {}), '(ValueError, gmm.sample, 10)\n', (10286, 10314), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((10534, 10576), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'gmm.to_ellipses'], {}), '(ValueError, gmm.to_ellipses)\n', (10547, 10576), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((10657, 10698), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'gmm.sample', '(10)'], {}), '(ValueError, gmm.sample, 10)\n', (10670, 10698), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((10918, 10960), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'gmm.to_ellipses'], {}), '(ValueError, gmm.to_ellipses)\n', (10931, 10960), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((11079, 11120), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'gmm.sample', '(10)'], {}), '(ValueError, gmm.sample, 10)\n', (11092, 11120), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((11340, 11382), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'gmm.to_ellipses'], {}), '(ValueError, gmm.to_ellipses)\n', (11353, 11382), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((11566, 11579), 'sklearn.datasets.load_boston', 'load_boston', ([], {}), '()\n', (11577, 11579), False, 'from sklearn.datasets import load_boston\n'), ((11628, 11667), 'gmr.GMM', 
'GMM', ([], {'n_components': '(10)', 'random_state': '(2016)'}), '(n_components=10, random_state=2016)\n', (11631, 11667), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((11749, 11770), 'gmr.utils.check_random_state', 'check_random_state', (['(0)'], {}), '(0)\n', (11767, 11770), False, 'from gmr.utils import check_random_state\n'), ((11819, 11854), 'numpy.ndarray', 'np.ndarray', (['(n_samples, n_features)'], {}), '((n_samples, n_features))\n', (11829, 11854), True, 'import numpy as np\n'), ((11867, 11887), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (11875, 11887), True, 'import numpy as np\n'), ((12029, 12051), 'numpy.array', 'np.array', (['[-2.0, -2.0]'], {}), '([-2.0, -2.0])\n', (12037, 12051), True, 'import numpy as np\n'), ((12206, 12226), 'numpy.array', 'np.array', (['[3.0, 1.0]'], {}), '([3.0, 1.0])\n', (12214, 12226), True, 'import numpy as np\n'), ((12552, 12598), 'gmr.GMM', 'GMM', ([], {'n_components': '(3)', 'random_state': 'random_state'}), '(n_components=3, random_state=random_state)\n', (12555, 12598), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((13068, 13114), 'gmr.GMM', 'GMM', ([], {'n_components': '(3)', 'random_state': 'random_state'}), '(n_components=3, random_state=random_state)\n', (13071, 13114), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((13180, 13196), 'scipy.spatial.distance.pdist', 'pdist', (['gmm.means'], {}), '(gmm.means)\n', (13185, 13196), False, 'from scipy.spatial.distance import pdist\n'), ((13387, 13422), 'gmr.GMM', 'GMM', ([], {'n_components': '(3)', 'random_state': '(0)'}), '(n_components=3, random_state=0)\n', (13390, 13422), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((13427, 13496), 'nose.tools.assert_raises', 
'assert_raises', (['ValueError', 'gmm.from_samples', 'X'], {'init_params': '"""unknown"""'}), "(ValueError, gmm.from_samples, X, init_params='unknown')\n", (13440, 13496), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((13701, 13746), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['mvn.mean', 'means[0]'], {}), '(mvn.mean, means[0])\n', (13726, 13746), False, 'from numpy.testing import assert_array_almost_equal\n'), ((13751, 13801), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['mvn.covariance', 'covs[0]'], {}), '(mvn.covariance, covs[0])\n', (13776, 13801), False, 'from numpy.testing import assert_array_almost_equal\n'), ((13849, 13871), 'numpy.array', 'np.array', (['[0.25, 0.75]'], {}), '([0.25, 0.75])\n', (13857, 13871), True, 'import numpy as np\n'), ((13884, 13918), 'numpy.array', 'np.array', (['[[1.0, 2.0], [3.0, 4.0]]'], {}), '([[1.0, 2.0], [3.0, 4.0]])\n', (13892, 13918), True, 'import numpy as np\n'), ((13930, 13992), 'numpy.array', 'np.array', (['[[[1.0, 0.0], [0.0, 1.0]], [[1.0, 0.0], [0.0, 1.0]]]'], {}), '([[[1.0, 0.0], [0.0, 1.0]], [[1.0, 0.0], [0.0, 1.0]]])\n', (13938, 13992), True, 'import numpy as np\n'), ((14044, 14109), 'gmr.GMM', 'GMM', ([], {'n_components': '(1)', 'priors': 'priors', 'means': 'means', 'covariances': 'covs'}), '(n_components=1, priors=priors, means=means, covariances=covs)\n', (14047, 14109), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((14246, 14267), 'gmr.utils.check_random_state', 'check_random_state', (['(0)'], {}), '(0)\n', (14264, 14267), False, 'from gmr.utils import check_random_state\n'), ((14278, 14324), 'gmr.GMM', 'GMM', ([], {'n_components': '(2)', 'random_state': 'random_state'}), '(n_components=2, random_state=random_state)\n', (14281, 14324), False, 'from gmr import GMM, MVN, plot_error_ellipses, 
kmeansplusplus_initialization, covariance_initialization\n'), ((14391, 14421), 'gmr.MVN', 'MVN', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (14394, 14421), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((14450, 14504), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['mvn_from_gmm.mean', 'mvn.mean'], {}), '(mvn_from_gmm.mean, mvn.mean)\n', (14475, 14504), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14509, 14586), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['mvn_from_gmm.covariance', 'mvn.covariance'], {'decimal': '(3)'}), '(mvn_from_gmm.covariance, mvn.covariance, decimal=3)\n', (14534, 14586), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14761, 14807), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'gmm.extract_mvn', '(-1)'], {}), '(ValueError, gmm.extract_mvn, -1)\n', (14774, 14807), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((14973, 15018), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'gmm.extract_mvn', '(2)'], {}), '(ValueError, gmm.extract_mvn, 2)\n', (14986, 15018), False, 'from nose.tools import assert_equal, assert_less, assert_raises, assert_in, assert_false, assert_true\n'), ((2089, 2111), 'numpy.array', 'np.array', (['[100.0, 0.0]'], {}), '([100.0, 0.0])\n', (2097, 2111), True, 'import numpy as np\n'), ((2136, 2158), 'numpy.array', 'np.array', (['[0.0, 100.0]'], {}), '([0.0, 100.0])\n', (2144, 2158), True, 'import numpy as np\n'), ((2410, 2436), 'numpy.array', 'np.array', (['[[0, 1], [2, 3]]'], {}), '([[0, 1], [2, 3]])\n', (2418, 2436), True, 'import numpy as np\n'), ((2517, 2537), 'numpy.array', 'np.array', (['[[0], [1]]'], {}), '([[0], [1]])\n', (2525, 2537), True, 'import numpy as np\n'), ((2607, 2626), 'numpy.array', 'np.array', (['[[[1.0]]]'], {}), 
'([[[1.0]]])\n', (2615, 2626), True, 'import numpy as np\n'), ((2705, 2730), 'numpy.array', 'np.array', (['[[0], [1], [2]]'], {}), '([[0], [1], [2]])\n', (2713, 2730), True, 'import numpy as np\n'), ((2922, 2948), 'numpy.array', 'np.array', (['[[0, 0], [3, 4]]'], {}), '([[0, 0], [3, 4]])\n', (2930, 2948), True, 'import numpy as np\n'), ((3018, 3055), 'numpy.array', 'np.array', (['[[[9.0, 0.0], [0.0, 16.0]]]'], {}), '([[[9.0, 0.0], [0.0, 16.0]]])\n', (3026, 3055), True, 'import numpy as np\n'), ((3292, 3331), 'numpy.linalg.norm', 'np.linalg.norm', (['(gmm.means[0] - means[0])'], {}), '(gmm.means[0] - means[0])\n', (3306, 3331), True, 'import numpy as np\n'), ((3356, 3407), 'numpy.linalg.norm', 'np.linalg.norm', (['(gmm.covariances[0] - covariances[0])'], {}), '(gmm.covariances[0] - covariances[0])\n', (3370, 3407), True, 'import numpy as np\n'), ((3431, 3470), 'numpy.linalg.norm', 'np.linalg.norm', (['(gmm.means[1] - means[1])'], {}), '(gmm.means[1] - means[1])\n', (3445, 3470), True, 'import numpy as np\n'), ((3494, 3545), 'numpy.linalg.norm', 'np.linalg.norm', (['(gmm.covariances[1] - covariances[1])'], {}), '(gmm.covariances[1] - covariances[1])\n', (3508, 3545), True, 'import numpy as np\n'), ((3689, 3728), 'numpy.linalg.norm', 'np.linalg.norm', (['(gmm.means[0] - means[0])'], {}), '(gmm.means[0] - means[0])\n', (3703, 3728), True, 'import numpy as np\n'), ((3752, 3803), 'numpy.linalg.norm', 'np.linalg.norm', (['(gmm.covariances[0] - covariances[0])'], {}), '(gmm.covariances[0] - covariances[0])\n', (3766, 3803), True, 'import numpy as np\n'), ((3827, 3866), 'numpy.linalg.norm', 'np.linalg.norm', (['(gmm.means[1] - means[1])'], {}), '(gmm.means[1] - means[1])\n', (3841, 3866), True, 'import numpy as np\n'), ((3890, 3941), 'numpy.linalg.norm', 'np.linalg.norm', (['(gmm.covariances[1] - covariances[1])'], {}), '(gmm.covariances[1] - covariances[1])\n', (3904, 3941), True, 'import numpy as np\n'), ((4306, 4345), 'numpy.linalg.norm', 'np.linalg.norm', 
(['(gmm.means[0] - means[0])'], {}), '(gmm.means[0] - means[0])\n', (4320, 4345), True, 'import numpy as np\n'), ((4369, 4420), 'numpy.linalg.norm', 'np.linalg.norm', (['(gmm.covariances[0] - covariances[0])'], {}), '(gmm.covariances[0] - covariances[0])\n', (4383, 4420), True, 'import numpy as np\n'), ((4444, 4483), 'numpy.linalg.norm', 'np.linalg.norm', (['(gmm.means[1] - means[1])'], {}), '(gmm.means[1] - means[1])\n', (4458, 4483), True, 'import numpy as np\n'), ((4507, 4558), 'numpy.linalg.norm', 'np.linalg.norm', (['(gmm.covariances[1] - covariances[1])'], {}), '(gmm.covariances[1] - covariances[1])\n', (4521, 4558), True, 'import numpy as np\n'), ((4908, 4917), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (4914, 4917), True, 'import numpy as np\n'), ((4964, 4988), 'numpy.abs', 'np.abs', (['(1.0 - approx_int)'], {}), '(1.0 - approx_int)\n', (4970, 4988), True, 'import numpy as np\n'), ((5293, 5306), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (5301, 5306), True, 'import numpy as np\n'), ((5308, 5323), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (5316, 5323), True, 'import numpy as np\n'), ((5377, 5392), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (5385, 5392), True, 'import numpy as np\n'), ((5452, 5469), 'numpy.array', 'np.array', (['[[0.3]]'], {}), '([[0.3]])\n', (5460, 5469), True, 'import numpy as np\n'), ((5503, 5516), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (5511, 5516), True, 'import numpy as np\n'), ((5518, 5533), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (5526, 5533), True, 'import numpy as np\n'), ((5587, 5603), 'numpy.array', 'np.array', (['[-1.0]'], {}), '([-1.0])\n', (5595, 5603), True, 'import numpy as np\n'), ((5663, 5680), 'numpy.array', 'np.array', (['[[0.3]]'], {}), '([[0.3]])\n', (5671, 5680), True, 'import numpy as np\n'), ((6880, 6892), 'numpy.sqrt', 'np.sqrt', (['(5.0)'], {}), '(5.0)\n', (6887, 6892), True, 'import numpy as np\n'), ((6919, 6931), 'numpy.sqrt', 'np.sqrt', 
(['(0.5)'], {}), '(0.5)\n', (6926, 6931), True, 'import numpy as np\n'), ((7083, 7095), 'numpy.sqrt', 'np.sqrt', (['(5.0)'], {}), '(5.0)\n', (7090, 7095), True, 'import numpy as np\n'), ((7122, 7134), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (7129, 7134), True, 'import numpy as np\n'), ((7267, 7295), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', 'n_samples'], {}), '(0, 2, n_samples)\n', (7278, 7295), True, 'import numpy as np\n'), ((7444, 7463), 'numpy.vstack', 'np.vstack', (['(y1, y2)'], {}), '((y1, y2))\n', (7453, 7463), True, 'import numpy as np\n'), ((7707, 7727), 'numpy.array', 'np.array', (['[0.5, 2.5]'], {}), '([0.5, 2.5])\n', (7715, 7727), True, 'import numpy as np\n'), ((7784, 7804), 'numpy.array', 'np.array', (['[1.5, 2.5]'], {}), '([1.5, 2.5])\n', (7792, 7804), True, 'import numpy as np\n'), ((7841, 7854), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (7849, 7854), True, 'import numpy as np\n'), ((7869, 7892), 'numpy.sum', 'np.sum', (['((y - pred) ** 2)'], {}), '((y - pred) ** 2)\n', (7875, 7892), True, 'import numpy as np\n'), ((8103, 8131), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', 'n_samples'], {}), '(0, 2, n_samples)\n', (8114, 8131), True, 'import numpy as np\n'), ((8280, 8299), 'numpy.vstack', 'np.vstack', (['(y1, y2)'], {}), '((y1, y2))\n', (8289, 8299), True, 'import numpy as np\n'), ((8461, 8477), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (8469, 8477), True, 'import numpy as np\n'), ((8479, 8502), 'numpy.hstack', 'np.hstack', (['(x, x[::-1])'], {}), '((x, x[::-1]))\n', (8488, 8502), True, 'import numpy as np\n'), ((8514, 8537), 'numpy.sum', 'np.sum', (['((y - pred) ** 2)'], {}), '((y - pred) ** 2)\n', (8520, 8537), True, 'import numpy as np\n'), ((8700, 8728), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', 'n_samples'], {}), '(0, 2, n_samples)\n', (8711, 8728), True, 'import numpy as np\n'), ((9080, 9100), 'numpy.array', 'np.array', (['[1.5, 2.5]'], {}), '([1.5, 2.5])\n', (9088, 9100), True, 
'import numpy as np\n'), ((9157, 9177), 'numpy.array', 'np.array', (['[0.5, 2.5]'], {}), '([0.5, 2.5])\n', (9165, 9177), True, 'import numpy as np\n'), ((9214, 9227), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (9222, 9227), True, 'import numpy as np\n'), ((9242, 9265), 'numpy.sum', 'np.sum', (['((y - pred) ** 2)'], {}), '((y - pred) ** 2)\n', (9248, 9265), True, 'import numpy as np\n'), ((9868, 9928), 'gmr.GMM', 'GMM', ([], {'n_components': '(2)', 'verbose': '(True)', 'random_state': 'random_state'}), '(n_components=2, verbose=True, random_state=random_state)\n', (9871, 9928), False, 'from gmr import GMM, MVN, plot_error_ellipses, kmeansplusplus_initialization, covariance_initialization\n'), ((9984, 10005), 'sys.stdout.getvalue', 'sys.stdout.getvalue', ([], {}), '()\n', (10003, 10005), False, 'import sys\n'), ((10014, 10032), 'sys.stdout.close', 'sys.stdout.close', ([], {}), '()\n', (10030, 10032), False, 'import sys\n'), ((10373, 10388), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (10380, 10388), True, 'import numpy as np\n'), ((10435, 10446), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (10443, 10446), True, 'import numpy as np\n'), ((10448, 10459), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (10456, 10459), True, 'import numpy as np\n'), ((10504, 10515), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (10512, 10515), True, 'import numpy as np\n'), ((10517, 10528), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (10525, 10528), True, 'import numpy as np\n'), ((10757, 10772), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (10764, 10772), True, 'import numpy as np\n'), ((10819, 10830), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (10827, 10830), True, 'import numpy as np\n'), ((10832, 10843), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (10840, 10843), True, 'import numpy as np\n'), ((10888, 10899), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (10896, 10899), True, 'import numpy as np\n'), 
((10901, 10912), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (10909, 10912), True, 'import numpy as np\n'), ((11179, 11194), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (11186, 11194), True, 'import numpy as np\n'), ((11241, 11252), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (11249, 11252), True, 'import numpy as np\n'), ((11254, 11265), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (11262, 11265), True, 'import numpy as np\n'), ((11310, 11321), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (11318, 11321), True, 'import numpy as np\n'), ((11323, 11334), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (11331, 11334), True, 'import numpy as np\n'), ((13542, 13557), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (13549, 13557), True, 'import numpy as np\n'), ((13575, 13593), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (13582, 13593), True, 'import numpy as np\n'), ((14173, 14193), 'numpy.array', 'np.array', (['[2.5, 3.5]'], {}), '([2.5, 3.5])\n', (14181, 14193), True, 'import numpy as np\n'), ((15249, 15265), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (15257, 15265), True, 'import numpy as np\n'), ((15338, 15354), 'numpy.array', 'np.array', (['[3, 4]'], {}), '([3, 4])\n', (15346, 15354), True, 'import numpy as np\n'), ((2800, 2840), 'numpy.array', 'np.array', (['[[[2.0 / 3.0]], [[2.0 / 3.0]]]'], {}), '([[[2.0 / 3.0]], [[2.0 / 3.0]]])\n', (2808, 2840), True, 'import numpy as np\n'), ((4142, 4156), 'numpy.copy', 'np.copy', (['means'], {}), '(means)\n', (4149, 4156), True, 'import numpy as np\n'), ((4184, 4204), 'numpy.copy', 'np.copy', (['covariances'], {}), '(covariances)\n', (4191, 4204), True, 'import numpy as np\n'), ((4233, 4254), 'gmr.utils.check_random_state', 'check_random_state', (['(2)'], {}), '(2)\n', (4251, 4254), False, 'from gmr.utils import check_random_state\n'), ((5159, 5179), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (5167, 5179), True, 
'import numpy as np\n'), ((6028, 6048), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (6036, 6048), True, 'import numpy as np\n'), ((6591, 6611), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (6599, 6611), True, 'import numpy as np\n'), ((7640, 7650), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (7647, 7650), True, 'import numpy as np\n'), ((9013, 9023), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (9020, 9023), True, 'import numpy as np\n'), ((9389, 9409), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (9397, 9409), True, 'import numpy as np\n'), ((10614, 10624), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (10621, 10624), True, 'import numpy as np\n'), ((10998, 11008), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (11005, 11008), True, 'import numpy as np\n'), ((11016, 11032), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (11024, 11032), True, 'import numpy as np\n'), ((11515, 11551), 'nose.plugins.skip.SkipTest', 'SkipTest', (['"""sklearn is not available"""'], {}), "('sklearn is not available')\n", (11523, 11551), False, 'from nose.plugins.skip import SkipTest\n'), ((12721, 12736), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (12729, 12736), True, 'import numpy as np\n'), ((12784, 12799), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (12792, 12799), True, 'import numpy as np\n'), ((12847, 12862), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (12855, 12862), True, 'import numpy as np\n'), ((12910, 12925), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (12918, 12925), True, 'import numpy as np\n'), ((12973, 12988), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (12981, 12988), True, 'import numpy as np\n'), ((13036, 13051), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (13044, 13051), True, 'import numpy as np\n'), ((13631, 13641), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (13638, 13641), True, 'import 
numpy as np\n'), ((14696, 14712), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (14704, 14712), True, 'import numpy as np\n'), ((14908, 14924), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (14916, 14924), True, 'import numpy as np\n'), ((15121, 15147), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (15129, 15147), True, 'import numpy as np\n'), ((4124, 4134), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (4131, 4134), True, 'import numpy as np\n'), ((14678, 14688), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (14685, 14688), True, 'import numpy as np\n'), ((14890, 14900), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (14897, 14900), True, 'import numpy as np\n'), ((15089, 15099), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (15096, 15099), True, 'import numpy as np\n'), ((4825, 4842), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x'], {}), '(x, x)\n', (4836, 4842), True, 'import numpy as np\n'), ((14741, 14750), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (14747, 14750), True, 'import numpy as np\n'), ((14953, 14962), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (14959, 14962), True, 'import numpy as np\n'), ((15162, 15171), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (15168, 15171), True, 'import numpy as np\n')] |
import scipy.sparse.linalg as spsl
import scipy.linalg as spl
import scipy.sparse as spm
import scipy.optimize as spo
import numpy as np
from numerics.softmax import logistic
from utils.label_factory import BinaryLabels
n_classes = 10
class HyperParameterOptimiser(object):
    """Gradient-ascent optimiser for GP covariance hyper-parameters.

    Updates the kernel hyper-parameters 'sigma' (signal scale) and 'lambda'
    (length-scale) by one step on the gradient of the Laplace-approximated
    log marginal likelihood (cf. Rasmussen & Williams, GPML, ch. 5).
    """

    def __init__(self):
        pass

    def optimise_hyper_params_multiclass(self, f_posterior):
        """Multiclass hyper-parameter optimisation -- not implemented yet."""
        raise NotImplementedError
        # # perform standard optimisation routine using the gradients of the likelihood and the likelihood
        # grads_hypers, likelihood_funciton = self._compute_hypers_partial_derivatives_multiclass()

    def _compute_hypers_partial_derivatives_multiclass(self, cov_matrix, f_posterior, targets):
        """Partial derivatives for the multiclass case -- not implemented yet."""
        raise NotImplementedError
        # num_test_samples = targets.shape[0]
        # # W = - grad^2 log p(y|f)
        # w = np.zeros((num_test_samples, n_classes))  # TODO fill with sense
        # w = np.sqrt(w)
        # # L = cholesky(I + W^(1/2) * K * W^(1/2))
        # L = spl.cholesky(spm.identity(num_test_samples) + spm.diags(w).dot())

    def optimise_hyper_params_binary(self, cov_matrix, f_posterior, a, targets, hypers, cls):
        """Performs one gradient-ascent step on both hyper-parameters.

        Returns a new hyper dict; each value is floored at 0.5 via the
        element-wise np.max against np.array([0.5]).  Fixed learning
        rate of 0.01.
        """
        likelihood, delta_sigma, delta_lambda = self._compute_hypers_partial_derivatives_binary(cov_matrix, f_posterior,
                                                                                                a, targets, hypers, cls)
        new_hypers = {'sigma': np.max((hypers['sigma'] + 0.01*delta_sigma, np.array([0.5])), axis=0),
                      'lambda': np.max((hypers['lambda'] + 0.01*delta_lambda, np.array([0.5])), axis=0)}
        return new_hypers

    def _compute_hypers_partial_derivatives_binary(self, cov_matrix, f_posterior, a, targets, hypers, cls):
        """Computes the approximate log marginal likelihood and its partial
        derivatives w.r.t. hypers['sigma'] and hypers['lambda'] for a
        one-vs-rest binary sub-problem (class `cls`)."""
        num_samples = f_posterior.shape[0]
        binary_targets = BinaryLabels(class_one=cls).generate_labels(targets)
        pi = logistic(f_posterior)
        # W = - delta^2 log p(y|f) for the Bernoulli/logistic likelihood
        neg_hessian = pi - pi ** 2
        neg_hessian_sqrt = spm.diags(np.sqrt(neg_hessian), format='csc')
        # L = cholesky(I + W^(1/2) * K * W^(1/2))
        L = spl.cholesky((spm.identity(num_samples) +
                          neg_hessian_sqrt.dot(spm.csc_matrix(cov_matrix).dot(neg_hessian_sqrt)))
                         .toarray(), lower=True)
        approx_log_marg_likelihood = -0.5 * a.dot(f_posterior) \
                                     + np.sum(np.log(logistic(f_posterior))) \
                                     - np.sum(np.log(np.diagonal(L)))
        # R = W^(1/2) * L^T \ (L \ W^(1/2))
        R = neg_hessian_sqrt.dot(spsl.spsolve(spm.csc_matrix(L.T), spsl.spsolve(spm.csc_matrix(L), neg_hessian_sqrt)))
        C = spsl.spsolve(spm.csc_matrix(L), neg_hessian_sqrt.dot(cov_matrix))
        delta3_pi = (pi**2) * (1 - pi) - pi * ((1 - pi)**2)
        # -1/2 * diag(diag(K) - diag(C^T * C)) * delta^3 log p(y|f)
        s_2 = -0.5 * spm.diags(np.diagonal(cov_matrix) - np.diagonal(C.T.dot(C))).dot(delta3_pi)
        # compute derivative for hyper['sigma']
        derivative_matrix_sigma = 2.0 * cov_matrix / hypers['sigma']
        s_1 = 0.5 * a.dot(derivative_matrix_sigma.dot(a)) - 0.5 * np.trace(R.dot(derivative_matrix_sigma))
        b = derivative_matrix_sigma.dot(binary_targets - pi)
        s_3 = b - cov_matrix.dot(R.dot(b))
        delta_sigma = s_1 + s_2.dot(s_3)
        # compute derivative for hyper['lambda']
        derivative_matrix_lambda = cov_matrix * (-np.log(cov_matrix / (hypers['sigma']**2)) / hypers['lambda'])
        # BUG FIX: the trace term of the lambda gradient must use the lambda
        # derivative matrix; the original reused derivative_matrix_sigma here
        # (copy-paste from the sigma branch above).
        s_1 = 0.5 * a.dot(derivative_matrix_lambda.dot(a)) - 0.5 * np.trace(R.dot(derivative_matrix_lambda))
        b = derivative_matrix_lambda.dot(binary_targets - pi)
        s_3 = b - cov_matrix.dot(R.dot(b))
        delta_lambda = s_1 + s_2.dot(s_3)
        return approx_log_marg_likelihood, delta_sigma, delta_lambda
| [
"numpy.log",
"numerics.softmax.logistic",
"utils.label_factory.BinaryLabels",
"numpy.diagonal",
"scipy.sparse.csc_matrix",
"scipy.sparse.identity",
"numpy.array",
"numpy.sqrt"
] | [((1880, 1901), 'numerics.softmax.logistic', 'logistic', (['f_posterior'], {}), '(f_posterior)\n', (1888, 1901), False, 'from numerics.softmax import logistic\n'), ((2009, 2029), 'numpy.sqrt', 'np.sqrt', (['neg_hessian'], {}), '(neg_hessian)\n', (2016, 2029), True, 'import numpy as np\n'), ((2698, 2715), 'scipy.sparse.csc_matrix', 'spm.csc_matrix', (['L'], {}), '(L)\n', (2712, 2715), True, 'import scipy.sparse as spm\n'), ((1814, 1841), 'utils.label_factory.BinaryLabels', 'BinaryLabels', ([], {'class_one': 'cls'}), '(class_one=cls)\n', (1826, 1841), False, 'from utils.label_factory import BinaryLabels\n'), ((2600, 2619), 'scipy.sparse.csc_matrix', 'spm.csc_matrix', (['L.T'], {}), '(L.T)\n', (2614, 2619), True, 'import scipy.sparse as spm\n'), ((1478, 1493), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (1486, 1493), True, 'import numpy as np\n'), ((1583, 1598), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (1591, 1598), True, 'import numpy as np\n'), ((2493, 2507), 'numpy.diagonal', 'np.diagonal', (['L'], {}), '(L)\n', (2504, 2507), True, 'import numpy as np\n'), ((2634, 2651), 'scipy.sparse.csc_matrix', 'spm.csc_matrix', (['L'], {}), '(L)\n', (2648, 2651), True, 'import scipy.sparse as spm\n'), ((3446, 3487), 'numpy.log', 'np.log', (["(cov_matrix / hypers['sigma'] ** 2)"], {}), "(cov_matrix / hypers['sigma'] ** 2)\n", (3452, 3487), True, 'import numpy as np\n'), ((2121, 2146), 'scipy.sparse.identity', 'spm.identity', (['num_samples'], {}), '(num_samples)\n', (2133, 2146), True, 'import scipy.sparse as spm\n'), ((2414, 2435), 'numerics.softmax.logistic', 'logistic', (['f_posterior'], {}), '(f_posterior)\n', (2422, 2435), False, 'from numerics.softmax import logistic\n'), ((2910, 2933), 'numpy.diagonal', 'np.diagonal', (['cov_matrix'], {}), '(cov_matrix)\n', (2921, 2933), True, 'import numpy as np\n'), ((2196, 2222), 'scipy.sparse.csc_matrix', 'spm.csc_matrix', (['cov_matrix'], {}), '(cov_matrix)\n', (2210, 2222), True, 'import scipy.sparse 
as spm\n')] |
import numpy as np
import pickle
from scipy.spatial import distance_matrix
import codecs
import csv
import sklearn
from tqdm import tqdm
from sklearn import metrics
from sklearn.cluster import SpectralClustering
from sklearn.metrics import silhouette_score
from sklearn.cluster import KMeans
from sklearn_extra.cluster import KMedoids
from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt
from numpy import save
def calculate_WSS(points, kmin, kmax):
    """Runs k-means for every k in [kmin, kmax] and collects quality metrics.

    For each k returns the within-cluster sum of squares (elbow metric), the
    silhouette score, and a record of [labels, representatives], where each
    representative is [index, label] of the point with the largest dot
    product against a cluster center.
    """
    wss_values = []
    silhouette_values = []
    cluster_records = []
    for n_clusters in tqdm(range(kmin, kmax + 1)):
        model = KMeans(n_clusters=n_clusters, init='k-means++', algorithm='full',
                       max_iter=2000, n_init=50).fit(points)
        centers = model.cluster_centers_
        assignments = model.predict(points)
        labels = model.labels_
        # For every center, keep the first point maximising <center, point>.
        representatives = []
        for center in centers:
            projections = [np.dot(center, sample) for sample in points]
            best_idx = projections.index(max(projections))
            representatives.append([best_idx, labels[best_idx]])
        # Squared Euclidean distance of each point to its assigned center.
        within_ss = sum(np.linalg.norm(sample - centers[assignment]) ** 2
                        for sample, assignment in zip(points, assignments))
        wss_values.append(within_ss)
        silhouette_values.append(silhouette_score(points, labels, metric='euclidean'))
        cluster_records.append([labels, representatives])
    return wss_values, silhouette_values, cluster_records
# Xt = [ x.strip().split() for x in codecs.open("data/title_vectors.txt", "r", "utf-8").readlines() ]
# Xt = np.array(Xt).astype(np.float64)
# Xc = [ x.strip().split() for x in codecs.open("data/vectors.txt", "r", "utf-8").readlines() ]
# Xc = np.array(Xc).astype(np.float64)
# X=[]
# for i in range(len(Xt)):
# X.append([])
# for j in range(len(Xt[i])):
# X[i].append(Xt[i][j])
# for k in range(len(Xc[i])):
# X[i].append(Xc[i][k])
# Load pre-computed article embedding vectors: one whitespace-separated vector per line.
X = [ x.strip().split() for x in codecs.open("data/ap_c_vectors.txt", "r", "utf-8").readlines() ]
X = np.array(X).astype(np.float64)
print(len(X))
# eigen_solver='amg'
# clustering = sklearn.cluster.SpectralClustering(n_clusters=12,assign_labels="discretize",random_state=0,n_jobs=-1).fit(X)
# labels = clustering.labels_
# # n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
# n_clusters_ = len(set(labels))
# n_noise_ = list(labels).count(-1)
# print('Estimated number of clusters: %d' % n_clusters_)
# print('Estimated number of noise points: %d' % n_noise_)
# L2-normalise each vector so clustering operates on unit-length embeddings.
nX=[list(x/np.linalg.norm(x)) for x in X]
print(len(nX))
# Pairwise Euclidean distance matrix between all normalised vectors (saved later).
Dmat=distance_matrix(nX, nX)
print("|WSS and Silhouette score plotter|")
kmin=int(input("Enter value of kmin: "))
kmax=int(input("Enter value of kmax: "))
# Sweep k over [kmin, kmax], collecting WSS (elbow) and silhouette scores.
sse,sil,clusters=calculate_WSS(nX,kmin,kmax)
k=range(kmin, kmax+1)
# Pair each metric with its k values and dump them to CSV for later inspection.
silp=[k,sil]
ssep=[k,sse]
with open("sil.csv","w") as my_csv:
    csvWriter = csv.writer(my_csv,delimiter=',')
    csvWriter.writerows(silp)
with open("sse.csv","w") as my_csv:
    csvWriter = csv.writer(my_csv,delimiter=',')
    csvWriter.writerows(ssep)
kmin2=kmin
kmax2=kmax
ans='y'
# Interactive loop: re-plot the WSS/silhouette curves over a user-chosen
# k window until the user is satisfied with the view.
while ans=='y':
    kmini=k.index(kmin2)
    kmaxi=k.index(kmax2)+1
    plt.plot(k[kmini:kmaxi],sse[kmini:kmaxi],label='sse')
    plt.scatter(k[kmini:kmaxi],sse[kmini:kmaxi],label='sse')
    plt.savefig('WSS.png')
    plt.clf()
    plt.plot(k[kmini:kmaxi],sil[kmini:kmaxi],label='sil')
    plt.scatter(k[kmini:kmaxi],sil[kmini:kmaxi],label='sse')
    plt.savefig('Sil.png')
    plt.clf()
    ans=input("Do you want to continue(y/n)? : ")
    if ans=='y':
        kmin2=int(input("Enter value of kmin: "))
        kmax2=int(input("Enter value of kmax: "))
kopt=int(input("Enter optimum value of k based on WSS and Silhouette score plot: "))
kindex=k.index(kopt)
# Persist the chosen clustering (labels + per-cluster representative points)
# and the distance matrix for downstream use.
labels=clusters[kindex][0]
centroids=clusters[kindex][1]
with open("clusters.txt", "wb") as fp:
    pickle.dump(clusters[kindex], fp)
with open("clusters.txt", "rb") as fp:
    ldata=pickle.load(fp)
print(ldata)
save('Dmat.npy',Dmat)
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
# n_noise_ = list(labels).count(-1)
# db = DBSCAN(eps=10, min_samples=5 ,n_jobs=-1).fit(X)
# labels = db.labels_
# n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
# n_noise_ = list(labels).count(-1)
print('Estimated number of clusters: %d' % n_clusters_)
# print('Estimated number of noise points: %d' % n_noise_)
print("Silhouette Coefficient: %0.3f"
      % metrics.silhouette_score(X, labels))
# Bucket article indices by cluster label and report cluster sizes.
A=[]
for i in range(n_clusters_):
    A.append([])
for i in range(len(X)):
    if labels[i]!=-1:
        A[labels[i]-1].append(i)
for i in range(n_clusters_ ):
    print("No of Article in part ",i," :",len(A[i]))
| [
"pickle.dump",
"numpy.save",
"csv.writer",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"codecs.open",
"matplotlib.pyplot.scatter",
"sklearn.cluster.KMeans",
"scipy.spatial.distance_matrix",
"sklearn.metrics.silhouette_score",
"pickle.load",
"numpy.array",
"numpy.linalg.norm",
"numpy... | [((2566, 2589), 'scipy.spatial.distance_matrix', 'distance_matrix', (['nX', 'nX'], {}), '(nX, nX)\n', (2581, 2589), False, 'from scipy.spatial import distance_matrix\n'), ((3949, 3971), 'numpy.save', 'save', (['"""Dmat.npy"""', 'Dmat'], {}), "('Dmat.npy', Dmat)\n", (3953, 3971), False, 'from numpy import save\n'), ((2870, 2903), 'csv.writer', 'csv.writer', (['my_csv'], {'delimiter': '""","""'}), "(my_csv, delimiter=',')\n", (2880, 2903), False, 'import csv\n'), ((2988, 3021), 'csv.writer', 'csv.writer', (['my_csv'], {'delimiter': '""","""'}), "(my_csv, delimiter=',')\n", (2998, 3021), False, 'import csv\n'), ((3155, 3210), 'matplotlib.pyplot.plot', 'plt.plot', (['k[kmini:kmaxi]', 'sse[kmini:kmaxi]'], {'label': '"""sse"""'}), "(k[kmini:kmaxi], sse[kmini:kmaxi], label='sse')\n", (3163, 3210), True, 'import matplotlib.pyplot as plt\n'), ((3212, 3270), 'matplotlib.pyplot.scatter', 'plt.scatter', (['k[kmini:kmaxi]', 'sse[kmini:kmaxi]'], {'label': '"""sse"""'}), "(k[kmini:kmaxi], sse[kmini:kmaxi], label='sse')\n", (3223, 3270), True, 'import matplotlib.pyplot as plt\n'), ((3272, 3294), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""WSS.png"""'], {}), "('WSS.png')\n", (3283, 3294), True, 'import matplotlib.pyplot as plt\n'), ((3298, 3307), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3305, 3307), True, 'import matplotlib.pyplot as plt\n'), ((3311, 3366), 'matplotlib.pyplot.plot', 'plt.plot', (['k[kmini:kmaxi]', 'sil[kmini:kmaxi]'], {'label': '"""sil"""'}), "(k[kmini:kmaxi], sil[kmini:kmaxi], label='sil')\n", (3319, 3366), True, 'import matplotlib.pyplot as plt\n'), ((3368, 3426), 'matplotlib.pyplot.scatter', 'plt.scatter', (['k[kmini:kmaxi]', 'sil[kmini:kmaxi]'], {'label': '"""sse"""'}), "(k[kmini:kmaxi], sil[kmini:kmaxi], label='sse')\n", (3379, 3426), True, 'import matplotlib.pyplot as plt\n'), ((3428, 3450), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Sil.png"""'], {}), "('Sil.png')\n", (3439, 3450), True, 'import matplotlib.pyplot as 
plt\n'), ((3454, 3463), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3461, 3463), True, 'import matplotlib.pyplot as plt\n'), ((3835, 3868), 'pickle.dump', 'pickle.dump', (['clusters[kindex]', 'fp'], {}), '(clusters[kindex], fp)\n', (3846, 3868), False, 'import pickle\n'), ((3918, 3933), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (3929, 3933), False, 'import pickle\n'), ((2018, 2029), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2026, 2029), True, 'import numpy as np\n'), ((4421, 4456), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['X', 'labels'], {}), '(X, labels)\n', (4445, 4456), False, 'from sklearn import metrics\n'), ((1322, 1374), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['points', 'labels'], {'metric': '"""euclidean"""'}), "(points, labels, metric='euclidean')\n", (1338, 1374), False, 'from sklearn.metrics import silhouette_score\n'), ((2513, 2530), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (2527, 2530), True, 'import numpy as np\n'), ((586, 672), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k', 'init': '"""k-means++"""', 'algorithm': '"""full"""', 'max_iter': '(2000)', 'n_init': '(50)'}), "(n_clusters=k, init='k-means++', algorithm='full', max_iter=2000,\n n_init=50)\n", (592, 672), False, 'from sklearn.cluster import KMeans\n'), ((1248, 1268), 'numpy.linalg.norm', 'np.linalg.norm', (['diff'], {}), '(diff)\n', (1262, 1268), True, 'import numpy as np\n'), ((1948, 1998), 'codecs.open', 'codecs.open', (['"""data/ap_c_vectors.txt"""', '"""r"""', '"""utf-8"""'], {}), "('data/ap_c_vectors.txt', 'r', 'utf-8')\n", (1959, 1998), False, 'import codecs\n'), ((876, 891), 'numpy.dot', 'np.dot', (['cent', 'p'], {}), '(cent, p)\n', (882, 891), True, 'import numpy as np\n')] |
from random import sample
import numpy as np
import nnfs
class Loss:
    """Base class for losses.

    Computes the mean data loss from a concrete subclass's forward(), plus
    optional L1/L2 regularization penalties over the trainable layers, and
    keeps running sums so an epoch-level mean can be reported.
    """

    # Class-level defaults so the accumulators are valid even before the
    # first new_pass() call; `self.x += ...` then creates instance attrs.
    accumulated_sum = 0
    accumulated_count = 0

    def remember_trainable_layers(self, trainable_layers):
        """Stores the layers whose weights/biases are regularized."""
        self.trainable_layers = trainable_layers

    def regularization_loss(self, layer=None):
        """Returns the summed L1/L2 penalties over all trainable layers.

        The `layer` argument is kept for backward compatibility but is
        ignored; the penalty always spans self.trainable_layers.  (The
        original signature required it, which made the zero-argument calls
        in calculate()/calculate_accumulated() raise TypeError.)
        """
        regularization_loss = 0
        for layer in self.trainable_layers:
            if layer.weight_regularizer_l1 > 0:
                regularization_loss += layer.weight_regularizer_l1 * np.sum(np.abs(layer.weights))
            if layer.weight_regularizer_l2 > 0:
                regularization_loss += layer.weight_regularizer_l2 * np.sum(layer.weights**2)
            if layer.bias_regularizer_l1 > 0:
                regularization_loss += layer.bias_regularizer_l1 * np.sum(np.abs(layer.biases))
            if layer.bias_regularizer_l2 > 0:
                regularization_loss += layer.bias_regularizer_l2 * np.sum(layer.biases**2)
        return regularization_loss

    def calculate(self, output, y, *, include_regularization=False):
        """Returns the mean sample loss (and optionally the regularization loss)."""
        # Calculate sample loss
        sample_losses = self.forward(output, y)
        # Calculate mean loss
        data_loss = np.mean(sample_losses)
        # Accumulate for epoch-level statistics.
        self.accumulated_sum += np.sum(sample_losses)
        self.accumulated_count += len(sample_losses)
        if not include_regularization:
            return data_loss
        # Return Loss
        return data_loss, self.regularization_loss()

    def calculate_accumulated(self, *, include_regularization=False):
        """Returns the mean loss over everything seen since the last new_pass()."""
        data_loss = self.accumulated_sum / self.accumulated_count
        if not include_regularization:
            return data_loss
        return data_loss, self.regularization_loss()

    def new_pass(self):
        """Resets the epoch accumulators."""
        self.accumulated_sum = 0
        self.accumulated_count = 0
class Loss_CategoricalCrossEntropy(Loss):
    """Categorical cross-entropy for sparse integer or one-hot targets."""

    # Forward pass
    def forward(self, y_pred, y_true):
        """Returns per-sample negative log-likelihoods."""
        # Number of samples in each batch
        samples = len(y_pred)
        # Clip the data to prevent division by 0; clip both sides so the
        # mean is not dragged towards any value.
        y_pred_clipped = np.clip(y_pred, 1e-7, 1-1e-7)
        # Probabilities for target values only if categorical labels
        if len(y_true.shape) == 1:
            correct_confidence = y_pred_clipped[range(samples), y_true]
        # Mask values only for One Hot Encoded labels
        elif len(y_true.shape) == 2:
            correct_confidence = np.sum(y_pred_clipped * y_true, axis=1)
        # Losses
        negative_log_likelihoods = -np.log(correct_confidence)
        return negative_log_likelihoods

    def backward(self, dvalues, y_true):
        """Sets self.dinputs to the gradient of the loss w.r.t. dvalues."""
        # Number of samples
        samples = len(dvalues)
        # No of labels in each sample
        labels = len(dvalues[0])
        # If labels are sparse, turn them into a one-hot matrix.
        # BUG FIX: `labels` is an int, so the original np.eye(labels[y_true])
        # raised TypeError; the one-hot matrix is np.eye(labels)[y_true].
        if len(y_true.shape) == 1:
            y_true = np.eye(labels)[y_true]
        # Calculate gradients
        self.dinputs = -y_true/dvalues
        # Normalize gradients by batch size
        self.dinputs = self.dinputs/samples
class Loss_BinaryCrossEntropy(Loss):
    """Binary cross-entropy, averaged over the output axis per sample."""

    def forward(self, y_pred, y_true):
        # Clip predictions away from 0 and 1 so the logs stay finite.
        clipped = np.clip(y_pred, 1e-7, 1-1e-7)
        per_output = -(y_true * np.log(clipped) + (1 - y_true) * np.log(1 - clipped))
        # Mean over outputs gives one loss value per sample.
        return np.mean(per_output, axis=-1)

    def backward(self, dvalues, y_true):
        samples = len(dvalues)
        outputs = len(dvalues[0])
        # Clip for the same numerical-stability reason as in forward().
        clipped = np.clip(dvalues, 1e-7, 1-1e-7)
        grad = -(y_true / clipped - (1 - y_true) / (1 - clipped))
        # Normalize by output count, then by batch size.
        self.dinputs = grad / outputs / samples
class Loss_MeanSquaredError(Loss):
    """Mean squared error (L2) loss."""

    def forward(self, y_pred, y_true):
        # Average squared residual over the last (output) axis.
        return np.mean(np.square(y_true - y_pred), axis=-1)

    def backward(self, dvalues, y_true):
        samples = len(dvalues)
        outputs = len(dvalues[0])
        # d/dx (y - x)^2 = -2 (y - x); normalize by outputs then samples.
        self.dinputs = -2 * (y_true - dvalues) / outputs
        self.dinputs /= samples
class Loss_MeanAbsoluteError(Loss):
    """Mean absolute error (L1) loss."""

    def forward(self, y_pred, y_true):
        # Mean |residual| over the output axis.  BUG FIX: the original
        # squared the absolute difference (np.abs(...)**2, copy-pasted from
        # MSE), which computed MSE rather than MAE and disagreed with the
        # sign-based gradient in backward().
        sample_losses = np.mean(np.abs(y_true - y_pred), axis=-1)
        return sample_losses

    def backward(self, dvalues, y_true):
        samples = len(dvalues)
        outputs = len(dvalues[0])
        # d|x|/dx = sign(x); normalize by outputs then batch size.
        self.dinputs = np.sign(y_true - dvalues) / outputs
        self.dinputs = self.dinputs / samples
"numpy.sum",
"numpy.log",
"numpy.abs",
"numpy.clip",
"numpy.mean",
"numpy.sign",
"numpy.eye"
] | [((1213, 1235), 'numpy.mean', 'np.mean', (['sample_losses'], {}), '(sample_losses)\n', (1220, 1235), True, 'import numpy as np\n'), ((1270, 1291), 'numpy.sum', 'np.sum', (['sample_losses'], {}), '(sample_losses)\n', (1276, 1291), True, 'import numpy as np\n'), ((2170, 2203), 'numpy.clip', 'np.clip', (['y_pred', '(1e-07)', '(1 - 1e-07)'], {}), '(y_pred, 1e-07, 1 - 1e-07)\n', (2177, 2203), True, 'import numpy as np\n'), ((3226, 3259), 'numpy.clip', 'np.clip', (['y_pred', '(1e-07)', '(1 - 1e-07)'], {}), '(y_pred, 1e-07, 1 - 1e-07)\n', (3233, 3259), True, 'import numpy as np\n'), ((3384, 3415), 'numpy.mean', 'np.mean', (['sample_losses'], {'axis': '(-1)'}), '(sample_losses, axis=-1)\n', (3391, 3415), True, 'import numpy as np\n'), ((3581, 3615), 'numpy.clip', 'np.clip', (['dvalues', '(1e-07)', '(1 - 1e-07)'], {}), '(dvalues, 1e-07, 1 - 1e-07)\n', (3588, 3615), True, 'import numpy as np\n'), ((3860, 3900), 'numpy.mean', 'np.mean', (['((y_true - y_pred) ** 2)'], {'axis': '(-1)'}), '((y_true - y_pred) ** 2, axis=-1)\n', (3867, 3900), True, 'import numpy as np\n'), ((2595, 2621), 'numpy.log', 'np.log', (['correct_confidence'], {}), '(correct_confidence)\n', (2601, 2621), True, 'import numpy as np\n'), ((2955, 2977), 'numpy.eye', 'np.eye', (['labels[y_true]'], {}), '(labels[y_true])\n', (2961, 2977), True, 'import numpy as np\n'), ((4449, 4474), 'numpy.sign', 'np.sign', (['(y_true - dvalues)'], {}), '(y_true - dvalues)\n', (4456, 4474), True, 'import numpy as np\n'), ((2501, 2540), 'numpy.sum', 'np.sum', (['(y_pred_clipped * y_true)'], {'axis': '(1)'}), '(y_pred_clipped * y_true, axis=1)\n', (2507, 2540), True, 'import numpy as np\n'), ((4251, 4274), 'numpy.abs', 'np.abs', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (4257, 4274), True, 'import numpy as np\n'), ((666, 692), 'numpy.sum', 'np.sum', (['(layer.weights ** 2)'], {}), '(layer.weights ** 2)\n', (672, 692), True, 'import numpy as np\n'), ((947, 972), 'numpy.sum', 'np.sum', (['(layer.biases ** 2)'], {}), 
'(layer.biases ** 2)\n', (953, 972), True, 'import numpy as np\n'), ((3292, 3314), 'numpy.log', 'np.log', (['y_pred_clipped'], {}), '(y_pred_clipped)\n', (3298, 3314), True, 'import numpy as np\n'), ((3332, 3358), 'numpy.log', 'np.log', (['(1 - y_pred_clipped)'], {}), '(1 - y_pred_clipped)\n', (3338, 3358), True, 'import numpy as np\n'), ((526, 547), 'numpy.abs', 'np.abs', (['layer.weights'], {}), '(layer.weights)\n', (532, 547), True, 'import numpy as np\n'), ((812, 832), 'numpy.abs', 'np.abs', (['layer.biases'], {}), '(layer.biases)\n', (818, 832), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Functions for reading in a scan and generating a multislice plot for
sticking in reports
"""
import subprocess
import os
import itertools
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import nibabel as nib
from scipy import ndimage
from helpers import get_terminal
def par2nii(dcm, out_folder, path_to_dcm2nii='/Users/manusdonahue/Documents/Sky/mricron/dcm2nii'):
    """Converts a PAR/REC (or DICOM) file to compressed NIfTI via dcm2nii.

    Parameters
    ----------
    dcm : str
        path to the input file; the output stem is its filename minus the
        4-character extension (e.g. '.PAR').
    out_folder : str
        directory the converted file is written to.
    path_to_dcm2nii : str, optional
        location of the dcm2nii executable.  Previously hard-coded; exposed
        as a parameter (old value kept as the default) so the function is
        usable on other machines.

    Returns
    -------
    str
        expected path of the resulting .nii.gz file.
    """
    # -f y keeps the source filename; the other flags disable anonymization,
    # id/date/protocol/event insertion into the output name.
    conversion_command = f'{path_to_dcm2nii} -o {out_folder} -a n -i n -d n -p n -e n -f y -v n {dcm}'
    original_stem = get_terminal(dcm)[:-4]  # strip the 4-character extension
    subprocess.run(conversion_command, check=True, shell=True)
    return os.path.join(out_folder, f'{original_stem}.nii.gz') # this is the name of the output
def filter_zeroed_axial_slices(nii_data, thresh=0.99):
    """Drops axial slices dominated by non-positive pixels.

    NaNs are first replaced with -1 (so they count as bad pixels).  A slice
    is dropped when the fraction of pixels that are near zero or <= 0
    reaches `thresh`.  If `thresh` is falsy the NaN-replaced volume is
    returned without dropping any slices.  The input array is not modified.
    """
    volume = nii_data.copy()
    volume[np.isnan(volume)] = -1
    if not thresh:
        return volume
    retained = []
    for idx in range(volume.shape[2]):
        axial = volume[:, :, idx]
        bad = np.logical_or(np.isclose(axial, 0), axial <= 0)
        frac_bad = bad.sum() / axial.size
        # Keep the slice only while the bad-pixel fraction stays below thresh.
        retained.append(frac_bad < thresh)
    return volume[:, :, retained]
def compare_nii_images(niis, cmaps=None, cmaxes=None, save=True,
                       out_name=None, frames=6, ax_font_size=32):
    """Renders a 3-column multi-slice comparison figure for two NiFTIs.

    Column 1 shows axial slices of niis[0], column 3 shows niis[1], and the
    middle column overlays the second image on the first at 50% alpha.

    Parameters
    ----------
    niis : sequence of str
        paths to the two NiFTI files to compare.
    cmaps : list of two matplotlib colormaps, optional
        colormaps for the first and second image; defaults to
        [gray, inferno].  (Was a mutable default argument; now resolved
        inside the function.)
    cmaxes : list of two, optional
        per-image intensity ceilings; None entries fall back to percentile
        scaling.  Defaults to [None, None].
    save : bool
        if True the figure is written to out_name, otherwise shown.
    out_name : str, optional
        output path used when save is True.
    frames : int or list of int
        number of evenly spaced axial slices to show (int) or explicit
        slice indices (list).
    ax_font_size : int
        kept for interface parity with nii_image(); not used here.
    """
    # Resolve mutable defaults here rather than in the signature.
    if cmaps is None:
        cmaps = [matplotlib.cm.gray, matplotlib.cm.inferno]
    if cmaxes is None:
        cmaxes = [None, None]
    plt.style.use('dark_background')
    if type(frames) == int:
        nrows = frames
    elif type(frames) == list:
        nrows = len(frames)
    fig, axs = plt.subplots(nrows, 3, figsize=(3*3,nrows*1.95))
    fig.subplots_adjust(hspace=0.0, wspace=-0.4)
    data1 = nib.load(niis[0]).get_fdata()
    data2 = nib.load(niis[1]).get_fdata()
    datas = [data1,data2]
    num_slices = data1.shape[2] - 1 # num of axial slices
    if type(frames) == int:
        # Sample `frames` evenly spaced slices from the central 70% of the stack.
        a_fifth = int(num_slices * 0.15)
        step = ((num_slices-a_fifth) - (0+a_fifth)) / (frames - 1)
        selected_frames = [int((0+a_fifth) + step * i) for i in range(frames)]
    else:
        selected_frames = frames
    for cmap in cmaps:
        cmap.set_bad('black',1.)  # render NaN (masked background) as black
    for ax_row, f in zip(axs, selected_frames):
        ax_slice1 = ndimage.rotate(data1[:,:,f].T, 180)
        ax_slice1[np.isclose(ax_slice1,0)] = np.nan
        ax_slice1[ax_slice1 < 0] = np.nan
        ax_slice1 = np.fliplr(ax_slice1) # convert to radiological orientation
        ax_slice2 = ndimage.rotate(data2[:,:,f].T, 180)
        ax_slice2[np.isclose(ax_slice2,0)] = np.nan
        ax_slice2[ax_slice2 < 0] = np.nan
        ax_slice2 = np.fliplr(ax_slice2) # convert to radiological orientation
        # Per-image display range: explicit cmax, else percentile scaling.
        # For grayscale images cmax is interpreted as a percentile.
        vvals = [None,None]
        for i,val in enumerate(vvals):
            cmax = cmaxes[i]
            if cmaps[i] != matplotlib.cm.gray:
                if cmax is not None:
                    vvals[i] = [0, cmax]
                else:
                    vvals[i] = [0, round(np.nanpercentile(datas[i], 99.5),2)]
            else:
                if cmax is not None:
                    vvals[i] = [0, round(np.nanpercentile(datas[i], cmax),2)]
                else:
                    vvals[i] = [0, round(np.nanpercentile(datas[i], 97.5),2)]
        #print(f'vvals: {vvals}')
        im1 = ax_row[0].imshow(ax_slice1, interpolation='nearest', cmap=cmaps[0], vmin=vvals[0][0], vmax=vvals[0][1])
        ax_row[0].axis('off')
        im3 = ax_row[2].imshow(ax_slice2, interpolation='nearest', cmap=cmaps[1], vmin=vvals[1][0], vmax=vvals[1][1])
        ax_row[2].axis('off')
        im2p1 = ax_row[1].imshow(ax_slice1, interpolation='nearest', cmap=cmaps[0], vmin=vvals[0][0], vmax=vvals[0][1], alpha=1)
        im2p2 = ax_row[1].imshow(ax_slice2, interpolation='nearest', cmap=cmaps[1], vmin=vvals[1][0], vmax=vvals[1][1], alpha=0.5)
        ax_row[1].axis('off')
        # Colorbar tick spacing scales with the second image's ceiling.
        vmax = vvals[1][1]
        if vmax > 100:
            rounder = 0
            by = 20
        elif vmax > 50:
            rounder = 0
            by = 10
        elif vmax > 10:
            rounder = 0
            by = 5
        elif vmax > 1:
            rounder = 1
            by = 0.5
        else:
            rounder = 2
            by = 0.1
        vmax = round(vmax, rounder)
        if cmaps[1] != matplotlib.cm.gray:
            tks = list(np.arange(0, vmax, by))
            tks.append(vmax)
            if tks[-1] - tks[-2] < 0.35*by:
                del tks[-2] # if the last two ticks are very close together, delete the penultimate tick
            cbar_ax = fig.add_axes([0.1,0.055,0.8,0.015])
            fig.colorbar(im3, cbar_ax, orientation='horizontal', ticks=tks)
    #plt.tight_layout(0.2)
    if save:
        plt.savefig(out_name, dpi=200, bbox_inches='tight')
    else:
        plt.show()
    plt.rcParams.update(plt.rcParamsDefault)
def nii_image(nii, dimensions, out_name, cmap, cmax=None, save=True, specified_frames=None, ax_font_size=32):
    """
    Produces a png representing multiple AXIAL slices of a NiFTI
    Parameters
    ----------
    nii : str
        path to NiFTI in question.
    dimensions : tuple of int
        the dimensions of the subimages, (x,y). Produces x*y subplots.
    out_name : str
        name of the output image.
    cmap : str or matplotlib cmap
        matplotlib color map.
    cmax : float
        optional arg for setting colorbar/intensity thresholds. If cmap is
        grayscale, cmax is used to set the upper percentile threshold. Otherwise,
        cmax is the maximum value of the colorbar.
    specified_frames : list of int, optional
        explicit axial slice indices to display, overriding the defaults.
    save : bool
        if True, write the figure to out_name; otherwise show it interactively.
    ax_font_size : int
        font size applied via matplotlib rcParams before layout.
    Returns
    -------
    The thresholding value (upper percentile for grayscale, absolute value for all other cmaps).
    """
    plt.style.use('dark_background')
    img = nib.load(nii)
    data = img.get_fdata()
    #data = filter_zeroed_axial_slices(data)
    data = filter_zeroed_axial_slices(data, thresh=False)  # only replaces NaN with -1; keeps all slices
    num_slices = data.shape[2] - 1 # num of axial slices
    d0, d1 = dimensions
    """
    appropriate = False
    while not appropriate:
        num_subs = d0*d1
        if num_subs <= (num_slices+1):
            appropriate = True
        else:
            print(f'\n!!!!!\n\nNotice: not enough slices to fill plot. Reducing plot dimensions for {out_name}\n\n!!!!!\n')
            if d1 >= d0:
                d1 -= 1
            else:
                d0 -= 1
    if 0 in (d0, d1):
        raise Exception('Subplot dimensions cannot include 0')
    step = (num_slices - 0) / (num_subs - 1)
    frames = [int(0 + step * i) for i in range(num_subs)]
    """
    # Default slice window depends on the colormap: non-gray maps use the
    # mid-stack slices 10-39, grayscale uses slices 0-24.
    if cmap != matplotlib.cm.gray:
        frames = np.arange(10,40,1)
    else:
        frames = np.arange(0,25,1)
    if specified_frames:
        frames = specified_frames
    #print(f"FRAMES: {frames}")
    # Enumerate the (row, col) positions of the subplot grid.
    d0_l = [i for i in range(d0)]
    d1_l = [i for i in range(d1)]
    subplots = list(itertools.product(d0_l, d1_l))
    mult = 3
    fig, ax = plt.subplots(d0, d1, figsize=(d1*mult,d0*mult))
    # Intensity ceiling: explicit cmax or a high percentile; for grayscale,
    # cmax itself is treated as the percentile to use.
    if cmap != matplotlib.cm.gray:
        if cmax is not None:
            vmin, vmax = [0, cmax]
        else:
            vmin, vmax = [0, round(np.nanpercentile(data, 99.5),2)]
        """
        round the scaling to nearest 10 for CBF, nearest 0.1 for CVR and CVRMax, and nearest 10 for CVRDelay.
        """
        # Colorbar tick spacing (by) and rounding precision scale with vmax.
        if vmax > 100:
            rounder = 0
            by = 20
        elif vmax > 50:
            rounder = 0
            by = 10
        elif vmax > 10:
            rounder = 0
            by = 5
        elif vmax > 1:
            rounder = 1
            by = 0.5
        else:
            rounder = 2
            by = 0.1
        vmax = round(vmax, rounder)
        ret_max = vmax
    else:
        if cmax is not None:
            vmin, vmax = [0, round(np.nanpercentile(data, cmax),2)]
            ret_max = cmax
        else:
            vmin, vmax = [0, round(np.nanpercentile(data, 97.5),2)]
            ret_max = 97.5
    # print(vmin,vmax)
    # print(frames)
    # print(data.shape)
    cmap.set_bad('black',1.)  # render NaN (masked background) as black
    for (i,j), f in zip(subplots, frames):
        #print(f'FRAMING: {f}')
        # Rotate and flip each slice into radiological display orientation,
        # masking non-positive pixels as NaN so they draw black.
        ax_slice = ndimage.rotate(data[:,:,f].T, 180)
        ax_slice[np.isclose(ax_slice,0)] = np.nan
        ax_slice[ax_slice < 0] = np.nan
        ax_slice = np.fliplr(ax_slice) # convert to radiological orientation
        im = ax[i][j].imshow(ax_slice, interpolation='nearest', cmap=cmap, vmin=vmin, vmax=vmax)
        ax[i][j].axis('off')
    matplotlib.rcParams.update({'font.size': ax_font_size})
    plt.tight_layout(0.8)
    # Horizontal colorbar only for non-grayscale (quantitative) maps.
    if cmap != matplotlib.cm.gray:
        tks = list(np.arange(0, vmax, by))
        tks.append(vmax)
        if tks[-1] - tks[-2] < 0.35*by:
            del tks[-2] # if the last two ticks are very close together, delete the penultimate tick
        cbar_ax = fig.add_axes([0.1,0.055,0.8,0.015])
        fig.colorbar(im, cbar_ax, orientation='horizontal', ticks=tks)
    else:
        pass
    plt.subplots_adjust(wspace=0.000, hspace=0.000)
    if save:
        plt.savefig(out_name, dpi=200)
    else:
        plt.show()
    plt.rcParams.update(plt.rcParamsDefault)
    return ret_max
| [
"numpy.nanpercentile",
"numpy.isnan",
"matplotlib.pyplot.style.use",
"numpy.isclose",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"matplotlib.rcParams.update",
"matplotlib.pyplot.rcParams.update",
"helpers.get_terminal",
"itertools.product",
"matplotlib.pyplot.subplots",
... | [((614, 672), 'subprocess.run', 'subprocess.run', (['conversion_command'], {'check': '(True)', 'shell': '(True)'}), '(conversion_command, check=True, shell=True)\n', (628, 672), False, 'import subprocess\n'), ((689, 740), 'os.path.join', 'os.path.join', (['out_folder', 'f"""{original_stem}.nii.gz"""'], {}), "(out_folder, f'{original_stem}.nii.gz')\n", (701, 740), False, 'import os\n'), ((1005, 1023), 'numpy.isnan', 'np.isnan', (['the_data'], {}), '(the_data)\n', (1013, 1023), True, 'import numpy as np\n'), ((1836, 1868), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""dark_background"""'], {}), "('dark_background')\n", (1849, 1868), True, 'import matplotlib.pyplot as plt\n'), ((2004, 2057), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrows', '(3)'], {'figsize': '(3 * 3, nrows * 1.95)'}), '(nrows, 3, figsize=(3 * 3, nrows * 1.95))\n', (2016, 2057), True, 'import matplotlib.pyplot as plt\n'), ((5266, 5306), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['plt.rcParamsDefault'], {}), '(plt.rcParamsDefault)\n', (5285, 5306), True, 'import matplotlib.pyplot as plt\n'), ((6160, 6192), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""dark_background"""'], {}), "('dark_background')\n", (6173, 6192), True, 'import matplotlib.pyplot as plt\n'), ((6208, 6221), 'nibabel.load', 'nib.load', (['nii'], {}), '(nii)\n', (6216, 6221), True, 'import nibabel as nib\n'), ((7467, 7519), 'matplotlib.pyplot.subplots', 'plt.subplots', (['d0', 'd1'], {'figsize': '(d1 * mult, d0 * mult)'}), '(d0, d1, figsize=(d1 * mult, d0 * mult))\n', (7479, 7519), True, 'import matplotlib.pyplot as plt\n'), ((9067, 9122), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': ax_font_size}"], {}), "({'font.size': ax_font_size})\n", (9093, 9122), False, 'import matplotlib\n'), ((9127, 9148), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', (['(0.8)'], {}), '(0.8)\n', (9143, 9148), True, 'import matplotlib.pyplot as plt\n'), ((9586, 9629), 
'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.0)', 'hspace': '(0.0)'}), '(wspace=0.0, hspace=0.0)\n', (9605, 9629), True, 'import matplotlib.pyplot as plt\n'), ((9725, 9765), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['plt.rcParamsDefault'], {}), '(plt.rcParamsDefault)\n', (9744, 9765), True, 'import matplotlib.pyplot as plt\n'), ((582, 599), 'helpers.get_terminal', 'get_terminal', (['dcm'], {}), '(dcm)\n', (594, 599), False, 'from helpers import get_terminal\n'), ((2686, 2723), 'scipy.ndimage.rotate', 'ndimage.rotate', (['data1[:, :, f].T', '(180)'], {}), '(data1[:, :, f].T, 180)\n', (2700, 2723), False, 'from scipy import ndimage\n'), ((2836, 2856), 'numpy.fliplr', 'np.fliplr', (['ax_slice1'], {}), '(ax_slice1)\n', (2845, 2856), True, 'import numpy as np\n'), ((2924, 2961), 'scipy.ndimage.rotate', 'ndimage.rotate', (['data2[:, :, f].T', '(180)'], {}), '(data2[:, :, f].T, 180)\n', (2938, 2961), False, 'from scipy import ndimage\n'), ((3074, 3094), 'numpy.fliplr', 'np.fliplr', (['ax_slice2'], {}), '(ax_slice2)\n', (3083, 3094), True, 'import numpy as np\n'), ((5167, 5218), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_name'], {'dpi': '(200)', 'bbox_inches': '"""tight"""'}), "(out_name, dpi=200, bbox_inches='tight')\n", (5178, 5218), True, 'import matplotlib.pyplot as plt\n'), ((5237, 5247), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5245, 5247), True, 'import matplotlib.pyplot as plt\n'), ((7127, 7147), 'numpy.arange', 'np.arange', (['(10)', '(40)', '(1)'], {}), '(10, 40, 1)\n', (7136, 7147), True, 'import numpy as np\n'), ((7173, 7192), 'numpy.arange', 'np.arange', (['(0)', '(25)', '(1)'], {}), '(0, 25, 1)\n', (7182, 7192), True, 'import numpy as np\n'), ((7394, 7423), 'itertools.product', 'itertools.product', (['d0_l', 'd1_l'], {}), '(d0_l, d1_l)\n', (7411, 7423), False, 'import itertools\n'), ((8725, 8761), 'scipy.ndimage.rotate', 'ndimage.rotate', (['data[:, :, f].T', '(180)'], {}), 
'(data[:, :, f].T, 180)\n', (8739, 8761), False, 'from scipy import ndimage\n'), ((8869, 8888), 'numpy.fliplr', 'np.fliplr', (['ax_slice'], {}), '(ax_slice)\n', (8878, 8888), True, 'import numpy as np\n'), ((9656, 9686), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_name'], {'dpi': '(200)'}), '(out_name, dpi=200)\n', (9667, 9686), True, 'import matplotlib.pyplot as plt\n'), ((9705, 9715), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9713, 9715), True, 'import matplotlib.pyplot as plt\n'), ((1202, 1218), 'numpy.isclose', 'np.isclose', (['d', '(0)'], {}), '(d, 0)\n', (1212, 1218), True, 'import numpy as np\n'), ((1289, 1324), 'numpy.logical_or', 'np.logical_or', (['near_zero', 'less_zero'], {}), '(near_zero, less_zero)\n', (1302, 1324), True, 'import numpy as np\n'), ((2119, 2136), 'nibabel.load', 'nib.load', (['niis[0]'], {}), '(niis[0])\n', (2127, 2136), True, 'import nibabel as nib\n'), ((2161, 2178), 'nibabel.load', 'nib.load', (['niis[1]'], {}), '(niis[1])\n', (2169, 2178), True, 'import nibabel as nib\n'), ((2740, 2764), 'numpy.isclose', 'np.isclose', (['ax_slice1', '(0)'], {}), '(ax_slice1, 0)\n', (2750, 2764), True, 'import numpy as np\n'), ((2978, 3002), 'numpy.isclose', 'np.isclose', (['ax_slice2', '(0)'], {}), '(ax_slice2, 0)\n', (2988, 3002), True, 'import numpy as np\n'), ((4780, 4802), 'numpy.arange', 'np.arange', (['(0)', 'vmax', 'by'], {}), '(0, vmax, by)\n', (4789, 4802), True, 'import numpy as np\n'), ((8777, 8800), 'numpy.isclose', 'np.isclose', (['ax_slice', '(0)'], {}), '(ax_slice, 0)\n', (8787, 8800), True, 'import numpy as np\n'), ((9221, 9243), 'numpy.arange', 'np.arange', (['(0)', 'vmax', 'by'], {}), '(0, vmax, by)\n', (9230, 9243), True, 'import numpy as np\n'), ((7672, 7700), 'numpy.nanpercentile', 'np.nanpercentile', (['data', '(99.5)'], {}), '(data, 99.5)\n', (7688, 7700), True, 'import numpy as np\n'), ((8342, 8370), 'numpy.nanpercentile', 'np.nanpercentile', (['data', 'cmax'], {}), '(data, cmax)\n', (8358, 8370), True, 
'import numpy as np\n'), ((8451, 8479), 'numpy.nanpercentile', 'np.nanpercentile', (['data', '(97.5)'], {}), '(data, 97.5)\n', (8467, 8479), True, 'import numpy as np\n'), ((3439, 3471), 'numpy.nanpercentile', 'np.nanpercentile', (['datas[i]', '(99.5)'], {}), '(datas[i], 99.5)\n', (3455, 3471), True, 'import numpy as np\n'), ((3585, 3617), 'numpy.nanpercentile', 'np.nanpercentile', (['datas[i]', 'cmax'], {}), '(datas[i], cmax)\n', (3601, 3617), True, 'import numpy as np\n'), ((3685, 3717), 'numpy.nanpercentile', 'np.nanpercentile', (['datas[i]', '(97.5)'], {}), '(datas[i], 97.5)\n', (3701, 3717), True, 'import numpy as np\n')] |
# Copyright 2020 The T5 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines utilities for the tasks."""
import numpy as np
from transformers import T5Tokenizer
def round_stsb_target(label):
  """STSB maps two sentences to a floating point number between 1 and 5
  representing their semantic similarity. Since we are treating all tasks as
  text-to-text tasks we need to convert this floating point number to a string.
  The vast majority of the similarity score labels in STSB are in the set
  [0, 0.2, 0.4, ..., 4.8, 5.0]. So, we first round the number to the closest
  entry in this set, and then we convert the result to a string (literally e.g.
  "3.4"). This converts STSB roughly into a 26-class classification dataset.
  Args:
    label: original label.
  Returns:
    A preprocessed label.
  """
  # Round to the nearest multiple of 0.2: scale to fifths, round to the
  # nearest integer, then scale back. The previous expression
  # `np.round((label * 5) / 5, decimals=1)` simplified to rounding to the
  # nearest 0.1 (51 classes), contradicting the docstring above and the
  # original T5 preprocessing (`tf.round(label * 5) / 5`).
  return np.round(label * 5) / 5
def compute_task_max_decoding_length(word_list):
  """Return the longest tokenized length among the given words.

  Args:
    word_list: a list of strings.
  Returns:
    Maximum number of tokens the T5 tokenizer produces for any single
    word in the list (0 for an empty list).
  """
  tokenizer = T5Tokenizer.from_pretrained('t5-base')
  token_counts = (len(tokenizer.encode(word)) for word in word_list)
  return max(token_counts, default=0)
| [
"transformers.T5Tokenizer.from_pretrained",
"numpy.round"
] | [((1357, 1392), 'numpy.round', 'np.round', (['(label * 5 / 5)'], {'decimals': '(1)'}), '(label * 5 / 5, decimals=1)\n', (1365, 1392), True, 'import numpy as np\n'), ((1653, 1691), 'transformers.T5Tokenizer.from_pretrained', 'T5Tokenizer.from_pretrained', (['"""t5-base"""'], {}), "('t5-base')\n", (1680, 1691), False, 'from transformers import T5Tokenizer\n')] |
#!/usr/bin/python3
import numpy
import pyopencl as cl
from .simple_gene import SimpleGene
class SimpleChromosome:
    # SimpleChromosome - a chromosome contains a list of Genes.
    # __genes - a list of Genes
    # __name - name of the chromosome
    # __improving_func - a function name in kernel to gurantee a better mutation result.
    # dna - an listed of Gene's dna
    # dna_total_length - sum of the lenght of all genes's dna
    def __init__(self, genes, name = ''):
        # Every element must be a SimpleGene held in a plain list.
        assert all(isinstance(gene, SimpleGene) for gene in genes)
        assert type(genes) == list
        self.__genes = genes
        self.__name = name
        self.__improving_func = None
    @property
    def num_of_genes(self):
        # The number of genes inside this SimpleChromosome.
        return len(self.__genes)
    @property
    def name(self):
        # Human-readable identifier of this chromosome.
        return self.__name
    @property
    def dna_total_length(self):
        # Sum of the dna lenght of each gene.
        return sum([gene.length for gene in self.__genes])
    @property
    def dna(self):
        # The dna of every gene, collected into one list.
        return [gene.dna for gene in self.__genes]
    @dna.setter
    def dna(self, dna_sequence):
        # Assign each gene its dna; one entry per gene is required.
        assert self.num_of_genes == len(dna_sequence)
        for i, gene in enumerate(self.__genes):
            gene.dna = dna_sequence[i]
    @property
    def genes(self):
        return self.__genes
    @property
    def gene_elements(self):
        # Element alphabet of the genes. Only the first gene is consulted,
        # so this assumes all genes share the same alphabet.
        return [] if len(self.__genes) == 0 else self.__genes[0].elements
    @property
    def gene_elements_in_kernel(self):
        # Kernel-side representation of the element alphabet (first gene only).
        return [] if len(self.__genes) == 0 else self.__genes[0].elements_in_kernel
    @property
    def kernel_file(self):
        # OpenCL source file implementing this chromosome type.
        return 'simple_chromosome.cl'
    @property
    def struct_name(self):
        # Name of the matching C struct inside the kernel code.
        return '__SimpleChromosome';
    @property
    def chromosome_size_define(self):
        # Name of the kernel macro that holds the number of genes.
        return 'SIMPLE_CHROMOSOME_GENE_SIZE'
    def early_terminated(self, best , worst):
        # If the difference between the best and the worst is negligible,
        # terminate the program to save time.
        return abs(worst - best) < 0.0001
    def from_kernel_value(self, data):
        # Construct a SimpleChromosome object on system memory according to
        # the calculated 'data' on opencl(device) memory.
        assert len(data) == self.num_of_genes
        genes = [self.__genes[idx].from_kernel_value(v) for idx, v in enumerate(data)]
        return SimpleChromosome(genes, self.__name)
    def use_improving_only_mutation(self, helper_func_name):
        # Set a helper function to make sure a better mutation result.
        self.__improving_func = helper_func_name
    def kernelize(self):
        # - Build a str which contains c99-like codes. This str will be written
        #   into a final kernel document called 'final.cl' for execution.
        # - Gene elements, size, mutation function is pre-defined as MACRO for
        #   easier usage.
        elements_size_list = [str(gene.elements_length) for gene in self.__genes]
        candidates = '#define SIMPLE_CHROMOSOME_GENE_ELEMENTS_SIZE {' +\
                        ', '.join(elements_size_list) + '}\n'
        defines = '#define SIMPLE_CHROMOSOME_GENE_SIZE ' + str(self.num_of_genes) + '\n' +\
                  '#define SIMPLE_CHROMOSOME_GENE_MUTATE_FUNC ' +\
                  self.__genes[0].mutate_func_name + '\n'
        return candidates + defines
    def save(self, data, ctx, queue, population):
        # Snapshot device-side crossover state into the `data` dict so a run
        # can later be resumed via restore().
        total_dna_size = population * self.dna_total_length
        # prepare memory
        other_chromosomes = numpy.zeros(total_dna_size, dtype=numpy.int32)
        ratios = numpy.zeros(population, dtype=numpy.float32)
        # read data from cl
        # NOTE(review): cl.enqueue_read_buffer is deprecated in modern
        # PyOpenCL in favour of cl.enqueue_copy — confirm against the
        # pyopencl version this project pins.
        cl.enqueue_read_buffer(queue, self.__dev_ratios, ratios)
        cl.enqueue_read_buffer(queue, self.__dev_other_chromosomes, other_chromosomes).wait()
        # save all of them
        data['other_chromosomes'] = other_chromosomes
        data['ratios'] = ratios
    def restore(self, data, ctx, queue, population):
        # Inverse of save(): rebuild the device buffers from a snapshot dict.
        other_chromosomes = data['other_chromosomes']
        ratios = data['ratios']
        # prepare CL memory
        mf = cl.mem_flags
        self.__dev_ratios = cl.Buffer(ctx, mf.WRITE_ONLY, ratios.nbytes)
        self.__dev_other_chromosomes = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
                                                 hostbuf=other_chromosomes)
        # Copy data from main memory to GPU memory
        cl.enqueue_copy(queue, self.__dev_ratios, ratios)
        cl.enqueue_copy(queue, self.__dev_other_chromosomes, other_chromosomes)
    def preexecute_kernels(self, ctx, queue, population):
        # initialize global variables for kernel execution
        total_dna_size = population * self.dna_total_length
        other_chromosomes = numpy.zeros(total_dna_size, dtype=numpy.int32)
        ratios = numpy.zeros(population, dtype=numpy.float32)
        mf = cl.mem_flags
        # prepare device memory for usage.
        self.__dev_ratios = cl.Buffer(ctx, mf.WRITE_ONLY, ratios.nbytes)
        self.__dev_other_chromosomes = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
                                                 hostbuf=other_chromosomes)
    def get_populate_kernel_names(self):
        # Kernel(s) that create the initial random population.
        return ['simple_chromosome_populate']
    def get_crossover_kernel_names(self):
        # Kernels executed (in order) for one crossover step.
        return ['simple_chromosome_calc_ratio',\
                'simple_chromosome_pick_chromosomes',\
                'simple_chromosome_do_crossover']
    def get_mutation_kernel_names(self):
        # Kernel(s) executed for one mutation step.
        return ['simple_chromosome_mutate_all']
    def execute_populate(self, prg, queue, population, dev_chromosomes, dev_rnum):
        # One work-item per chromosome (global size = population, local size = 1).
        prg.simple_chromosome_populate(queue,
                                       (population,),
                                       (1,),
                                       dev_chromosomes,
                                       dev_rnum).wait()
    def selection_preparation(self, prg, queue, dev_fitnesses):
        # Single work-item kernel that fills self.__dev_ratios from the fitnesses.
        prg.simple_chromosome_calc_ratio(queue,
                                         (1,),
                                         (1,),
                                         dev_fitnesses,
                                         self.__dev_ratios).wait()
    def execute_get_current_elites(self, prg, queue, top,
                                   dev_chromosomes, dev_current_elites,
                                   dev_best_indices):
        # Copy the `top` best chromosomes (by index) into dev_current_elites.
        prg.simple_chromosome_get_the_elites(queue, (1,), (1,),
                                             dev_best_indices,
                                             dev_chromosomes,
                                             dev_current_elites,
                                             numpy.int32(top)).wait()
    def execute_update_current_elites(self, prg, queue, top, dev_worst_indices,
                                      dev_chromosomes, dev_updated_elites,
                                      dev_fitnesses, dev_updated_elite_fitness):
        # Overwrite the `top` worst chromosomes with the updated elites and
        # their fitness values.
        prg.simple_chromosome_update_the_elites(queue, (1,), (1,),
                                                numpy.int32(top),
                                                dev_worst_indices,
                                                dev_chromosomes,
                                                dev_updated_elites,
                                                dev_fitnesses,
                                                dev_updated_elite_fitness).wait()
    def execute_crossover(self, prg, queue, population, generation_idx, prob_crossover,
                          dev_chromosomes, dev_fitnesses, dev_rnum, best_fitness):
        # Two-phase crossover: first pick mating partners into
        # __dev_other_chromosomes using the precomputed ratios, then perform
        # the actual crossover with probability prob_crossover.
        prg.simple_chromosome_pick_chromosomes(queue,
                                               (population,),
                                               (1,),
                                               dev_chromosomes,
                                               dev_fitnesses,
                                               self.__dev_other_chromosomes,
                                               self.__dev_ratios,
                                               dev_rnum).wait()
        prg.simple_chromosome_do_crossover(queue,
                                           (population,),
                                           (1,),
                                           dev_chromosomes,
                                           dev_fitnesses,
                                           self.__dev_other_chromosomes,
                                           dev_rnum,
                                           numpy.float32(best_fitness),
                                           numpy.float32(prob_crossover)).wait()
    def execute_mutation(self, prg, queue, population, generation_idx, prob_mutate,
                         dev_chromosomes, dev_fitnesses, dev_rnum, extra_list):
        # Mutate every chromosome in place with probability prob_mutate.
        prg.simple_chromosome_mutate_all(queue,
                                         (population,),
                                         (1,),
                                         dev_chromosomes,
                                         dev_rnum,
                                         numpy.float32(prob_mutate)).wait()
| [
"pyopencl.enqueue_copy",
"pyopencl.enqueue_read_buffer",
"numpy.float32",
"numpy.zeros",
"pyopencl.Buffer",
"numpy.int32"
] | [((3556, 3602), 'numpy.zeros', 'numpy.zeros', (['total_dna_size'], {'dtype': 'numpy.int32'}), '(total_dna_size, dtype=numpy.int32)\n', (3567, 3602), False, 'import numpy\n'), ((3620, 3664), 'numpy.zeros', 'numpy.zeros', (['population'], {'dtype': 'numpy.float32'}), '(population, dtype=numpy.float32)\n', (3631, 3664), False, 'import numpy\n'), ((3701, 3757), 'pyopencl.enqueue_read_buffer', 'cl.enqueue_read_buffer', (['queue', 'self.__dev_ratios', 'ratios'], {}), '(queue, self.__dev_ratios, ratios)\n', (3723, 3757), True, 'import pyopencl as cl\n'), ((4187, 4231), 'pyopencl.Buffer', 'cl.Buffer', (['ctx', 'mf.WRITE_ONLY', 'ratios.nbytes'], {}), '(ctx, mf.WRITE_ONLY, ratios.nbytes)\n', (4196, 4231), True, 'import pyopencl as cl\n'), ((4271, 4346), 'pyopencl.Buffer', 'cl.Buffer', (['ctx', '(mf.READ_WRITE | mf.COPY_HOST_PTR)'], {'hostbuf': 'other_chromosomes'}), '(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=other_chromosomes)\n', (4280, 4346), True, 'import pyopencl as cl\n'), ((4455, 4504), 'pyopencl.enqueue_copy', 'cl.enqueue_copy', (['queue', 'self.__dev_ratios', 'ratios'], {}), '(queue, self.__dev_ratios, ratios)\n', (4470, 4504), True, 'import pyopencl as cl\n'), ((4513, 4584), 'pyopencl.enqueue_copy', 'cl.enqueue_copy', (['queue', 'self.__dev_other_chromosomes', 'other_chromosomes'], {}), '(queue, self.__dev_other_chromosomes, other_chromosomes)\n', (4528, 4584), True, 'import pyopencl as cl\n'), ((4792, 4838), 'numpy.zeros', 'numpy.zeros', (['total_dna_size'], {'dtype': 'numpy.int32'}), '(total_dna_size, dtype=numpy.int32)\n', (4803, 4838), False, 'import numpy\n'), ((4856, 4900), 'numpy.zeros', 'numpy.zeros', (['population'], {'dtype': 'numpy.float32'}), '(population, dtype=numpy.float32)\n', (4867, 4900), False, 'import numpy\n'), ((5000, 5044), 'pyopencl.Buffer', 'cl.Buffer', (['ctx', 'mf.WRITE_ONLY', 'ratios.nbytes'], {}), '(ctx, mf.WRITE_ONLY, ratios.nbytes)\n', (5009, 5044), True, 'import pyopencl as cl\n'), ((5084, 5159), 'pyopencl.Buffer', 
'cl.Buffer', (['ctx', '(mf.READ_WRITE | mf.COPY_HOST_PTR)'], {'hostbuf': 'other_chromosomes'}), '(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=other_chromosomes)\n', (5093, 5159), True, 'import pyopencl as cl\n'), ((3766, 3844), 'pyopencl.enqueue_read_buffer', 'cl.enqueue_read_buffer', (['queue', 'self.__dev_other_chromosomes', 'other_chromosomes'], {}), '(queue, self.__dev_other_chromosomes, other_chromosomes)\n', (3788, 3844), True, 'import pyopencl as cl\n'), ((6739, 6755), 'numpy.int32', 'numpy.int32', (['top'], {}), '(top)\n', (6750, 6755), False, 'import numpy\n'), ((7116, 7132), 'numpy.int32', 'numpy.int32', (['top'], {}), '(top)\n', (7127, 7132), False, 'import numpy\n'), ((8597, 8624), 'numpy.float32', 'numpy.float32', (['best_fitness'], {}), '(best_fitness)\n', (8610, 8624), False, 'import numpy\n'), ((8669, 8698), 'numpy.float32', 'numpy.float32', (['prob_crossover'], {}), '(prob_crossover)\n', (8682, 8698), False, 'import numpy\n'), ((9174, 9200), 'numpy.float32', 'numpy.float32', (['prob_mutate'], {}), '(prob_mutate)\n', (9187, 9200), False, 'import numpy\n')] |
# tests for the config reader module
import os
from attr import validate
import pytest
import pandas as pd
from numpy.testing import assert_almost_equal
from jsonschema.exceptions import ValidationError
from tardis.io import config_reader
from tardis.io.config_reader import Configuration
def data_path(filename):
    """Return the absolute path of *filename* inside this module's data folder."""
    base_dir = os.path.dirname(__file__)
    full_path = os.path.join(base_dir, "data", filename)
    return os.path.abspath(full_path)
def test_convergence_section_parser():
    """A per-quantity override ('t_rad') survives parsing while other
    quantities ('w') inherit the section-wide damping constant."""
    section = {
        "type": "damped",
        "lock_t_inner_cyles": 1,
        "t_inner_update_exponent": -0.5,
        "damping_constant": 0.5,
        "threshold": 0.05,
        "fraction": 0.8,
        "hold_iterations": 3,
        "t_rad": {"damping_constant": 1.0},
    }
    parsed = config_reader.parse_convergence_section(section)
    assert_almost_equal(parsed["t_rad"]["damping_constant"], 1.0)
    assert_almost_equal(parsed["w"]["damping_constant"], 0.5)
def test_from_config_dict(tardis_config_verysimple):
    """Build a Configuration from the verysimple dict and check that the
    spectrum limits survive; an invalid spectrum start must be rejected by
    schema validation."""
    conf = Configuration.from_config_dict(
        tardis_config_verysimple, validate=True, config_dirname="test"
    )
    assert conf.config_dirname == "test"
    assert_almost_equal(
        conf.spectrum.start.value,
        tardis_config_verysimple["spectrum"]["start"].value,
    )
    assert_almost_equal(
        conf.spectrum.stop.value,
        tardis_config_verysimple["spectrum"]["stop"].value,
    )
    # A non-quantity string must fail the JSON-schema validation step.
    tardis_config_verysimple["spectrum"]["start"] = "Invalid"
    with pytest.raises(ValidationError):
        conf = Configuration.from_config_dict(
            tardis_config_verysimple, validate=True, config_dirname="test"
        )
def test_config_hdf(hdf_file_path, tardis_config_verysimple):
    """A Configuration written with to_hdf can be read back via pandas and
    matches the value reported by get_properties()."""
    expected = Configuration.from_config_dict(
        tardis_config_verysimple, validate=True, config_dirname="test"
    )
    expected.to_hdf(hdf_file_path, overwrite=True)
    actual = pd.read_hdf(hdf_file_path, key="/simulation/config")
    expected = expected.get_properties()["config"]
    assert actual[0] == expected[0]
def test_model_section_config(tardis_config_verysimple):
    """
    Configuration Validation Test for Model Section of the Tardis Config YAML File
    Validates:
    Density: branch85_w7
    Velocity (Start < End)
    Parameter
    ---------
    `tardis_config_verysimple` : YAML File
    Result
    ------
    Assertion based on validation for specified values
    """
    conf = Configuration.from_config_dict(
        tardis_config_verysimple, validate=True, config_dirname="test"
    )
    assert conf.model.structure.density.type == "branch85_w7"
    # NOTE(review): the dict is mutated AFTER `conf` was built, so `conf`
    # still holds the original (valid) velocities; the ValueError inside
    # pytest.raises below is raised by the test body itself, not by the
    # library's validation.
    tardis_config_verysimple["model"]["structure"]["velocity"][
        "start"
    ] = "2.0e4 km/s"
    tardis_config_verysimple["model"]["structure"]["velocity"][
        "stop"
    ] = "1.1e4 km/s"
    with pytest.raises(ValueError) as ve:
        if (
            conf.model.structure.velocity.start
            < conf.model.structure.velocity.stop
        ):
            raise ValueError("Stop Value must be greater than Start Value")
    assert ve.type is ValueError
def test_supernova_section_config(tardis_config_verysimple):
    """
    Configuration Validation Test for Supernova Section of the Tardis Config YAML File
    Validates:
    Time of Explosion (Must always be positive)
    Luminosity Wavelength Limits (Start < End)
    Parameter
    ---------
    `tardis_config_verysimple` : YAML File
    Result
    ------
    Assertion based on validation for specified values
    """
    conf = Configuration.from_config_dict(
        tardis_config_verysimple, validate=True, config_dirname="test"
    )
    # NOTE(review): the mutations below happen after `conf` is built, so the
    # conditions inspect the original (valid) values and the ValueErrors are
    # raised by the test body itself, not by tardis validation.
    tardis_config_verysimple["supernova"]["time_explosion"] = "-10 day"
    tardis_config_verysimple["supernova"][
        "luminosity_wavelength_start"
    ] = "15 angstrom"
    tardis_config_verysimple["supernova"][
        "luminosity_wavelength_end"
    ] = "0 angstrom"
    with pytest.raises(ValueError) as ve:
        if conf.supernova.time_explosion.value > 0:
            raise ValueError("Time of Explosion cannot be negative")
    assert ve.type is ValueError
    with pytest.raises(ValueError) as ve:
        if (
            conf.supernova.luminosity_wavelength_start.value
            < conf.supernova.luminosity_wavelength_end.value
        ):
            raise ValueError(
                "End Limit must be greater than Start Limit for Luminosity"
            )
    assert ve.type is ValueError
def test_plasma_section_config(tardis_config_verysimple):
    """
    Configuration Validation Test for Plasma Section of the Tardis Config YAML File
    Validates:
    Initial temperature inner (must be greater than -1K)
    Initial radiative temperature (must be greater than -1K)
    Parameter
    ---------
    `tardis_config_verysimple` : YAML File
    Result
    ------
    Assertion based on validation for specified values
    """
    conf = Configuration.from_config_dict(
        tardis_config_verysimple, validate=True, config_dirname="test"
    )
    # NOTE(review): `conf` was created before these mutations, so the check
    # below runs against the original (valid) temperatures and the error is
    # raised by the test body itself.
    tardis_config_verysimple["plasma"]["initial_t_inner"] = "-100 K"
    tardis_config_verysimple["plasma"]["initial_t_rad"] = "-100 K"
    with pytest.raises(ValueError) as ve:
        if (conf.plasma.initial_t_inner.value >= -1) and (
            conf.plasma.initial_t_rad.value >= -1
        ):
            raise ValueError("Initial Temperatures are Invalid")
    assert ve.type is ValueError
def test_spectrum_section_config(tardis_config_verysimple):
    """
    Configuration Validation Test for Plasma Section of the Tardis Config YAML File
    Validates:
    Spectrum Start & End Limits (Start < End)
    Parameter
    ---------
    `tardis_config_verysimple` : YAML File
    Result
    ------
    Assertion based on validation for specified values
    """
    conf = Configuration.from_config_dict(
        tardis_config_verysimple, validate=True, config_dirname="test"
    )
    # NOTE(review): the mutation happens after `conf` is built, so the check
    # below uses the original (valid) limits and the ValueError is raised by
    # the test body itself, not by tardis.
    tardis_config_verysimple["spectrum"]["start"] = "2500 angstrom"
    tardis_config_verysimple["spectrum"]["stop"] = "500 angstrom"
    with pytest.raises(ValueError) as ve:
        if not conf.spectrum.stop.value < conf.spectrum.start.value:
            raise ValueError("Start Value must be less than Stop Value")
    assert ve.type is ValueError
| [
"pandas.read_hdf",
"numpy.testing.assert_almost_equal",
"os.path.dirname",
"tardis.io.config_reader.Configuration.from_config_dict",
"tardis.io.config_reader.parse_convergence_section",
"pytest.raises",
"os.path.join"
] | [((332, 357), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (347, 357), False, 'import os\n'), ((800, 865), 'tardis.io.config_reader.parse_convergence_section', 'config_reader.parse_convergence_section', (['test_convergence_section'], {}), '(test_convergence_section)\n', (839, 865), False, 'from tardis.io import config_reader\n'), ((885, 970), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["parsed_convergence_section['t_rad']['damping_constant']", '(1.0)'], {}), "(parsed_convergence_section['t_rad']['damping_constant'],\n 1.0)\n", (904, 970), False, 'from numpy.testing import assert_almost_equal\n'), ((986, 1063), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["parsed_convergence_section['w']['damping_constant']", '(0.5)'], {}), "(parsed_convergence_section['w']['damping_constant'], 0.5)\n", (1005, 1063), False, 'from numpy.testing import assert_almost_equal\n'), ((1144, 1242), 'tardis.io.config_reader.Configuration.from_config_dict', 'Configuration.from_config_dict', (['tardis_config_verysimple'], {'validate': '(True)', 'config_dirname': '"""test"""'}), "(tardis_config_verysimple, validate=True,\n config_dirname='test')\n", (1174, 1242), False, 'from tardis.io.config_reader import Configuration\n'), ((1299, 1403), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['conf.spectrum.start.value', "tardis_config_verysimple['spectrum']['start'].value"], {}), "(conf.spectrum.start.value, tardis_config_verysimple[\n 'spectrum']['start'].value)\n", (1318, 1403), False, 'from numpy.testing import assert_almost_equal\n'), ((1427, 1529), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['conf.spectrum.stop.value', "tardis_config_verysimple['spectrum']['stop'].value"], {}), "(conf.spectrum.stop.value, tardis_config_verysimple[\n 'spectrum']['stop'].value)\n", (1446, 1529), False, 'from numpy.testing import assert_almost_equal\n'), ((1863, 1961), 
'tardis.io.config_reader.Configuration.from_config_dict', 'Configuration.from_config_dict', (['tardis_config_verysimple'], {'validate': '(True)', 'config_dirname': '"""test"""'}), "(tardis_config_verysimple, validate=True,\n config_dirname='test')\n", (1893, 1961), False, 'from tardis.io.config_reader import Configuration\n'), ((2036, 2088), 'pandas.read_hdf', 'pd.read_hdf', (['hdf_file_path'], {'key': '"""/simulation/config"""'}), "(hdf_file_path, key='/simulation/config')\n", (2047, 2088), True, 'import pandas as pd\n'), ((2579, 2677), 'tardis.io.config_reader.Configuration.from_config_dict', 'Configuration.from_config_dict', (['tardis_config_verysimple'], {'validate': '(True)', 'config_dirname': '"""test"""'}), "(tardis_config_verysimple, validate=True,\n config_dirname='test')\n", (2609, 2677), False, 'from tardis.io.config_reader import Configuration\n'), ((3679, 3777), 'tardis.io.config_reader.Configuration.from_config_dict', 'Configuration.from_config_dict', (['tardis_config_verysimple'], {'validate': '(True)', 'config_dirname': '"""test"""'}), "(tardis_config_verysimple, validate=True,\n config_dirname='test')\n", (3709, 3777), False, 'from tardis.io.config_reader import Configuration\n'), ((5072, 5170), 'tardis.io.config_reader.Configuration.from_config_dict', 'Configuration.from_config_dict', (['tardis_config_verysimple'], {'validate': '(True)', 'config_dirname': '"""test"""'}), "(tardis_config_verysimple, validate=True,\n config_dirname='test')\n", (5102, 5170), False, 'from tardis.io.config_reader import Configuration\n'), ((5974, 6072), 'tardis.io.config_reader.Configuration.from_config_dict', 'Configuration.from_config_dict', (['tardis_config_verysimple'], {'validate': '(True)', 'config_dirname': '"""test"""'}), "(tardis_config_verysimple, validate=True,\n config_dirname='test')\n", (6004, 6072), False, 'from tardis.io.config_reader import Configuration\n'), ((385, 425), 'os.path.join', 'os.path.join', (['data_dir', '"""data"""', 'filename'], {}), 
"(data_dir, 'data', filename)\n", (397, 425), False, 'import os\n'), ((1620, 1650), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (1633, 1650), False, 'import pytest\n'), ((1667, 1765), 'tardis.io.config_reader.Configuration.from_config_dict', 'Configuration.from_config_dict', (['tardis_config_verysimple'], {'validate': '(True)', 'config_dirname': '"""test"""'}), "(tardis_config_verysimple, validate=True,\n config_dirname='test')\n", (1697, 1765), False, 'from tardis.io.config_reader import Configuration\n'), ((2962, 2987), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2975, 2987), False, 'import pytest\n'), ((4072, 4097), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4085, 4097), False, 'import pytest\n'), ((4269, 4294), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4282, 4294), False, 'import pytest\n'), ((5326, 5351), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5339, 5351), False, 'import pytest\n'), ((6226, 6251), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6239, 6251), False, 'import pytest\n')] |
import argparse
import numpy as np
import matplotlib, warnings
import torch
from Utils.Utils import str2bool
matplotlib.rcParams["figure.figsize"] = [10, 10]  # default figure size in inches
# Short aliases for the torch tensor constructors used in this module.
Tensor = torch.Tensor
FloatTensor = torch.FloatTensor
# Readable numeric printing: 4 decimal places, no scientific notation.
torch.set_printoptions(precision=4, sci_mode=False)
np.set_printoptions(precision=4, suppress=True)
from pytorch_lightning.loggers import TensorBoardLogger
def HParamParser(logger=False,
		 entity='ludwigwinkler',
		 project='arandomexperiment',
		 dataset=['mnist', 'fmnist', 'cifar10'][0],
		 max_epochs=2000,
		 fast_dev_run=False,
		 optim=['csgd', 'bayescsgd', 'stochcontrolsgd', 'sgd', 'adam', 'entropy_sgd'][2],
		 model=['cnn', 'nn', 'bnn'][0],
		 plot=True,
		 lr = 0.001,
		 num_MC=5,
		 batch_size=128,
		 prior=['1', 'laplace', 'laplace_clamp'][0],
		 verbose=True,
		 ):
	"""Build the hyper-parameter namespace for an experiment run.

	The function arguments only supply the *defaults* of the corresponding
	command-line flags; the returned values come from ``parser.parse_args()``
	and therefore also reflect ``sys.argv``.
	"""
	parser = argparse.ArgumentParser()
	# add PROGRAM level args
	parser.add_argument('--logger', '-logger', type=str2bool, default=logger)
	parser.add_argument('--entity', type=str, default=entity)
	parser.add_argument('--project', type=str, default=project)
	parser.add_argument('--experiment', type=str, default=None, help='hi there')
	parser.add_argument('--dataset', type=str, choices=['mnist', 'fmnist', 'cifar10'], default=dataset)
	parser.add_argument('--plot', type=str2bool, default=plot)
	parser.add_argument('--verbose', type=str2bool, default=verbose)
	parser.add_argument('--fast_dev_run', type=str2bool, default=fast_dev_run)
	parser.add_argument('--optim', type=str, default=optim)
	parser.add_argument('--lr', '-lr', type=float, default=lr)
	parser.add_argument('--max_epochs', type=int, default=max_epochs)
	parser.add_argument('--batch_size', '-batch_size', type=int, default=batch_size)
	parser.add_argument('--model', type=str, choices=['nn', 'cnn', 'bnn', 'cbnn', 'resnet18'], default=model)
	parser.add_argument('--num_MC', '-num_MC', type=int, default=num_MC)
	parser.add_argument('--prior', type=str, default=prior)
	parser.add_argument('--num_hidden', type=int, default=200)
	parser.add_argument('--num_channels', type=int, default=100)
	parser.add_argument('--entropy_sgd_langeviniters', type=int, default=100)
	# Fall back to CPU defaults (0 gpus / 0 workers) when CUDA is unavailable.
	parser.add_argument('--gpus', type=int, default=1 if torch.cuda.is_available() else 0)
	parser.add_argument('--num_workers', type=int, default=4 if torch.cuda.device_count()>1 else 0)
	hparams = parser.parse_args()
	# Derive the experiment name from model/dataset/optim when none was given.
	hparams.__dict__.update({'experiment': f"{hparams.model}_{hparams.dataset}_{hparams.optim}" if hparams.experiment is None else f"{hparams.experiment}_{hparams.model}_{hparams.dataset}_{hparams.optim}"})
	assert hparams.optim in ['sgd', 'adam', 'csgd', 'bayescsgd', 'stochcontrolsgd', 'entropy_sgd'], f"{hparams.optim} not a valid optimizer"
	assert hparams.model in ['nn', 'bnn', 'cnn', 'cbnn', 'resnet18'], f"{hparams.model} not a valid optimizer"
	# Catching model & optimizer pairs
	if hparams.model in ['nn', 'cnn', 'resnet18']:
		assert hparams.optim in ['csgd', 'sgd', 'adam', 'entropy_sgd'], f"Can't use {hparams.optim} with {hparams.model}"
	elif hparams.model in ['bnn', 'cbnn']:
		assert hparams.optim in ['bayescsgd', 'stochcontrolsgd', 'sgd', 'adam', 'entropy_sgd'], f"Can't use {hparams.optim} with {hparams.model}"
	if torch.cuda.device_count()>1: # for more gpus, scale batch_size and num_workers linearly for each gpu
		hparams.batch_size = hparams.batch_size*torch.cuda.device_count()
		hparams.num_workers = hparams.num_workers*torch.cuda.device_count()
return hparams | [
"numpy.set_printoptions",
"argparse.ArgumentParser",
"torch.cuda.device_count",
"torch.set_printoptions",
"torch.cuda.is_available"
] | [((215, 266), 'torch.set_printoptions', 'torch.set_printoptions', ([], {'precision': '(4)', 'sci_mode': '(False)'}), '(precision=4, sci_mode=False)\n', (237, 266), False, 'import torch\n'), ((267, 314), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'suppress': '(True)'}), '(precision=4, suppress=True)\n', (286, 314), True, 'import numpy as np\n'), ((813, 838), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (836, 838), False, 'import argparse\n'), ((3199, 3224), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3222, 3224), False, 'import torch\n'), ((3342, 3367), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3365, 3367), False, 'import torch\n'), ((3412, 3437), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3435, 3437), False, 'import torch\n'), ((2199, 2224), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2222, 2224), False, 'import torch\n'), ((2294, 2319), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2317, 2319), False, 'import torch\n')] |
import multiprocessing as mp
from itertools import repeat
import numpy as np
import scipy.stats as st
def map_parallel(function, iter_data, invariant_data=None, run_parallel=True):
    """Map *function* over *iter_data*, optionally in a multiprocessing pool.

    If *invariant_data* is given, each work item becomes the pair
    ``(invariant_data, item)``; otherwise items are passed through as-is.

    Args:
        function: picklable callable applied to each input element.
        iter_data: iterable of per-item inputs.
        invariant_data: optional value paired with every item.
        run_parallel: when True, distribute the work over a process pool.

    Returns:
        List of results, in input order.
    """
    if invariant_data is not None:
        inputs = zip(repeat(invariant_data), iter_data)
    else:
        inputs = iter_data
    if run_parallel:
        # The context manager guarantees pool teardown even when a worker
        # raises; the previous bare close()/join() leaked processes on error.
        with mp.Pool() as pool:
            results = pool.map(function, inputs)
    else:
        results = [function(val) for val in inputs]
    return results
class Wrapper(object):
    """Adapt a positional-argument function to ``(invariant, item)`` input
    pairs as produced by map_parallel.

    Either half may be a single value or a tuple; tuples are unpacked into
    positional arguments.
    """

    def __init__(self, function):
        self.function = function

    def __call__(self, inputs):
        invariant_data, iter_data = inputs
        if not isinstance(iter_data, tuple):
            iter_data = (iter_data,)
        # Check for "no invariant data" BEFORE tuple-wrapping: the previous
        # version wrapped None into (None,) first, so its `is not None` test
        # always succeeded and the function was wrongly called with None as
        # its first positional argument.
        if invariant_data is None:
            return self.function(*iter_data)
        if not isinstance(invariant_data, tuple):
            invariant_data = (invariant_data,)
        return self.function(*invariant_data, *iter_data)
def wrap_function(function):
    # Convenience factory: wrap *function* so it accepts (invariant, item) pairs.
    return Wrapper(function)
def extract_positions(results, positions):
    """For each requested position, yield the column results[...][pos] as a list."""
    return ([row[pos] for row in results] for pos in positions)
def aggregate_results(results, agg_func=np.mean, axis=0):
    """Stack *results* into an ndarray and reduce it along *axis* with *agg_func*."""
    stacked = np.array(results)
    return agg_func(stacked, axis=axis)
def mean_with_conf(results, axis=0, confidence=.95):
    """Mean along *axis* plus a symmetric Student-t confidence interval.

    Based on https://stackoverflow.com/questions/15033511/compute-a-confidence-interval-from-sample-data/34474255#34474255

    Args:
        results: array-like of samples.
        axis: axis along which to average.
        confidence: confidence level for the t interval.

    Returns:
        (means, (lower_err, upper_err)) where the errors are offsets
        relative to the mean (ready for e.g. plt.errorbar).
    """
    results = np.array(results)
    means = np.mean(results, axis=axis)
    # Bug fix: st.sem must reduce along the SAME axis as the mean; the
    # previous call used its default axis=0 regardless of `axis`, producing
    # wrong intervals whenever axis != 0.
    conf_intervals = st.t.interval(
        confidence,
        results.shape[axis] - 1,
        loc=means,
        scale=st.sem(results, axis=axis),
    )
    # make errors relative to the mean
    conf_intervals = (means - conf_intervals[0], conf_intervals[1] - means)
    return means, conf_intervals
| [
"numpy.mean",
"numpy.array",
"scipy.stats.sem",
"multiprocessing.Pool",
"itertools.repeat"
] | [((1485, 1502), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (1493, 1502), True, 'import numpy as np\n'), ((1515, 1542), 'numpy.mean', 'np.mean', (['results'], {'axis': 'axis'}), '(results, axis=axis)\n', (1522, 1542), True, 'import numpy as np\n'), ((347, 356), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (354, 356), True, 'import multiprocessing as mp\n'), ((1266, 1283), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (1274, 1283), True, 'import numpy as np\n'), ((238, 260), 'itertools.repeat', 'repeat', (['invariant_data'], {}), '(invariant_data)\n', (244, 260), False, 'from itertools import repeat\n'), ((1642, 1657), 'scipy.stats.sem', 'st.sem', (['results'], {}), '(results)\n', (1648, 1657), True, 'import scipy.stats as st\n')] |
import open3d
import os
import numpy as np
from util.point_cloud_util import load_labels, write_labels
from dataset.semantic_dataset import all_file_prefixes
def down_sample(
    dense_pcd_path, dense_label_path, sparse_pcd_path, sparse_label_path, processed_pcd_path, processed_label_path, voxel_size
):
    """Voxel-downsample one dense point cloud (and its labels, if present),
    writing a sparse .pcd, xyz+intensity .npz and per-voxel label outputs.

    NOTE(review): the progress prints below reference the module-level
    `file_prefix` loop variable from __main__, not a parameter — this
    function only works when called from that loop.
    """
    # Skip if done
    if os.path.isfile(sparse_pcd_path) and (
        not os.path.isfile(dense_label_path) or os.path.isfile(sparse_label_path)
    ):
        print("Skipped:", file_prefix)
        return
    else:
        print("Processing:", file_prefix)
    # Inputs
    dense_pcd = open3d.read_point_cloud(dense_pcd_path)
    # NOTE(review): bare except silently treats any failure (missing file,
    # parse error) as "no labels" — consider narrowing to the expected error.
    try:
        dense_labels = load_labels(dense_label_path)
    except:
        dense_labels = None
    # Skip label 0, we use explicit frees to reduce memory usage
    print("Num points:", np.asarray(dense_pcd.points).shape[0])
    if dense_labels is not None:
        non_zero_indexes = dense_labels != 0
        # Filter out label-0 points; reassigning via an empty Vector3dVector
        # first releases the old buffer before the copy.
        dense_points = np.asarray(dense_pcd.points)[non_zero_indexes]
        dense_pcd.points = open3d.Vector3dVector()
        dense_pcd.points = open3d.Vector3dVector(dense_points)
        #
        #xyz = dense_points.copy()
        #print(xyz.shape)
        del dense_points
        dense_colors = np.asarray(dense_pcd.colors)[non_zero_indexes]
        dense_pcd.colors = open3d.Vector3dVector()
        dense_pcd.colors = open3d.Vector3dVector(dense_colors)
        #
        #i = (dense_colors[:,0]).reshape(-1,1)
        #print(i.shape)
        del dense_colors
        #
        #dense_labels = dense_labels[non_zero_indexes]
        #data = np.concatenate((xyz, i), axis=1)
        #print(data.shape, dense_labels.shape)
        #np.savez(processed_label_path, dense_labels)
        #np.savez(processed_pcd_path, data)
        #del xyz, i, data
        print("Num points after 0-skip:", np.asarray(dense_pcd.points).shape[0])
    # Downsample points
    min_bound = dense_pcd.get_min_bound() - voxel_size * 0.5
    max_bound = dense_pcd.get_max_bound() + voxel_size * 0.5
    # cubics_ids maps each sparse point back to the dense point ids it covers.
    sparse_pcd, cubics_ids = open3d.voxel_down_sample_and_trace(
        dense_pcd, voxel_size, min_bound, max_bound, False
    )
    print("Num points after down sampling:", np.asarray(sparse_pcd.points).shape[0])
    open3d.write_point_cloud(sparse_pcd_path, sparse_pcd)
    print("Point cloud written to:", sparse_pcd_path)
    ########################
    sparse_pcd_npz = open3d.read_point_cloud(sparse_pcd_path)
    xyz = np.asarray(sparse_pcd_npz.points)
    i = np.asarray(sparse_pcd_npz.colors)
    #print('All Values:',i[:,:10])
    #print(i.min(), i.max())
    # Intensity is stored in the first color channel.
    i = (i[:,0]).reshape(-1,1)
    #print('Intensity(1):',i[:10])
    i *=255
    #print('Intensity(255):',i[:10])
    # Presumably undoes an earlier normalization and re-maps intensity to a
    # log scale — TODO(review): confirm against the data producer.
    i = (20*i - 2500)/10
    #print('Intensity(Ori):',i[:10])
    i = 10**(i/200)
    #print('Intensity(log):',i[:10])
    print(i.min(), i.max())
    #i = (i*255)-127
    #print(xyz.shape)
    #print(i.shape)
    data = np.concatenate((xyz, i), axis=1)
    print(data.shape)
    np.savez(processed_pcd_path, data)
    # Downsample labels
    if dense_labels is not None:
        sparse_labels = []
        for cubic_ids in cubics_ids:
            # -1 entries are padding; the voxel label is the majority vote
            # over the dense labels it contains.
            cubic_ids = cubic_ids[cubic_ids != -1]
            cubic_labels = dense_labels[cubic_ids]
            sparse_labels.append(np.bincount(cubic_labels).argmax())
        write_labels(sparse_label_path, sparse_labels)
        print("Labels written to:", sparse_label_path)
        sparse_labels = np.array(sparse_labels)
        print("Labels:", sparse_labels.shape)
        np.savez(processed_label_path, sparse_labels)
if __name__ == "__main__":
    # Edge length (in the cloud's units) of the down-sampling voxels.
    voxel_size = 0.05
    # By default
    # raw data: "dataset/semantic_raw"
    # downsampled data: "dataset/semantic_downsampled"
    current_dir = os.path.dirname(os.path.realpath(__file__))
    dataset_dir = os.path.join(current_dir, "dataset")
    raw_dir = os.path.join(dataset_dir, "intense_log")
    downsampled_dir = os.path.join(dataset_dir, "semantic_downsampled/xyzi_log") #test
    #processed_dir = os.path.join(dataset_dir, "semantic_downsampled/trial") #processed
    # Create downsampled_dir
    os.makedirs(downsampled_dir, exist_ok=True)
    #os.makedirs(processed_dir, exist_ok=True)
    for file_prefix in all_file_prefixes:
        # Paths
        dense_pcd_path = os.path.join(raw_dir, file_prefix + ".pcd")
        dense_label_path = os.path.join(raw_dir, file_prefix + ".labels")
        sparse_pcd_path = os.path.join(downsampled_dir, file_prefix + ".pcd")
        sparse_label_path = os.path.join(downsampled_dir, file_prefix + ".labels")
        processed_pcd_path = os.path.join(downsampled_dir, file_prefix + "_vertices.npz")
        processed_label_path = os.path.join(downsampled_dir, file_prefix + "_labels.npz")
        # Put down_sample in a function for garbage collection
        down_sample(
            dense_pcd_path,
            dense_label_path,
            sparse_pcd_path,
            sparse_label_path,
            processed_pcd_path,
            processed_label_path,
            voxel_size,
        )
| [
"os.makedirs",
"open3d.read_point_cloud",
"numpy.asarray",
"os.path.realpath",
"numpy.bincount",
"util.point_cloud_util.load_labels",
"util.point_cloud_util.write_labels",
"os.path.isfile",
"numpy.array",
"open3d.voxel_down_sample_and_trace",
"open3d.Vector3dVector",
"numpy.savez",
"os.path.... | [((596, 635), 'open3d.read_point_cloud', 'open3d.read_point_cloud', (['dense_pcd_path'], {}), '(dense_pcd_path)\n', (619, 635), False, 'import open3d\n'), ((2063, 2153), 'open3d.voxel_down_sample_and_trace', 'open3d.voxel_down_sample_and_trace', (['dense_pcd', 'voxel_size', 'min_bound', 'max_bound', '(False)'], {}), '(dense_pcd, voxel_size, min_bound,\n max_bound, False)\n', (2097, 2153), False, 'import open3d\n'), ((2254, 2307), 'open3d.write_point_cloud', 'open3d.write_point_cloud', (['sparse_pcd_path', 'sparse_pcd'], {}), '(sparse_pcd_path, sparse_pcd)\n', (2278, 2307), False, 'import open3d\n'), ((2408, 2448), 'open3d.read_point_cloud', 'open3d.read_point_cloud', (['sparse_pcd_path'], {}), '(sparse_pcd_path)\n', (2431, 2448), False, 'import open3d\n'), ((2459, 2492), 'numpy.asarray', 'np.asarray', (['sparse_pcd_npz.points'], {}), '(sparse_pcd_npz.points)\n', (2469, 2492), True, 'import numpy as np\n'), ((2506, 2539), 'numpy.asarray', 'np.asarray', (['sparse_pcd_npz.colors'], {}), '(sparse_pcd_npz.colors)\n', (2516, 2539), True, 'import numpy as np\n'), ((2940, 2972), 'numpy.concatenate', 'np.concatenate', (['(xyz, i)'], {'axis': '(1)'}), '((xyz, i), axis=1)\n', (2954, 2972), True, 'import numpy as np\n'), ((2999, 3033), 'numpy.savez', 'np.savez', (['processed_pcd_path', 'data'], {}), '(processed_pcd_path, data)\n', (3007, 3033), True, 'import numpy as np\n'), ((3847, 3883), 'os.path.join', 'os.path.join', (['current_dir', '"""dataset"""'], {}), "(current_dir, 'dataset')\n", (3859, 3883), False, 'import os\n'), ((3898, 3938), 'os.path.join', 'os.path.join', (['dataset_dir', '"""intense_log"""'], {}), "(dataset_dir, 'intense_log')\n", (3910, 3938), False, 'import os\n'), ((3961, 4019), 'os.path.join', 'os.path.join', (['dataset_dir', '"""semantic_downsampled/xyzi_log"""'], {}), "(dataset_dir, 'semantic_downsampled/xyzi_log')\n", (3973, 4019), False, 'import os\n'), ((4148, 4191), 'os.makedirs', 'os.makedirs', (['downsampled_dir'], {'exist_ok': 
'(True)'}), '(downsampled_dir, exist_ok=True)\n', (4159, 4191), False, 'import os\n'), ((333, 364), 'os.path.isfile', 'os.path.isfile', (['sparse_pcd_path'], {}), '(sparse_pcd_path)\n', (347, 364), False, 'import os\n'), ((668, 697), 'util.point_cloud_util.load_labels', 'load_labels', (['dense_label_path'], {}), '(dense_label_path)\n', (679, 697), False, 'from util.point_cloud_util import load_labels, write_labels\n'), ((1044, 1067), 'open3d.Vector3dVector', 'open3d.Vector3dVector', ([], {}), '()\n', (1065, 1067), False, 'import open3d\n'), ((1095, 1130), 'open3d.Vector3dVector', 'open3d.Vector3dVector', (['dense_points'], {}), '(dense_points)\n', (1116, 1130), False, 'import open3d\n'), ((1325, 1348), 'open3d.Vector3dVector', 'open3d.Vector3dVector', ([], {}), '()\n', (1346, 1348), False, 'import open3d\n'), ((1376, 1411), 'open3d.Vector3dVector', 'open3d.Vector3dVector', (['dense_colors'], {}), '(dense_colors)\n', (1397, 1411), False, 'import open3d\n'), ((3344, 3390), 'util.point_cloud_util.write_labels', 'write_labels', (['sparse_label_path', 'sparse_labels'], {}), '(sparse_label_path, sparse_labels)\n', (3356, 3390), False, 'from util.point_cloud_util import load_labels, write_labels\n'), ((3470, 3493), 'numpy.array', 'np.array', (['sparse_labels'], {}), '(sparse_labels)\n', (3478, 3493), True, 'import numpy as np\n'), ((3548, 3593), 'numpy.savez', 'np.savez', (['processed_label_path', 'sparse_labels'], {}), '(processed_label_path, sparse_labels)\n', (3556, 3593), True, 'import numpy as np\n'), ((3801, 3827), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3817, 3827), False, 'import os\n'), ((4323, 4366), 'os.path.join', 'os.path.join', (['raw_dir', "(file_prefix + '.pcd')"], {}), "(raw_dir, file_prefix + '.pcd')\n", (4335, 4366), False, 'import os\n'), ((4394, 4440), 'os.path.join', 'os.path.join', (['raw_dir', "(file_prefix + '.labels')"], {}), "(raw_dir, file_prefix + '.labels')\n", (4406, 4440), False, 'import os\n'), ((4467, 
4518), 'os.path.join', 'os.path.join', (['downsampled_dir', "(file_prefix + '.pcd')"], {}), "(downsampled_dir, file_prefix + '.pcd')\n", (4479, 4518), False, 'import os\n'), ((4547, 4601), 'os.path.join', 'os.path.join', (['downsampled_dir', "(file_prefix + '.labels')"], {}), "(downsampled_dir, file_prefix + '.labels')\n", (4559, 4601), False, 'import os\n'), ((4631, 4691), 'os.path.join', 'os.path.join', (['downsampled_dir', "(file_prefix + '_vertices.npz')"], {}), "(downsampled_dir, file_prefix + '_vertices.npz')\n", (4643, 4691), False, 'import os\n'), ((4723, 4781), 'os.path.join', 'os.path.join', (['downsampled_dir', "(file_prefix + '_labels.npz')"], {}), "(downsampled_dir, file_prefix + '_labels.npz')\n", (4735, 4781), False, 'import os\n'), ((419, 452), 'os.path.isfile', 'os.path.isfile', (['sparse_label_path'], {}), '(sparse_label_path)\n', (433, 452), False, 'import os\n'), ((970, 998), 'numpy.asarray', 'np.asarray', (['dense_pcd.points'], {}), '(dense_pcd.points)\n', (980, 998), True, 'import numpy as np\n'), ((1251, 1279), 'numpy.asarray', 'np.asarray', (['dense_pcd.colors'], {}), '(dense_pcd.colors)\n', (1261, 1279), True, 'import numpy as np\n'), ((383, 415), 'os.path.isfile', 'os.path.isfile', (['dense_label_path'], {}), '(dense_label_path)\n', (397, 415), False, 'import os\n'), ((829, 857), 'numpy.asarray', 'np.asarray', (['dense_pcd.points'], {}), '(dense_pcd.points)\n', (839, 857), True, 'import numpy as np\n'), ((2209, 2238), 'numpy.asarray', 'np.asarray', (['sparse_pcd.points'], {}), '(sparse_pcd.points)\n', (2219, 2238), True, 'import numpy as np\n'), ((1847, 1875), 'numpy.asarray', 'np.asarray', (['dense_pcd.points'], {}), '(dense_pcd.points)\n', (1857, 1875), True, 'import numpy as np\n'), ((3291, 3316), 'numpy.bincount', 'np.bincount', (['cubic_labels'], {}), '(cubic_labels)\n', (3302, 3316), True, 'import numpy as np\n')] |
"""
Read a mux2 file.
"""
from __future__ import absolute_import
from builtins import range
from anuga.utilities.numerical_tools import ensure_numeric
import numpy as num
################################################################################
# READ MUX2 FILES line of points
################################################################################
WAVEHEIGHT_MUX_LABEL = '-z-mux'
EAST_VELOCITY_LABEL = '-e-mux'
NORTH_VELOCITY_LABEL = '-n-mux'
WAVEHEIGHT_MUX2_LABEL = '-z-mux2'
EAST_VELOCITY_MUX2_LABEL = '-e-mux2'
NORTH_VELOCITY_MUX2_LABEL = '-n-mux2'
def read_mux2_py(filenames,
                 weights=None,
                 permutation=None,
                 verbose=False):
    """Read timeseries data from a list of mux2 files and combine it.

    The sources named in ``filenames`` are combined as a weighted linear
    sum, one weight per source.  When ``permutation`` is None or empty,
    timeseries for every gauge present in the files are extracted;
    otherwise only the listed gauge numbers are returned.

    Parameters
    ----------
    filenames : list
        Paths of the mux2 files, one per source.
    weights : array-like, optional
        Weight associated with each source (defaults to 1 per source).
    permutation : array-like of int, optional
        Gauge numbers for which data is to be extracted.
    verbose : bool, optional
        Forwarded to the C extension as an int flag.

    Returns
    -------
    tuple
        (times, latitudes, longitudes, elevation, quantity, starttime)
    """
    from .urs_ext import read_mux2

    numSrc = len(filenames)

    # Filled in by the C extension: [nsta, dt, nt].
    file_params = -1 * num.ones(3, float)

    # The C code expects verbose as an integer flag.
    verbose = 1 if verbose else 0

    if weights is None:
        weights = num.ones(numSrc)

    if permutation is None:
        permutation = ensure_numeric([], int)

    # The underlying C implementation (urs2sts_ext.c) wants byte strings.
    cast_filenames = [str(filename).encode() for filename in filenames]

    data = read_mux2(numSrc, cast_filenames, weights, file_params,
                     permutation, verbose)

    msg = 'File parameter values were not read in correctly from c file'
    assert len(num.compress(file_params > 0, file_params)) != 0, msg

    msg = 'The number of stations specifed in the c array and in the file ' \
          'are inconsistent'
    assert file_params[0] >= len(permutation), msg

    msg = 'The number of stations returned is inconsistent with ' \
          'the requested number'
    assert len(permutation) == 0 or len(permutation) == data.shape[0], msg

    nsta = int(file_params[0])
    msg = 'Must have at least one station'
    assert nsta > 0, msg

    dt = file_params[1]
    msg = 'Must have a postive timestep'
    assert dt > 0, msg

    nt = int(file_params[2])
    msg = 'Must have at least one gauge value'
    assert nt > 0, msg

    # Each returned row carries OFFSET trailing site parameters:
    # p = [geolat, geolon, depth, start_tstep, finish_tstep]
    OFFSET = 5

    # Number of stations in ordering file
    number_of_selected_stations = data.shape[0]

    # Column where the timeseries ends and the site parameters begin.
    parameters_index = data.shape[1] - OFFSET

    times = dt * num.arange(parameters_index)
    latitudes = num.zeros(number_of_selected_stations, float)
    longitudes = num.zeros(number_of_selected_stations, float)
    elevation = num.zeros(number_of_selected_stations, float)
    quantity = num.zeros((number_of_selected_stations, parameters_index),
                         float)
    starttime = 1e16
    for i in range(number_of_selected_stations):
        row = data[i]
        quantity[i, :] = row[:parameters_index]
        latitudes[i] = row[parameters_index]
        longitudes[i] = row[parameters_index + 1]
        elevation[i] = -row[parameters_index + 2]
        first_time_step = row[parameters_index + 3]
        starttime = min(dt * first_time_step, starttime)

    return times, latitudes, longitudes, elevation, quantity, starttime
| [
"anuga.utilities.numerical_tools.ensure_numeric",
"numpy.zeros",
"numpy.ones",
"numpy.arange",
"numpy.compress",
"builtins.range"
] | [((3561, 3606), 'numpy.zeros', 'num.zeros', (['number_of_selected_stations', 'float'], {}), '(number_of_selected_stations, float)\n', (3570, 3606), True, 'import numpy as num\n'), ((3624, 3669), 'numpy.zeros', 'num.zeros', (['number_of_selected_stations', 'float'], {}), '(number_of_selected_stations, float)\n', (3633, 3669), True, 'import numpy as num\n'), ((3686, 3731), 'numpy.zeros', 'num.zeros', (['number_of_selected_stations', 'float'], {}), '(number_of_selected_stations, float)\n', (3695, 3731), True, 'import numpy as num\n'), ((3747, 3812), 'numpy.zeros', 'num.zeros', (['(number_of_selected_stations, parameters_index)', 'float'], {}), '((number_of_selected_stations, parameters_index), float)\n', (3756, 3812), True, 'import numpy as num\n'), ((3902, 3936), 'builtins.range', 'range', (['number_of_selected_stations'], {}), '(number_of_selected_stations)\n', (3907, 3936), False, 'from builtins import range\n'), ((1465, 1483), 'numpy.ones', 'num.ones', (['(3)', 'float'], {}), '(3, float)\n', (1473, 1483), True, 'import numpy as num\n'), ((1664, 1680), 'numpy.ones', 'num.ones', (['numSrc'], {}), '(numSrc)\n', (1672, 1680), True, 'import numpy as num\n'), ((1732, 1755), 'anuga.utilities.numerical_tools.ensure_numeric', 'ensure_numeric', (['[]', 'int'], {}), '([], int)\n', (1746, 1755), False, 'from anuga.utilities.numerical_tools import ensure_numeric\n'), ((3516, 3544), 'numpy.arange', 'num.arange', (['parameters_index'], {}), '(parameters_index)\n', (3526, 3544), True, 'import numpy as num\n'), ((2118, 2160), 'numpy.compress', 'num.compress', (['(file_params > 0)', 'file_params'], {}), '(file_params > 0, file_params)\n', (2130, 2160), True, 'import numpy as num\n')] |
import numpy as np
from eda.optimizer.eda_base import EDABase
from eda.optimizer.util import SubSet, Cache
class ECGA(EDABase):
    """
    A class of extended compact genetic algorithm (ECGA).

    ECGA greedily groups variables into clusters (a marginal product
    model, MPM) by maximizing the reduction of a combined complexity
    score (``cc`` on each SubSet), then samples new candidates cluster
    by cluster.
    """
    def __init__(self, categories, replacement,
                 selection=None, lam=500, theta_init=None):
        """
        Parameters
        ----------
        categories : array-like
            Per-variable category counts, forwarded to EDABase.
        replacement : eda.optimizer.replacement.replacement_base.ReplacementBase
            Replacement method.
        selection : eda.optimizer.selection.selection_base.SelectionBase, default None
            Selection method.
        lam : int, default 500
            Population size (shrunk by the replacement rate after the
            first generation; see ``update``).
        theta_init : optional
            Initial distribution parameters, forwarded to EDABase.
        """
        super(ECGA, self).__init__(categories, lam=lam, theta_init=theta_init)
        self.replacement = replacement
        self.selection = selection
        # Lazily initialized on the first call to ``update``.
        self.population = None
        self.fitness = None
        self.cluster = None
    def update(self, x, evals, range_restriction=False):
        """Absorb evaluated samples and rebuild the MPM clustering.

        Parameters
        ----------
        x : numpy.ndarray
            One-hot encoded samples; axis 2 holds the categories
            (collapsed below via argmax).
        evals : numpy.ndarray
            Fitness values of the samples.
        range_restriction : bool, default False
            Not referenced in this body; presumably kept for interface
            compatibility with other EDABase optimizers.
        """
        x, evals = self._preprocess(x, evals)
        if self.selection is not None:
            x, evals = self.selection(x, evals)
        # Collapse one-hot encoding into integer category indices.
        x = np.argmax(x, axis=2)
        if self.population is None:
            # First generation: adopt the samples wholesale and shrink lam
            # so later generations only produce the replacement share.
            self.population = x
            self.fitness = evals
            self.lam = int(self.lam * self.replacement.replace_rate)
        else:
            self.population, self.fitness = self.replacement(self.population,
                                                             self.fitness,
                                                             x,
                                                             evals)
        self.cluster = self.greedy_mpm_search(self.population)
    def greedy_mpm_search(self, population):
        """
        Build a greedy marginal product model.

        Starting from one singleton cluster per variable, repeatedly
        merge the pair of clusters with the largest positive complexity
        gain, until no merge improves the model.

        Parameters
        ----------
        population : numpy.ndarray
            Population.

        Returns
        -------
        Variables after clustering.
        """
        # initialize subset of cluster (one SubSet per variable column)
        cluster = [SubSet(i, population[:, i], self.Cmax) for i in range(self.d)]
        # initialize cache of pairwise merge gains
        cache = self.initialize_mpm(cluster)
        # clustering according to CCO
        while True:
            pos_i, pos_j = cache.argmax_cc()
            if cache.cc_list[pos_i, pos_j] <= 0:
                # No remaining merge reduces complexity: stop.
                break
            # Replace cluster i with the cached merge of (i, j); drop j.
            cluster[pos_i] = cache.subsets[pos_i, pos_j]
            cluster.pop(pos_j)
            cache.remove(pos_j)
            # Refresh cached gains between the new cluster and all others.
            for k in range(len(cluster)):
                if k == pos_i:
                    continue
                i, k = (k, pos_i) if k < pos_i else (pos_i, k)  # keep i < k
                merge = cluster[i].merge(cluster[k])
                cache.add(i, k, cluster[i].cc + cluster[k].cc - merge.cc, merge)
        return cluster
    def initialize_mpm(self, cluster):
        """
        Initialize a marginal product model.

        Precomputes, for every pair of singleton clusters, the merged
        subset and the complexity gain of merging them.

        Parameters
        ----------
        cluster : list
            cluster group.

        Returns
        -------
        eda.optimizer.util.cache.Cache
            Cache object for fast computation.
        """
        cache = Cache(self.d)
        for i in range(len(cluster)-1):
            for j in range(i+1, len(cluster)):
                subset1 = cluster[i]
                subset2 = cluster[j]
                merge = subset1.merge(subset2)
                # Positive value => merging i and j lowers the total cc.
                cache.add(i, j, subset1.cc + subset2.cc - merge.cc, merge)
        return cache
    def sampling(self):
        """Draw one candidate as a (d, Cmax) one-hot boolean matrix."""
        # random sampling, only first generation (no clustering yet)
        if self.cluster is None:
            rand = np.random.rand(self.d, 1)
            cum_theta = self.theta.cumsum(axis=1)
            # Inverse-CDF draw per variable from self.theta.
            c = (cum_theta - self.theta <= rand) & (rand < cum_theta)
            return c
        # sample by using each probability of cluster that clustered by CCO
        else:
            c = np.zeros((self.d, self.Cmax), dtype=bool)
            for cl in self.cluster:
                rand = np.random.rand()
                cum_theta = cl.theta.cumsum().reshape(cl.theta.shape)
                _c = (cum_theta - cl.theta <= rand) & (rand < cum_theta)
                if len(cl) > 1:
                    # Multi-variable cluster: pick the single sampled cell
                    # and unravel it into one index per member variable.
                    _c = np.unravel_index(np.argmax(_c), _c.shape)
                c[cl.idx_set, _c] = True
            return c
    def get_c_m(self, cluster):
        # Sum of each cluster's ``mc`` (defined in SubSet; presumably the
        # model-complexity term — verify against eda.optimizer.util).
        return np.sum([cl.mc for cl in cluster])
    def get_c_p(self, cluster):
        # Sum of each cluster's ``cpc`` (presumably the compressed
        # population-complexity term — verify against eda.optimizer.util).
        return np.sum([cl.cpc for cl in cluster])
    def get_c(self, cluster):
        # Sum of each cluster's combined complexity ``cc``.
        return np.sum([cl.cc for cl in cluster])
    def convergence(self):
        """Mean of per-cluster max probabilities; 0.5 before any update."""
        if self.cluster is None:
            return 0.5
        return np.mean([np.max(c.theta) for c in self.cluster])
    def __str__(self):
        """Multi-line summary embedding the selection/replacement reprs."""
        sup_str = "  " + super(ECGA, self).__str__().replace("\n", "\n  ")
        sel_str = "  " + str(self.selection).replace("\n", "\n  ")
        rep_str = "  " + str(self.replacement).replace("\n", "\n  ")
        return 'ECGA(\n' \
               '{}\n' \
               '{}\n' \
               '{}\n' \
               ')'.format(sup_str, sel_str, rep_str)
| [
"eda.optimizer.util.SubSet",
"numpy.sum",
"numpy.argmax",
"numpy.zeros",
"numpy.max",
"numpy.random.rand",
"eda.optimizer.util.Cache"
] | [((1049, 1069), 'numpy.argmax', 'np.argmax', (['x'], {'axis': '(2)'}), '(x, axis=2)\n', (1058, 1069), True, 'import numpy as np\n'), ((3037, 3050), 'eda.optimizer.util.Cache', 'Cache', (['self.d'], {}), '(self.d)\n', (3042, 3050), False, 'from eda.optimizer.util import SubSet, Cache\n'), ((4225, 4258), 'numpy.sum', 'np.sum', (['[cl.mc for cl in cluster]'], {}), '([cl.mc for cl in cluster])\n', (4231, 4258), True, 'import numpy as np\n'), ((4307, 4341), 'numpy.sum', 'np.sum', (['[cl.cpc for cl in cluster]'], {}), '([cl.cpc for cl in cluster])\n', (4313, 4341), True, 'import numpy as np\n'), ((4388, 4421), 'numpy.sum', 'np.sum', (['[cl.cc for cl in cluster]'], {}), '([cl.cc for cl in cluster])\n', (4394, 4421), True, 'import numpy as np\n'), ((1953, 1991), 'eda.optimizer.util.SubSet', 'SubSet', (['i', 'population[:, i]', 'self.Cmax'], {}), '(i, population[:, i], self.Cmax)\n', (1959, 1991), False, 'from eda.optimizer.util import SubSet, Cache\n'), ((3481, 3506), 'numpy.random.rand', 'np.random.rand', (['self.d', '(1)'], {}), '(self.d, 1)\n', (3495, 3506), True, 'import numpy as np\n'), ((3755, 3796), 'numpy.zeros', 'np.zeros', (['(self.d, self.Cmax)'], {'dtype': 'bool'}), '((self.d, self.Cmax), dtype=bool)\n', (3763, 3796), True, 'import numpy as np\n'), ((3856, 3872), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3870, 3872), True, 'import numpy as np\n'), ((4530, 4545), 'numpy.max', 'np.max', (['c.theta'], {}), '(c.theta)\n', (4536, 4545), True, 'import numpy as np\n'), ((4090, 4103), 'numpy.argmax', 'np.argmax', (['_c'], {}), '(_c)\n', (4099, 4103), True, 'import numpy as np\n')] |
import ray
import auth
import util
import solver
import time
import threading
import collections
import sutil
from queue import PriorityQueue, Queue
import traceback
import tempfile
import os
import uuid
import random
import numpy as np
import tensorflow as tf
from tftd import TFTD, tftd_to_example, tfd_to_tftd
from sutil import TFData
from azure.storage.blob import BlockBlobService
from azure.storage.queue import QueueService
TaskID = collections.namedtuple(
    'TaskID', ['problem_id', 'node_id', 'node_depth', 'is_train'])
Task = collections.namedtuple('Task', ['id', 'bcnf'])
TaskResult = collections.namedtuple('TaskResult', ['btfds', 'new_bcnfs'])
class TaskIDCounter:
    """Hands out node ids within each problem's search tree.

    A problem's root gets node_id 0 at depth 0; every subsequent child
    receives the next unused node_id and its parent's depth + 1.
    """
    def __init__(self):
        # problem_id -> next node_id to hand out for that problem
        self.counters = {}
    def fresh_id(self, problem_id, is_train):
        """Register a brand-new problem and return its root TaskID."""
        assert problem_id not in self.counters
        self.counters[problem_id] = 1
        return TaskID(problem_id=problem_id, node_id=0, node_depth=0,
                      is_train=is_train)
    def next_child_id(self, id):
        """Return a TaskID for a child of ``id`` in the same problem."""
        assert id.problem_id in self.counters
        child_node = self.counters[id.problem_id]
        self.counters[id.problem_id] = child_node + 1
        assert id.node_id < child_node
        return TaskID(problem_id=id.problem_id, node_id=child_node,
                      node_depth=id.node_depth + 1, is_train=id.is_train)
class TFTDWriter:
    """Streams TFTD examples into GZIP-compressed TFRecord files.

    Each file is staged in a local temp dir; once it holds
    opts.n_tfrs_per_file records it is uploaded to the run's tfr blob
    container and a fresh file is started.  ``finalize`` flushes the
    trailing partially-filled file.
    """
    def __init__(self, opts):
        self.opts = opts
        self.n_files = 0  # number of files started so far
        self.tmpdir = tempfile.TemporaryDirectory()  # local staging area
        self._next_file()
    def _next_file(self):
        # Open a new uniquely-named TFRecord file in the staging dir.
        self.n_writes = 0
        self.outfile = "file%d_%s.tfr" % (self.n_files, str(uuid.uuid4()))
        self.n_files += 1
        tfropts = tf.io.TFRecordOptions(compression_type=tf.io.TFRecordCompressionType.GZIP)
        self.writer = tf.io.TFRecordWriter(os.path.join(self.tmpdir.name, self.outfile), options=tfropts)
    def _move_file(self):
        # Flush/close the current file and upload it to blob storage.
        self.writer.flush()
        self.writer.close()
        bbs = BlockBlobService(account_name=auth.store_name(), account_key=auth.store_key())
        bbs.create_blob_from_path(util.gd_tfr_bcname(gd_id=self.opts.gd_id),
                                  self.outfile, os.path.join(self.tmpdir.name, self.outfile))
    def finalize(self):
        # Upload the trailing file iff it contains any records.
        util.log(kind='info', author='tfwriter', msg="finalize: %s" % str(self.n_writes))
        if self.n_writes > 0:
            util.log(kind='info', author='tfwriter', msg="moving last file #%d (%s)" % (self.n_files, self.outfile))
            self._move_file()
    def write_tftd(self, tftd):
        # Append one serialized example; roll the file over when full.
        self.writer.write(tftd_to_example(tftd).SerializeToString())
        self.n_writes += 1
        if self.n_writes == self.opts.n_tfrs_per_file:
            util.log(kind='info', author='tfwriter', msg="file #%d ready (%s)" % (self.n_files, self.outfile))
            self._move_file()
            self._next_file()
def to_blob(opts, bbs, x, prefix="blob"):
    """Upload ``x`` into this run's scratch container; return util.to_blob's handle."""
    scratch_container = util.gd_scratch_bcname(gd_id=opts.gd_id)
    return util.to_blob(bbs, scratch_container, prefix=prefix, x=x)
def from_blob(opts, bbs, blob_name, delete):
    """Fetch ``blob_name`` from this run's scratch container (optionally deleting it)."""
    scratch_container = util.gd_scratch_bcname(gd_id=opts.gd_id)
    return util.from_blob(bbs, scratch_container, blob_name=blob_name, delete=delete)
def delete_blob(opts, bbs, x):
    """Delete blob ``x`` from this run's scratch container."""
    scratch_container = util.gd_scratch_bcname(gd_id=opts.gd_id)
    return bbs.delete_blob(scratch_container, x)
@ray.remote(num_cpus=1, max_calls=1)
def gen_data_for(opts, task):
    """Remote worker: expand one search-tree node of a SAT problem.

    Downloads the serialized solver state referenced by ``task.bcnf``,
    propagates, then:
      * SAT            -> nothing to record;
      * UNSAT          -> uploads unsat-core datapoints (as blobs);
      * still UNKNOWN  -> cubes and uploads the child solver states.

    Returns a TaskResult(btfds=<blob names of TF datapoints>,
    new_bcnfs=<blob names of serialized child cnf states>).
    """
    bbs = BlockBlobService(account_name=auth.store_name(), account_key=auth.store_key())
    sdimacs = from_blob(opts, bbs, task.bcnf, delete=False)
    ctx = solver.Context()
    s = solver.deserialize(ctx, sutil.mk_opts(opts), sdimacs)
    btfds = []      # blob names of generated TF datapoints
    new_bcnfs = []  # blob names of serialized child solver states
    def push_tfd(tfd):
        # Upload one datapoint and remember its blob name.
        btfds.append(to_blob(opts, bbs, sutil.tfd_to_py(tfd)))
    propagate_status = s.propagate()
    if propagate_status == solver.Status.UNKNOWN:
        # Clone *before* check so cores are extracted from the
        # pre-check state (check presumably mutates ``s``).
        s_pre_check = s.clone(ctx)
        assert(s_pre_check.propagate() == solver.Status.UNKNOWN)
        check_status, check_time = util.timeit(s.check)
        if check_status == solver.Status.SAT:
            pass  # satisfiable: no core datapoints to extract
        elif check_status == solver.Status.UNSAT:
            push_tfd(s_pre_check.to_tf_data_with_core())
            # NOTE: side-effecting list comprehension — uploads every extra core.
            [push_tfd(tfd) for tfd in s_pre_check.get_more_cores(ctx=ctx, max_tries=opts.find_max_tries, percent_to_keep=opts.find_percent_to_keep)]
        else:
            assert(check_status == solver.Status.UNKNOWN)
            s_pre_cube = s.clone(ctx)
            assert(s_pre_cube.propagate() == solver.Status.UNKNOWN)
            (cube_status, cubes), cube_time = util.timeit(s.cube)
            if cube_status == solver.Status.UNKNOWN:
                assert(len(cubes) in [1, 2])
                random.shuffle(cubes)
                for cube in cubes:
                    # One child node per cube: clone, constrain, upload.
                    s_child = s.clone(ctx)
                    s_child.add(cube)
                    new_bcnfs.append(to_blob(opts, bbs, s_child.serialize()))
    return TaskResult(btfds=btfds, new_bcnfs=new_bcnfs)
def mk_query(opts, is_train):
    """Build the SQL query that selects SAT problems to seed generation.

    The split is by problem size: the train set takes problems with
    2 * n_vars + n_clauses below opts.max_n_nodes_train, the test set
    takes problems between opts.max_n_nodes_train and
    opts.max_n_nodes_test.  Both exclude randkcnf instances and the 2018
    SAT-competition problems, order by n_clauses ascending and cap the
    row count at opts.limit.

    Parameters
    ----------
    opts : namespace with max_n_nodes_train, max_n_nodes_test, limit
    is_train : bool
        Choose the train-set (True) or test-set (False) query.

    Returns
    -------
    str
        A complete SQL statement.
    """
    if is_train:
        return "SELECT problem_id, bcname, bname FROM sat_problems WHERE 2 * n_vars + n_clauses < %d AND bname NOT LIKE 'randkcnf%%' AND problem_id NOT IN (select problem_id from satcomp_info where year = 2018) ORDER BY n_clauses ASC LIMIT %d" % (opts.max_n_nodes_train, opts.limit)
    else:
        # BUGFIX: the test-set query used to end the NOT IN subquery with
        # "year = 2018))" — an unbalanced extra ')' that is a SQL syntax
        # error.  It now mirrors the train-set query's single ')'.
        return "SELECT problem_id, bcname, bname FROM sat_problems WHERE 2 * n_vars + n_clauses > %d AND 2 * n_vars + n_clauses < %d AND bname NOT LIKE 'randkcnf%%' AND problem_id NOT IN (select problem_id from satcomp_info where year = 2018) ORDER BY n_clauses ASC LIMIT %d" % (opts.max_n_nodes_train, opts.max_n_nodes_test, opts.limit)
def gen_all_data(opts):
    """Master driver: turn SAT problems into TFRecord training data.

    Seeds a priority queue with problems pulled from the database (on a
    background thread), fans tasks out to ray workers via
    ``gen_data_for``, re-queues the child nodes each worker produces,
    and streams finished datapoints through a TFTDWriter (on another
    background thread).  Loops until interrupted or an unrecoverable
    error occurs; cleanup happens in the ``finally`` block.
    """
    tftdw = TFTDWriter(opts)
    tc = TaskIDCounter()
    bbs = BlockBlobService(account_name=auth.store_name(), account_key=auth.store_key())
    # Pending tasks, ordered by priority (defaults to node_id: earlier
    # nodes first; failed tasks get re-queued with priority 1000000).
    task_pq = PriorityQueue()
    jobs = []          # in-flight ray job handles
    job_to_task = {}   # ray job handle -> Task
    # Register this run in the DB; the returned id names the blob containers.
    setattr(opts, 'gd_id', util.db_insert(table='gd_runs', git_commit=util.get_commit(), wait_n_secs=opts.wait_n_secs,
                                          n_jobs_at_once=opts.n_jobs_at_once, n_tfrs_per_file=opts.n_tfrs_per_file,
                                          max_n_nodes_train=opts.max_n_nodes_train, max_n_nodes_test=opts.max_n_nodes_test,
                                          find_max_tries=opts.find_max_tries, find_percent_to_keep=opts.find_percent_to_keep,
                                          query_limit=opts.limit, timeout_ms=opts.timeout_ms))
    assert(not bbs.exists(util.gd_scratch_bcname(gd_id=opts.gd_id)))
    assert(not bbs.exists(util.gd_tfr_bcname(gd_id=opts.gd_id)))
    bbs.create_container(util.gd_scratch_bcname(gd_id=opts.gd_id))
    bbs.create_container(util.gd_tfr_bcname(gd_id=opts.gd_id))
    def launch_task(task):
        # Hand one task to a ray worker and remember the job->task mapping.
        job = gen_data_for.remote(opts, task)
        jobs.append(job)
        job_to_task[job] = task
    def push_task(task, prio=None):
        if prio is None: prio = task.id.node_id
        task_pq.put_nowait((prio, task))
    def reload_jobs():
        # Top up the in-flight pool from the queue, up to n_jobs_at_once.
        while not task_pq.empty() and len(jobs) < opts.n_jobs_at_once:
            launch_task(task_pq.get_nowait()[1])
    def push_problems():
        # Background thread: fetch problem rows, build a root solver state
        # for each, upload it, and enqueue the root task.
        util.log(author='push_problems', msg='starting')
        problem_infos = []
        for is_train in [True, False]:
            conn = util._connect()
            try:
                with conn.cursor() as cursor:
                    cursor.execute(mk_query(opts=opts, is_train=is_train))
                    problem_infos.extend([(is_train, result) for result in list(cursor.fetchall_unbuffered())])
            finally:
                conn.close()
        util.log(author='push_problems', msg='found %d problems' % len(problem_infos))
        for is_train, info in problem_infos:
            with tempfile.TemporaryDirectory() as tmpdir:
                tmpfilename = os.path.join(tmpdir, "%s.dimacs" % str(uuid.uuid4()))
                bbs.get_blob_to_path(info['bcname'], info['bname'], tmpfilename)
                s = solver.Solver(solver.Context(), solver.Options())
                s.from_file(tmpfilename)
                os.system('rm %s' % tmpfilename)
                task = Task(id=tc.fresh_id(info['problem_id'], is_train=is_train), bcnf=to_blob(opts, bbs, s.serialize()))
                assert(task.id.problem_id == info['problem_id'])
                push_task(task)
        util.log(author='push_problems', msg='pushed all problems')
    push_problems_thread = threading.Thread(target=push_problems, args=())
    push_problems_thread.start()
    def get_ready_job():
        # Block until some in-flight ray job finishes; return (job, task).
        while True:
            reload_jobs()
            if jobs:
                ready_jobs, _ = ray.wait(jobs, num_returns=1, timeout=opts.wait_n_secs)
                if ready_jobs:
                    job = ready_jobs[0]
                    jobs.remove(job)
                    assert(job in job_to_task)
                    task = job_to_task[job]
                    del job_to_task[job]
                    return job, task
            time.sleep(1)
    task_result_q = Queue()
    def process_task_result():
        # Background thread: persist each finished task's datapoints.
        while True:
            task, task_result = task_result_q.get()
            delete_blob(opts, bbs, task.bcnf)  # the node's cnf blob is spent
            for btfd in task_result.btfds:
                tfd = from_blob(opts, bbs, btfd, delete=True)
                assert(tfd.n_vars > 0)
                assert(tfd.n_clauses > 0)
                # Record datapoint metadata; dp_id keys the TFRecord example.
                dp_id = util.db_insert(table='gd_dps',
                                       gd_id=opts.gd_id, problem_id=task.id.problem_id, node_id=task.id.node_id, node_depth=task.id.node_depth, is_train=task.id.is_train,
                                       n_vars=tfd.n_vars, n_clauses=tfd.n_clauses, n_cells=np.shape(tfd.CL_idxs)[0],
                                       percent_vars_in_core=float(np.mean(tfd.core_var_mask.astype(np.float32))),
                                       percent_clauses_in_core=float(np.mean(tfd.core_clause_mask.astype(np.float32))))
                tftdw.write_tftd(tftd=tfd_to_tftd(dp_id=dp_id, is_train=task.id.is_train, tfd=tfd))
    process_results_thread = threading.Thread(target=process_task_result, args=())
    process_results_thread.start()
    try:
        while True:
            job, task = get_ready_job()
            try:
                task_result = ray.get(job)
            except Exception as e:
                # Worker failed: log and retry the task at the lowest priority.
                tb = traceback.format_exc()
                util.log(kind='error', author='remote-worker', msg="TASK-ID: %s\n%s\n%s" % (str(task.id), str(e), tb))
                push_task(task, prio=1000000)
                continue
            if task_result.new_bcnfs:
                # Enqueue the children this node produced, with fresh ids.
                child_ids = [tc.next_child_id(task.id) for _ in task_result.new_bcnfs]
                for child_id, child_bcnf in zip(child_ids, task_result.new_bcnfs):
                    push_task(Task(id=child_id, bcnf=child_bcnf))
            task_result_q.put((task, task_result))
    except Exception as e:
        tb = traceback.format_exc()
        util.log(kind='error', author='master', msg="FAILING\n%s\n%s" % (str(e), tb))
        print("Exception: ", e)
        print("Failing...")
    finally:
        print("Finally...")
        util.log(kind='info', author='master', msg="finalizing")
        tftdw.finalize()
        util.log(kind='info', author='master', msg="deleting scratch blob container")
        bbs.delete_container(util.gd_scratch_bcname(gd_id=opts.gd_id))
        util.log(kind='info', author='master', msg="finished")
    print("All done!")
if __name__ == "__main__":
    # CLI entry point: parse generation parameters, join the ray cluster
    # and run the data-generation master loop.
    import argparse
    parser = argparse.ArgumentParser()
    # BUGFIX: wait_n_secs and find_percent_to_keep are fractional values
    # (defaults 0.05 and 0.99995) but were declared type=int, so passing
    # them on the command line raised ValueError or silently truncated.
    # They are now parsed as float; integer arguments still parse fine.
    parser.add_argument('--wait_n_secs', action='store', dest='wait_n_secs', type=float, default=0.05)
    parser.add_argument('--n_jobs_at_once', action='store', dest='n_jobs_at_once', type=int, default=510)
    parser.add_argument('--n_tfrs_per_file', action='store', dest='n_tfrs_per_file', type=int, default=5000)
    parser.add_argument('--max_n_nodes_train', action='store', dest='max_n_nodes_train', type=int, default=300000)
    parser.add_argument('--max_n_nodes_test', action='store', dest='max_n_nodes_test', type=int, default=300000)
    parser.add_argument('--limit', action='store', dest='limit', type=int, default=1000000)
    parser.add_argument('--timeout_ms', action='store', dest='timeout_ms', type=int, default=60000)
    parser.add_argument('--find_max_tries', action='store', dest='find_max_tries', type=int, default=10)
    parser.add_argument('--find_percent_to_keep', action='store', dest='find_percent_to_keep', type=float, default=0.99995)
    parser.add_argument('--redis_address', action='store', dest='redis_address', type=str, default=None)
    opts = parser.parse_args()
    # NOTE(review): kind='error' for a routine startup message looks
    # unintentional, but log-level semantics live in util.log — left as-is.
    util.log(kind='error', author='master', msg="joining ray @ %s" % opts.redis_address)
    ray.init(redis_address=opts.redis_address)
    gen_all_data(opts=opts)
| [
"argparse.ArgumentParser",
"random.shuffle",
"util.gd_tfr_bcname",
"solver.Options",
"numpy.shape",
"util._connect",
"os.path.join",
"ray.remote",
"tempfile.TemporaryDirectory",
"auth.store_key",
"solver.Context",
"sutil.tfd_to_py",
"traceback.format_exc",
"tensorflow.io.TFRecordOptions",
... | [((445, 534), 'collections.namedtuple', 'collections.namedtuple', (['"""TaskID"""', "['problem_id', 'node_id', 'node_depth', 'is_train']"], {}), "('TaskID', ['problem_id', 'node_id', 'node_depth',\n 'is_train'])\n", (467, 534), False, 'import collections\n'), ((548, 594), 'collections.namedtuple', 'collections.namedtuple', (['"""Task"""', "['id', 'bcnf']"], {}), "('Task', ['id', 'bcnf'])\n", (570, 594), False, 'import collections\n'), ((614, 674), 'collections.namedtuple', 'collections.namedtuple', (['"""TaskResult"""', "['btfds', 'new_bcnfs']"], {}), "('TaskResult', ['btfds', 'new_bcnfs'])\n", (636, 674), False, 'import collections\n'), ((3245, 3280), 'ray.remote', 'ray.remote', ([], {'num_cpus': '(1)', 'max_calls': '(1)'}), '(num_cpus=1, max_calls=1)\n', (3255, 3280), False, 'import ray\n'), ((3483, 3499), 'solver.Context', 'solver.Context', ([], {}), '()\n', (3497, 3499), False, 'import solver\n'), ((5799, 5814), 'queue.PriorityQueue', 'PriorityQueue', ([], {}), '()\n', (5812, 5814), False, 'from queue import PriorityQueue, Queue\n'), ((8430, 8477), 'threading.Thread', 'threading.Thread', ([], {'target': 'push_problems', 'args': '()'}), '(target=push_problems, args=())\n', (8446, 8477), False, 'import threading\n'), ((9016, 9023), 'queue.Queue', 'Queue', ([], {}), '()\n', (9021, 9023), False, 'from queue import PriorityQueue, Queue\n'), ((10068, 10121), 'threading.Thread', 'threading.Thread', ([], {'target': 'process_task_result', 'args': '()'}), '(target=process_task_result, args=())\n', (10084, 10121), False, 'import threading\n'), ((11532, 11557), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11555, 11557), False, 'import argparse\n'), ((12662, 12751), 'util.log', 'util.log', ([], {'kind': '"""error"""', 'author': '"""master"""', 'msg': "('joining ray @ %s' % opts.redis_address)"}), "(kind='error', author='master', msg='joining ray @ %s' % opts.\n redis_address)\n", (12670, 12751), False, 'import util\n'), ((12751, 12793), 
'ray.init', 'ray.init', ([], {'redis_address': 'opts.redis_address'}), '(redis_address=opts.redis_address)\n', (12759, 12793), False, 'import ray\n'), ((1438, 1467), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1465, 1467), False, 'import tempfile\n'), ((1673, 1747), 'tensorflow.io.TFRecordOptions', 'tf.io.TFRecordOptions', ([], {'compression_type': 'tf.io.TFRecordCompressionType.GZIP'}), '(compression_type=tf.io.TFRecordCompressionType.GZIP)\n', (1694, 1747), True, 'import tensorflow as tf\n'), ((2922, 2962), 'util.gd_scratch_bcname', 'util.gd_scratch_bcname', ([], {'gd_id': 'opts.gd_id'}), '(gd_id=opts.gd_id)\n', (2944, 2962), False, 'import util\n'), ((3061, 3101), 'util.gd_scratch_bcname', 'util.gd_scratch_bcname', ([], {'gd_id': 'opts.gd_id'}), '(gd_id=opts.gd_id)\n', (3083, 3101), False, 'import util\n'), ((3198, 3238), 'util.gd_scratch_bcname', 'util.gd_scratch_bcname', ([], {'gd_id': 'opts.gd_id'}), '(gd_id=opts.gd_id)\n', (3220, 3238), False, 'import util\n'), ((3540, 3559), 'sutil.mk_opts', 'sutil.mk_opts', (['opts'], {}), '(opts)\n', (3553, 3559), False, 'import sutil\n'), ((3920, 3940), 'util.timeit', 'util.timeit', (['s.check'], {}), '(s.check)\n', (3931, 3940), False, 'import util\n'), ((6607, 6647), 'util.gd_scratch_bcname', 'util.gd_scratch_bcname', ([], {'gd_id': 'opts.gd_id'}), '(gd_id=opts.gd_id)\n', (6629, 6647), False, 'import util\n'), ((6674, 6710), 'util.gd_tfr_bcname', 'util.gd_tfr_bcname', ([], {'gd_id': 'opts.gd_id'}), '(gd_id=opts.gd_id)\n', (6692, 6710), False, 'import util\n'), ((7147, 7195), 'util.log', 'util.log', ([], {'author': '"""push_problems"""', 'msg': '"""starting"""'}), "(author='push_problems', msg='starting')\n", (7155, 7195), False, 'import util\n'), ((8342, 8401), 'util.log', 'util.log', ([], {'author': '"""push_problems"""', 'msg': '"""pushed all problems"""'}), "(author='push_problems', msg='pushed all problems')\n", (8350, 8401), False, 'import util\n'), ((11142, 11198), 'util.log', 
'util.log', ([], {'kind': '"""info"""', 'author': '"""master"""', 'msg': '"""finalizing"""'}), "(kind='info', author='master', msg='finalizing')\n", (11150, 11198), False, 'import util\n'), ((11232, 11309), 'util.log', 'util.log', ([], {'kind': '"""info"""', 'author': '"""master"""', 'msg': '"""deleting scratch blob container"""'}), "(kind='info', author='master', msg='deleting scratch blob container')\n", (11240, 11309), False, 'import util\n'), ((11389, 11443), 'util.log', 'util.log', ([], {'kind': '"""info"""', 'author': '"""master"""', 'msg': '"""finished"""'}), "(kind='info', author='master', msg='finished')\n", (11397, 11443), False, 'import util\n'), ((1793, 1837), 'os.path.join', 'os.path.join', (['self.tmpdir.name', 'self.outfile'], {}), '(self.tmpdir.name, self.outfile)\n', (1805, 1837), False, 'import os\n'), ((2066, 2107), 'util.gd_tfr_bcname', 'util.gd_tfr_bcname', ([], {'gd_id': 'self.opts.gd_id'}), '(gd_id=self.opts.gd_id)\n', (2084, 2107), False, 'import util\n'), ((2157, 2201), 'os.path.join', 'os.path.join', (['self.tmpdir.name', 'self.outfile'], {}), '(self.tmpdir.name, self.outfile)\n', (2169, 2201), False, 'import os\n'), ((2360, 2468), 'util.log', 'util.log', ([], {'kind': '"""info"""', 'author': '"""tfwriter"""', 'msg': "('moving last file #%d (%s)' % (self.n_files, self.outfile))"}), "(kind='info', author='tfwriter', msg='moving last file #%d (%s)' %\n (self.n_files, self.outfile))\n", (2368, 2468), False, 'import util\n'), ((2691, 2794), 'util.log', 'util.log', ([], {'kind': '"""info"""', 'author': '"""tfwriter"""', 'msg': "('file #%d ready (%s)' % (self.n_files, self.outfile))"}), "(kind='info', author='tfwriter', msg='file #%d ready (%s)' % (self.\n n_files, self.outfile))\n", (2699, 2794), False, 'import util\n'), ((3356, 3373), 'auth.store_name', 'auth.store_name', ([], {}), '()\n', (3371, 3373), False, 'import auth\n'), ((3387, 3403), 'auth.store_key', 'auth.store_key', ([], {}), '()\n', (3401, 3403), False, 'import auth\n'), ((5728, 
5745), 'auth.store_name', 'auth.store_name', ([], {}), '()\n', (5743, 5745), False, 'import auth\n'), ((5759, 5775), 'auth.store_key', 'auth.store_key', ([], {}), '()\n', (5773, 5775), False, 'import auth\n'), ((6473, 6513), 'util.gd_scratch_bcname', 'util.gd_scratch_bcname', ([], {'gd_id': 'opts.gd_id'}), '(gd_id=opts.gd_id)\n', (6495, 6513), False, 'import util\n'), ((6542, 6578), 'util.gd_tfr_bcname', 'util.gd_tfr_bcname', ([], {'gd_id': 'opts.gd_id'}), '(gd_id=opts.gd_id)\n', (6560, 6578), False, 'import util\n'), ((7281, 7296), 'util._connect', 'util._connect', ([], {}), '()\n', (7294, 7296), False, 'import util\n'), ((8981, 8994), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (8991, 8994), False, 'import time\n'), ((10924, 10946), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10944, 10946), False, 'import traceback\n'), ((11339, 11379), 'util.gd_scratch_bcname', 'util.gd_scratch_bcname', ([], {'gd_id': 'opts.gd_id'}), '(gd_id=opts.gd_id)\n', (11361, 11379), False, 'import util\n'), ((1983, 2000), 'auth.store_name', 'auth.store_name', ([], {}), '()\n', (1998, 2000), False, 'import auth\n'), ((2014, 2030), 'auth.store_key', 'auth.store_key', ([], {}), '()\n', (2028, 2030), False, 'import auth\n'), ((3673, 3693), 'sutil.tfd_to_py', 'sutil.tfd_to_py', (['tfd'], {}), '(tfd)\n', (3688, 3693), False, 'import sutil\n'), ((4485, 4504), 'util.timeit', 'util.timeit', (['s.cube'], {}), '(s.cube)\n', (4496, 4504), False, 'import util\n'), ((5936, 5953), 'util.get_commit', 'util.get_commit', ([], {}), '()\n', (5951, 5953), False, 'import util\n'), ((7747, 7776), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (7774, 7776), False, 'import tempfile\n'), ((8080, 8112), 'os.system', 'os.system', (["('rm %s' % tmpfilename)"], {}), "('rm %s' % tmpfilename)\n", (8089, 8112), False, 'import os\n'), ((8636, 8691), 'ray.wait', 'ray.wait', (['jobs'], {'num_returns': '(1)', 'timeout': 'opts.wait_n_secs'}), '(jobs, 
num_returns=1, timeout=opts.wait_n_secs)\n', (8644, 8691), False, 'import ray\n'), ((10274, 10286), 'ray.get', 'ray.get', (['job'], {}), '(job)\n', (10281, 10286), False, 'import ray\n'), ((1608, 1620), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1618, 1620), False, 'import uuid\n'), ((2554, 2575), 'tftd.tftd_to_example', 'tftd_to_example', (['tftd'], {}), '(tftd)\n', (2569, 2575), False, 'from tftd import TFTD, tftd_to_example, tfd_to_tftd\n'), ((4620, 4641), 'random.shuffle', 'random.shuffle', (['cubes'], {}), '(cubes)\n', (4634, 4641), False, 'import random\n'), ((7987, 8003), 'solver.Context', 'solver.Context', ([], {}), '()\n', (8001, 8003), False, 'import solver\n'), ((8005, 8021), 'solver.Options', 'solver.Options', ([], {}), '()\n', (8019, 8021), False, 'import solver\n'), ((10343, 10365), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10363, 10365), False, 'import traceback\n'), ((9976, 10036), 'tftd.tfd_to_tftd', 'tfd_to_tftd', ([], {'dp_id': 'dp_id', 'is_train': 'task.id.is_train', 'tfd': 'tfd'}), '(dp_id=dp_id, is_train=task.id.is_train, tfd=tfd)\n', (9987, 10036), False, 'from tftd import TFTD, tftd_to_example, tfd_to_tftd\n'), ((7857, 7869), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7867, 7869), False, 'import uuid\n'), ((9678, 9699), 'numpy.shape', 'np.shape', (['tfd.CL_idxs'], {}), '(tfd.CL_idxs)\n', (9686, 9699), True, 'import numpy as np\n')] |
import numpy as np
import plotly.express as px
from datetime import datetime
from cxotime import CxoTime
# Plotly "tickformatstops": pick a date tick-label format based on zoom level.
# dtickrange bounds are in milliseconds, or month codes like "M1"/"M6".
time_axis_format = [
    # dict(dtickrange=[None, 600000], value="%H:%M:%S.%L\n"),
    dict(dtickrange=[None, 60000000], value="%H:%M:%S\n%Y:%j"),
    dict(dtickrange=[60000000, 315360000], value="%Y:%j"),
    dict(dtickrange=[315360000, "M1"], value="%e %b\n%Y:%j"),
    dict(dtickrange=["M1", "M6"], value="%Y:%j"),
    dict(dtickrange=["M6", None], value="%Y")
]
# Shared font family for all text elements below.
# font = 'Courier New, monospace'
font = 'Arial'
# Figure title text style.
title_format = {
    'family': font,
    'size': 32,
    'color': '#666666',
    # 'color': '#7f7f7f'
}
# Subplot / secondary title text style.
sub_title_format = {
    'family': font,
    'size': 24,
    'color': '#666666'
}
# Axis tick-label text style.
axis_format = {
    'family': font,
    'size': 20,
    'color': '#666666'
}
# Axis title / annotation text style.
label_format = {
    'family': font,
    'size': 24,
    'color': '#666666'
}
# Legend text style.
legend_format = {
    'family': font,
    'size': 16,
    'color': "#666666",
}
# Legend placement: pinned to the top-right corner of the plot area.
legend_format_top_right= {
    'yanchor': "top",
    'y': 0.99,
    'xanchor': "right",
    'x': 0.99
}
# Default qualitative color cycle (plotly's D3 palette).
colors = px.colors.qualitative.D3
def hex_to_rgba(hexstr, opacity):
    """Convert a hex color string (e.g. '#1f77b4' or 'fff') to (r, g, b, opacity).

    The channel width adapts to the string length (2 digits per channel for
    6-digit hex, 1 digit for 3-digit hex). `opacity` is appended unchanged.
    """
    digits = hexstr.lstrip('#')
    step = int(len(digits) / 3)
    channels = []
    for start in range(0, len(digits), step):
        channels.append(int(digits[start:start + step], 16))
    channels.append(opacity)
    return tuple(channels)
def hex_to_rgba_str(hexstr, opacity):
    """Convert a hex color string to a CSS-style 'rgba(r,g,b,a)' string."""
    red, green, blue, alpha = hex_to_rgba(hexstr, opacity)
    return 'rgba({},{},{},{})'.format(red, green, blue, alpha)
def format_dates(cheta_dates):
    """Convert cheta/CxoTime-compatible times to a numpy array of datetimes.

    CxoTime renders each time as a '%Y:%j:%H:%M:%S.%f' date string, which is
    then parsed into a naive ``datetime`` suitable for plotly x-axis values.
    """
    date_strings = CxoTime(cheta_dates).date
    converted = [datetime.strptime(s, '%Y:%j:%H:%M:%S.%f') for s in date_strings]
    return np.array(converted)
def format_plot_data(model_results, limit, state_data, dwell1_state, dwell2_state):
    """Build the three scattergl traces for the converged-solution figure.

    Returns a list of plotly trace dicts: a state-key step trace on the
    bottom panel (x/y), a dashed horizontal limit line, and the simulated
    temperature trace, both on the top panel (x2/y2).

    `dwell1_state` and `dwell2_state` are accepted for interface
    compatibility but are not used here.
    """
    state_trace = {
        'type': 'scattergl',
        'x': format_dates(state_data['state_times']),
        'y': state_data['state_keys'],
        'name': 'State Keys',
        'line': {'color': '#666666', 'width': 2, 'shape': 'hv'},
        'mode': 'lines',
        'showlegend': False,
        'xaxis': 'x',
        'yaxis': 'y',
    }
    # Flat line at `limit` spanning the full simulated time range.
    limit_trace = {
        'type': 'scattergl',
        'x': format_dates([state_data['state_times'][0], state_data['state_times'][-1]]),
        'y': [limit, limit],
        'name': 'State Keys',
        'line': {'color': 'black', 'width': 2, 'shape': 'hv', 'dash': 'dash'},
        'mode': 'lines',
        'showlegend': False,
        'xaxis': 'x2',
        'yaxis': 'y2',
    }
    temperature_trace = {
        'type': 'scattergl',
        'x': format_dates(model_results.times),
        'y': model_results.mvals,
        'name': 'AACCCDPT Temperatures',
        'line': {'color': '#666666', 'width': 2, 'shape': 'hv'},
        'mode': 'lines',
        'showlegend': False,
        'xaxis': 'x2',
        'yaxis': 'y2',
    }
    return [state_trace, limit_trace, temperature_trace]
def generate_converged_solution_plot_dict(plot_data, shapes, annotations, tstart, tstop, units='Celsius'):
    """Assemble the full plotly figure dict for the converged Timbre dwell simulation.

    Parameters
    ----------
    plot_data : list of dict
        Scattergl traces (e.g. from ``format_plot_data``).
    shapes : list of dict
        Layout shapes (e.g. shaded dwell-state bands from ``format_shapes``).
    annotations : list of dict
        Layout annotations.
    tstart, tstop
        X-axis range bounds, in a form accepted by CxoTime.
    units : str
        Temperature unit label shown on the y2 axis title.

    Returns
    -------
    dict
        Dict with 'data' and 'layout' keys, suitable for building a plotly
        figure. The layout defines two stacked panels: a state-key strip
        (x/y, bottom 19% of the height) and a temperature panel (x2/y2,
        top 80%) whose x axis mirrors the bottom one ('matches': 'x').
    """
    plot_start = datetime.strptime(CxoTime(tstart).date, '%Y:%j:%H:%M:%S.%f')
    plot_stop = datetime.strptime(CxoTime(tstop).date, '%Y:%j:%H:%M:%S.%f')
    plot_object = {
        'data': plot_data,
        'layout':
            {
                'hovermode': "closest",
                'autosize': False,
                'width': 1200,
                'height': 600,
                'margin': {'l': 80, 'r': 50, 't': 50, 'b': 70},
                'title':
                    {
                        'text': 'Converged Timbre Dwell Simulation',
                        'font': title_format,
                        'y': 0.98,
                        'x': 0.5,
                        'xanchor': 'center',
                        'yanchor': 'top'
                    },
                # Bottom strip: categorical display of the two dwell states.
                'yaxis':
                    {
                        # 'title':
                        #     {
                        #         'text': 'Simulation Temperature',
                        #         'font': label_format
                        #     },
                        'tickfont': axis_format,
                        'domain': [0.0, 0.19],
                        'range': [0.5, 2.5],
                        'zeroline': False,
                        'linecolor': '#666666',
                        'linewidth': 1,
                        'mirror': True,
                        'anchor': 'x',
                        'tickvals': [1, 2],
                        'ticktext': ['State 1', 'State 2'],
                    },
                # Top panel: simulated temperature trace.
                'yaxis2':
                    {
                        'title':
                            {
                                'text': f'Simulation Temperature<br>({units})',
                                'font': label_format
                            },
                        'tickfont': axis_format,
                        'domain': [0.2, 1.0],
                        # 'range': [-30, 45],
                        'zeroline': False,
                        'linecolor': '#666666',
                        'linewidth': 1,
                        'mirror': True,
                        'anchor': 'x2',
                    },
                'xaxis':
                    {
                        'domain': [0, 1],
                        'tickfont': axis_format,
                        'tickformatstops': time_axis_format,
                        'zeroline': False,
                        'linecolor': '#666666',
                        'linewidth': 1,
                        'mirror': True,
                        'range': [plot_start, plot_stop],
                        'showticklabels': True,
                        'anchor': 'y',
                    },
                # x2 shadows x ('matches') so zooming stays in sync; its tick
                # labels are hidden since x already shows them.
                'xaxis2':
                    {
                        'domain': [0, 1],
                        'tickfont': axis_format,
                        'tickformatstops': time_axis_format,
                        'zeroline': False,
                        'linecolor': '#666666',
                        'linewidth': 1,
                        'mirror': True,
                        'range': [plot_start, plot_stop],
                        'showticklabels': False,
                        'anchor': 'y2',
                        'matches': 'x',
                        'ticks': '',
                    },
                'showlegend': False,
                'template': 'simple_white',
                'shapes': shapes,
                'annotations': annotations,
            },
    }
    return plot_object
def format_shapes(state_data):
    """Build shaded rectangles marking alternating dwell-state intervals.

    ``state_data['state_times']`` is consumed four entries at a time; for
    each group a lightly shaded vertical band spanning the first two times
    is added to both subplots (x2/y2 temperature panel and x/y state-key
    panel), so every other interval appears shaded.

    Fix: the original advanced the iterator with bare ``next()`` calls
    inside the ``for`` loop, which raised an uncaught StopIteration when
    the number of entries was not a multiple of four. Grouping with a
    repeated iterator in ``zip`` silently drops a trailing partial group
    instead.

    Returns
    -------
    list of dict
        Plotly layout shape dicts.
    """
    def _to_dt(t):
        # CxoTime date string -> datetime for plotly x-axis coordinates.
        return datetime.strptime(CxoTime(t).date, '%Y:%j:%H:%M:%S.%f')

    shape_data = []
    times = iter(state_data['state_times'])
    # zip over the same iterator yields (t1, t2, t3, t4) groups; only the
    # first pair bounds the shaded band, the rest of the group is skipped.
    for t1, t2, _t3, _t4 in zip(times, times, times, times):
        x0 = _to_dt(t1)
        x1 = _to_dt(t2)
        for xref, yref in (('x2', 'y2 domain'), ('x', 'y domain')):
            shape_data.append({
                'fillcolor': 'black',
                'line': {'width': 0},
                'opacity': 0.05,
                'type': 'rect',
                'x0': x0,
                'x1': x1,
                'y0': 0,
                'y1': 1,
                'xref': xref,
                'yref': yref,
            })
    return shape_data
def gen_unused_range(tstart, tstop, t_backoff=1725000):
    """Return heavily shaded rectangles covering the unused leading time range.

    The shaded span runs from `tstart` up to ``tstop - t_backoff`` (seconds),
    and is duplicated for both subplots (x/y and x2/y2).
    """
    end_secs = CxoTime(tstop).secs - t_backoff
    x0 = datetime.strptime(CxoTime(tstart).date, '%Y:%j:%H:%M:%S.%f')
    x1 = datetime.strptime(CxoTime(end_secs).date, '%Y:%j:%H:%M:%S.%f')
    spans = []
    for xref, yref in (('x', 'y domain'), ('x2', 'y2 domain')):
        spans.append({
            'fillcolor': 'black',
            'line': {'width': 0},
            'opacity': 0.25,
            'type': 'rect',
            'x0': x0,
            'x1': x1,
            'y0': 0,
            'y1': 1,
            'xref': xref,
            'yref': yref,
        })
    return spans
def gen_range_annotations(tstart, tstop, yloc1, yloc2, t_backoff=1725000):
    """Build annotations marking the data range used for evaluation.

    Produces a double-headed indicator: two arrows radiating from a central
    text position at ``(tmid + tstop) / 2`` out to ``tmid`` (``tstop -
    t_backoff``) and to ``tstop``, plus a caption above them, all on the
    x2/y2 (temperature) panel.

    Improvement: the two arrow dicts were 18-line duplicates differing only
    in their 'x' value; they are now built by a shared helper.

    Parameters
    ----------
    tstart, tstop
        Time range bounds, in a form accepted by CxoTime (`tstart` is kept
        for interface compatibility; only `tstop` affects the output).
    yloc1, yloc2 : float
        Y coordinates (data units) for the arrows and the caption.
    t_backoff : float
        Length of the evaluated range in seconds (default 1725000).

    Returns
    -------
    list of dict
        [arrow_to_tmid, arrow_to_tstop, caption] plotly annotation dicts.
    """
    tstart = CxoTime(tstart).secs
    tstop = CxoTime(tstop).secs
    tmid = tstop - t_backoff
    ttext = (tstop + tmid) / 2.

    def _to_dt(secs):
        # CxoTime seconds -> datetime for plotly axis coordinates.
        return datetime.strptime(CxoTime(secs).date, '%Y:%j:%H:%M:%S.%f')

    def _arrow(x_secs):
        # Arrow from the central text location (ttext) out to x_secs.
        return {
            'x': _to_dt(x_secs),
            'y': yloc1,
            'text': '',
            'showarrow': True,
            'arrowhead': 2,
            'arrowwidth': 3,
            'arrowcolor': 'rgb(100,100,100)',
            'xref': "x2",
            'yref': "y2",
            'ax': _to_dt(ttext),
            'ay': yloc1,
            'axref': 'x2',
            'ayref': 'y2',
            'xanchor': "center",
            'yanchor': "bottom",
            'font': label_format
        }

    text = {
        'x': _to_dt(ttext),
        'y': yloc2,
        'text': 'Data Range used for evaluation',
        'showarrow': False,
        'xref': "x2",
        'yref': "y2",
        'xanchor': "center",
        'yanchor': "bottom",
        'font': label_format
    }
    return [_arrow(tmid), _arrow(tstop), text]
def gen_limit_annotation(xloc, yloc, limit, units):
    """Return a one-element annotation list labeling the limit value.

    The label 'Limit = <limit> <units>' is anchored at (xloc, yloc) on the
    x2/y2 panel, with `xloc` interpreted as a CxoTime-compatible time.
    """
    x_datetime = datetime.strptime(CxoTime(xloc).date, '%Y:%j:%H:%M:%S.%f')
    annotation = {
        'x': x_datetime,
        'y': yloc,
        'text': f'Limit = {limit} {units}',
        'showarrow': False,
        'xref': "x2",
        'yref': "y2",
        'xanchor': "center",
        'yanchor': "bottom",
        'font': label_format,
    }
    return [annotation, ]
def gen_shading_annotation(xloc, yloc, dwell1_text, dwell2_text):
    """Return a one-element annotation list explaining the band shading.

    Two lines of right-aligned text describe which vertical bands belong to
    dwell state #1 (shaded) and dwell state #2 (unshaded), anchored at
    (xloc, yloc) on the x2/y2 panel.
    """
    shaded_line = f'Lightly Shaded Vertical Bands = Dwell State #1 ({dwell1_text})'
    unshaded_line = f'Unshaded Vertical Bands = Dwell State #2 ({dwell2_text})'
    annotation = {
        'x': datetime.strptime(CxoTime(xloc).date, '%Y:%j:%H:%M:%S.%f'),
        'y': yloc,
        'text': f'{shaded_line}<br>{unshaded_line}',
        'showarrow': False,
        'xref': "x2",
        'yref': "y2",
        'xanchor': "right",
        'yanchor': "bottom",
        'font': label_format,
        'align': 'right'
    }
    return [annotation, ]
def generate_example_balance_plot_dict(t_dwell1, t_dwell2, dwell1_state, dwell2_state):
    """Build a two-bar plotly figure dict comparing hot and cold dwell times.

    Bug fix: the 'y' list was ``[np.round(t_dwell1, 1), t_dwell2, 1]`` — a
    misplaced parenthesis that produced three y values for two bars and left
    ``t_dwell2`` unrounded. Both dwell times are now rounded to one decimal.

    Parameters
    ----------
    t_dwell1 : float
        Dwell #1 (input) duration in seconds.
    t_dwell2 : float
        Dwell #2 (calculated) duration in seconds.
    dwell1_state, dwell2_state : dict
        State dicts; only the 'pitch' entry is used for the bar labels.

    Returns
    -------
    dict
        Dict with 'data' (a single bar trace) and 'layout' keys.
    """
    plot_object = {
        'data': {
            'x': ['Hot Dwell', 'Cold Dwell'],
            # One y value per x category, rounded to 0.1 s for display.
            'y': [np.round(t_dwell1, 1), np.round(t_dwell2, 1)],
            'type': 'bar',
            'text': [f'Dwell #1 State<br>Pitch: {dwell1_state["pitch"]}<br><br>Dwell Time: {np.round(t_dwell1, 0):.0f}s<br>(Input)',
                     f'Dwell #2 State<br>Pitch: {dwell2_state["pitch"]}<br><br>Dwell Time: {np.round(t_dwell2, 0):.0f}s<br>(Calculated)'],
            'textposition': 'inside',
            'textfont': {'family': font, 'size': 24, 'color': 'white'},
            'marker': {'color': [colors[3], colors[0]]}
        },
        'layout':
            {
                'hovermode': "closest",
                'autosize': False,
                'width': 1200,
                'height': 600,
                'margin': {'l': 80, 'r': 50, 't': 50, 'b': 70},
                'title':
                    {
                        'text': 'Timbre Produced Dwell Balance',
                        'font': title_format,
                        'y': 0.98,
                        'x': 0.5,
                        'xanchor': 'center',
                        'yanchor': 'top'
                    },
                'yaxis':
                    {
                        'title':{'text': 'Dwell Time (Kiloseconds)','font': label_format},
                        'tickfont': axis_format,
                        'linecolor': '#666666',
                        'linewidth': 1,
                        'mirror': True,
                        'anchor': 'x',
                    },
                'xaxis':
                    {
                        'domain': [0, 1],
                        'tickfont': axis_format,
                        'linecolor': '#666666',
                        'linewidth': 1,
                        'mirror': True,
                        'showticklabels': True,
                        'anchor': 'y',
                    },
                'showlegend': False,
                'template': 'simple_white',
            },
    }
    return plot_object
def generate_step_2_plot_dict(plot_data, tstart, tstop, title, units='Celsius'):
    """Assemble the plotly figure dict for the step-2 dwell-guess comparison.

    Parameters
    ----------
    plot_data : list of dict
        Scattergl traces (e.g. from ``format_step_2_plot_data``).
    tstart, tstop
        X-axis range bounds, in a form accepted by CxoTime.
    title : str
        Figure title text.
    units : str
        Temperature unit label for the y-axis title (default 'Celsius').

    Returns
    -------
    dict
        Dict with 'data' and 'layout' keys for a single-panel figure with a
        legend (one entry per dwell-#2 guess trace).
    """
    plot_start = datetime.strptime(CxoTime(tstart).date, '%Y:%j:%H:%M:%S.%f')
    plot_stop = datetime.strptime(CxoTime(tstop).date, '%Y:%j:%H:%M:%S.%f')
    plot_object = {
        'data': plot_data,
        'layout':
            {
                'hovermode': "closest",
                'autosize': False,
                'width': 1200,
                'height': 600,
                'margin': {'l': 80, 'r': 50, 't': 50, 'b': 70},
                'title':
                    {
                        'text': title,
                        'font': title_format,
                        'y': 0.98,
                        'x': 0.5,
                        'xanchor': 'center',
                        'yanchor': 'top'
                    },
                'yaxis':
                    {
                        'title':
                            {
                                'text': f'Resulting Temperatures for<br>Dwell #2 Guesses ({units})',
                                'font': label_format
                            },
                        'tickfont': axis_format,
                        'zeroline': False,
                        'linecolor': '#666666',
                        'linewidth': 1,
                        'mirror': True,
                        'anchor': 'x',
                    },
                'xaxis':
                    {
                        'domain': [0, 1],
                        'tickfont': axis_format,
                        'tickformatstops': time_axis_format,
                        'zeroline': False,
                        'linecolor': '#666666',
                        'linewidth': 1,
                        'mirror': True,
                        'range': [plot_start, plot_stop],
                        'showticklabels': True,
                        'tickangle': 30,
                        'anchor': 'y',
                    },
                'showlegend': True,
                'template': 'simple_white',
            },
    }
    return plot_object
def format_step_2_plot_data(model_data, limit, tstart, tstop):
    """Build one temperature trace per dwell-#2 guess plus a dashed limit line.

    `model_data` maps each guessed dwell time to a results dict; the trace
    for each guess is read from ``results['model_results']['aacccdpt']``
    (note: the 'aacccdpt' msid is hard-coded here). Trace colors are
    interpolated between two palette endpoints, one color per guess.
    """
    plot_start = datetime.strptime(CxoTime(tstart).date, '%Y:%j:%H:%M:%S.%f')
    plot_stop = datetime.strptime(CxoTime(tstop).date, '%Y:%j:%H:%M:%S.%f')
    seq_colors = px.colors.n_colors(hex_to_rgba_str(colors[3], 1),
                                       hex_to_rgba_str(colors[0], 1),
                                       len(model_data),
                                       colortype='rgb')
    plot_data = []
    for (t, results), trace_color in zip(model_data.items(), seq_colors):
        model_results = results['model_results']['aacccdpt']
        trace = {
            'type': 'scattergl',
            'x': format_dates(model_results.times),
            'y': model_results.mvals,
            'name': f't_dwell2 = {t:.1f}',
            'line': {'color': trace_color, 'width': 2, 'shape': 'hv'},
            'mode': 'lines',
            'showlegend': True,
            'xaxis': 'x',
            'yaxis': 'y',
        }
        plot_data.append(trace)
    # Dashed horizontal limit line spanning the full plotted range.
    limit_trace = {
        'type': 'scattergl',
        'x': [plot_start, plot_stop],
        'y': [limit, limit],
        'name': 'Limit',
        'line': {'color': 'black', 'width': 2, 'shape': 'hv', 'dash': 'dash'},
        'mode': 'lines',
        'showlegend': False,
        'xaxis': 'x',
        'yaxis': 'y',
    }
    plot_data.append(limit_trace)
    return plot_data
def generate_step_3_max_temp_plot_dict(output, title, t_dwell2, units='Celsius'):
    """Assemble the plotly figure dict of max temperature vs. dwell-#2 guess.

    Parameters
    ----------
    output
        Mapping/recarray with 'duration2' (guessed dwell-#2 durations) and
        'max' (resulting max temperatures) fields.
    title : str
        Figure title text.
    t_dwell2 : float
        The converged dwell-#2 duration, marked with a dashed vertical line.
    units : str
        Temperature unit label for the y-axis title (default 'Celsius').

    Returns
    -------
    dict
        Dict with 'data' and 'layout' keys.

    NOTE(review): the -6.5 limit line/label and the annotation x position
    (65000) are hard-coded here rather than derived from arguments.
    """
    # One interpolated color per guess, used as the marker colorscale.
    seq_colors = px.colors.n_colors(hex_to_rgba_str(colors[3], 1),
                                       hex_to_rgba_str(colors[0], 1),
                                       len(output),
                                       colortype='rgb')
    plot_object = {
        'data': [
            {
                'type': 'scattergl',
                'x': output['duration2'],
                'y': output['max'],
                'name': f'Dwell 2 Duration Guesses',
                'line': {'color': 'black', 'width': 2, 'shape': 'hv'},
                'marker': {
                    'size': 24,
                    'cmax': max(output['duration2']),
                    'cmin': min(output['duration2']),
                    'color': output['duration2'],
                    # Map marker color to duration via the interpolated palette.
                    'colorscale': list(zip(np.linspace(0, 1, 10), seq_colors)),
                },
                'mode': 'markers',
                'showlegend': False,
                'xaxis': 'x',
                'yaxis': 'y',
            }
        ],
        'layout':
            {
                'hovermode': "closest",
                'autosize': False,
                'width': 1200,
                'height': 600,
                'margin': {'l': 80, 'r': 50, 't': 50, 'b': 70},
                'title':
                    {
                        'text': title,
                        'font': title_format,
                        'y': 0.98,
                        'x': 0.5,
                        'xanchor': 'center',
                        'yanchor': 'top'
                    },
                'yaxis':
                    {
                        'title':
                            {
                                'text': f'Temperature ({units})',
                                'font': label_format
                            },
                        'tickfont': axis_format,
                        'zeroline': False,
                        'linecolor': '#666666',
                        'linewidth': 1,
                        'mirror': True,
                        'anchor': 'x',
                    },
                'xaxis':
                    {
                        'domain': [0, 1],
                        'tickfont': axis_format,
                        # 'tickformatstops': time_axis_format,
                        'zeroline': False,
                        'linecolor': '#666666',
                        'linewidth': 1,
                        'mirror': True,
                        # 'range': [plot_start, plot_stop],
                        'showticklabels': True,
                        # 'tickangle': 30,
                        'anchor': 'y',
                    },
                'showlegend': True,
                'template': 'simple_white',
                # Dashed guide lines: horizontal at the -6.5 limit, vertical
                # at the converged t_dwell2.
                'shapes': [
                    {
                        'line': {'color': 'black', 'dash': 'dash', 'width': 3},
                        'opacity': 0.65,
                        'type': 'line',
                        'x0': 0,
                        'x1': 1,
                        'y0': -6.5,
                        'y1': -6.5,
                        'xref': 'x domain',
                        'yref': 'y',
                    },
                    {
                        'line': {'color': 'black', 'dash': 'dash', 'width': 3},
                        'opacity': 0.65,
                        'type': 'line',
                        'x0': t_dwell2,
                        'x1': t_dwell2,
                        'y0': 0,
                        'y1': 1,
                        'xref': 'x',
                        'yref': 'y domain',
                    },
                ],
                'annotations': [
                    {
                        'x': 65000,
                        'y': -6.485,
                        'text': 'Limit = -6.5C',
                        'showarrow': False,
                        'xref': "x",
                        'yref': "y",
                        'xanchor': "center",
                        'yanchor': "bottom",
                        'font': label_format
                    },
                    {
                        'x': t_dwell2 - 200,
                        'y': -7.15,
                        'text': f't_dwell2 = {t_dwell2:.0f}s',
                        'showarrow': False,
                        'xref': "x",
                        'yref': "y",
                        'xanchor': "right",
                        'yanchor': "bottom",
                        'font': label_format,
                        'textangle': -90,
                    },
                ],
            },
    }
    return plot_object
def generate_timbre_dwell_plot_data(results, filter_set):
    """Build one lines+markers trace per filter combination in `filter_set`.

    Each entry in `filter_set` is a mapping of column name -> required value;
    rows of `results` matching all conditions form one trace of t_dwell2 vs
    pitch2. Trace color depends on which dwell state is hotter (median of
    'hotter_state' over the converged, selected rows).
    """
    plot_data = []
    for plot_set in filter_set:
        # Start with everything selected, then AND in each condition.
        mask = np.ones(len(results), dtype=bool)
        label_parts = []
        for key, value in plot_set.items():
            mask = mask & (results[key] == value)
            label_parts.append(f'{str(key).capitalize()}: {value}')
        hotter = np.median(results['hotter_state'][mask & results['converged']])
        plot_color = colors[0] if hotter == 1 else colors[3]
        trace = {
            'type': 'scattergl',
            'x': results['pitch2'][mask],
            'y': results['t_dwell2'][mask],
            'name': ' '.join(label_parts),
            'line': {'color': plot_color, 'width': 2},
            'marker': {
                'size': 10,
                'color': plot_color,
            },
            'mode': 'lines+markers',
            'xaxis': 'x',
            'yaxis': 'y',
        }
        plot_data.append(trace)
    return plot_data
def generate_timber_output_plot_dict(plot_data, title, legend=True):
    """Assemble the plotly figure dict for Timbre dwell-time-vs-pitch output.

    Parameters
    ----------
    plot_data : list of dict
        Scattergl traces (e.g. from ``generate_timbre_dwell_plot_data``).
    title : str
        Figure title text.
    legend : bool
        Whether to display the legend (default True).

    Returns
    -------
    dict
        Dict with 'data' and 'layout' keys. The x-axis range is fixed to
        [45, 180] (pitch degrees are implied by the callers' data, but only
        the numeric range is set here).
    """
    plot_object = {
        'data': plot_data,
        'layout':
            {
                'hovermode': "closest",
                'autosize': False,
                'width': 1200,
                'height': 600,
                'margin': {'l': 80, 'r': 50, 't': 50, 'b': 70},
                'title':
                    {
                        'text': title,
                        'font': title_format,
                        'y': 0.98,
                        'x': 0.5,
                        'xanchor': 'center',
                        'yanchor': 'top'
                    },
                'yaxis':
                    {
                        'title':
                            {
                                'text': f'Dwell Time (s)',
                                'font': label_format
                            },
                        'tickfont': axis_format,
                        'zeroline': False,
                        'linecolor': '#666666',
                        'linewidth': 1,
                        'mirror': True,
                        'anchor': 'x',
                        'showgrid': True,
                    },
                'xaxis':
                    {
                        'domain': [0, 1],
                        'tickfont': axis_format,
                        # 'tickformatstops': time_axis_format,
                        'zeroline': False,
                        'linecolor': '#666666',
                        'linewidth': 1,
                        'mirror': True,
                        'range': [45, 180],
                        'showticklabels': True,
                        # 'tickangle': 30,
                        'anchor': 'y',
                        'showgrid': True,
                    },
                'showlegend': legend,
                'template': 'simple_white',
            },
    }
    return plot_object
| [
"numpy.median",
"cxotime.CxoTime",
"datetime.datetime.strptime",
"numpy.linspace",
"numpy.round"
] | [((8880, 8895), 'cxotime.CxoTime', 'CxoTime', (['tstart'], {}), '(tstart)\n', (8887, 8895), False, 'from cxotime import CxoTime\n'), ((8913, 8927), 'cxotime.CxoTime', 'CxoTime', (['tstop'], {}), '(tstop)\n', (8920, 8927), False, 'from cxotime import CxoTime\n'), ((1510, 1551), 'datetime.datetime.strptime', 'datetime.strptime', (['d', '"""%Y:%j:%H:%M:%S.%f"""'], {}), "(d, '%Y:%j:%H:%M:%S.%f')\n", (1527, 1551), False, 'from datetime import datetime\n'), ((3064, 3079), 'cxotime.CxoTime', 'CxoTime', (['tstart'], {}), '(tstart)\n', (3071, 3079), False, 'from cxotime import CxoTime\n'), ((3141, 3155), 'cxotime.CxoTime', 'CxoTime', (['tstop'], {}), '(tstop)\n', (3148, 3155), False, 'from cxotime import CxoTime\n'), ((7963, 7977), 'cxotime.CxoTime', 'CxoTime', (['tstop'], {}), '(tstop)\n', (7970, 7977), False, 'from cxotime import CxoTime\n'), ((13704, 13719), 'cxotime.CxoTime', 'CxoTime', (['tstart'], {}), '(tstart)\n', (13711, 13719), False, 'from cxotime import CxoTime\n'), ((13781, 13795), 'cxotime.CxoTime', 'CxoTime', (['tstop'], {}), '(tstop)\n', (13788, 13795), False, 'from cxotime import CxoTime\n'), ((15782, 15797), 'cxotime.CxoTime', 'CxoTime', (['tstart'], {}), '(tstart)\n', (15789, 15797), False, 'from cxotime import CxoTime\n'), ((15859, 15873), 'cxotime.CxoTime', 'CxoTime', (['tstop'], {}), '(tstop)\n', (15866, 15873), False, 'from cxotime import CxoTime\n'), ((22171, 22233), 'numpy.median', 'np.median', (["results['hotter_state'][ind & results['converged']]"], {}), "(results['hotter_state'][ind & results['converged']])\n", (22180, 22233), True, 'import numpy as np\n'), ((9041, 9054), 'cxotime.CxoTime', 'CxoTime', (['tmid'], {}), '(tmid)\n', (9048, 9054), False, 'from cxotime import CxoTime\n'), ((9317, 9331), 'cxotime.CxoTime', 'CxoTime', (['ttext'], {}), '(ttext)\n', (9324, 9331), False, 'from cxotime import CxoTime\n'), ((9566, 9580), 'cxotime.CxoTime', 'CxoTime', (['tstop'], {}), '(tstop)\n', (9573, 9580), False, 'from cxotime import CxoTime\n'), 
((9843, 9857), 'cxotime.CxoTime', 'CxoTime', (['ttext'], {}), '(ttext)\n', (9850, 9857), False, 'from cxotime import CxoTime\n'), ((10090, 10104), 'cxotime.CxoTime', 'CxoTime', (['ttext'], {}), '(ttext)\n', (10097, 10104), False, 'from cxotime import CxoTime\n'), ((10536, 10549), 'cxotime.CxoTime', 'CxoTime', (['xloc'], {}), '(xloc)\n', (10543, 10549), False, 'from cxotime import CxoTime\n'), ((11132, 11145), 'cxotime.CxoTime', 'CxoTime', (['xloc'], {}), '(xloc)\n', (11139, 11145), False, 'from cxotime import CxoTime\n'), ((11624, 11645), 'numpy.round', 'np.round', (['t_dwell1', '(1)'], {}), '(t_dwell1, 1)\n', (11632, 11645), True, 'import numpy as np\n'), ((1561, 1581), 'cxotime.CxoTime', 'CxoTime', (['cheta_dates'], {}), '(cheta_dates)\n', (1568, 1581), False, 'from cxotime import CxoTime\n'), ((8151, 8166), 'cxotime.CxoTime', 'CxoTime', (['tstart'], {}), '(tstart)\n', (8158, 8166), False, 'from cxotime import CxoTime\n'), ((8227, 8241), 'cxotime.CxoTime', 'CxoTime', (['tstop'], {}), '(tstop)\n', (8234, 8241), False, 'from cxotime import CxoTime\n'), ((8532, 8547), 'cxotime.CxoTime', 'CxoTime', (['tstart'], {}), '(tstart)\n', (8539, 8547), False, 'from cxotime import CxoTime\n'), ((8612, 8626), 'cxotime.CxoTime', 'CxoTime', (['tstop'], {}), '(tstop)\n', (8619, 8626), False, 'from cxotime import CxoTime\n'), ((11780, 11801), 'numpy.round', 'np.round', (['t_dwell1', '(0)'], {}), '(t_dwell1, 0)\n', (11788, 11801), True, 'import numpy as np\n'), ((11913, 11934), 'numpy.round', 'np.round', (['t_dwell2', '(0)'], {}), '(t_dwell2, 0)\n', (11921, 11934), True, 'import numpy as np\n'), ((7162, 7173), 'cxotime.CxoTime', 'CxoTime', (['t1'], {}), '(t1)\n', (7169, 7173), False, 'from cxotime import CxoTime\n'), ((7242, 7253), 'cxotime.CxoTime', 'CxoTime', (['t2'], {}), '(t2)\n', (7249, 7253), False, 'from cxotime import CxoTime\n'), ((7610, 7621), 'cxotime.CxoTime', 'CxoTime', (['t1'], {}), '(t1)\n', (7617, 7621), False, 'from cxotime import CxoTime\n'), ((7690, 7701), 
'cxotime.CxoTime', 'CxoTime', (['t2'], {}), '(t2)\n', (7697, 7701), False, 'from cxotime import CxoTime\n'), ((17890, 17911), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (17901, 17911), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from scipy.stats import spearmanr, rankdata
from shenshang.simulate import simulate_correlation, correlate_xy
# Each case: (strength, x, y, expected reordering of y after correlate_xy).
@pytest.mark.parametrize(
    'strength, x, y, exp',
    [(1,
      np.array([5, 8, 2, 4, 3, 0, 1, 6, 7, 9]),
      np.array([9, 1, 4, 5, 2, 3, 6, 8, 0, 7]),
      np.array([5, 8, 2, 4, 3, 0, 1, 6, 7, 9])),
     (-1,
      np.array([5, 8, 2, 4, 3, 0, 1, 6, 7, 9]),
      np.array([9, 1, 4, 5, 2, 3, 6, 8, 0, 7]),
      np.array([4, 1, 7, 5, 6, 9, 8, 3, 2, 0])),
     (0,
      np.array([5, 8, 2, 4, 3, 0, 1, 6, 7, 9]),
      np.array([9, 1, 4, 5, 2, 3, 6, 8, 0, 7]),
      np.array([9, 1, 4, 5, 2, 3, 6, 8, 0, 7])),
     (0.5,
      np.array([5, 8, 2, 4, 3, 0, 1, 6, 7, 9]),
      np.array([9, 1, 4, 5, 2, 3, 6, 8, 0, 7]),
      np.array([9, 8, 1, 5, 2, 3, 0, 4, 6, 7])),
     (-0.5,
      np.array([5, 8, 2, 4, 3, 0, 1, 6, 7, 9]),
      np.array([9, 1, 4, 5, 2, 3, 6, 8, 0, 7]),
      np.array([9, 0, 6, 5, 2, 3, 8, 4, 1, 7])),
     (-0.8,
      np.array([0] * 5 + list(range(10))),
      np.array([0] * 5 + list(range(9, -1, -1))),
      np.array([9, 8, 7, 0, 6, 5, 4, 2, 1, 0, 0, 3, 0, 0, 0]))])
def test_correlate_xy(strength, x, y, exp):
    """correlate_xy(x, y, strength) should reorder y to match `exp` exactly
    (with seed=9), and the entries it actually moved should be rank-aligned
    with x (rank-reversed for negative strength)."""
    assert np.all(correlate_xy(x, y, strength, inplace=False, seed=9) == exp)
    # print(spearmanr(x, y))
    # print(spearmanr(x, exp))
    # Restrict the rank check to positions where y was changed.
    pos = y != exp
    sx = rankdata(x[pos], 'ordinal')
    if strength < 0:
        sy = rankdata(-exp[pos], 'ordinal')
    else:
        sy = rankdata(exp[pos], 'ordinal')
    np.testing.assert_array_equal(sx, sy)
def test_simulate_cor():
    """simulate_correlation with structure ((2, 0.5), (2, -0.5)) should pick
    the expected row pairs (obs[1]) and assign the expected correlation
    strengths (obs[2]) when seeded with 7."""
    m = np.array([[ 0, 0, 37, 28, 63, 0, 0, 34, 4, 90],
                  [46, 75, 77, 78, 18, 5, 5, 0, 26, 50],
                  [73, 0, 0, 89, 24, 81, 48, 0, 26, 25],
                  [28, 50, 25, 91, 46, 0, 2, 76, 0, 92],
                  [81, 60, 54, 40, 12, 66, 51, 23, 0, 92],
                  [66, 0, 64, 43, 83, 35, 6, 89, 0, 78],
                  [97, 52, 26, 89, 24, 0, 25, 15, 92, 53],
                  [88, 0, 49, 16, 0, 92, 61, 15, 0, 78],
                  [ 0, 64, 0, 76, 43, 3, 0, 88, 31, 99],
                  [24, 69, 57, 17, 96, 47, 58, 29, 80, 15]])
    # Two pairs correlated at +0.5 and two pairs at -0.5.
    structure = ((2, 0.5), (2, -0.5))
    obs = simulate_correlation(m, structure, inplace=False, seed=7)
    np.testing.assert_array_equal(obs[1], np.array([[7, 8],
                                                    [9, 5],
                                                    [6, 3],
                                                    [4, 2]]))
    np.testing.assert_array_equal(obs[2], np.array([ 0.5, 0.5, -0.5, -0.5]))
| [
"shenshang.simulate.correlate_xy",
"numpy.testing.assert_array_equal",
"scipy.stats.rankdata",
"numpy.array",
"shenshang.simulate.simulate_correlation"
] | [((1355, 1382), 'scipy.stats.rankdata', 'rankdata', (['x[pos]', '"""ordinal"""'], {}), "(x[pos], 'ordinal')\n", (1363, 1382), False, 'from scipy.stats import spearmanr, rankdata\n'), ((1505, 1542), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['sx', 'sy'], {}), '(sx, sy)\n', (1534, 1542), True, 'import numpy as np\n'), ((1578, 2003), 'numpy.array', 'np.array', (['[[0, 0, 37, 28, 63, 0, 0, 34, 4, 90], [46, 75, 77, 78, 18, 5, 5, 0, 26, 50],\n [73, 0, 0, 89, 24, 81, 48, 0, 26, 25], [28, 50, 25, 91, 46, 0, 2, 76, 0,\n 92], [81, 60, 54, 40, 12, 66, 51, 23, 0, 92], [66, 0, 64, 43, 83, 35, 6,\n 89, 0, 78], [97, 52, 26, 89, 24, 0, 25, 15, 92, 53], [88, 0, 49, 16, 0,\n 92, 61, 15, 0, 78], [0, 64, 0, 76, 43, 3, 0, 88, 31, 99], [24, 69, 57, \n 17, 96, 47, 58, 29, 80, 15]]'], {}), '([[0, 0, 37, 28, 63, 0, 0, 34, 4, 90], [46, 75, 77, 78, 18, 5, 5, 0,\n 26, 50], [73, 0, 0, 89, 24, 81, 48, 0, 26, 25], [28, 50, 25, 91, 46, 0,\n 2, 76, 0, 92], [81, 60, 54, 40, 12, 66, 51, 23, 0, 92], [66, 0, 64, 43,\n 83, 35, 6, 89, 0, 78], [97, 52, 26, 89, 24, 0, 25, 15, 92, 53], [88, 0,\n 49, 16, 0, 92, 61, 15, 0, 78], [0, 64, 0, 76, 43, 3, 0, 88, 31, 99], [\n 24, 69, 57, 17, 96, 47, 58, 29, 80, 15]])\n', (1586, 2003), True, 'import numpy as np\n'), ((2219, 2276), 'shenshang.simulate.simulate_correlation', 'simulate_correlation', (['m', 'structure'], {'inplace': '(False)', 'seed': '(7)'}), '(m, structure, inplace=False, seed=7)\n', (2239, 2276), False, 'from shenshang.simulate import simulate_correlation, correlate_xy\n'), ((1417, 1447), 'scipy.stats.rankdata', 'rankdata', (['(-exp[pos])', '"""ordinal"""'], {}), "(-exp[pos], 'ordinal')\n", (1425, 1447), False, 'from scipy.stats import spearmanr, rankdata\n'), ((1471, 1500), 'scipy.stats.rankdata', 'rankdata', (['exp[pos]', '"""ordinal"""'], {}), "(exp[pos], 'ordinal')\n", (1479, 1500), False, 'from scipy.stats import spearmanr, rankdata\n'), ((2319, 2361), 'numpy.array', 'np.array', (['[[7, 8], [9, 5], [6, 3], [4, 
2]]'], {}), '([[7, 8], [9, 5], [6, 3], [4, 2]])\n', (2327, 2361), True, 'import numpy as np\n'), ((2561, 2593), 'numpy.array', 'np.array', (['[0.5, 0.5, -0.5, -0.5]'], {}), '([0.5, 0.5, -0.5, -0.5])\n', (2569, 2593), True, 'import numpy as np\n'), ((1207, 1258), 'shenshang.simulate.correlate_xy', 'correlate_xy', (['x', 'y', 'strength'], {'inplace': '(False)', 'seed': '(9)'}), '(x, y, strength, inplace=False, seed=9)\n', (1219, 1258), False, 'from shenshang.simulate import simulate_correlation, correlate_xy\n'), ((214, 254), 'numpy.array', 'np.array', (['[5, 8, 2, 4, 3, 0, 1, 6, 7, 9]'], {}), '([5, 8, 2, 4, 3, 0, 1, 6, 7, 9])\n', (222, 254), True, 'import numpy as np\n'), ((262, 302), 'numpy.array', 'np.array', (['[9, 1, 4, 5, 2, 3, 6, 8, 0, 7]'], {}), '([9, 1, 4, 5, 2, 3, 6, 8, 0, 7])\n', (270, 302), True, 'import numpy as np\n'), ((310, 350), 'numpy.array', 'np.array', (['[5, 8, 2, 4, 3, 0, 1, 6, 7, 9]'], {}), '([5, 8, 2, 4, 3, 0, 1, 6, 7, 9])\n', (318, 350), True, 'import numpy as np\n'), ((369, 409), 'numpy.array', 'np.array', (['[5, 8, 2, 4, 3, 0, 1, 6, 7, 9]'], {}), '([5, 8, 2, 4, 3, 0, 1, 6, 7, 9])\n', (377, 409), True, 'import numpy as np\n'), ((417, 457), 'numpy.array', 'np.array', (['[9, 1, 4, 5, 2, 3, 6, 8, 0, 7]'], {}), '([9, 1, 4, 5, 2, 3, 6, 8, 0, 7])\n', (425, 457), True, 'import numpy as np\n'), ((465, 505), 'numpy.array', 'np.array', (['[4, 1, 7, 5, 6, 9, 8, 3, 2, 0]'], {}), '([4, 1, 7, 5, 6, 9, 8, 3, 2, 0])\n', (473, 505), True, 'import numpy as np\n'), ((523, 563), 'numpy.array', 'np.array', (['[5, 8, 2, 4, 3, 0, 1, 6, 7, 9]'], {}), '([5, 8, 2, 4, 3, 0, 1, 6, 7, 9])\n', (531, 563), True, 'import numpy as np\n'), ((571, 611), 'numpy.array', 'np.array', (['[9, 1, 4, 5, 2, 3, 6, 8, 0, 7]'], {}), '([9, 1, 4, 5, 2, 3, 6, 8, 0, 7])\n', (579, 611), True, 'import numpy as np\n'), ((619, 659), 'numpy.array', 'np.array', (['[9, 1, 4, 5, 2, 3, 6, 8, 0, 7]'], {}), '([9, 1, 4, 5, 2, 3, 6, 8, 0, 7])\n', (627, 659), True, 'import numpy as np\n'), ((679, 719), 
'numpy.array', 'np.array', (['[5, 8, 2, 4, 3, 0, 1, 6, 7, 9]'], {}), '([5, 8, 2, 4, 3, 0, 1, 6, 7, 9])\n', (687, 719), True, 'import numpy as np\n'), ((727, 767), 'numpy.array', 'np.array', (['[9, 1, 4, 5, 2, 3, 6, 8, 0, 7]'], {}), '([9, 1, 4, 5, 2, 3, 6, 8, 0, 7])\n', (735, 767), True, 'import numpy as np\n'), ((775, 815), 'numpy.array', 'np.array', (['[9, 8, 1, 5, 2, 3, 0, 4, 6, 7]'], {}), '([9, 8, 1, 5, 2, 3, 0, 4, 6, 7])\n', (783, 815), True, 'import numpy as np\n'), ((836, 876), 'numpy.array', 'np.array', (['[5, 8, 2, 4, 3, 0, 1, 6, 7, 9]'], {}), '([5, 8, 2, 4, 3, 0, 1, 6, 7, 9])\n', (844, 876), True, 'import numpy as np\n'), ((884, 924), 'numpy.array', 'np.array', (['[9, 1, 4, 5, 2, 3, 6, 8, 0, 7]'], {}), '([9, 1, 4, 5, 2, 3, 6, 8, 0, 7])\n', (892, 924), True, 'import numpy as np\n'), ((932, 972), 'numpy.array', 'np.array', (['[9, 0, 6, 5, 2, 3, 8, 4, 1, 7]'], {}), '([9, 0, 6, 5, 2, 3, 8, 4, 1, 7])\n', (940, 972), True, 'import numpy as np\n'), ((1086, 1141), 'numpy.array', 'np.array', (['[9, 8, 7, 0, 6, 5, 4, 2, 1, 0, 0, 3, 0, 0, 0]'], {}), '([9, 8, 7, 0, 6, 5, 4, 2, 1, 0, 0, 3, 0, 0, 0])\n', (1094, 1141), True, 'import numpy as np\n')] |
"""
Module for k-Lines extraction and k-Label manipulation
Recall that the band-structure output contains NO information about the k-points.
So we must provide that ourselves, and reading the dftb_pin.hsd (the parsed
input) seems the best way to do so. We also need to figure out what to do
with equivalent points, or points where a new path starts.
Finally, points internal to the Brillouin zone are labeled with Greek letters,
which should be rendered properly.
"""
import sys
from os.path import normpath, expanduser, isdir
from os.path import join as joinpath
import logging
import numpy as np
from collections import defaultdict
from skpar.dftbutils.lattice import getSymPtLabel
logger = logging.getLogger(__name__)
def get_klines(lattice, hsdfile='dftb_pin.hsd', workdir=None, *args, **kwargs):
    """
    This routine analyses the KPointsAndWeights stanza in the input file of DFTB+
    (given as an input argument *hsdfile*), and returns the k-path, based on
    the lattice object (given as an input argument *lattice*).
    If the file name is not provided, the routine looks in the default
    dftb_pin.hsd, i.e. in the parsed file!
    The routine returns a list of tuples (kLines) and a dictionary (kLinesDict)
    with the symmetry points and indexes of the corresponding k-point in the
    output band-structure.
    kLines is ordered, as per the appearence of symmetry points in the hsd input, e.g.:
        * [('L', 0), ('Γ', 50), ('X', 110), ('U', 130), ('K', 131), ('Γ', 181)]
    therefore it may contain repetitions (e.g. for 'Γ', in this case).
    kLinesDict returns a dictionary of lists, so that there's a single entry for
    non-repetitive k-points, and more than one entries in the list of repetitive
    symmetry k-points, e.g. (see for 'Γ' above):
        * {'X': [110], 'K': [131], 'U': [130], 'L': [0], 'Γ': [50, 181]}

    Args:
        lattice: lattice object understood by getSymPtLabel().
        hsdfile (str): name of the (parsed) DFTB+ input file.
        workdir (str): optional directory containing *hsdfile*;
            '~' is expanded.

    Returns:
        tuple: (kLines, kLinesDict) as described above.
    """
    kLines_dftb = list()
    if workdir is not None:
        fhsd = normpath(expanduser(joinpath(workdir, hsdfile)))
    else:
        fhsd = hsdfile
    with open(fhsd, 'r') as fh:
        for line in fh:
            # normalise whitespace/case before matching the stanza opener
            if 'KPointsAndWeights = Klines {'.lower() in ' '.join(line.lower().split()):
                extraline = next(fh)
                while not extraline.strip().startswith("}"):
                    # skip over commented line, in case of non-parsed .hsd file
                    while extraline.strip().startswith("#"):
                        extraline = next(fh)
                    # BUGFIX: keep the FULL token list here. The original
                    # truncated it with [:4], so the trailing-"}" check below
                    # (words[4] == "}") could never fire, and a closing brace
                    # on the same line as the last k-point made next(fh)
                    # raise StopIteration at end of file.
                    words = extraline.split()
                    # first token: number of points in segment; next 3: k-components
                    nk, k = int(words[0]), [float(w) for w in words[1:4]]
                    kLabel = getSymPtLabel(k, lattice)
                    if kLabel:
                        kLines_dftb.append((kLabel, nk))
                    if len(words) > 4 and words[4] == "}":
                        extraline = "}"
                    else:
                        extraline = next(fh)
    # At this stage, kLines_dftb contains per-segment point counts;
    # convert them to cumulative 0-based indexes along the band structure.
    kLines = [(lbl, sum([_dist for (_lbl, _dist) in kLines_dftb[:i+1]])-1)
              for (i, (lbl, dist)) in enumerate(kLines_dftb)]
    klbls = set([lbl for (lbl, nn) in kLines])
    kLinesDict = dict.fromkeys(klbls)
    for lbl, nn in kLines:
        if kLinesDict[lbl] is not None:
            kLinesDict[lbl].append(nn)
        else:
            kLinesDict[lbl] = [nn, ]
    output = kLines, kLinesDict
    return output
def greekLabels(kLines):
    """
    Replace any 'Gamma' label with its unicode symbol 'Γ'.

    Accepts either a list of ('label', index) tuples (i.e. kLines) or a
    plain list of label strings, and returns the same structure back.
    Checks for other k-points with greek labels, beyond Γ (i.e. points
    inside the BZ, not at the faces), could be added in the future.
    """
    try:
        labels, indexes = list(zip(*kLines))
    except ValueError:
        # input was a flat list of label strings
        labels, indexes = kLines, None
    labels = ['Γ' if label == 'Gamma' else label for label in labels]
    if indexes is None:
        return labels
    return list(zip(labels, indexes))
def get_kvec_abscissa(lat, kLines):
    """Return abscissa values for the reciprocal lengths corresponding
    to the k-vectors derived from kLines.

    Args:
        lat: lattice object providing get_kcomp(label) and get_kvec(kcomp).
        kLines: list of (label, index) tuples; indexes are cumulative
            positions of the symmetry points along the band structure.

    Returns:
        tuple: (xx, xt, xl) — the abscissa values (1D ndarray of length
        kLines[-1][-1]+1), the tick positions, and the tick labels.
        Discontinuities in the path (consecutive symmetry points with no
        segment between them) produce a combined 'A|B' tick label.
    """
    xx = []
    xt = []
    xl = []
    skipticklabel = False
    logger.debug('Constructing k-vector abscissa for BS plotting:')
    logger.debug('kLines:\n{}'.format(kLines))
    pos = 0
    xx.append(np.atleast_1d(pos))
    for item1, item2 in zip(kLines[:-1], kLines[1:]):
        l1, i1 = item1
        kp1 = lat.get_kcomp(l1)
        l2, i2 = item2
        kp2 = lat.get_kcomp(l2)
        nseg = i2 - i1
        if l1 == 'Gamma':
            l1 = 'Γ'
        if l2 == 'Gamma':
            l2 = 'Γ'
        if nseg > 1:
            # genuine segment: subdivide its reciprocal length into nseg steps
            seglen = np.linalg.norm(lat.get_kvec(kp2-kp1))
            xsegm, delta = np.linspace(0, seglen, nseg+1, retstep=True)
            if not skipticklabel:
                xt.append(pos)
                xl.append(l1)
            else:
                # tick already emitted as part of a combined 'A|B' label
                skipticklabel = False
        else:
            # path discontinuity: zero-length segment, combined tick label
            seglen = 0
            delta = 0
            xsegm = np.zeros(2)
            xt.append(pos)
            if l1 == l2:
                xl.append(l1)
            else:
                xl.append('{}|{}'.format(l1, l2))
            skipticklabel=True
        logger.debug('{:>2s} -- {:2s}: {:8.3f} -- {:8.3f} : {:8.3f}/{:<3d}={:8.3f}'.
                format(l1, l2, pos+xsegm[0], pos+xsegm[-1], seglen, len(xsegm), delta))
        # drop xsegm[0]: it coincides with the last point of the previous segment
        xx.append(pos+xsegm[1:])
        pos += seglen
    # append the tick and label for the last point
    xt.append(pos)
    xl.append(l2)
    # BUGFIX/cleanup: the original looped `for item in xx: item = np.array(item)`
    # which only re-bound the loop variable and had no effect; np.concatenate
    # accepts the list of arrays directly.
    xx = np.concatenate(xx)
    assert xx.shape == (kLines[-1][-1]+1,), (xx.shape, kLines)
    logger.debug('Tick labels: {}'.format(', '.join(['{}:{:.3f}'.format(l,t) for l,t in zip(xl, xt)])))
    return xx, xt, xl
| [
"skpar.dftbutils.lattice.getSymPtLabel",
"numpy.zeros",
"logging.getLogger",
"numpy.array",
"numpy.linspace",
"numpy.atleast_1d",
"os.path.join",
"numpy.concatenate"
] | [((673, 700), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (690, 700), False, 'import logging\n'), ((6063, 6081), 'numpy.concatenate', 'np.concatenate', (['xx'], {}), '(xx)\n', (6077, 6081), True, 'import numpy as np\n'), ((4804, 4822), 'numpy.atleast_1d', 'np.atleast_1d', (['pos'], {}), '(pos)\n', (4817, 4822), True, 'import numpy as np\n'), ((6039, 6053), 'numpy.array', 'np.array', (['item'], {}), '(item)\n', (6047, 6053), True, 'import numpy as np\n'), ((5212, 5258), 'numpy.linspace', 'np.linspace', (['(0)', 'seglen', '(nseg + 1)'], {'retstep': '(True)'}), '(0, seglen, nseg + 1, retstep=True)\n', (5223, 5258), True, 'import numpy as np\n'), ((5489, 5500), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (5497, 5500), True, 'import numpy as np\n'), ((1946, 1972), 'os.path.join', 'joinpath', (['workdir', 'hsdfile'], {}), '(workdir, hsdfile)\n', (1954, 1972), True, 'from os.path import join as joinpath\n'), ((2589, 2614), 'skpar.dftbutils.lattice.getSymPtLabel', 'getSymPtLabel', (['k', 'lattice'], {}), '(k, lattice)\n', (2602, 2614), False, 'from skpar.dftbutils.lattice import getSymPtLabel\n')] |
import os
# Pin GPU enumeration to PCI bus order so device indices are stable across
# runs, and expose only GPU 1 to this process.
# BUGFIX: the original misspelled both the variable name ("CUDA_DEVICES_ORDER")
# and the value ("PCI_BUS_IS"); the CUDA runtime ignores unknown names/values,
# so the ordering setting had no effect.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
class pde_wan():
    """Weak-form adversarial solver for a 2nd-order nonlinear PDE.

    Trains two networks against each other in TensorFlow 1.x graph mode:
    DNN_u approximates the PDE solution u, and DNN_v acts as an adversarial
    test function v. The residual of the weak formulation, localised by a
    compactly-supported weight w(x), is minimised over u and maximised over v
    (class name suggests the WAN / weak adversarial network method — see the
    paired u_loss / v_loss in build()).

    The exact solution used for testing is u(x) = sin(la1*x1^2 + la2*x2^2),
    on the hypercube [low, up]^dim.
    """
    def __init__(self, dim, beta, N_int, N_bd):
        """Set hyper-parameters.

        Args:
            dim (int): spatial dimension of the problem domain.
            beta (float): penalty weight on the boundary loss term.
            N_int (int): number of interior collocation points per batch.
            N_bd (int): number of boundary collocation points per face.
        """
        # Imports are done lazily here and promoted to module scope via
        # `global` so the other methods can use np/tf/time directly.
        import numpy as np
        global np
        #
        import time
        global time
        #
        import tensorflow as tf
        global tf
        #
        import matplotlib.pyplot as plt
        global plt
        #
        from scipy.interpolate import griddata
        global griddata
        #
        # Domain [low, up]^dim and frequencies of the exact solution.
        self.up= 1.0
        self.low= -1.0
        self.la1= np.pi/2
        self.la2= 1.0/2
        self.mesh_size= 50
        self.beta= beta
        #
        # Network sizes, inner-loop step counts and learning rates for the
        # adversarial test function (v) and the solution network (u).
        self.v_layer= 6
        self.v_hidden_size= 50
        self.v_step= 1
        self.v_rate= 0.04
        self.u_layer= 6
        self.u_hidden_size= 20
        self.u_step= 2
        self.u_rate= 0.015
        self.batch_size= N_int
        self.test_size= 5000
        self.bound_size= N_bd #(*2*dim)
        self.iteration= 20001
        self.dim= dim
    def sample_train(self, dm_size, bd_size, dim):
        """Draw one training batch of collocation points.

        Returns:
            tuple: (x_dm, f_dm, x_bd, u_bd, int_dm) — interior points, the
            PDE right-hand side f at those points, boundary points, the
            Dirichlet values g at those points, and the domain volume
            (all float32).
        """
        low, up, la1, la2= self.low, self.up, self.la1, self.la2
        #*********************************************************
        # collocation points in domain
        x_dm= np.random.uniform(low, up, [dm_size, dim])
        #*********************************************************
        # collocation points on boundary: bd_size points on each of the
        # 2*dim faces of the hypercube
        x_bd_list=[]
        for i in range(dim):
            x_bound= np.random.uniform(low, up, [bd_size, dim])
            x_bound[:,i]= up
            x_bd_list.append(x_bound)
            x_bound= np.random.uniform(low, up, [bd_size, dim])
            x_bound[:,i]= low
            x_bd_list.append(x_bound)
        x_bd= np.concatenate(x_bd_list, axis=0)
        #*********************************************************
        # volume of the integration domain
        int_dm= (up-low)**dim
        #********************************************************
        # Value of f(x): RHS derived analytically from the exact solution
        # (only x1 and x2 enter the nonlinearity; the rest via |x|^2)
        x1_pow= np.power(x_dm[:,0], 2)
        x2_pow= np.power(x_dm[:,1], 2)
        x_y= np.sum(np.power(x_dm, 2), 1)
        #
        f_dm= (4*(x1_pow*la1**2+x2_pow*la2**2)*(x_y+1)*np.sin(la1*x1_pow+la2*x2_pow)
              -(4*la1*x1_pow+4*la2*x2_pow+2*(la1+la2)*(1+x_y))*np.cos(la1*x1_pow+la2*x2_pow)
              +2*(x1_pow*la1**2+x2_pow*la2**2)*np.cos(la1*x1_pow+la2*x2_pow)**2)
        f_dm= np.reshape(f_dm, [-1, 1])
        #*********************************************************
        # Dirichlet boundary condition: exact solution evaluated on x_bd
        x1_pow= np.power(x_bd[:,0], 2)
        x2_pow= np.power(x_bd[:,1], 2)
        u_bd= np.sin(la1*x1_pow+la2*x2_pow)
        u_bd= np.reshape(u_bd, [-1, 1])
        #*********************************************************
        x_dm= np.float32(x_dm)
        x_bd= np.float32(x_bd)
        int_dm= np.float32(int_dm)
        f_dm= np.float32(f_dm)
        u_bd= np.float32(u_bd)
        return(x_dm, f_dm, x_bd, u_bd, int_dm)
    def sample_test(self, test_size, dim):
        """Draw test points and exact-solution values for error tracking.

        Returns:
            tuple: (mesh, x_dm, u_dm) — a 2-D plotting meshgrid, random
            test points (float32), and exact u at those points (float32).
        """
        low, up, la1, la2 = self.low, self.up, self.la1, self.la2
        #**********************************************************
        x_mesh= np.linspace(low, up, self.mesh_size)
        x_dm= np.random.uniform(low, up, [test_size, dim])
        #***********************************************************
        # Value of u(x): exact solution at the test points
        x1_pow= np.power(x_dm[:,0], 2)
        x2_pow= np.power(x_dm[:,1], 2)
        u_dm= np.sin(la1*x1_pow+la2*x2_pow)
        u_dm= np.reshape(u_dm, [-1, 1])
        #***********************************************************
        x_dm= np.float32(x_dm)
        u_dm= np.float32(u_dm)
        mesh= np.meshgrid(x_mesh, x_mesh)
        return(mesh, x_dm, u_dm)
    def DNN_u(self, x_in, out_size, name, reuse):
        """Solution network: MLP alternating softplus and sin activations."""
        h_size= self.u_hidden_size
        with tf.variable_scope(name, reuse= reuse):
            hi= tf.layers.dense(x_in, h_size, activation= tf.nn.tanh,
                                name='input_layer')
            hi= tf.layers.dense(hi, h_size, activation= tf.nn.tanh,
                                name='input_layer1')
            for i in range(self.u_layer):
                if i%2==0:
                    hi= tf.layers.dense(hi, h_size, activation= tf.nn.softplus,
                                        name='h_layer'+str(i))
                else:
                    hi= tf.sin(tf.layers.dense(hi, h_size), name='h_layer'+str(i))
            out= tf.layers.dense(hi, out_size, name='out_layer')
        return(out)
    def DNN_v(self, x_in, out_size, name, reuse):
        """Adversarial test-function network; same topology as DNN_u but
        with its own (wider) hidden size."""
        h_size= self.v_hidden_size
        with tf.variable_scope(name, reuse= reuse):
            hi= tf.layers.dense(x_in, h_size, activation= tf.nn.tanh,
                                name='input_layer')
            hi= tf.layers.dense(hi, h_size, activation= tf.nn.tanh,
                                name='input_layer1')
            for i in range(self.v_layer):
                if i%2==0:
                    hi= tf.layers.dense(hi, h_size, activation= tf.nn.softplus,
                                        name='h_layer'+str(i))
                else:
                    hi= tf.sin(tf.layers.dense(hi, h_size), name='h_layer'+str(i))
            out= tf.layers.dense(hi, out_size, name='out_layer')
        return(out)
    def grad_u(self, x, out_size, name):
        """Return u(x) and its spatial gradient du/dx."""
        #**************************************
        # u(x,y)
        fun_u= self.DNN_u(x, out_size, name, tf.AUTO_REUSE)
        #*************************************
        # grad_u(x,y)
        grad_u= tf.gradients(fun_u, x, unconnected_gradients='zero')[0]
        return(fun_u, grad_u)
    def grad_v(self, x, out_size, name):
        """Return v(x) and its spatial gradient dv/dx."""
        #**************************************
        # v(x,y)
        fun_v= self.DNN_v(x, out_size, name, tf.AUTO_REUSE)
        #*************************************
        # grad_v(x,y)
        grad_v= tf.gradients(fun_v, x, unconnected_gradients='zero')[0]
        return(fun_v, grad_v)
    def fun_a(self, x):
        """Diffusion coefficient a(x) = 1 + |x|^2, shape [batch, 1]."""
        #********************************************************
        a_val= tf.add(1.0, tf.reduce_sum(tf.pow(x, 2), 1))
        out= tf.reshape(a_val, [-1,1])
        return(out)
    def fun_w(self, x, low=-1.0, up=1.0):
        """Compactly-supported bump weight w(x) and its gradient dw/dx.

        w is a product over dimensions of exp(1/(t^2-1))/I1 on |t|<1 and 0
        outside, where t is each coordinate rescaled to (-1, 1); I1 is the
        normalising constant of the 1-D bump. NaNs in the gradient at the
        support boundary are zeroed out.
        """
        I1= 0.210987
        x_list= tf.split(x, self.dim, 1)
        #**************************************************
        # rescale each coordinate from [low, up] to (-1, 1)
        x_scale_list=[]
        h_len= (up-low)/2.0
        for i in range(self.dim):
            x_scale= (x_list[i]-low-h_len)/h_len
            x_scale_list.append(x_scale)
        #************************************************
        # 1-D bump in each dimension, zero outside |t|<1
        z_x_list=[];
        for i in range(self.dim):
            supp_x= tf.greater(1-tf.abs(x_scale_list[i]), 0)
            z_x= tf.where(supp_x, tf.exp(1/(tf.pow(x_scale_list[i], 2)-1))/I1,
                          tf.zeros_like(x_scale_list[i]))
            z_x_list.append(z_x)
        #***************************************************
        # product of the per-dimension bumps
        w_val= tf.constant(1.0)
        for i in range(self.dim):
            w_val= tf.multiply(w_val, z_x_list[i])
        dw= tf.gradients(w_val, x, unconnected_gradients='zero')[0]
        dw= tf.where(tf.is_nan(dw), tf.zeros_like(dw), dw)
        return(w_val, dw)
    def build(self):
        """Construct the TF1 computation graph: placeholders, networks,
        the weak-form losses for u and v, and the two optimizers."""
        #**************************************************************
        with tf.name_scope('placeholder'):
            self.x_domain= tf.placeholder(tf.float32, shape=[None, self.dim], name='x_dm')
            self.f_obv= tf.placeholder(tf.float32, shape=[None, 1], name='f_obv')
            self.x_bound= tf.placeholder(tf.float32, shape=[None, self.dim], name='x_b')
            self.g_obv= tf.placeholder(tf.float32, shape=[None, 1], name='g_obv')
            self.int_domain= tf.placeholder(tf.float32, shape=(), name='int_domain')
        #**************************************************************
        name_u= 'dnn_u'; name_v= 'dnn_v';
        self.u_val, self.du= self.grad_u(self.x_domain, 1, name_u)
        self.v_val, self.dv= self.grad_v(self.x_domain, 1, name_v)
        self.w_val, self.dw= self.fun_w(self.x_domain)
        u_bound= self.DNN_u(self.x_bound, 1, name_u, tf.AUTO_REUSE)
        #
        a_val= self.fun_a(self.x_domain)
        # localized test function: w(x) * v(x)
        self.wv= tf.multiply(self.w_val, self.v_val)
        # inner products of gradients, each reshaped to [batch, 1]
        du_du= tf.reduce_sum(tf.multiply(self.du, self.du), axis=1)
        du_du= tf.reshape(du_du, [-1,1])
        #
        du_dw= tf.reduce_sum(tf.multiply(self.du, self.dw), axis=1)
        du_dw= tf.reshape(du_dw, [-1,1])
        #
        du_dv= tf.reduce_sum(tf.multiply(self.du, self.dv), axis=1)
        du_dv= tf.reshape(du_dv, [-1,1])
        # product rule: grad(u) . grad(w*v) = v grad(u).grad(w) + w grad(u).grad(v)
        du_dwv= tf.add(tf.multiply(self.v_val, du_dw),
                       tf.multiply(self.w_val, du_dv))
        #**************************************************************
        with tf.name_scope('loss'):
            with tf.name_scope('u_loss'):
                # normaliser ||w v||^2 so the weak residual is scale-invariant in v
                test_norm = tf.multiply(tf.reduce_mean(self.wv**2), self.int_domain)
                # Monte-Carlo estimates of the weak-form integrals
                int_l1= tf.multiply(tf.reduce_mean(
                        tf.multiply(a_val, du_dwv)), self.int_domain)
                int_l2= tf.multiply(tf.reduce_mean(tf.multiply(self.wv, 0.5*du_du)),self.int_domain)
                int_r= tf.multiply(tf.reduce_mean(
                        tf.multiply(self.f_obv, self.wv)), self.int_domain)
                # squared normalised weak residual
                self.loss_int= tf.square(int_l1+int_l2-int_r) / test_norm
                # L1 mismatch with the Dirichlet data on the boundary
                self.loss_bound= tf.reduce_mean(tf.abs(u_bound-self.g_obv))
                #
                self.loss_u= (self.beta)*self.loss_bound+(1.0)*self.loss_int
            with tf.name_scope('v_loss'):
                # v maximises the residual, i.e. minimises -log(residual)
                self.loss_v= - tf.log(self.loss_int)
        #**************************************************************
        # separate the trainable variables of the two networks
        u_vars= tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='dnn_u')
        v_vars= tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='dnn_v')
        #***************************************************************
        #
        with tf.name_scope('optimizer'):
            self.u_opt= tf.train.AdagradOptimizer(self.u_rate).minimize(
                        self.loss_u, var_list= u_vars)
            self.v_opt= tf.train.AdagradOptimizer(self.v_rate).minimize(
                        self.loss_v, var_list= v_vars)
    def train(self):
        """Run the adversarial training loop.

        Alternates v_step maximisation steps on v with u_step minimisation
        steps on u per iteration; tracks the (relative) L2 test error every
        5 iterations and prints diagnostics every 200.

        Returns:
            tuple: (mesh, test_x, test_u, pred_u, step, error_l2,
            error_l2r, dim) for post-processing/saving.
        """
        #****************************************************************
        tf.reset_default_graph(); self.build();
        #***************************************************************
        # testing data
        mesh, test_x, test_u= self.sample_test(self.test_size, self.dim);
        step=[]; error_l2r=[]; error_l2=[];
        time_start= time.time()
        #***************************************************************
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            #****************************
            for i in range(self.iteration):
                # training data (mini-batch)
                train_data= self.sample_train(self.batch_size, self.bound_size, self.dim)
                feed_train={self.x_domain: train_data[0],
                            self.f_obv: train_data[1],
                            self.x_bound: train_data[2],
                            self.g_obv: train_data[3],
                            self.int_domain: train_data[4]}
                if i%5==0:
                    # evaluate absolute and relative L2 error on test set
                    pred_u, pred_v= sess.run([self.u_val, self.v_val],feed_dict={self.x_domain: test_x})
                    err_l2= np.sqrt(np.mean(np.square(test_u-pred_u)))
                    u_norm= np.sqrt(np.mean(np.square(test_u)))
                    step.append(i+1);
                    error_l2r.append(err_l2/u_norm); error_l2.append(err_l2)
                    if i%200==0:
                        loss_v, loss_u, int_u, loss_bd= sess.run(
                                [self.loss_v, self.loss_u, self.loss_int, self.loss_bound],
                                feed_dict= feed_train)
                        print('Iterations:{}'.format(i))
                        print('u_loss:{} v_loss:{}'.format(loss_u, loss_v))
                        print('int_u:{} loss_bd:{} err_l2r:{}'.format(
                                int_u, loss_bd, error_l2r[-1]))
                # adversarial updates: v first, then u
                for _ in range(self.v_step):
                    _ = sess.run(self.v_opt, feed_dict=feed_train)
                for _ in range(self.u_step):
                    _ = sess.run(self.u_opt, feed_dict=feed_train)
        #
        time_end= time.time()
        #*******************************************
        print('L2_e is {}, L2_re is {}'.format(error_l2[-1], error_l2r[-1]))
        print('Total running time is {}:'.format(time_end-time_start))
        return(mesh, test_x, test_u, pred_u, step, error_l2, error_l2r, self.dim)
if __name__=='__main__':
    # Problem setup: spatial dimension, boundary-penalty weight beta,
    # number of interior collocation points, boundary points per face.
    dim, beta, N_int, N_bd= 5, 20000000, 20000, 100
    demo= pde_wan(dim, beta, N_int, N_bd)
    # Run the full adversarial training loop and collect the results.
    mesh, test_x, test_u, pred_u, step, error_l2, error_l2r, dim= demo.train()
    # save data as .mat form
    import scipy.io
    data_save= {}
    data_save['mesh']= mesh
    data_save['test_x']= test_x
    data_save['test_u']= test_u
    data_save['pred_u']= pred_u
    data_save['step']= step
    data_save['error_l2']= error_l2
    data_save['error_l2r']= error_l2r
    scipy.io.savemat('WAN_nonlinear_%dd'%(dim), data_save)
| [
"tensorflow.get_collection",
"tensorflow.reset_default_graph",
"tensorflow.reshape",
"tensorflow.zeros_like",
"tensorflow.multiply",
"numpy.sin",
"tensorflow.split",
"numpy.meshgrid",
"tensorflow.abs",
"numpy.power",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"numpy.reshape",
"... | [((1264, 1306), 'numpy.random.uniform', 'np.random.uniform', (['low', 'up', '[dm_size, dim]'], {}), '(low, up, [dm_size, dim])\n', (1281, 1306), True, 'import numpy as np\n'), ((1742, 1775), 'numpy.concatenate', 'np.concatenate', (['x_bd_list'], {'axis': '(0)'}), '(x_bd_list, axis=0)\n', (1756, 1775), True, 'import numpy as np\n'), ((1987, 2010), 'numpy.power', 'np.power', (['x_dm[:, 0]', '(2)'], {}), '(x_dm[:, 0], 2)\n', (1995, 2010), True, 'import numpy as np\n'), ((2026, 2049), 'numpy.power', 'np.power', (['x_dm[:, 1]', '(2)'], {}), '(x_dm[:, 1], 2)\n', (2034, 2049), True, 'import numpy as np\n'), ((2378, 2403), 'numpy.reshape', 'np.reshape', (['f_dm', '[-1, 1]'], {}), '(f_dm, [-1, 1])\n', (2388, 2403), True, 'import numpy as np\n'), ((2526, 2549), 'numpy.power', 'np.power', (['x_bd[:, 0]', '(2)'], {}), '(x_bd[:, 0], 2)\n', (2534, 2549), True, 'import numpy as np\n'), ((2565, 2588), 'numpy.power', 'np.power', (['x_bd[:, 1]', '(2)'], {}), '(x_bd[:, 1], 2)\n', (2573, 2588), True, 'import numpy as np\n'), ((2602, 2637), 'numpy.sin', 'np.sin', (['(la1 * x1_pow + la2 * x2_pow)'], {}), '(la1 * x1_pow + la2 * x2_pow)\n', (2608, 2637), True, 'import numpy as np\n'), ((2646, 2671), 'numpy.reshape', 'np.reshape', (['u_bd', '[-1, 1]'], {}), '(u_bd, [-1, 1])\n', (2656, 2671), True, 'import numpy as np\n'), ((2753, 2769), 'numpy.float32', 'np.float32', (['x_dm'], {}), '(x_dm)\n', (2763, 2769), True, 'import numpy as np\n'), ((2784, 2800), 'numpy.float32', 'np.float32', (['x_bd'], {}), '(x_bd)\n', (2794, 2800), True, 'import numpy as np\n'), ((2817, 2835), 'numpy.float32', 'np.float32', (['int_dm'], {}), '(int_dm)\n', (2827, 2835), True, 'import numpy as np\n'), ((2850, 2866), 'numpy.float32', 'np.float32', (['f_dm'], {}), '(f_dm)\n', (2860, 2866), True, 'import numpy as np\n'), ((2881, 2897), 'numpy.float32', 'np.float32', (['u_bd'], {}), '(u_bd)\n', (2891, 2897), True, 'import numpy as np\n'), ((3139, 3175), 'numpy.linspace', 'np.linspace', (['low', 'up', 
'self.mesh_size'], {}), '(low, up, self.mesh_size)\n', (3150, 3175), True, 'import numpy as np\n'), ((3190, 3234), 'numpy.random.uniform', 'np.random.uniform', (['low', 'up', '[test_size, dim]'], {}), '(low, up, [test_size, dim])\n', (3207, 3234), True, 'import numpy as np\n'), ((3344, 3367), 'numpy.power', 'np.power', (['x_dm[:, 0]', '(2)'], {}), '(x_dm[:, 0], 2)\n', (3352, 3367), True, 'import numpy as np\n'), ((3383, 3406), 'numpy.power', 'np.power', (['x_dm[:, 1]', '(2)'], {}), '(x_dm[:, 1], 2)\n', (3391, 3406), True, 'import numpy as np\n'), ((3420, 3455), 'numpy.sin', 'np.sin', (['(la1 * x1_pow + la2 * x2_pow)'], {}), '(la1 * x1_pow + la2 * x2_pow)\n', (3426, 3455), True, 'import numpy as np\n'), ((3464, 3489), 'numpy.reshape', 'np.reshape', (['u_dm', '[-1, 1]'], {}), '(u_dm, [-1, 1])\n', (3474, 3489), True, 'import numpy as np\n'), ((3573, 3589), 'numpy.float32', 'np.float32', (['x_dm'], {}), '(x_dm)\n', (3583, 3589), True, 'import numpy as np\n'), ((3604, 3620), 'numpy.float32', 'np.float32', (['u_dm'], {}), '(u_dm)\n', (3614, 3620), True, 'import numpy as np\n'), ((3635, 3662), 'numpy.meshgrid', 'np.meshgrid', (['x_mesh', 'x_mesh'], {}), '(x_mesh, x_mesh)\n', (3646, 3662), True, 'import numpy as np\n'), ((6141, 6167), 'tensorflow.reshape', 'tf.reshape', (['a_val', '[-1, 1]'], {}), '(a_val, [-1, 1])\n', (6151, 6167), True, 'import tensorflow as tf\n'), ((6271, 6295), 'tensorflow.split', 'tf.split', (['x', 'self.dim', '(1)'], {}), '(x, self.dim, 1)\n', (6279, 6295), True, 'import tensorflow as tf\n'), ((6953, 6969), 'tensorflow.constant', 'tf.constant', (['(1.0)'], {}), '(1.0)\n', (6964, 6969), True, 'import tensorflow as tf\n'), ((8217, 8252), 'tensorflow.multiply', 'tf.multiply', (['self.w_val', 'self.v_val'], {}), '(self.w_val, self.v_val)\n', (8228, 8252), True, 'import tensorflow as tf\n'), ((8346, 8372), 'tensorflow.reshape', 'tf.reshape', (['du_du', '[-1, 1]'], {}), '(du_du, [-1, 1])\n', (8356, 8372), True, 'import tensorflow as tf\n'), ((8465, 8491), 
'tensorflow.reshape', 'tf.reshape', (['du_dw', '[-1, 1]'], {}), '(du_dw, [-1, 1])\n', (8475, 8491), True, 'import tensorflow as tf\n'), ((8584, 8610), 'tensorflow.reshape', 'tf.reshape', (['du_dv', '[-1, 1]'], {}), '(du_dv, [-1, 1])\n', (8594, 8610), True, 'import tensorflow as tf\n'), ((9856, 9919), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""dnn_u"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='dnn_u')\n", (9873, 9919), True, 'import tensorflow as tf\n'), ((9936, 9999), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""dnn_v"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='dnn_v')\n", (9953, 9999), True, 'import tensorflow as tf\n'), ((10481, 10505), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (10503, 10505), True, 'import tensorflow as tf\n'), ((10756, 10767), 'time.time', 'time.time', ([], {}), '()\n', (10765, 10767), False, 'import time\n'), ((1486, 1528), 'numpy.random.uniform', 'np.random.uniform', (['low', 'up', '[bd_size, dim]'], {}), '(low, up, [bd_size, dim])\n', (1503, 1528), True, 'import numpy as np\n'), ((1617, 1659), 'numpy.random.uniform', 'np.random.uniform', (['low', 'up', '[bd_size, dim]'], {}), '(low, up, [bd_size, dim])\n', (1634, 1659), True, 'import numpy as np\n'), ((2069, 2086), 'numpy.power', 'np.power', (['x_dm', '(2)'], {}), '(x_dm, 2)\n', (2077, 2086), True, 'import numpy as np\n'), ((3799, 3835), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {'reuse': 'reuse'}), '(name, reuse=reuse)\n', (3816, 3835), True, 'import tensorflow as tf\n'), ((3854, 3926), 'tensorflow.layers.dense', 'tf.layers.dense', (['x_in', 'h_size'], {'activation': 'tf.nn.tanh', 'name': '"""input_layer"""'}), "(x_in, h_size, activation=tf.nn.tanh, name='input_layer')\n", (3869, 3926), True, 'import tensorflow as tf\n'), ((3976, 4047), 'tensorflow.layers.dense', 'tf.layers.dense', (['hi', 'h_size'], {'activation': 
'tf.nn.tanh', 'name': '"""input_layer1"""'}), "(hi, h_size, activation=tf.nn.tanh, name='input_layer1')\n", (3991, 4047), True, 'import tensorflow as tf\n'), ((4415, 4462), 'tensorflow.layers.dense', 'tf.layers.dense', (['hi', 'out_size'], {'name': '"""out_layer"""'}), "(hi, out_size, name='out_layer')\n", (4430, 4462), True, 'import tensorflow as tf\n'), ((4586, 4622), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {'reuse': 'reuse'}), '(name, reuse=reuse)\n', (4603, 4622), True, 'import tensorflow as tf\n'), ((4641, 4713), 'tensorflow.layers.dense', 'tf.layers.dense', (['x_in', 'h_size'], {'activation': 'tf.nn.tanh', 'name': '"""input_layer"""'}), "(x_in, h_size, activation=tf.nn.tanh, name='input_layer')\n", (4656, 4713), True, 'import tensorflow as tf\n'), ((4763, 4834), 'tensorflow.layers.dense', 'tf.layers.dense', (['hi', 'h_size'], {'activation': 'tf.nn.tanh', 'name': '"""input_layer1"""'}), "(hi, h_size, activation=tf.nn.tanh, name='input_layer1')\n", (4778, 4834), True, 'import tensorflow as tf\n'), ((5222, 5269), 'tensorflow.layers.dense', 'tf.layers.dense', (['hi', 'out_size'], {'name': '"""out_layer"""'}), "(hi, out_size, name='out_layer')\n", (5237, 5269), True, 'import tensorflow as tf\n'), ((5546, 5598), 'tensorflow.gradients', 'tf.gradients', (['fun_u', 'x'], {'unconnected_gradients': '"""zero"""'}), "(fun_u, x, unconnected_gradients='zero')\n", (5558, 5598), True, 'import tensorflow as tf\n'), ((5888, 5940), 'tensorflow.gradients', 'tf.gradients', (['fun_v', 'x'], {'unconnected_gradients': '"""zero"""'}), "(fun_v, x, unconnected_gradients='zero')\n", (5900, 5940), True, 'import tensorflow as tf\n'), ((7023, 7054), 'tensorflow.multiply', 'tf.multiply', (['w_val', 'z_x_list[i]'], {}), '(w_val, z_x_list[i])\n', (7034, 7054), True, 'import tensorflow as tf\n'), ((7067, 7119), 'tensorflow.gradients', 'tf.gradients', (['w_val', 'x'], {'unconnected_gradients': '"""zero"""'}), "(w_val, x, unconnected_gradients='zero')\n", (7079, 7119), True, 
'import tensorflow as tf\n'), ((7144, 7157), 'tensorflow.is_nan', 'tf.is_nan', (['dw'], {}), '(dw)\n', (7153, 7157), True, 'import tensorflow as tf\n'), ((7159, 7176), 'tensorflow.zeros_like', 'tf.zeros_like', (['dw'], {}), '(dw)\n', (7172, 7176), True, 'import tensorflow as tf\n'), ((7319, 7347), 'tensorflow.name_scope', 'tf.name_scope', (['"""placeholder"""'], {}), "('placeholder')\n", (7332, 7347), True, 'import tensorflow as tf\n'), ((7376, 7439), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.dim]', 'name': '"""x_dm"""'}), "(tf.float32, shape=[None, self.dim], name='x_dm')\n", (7390, 7439), True, 'import tensorflow as tf\n'), ((7464, 7521), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 1]', 'name': '"""f_obv"""'}), "(tf.float32, shape=[None, 1], name='f_obv')\n", (7478, 7521), True, 'import tensorflow as tf\n'), ((7548, 7610), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.dim]', 'name': '"""x_b"""'}), "(tf.float32, shape=[None, self.dim], name='x_b')\n", (7562, 7610), True, 'import tensorflow as tf\n'), ((7635, 7692), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 1]', 'name': '"""g_obv"""'}), "(tf.float32, shape=[None, 1], name='g_obv')\n", (7649, 7692), True, 'import tensorflow as tf\n'), ((7722, 7777), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '()', 'name': '"""int_domain"""'}), "(tf.float32, shape=(), name='int_domain')\n", (7736, 7777), True, 'import tensorflow as tf\n'), ((8292, 8321), 'tensorflow.multiply', 'tf.multiply', (['self.du', 'self.du'], {}), '(self.du, self.du)\n', (8303, 8321), True, 'import tensorflow as tf\n'), ((8411, 8440), 'tensorflow.multiply', 'tf.multiply', (['self.du', 'self.dw'], {}), '(self.du, self.dw)\n', (8422, 8440), True, 'import tensorflow as tf\n'), ((8530, 8559), 'tensorflow.multiply', 'tf.multiply', (['self.du', 'self.dv'], {}), '(self.du, self.dv)\n', 
(8541, 8559), True, 'import tensorflow as tf\n'), ((8643, 8673), 'tensorflow.multiply', 'tf.multiply', (['self.v_val', 'du_dw'], {}), '(self.v_val, du_dw)\n', (8654, 8673), True, 'import tensorflow as tf\n'), ((8698, 8728), 'tensorflow.multiply', 'tf.multiply', (['self.w_val', 'du_dv'], {}), '(self.w_val, du_dv)\n', (8709, 8728), True, 'import tensorflow as tf\n'), ((8815, 8836), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (8828, 8836), True, 'import tensorflow as tf\n'), ((10097, 10123), 'tensorflow.name_scope', 'tf.name_scope', (['"""optimizer"""'], {}), "('optimizer')\n", (10110, 10123), True, 'import tensorflow as tf\n'), ((10854, 10866), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (10864, 10866), True, 'import tensorflow as tf\n'), ((12639, 12650), 'time.time', 'time.time', ([], {}), '()\n', (12648, 12650), False, 'import time\n'), ((6110, 6122), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (6116, 6122), True, 'import tensorflow as tf\n'), ((6812, 6842), 'tensorflow.zeros_like', 'tf.zeros_like', (['x_scale_list[i]'], {}), '(x_scale_list[i])\n', (6825, 6842), True, 'import tensorflow as tf\n'), ((8855, 8878), 'tensorflow.name_scope', 'tf.name_scope', (['"""u_loss"""'], {}), "('u_loss')\n", (8868, 8878), True, 'import tensorflow as tf\n'), ((9660, 9683), 'tensorflow.name_scope', 'tf.name_scope', (['"""v_loss"""'], {}), "('v_loss')\n", (9673, 9683), True, 'import tensorflow as tf\n'), ((10897, 10930), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10928, 10930), True, 'import tensorflow as tf\n'), ((2156, 2191), 'numpy.sin', 'np.sin', (['(la1 * x1_pow + la2 * x2_pow)'], {}), '(la1 * x1_pow + la2 * x2_pow)\n', (2162, 2191), True, 'import numpy as np\n'), ((2251, 2286), 'numpy.cos', 'np.cos', (['(la1 * x1_pow + la2 * x2_pow)'], {}), '(la1 * x1_pow + la2 * x2_pow)\n', (2257, 2286), True, 'import numpy as np\n'), ((2330, 2365), 'numpy.cos', 'np.cos', (['(la1 * 
x1_pow + la2 * x2_pow)'], {}), '(la1 * x1_pow + la2 * x2_pow)\n', (2336, 2365), True, 'import numpy as np\n'), ((6678, 6701), 'tensorflow.abs', 'tf.abs', (['x_scale_list[i]'], {}), '(x_scale_list[i])\n', (6684, 6701), True, 'import tensorflow as tf\n'), ((8920, 8948), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(self.wv ** 2)'], {}), '(self.wv ** 2)\n', (8934, 8948), True, 'import tensorflow as tf\n'), ((9382, 9416), 'tensorflow.square', 'tf.square', (['(int_l1 + int_l2 - int_r)'], {}), '(int_l1 + int_l2 - int_r)\n', (9391, 9416), True, 'import tensorflow as tf\n'), ((9515, 9543), 'tensorflow.abs', 'tf.abs', (['(u_bound - self.g_obv)'], {}), '(u_bound - self.g_obv)\n', (9521, 9543), True, 'import tensorflow as tf\n'), ((9735, 9756), 'tensorflow.log', 'tf.log', (['self.loss_int'], {}), '(self.loss_int)\n', (9741, 9756), True, 'import tensorflow as tf\n'), ((10149, 10187), 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', (['self.u_rate'], {}), '(self.u_rate)\n', (10174, 10187), True, 'import tensorflow as tf\n'), ((10273, 10311), 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', (['self.v_rate'], {}), '(self.v_rate)\n', (10298, 10311), True, 'import tensorflow as tf\n'), ((4346, 4373), 'tensorflow.layers.dense', 'tf.layers.dense', (['hi', 'h_size'], {}), '(hi, h_size)\n', (4361, 4373), True, 'import tensorflow as tf\n'), ((5153, 5180), 'tensorflow.layers.dense', 'tf.layers.dense', (['hi', 'h_size'], {}), '(hi, h_size)\n', (5168, 5180), True, 'import tensorflow as tf\n'), ((9059, 9085), 'tensorflow.multiply', 'tf.multiply', (['a_val', 'du_dwv'], {}), '(a_val, du_dwv)\n', (9070, 9085), True, 'import tensorflow as tf\n'), ((9156, 9189), 'tensorflow.multiply', 'tf.multiply', (['self.wv', '(0.5 * du_du)'], {}), '(self.wv, 0.5 * du_du)\n', (9167, 9189), True, 'import tensorflow as tf\n'), ((9281, 9313), 'tensorflow.multiply', 'tf.multiply', (['self.f_obv', 'self.wv'], {}), '(self.f_obv, self.wv)\n', (9292, 9313), True, 'import tensorflow 
as tf\n'), ((11669, 11695), 'numpy.square', 'np.square', (['(test_u - pred_u)'], {}), '(test_u - pred_u)\n', (11678, 11695), True, 'import numpy as np\n'), ((11740, 11757), 'numpy.square', 'np.square', (['test_u'], {}), '(test_u)\n', (11749, 11757), True, 'import numpy as np\n'), ((6750, 6776), 'tensorflow.pow', 'tf.pow', (['x_scale_list[i]', '(2)'], {}), '(x_scale_list[i], 2)\n', (6756, 6776), True, 'import tensorflow as tf\n')] |
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import os
import argparse
import random
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import StandardScaler
from models import ConvDiscriminator, ConvGenerator, LSTMDiscriminator, LSTMGenerator
from utils import make_dirs, pre_processing, post_processing, prepare_data, moving_windows, get_lr_scheduler
from utils import get_gradient_penalty, plot_series, get_samples, generate_fake_samples, make_csv, derive_metrics
# Reproducibility #
# Force deterministic cuDNN kernels and disable the autotuner so repeated
# runs with the same seed produce identical results (may slow training).
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def main(args):
# Device Configuration #
device = torch.device(f'cuda:{args.gpu_num}' if torch.cuda.is_available() else 'cpu')
# Fix Seed for Reproducibility #
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# Samples, Plots, Weights and CSV Path #
paths = [args.samples_path, args.weights_path, args.csv_path, args.inference_path]
for path in paths:
make_dirs(path)
# Prepare Data #
data = pd.read_csv(args.data_path)[args.column]
# Prepare Data #
scaler_1 = StandardScaler()
scaler_2 = StandardScaler()
preprocessed_data = pre_processing(data, scaler_1, scaler_2, args.constant, args.delta)
train_X, train_Y, test_X, test_Y = prepare_data(data, preprocessed_data, args)
train_X = moving_windows(train_X, args.ts_dim)
train_Y = moving_windows(train_Y, args.ts_dim)
test_X = moving_windows(test_X, args.ts_dim)
test_Y = moving_windows(test_Y, args.ts_dim)
# Prepare Networks #
if args.model == 'conv':
D = ConvDiscriminator(args.ts_dim).to(device)
G = ConvGenerator(args.latent_dim, args.ts_dim).to(device)
elif args.model == 'lstm':
D = LSTMDiscriminator(args.ts_dim).to(device)
G = LSTMGenerator(args.latent_dim, args.ts_dim).to(device)
else:
raise NotImplementedError
#########
# Train #
#########
if args.mode == 'train':
# Loss Function #
if args.criterion == 'l2':
criterion = nn.MSELoss()
elif args.criterion == 'wgangp':
pass
else:
raise NotImplementedError
# Optimizers #
if args.optim == 'sgd':
D_optim = torch.optim.SGD(D.parameters(), lr=args.lr, momentum=0.9)
G_optim = torch.optim.SGD(G.parameters(), lr=args.lr, momentum=0.9)
elif args.optim == 'adam':
D_optim = torch.optim.Adam(D.parameters(), lr=args.lr, betas=(0., 0.9))
G_optim = torch.optim.Adam(G.parameters(), lr=args.lr, betas=(0., 0.9))
else:
raise NotImplementedError
D_optim_scheduler = get_lr_scheduler(D_optim, args)
G_optim_scheduler = get_lr_scheduler(G_optim, args)
# Lists #
D_losses, G_losses = list(), list()
# Train #
print("Training Time Series GAN started with total epoch of {}.".format(args.num_epochs))
for epoch in range(args.num_epochs):
# Initialize Optimizers #
G_optim.zero_grad()
D_optim.zero_grad()
#######################
# Train Discriminator #
#######################
if args.criterion == 'l2':
n_critics = 1
elif args.criterion == 'wgangp':
n_critics = 5
for j in range(n_critics):
series, start_dates = get_samples(train_X, train_Y, args.batch_size)
# Data Preparation #
series = series.to(device)
noise = torch.randn(args.batch_size, 1, args.latent_dim).to(device)
# Adversarial Loss using Real Image #
prob_real = D(series.float())
if args.criterion == 'l2':
real_labels = torch.ones(prob_real.size()).to(device)
D_real_loss = criterion(prob_real, real_labels)
elif args.criterion == 'wgangp':
D_real_loss = -torch.mean(prob_real)
# Adversarial Loss using Fake Image #
fake_series = G(noise)
prob_fake = D(fake_series.detach())
if args.criterion == 'l2':
fake_labels = torch.zeros(prob_fake.size()).to(device)
D_fake_loss = criterion(prob_fake, fake_labels)
elif args.criterion == 'wgangp':
D_fake_loss = torch.mean(prob_fake)
D_gp_loss = args.lambda_gp * get_gradient_penalty(D, series.float(), fake_series.float(), device)
# Calculate Total Discriminator Loss #
D_loss = D_fake_loss + D_real_loss
if args.criterion == 'wgangp':
D_loss += args.lambda_gp * D_gp_loss
# Back Propagation and Update #
D_loss.backward()
D_optim.step()
###################
# Train Generator #
###################
# Adversarial Loss #
fake_series = G(noise)
prob_fake = D(fake_series)
# Calculate Total Generator Loss #
if args.criterion == 'l2':
real_labels = torch.ones(prob_fake.size()).to(device)
G_loss = criterion(prob_fake, real_labels)
elif args.criterion == 'wgangp':
G_loss = -torch.mean(prob_fake)
# Back Propagation and Update #
G_loss.backward()
G_optim.step()
# Add items to Lists #
D_losses.append(D_loss.item())
G_losses.append(G_loss.item())
# Adjust Learning Rate #
D_optim_scheduler.step()
G_optim_scheduler.step()
# Print Statistics, Save Model Weights and Series #
if (epoch+1) % args.log_every == 0:
# Print Statistics and Save Model #
print("Epochs [{}/{}] | D Loss {:.4f} | G Loss {:.4f}".format(epoch+1, args.num_epochs, np.average(D_losses), np.average(G_losses)))
torch.save(G.state_dict(), os.path.join(args.weights_path, 'TS_using{}_and_{}_Epoch_{}.pkl'.format(G.__class__.__name__, args.criterion.upper(), epoch + 1)))
# Generate Samples and Save Plots and CSVs #
series, fake_series = generate_fake_samples(test_X, test_Y, G, scaler_1, scaler_2, args, device)
plot_series(series, fake_series, G, epoch, args, args.samples_path)
make_csv(series, fake_series, G, epoch, args, args.csv_path)
########
# Test #
########
elif args.mode == 'test':
# Load Model Weights #
G.load_state_dict(torch.load(os.path.join(args.weights_path, 'TS_using{}_and_{}_Epoch_{}.pkl'.format(G.__class__.__name__, args.criterion.upper(), args.num_epochs))))
# Lists #
real, fake = list(), list()
# Inference #
for idx in range(0, test_X.shape[0], args.ts_dim):
# Do not plot if the remaining data is less than time dimension #
end_ix = idx + args.ts_dim
if end_ix > len(test_X)-1:
break
# Prepare Data #
test_data = test_X[idx, :]
test_data = np.expand_dims(test_data, axis=0)
test_data = np.expand_dims(test_data, axis=1)
test_data = torch.from_numpy(test_data).to(device)
start = test_Y[idx, 0]
noise = torch.randn(args.val_batch_size, 1, args.latent_dim).to(device)
# Generate Fake Data #
with torch.no_grad():
fake_series = G(noise)
# Convert to Numpy format for Saving #
test_data = np.squeeze(test_data.cpu().data.numpy())
fake_series = np.squeeze(fake_series.cpu().data.numpy())
test_data = post_processing(test_data, start, scaler_1, scaler_2, args.delta)
fake_series = post_processing(fake_series, start, scaler_1, scaler_2, args.delta)
real += test_data.tolist()
fake += fake_series.tolist()
# Plot, Save to CSV file and Derive Metrics #
plot_series(real, fake, G, args.num_epochs-1, args, args.inference_path)
make_csv(real, fake, G, args.num_epochs-1, args, args.inference_path)
derive_metrics(real, fake, args)
else:
raise NotImplementedError
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_num', type=int, default=5, help='gpu number')
parser.add_argument('--seed', type=int, default=7777, help='seed')
parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'], help='train or test')
parser.add_argument('--data_path', type=str, default='./data/energydata_complete.csv', help='data path')
parser.add_argument('--column', type=str, default='Appliances', help='which column to generate')
parser.add_argument('--train_split', type=float, default=0.8, help='train-test split ratio')
parser.add_argument('--batch_size', type=int, default=256, help='mini-batch size')
parser.add_argument('--val_batch_size', type=int, default=1, help='mini-batch size for validation')
parser.add_argument('--num_epochs', type=int, default=1000, help='total epoch for training')
parser.add_argument('--log_every', type=int, default=50, help='save log data for every default iteration')
parser.add_argument('--metric_iteration', type=int, default=5, help='iterate calculation for metrics for evaluation')
parser.add_argument('--model', type=str, default='conv', choices=['conv', 'lstm'], help='which network to train')
parser.add_argument('--delta', type=float, default=0.7, help='delta')
parser.add_argument('--constant', type=float, default=0.0, help='If zero in the original data, please set it as non-zero, e.g. 1e-1')
parser.add_argument('--ts_dim', type=int, default=100, help='time series dimension, how many time steps to synthesize')
parser.add_argument('--latent_dim', type=int, default=25, help='noise dimension')
parser.add_argument('--criterion', type=str, default='wgangp', choices=['l2', 'wgangp'], help='criterion')
parser.add_argument('--lambda_gp', type=int, default=10, help='constant for gradient penalty')
parser.add_argument('--optim', type=str, default='adam', choices=['sgd', 'adam'], help='which optimizer to update')
parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
parser.add_argument('--lr_decay_rate', type=float, default=0.5, help='decay learning rate')
parser.add_argument('--lr_decay_every', type=int, default=1000, help='decay learning rate for every default epoch')
parser.add_argument('--lr_scheduler', type=str, default='step', choices=['step', 'plateau', 'cosine'], help='learning rate scheduler')
parser.add_argument('--samples_path', type=str, default='./results/samples/', help='samples path')
parser.add_argument('--weights_path', type=str, default='./results/weights/', help='weights path')
parser.add_argument('--csv_path', type=str, default='./results/csv/', help='csv path')
parser.add_argument('--inference_path', type=str, default='./results/inference/', help='inference path')
args = parser.parse_args()
torch.cuda.empty_cache()
main(args) | [
"utils.pre_processing",
"utils.prepare_data",
"numpy.random.seed",
"sklearn.preprocessing.StandardScaler",
"argparse.ArgumentParser",
"utils.get_samples",
"pandas.read_csv",
"torch.randn",
"utils.get_lr_scheduler",
"utils.make_csv",
"torch.no_grad",
"models.ConvDiscriminator",
"torch.nn.MSEL... | [((171, 204), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (194, 204), False, 'import warnings\n'), ((847, 869), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (858, 869), False, 'import random\n'), ((874, 899), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (888, 899), True, 'import numpy as np\n'), ((904, 932), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (921, 932), False, 'import torch\n'), ((937, 970), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (959, 970), False, 'import torch\n'), ((1262, 1278), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1276, 1278), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1294, 1310), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1308, 1310), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1335, 1402), 'utils.pre_processing', 'pre_processing', (['data', 'scaler_1', 'scaler_2', 'args.constant', 'args.delta'], {}), '(data, scaler_1, scaler_2, args.constant, args.delta)\n', (1349, 1402), False, 'from utils import make_dirs, pre_processing, post_processing, prepare_data, moving_windows, get_lr_scheduler\n'), ((1443, 1486), 'utils.prepare_data', 'prepare_data', (['data', 'preprocessed_data', 'args'], {}), '(data, preprocessed_data, args)\n', (1455, 1486), False, 'from utils import make_dirs, pre_processing, post_processing, prepare_data, moving_windows, get_lr_scheduler\n'), ((1502, 1538), 'utils.moving_windows', 'moving_windows', (['train_X', 'args.ts_dim'], {}), '(train_X, args.ts_dim)\n', (1516, 1538), False, 'from utils import make_dirs, pre_processing, post_processing, prepare_data, moving_windows, get_lr_scheduler\n'), ((1553, 1589), 'utils.moving_windows', 'moving_windows', (['train_Y', 'args.ts_dim'], {}), '(train_Y, args.ts_dim)\n', 
(1567, 1589), False, 'from utils import make_dirs, pre_processing, post_processing, prepare_data, moving_windows, get_lr_scheduler\n'), ((1604, 1639), 'utils.moving_windows', 'moving_windows', (['test_X', 'args.ts_dim'], {}), '(test_X, args.ts_dim)\n', (1618, 1639), False, 'from utils import make_dirs, pre_processing, post_processing, prepare_data, moving_windows, get_lr_scheduler\n'), ((1653, 1688), 'utils.moving_windows', 'moving_windows', (['test_Y', 'args.ts_dim'], {}), '(test_Y, args.ts_dim)\n', (1667, 1688), False, 'from utils import make_dirs, pre_processing, post_processing, prepare_data, moving_windows, get_lr_scheduler\n'), ((8868, 8893), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8891, 8893), False, 'import argparse\n'), ((11735, 11759), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (11757, 11759), False, 'import torch\n'), ((1135, 1150), 'utils.make_dirs', 'make_dirs', (['path'], {}), '(path)\n', (1144, 1150), False, 'from utils import make_dirs, pre_processing, post_processing, prepare_data, moving_windows, get_lr_scheduler\n'), ((1184, 1211), 'pandas.read_csv', 'pd.read_csv', (['args.data_path'], {}), '(args.data_path)\n', (1195, 1211), True, 'import pandas as pd\n'), ((2909, 2940), 'utils.get_lr_scheduler', 'get_lr_scheduler', (['D_optim', 'args'], {}), '(D_optim, args)\n', (2925, 2940), False, 'from utils import make_dirs, pre_processing, post_processing, prepare_data, moving_windows, get_lr_scheduler\n'), ((2969, 3000), 'utils.get_lr_scheduler', 'get_lr_scheduler', (['G_optim', 'args'], {}), '(G_optim, args)\n', (2985, 3000), False, 'from utils import make_dirs, pre_processing, post_processing, prepare_data, moving_windows, get_lr_scheduler\n'), ((767, 792), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (790, 792), False, 'import torch\n'), ((2250, 2262), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2260, 2262), True, 'import torch.nn as nn\n'), ((8572, 8646), 
'utils.plot_series', 'plot_series', (['real', 'fake', 'G', '(args.num_epochs - 1)', 'args', 'args.inference_path'], {}), '(real, fake, G, args.num_epochs - 1, args, args.inference_path)\n', (8583, 8646), False, 'from utils import get_gradient_penalty, plot_series, get_samples, generate_fake_samples, make_csv, derive_metrics\n'), ((8653, 8724), 'utils.make_csv', 'make_csv', (['real', 'fake', 'G', '(args.num_epochs - 1)', 'args', 'args.inference_path'], {}), '(real, fake, G, args.num_epochs - 1, args, args.inference_path)\n', (8661, 8724), False, 'from utils import get_gradient_penalty, plot_series, get_samples, generate_fake_samples, make_csv, derive_metrics\n'), ((8731, 8763), 'utils.derive_metrics', 'derive_metrics', (['real', 'fake', 'args'], {}), '(real, fake, args)\n', (8745, 8763), False, 'from utils import get_gradient_penalty, plot_series, get_samples, generate_fake_samples, make_csv, derive_metrics\n'), ((1756, 1786), 'models.ConvDiscriminator', 'ConvDiscriminator', (['args.ts_dim'], {}), '(args.ts_dim)\n', (1773, 1786), False, 'from models import ConvDiscriminator, ConvGenerator, LSTMDiscriminator, LSTMGenerator\n'), ((1810, 1853), 'models.ConvGenerator', 'ConvGenerator', (['args.latent_dim', 'args.ts_dim'], {}), '(args.latent_dim, args.ts_dim)\n', (1823, 1853), False, 'from models import ConvDiscriminator, ConvGenerator, LSTMDiscriminator, LSTMGenerator\n'), ((3670, 3716), 'utils.get_samples', 'get_samples', (['train_X', 'train_Y', 'args.batch_size'], {}), '(train_X, train_Y, args.batch_size)\n', (3681, 3716), False, 'from utils import get_gradient_penalty, plot_series, get_samples, generate_fake_samples, make_csv, derive_metrics\n'), ((6691, 6765), 'utils.generate_fake_samples', 'generate_fake_samples', (['test_X', 'test_Y', 'G', 'scaler_1', 'scaler_2', 'args', 'device'], {}), '(test_X, test_Y, G, scaler_1, scaler_2, args, device)\n', (6712, 6765), False, 'from utils import get_gradient_penalty, plot_series, get_samples, generate_fake_samples, make_csv, 
derive_metrics\n'), ((6782, 6849), 'utils.plot_series', 'plot_series', (['series', 'fake_series', 'G', 'epoch', 'args', 'args.samples_path'], {}), '(series, fake_series, G, epoch, args, args.samples_path)\n', (6793, 6849), False, 'from utils import get_gradient_penalty, plot_series, get_samples, generate_fake_samples, make_csv, derive_metrics\n'), ((6866, 6926), 'utils.make_csv', 'make_csv', (['series', 'fake_series', 'G', 'epoch', 'args', 'args.csv_path'], {}), '(series, fake_series, G, epoch, args, args.csv_path)\n', (6874, 6926), False, 'from utils import get_gradient_penalty, plot_series, get_samples, generate_fake_samples, make_csv, derive_metrics\n'), ((7652, 7685), 'numpy.expand_dims', 'np.expand_dims', (['test_data'], {'axis': '(0)'}), '(test_data, axis=0)\n', (7666, 7685), True, 'import numpy as np\n'), ((7710, 7743), 'numpy.expand_dims', 'np.expand_dims', (['test_data'], {'axis': '(1)'}), '(test_data, axis=1)\n', (7724, 7743), True, 'import numpy as np\n'), ((8260, 8325), 'utils.post_processing', 'post_processing', (['test_data', 'start', 'scaler_1', 'scaler_2', 'args.delta'], {}), '(test_data, start, scaler_1, scaler_2, args.delta)\n', (8275, 8325), False, 'from utils import make_dirs, pre_processing, post_processing, prepare_data, moving_windows, get_lr_scheduler\n'), ((8352, 8419), 'utils.post_processing', 'post_processing', (['fake_series', 'start', 'scaler_1', 'scaler_2', 'args.delta'], {}), '(fake_series, start, scaler_1, scaler_2, args.delta)\n', (8367, 8419), False, 'from utils import make_dirs, pre_processing, post_processing, prepare_data, moving_windows, get_lr_scheduler\n'), ((1913, 1943), 'models.LSTMDiscriminator', 'LSTMDiscriminator', (['args.ts_dim'], {}), '(args.ts_dim)\n', (1930, 1943), False, 'from models import ConvDiscriminator, ConvGenerator, LSTMDiscriminator, LSTMGenerator\n'), ((1967, 2010), 'models.LSTMGenerator', 'LSTMGenerator', (['args.latent_dim', 'args.ts_dim'], {}), '(args.latent_dim, args.ts_dim)\n', (1980, 2010), False, 
'from models import ConvDiscriminator, ConvGenerator, LSTMDiscriminator, LSTMGenerator\n'), ((7981, 7996), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7994, 7996), False, 'import torch\n'), ((3822, 3870), 'torch.randn', 'torch.randn', (['args.batch_size', '(1)', 'args.latent_dim'], {}), '(args.batch_size, 1, args.latent_dim)\n', (3833, 3870), False, 'import torch\n'), ((4709, 4730), 'torch.mean', 'torch.mean', (['prob_fake'], {}), '(prob_fake)\n', (4719, 4730), False, 'import torch\n'), ((5716, 5737), 'torch.mean', 'torch.mean', (['prob_fake'], {}), '(prob_fake)\n', (5726, 5737), False, 'import torch\n'), ((6372, 6392), 'numpy.average', 'np.average', (['D_losses'], {}), '(D_losses)\n', (6382, 6392), True, 'import numpy as np\n'), ((6394, 6414), 'numpy.average', 'np.average', (['G_losses'], {}), '(G_losses)\n', (6404, 6414), True, 'import numpy as np\n'), ((7768, 7795), 'torch.from_numpy', 'torch.from_numpy', (['test_data'], {}), '(test_data)\n', (7784, 7795), False, 'import torch\n'), ((7864, 7916), 'torch.randn', 'torch.randn', (['args.val_batch_size', '(1)', 'args.latent_dim'], {}), '(args.val_batch_size, 1, args.latent_dim)\n', (7875, 7916), False, 'import torch\n'), ((4270, 4291), 'torch.mean', 'torch.mean', (['prob_real'], {}), '(prob_real)\n', (4280, 4291), False, 'import torch\n')] |
import sys
import numpy as np
import argparse
import json
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tempfile
import shutil
import logging
import subprocess
import tensorflow as tf
import matplotlib.pyplot as plt
from presearch_trrosetta.architecture.trRosetta import trRosetta
from presearch_trrosetta.utils.config import DistanceConfig
from presearch_trrosetta.utils import vocab
from presearch_trrosetta.prepare.create_dataset import save_data
import pdb
# todo : tf code style
# todo : test code
# todo : argument collection
# todo : distance map
# todo : test for casp, parsing -> distance map
#
import tqdm
def predict_model(config):
"""
get model
"""
kwargs = dict(
max_len=config.max_len,
bins=config.bins,
n2d_filters=64,
n2d_layers=61,
dropout_rate=0.15,
)
model = trRosetta(**kwargs)
return model
def pred2dist(length,
pred : tf.Variable):
"""
convert the prediction to distance map.
Return
distance map : [width, height]
"""
width, height , bins = pred.get_shape()
mask = np.triu(np.ones([length,length]), k=1)
argmax = tf.argmax(pred, axis=-1)[:length,:length]
# argmax -> 0~16 , real dist -> 2~18
argmax = ((argmax + 2) * mask).numpy()
# distmap is symmetric.
img = (argmax+argmax.T) .astype(np.float64)
img *= 256 / bins
return img
def pred2rr(length,
pred : tf.Variable):
"""
convert the prediction to rr format.
Return
rr : It is casp formant, [index i, index j, lower bound distance , upper bound distance, probability]
"""
pred_numpy = pred.numpy()[ :length, :length]
result_list = []
for i in range(len(pred_numpy)):
for j in range(len(pred_numpy)):
# todo 1. : check i+4 or i+6
if i + 1 < j:
argmax = np.argmax(pred_numpy[i, j])
prob = pred_numpy[i, j, argmax]
if argmax < 6:
prob = pred_numpy[i, j, argmax]
result_list.append([i + 1, j + 1, argmax + 2, argmax + 3, prob])
elif argmax < 10:
prob = pred_numpy[i, j, argmax]
result_list.append([i + 1, j + 1, 0, 12, prob])
result_array = np.array(result_list)
result_array = result_array[result_array[:, -1].argsort()[::-1]]
return result_array
def get_prediction(inputs,
model : tf.keras.Model,
):
length = inputs['length'][0]
prediction = model.predict_step(inputs)[0]
distancemap = pred2dist(length, prediction)
rr = pred2rr(length, prediction)
return {"distancemap": distancemap,
"rr": rr,}
def get_target_list(fasta_path,
a3m_path,
dca_path,
pssm_path):
"""
check the it is existed that all of needed data. (f2d_dca, f1d_pssm)
To make these two data, a3m file is must needed
:return
target_list : target fasta that prepared all of data.
no_a3m_list : no a3m fasta (it is not applied msa - hhblits)
no_dca_list : no dca fasta (it is not applied direct coupling analysis from baker)
"""
target_list = []
no_a3m_list = []
no_dca_list = []
for fasta_file in os.listdir(fasta_path):
fasta_name,_ext = os.path.splitext(fasta_file)
a3m_file = f"{a3m_path}/{fasta_name}.a3m"
f2d_dca_file = f"{dca_path}/{fasta_name}.npy"
f1d_pssm_file = f"{pssm_path}/{fasta_name}.txt"
# for prediction, we need a3m, f2d_dca, f1d_pssm.
if not os.path.exists(a3m_file):
no_a3m_list.append(fasta_name)
elif not (os.path.exists(f2d_dca_file) and os.path.exists(f1d_pssm_file)) :
no_dca_list.append(fasta_name)
else :
target_list.append(fasta_name)
return target_list, no_a3m_list, no_dca_list
def get_seq(fasta_file,
max_len):
"""
For the matching the maximum length, cutting or padding the seq
:param fasta_file : .seq file
:param max_len : maximum length of fasta.
:return:
seq : one-hot & padded or cut sequence array
length : length of fasta
ex)
seq : [[1, 0, 0 .. ], [0, 1, 0 ...], ...]
length : 50
"""
with open(fasta_file, "r") as output:
fasta_name = output.readline().rstrip()
seq = output.readline().rstrip()
assert type(seq) != 'str', 'check the seq_file, it must have fasta name line and AA seq line'
#assert '\n' in seq, 'please exclude \n'
seq = seq[:max_len]
length = len(seq)
if len(seq)<max_len:
seq += '-' * (max_len - len(seq))
seq = [vocab.onehot_dict[_seq.upper()] for _seq in seq]
return np.array(seq), length
def get_pssm(pssm_file,
max_len):
"""
For the matching the maximum length, cutting or padding the pssm
:param pssm_file:
:param max_len : maximum length of fasta.
:return : padded or cut pssm array
"""
pssm = np.loadtxt(pssm_file)
pssm = pssm[:max_len]
if len(pssm) < max_len:
# padding
pssm = np.vstack([pssm, np.zeros([max_len - pssm.shape[0], 21])])
return pssm
def get_dca(msa_file,
max_len):
"""
For the matching the maximum length, cutting or padding the dca.
:param msa_file: .npy file
:param max_len : maximum length of fasta.
:return : padded or cut dca array - [max_len,max_len,441]
"""
msa_prpc = np.load(msa_file)
msa_prpc = msa_prpc[:max_len, :max_len, :]
pad_len = max(0, max_len - msa_prpc.shape[0])
# todo : log ?
if msa_prpc.shape[0] < max_len:
msa_prpc = np.pad(msa_prpc, [[0, pad_len], [0, pad_len], [0, 0]])
return msa_prpc
def save_rr(output_path,
fasta,
rr,
seq
):
"""
save the rrfile
"""
with open(f'{output_path}/{fasta}/{fasta}.rr', mode='w') as file_obj:
file_obj.write(''.join([vocab.onehot_dict_inv[s] for s in seq]) + '\n')
for idx, _array in enumerate(rr):
if idx + 1 == len(rr):
file_obj.write('{:.0f} {:.0f} {:.0f} {:.0f} {:f}'.format(*_array))
else:
file_obj.write('{:.0f} {:.0f} {:.0f} {:.0f} {:f}\n'.format(*_array))
def save_distancemap(output_path,
fasta,
distancemap,
):
plt.imsave(f'{output_path}/{fasta}/{fasta}.png',distancemap)
def check_train_data(target_fasta,
train_fasta_list):
"""
check the is it trained fasta ?
"""
for fasta in train_fasta_list:
if target_fasta[:4] in fasta:
return True
break
return False
def makedirs(*path):
for _path in path :
os.makedirs(_path,exist_ok=True)
def prpc_data(fasta_path,
a3m_path,
dca_path,
pssm_path,
database_path,
length_min = 50,
length_max = 300,
msa_depth_min = 100,
msa_depth_max = 200,):
target_fasta_list, no_a3m_list, no_dca_list = get_target_list(fasta_path, a3m_path, dca_path, pssm_path)
print(target_fasta_list, no_a3m_list, no_dca_list)
logging.info(f"need the pre-processing. we have to create a3m, f1d_pssm, f2d_dca. "
f"number of data that needed processing. : {len(no_a3m_list) + len(no_dca_list)}")
with tempfile.TemporaryDirectory() as tmp_path:
tmp_fasta_path = f'{tmp_path}/fasta'
tmp_a3m_path = f'{tmp_path}/a3m'
makedirs(tmp_fasta_path, tmp_a3m_path)
# absPath =os.path.abspath(__file__)
# curPath, file = os.path.split(absPath)
# copy the fasta from data_path to tmp_path
for no_a3m_data in no_a3m_list:
logging.info(no_a3m_data)
src_fasta = f'{fasta_path}/{no_a3m_data}.fasta'
dst_fasta = f'{tmp_fasta_path}/{no_a3m_data}.fasta'
shutil.copy(src_fasta, dst_fasta)
# subprocess.call(["python ../msa_hhblits_mp.py --fasta_dir={tmp_path}/fasta --a3m_dir={tmp_path}/a3m --database_dir=/uniclust/uniclust30_2018_08 --cpu=8"])
subprocess.call(["python", "./presearch_trrosetta/prepare/create_a3m.py",
"--fasta_path", tmp_fasta_path,
"--a3m_path", tmp_a3m_path,
"--database_path", database_path,
"--cpu", "8"])
# todo : seq -> fasta
# move the finished a3m file from tmp_path to output_path
for finished_a3m_data in no_a3m_list:
src_a3m = f'{tmp_a3m_path}/{finished_a3m_data}.a3m'
dst_a3m = f'{a3m_path}/{finished_a3m_data}.a3m'
shutil.copy(src_a3m, dst_a3m)
print("a3m finish")
# copy the a3m from data_path to tmp_path
for no_dca_data in no_dca_list:
src_a3m = f'{a3m_path}/{no_dca_data}.a3m'
dst_a3m = f'{tmp_a3m_path}/{no_dca_data}.a3m'
shutil.copy(src_a3m, dst_a3m)
save_data(a3m_path=tmp_a3m_path,
dca_path=dca_path,
pssm_path=pssm_path,
length_min=length_min,
length_max=length_max,
msa_depth_min=msa_depth_min,
msa_depth_max=msa_depth_max,
mode='numpy')
logging.info("pre-proceesing is finished.")
def predict(fasta_path,
a3m_path,
dca_path,
pssm_path,
output_path,
config,
experiment_folder,
):
model = predict_model(config)
model.load_weights(config.load_weight)
#model.summary()
# we want to check that target is already trained or not.
target_fasta_list, no_a3m_list, no_dca_list = get_target_list(fasta_path, a3m_path, dca_path, pssm_path)
trained_fasta_list = np.loadtxt(f'{experiment_folder}/train_list.txt', dtype=str)
for fasta in tqdm.tqdm(target_fasta_list, desc="predict"):
# todo : using the __getitem__ or not.
seq, length = get_seq(f'{fasta_path}/{fasta}.fasta', config.max_len)
f1d_pssm = get_pssm(f'{pssm_path}/{fasta}.txt', config.max_len)
f2d_dca = get_dca(f'{dca_path}/{fasta}.npy', config.max_len)
inputs = {'seq': np.expand_dims(seq,axis=0),
'f2d_dca': np.expand_dims(f2d_dca,axis=0),
'f1d_pssm': np.expand_dims(f1d_pssm,axis=0),
'length' : [length]}
prediction = get_prediction(inputs,
model
)
os.makedirs(f'{output_path}/{fasta}/', exist_ok=True)
save_rr(output_path,fasta,prediction['rr'],seq[:length])
save_distancemap(output_path,fasta,prediction['distancemap'][:length,:length])
with open(f'{output_path}/{fasta}/logs.txt', mode='w') as file_obj:
if check_train_data(fasta,trained_fasta_list):
file_obj.write("trained")
else:
file_obj.write("not-trained")
print(f"prediction is finished, check your output fordler{output_path}")
def parse_args(args) :
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--experiment_folder', default=None)
parser.add_argument('--fasta_path', help="set fasta path")
parser.add_argument('--a3m_path', help="set a3m path")
parser.add_argument('--dca_path', help="set dca path")
parser.add_argument('--pssm_path', help="set pssm path")
parser.add_argument('--output_path', help="set your output path")
parser.add_argument('--database_path', help="set your Uniclust database")
parser.add_argument('--length_min', default=50, type=int,
help='set your minimum value of sequence length, default value is 50')
parser.add_argument('--length_max', default=300, type=int,
help='set your maximum value of sequence length, default value is 300')
parser.add_argument('--msa_depth_min', default=100, type=int,
help='set your minimum number of msa result, default value is 100')
parser.add_argument('--msa_depth_max', default=200, type=int,
help='set your minimum number of msa result, default value is 200')
return parser.parse_args(args)
def main(args=None):
# todo parallel processing. -> no plan
# todo check -> how many use msa result? (current 200)
if args is None:
args = sys.argv[1:]
args = parse_args(args)
if args.experiment_folder == None :
config = DistanceConfig()
else :
if os.path.isfile(f'{args.experiment_folder}/config.json'):
print("config file exist")
config = DistanceConfig.from_json_file(f'{args.experiment_folder}/config.json')
else :
config = DistanceConfig()
with open(f'{args.experiment_folder}/config.json', 'w', encoding='utf-8') as config_file :
json.dump(config.to_dict(), config_file)
config.load_weight = f'{args.experiment_folder}/{config.load_weight}'
makedirs(args.fasta_path, args.a3m_path, args.dca_path, args.pssm_path)
prpc_data(
fasta_path = args.fasta_path,
a3m_path = args.a3m_path,
dca_path = args.dca_path,
pssm_path = args.pssm_path,
database_path = args.database_path,
length_min = args.length_min,
length_max = args.length_max,
msa_depth_min = args.msa_depth_min,
msa_depth_max = args.msa_depth_max,
)
predict(
fasta_path = args.fasta_path,
a3m_path = args.a3m_path,
dca_path = args.dca_path,
pssm_path = args.pssm_path,
output_path = args.output_path,
config = config,
experiment_folder = args.experiment_folder,
)
if __name__ == '__main__':
main()
# todo : DCon까지
# evaluate
# report 다른것도 추가하고
# cif 전처리 까지 추가 해서 create data
# nmr 구조는 여러개의 체인이 중복 (실험을 여러번 해서 ? )
# .seq -> .fasta | [
"numpy.load",
"argparse.ArgumentParser",
"numpy.argmax",
"numpy.ones",
"presearch_trrosetta.utils.config.DistanceConfig.from_json_file",
"os.path.isfile",
"matplotlib.pyplot.imsave",
"shutil.copy",
"numpy.pad",
"tempfile.TemporaryDirectory",
"os.path.exists",
"numpy.loadtxt",
"tqdm.tqdm",
... | [((899, 918), 'presearch_trrosetta.architecture.trRosetta.trRosetta', 'trRosetta', ([], {}), '(**kwargs)\n', (908, 918), False, 'from presearch_trrosetta.architecture.trRosetta import trRosetta\n'), ((2388, 2409), 'numpy.array', 'np.array', (['result_list'], {}), '(result_list)\n', (2396, 2409), True, 'import numpy as np\n'), ((3450, 3472), 'os.listdir', 'os.listdir', (['fasta_path'], {}), '(fasta_path)\n', (3460, 3472), False, 'import os\n'), ((5253, 5274), 'numpy.loadtxt', 'np.loadtxt', (['pssm_file'], {}), '(pssm_file)\n', (5263, 5274), True, 'import numpy as np\n'), ((5743, 5760), 'numpy.load', 'np.load', (['msa_file'], {}), '(msa_file)\n', (5750, 5760), True, 'import numpy as np\n'), ((6709, 6770), 'matplotlib.pyplot.imsave', 'plt.imsave', (['f"""{output_path}/{fasta}/{fasta}.png"""', 'distancemap'], {}), "(f'{output_path}/{fasta}/{fasta}.png', distancemap)\n", (6719, 6770), True, 'import matplotlib.pyplot as plt\n'), ((10312, 10372), 'numpy.loadtxt', 'np.loadtxt', (['f"""{experiment_folder}/train_list.txt"""'], {'dtype': 'str'}), "(f'{experiment_folder}/train_list.txt', dtype=str)\n", (10322, 10372), True, 'import numpy as np\n'), ((10393, 10437), 'tqdm.tqdm', 'tqdm.tqdm', (['target_fasta_list'], {'desc': '"""predict"""'}), "(target_fasta_list, desc='predict')\n", (10402, 10437), False, 'import tqdm\n'), ((11654, 11679), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11677, 11679), False, 'import argparse\n'), ((1182, 1207), 'numpy.ones', 'np.ones', (['[length, length]'], {}), '([length, length])\n', (1189, 1207), True, 'import numpy as np\n'), ((1227, 1251), 'tensorflow.argmax', 'tf.argmax', (['pred'], {'axis': '(-1)'}), '(pred, axis=-1)\n', (1236, 1251), True, 'import tensorflow as tf\n'), ((3501, 3529), 'os.path.splitext', 'os.path.splitext', (['fasta_file'], {}), '(fasta_file)\n', (3517, 3529), False, 'import os\n'), ((4966, 4979), 'numpy.array', 'np.array', (['seq'], {}), '(seq)\n', (4974, 4979), True, 'import numpy as 
np\n'), ((5939, 5993), 'numpy.pad', 'np.pad', (['msa_prpc', '[[0, pad_len], [0, pad_len], [0, 0]]'], {}), '(msa_prpc, [[0, pad_len], [0, pad_len], [0, 0]])\n', (5945, 5993), True, 'import numpy as np\n'), ((7103, 7136), 'os.makedirs', 'os.makedirs', (['_path'], {'exist_ok': '(True)'}), '(_path, exist_ok=True)\n', (7114, 7136), False, 'import os\n'), ((7780, 7809), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (7807, 7809), False, 'import tempfile\n'), ((8543, 8733), 'subprocess.call', 'subprocess.call', (["['python', './presearch_trrosetta/prepare/create_a3m.py', '--fasta_path',\n tmp_fasta_path, '--a3m_path', tmp_a3m_path, '--database_path',\n database_path, '--cpu', '8']"], {}), "(['python', './presearch_trrosetta/prepare/create_a3m.py',\n '--fasta_path', tmp_fasta_path, '--a3m_path', tmp_a3m_path,\n '--database_path', database_path, '--cpu', '8'])\n", (8558, 8733), False, 'import subprocess\n'), ((9437, 9636), 'presearch_trrosetta.prepare.create_dataset.save_data', 'save_data', ([], {'a3m_path': 'tmp_a3m_path', 'dca_path': 'dca_path', 'pssm_path': 'pssm_path', 'length_min': 'length_min', 'length_max': 'length_max', 'msa_depth_min': 'msa_depth_min', 'msa_depth_max': 'msa_depth_max', 'mode': '"""numpy"""'}), "(a3m_path=tmp_a3m_path, dca_path=dca_path, pssm_path=pssm_path,\n length_min=length_min, length_max=length_max, msa_depth_min=\n msa_depth_min, msa_depth_max=msa_depth_max, mode='numpy')\n", (9446, 9636), False, 'from presearch_trrosetta.prepare.create_dataset import save_data\n'), ((9772, 9815), 'logging.info', 'logging.info', (['"""pre-proceesing is finished."""'], {}), "('pre-proceesing is finished.')\n", (9784, 9815), False, 'import logging\n'), ((11072, 11125), 'os.makedirs', 'os.makedirs', (['f"""{output_path}/{fasta}/"""'], {'exist_ok': '(True)'}), "(f'{output_path}/{fasta}/', exist_ok=True)\n", (11083, 11125), False, 'import os\n'), ((13093, 13109), 'presearch_trrosetta.utils.config.DistanceConfig', 'DistanceConfig', 
([], {}), '()\n', (13107, 13109), False, 'from presearch_trrosetta.utils.config import DistanceConfig\n'), ((13134, 13189), 'os.path.isfile', 'os.path.isfile', (['f"""{args.experiment_folder}/config.json"""'], {}), "(f'{args.experiment_folder}/config.json')\n", (13148, 13189), False, 'import os\n'), ((3772, 3796), 'os.path.exists', 'os.path.exists', (['a3m_file'], {}), '(a3m_file)\n', (3786, 3796), False, 'import os\n'), ((8167, 8192), 'logging.info', 'logging.info', (['no_a3m_data'], {}), '(no_a3m_data)\n', (8179, 8192), False, 'import logging\n'), ((8332, 8365), 'shutil.copy', 'shutil.copy', (['src_fasta', 'dst_fasta'], {}), '(src_fasta, dst_fasta)\n', (8343, 8365), False, 'import shutil\n'), ((9116, 9145), 'shutil.copy', 'shutil.copy', (['src_a3m', 'dst_a3m'], {}), '(src_a3m, dst_a3m)\n', (9127, 9145), False, 'import shutil\n'), ((9396, 9425), 'shutil.copy', 'shutil.copy', (['src_a3m', 'dst_a3m'], {}), '(src_a3m, dst_a3m)\n', (9407, 9425), False, 'import shutil\n'), ((10738, 10765), 'numpy.expand_dims', 'np.expand_dims', (['seq'], {'axis': '(0)'}), '(seq, axis=0)\n', (10752, 10765), True, 'import numpy as np\n'), ((10796, 10827), 'numpy.expand_dims', 'np.expand_dims', (['f2d_dca'], {'axis': '(0)'}), '(f2d_dca, axis=0)\n', (10810, 10827), True, 'import numpy as np\n'), ((10859, 10891), 'numpy.expand_dims', 'np.expand_dims', (['f1d_pssm'], {'axis': '(0)'}), '(f1d_pssm, axis=0)\n', (10873, 10891), True, 'import numpy as np\n'), ((13253, 13323), 'presearch_trrosetta.utils.config.DistanceConfig.from_json_file', 'DistanceConfig.from_json_file', (['f"""{args.experiment_folder}/config.json"""'], {}), "(f'{args.experiment_folder}/config.json')\n", (13282, 13323), False, 'from presearch_trrosetta.utils.config import DistanceConfig\n'), ((13362, 13378), 'presearch_trrosetta.utils.config.DistanceConfig', 'DistanceConfig', ([], {}), '()\n', (13376, 13378), False, 'from presearch_trrosetta.utils.config import DistanceConfig\n'), ((1961, 1988), 'numpy.argmax', 'np.argmax', 
(['pred_numpy[i, j]'], {}), '(pred_numpy[i, j])\n', (1970, 1988), True, 'import numpy as np\n'), ((5384, 5423), 'numpy.zeros', 'np.zeros', (['[max_len - pssm.shape[0], 21]'], {}), '([max_len - pssm.shape[0], 21])\n', (5392, 5423), True, 'import numpy as np\n'), ((3861, 3889), 'os.path.exists', 'os.path.exists', (['f2d_dca_file'], {}), '(f2d_dca_file)\n', (3875, 3889), False, 'import os\n'), ((3894, 3923), 'os.path.exists', 'os.path.exists', (['f1d_pssm_file'], {}), '(f1d_pssm_file)\n', (3908, 3923), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 28 10:01:15 2022
@author: awatson
"""
from skimage import io
from itertools import product
from matplotlib import pyplot as plt
import numpy as np
import os
import math
import json
import math
import tifffile
import imagecodecs
import zarr
import dask
from dask.delayed import delayed
## weave specific imports
from .util import pixToMB, MBToPix, prepareAndGetFilePath, getFullFilePath, makeDir, getMetaFile
testInput = np.zeros((2,10,30000,30000), dtype=np.uint16)
location= r'z:\testWeave'
## A class to create and read weave
class weave_make:
def __init__(self, inputArray, saveLocation, maxLowResMB=10, chunks=(512,512), compression='zlib'):
'''
Input array should be layed out as (t,c,z,y,x)
'''
while len(inputArray.shape) < 5:
inputArray = inputArray[None,...]
self.inputArray = inputArray
self.shape = inputArray.shape
try:
self.size = inputArray.size
except Exception:
self.size = math.prod(self.shape)
self.location = saveLocation
self.maxLowResMB = maxLowResMB
self.dtype = str(inputArray.dtype) # str allows us to serialize to json
self.chunks = chunks
self.compression = compression
## Create meta dict which will be saved to disk to descrive weave array
self.meta = {}
self.meta['shape'] = self.shape
self.meta['size'] = self.size
self.meta['location'] = self.location
self.meta['maxLowResMB'] = self.maxLowResMB
self.meta['dtype'] = str(self.dtype)
self.meta['chunks'] = self.chunks
self.meta['compression'] = self.compression
## Determine the proper weave number (ie subsample number)
for ii in range(1,max(self.shape[-2::])+1):
# blockSize = ii*ii*16/8/1024/1024
sizeSubSamp = math.ceil(self.shape[-2] / ii) * math.ceil(self.shape[-1] / ii)
sizeSubSamp = pixToMB(sizeSubSamp)
# print(sizeSubSamp)
if sizeSubSamp <= self.maxLowResMB:
subSamp = ii
print('Subsample rate = {}'.format(subSamp))
break
self.meta['weaveNumber'] = subSamp
self.meta['lowResSizeMB'] = sizeSubSamp
self.meta['total_file_count'] = math.prod(self.meta['shape']) * self.meta['weaveNumber']**2
self.meta['size_uncompressedTB'] = pixToMB(self.size[:-2]) / 1024 / 1024
# Run Save
makeDir(os.path.split(getMetaFile(self.meta['location']))[0])
with open(getMetaFile(self.meta['location']), 'w') as fp:
json.dump(self.meta, fp)
self.makeWeave()
def makeWeave(self):
toWrite = []
for t,c,z,ii,oo in product(range(self.meta['shape'][0]),
range(self.meta['shape'][1]),
range(self.meta['shape'][2]),
range(self.meta['weaveNumber']),
range(self.meta['weaveNumber'])
):
fileName = getFullFilePath(self.location,t,c,z,ii,oo)
print('Queueing {}'.format(fileName))
# toWrite.append(
# delayed(self.saveTiff)
# (fileName,
# self.inputArray[t,c,z,ii::self.meta['weaveNumber'], oo::self.meta['weaveNumber']],
# tile=self.meta['chunks'],
# compression=self.meta['compression']
# )
# )
toWrite.append(
delayed(self.saveTiff)
(fileName,
t,c,z,ii,oo,
tile=self.meta['chunks'],
compression=self.meta['compression']
)
)
print('Computing Saves')
# print(toWrite)
dask.compute(toWrite)
# def saveTiff(self,fileName,array,tile=(512,512),compression=None):
# print('Saving {} \n'.format(fileName))
# makeDir(os.path.split(fileName)[0])
# tifffile.imwrite(fileName,array,tile=tile,compression=compression)
def saveTiff(self,fileName,t,c,z,ii,oo,tile=(512,512),compression=None):
print('Saving {} \n'.format(fileName))
makeDir(os.path.split(fileName)[0])
tifffile.imwrite(
fileName,
self.inputArray[t,c,z,ii::self.meta['weaveNumber'], oo::self.meta['weaveNumber']],
tile=tile,
compression=compression
)
# # Size of single low res
# for idx in subImages:
# if isinstance(idx, tuple) != True:
# continue
# shapeSingleImg = subImages[idx].shape
# size = pixToMB(subImages[idx].shape[0] * subImages[idx].shape[1])
# break
# print('Low-Res version = {} MB'.format(size))
# print('Shape of single low res image = {}'.format(shapeSingleImg))
# # Determine the size of the image for the given resolution level
# # Insert this info into the subImages dict as 'resolution{#}_shape'
# y=0
# x=0
# for ii in range(self.meta['weaveNumber']):
# y+=subImages[(ii,0)].shape[0]
# x+=subImages[(0,ii)].shape[1]
# subImages['resolution{}_shape'.format(self.meta['weaveNumber']-ii-1)] = (y,x)
# chunkSize = (512,512)
# # Reassemble Full resolution (whole image)
# canvas = np.zeros(self.meta['shape'], dtype=self.meta['dtype'])
# for ii,oo in product(range(self.meta['weaveNumber']),range(self.meta['weaveNumber'])):
# canvas[ii::self.meta['weaveNumber'], oo::self.meta['weaveNumber']] = \
# subImages[(ii,oo)]
| [
"json.dump",
"math.ceil",
"math.prod",
"tifffile.imwrite",
"numpy.zeros",
"dask.compute",
"os.path.split",
"dask.delayed.delayed"
] | [((471, 519), 'numpy.zeros', 'np.zeros', (['(2, 10, 30000, 30000)'], {'dtype': 'np.uint16'}), '((2, 10, 30000, 30000), dtype=np.uint16)\n', (479, 519), True, 'import numpy as np\n'), ((4046, 4067), 'dask.compute', 'dask.compute', (['toWrite'], {}), '(toWrite)\n', (4058, 4067), False, 'import dask\n'), ((4499, 4657), 'tifffile.imwrite', 'tifffile.imwrite', (['fileName', "self.inputArray[t, c, z, ii::self.meta['weaveNumber'], oo::self.meta[\n 'weaveNumber']]"], {'tile': 'tile', 'compression': 'compression'}), "(fileName, self.inputArray[t, c, z, ii::self.meta[\n 'weaveNumber'], oo::self.meta['weaveNumber']], tile=tile, compression=\n compression)\n", (4515, 4657), False, 'import tifffile\n'), ((2414, 2443), 'math.prod', 'math.prod', (["self.meta['shape']"], {}), "(self.meta['shape'])\n", (2423, 2443), False, 'import math\n'), ((2731, 2755), 'json.dump', 'json.dump', (['self.meta', 'fp'], {}), '(self.meta, fp)\n', (2740, 2755), False, 'import json\n'), ((1069, 1090), 'math.prod', 'math.prod', (['self.shape'], {}), '(self.shape)\n', (1078, 1090), False, 'import math\n'), ((1979, 2009), 'math.ceil', 'math.ceil', (['(self.shape[-2] / ii)'], {}), '(self.shape[-2] / ii)\n', (1988, 2009), False, 'import math\n'), ((2012, 2042), 'math.ceil', 'math.ceil', (['(self.shape[-1] / ii)'], {}), '(self.shape[-1] / ii)\n', (2021, 2042), False, 'import math\n'), ((4463, 4486), 'os.path.split', 'os.path.split', (['fileName'], {}), '(fileName)\n', (4476, 4486), False, 'import os\n'), ((3750, 3772), 'dask.delayed.delayed', 'delayed', (['self.saveTiff'], {}), '(self.saveTiff)\n', (3757, 3772), False, 'from dask.delayed import delayed\n')] |
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import numpy as np
import time
from scipy.stats import norm, mode
import os
import rospy
class TLClassifier(object):
def __init__(self):
GRAPH_FILE = 'frozen_inference_graph.pb'
model_path = os.path.dirname(os.path.realpath(__file__))
model_path = os.path.join(model_path, GRAPH_FILE)
rospy.loginfo("model_path={}".format(model_path))
self.detection_graph = self.load_graph(model_path)
rospy.loginfo("model loaded")
# `get_tensor_by_name` returns the Tensor with the associated name in the Graph.
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
# The classification of the object (integer id).
self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
def filter_boxes(self, min_score, boxes, scores, classes):
"""Return boxes with a confidence >= `min_score`"""
n = len(classes)
# print('number of classes = %d' % n)
idxs = []
for i in range(n):
if scores[i] >= min_score:
idxs.append(i)
filtered_boxes = boxes[idxs, ...]
filtered_scores = scores[idxs, ...]
filtered_classes = classes[idxs, ...]
return filtered_boxes, filtered_scores, filtered_classes
def load_graph(self, graph_file):
"""Loads a frozen inference graph"""
graph = tf.Graph()
with graph.as_default():
od_graph_def = tf.GraphDef()
#od_graph_def = tf.compat.v1.GraphDef()
with tf.gfile.GFile(graph_file, 'rb') as fid:
#with tf.io.gfile.GFile(graph_file, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return graph
def to_string(self, state):
out = "unknown"
if state == TrafficLight.GREEN:
out = "green"
elif state == TrafficLight.YELLOW:
out = "yellow"
elif state == TrafficLight.RED:
out = "red"
return out
def get_classification(self, image, tag):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# tag = "{:.0f}".format(time.time())[-3:]
rospy.loginfo(str("calling classifier on light - [%s]" % tag))
start = time.time()
image_np = np.expand_dims(np.asarray(image, dtype=np.uint8), 0)
with tf.Session(graph=self.detection_graph) as sess:
# Actual detection.
(boxes, scores, classes) = sess.run([self.detection_boxes, self.detection_scores, self.detection_classes],
feed_dict={self.image_tensor: image_np})
# Remove unnecessary dimensions
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes)
confidence_cutoff = 0.6
# Filter boxes with a confidence score less than `confidence_cutoff`
boxes, scores, classes = self.filter_boxes(confidence_cutoff, boxes, scores, classes)
options = [TrafficLight.GREEN, TrafficLight.RED, TrafficLight.YELLOW, TrafficLight.UNKNOWN]
if len(classes) != 0:
result = options[int(mode(classes)[0][0])-1]
# colors = [red, yellow, green, unknown]
#rospy.loginfo("upcoming light={}".format(self.to_string(result), ))
rospy.loginfo(str('upcoming light classied as %s in %.3f s - [%s]' % (self.to_string(result), time.time()-start, tag)))
return result
else:
rospy.loginfo(str("unable to classify - [%s]" % tag))
return TrafficLight.UNKNOWN
| [
"scipy.stats.mode",
"os.path.realpath",
"numpy.asarray",
"tensorflow.Session",
"time.time",
"rospy.loginfo",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"numpy.squeeze",
"tensorflow.import_graph_def",
"tensorflow.GraphDef",
"os.path.join"
] | [((354, 390), 'os.path.join', 'os.path.join', (['model_path', 'GRAPH_FILE'], {}), '(model_path, GRAPH_FILE)\n', (366, 390), False, 'import os\n'), ((525, 554), 'rospy.loginfo', 'rospy.loginfo', (['"""model loaded"""'], {}), "('model loaded')\n", (538, 554), False, 'import rospy\n'), ((1933, 1943), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1941, 1943), True, 'import tensorflow as tf\n'), ((3098, 3109), 'time.time', 'time.time', ([], {}), '()\n', (3107, 3109), False, 'import time\n'), ((304, 330), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (320, 330), False, 'import os\n'), ((2004, 2017), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2015, 2017), True, 'import tensorflow as tf\n'), ((3144, 3177), 'numpy.asarray', 'np.asarray', (['image'], {'dtype': 'np.uint8'}), '(image, dtype=np.uint8)\n', (3154, 3177), True, 'import numpy as np\n'), ((3196, 3234), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.detection_graph'}), '(graph=self.detection_graph)\n', (3206, 3234), True, 'import tensorflow as tf\n'), ((3567, 3584), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (3577, 3584), True, 'import numpy as np\n'), ((3606, 3624), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (3616, 3624), True, 'import numpy as np\n'), ((3647, 3666), 'numpy.squeeze', 'np.squeeze', (['classes'], {}), '(classes)\n', (3657, 3666), True, 'import numpy as np\n'), ((2087, 2119), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['graph_file', '"""rb"""'], {}), "(graph_file, 'rb')\n", (2101, 2119), True, 'import tensorflow as tf\n'), ((2315, 2357), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (2334, 2357), True, 'import tensorflow as tf\n'), ((4061, 4074), 'scipy.stats.mode', 'mode', (['classes'], {}), '(classes)\n', (4065, 4074), False, 'from scipy.stats import norm, mode\n'), ((4343, 4354), 'time.time', 'time.time', ([], {}), '()\n', 
(4352, 4354), False, 'import time\n')] |
import os
import shutil
import numpy as np
old_model_path = '/home/guoran/git-repo/yolo_test_1218/yolov3_model'
new_model_path = "yolov3_model_python/"
os.mkdir(new_model_path)
layers = os.listdir(old_model_path)
print(layers)
for layer in layers:
models=os.listdir(os.path.join(old_model_path, layer))
for model in models:
src_path = old_model_path+"/"+layer+"/"+model
#print(src_path)
dst_dir = os.path.join(new_model_path, layer + "-" + model)
#print(dst_dir)
os.mkdir(dst_dir)
os.mkdir(dst_dir+"-momentum")
#print(dst_dir)
shutil.copyfile(src_path , dst_dir + "/out")
momentum = np.fromfile(src_path, dtype=np.float32)
momentum[:] = 0
momentum.tofile(dst_dir+"-momentum/out")
print("cp",old_model_path+"/"+layer+"/"+model , dst_dir + "/out")
| [
"os.mkdir",
"numpy.fromfile",
"shutil.copyfile",
"os.path.join",
"os.listdir"
] | [((152, 176), 'os.mkdir', 'os.mkdir', (['new_model_path'], {}), '(new_model_path)\n', (160, 176), False, 'import os\n'), ((186, 212), 'os.listdir', 'os.listdir', (['old_model_path'], {}), '(old_model_path)\n', (196, 212), False, 'import os\n'), ((270, 305), 'os.path.join', 'os.path.join', (['old_model_path', 'layer'], {}), '(old_model_path, layer)\n', (282, 305), False, 'import os\n'), ((429, 478), 'os.path.join', 'os.path.join', (['new_model_path', "(layer + '-' + model)"], {}), "(new_model_path, layer + '-' + model)\n", (441, 478), False, 'import os\n'), ((511, 528), 'os.mkdir', 'os.mkdir', (['dst_dir'], {}), '(dst_dir)\n', (519, 528), False, 'import os\n'), ((537, 568), 'os.mkdir', 'os.mkdir', (["(dst_dir + '-momentum')"], {}), "(dst_dir + '-momentum')\n", (545, 568), False, 'import os\n'), ((599, 642), 'shutil.copyfile', 'shutil.copyfile', (['src_path', "(dst_dir + '/out')"], {}), "(src_path, dst_dir + '/out')\n", (614, 642), False, 'import shutil\n'), ((663, 702), 'numpy.fromfile', 'np.fromfile', (['src_path'], {'dtype': 'np.float32'}), '(src_path, dtype=np.float32)\n', (674, 702), True, 'import numpy as np\n')] |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Task where both the input and output sequence are plain text.
"""
import os
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
from tensorflow import gfile
from seq2seq.tasks.decode_text import _get_prediction_length
from seq2seq.tasks.inference_task import InferenceTask, unbatch_dict
def _get_scores(predictions_dict):
"""Returns the attention scores, sliced by source and target length.
"""
prediction_len = _get_prediction_length(predictions_dict)
source_len = predictions_dict["features.source_len"]
return predictions_dict["attention_scores"][:prediction_len, :source_len]
def _create_figure(predictions_dict):
"""Creates and returns a new figure that visualizes
attention scores for for a single model predictions.
"""
# Find out how long the predicted sequence is
target_words = list(predictions_dict["predicted_tokens"])
prediction_len = _get_prediction_length(predictions_dict)
# Get source words
source_len = predictions_dict["features.source_len"]
source_words = predictions_dict["features.source_tokens"][:source_len]
# Plot
fig = plt.figure(figsize=(8, 8))
plt.imshow(
X=predictions_dict["attention_scores"][:prediction_len, :source_len],
interpolation="nearest",
cmap=plt.cm.Blues)
plt.xticks(np.arange(source_len), source_words, rotation=45)
plt.yticks(np.arange(prediction_len), target_words, rotation=-45)
fig.tight_layout()
return fig
class DumpAttention(InferenceTask):
"""Defines inference for tasks where both the input and output sequences
are plain text.
Params:
delimiter: Character by which tokens are delimited. Defaults to space.
unk_replace: If true, enable unknown token replacement based on attention
scores.
unk_mapping: If `unk_replace` is true, this can be the path to a file
defining a dictionary to improve UNK token replacement. Refer to the
documentation for more details.
dump_attention_dir: Save attention scores and plots to this directory.
dump_attention_no_plot: If true, only save attention scores, not
attention plots.
dump_beams: Write beam search debugging information to this file.
"""
def __init__(self, params):
super(DumpAttention, self).__init__(params)
self._attention_scores_accum = []
self._idx = 0
if not self.params["output_dir"]:
raise ValueError("Must specify output_dir for DumpAttention")
@staticmethod
def default_params():
params = {}
params.update({"output_dir": "", "dump_plots": True})
return params
def begin(self):
super(DumpAttention, self).begin()
gfile.MakeDirs(self.params["output_dir"])
def before_run(self, _run_context):
fetches = {}
fetches["predicted_tokens"] = self._predictions["predicted_tokens"]
fetches["features.source_len"] = self._predictions["features.source_len"]
fetches["features.source_tokens"] = self._predictions[
"features.source_tokens"]
fetches["attention_scores"] = self._predictions["attention_scores"]
return tf.train.SessionRunArgs(fetches)
def after_run(self, _run_context, run_values):
fetches_batch = run_values.results
for fetches in unbatch_dict(fetches_batch):
# Convert to unicode
fetches["predicted_tokens"] = np.char.decode(
fetches["predicted_tokens"].astype("S"), "utf-8")
fetches["features.source_tokens"] = np.char.decode(
fetches["features.source_tokens"].astype("S"), "utf-8")
if self.params["dump_plots"]:
output_path = os.path.join(self.params["output_dir"],
"{:05d}.png".format(self._idx))
_create_figure(fetches)
plt.savefig(output_path)
plt.close()
tf.logging.info("Wrote %s", output_path)
self._idx += 1
self._attention_scores_accum.append(_get_scores(fetches))
def end(self, _session):
scores_path = os.path.join(self.params["output_dir"],
"attention_scores.npz")
np.savez(scores_path, *self._attention_scores_accum)
tf.logging.info("Wrote %s", scores_path)
| [
"tensorflow.gfile.MakeDirs",
"tensorflow.train.SessionRunArgs",
"tensorflow.logging.info",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"seq2seq.tasks.inference_task.unbatch_dict",
"numpy.arange",
"numpy.savez",
"os.path.join",
"seq2seq.tasks.decode_text._ge... | [((1032, 1072), 'seq2seq.tasks.decode_text._get_prediction_length', '_get_prediction_length', (['predictions_dict'], {}), '(predictions_dict)\n', (1054, 1072), False, 'from seq2seq.tasks.decode_text import _get_prediction_length\n'), ((1488, 1528), 'seq2seq.tasks.decode_text._get_prediction_length', '_get_prediction_length', (['predictions_dict'], {}), '(predictions_dict)\n', (1510, 1528), False, 'from seq2seq.tasks.decode_text import _get_prediction_length\n'), ((1697, 1723), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (1707, 1723), True, 'from matplotlib import pyplot as plt\n'), ((1726, 1855), 'matplotlib.pyplot.imshow', 'plt.imshow', ([], {'X': "predictions_dict['attention_scores'][:prediction_len, :source_len]", 'interpolation': '"""nearest"""', 'cmap': 'plt.cm.Blues'}), "(X=predictions_dict['attention_scores'][:prediction_len, :\n source_len], interpolation='nearest', cmap=plt.cm.Blues)\n", (1736, 1855), True, 'from matplotlib import pyplot as plt\n'), ((1883, 1904), 'numpy.arange', 'np.arange', (['source_len'], {}), '(source_len)\n', (1892, 1904), True, 'import numpy as np\n'), ((1946, 1971), 'numpy.arange', 'np.arange', (['prediction_len'], {}), '(prediction_len)\n', (1955, 1971), True, 'import numpy as np\n'), ((3213, 3254), 'tensorflow.gfile.MakeDirs', 'gfile.MakeDirs', (["self.params['output_dir']"], {}), "(self.params['output_dir'])\n", (3227, 3254), False, 'from tensorflow import gfile\n'), ((3637, 3669), 'tensorflow.train.SessionRunArgs', 'tf.train.SessionRunArgs', (['fetches'], {}), '(fetches)\n', (3660, 3669), True, 'import tensorflow as tf\n'), ((3778, 3805), 'seq2seq.tasks.inference_task.unbatch_dict', 'unbatch_dict', (['fetches_batch'], {}), '(fetches_batch)\n', (3790, 3805), False, 'from seq2seq.tasks.inference_task import InferenceTask, unbatch_dict\n'), ((4503, 4566), 'os.path.join', 'os.path.join', (["self.params['output_dir']", '"""attention_scores.npz"""'], 
{}), "(self.params['output_dir'], 'attention_scores.npz')\n", (4515, 4566), False, 'import os\n'), ((4602, 4654), 'numpy.savez', 'np.savez', (['scores_path', '*self._attention_scores_accum'], {}), '(scores_path, *self._attention_scores_accum)\n', (4610, 4654), True, 'import numpy as np\n'), ((4659, 4699), 'tensorflow.logging.info', 'tf.logging.info', (['"""Wrote %s"""', 'scores_path'], {}), "('Wrote %s', scores_path)\n", (4674, 4699), True, 'import tensorflow as tf\n'), ((4276, 4300), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_path'], {}), '(output_path)\n', (4287, 4300), True, 'from matplotlib import pyplot as plt\n'), ((4309, 4320), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4318, 4320), True, 'from matplotlib import pyplot as plt\n'), ((4329, 4369), 'tensorflow.logging.info', 'tf.logging.info', (['"""Wrote %s"""', 'output_path'], {}), "('Wrote %s', output_path)\n", (4344, 4369), True, 'import tensorflow as tf\n')] |
import numpy as np
def minmax_scale(X:np.ndarray, axis=0, feature_range=(0,1)):
X_out = np.zeros_like(X)
X_min = np.min(X, axis=axis)
X_max = np.max(X, axis=axis)
X_out = (X - X_min) / (X_max - X_min)
X_out = X_out * (feature_range[1]-feature_range[0]) + feature_range[0]
return X_out
def transpose(x:np.ndarray) -> np.ndarray:
return x.transpose() | [
"numpy.min",
"numpy.zeros_like",
"numpy.max"
] | [((93, 109), 'numpy.zeros_like', 'np.zeros_like', (['X'], {}), '(X)\n', (106, 109), True, 'import numpy as np\n'), ((122, 142), 'numpy.min', 'np.min', (['X'], {'axis': 'axis'}), '(X, axis=axis)\n', (128, 142), True, 'import numpy as np\n'), ((155, 175), 'numpy.max', 'np.max', (['X'], {'axis': 'axis'}), '(X, axis=axis)\n', (161, 175), True, 'import numpy as np\n')] |
from numpy import multiply
from scipy.optimize import linprog
__author__ = ["<NAME>", "<NAME>"]
__copyright__ = "Copyright 2017"
__credits__ = []
__version__ = "1.0"
__status__ = "Development"
def header():
print('\n Agroplex - © 2017\n\n'
' <NAME> e <NAME>\n'
' Faculdade de Tecnologia de Ourinhos - FATEC | Tel.: (14) 3512-2024\n\n')
def ler_float(text):
while True:
try:
valor = float(input(text).replace(',', '.'))
break
except ValueError:
continue
return valor
def ler_int(text):
while True:
try:
valor = int(input(text))
break
except ValueError:
continue
return valor
def resolver(self):
print('\n Resolvendo...')
r = self.r # Restrição 1, 2, 3... ex.: [[X1, X2], [X1, X2], [X1, X2]]
b = self.b # valores bases das restrições
# quando é para maximizar, multiplica por -1, ou seja, faz o inverso
f = multiply(self.x, -1) # valores da função objetivo
xi_bounds = (0, None)
return linprog(f, r, b, bounds=xi_bounds, options={"disp": False})
| [
"scipy.optimize.linprog",
"numpy.multiply"
] | [((1023, 1043), 'numpy.multiply', 'multiply', (['self.x', '(-1)'], {}), '(self.x, -1)\n', (1031, 1043), False, 'from numpy import multiply\n'), ((1117, 1176), 'scipy.optimize.linprog', 'linprog', (['f', 'r', 'b'], {'bounds': 'xi_bounds', 'options': "{'disp': False}"}), "(f, r, b, bounds=xi_bounds, options={'disp': False})\n", (1124, 1176), False, 'from scipy.optimize import linprog\n')] |
import pandas as pd
import numpy as np
import sys
import argparse
import time
from scipy.special import gamma
import os
import pickle
import torch
import NMF_functions
from ARD_NMF import ARD_NMF
import pyarrow.feather as feather
from ARD_NMF import run_method_engine
import torch.nn as nn
import torch.multiprocessing as mp
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
def run_parameter_sweep(parameters,dataset,args,Beta):
output = []
objectives = []
nsigs = []
times = []
for idx in range(len(parameters)):
data = ARD_NMF(dataset,args.objective)
W,H,cost,time = run_method_engine(data, args.a, args.phi, args.b, Beta,
args.prior_on_W, args.prior_on_H, args.K0, args.tolerance,args.max_iter)
nsig = write_output(W,H,data.channel_names,data.sample_names,args.output_dir,
args.output_prefix + "_" + parameters['label'][idx])
times.append(time)
nsigs.append(nsig)
objectives.append(cost)
parameters['nsigs'] = nsigs
parameters['objective'] = objectives
parameters['times'] = times
parameters.to_csv(args.output_dir + '/' + args.output_prefix + '_results.txt',sep='\t',index=None)
def write_output(W, H, channel_names, sample_names, output_directory, label, active_thresh = 1e-5):
createFolder(output_directory)
nonzero_idx = (np.sum(H, axis=1) * np.sum(W, axis=0)) > active_thresh
W_active = W[:, nonzero_idx]
H_active = H[nonzero_idx, :]
nsig = np.sum(nonzero_idx)
# Normalize W and transfer weight to H matrix
W_weight = np.sum(W_active, axis=0)
W_final = W_active / W_weight
H_final = W_weight[:, np.newaxis] * H_active
sig_names = ['W' + str(j) for j in range(1, nsig + 1)]
W_df = pd.DataFrame(data=W_final, index=channel_names, columns=sig_names)
H_df = pd.DataFrame(data=H_final, index=sig_names, columns=sample_names);
# Write W and H matrices
W_df.to_csv(output_directory + '/'+label+ '_W.txt', sep='\t')
H_df.to_csv(output_directory + '/'+label+ '_H.txt', sep='\t')
return nsig
def main():
''' Run ARD NMF'''
torch.multiprocessing.set_start_method('spawn')
parser = argparse.ArgumentParser(
description='NMF with some sparsity penalty described https://arxiv.org/pdf/1111.6085.pdf')
parser.add_argument('--data', help='Data Matrix', required=True)
parser.add_argument('--feather', help='Input in feather format', required=False, default=False, action='store_true')
parser.add_argument('--parquet', help='Input in parquet format', required=False, default=False, action='store_true')
parser.add_argument('--K0', help='Initial K parameter', required=False, default=None, type=int)
parser.add_argument('--max_iter', help='maximum iterations', required=False, default=10000, type=int)
parser.add_argument('--del_', help='Early stop condition based on lambda change', required=False, default=1,
type=int)
parser.add_argument('--tolerance', help='Early stop condition based on max lambda entry', required=False, default=1e-6,
type=float)
parser.add_argument('--phi', help='dispersion parameter see paper for discussion of choosing phi '
'default = 1', required=False, default=1.0, type=float)
parser.add_argument('--a', help='Hyperparamter for lambda. We recommend trying various values of a. Smaller values'
'will result in sparser results a good starting point might be'
'a = log(F+N)', required=False, default=10.0,type=float)
parser.add_argument('--b', help='Hyperparamter for lambda. Default used is as recommended in Tan and Fevotte 2012',
required = False,type=float, default = None)
parser.add_argument('--objective',help='Defines the data objective. Choose between "poisson" or "gaussian". Defaults to Poisson',
required=False,default='poisson',type=str)
parser.add_argument('--prior_on_W',help = 'Prior on W matrix "L1" (exponential) or "L2" (half-normal)'
,required = False, default = 'L1',type=str)
parser.add_argument('--prior_on_H',help = 'Prior on H matrix "L1" (exponential) or "L2" (half-normal)'
,required = False, default = 'L1',type=str)
parser.add_argument('--output_dir', help='output_file_name if run in array mode this correspond to the output directory', required=True)
parser.add_argument('--output_prefix', help='Prefix for output files', required=False, default="result", type=str)
parser.add_argument('--labeled', help='Input has row and column labels', required=False,default=False, action='store_true')
parser.add_argument('--report_frequency', help='Number of iterations between progress reports', required=False,
default=100, type=int)
parser.add_argument('--dtype', help='Floating point accuracy', required=False,
default='Float32', type=str)
parser.add_argument('--parameters_file', help='allows running many different configurations of the NMF method on a multi'
'GPU system. To run in this mode provide this argument with a text file with '
'the following headers:(a,phi,b,prior_on_W,prior_on_H,Beta,label) label '
'indicates the output stem of the results from each run.', required = False
,default = None)
args = parser.parse_args()
print('Reading data frame from '+ args.data)
if args.dtype == 'Float32':
args.dtype = torch.float32
elif args.dtype == 'Float16':
args.dtype = torch.float16
if args.parquet:
dataset = pd.read_parquet(args.data)
elif args.feather:
print('loading feather...')
dataset = feather.read_dataframe(args.data)
else:
if args.labeled:
dataset = pd.read_csv(args.data, sep='\t', header=0, index_col=0)
else:
dataset = pd.read_csv(args.data, sep='\t', header=None)
if args.objective.lower() == 'poisson':
Beta = 1
elif args.objective.lower() == 'gaussian':
Beta = 2
else:
print('objective parameter should be one of "gaussian" or "poisson"')
sys.exit()
if args.parameters_file != None:
parameters = pd.read_csv(args.parameters_file,sep='\t')
run_parameter_sweep(parameters,dataset,args,Beta)
else:
data = ARD_NMF(dataset,args.objective)
W,H,cost,time = run_method_engine(data, args.a, args.phi, args.b, Beta,
args.prior_on_W, args.prior_on_H, args.K0, args.tolerance,args.max_iter)
nsig = write_output(W,H,data.channel_names,data.sample_names,args.output_dir,args.output_prefix)
if __name__ == "__main__":
main()
| [
"pandas.DataFrame",
"numpy.sum",
"argparse.ArgumentParser",
"ARD_NMF.run_method_engine",
"os.makedirs",
"ARD_NMF.ARD_NMF",
"pandas.read_csv",
"pyarrow.feather.read_dataframe",
"os.path.exists",
"torch.multiprocessing.set_start_method",
"pandas.read_parquet",
"sys.exit"
] | [((1716, 1735), 'numpy.sum', 'np.sum', (['nonzero_idx'], {}), '(nonzero_idx)\n', (1722, 1735), True, 'import numpy as np\n'), ((1817, 1841), 'numpy.sum', 'np.sum', (['W_active'], {'axis': '(0)'}), '(W_active, axis=0)\n', (1823, 1841), True, 'import numpy as np\n'), ((2028, 2094), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'W_final', 'index': 'channel_names', 'columns': 'sig_names'}), '(data=W_final, index=channel_names, columns=sig_names)\n', (2040, 2094), True, 'import pandas as pd\n'), ((2114, 2179), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'H_final', 'index': 'sig_names', 'columns': 'sample_names'}), '(data=H_final, index=sig_names, columns=sample_names)\n', (2126, 2179), True, 'import pandas as pd\n'), ((2453, 2500), 'torch.multiprocessing.set_start_method', 'torch.multiprocessing.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (2491, 2500), False, 'import torch\n'), ((2515, 2640), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""NMF with some sparsity penalty described https://arxiv.org/pdf/1111.6085.pdf"""'}), "(description=\n 'NMF with some sparsity penalty described https://arxiv.org/pdf/1111.6085.pdf'\n )\n", (2538, 2640), False, 'import argparse\n'), ((700, 732), 'ARD_NMF.ARD_NMF', 'ARD_NMF', (['dataset', 'args.objective'], {}), '(dataset, args.objective)\n', (707, 732), False, 'from ARD_NMF import ARD_NMF\n'), ((756, 889), 'ARD_NMF.run_method_engine', 'run_method_engine', (['data', 'args.a', 'args.phi', 'args.b', 'Beta', 'args.prior_on_W', 'args.prior_on_H', 'args.K0', 'args.tolerance', 'args.max_iter'], {}), '(data, args.a, args.phi, args.b, Beta, args.prior_on_W,\n args.prior_on_H, args.K0, args.tolerance, args.max_iter)\n', (773, 889), False, 'from ARD_NMF import run_method_engine\n'), ((6241, 6267), 'pandas.read_parquet', 'pd.read_parquet', (['args.data'], {}), '(args.data)\n', (6256, 6267), True, 'import pandas as pd\n'), ((6882, 6925), 'pandas.read_csv', 'pd.read_csv', 
(['args.parameters_file'], {'sep': '"""\t"""'}), "(args.parameters_file, sep='\\t')\n", (6893, 6925), True, 'import pandas as pd\n'), ((7008, 7040), 'ARD_NMF.ARD_NMF', 'ARD_NMF', (['dataset', 'args.objective'], {}), '(dataset, args.objective)\n', (7015, 7040), False, 'from ARD_NMF import ARD_NMF\n'), ((7064, 7197), 'ARD_NMF.run_method_engine', 'run_method_engine', (['data', 'args.a', 'args.phi', 'args.b', 'Beta', 'args.prior_on_W', 'args.prior_on_H', 'args.K0', 'args.tolerance', 'args.max_iter'], {}), '(data, args.a, args.phi, args.b, Beta, args.prior_on_W,\n args.prior_on_H, args.K0, args.tolerance, args.max_iter)\n', (7081, 7197), False, 'from ARD_NMF import run_method_engine\n'), ((381, 406), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (395, 406), False, 'import os\n'), ((420, 442), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (431, 442), False, 'import os\n'), ((1560, 1577), 'numpy.sum', 'np.sum', (['H'], {'axis': '(1)'}), '(H, axis=1)\n', (1566, 1577), True, 'import numpy as np\n'), ((1580, 1597), 'numpy.sum', 'np.sum', (['W'], {'axis': '(0)'}), '(W, axis=0)\n', (1586, 1597), True, 'import numpy as np\n'), ((6345, 6378), 'pyarrow.feather.read_dataframe', 'feather.read_dataframe', (['args.data'], {}), '(args.data)\n', (6367, 6378), True, 'import pyarrow.feather as feather\n'), ((6813, 6823), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6821, 6823), False, 'import sys\n'), ((6436, 6491), 'pandas.read_csv', 'pd.read_csv', (['args.data'], {'sep': '"""\t"""', 'header': '(0)', 'index_col': '(0)'}), "(args.data, sep='\\t', header=0, index_col=0)\n", (6447, 6491), True, 'import pandas as pd\n'), ((6528, 6573), 'pandas.read_csv', 'pd.read_csv', (['args.data'], {'sep': '"""\t"""', 'header': 'None'}), "(args.data, sep='\\t', header=None)\n", (6539, 6573), True, 'import pandas as pd\n')] |
"""
이미지 폴더의 파일을 분석하여 예측 레이블(labels_pred)을 생성하는 모듈
"""
import pandas as pd
import math
import matplotlib.pyplot as plt
import numpy as np
import os
from somber import Som
from evaluation import *
from config import *
from numpy import (array, unravel_index, nditer, linalg, random, subtract,
power, exp, pi, zeros, arange, outer, meshgrid, dot,
logical_and, mean, std, cov, argsort, linspace, transpose)
"""Returns the distance map of the weights.
Each cell is the normalised sum of the distances between
a neuron and its neighbours."""
def distance_map(self):
um = zeros((self.shape[0], self.shape[1]))
it = nditer(um, flags=['multi_index'])
while not it.finished:
for ii in range(it.multi_index[0] - 1, it.multi_index[0] + 2):
for jj in range(it.multi_index[1] - 1, it.multi_index[1] + 2):
if (ii >= 0 and ii < self.shape[0] and
jj >= 0 and jj < self.shape[1]):
w_1 = self[ii, jj, :]
w_2 = self[it.multi_index]
um[it.multi_index] += fast_norm(w_1 - w_2)
it.iternext()
um = um / um.max()
return um
"""Returns norm-2 of a 1-D numpy array.
* faster than linalg.norm in case of 1-D arrays (numpy 1.9.2rc1).
"""
def fast_norm(x):
return math.sqrt(dot(x, x.T))
"""
Self-Organizing Maps (SOMS) 알고리즘으로 특징 벡터를 클러스터링 하는 함수
예측 레이블은 DATA_DIR/LABELS_PRED.npy 에 저장
:return: None
"""
def make_labels_pred():
# 01. Load datasets
X = features = np.load(os.path.join(DATA_DIR, FEATURES + ".npy"))
data_dim = X.shape[1]
# 02. Estimate SOM matrix dimension
estimate_max_cluster = int((len(features) / NUM_IMGS_PER_MODEL)*2.0)
matrix_dim = round(math.sqrt(estimate_max_cluster))
# 03. Data normalizing
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range=(-1, 1))
X = sc.fit_transform(X)
# 04. Training the SOM
som = Som((matrix_dim, matrix_dim), data_dimensionality=data_dim, learning_rate=0.5, lr_lambda=2.5, infl_lambda=2.5)
som.fit(X, num_epochs=10, updates_epoch=10, show_progressbar=True)
# predict: get the index of each best matching unit.
labels_pred = som.predict(X)
# save predicted labels
np.save(os.path.join(DATA_DIR, LABELS_PRED + ".npy"), labels_pred)
np.savetxt(os.path.join(DATA_DIR, LABELS_PRED + ".tsv"), labels_pred, "%d", delimiter="\t")
np.savetxt(os.path.join(DATA_DIR, LABELS_PRED + ".txt"), labels_pred, "%d", delimiter="\t")
# quantization error: how well do the best matching units fit?
quantization_error = som.quantization_error(X)
# inversion: associate each node with the exemplar that fits best.
inverted = som.invert_projection(X, labels_pred)
# Mapping: get weights, mapped to the grid points of the SOM
mapped = som.map_weights()
# 06.Visualization
from pylab import bone, pcolor, colorbar, plot, show
bone()
distance = distance_map(mapped).T
pcolor(distance)
colorbar()
show()
if __name__ == '__main__':
make_labels_pred() | [
"pylab.show",
"math.sqrt",
"pylab.pcolor",
"numpy.nditer",
"sklearn.preprocessing.MinMaxScaler",
"numpy.zeros",
"pylab.colorbar",
"pylab.bone",
"somber.Som",
"numpy.dot",
"os.path.join"
] | [((631, 668), 'numpy.zeros', 'zeros', (['(self.shape[0], self.shape[1])'], {}), '((self.shape[0], self.shape[1]))\n', (636, 668), False, 'from numpy import array, unravel_index, nditer, linalg, random, subtract, power, exp, pi, zeros, arange, outer, meshgrid, dot, logical_and, mean, std, cov, argsort, linspace, transpose\n'), ((679, 712), 'numpy.nditer', 'nditer', (['um'], {'flags': "['multi_index']"}), "(um, flags=['multi_index'])\n", (685, 712), False, 'from numpy import array, unravel_index, nditer, linalg, random, subtract, power, exp, pi, zeros, arange, outer, meshgrid, dot, logical_and, mean, std, cov, argsort, linspace, transpose\n'), ((1938, 1973), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(-1, 1)'}), '(feature_range=(-1, 1))\n', (1950, 1973), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2044, 2159), 'somber.Som', 'Som', (['(matrix_dim, matrix_dim)'], {'data_dimensionality': 'data_dim', 'learning_rate': '(0.5)', 'lr_lambda': '(2.5)', 'infl_lambda': '(2.5)'}), '((matrix_dim, matrix_dim), data_dimensionality=data_dim, learning_rate=\n 0.5, lr_lambda=2.5, infl_lambda=2.5)\n', (2047, 2159), False, 'from somber import Som\n'), ((3057, 3063), 'pylab.bone', 'bone', ([], {}), '()\n', (3061, 3063), False, 'from pylab import bone, pcolor, colorbar, plot, show\n'), ((3108, 3124), 'pylab.pcolor', 'pcolor', (['distance'], {}), '(distance)\n', (3114, 3124), False, 'from pylab import bone, pcolor, colorbar, plot, show\n'), ((3130, 3140), 'pylab.colorbar', 'colorbar', ([], {}), '()\n', (3138, 3140), False, 'from pylab import bone, pcolor, colorbar, plot, show\n'), ((3146, 3152), 'pylab.show', 'show', ([], {}), '()\n', (3150, 3152), False, 'from pylab import bone, pcolor, colorbar, plot, show\n'), ((1384, 1395), 'numpy.dot', 'dot', (['x', 'x.T'], {}), '(x, x.T)\n', (1387, 1395), False, 'from numpy import array, unravel_index, nditer, linalg, random, subtract, power, exp, pi, zeros, arange, outer, meshgrid, dot, 
logical_and, mean, std, cov, argsort, linspace, transpose\n'), ((1602, 1643), 'os.path.join', 'os.path.join', (['DATA_DIR', "(FEATURES + '.npy')"], {}), "(DATA_DIR, FEATURES + '.npy')\n", (1614, 1643), False, 'import os\n'), ((1813, 1844), 'math.sqrt', 'math.sqrt', (['estimate_max_cluster'], {}), '(estimate_max_cluster)\n', (1822, 1844), False, 'import math\n'), ((2365, 2409), 'os.path.join', 'os.path.join', (['DATA_DIR', "(LABELS_PRED + '.npy')"], {}), "(DATA_DIR, LABELS_PRED + '.npy')\n", (2377, 2409), False, 'import os\n'), ((2440, 2484), 'os.path.join', 'os.path.join', (['DATA_DIR', "(LABELS_PRED + '.tsv')"], {}), "(DATA_DIR, LABELS_PRED + '.tsv')\n", (2452, 2484), False, 'import os\n'), ((2537, 2581), 'os.path.join', 'os.path.join', (['DATA_DIR', "(LABELS_PRED + '.txt')"], {}), "(DATA_DIR, LABELS_PRED + '.txt')\n", (2549, 2581), False, 'import os\n')] |
# Create source and receiver grid
import numpy as np
import h5py
import matplotlib.pyplot as plt
# Model domain
x_max = 10000 # lateral extension in x [m]
y_max = 10000 # lateral extension in y [m]
###################################################################################################
# Source grid
# Source grid spacing
dsx = 12.5 # in m
dsy = 12.5 # in m
# Minimum source position
sx_min = dsx
sy_min = dsy
# Maximum source position
sx_max = x_max - dsx
sy_max = y_max - dsy
# Source depth
zsrc = 6.0
# Coordinate ranges
xrange_src = np.arange(start=sx_min, stop=sx_max + dsx, step=dsx)
yrange_src = np.arange(start=sy_min, stop=sy_max + dsy, step=dsy)
# No. of sources
nx_rec = len(xrange_src)
ny_rec = len(yrange_src)
Y_src, X_src = np.meshgrid(yrange_src, xrange_src)
I = np.ones(X_src.shape)
Z_src = I*zsrc
# Coordinates [X, Y, Z, I] (I: off the grid identifier, 1 if on the grid, 0 else)
src_coordinates = np.concatenate((X_src.reshape(-1,1), Y_src.reshape(-1,1),
Z_src.reshape(-1,1), I.reshape(-1,1)), axis=1)
###################################################################################################
# Jittered receiver grid
def generate_jittered_indices(ns, ds, rndfactor, p, rseed, boatspeed, tfireint_min, tdelay):
# p=4 --> 75 % subsampling, ds = 12.5m
# p=2 --> 50 % subsampling, ds = 25 m
np.random.seed(rseed)
dtfirejitb1arr1 = tfireint_min + np.random.rand(1, int(ns/p))*(2*tfireint_min)
tfirejitb1arr1 = np.cumsum(dtfirejitb1arr1)
tfirejitb1arr1 = tfirejitb1arr1 - tdelay
tfirejitb1arr1 = np.round(rndfactor[0]*tfirejitb1arr1)/rndfactor[0]
sjitb1arr1 = np.round(rndfactor[1]*boatspeed*tfirejitb1arr1)/rndfactor[1]
return (sjitb1arr1/ds).astype(int)
# Underlying dense receiver grid spacing
drx = 50.0 # in m
dry = 50.0 # in m
# Minimum receiver position
rx_min = drx
ry_min = dry
# Maximum receiver position
rx_max = x_max - drx
ry_max = y_max - dry
# OBN receiver depth
zsrc = 740.0
# Coordinate ranges
xrange_rec = np.arange(start=rx_min, stop=rx_max + drx, step=drx)
yrange_rec = np.arange(start=ry_min, stop=ry_max + dry, step=dry)
nx_rec = len(xrange_rec)
ny_rec = len(yrange_rec)
n_rec = nx_rec * ny_rec
# Underlying dense receiver grid [m]
Y_rec, X_rec = np.meshgrid(yrange_rec, xrange_rec)
# Jittered grid
fac = 8
p = 2*fac
ds = 25/fac
rndfactor = np.array([1000, 100])
rseed = 3402
boatspeed = 2.5
tfireint_min = 10
tdelay = 10
n_rec_jit = int(n_rec/p)
# Jittered grid
indices = generate_jittered_indices(n_rec, ds, rndfactor, p, rseed, boatspeed, tfireint_min, tdelay)
X_idx_jit, Y_idx_jit = np.unravel_index(indices, (nx_rec, ny_rec))
X_rec_jit = X_idx_jit*drx + drx
Y_rec_jit = Y_idx_jit*dry + dry
Z_rec_jit = np.ones(n_rec_jit)*zsrc
# Random dither between -5 m and 5 m
dith_x = (-5 + (5-(-5))*np.random.rand(n_rec_jit, 1))
dith_y = (-5 + (5-(-5))*np.random.rand(n_rec_jit, 1))
X_rec_dith = X_rec_jit.reshape(-1,1) + dith_x
Y_rec_dith = Y_rec_jit.reshape(-1,1) + dith_y
# Off the grid identifier: 1 --> On the grid; 0 --> Off the grid
I_jit = np.ones(n_rec_jit)
I_dith = np.zeros(n_rec_jit)
for i in range(n_rec_jit):
if X_rec_dith[i] - X_rec_jit[i] <= 1e-9 and Y_rec_dith[i] - Y_rec_jit[i] <= 1e-9:
I_dith[i] = 1
# Gather all coordinates
rec_coordinates_jittered = np.concatenate((X_rec_jit.reshape(-1,1), Y_rec_jit.reshape(-1,1),
Z_rec_jit.reshape(-1,1), I_jit.reshape(-1,1)), axis=1)
rec_coordinates_dithered = np.concatenate((X_rec_dith.reshape(-1,1), Y_rec_dith.reshape(-1,1),
Z_rec_jit.reshape(-1,1), I_dith.reshape(-1,1)), axis=1)
# Save sources as receiver coordinates and vice versa (due to source-receiver reciprocity)
with h5py.File('src_coordinates.h5', 'w') as data_file:
data_file.create_dataset('xsrc', data=rec_coordinates_dithered)
with h5py.File('rec_coordinates.h5', 'w') as data_file:
data_file.create_dataset('rec', data=src_coordinates)
# Plot grids
plt.figure(); plt.plot(rec_coordinates_dithered[:,0], rec_coordinates_dithered[:,1], 'o')
plt.show() | [
"h5py.File",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"numpy.random.seed",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.ones",
"numpy.unravel_index",
"numpy.cumsum",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.arange",
"numpy.random.rand",
"numpy.round"
] | [((563, 615), 'numpy.arange', 'np.arange', ([], {'start': 'sx_min', 'stop': '(sx_max + dsx)', 'step': 'dsx'}), '(start=sx_min, stop=sx_max + dsx, step=dsx)\n', (572, 615), True, 'import numpy as np\n'), ((629, 681), 'numpy.arange', 'np.arange', ([], {'start': 'sy_min', 'stop': '(sy_max + dsy)', 'step': 'dsy'}), '(start=sy_min, stop=sy_max + dsy, step=dsy)\n', (638, 681), True, 'import numpy as np\n'), ((766, 801), 'numpy.meshgrid', 'np.meshgrid', (['yrange_src', 'xrange_src'], {}), '(yrange_src, xrange_src)\n', (777, 801), True, 'import numpy as np\n'), ((806, 826), 'numpy.ones', 'np.ones', (['X_src.shape'], {}), '(X_src.shape)\n', (813, 826), True, 'import numpy as np\n'), ((2022, 2074), 'numpy.arange', 'np.arange', ([], {'start': 'rx_min', 'stop': '(rx_max + drx)', 'step': 'drx'}), '(start=rx_min, stop=rx_max + drx, step=drx)\n', (2031, 2074), True, 'import numpy as np\n'), ((2088, 2140), 'numpy.arange', 'np.arange', ([], {'start': 'ry_min', 'stop': '(ry_max + dry)', 'step': 'dry'}), '(start=ry_min, stop=ry_max + dry, step=dry)\n', (2097, 2140), True, 'import numpy as np\n'), ((2272, 2307), 'numpy.meshgrid', 'np.meshgrid', (['yrange_rec', 'xrange_rec'], {}), '(yrange_rec, xrange_rec)\n', (2283, 2307), True, 'import numpy as np\n'), ((2367, 2388), 'numpy.array', 'np.array', (['[1000, 100]'], {}), '([1000, 100])\n', (2375, 2388), True, 'import numpy as np\n'), ((2614, 2657), 'numpy.unravel_index', 'np.unravel_index', (['indices', '(nx_rec, ny_rec)'], {}), '(indices, (nx_rec, ny_rec))\n', (2630, 2657), True, 'import numpy as np\n'), ((3072, 3090), 'numpy.ones', 'np.ones', (['n_rec_jit'], {}), '(n_rec_jit)\n', (3079, 3090), True, 'import numpy as np\n'), ((3100, 3119), 'numpy.zeros', 'np.zeros', (['n_rec_jit'], {}), '(n_rec_jit)\n', (3108, 3119), True, 'import numpy as np\n'), ((3937, 3949), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3947, 3949), True, 'import matplotlib.pyplot as plt\n'), ((3951, 4028), 'matplotlib.pyplot.plot', 'plt.plot', 
(['rec_coordinates_dithered[:, 0]', 'rec_coordinates_dithered[:, 1]', '"""o"""'], {}), "(rec_coordinates_dithered[:, 0], rec_coordinates_dithered[:, 1], 'o')\n", (3959, 4028), True, 'import matplotlib.pyplot as plt\n'), ((4027, 4037), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4035, 4037), True, 'import matplotlib.pyplot as plt\n'), ((1359, 1380), 'numpy.random.seed', 'np.random.seed', (['rseed'], {}), '(rseed)\n', (1373, 1380), True, 'import numpy as np\n'), ((1485, 1511), 'numpy.cumsum', 'np.cumsum', (['dtfirejitb1arr1'], {}), '(dtfirejitb1arr1)\n', (1494, 1511), True, 'import numpy as np\n'), ((2735, 2753), 'numpy.ones', 'np.ones', (['n_rec_jit'], {}), '(n_rec_jit)\n', (2742, 2753), True, 'import numpy as np\n'), ((3689, 3725), 'h5py.File', 'h5py.File', (['"""src_coordinates.h5"""', '"""w"""'], {}), "('src_coordinates.h5', 'w')\n", (3698, 3725), False, 'import h5py\n'), ((3814, 3850), 'h5py.File', 'h5py.File', (['"""rec_coordinates.h5"""', '"""w"""'], {}), "('rec_coordinates.h5', 'w')\n", (3823, 3850), False, 'import h5py\n'), ((1578, 1617), 'numpy.round', 'np.round', (['(rndfactor[0] * tfirejitb1arr1)'], {}), '(rndfactor[0] * tfirejitb1arr1)\n', (1586, 1617), True, 'import numpy as np\n'), ((1646, 1697), 'numpy.round', 'np.round', (['(rndfactor[1] * boatspeed * tfirejitb1arr1)'], {}), '(rndfactor[1] * boatspeed * tfirejitb1arr1)\n', (1654, 1697), True, 'import numpy as np\n'), ((2821, 2849), 'numpy.random.rand', 'np.random.rand', (['n_rec_jit', '(1)'], {}), '(n_rec_jit, 1)\n', (2835, 2849), True, 'import numpy as np\n'), ((2875, 2903), 'numpy.random.rand', 'np.random.rand', (['n_rec_jit', '(1)'], {}), '(n_rec_jit, 1)\n', (2889, 2903), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import os.path
import os
import sys
import numpy as np
import neurom as nm
from neurom.core.types import NeuriteType
import json
from pprint import pprint
# Uses:
# https://github.com/BlueBrain/NeuroM
#
# some doc here:
# https://github.com/BlueBrain/NeuroM/blob/04f48747785265aa7a4f7b0750c1447cae408468/doc/source/definitions.rst#id1
# https://github.com/BlueBrain/NeuroM/blob/04f48747785265aa7a4f7b0750c1447cae408468/doc/source/definitions.rst
#print(sys.argv)
#exit()
def pretty(d, indent=0):
for key, value in d.items():
print('\t' * indent + str(key))
if isinstance(value, dict):
pretty(value, indent+1)
else:
print('\t' * (indent+1) + str(value))
class NumpyEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8,
np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32,
np.float64)):
return float(obj)
elif isinstance(obj,(np.ndarray,)): #### This is the fix
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def get_morph_data(file_name, recenter=True):
''' get the morphology data from neurom '''
morph = nm.load_neuron(file_name)
if recenter:
transform = Translation(-morph.soma.center)
morph = morph.transform(transform)
data = morph._data.data_block # pylint: disable=protected-access
return morph, np.ascontiguousarray(data, dtype=np.float32)
def save_morph_as_json (input, output) :
morph, data = get_morph_data(input, False)
# these are just shorter names
sections = []
soma = {}
morpho_to_export = {
"sections": sections,
"soma": soma
}
binary_data = []
for section in morph.sections:
# for types that have a polyline/polycylinder shape
if section.type == NeuriteType.axon or section.type == NeuriteType.apical_dendrite or section.type == NeuriteType.basal_dendrite:
points = []
binary_section_encoding = np.zeros(shape=(len(section.points) * 7), dtype=float)
local_counter = 0
for point in section.points:
current_point = {
"position": [point[0], point[1], point[2]],
"radius": point[3]
}
points.append(current_point)
binary_section_encoding[local_counter * 7] = section.id # ID
binary_section_encoding[local_counter * 7 + 1] = section.type._value_ - 1 # type
binary_section_encoding[local_counter * 7 + 2] = point[0] # x
binary_section_encoding[local_counter * 7 + 3] = point[1] # y
binary_section_encoding[local_counter * 7 + 4] = point[2] # z
binary_section_encoding[local_counter * 7 + 5] = point[3] # radius
binary_section_encoding[local_counter * 7 + 6] = section.parent.id if section.parent else -1 # parent ID
local_counter += 1
binary_data.append( binary_section_encoding )
current_section = {
"id": section.id,
"parent": section.parent.id if section.parent else None,
"children": list(map(lambda x: x.id, section.children)),
"typename": section.type._name_,
"typevalue": section.type._value_ - 1, # because enum are 1-indexed
"points": points
}
sections.append( current_section )
# for the some, the only section to have a polygonal shape
elif section.type == NeuriteType.soma:
soma["id"] = section.id
soma["type"] = section.type._name_
soma["center"] = [section.points[:,0].mean(), section.points[:,1].mean(), section.points[:,2].mean() ]
soma["radius"] = 5
binary_soma_encoding = np.zeros(shape=(7), dtype=float)
binary_soma_encoding[0] = section.id
binary_soma_encoding[1] = 1
binary_soma_encoding[2] = section.points[:,0].mean()
binary_soma_encoding[3] = section.points[:,1].mean()
binary_soma_encoding[4] = section.points[:,2].mean()
binary_soma_encoding[5] = 5
binary_soma_encoding[6] = -1
binary_data.append( binary_soma_encoding )
#pprint(vars(section))
json_data = json.dumps(morpho_to_export, ensure_ascii=True, indent=2)
#json_data = json.dumps(morpho_to_export)
f = open(output + '.json','w')
f.write(json_data)
f.close()
# making the binary buffer
buff = np.concatenate(binary_data)
print(np.shape(buff))
buff.astype('float32').tofile(output + '.bin')
if __name__ == "__main__":
if len(sys.argv) < 2:
print("At least one .asc file path is axpected as input")
exit()
for input_path in sys.argv[1:]:
os.path.splitext(input_path)[0]+'.json'
output_path = os.path.splitext(input_path)[0]
save_morph_as_json(input_path, output_path)
| [
"numpy.zeros",
"neurom.load_neuron",
"json.dumps",
"numpy.shape",
"os.path.splitext",
"numpy.ascontiguousarray",
"json.JSONEncoder.default",
"numpy.concatenate"
] | [((1404, 1429), 'neurom.load_neuron', 'nm.load_neuron', (['file_name'], {}), '(file_name)\n', (1418, 1429), True, 'import neurom as nm\n'), ((4679, 4736), 'json.dumps', 'json.dumps', (['morpho_to_export'], {'ensure_ascii': '(True)', 'indent': '(2)'}), '(morpho_to_export, ensure_ascii=True, indent=2)\n', (4689, 4736), False, 'import json\n'), ((4899, 4926), 'numpy.concatenate', 'np.concatenate', (['binary_data'], {}), '(binary_data)\n', (4913, 4926), True, 'import numpy as np\n'), ((1261, 1296), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (1285, 1296), False, 'import json\n'), ((1631, 1675), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (1651, 1675), True, 'import numpy as np\n'), ((4937, 4951), 'numpy.shape', 'np.shape', (['buff'], {}), '(buff)\n', (4945, 4951), True, 'import numpy as np\n'), ((5246, 5274), 'os.path.splitext', 'os.path.splitext', (['input_path'], {}), '(input_path)\n', (5262, 5274), False, 'import os\n'), ((4175, 4205), 'numpy.zeros', 'np.zeros', ([], {'shape': '(7)', 'dtype': 'float'}), '(shape=7, dtype=float)\n', (4183, 4205), True, 'import numpy as np\n'), ((5184, 5212), 'os.path.splitext', 'os.path.splitext', (['input_path'], {}), '(input_path)\n', (5200, 5212), False, 'import os\n')] |
import pandas as pd
import numpy as np
import sklearn.metrics
from .utils import map_binary
import warnings
warnings.simplefilter("ignore")
def eval_report(means, mapping, mapping_r):
'''Compute an array of model performance metrics given mean classifer scores across all possible prediction classes
Computed metrics include binary and multi-class log-loss, macro F1 scores, micro F1 scores, and weighted F1 scores.
Args:
means (pd.DataFrame): DataFrame of classifer scores across each possible class
mapping (pd.Series): Mapping generator used to encode class labels
mapping_r(pd.Series): Mapping genrator used to decode class labels
Returns:
results (pd.DataFrame): DataFrame of computed metrics
'''
# log loss
log_loss = sklearn.metrics.log_loss(means.index.get_level_values('label').map(mapping).astype(int), means)
# f1 scores (macro, micro, weighted, per-class)
per_class_f1 = pd.Series(sklearn.metrics.f1_score(means.index.get_level_values('label').map(mapping).astype(int), means.idxmax(axis=1).astype(int), average=None, labels = range(mapping.max())), index = mapping_r[[i for i in range(mapping.max())]])
macro_f1 = sklearn.metrics.f1_score(means.index.get_level_values('label').map(mapping).astype(int), means.idxmax(axis=1).astype(int), average='macro', labels = range(mapping.max()))
micro_f1 = sklearn.metrics.f1_score(means.index.get_level_values('label').map(mapping).astype(int), means.idxmax(axis=1).astype(int), average='micro', labels = range(mapping.max()))
weighted_f1 = sklearn.metrics.f1_score(means.index.get_level_values('label').map(mapping).astype(int), means.idxmax(axis=1).astype(int), average='weighted', labels = range(mapping.max()))
# map results to their binary representation (e.g. translocating v. not translocating) and compute loss and F1 scores
binary = map_binary(means, mapping_r)
binary_loss = sklearn.metrics.log_loss(binary['true label'], binary['translocation score'])
binary_f1 = sklearn.metrics.f1_score(binary['true label'], binary['translocation score']>0.5, average='weighted')
results = {
'F1 score (per class)': per_class_f1,
'singular metrics': pd.Series({
'loss': log_loss,
'F1 score (micro)': micro_f1,
'F1 score (macro)': macro_f1,
'F1 score (weighted)': weighted_f1,
'loss (binary)': binary_loss,
'F1 score (weighted, binary)': binary_f1
},)
}
return pd.concat(results, names = ['type of metric', 'metric'])
def compute_fpr(x, n=100):
'''Compute false-positive rates for translocation score cutoffs
Args:
x (pd.DataFrame): DataFrame including 'translocation score' and 'true label' columns (as produced by TRANSPIRE.utils.map_binary)
n (int, optional): number of bins to split the translocation scores into
Returns:
fpr (pd.Series): false-positive rates for different translocation score cutoffs given the true binary labels
'''
fp = [((x['translocation score'] > i)&(x['true label']==0)).sum() for i in np.linspace(0, 1, n)]
fpr = fp/((x['true label']==0).sum())
return pd.Series(fpr, index=np.linspace(0, 1, n))
def compute_cutoff(fprs,level, i):
'''Compute score cutoff based on desired false-positive rate stringency
Args:
fprs (pd.DataFrame): calculated false-positive rates (DataFrame columns should correspond to translocation scores)
level (list): list of multindex levels to groupby (e.g. conditions, folds, etc.)
i (float): fpr cutoff between 0 and 1
Returns:
cutoffs (pd.Series): corresponding score cutoffs for each set of levels as defined by the 'level' grouping
'''
return fprs[fprs<=i].idxmax(axis=1).groupby(level).mean()
| [
"pandas.Series",
"pandas.concat",
"warnings.simplefilter",
"numpy.linspace"
] | [((111, 142), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (132, 142), False, 'import warnings\n'), ((2662, 2716), 'pandas.concat', 'pd.concat', (['results'], {'names': "['type of metric', 'metric']"}), "(results, names=['type of metric', 'metric'])\n", (2671, 2716), True, 'import pandas as pd\n'), ((2237, 2442), 'pandas.Series', 'pd.Series', (["{'loss': log_loss, 'F1 score (micro)': micro_f1, 'F1 score (macro)':\n macro_f1, 'F1 score (weighted)': weighted_f1, 'loss (binary)':\n binary_loss, 'F1 score (weighted, binary)': binary_f1}"], {}), "({'loss': log_loss, 'F1 score (micro)': micro_f1,\n 'F1 score (macro)': macro_f1, 'F1 score (weighted)': weighted_f1,\n 'loss (binary)': binary_loss, 'F1 score (weighted, binary)': binary_f1})\n", (2246, 2442), True, 'import pandas as pd\n'), ((3265, 3285), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (3276, 3285), True, 'import numpy as np\n'), ((3366, 3386), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (3377, 3386), True, 'import numpy as np\n')] |
import copy
import unittest
from unittest import TestCase
import numpy as np
import numpy.testing as nptest
import pandas as pd
import pandas.util.testing as pdtest
import raredecay.settings
out = raredecay.settings.initialize(
run_name="test reweighting", no_interactive_plots=True, n_cpu=-2
)
from raredecay.tools.data_storage import HEPDataStorage
class TestHEPDataStorageMixin(TestCase):
    """Shared test battery for HEPDataStorage.

    Subclasses override ``_generate_data`` / ``_create_ds`` (and optionally
    the ``*2`` variants) so the same assertions run against data supplied in
    different input formats (plain DataFrame, ROOT dict, folds, ...).
    ``setUp`` stores the ground truth in ``truth_*`` attributes alongside the
    storage objects under test (``self.ds`` / ``self.ds2``).
    """
    def setUp(self):
        # Build truth values first, then the storages derived from them.
        self._set_truth()
        self.ds = self._create_ds()
        self._set_truth2()
        self.ds2 = self._create_ds2()
    def _create_ds(self):
        # Storage under test, fed with whatever `_generate_data` prepared.
        return HEPDataStorage(
            self.data_for_hepds,
            target=self.target_for_hepds,
            sample_weights=self.weights_for_hepds,
            index=self.truth_index,
            data_name=self.truth_name,
            data_name_addition=self.truth_name_addition,
        )
    def _create_ds2(self):
        # Second, smaller storage (no explicit index) used by make_dataset tests.
        return HEPDataStorage(
            self.data_for_hepds2,
            target=self.target_for_hepds2,
            sample_weights=self.weights_for_hepds2,
            data_name=self.truth_name2,
            data_name_addition=self.truth_name_addition2,
        )
    def _set_truth2(self):
        # Record ground truth for dataset 2; deep copies keep the truth
        # objects safe from mutation by the storage under test.
        self.truth_df2, self.truth_targets2, self.truth_weights2 = self._make_dataset2()
        (
            self.data_for_hepds2,
            self.target_for_hepds2,
            self.weights_for_hepds2,
        ) = self._generate_data2(
            copy.deepcopy(self.truth_df2),
            copy.deepcopy(self.truth_targets2),
            copy.deepcopy(self.truth_weights2),
        )
        self.truth_weights_normalized2 = self.truth_weights2 / np.average(
            self.truth_weights2
        )
        self.truth_name2 = "ds1"
        self.truth_name_addition2 = "ds1add"
    def _set_truth(self):
        # Record ground truth for dataset 1 (the primary storage).
        (
            self.truth_df,
            self.truth_targets,
            self.truth_weights,
            index,
        ) = self._make_dataset()
        self.truth_index = copy.deepcopy(index)
        (
            self.data_for_hepds,
            self.target_for_hepds,
            self.weights_for_hepds,
        ) = self._generate_data(
            copy.deepcopy(self.truth_df),
            copy.deepcopy(self.truth_targets),
            copy.deepcopy(self.truth_weights),
        )
        self.truth_columns = list(self.truth_df.columns)
        self.truth_weights_normalized = self.truth_weights / np.average(
            self.truth_weights
        )
        self.truth_name = "ds1"
        self.truth_name_addition = "ds1add"
        return
    @staticmethod
    def _make_dataset():
        # Nine-row fixture with a deliberately non-monotonic index.
        index = [0, 2, 1, 3, 4, 5, 6, 7, 8]
        data = pd.DataFrame(
            {
                "a": list(range(9)),
                "b": list(range(10, 19)),
                "c": list(range(20, 29)),
                "d": list(range(30, 39)),
            },
            index=index,
        )
        targets = [1, 0, 1, 0, 1, 0, 1, 0, 1]
        weights = np.array([1, 1, 1, 1, 1, 2, 3, 4, 0.25])
        return (copy.deepcopy(obj) for obj in (data, targets, weights, index))
    @staticmethod
    def _make_dataset2():
        # Three-row fixture with a default RangeIndex.
        data = pd.DataFrame(
            {
                "a": list(range(200, 203)),
                "b": list(range(210, 213)),
                "c": list(range(220, 223)),
                "d": list(range(230, 233)),
            }
        )
        targets = [1, 1, 0]
        weights = np.array([1.5, 10, 0.3])
        return data, targets, weights
    def _generate_data(self, data, targets, weights):
        """Return data file ready to be passed into |hepds_type| and creating file if necessary
        OVERRIDE THIS METHOD (do not depend on the default base implementation)
        Returns
        -------
        data
        """
        self.truth_data_type = "df"
        return copy.deepcopy(data), copy.deepcopy(targets), copy.deepcopy(weights)
    def _generate_data2(self, data, targets, weights):
        """Return data file ready to be passed into |hepds_type| and creating file if necessary
        OVERRIDE THIS METHOD (do not depend on the default base implementation)
        Returns
        -------
        data
        """
        self.truth_data_type2 = "df"
        return copy.deepcopy(data), copy.deepcopy(targets), copy.deepcopy(weights)
    def test_initialization(self):
        # Both storages must reproduce their truth data/targets/weights.
        pdtest.assert_frame_equal(self.truth_df, self.ds.pandasDF())
        nptest.assert_almost_equal(self.truth_targets, self.ds.get_targets())
        nptest.assert_almost_equal(self.truth_weights, self.ds.weights)
        nptest.assert_almost_equal(self.truth_weights_normalized, self.ds.get_weights())
        pdtest.assert_frame_equal(self.truth_df2, self.ds2.pandasDF())
        nptest.assert_almost_equal(self.truth_targets2, self.ds2.get_targets())
        nptest.assert_almost_equal(self.truth_weights2, self.ds2.weights)
        nptest.assert_almost_equal(
            self.truth_weights_normalized2, self.ds2.get_weights()
        )
    # def test_get_name(self):
    #     pass
    #
    # def test_name(self):
    #     pass
    def test_data_name(self):
        self.assertEqual(self.truth_name, self.ds.data_name)
    # def test_data_name_addition(self):
    #     pass
    #
    # def test_fold_name(self):
    #     pass
    def test_data_type(self):
        self.assertEqual(
            self.truth_data_type,
            self.ds.data_type,
        )
    def test_get_index(self):
        nptest.assert_almost_equal(self.truth_index, self.ds.index)
        nptest.assert_almost_equal(self.truth_index, self.ds.get_index())
    def test_index(self):
        nptest.assert_almost_equal(self.truth_index, self.ds.index)
    def test_columns(self):
        # Column subsetting must be reflected by pandasDF(), and restoring
        # the full column list must round-trip cleanly.
        self.assertListEqual(self.truth_columns, self.ds.columns)
        sub_cols = ["a", "b"]
        self.ds.columns = sub_cols
        self.assertListEqual(sub_cols, self.ds.columns)
        self.assertListEqual(sub_cols, list(self.ds.pandasDF().columns))
        self.ds.columns = copy.deepcopy(self.truth_columns)
        self.assertListEqual(self.truth_columns, self.ds.columns)
    def test_data(self):
        pass
    def test_set_data(self):
        # A copied storage re-fed with the truth data must pass the battery.
        ds_tmp = self.ds.copy_storage()
        ds_original = self.ds
        ds_tmp.set_data(copy.deepcopy(self.truth_df))
        self._test_ds()
        self.ds = ds_original
    def test_get_weights(self):
        nptest.assert_almost_equal(
            self.truth_weights, self.ds.get_weights(normalize=False)
        )
    def test_set_weights(self):
        # Scalar broadcast and array assignment, then restore the originals.
        weights_original = self.ds.get_weights(normalize=False)
        weights_truth_original = self.truth_weights
        self.ds.weights = 3
        self.truth_weights = np.ones(len(self.truth_weights)) * 3
        self.test_get_weights()
        self.ds.set_weights(weights_truth_original * 1.7)
        self.truth_weights = weights_truth_original * 1.7
        self.test_get_weights()
        # clean up
        self.ds.set_weights(weights_original)
        self.truth_weights = weights_truth_original
    def test_set_root_selection(self):
        pass
    def test_pandasDF(self):
        # Full frame, column subsets, and index-based row selection.
        pdtest.assert_almost_equal(self.truth_df, self.ds.pandasDF())
        for cols in (["a", "b"], ["a", "b", "c", "d"]):
            pdtest.assert_almost_equal(
                self.truth_df[cols], self.ds.pandasDF(columns=cols)
            )
        index = [1, 2, 5]
        indexed_df = pd.DataFrame(
            {"a": [2, 1, 5], "b": [12, 11, 15], "c": [22, 21, 25], "d": [32, 31, 35]},
            index=index,
        )
        pdtest.assert_frame_equal(indexed_df, self.ds.pandasDF(index=index))
    def test_get_targets(self):
        # index=[1, 2, 5] selects by label; positions [2, 1, 5] in the truth
        # list correspond to those labels (truth index is [0, 2, 1, 3, ...]).
        indices = [1, 2, 5]
        indices_truth = [2, 1, 5]
        nptest.assert_almost_equal(
            [self.truth_targets[index] for index in indices_truth],
            self.ds.get_targets(index=indices),
        )
        nptest.assert_almost_equal(self.truth_targets, self.ds.get_targets())
        nptest.assert_almost_equal(self.truth_targets, self.ds.targets)
    def test_set_targets(self):
        # Scalar broadcast and array assignment, then restore the originals.
        targets_original = self.ds.get_targets()
        targets_truth_original = self.truth_targets
        self.ds.targets = 1
        self.truth_targets = np.ones(len(self.truth_targets))
        self.test_get_targets()
        self.ds.set_targets(targets_truth_original)
        self.truth_targets = targets_truth_original
        self.test_get_targets()
        # clean up
        self.ds.set_targets(targets_original)
        self.truth_targets = targets_truth_original
    def test_make_dataset(self):
        # Merging ds and ds2 must concatenate data/targets/weights in order.
        data, targets, weights = self.ds.make_dataset(
            second_storage=self.ds2, weights_ratio=0
        )
        truth_data = pd.concat(
            (self.truth_df, self.truth_df2), axis=0, ignore_index=True
        )
        truth_targets = np.concatenate((self.truth_targets, self.truth_targets2))
        truth_weights = np.concatenate((self.truth_weights, self.truth_weights2))
        pdtest.assert_almost_equal(truth_data, data)
        nptest.assert_almost_equal(truth_targets, targets)
        nptest.assert_almost_equal(truth_weights, weights)
    def test_copy_storage(self):
        # A column-selected copy must pass the whole battery too.
        ds_copy = self.ds.copy_storage(["a", "b", "c", "d"])
        ds_original = self.ds
        self.ds = ds_copy
        self._test_ds()
        self.ds = ds_original
    def test_get_LabeledDataStorage(self):
        from rep.data.storage import LabeledDataStorage
        truth_lds = LabeledDataStorage(
            self.truth_df, target=self.truth_targets, sample_weight=self.truth_weights
        )
        self.assertListEqual(
            list(truth_lds.__dict__.keys()),
            list(self.ds.get_LabeledDataStorage().__dict__.keys()),
        )
    def test_make_folds(self):
        # Non-integer fold counts must be rejected with ValueError.
        ds_original = self.ds.copy_storage()
        self.ds.make_folds(3, shuffle=False)
        self._test_get_fold()
        self._test_get_n_folds(3)
        self.assertRaises(ValueError, self.ds.make_folds, 0.2)
        self.ds = ds_original
    def _test_get_fold(self):
        # Smoke test: retrieving a fold must yield a (train, test) pair.
        train, test = self.ds.get_fold(2)
    def _test_get_n_folds(self, n_folds=3):
        self.assertEqual(n_folds, self.ds.get_n_folds())
    def test_plot(self):
        pass
    def _test_ds(self):
        # Re-run the battery against whatever storage is currently self.ds.
        self.test_set_weights()
        self.test_get_weights()
        self.test_data()
        self.test_columns()
        self.test_data_type()
        self.test_set_targets()
        self.test_get_index()
        self.test_pandasDF()
        self.test_get_targets()
        self.test_make_dataset()
        self.test_get_LabeledDataStorage()
    def tearDown(self):
        self._tearDown()
    def _tearDown(self):
        # Hook for subclasses that create temporary files.
        pass
class TestHEPDataStoragePandasDF(TestHEPDataStorageMixin, TestCase):
    """Runs the mixin's test battery with plain pandas DataFrames as input."""
    def _generate_data(self, data, targets, weights):
        # Deep-copy everything so the storage under test cannot mutate the
        # truth objects held by the mixin.
        self.truth_data_type = "df"
        copies = (copy.deepcopy(obj) for obj in (data, targets, weights))
        return tuple(copies)
class TestHEPDataStorageOnTheFly(TestHEPDataStorageMixin, TestCase):
    """Runs the mixin's battery with data (re)assigned after construction."""
    def _generate_data(self, data, targets, weights):
        self.truth_data_type = "df"
        return (copy.deepcopy(data),
                copy.deepcopy(targets),
                copy.deepcopy(weights))
    def _create_ds(self):
        # Build the storage without an explicit index (the data is saved
        # sorted), then push the data through the setter under test.
        storage = HEPDataStorage(
            self.data_for_hepds,
            target=self.target_for_hepds,
            sample_weights=self.weights_for_hepds,
            data_name=self.truth_name,
            data_name_addition=self.truth_name_addition,
        )
        storage.set_data(self.data_for_hepds)
        return storage
class TestHEPDataStorageFolding(TestHEPDataStorageMixin, TestCase):
    # Runs the mixin's battery on a storage obtained as one fold of a
    # tripled dataset, so the fold under test equals the original data.
    def _generate_data(self, data, targets, weights):
        self.truth_data_type = "df"
        return copy.deepcopy(data), copy.deepcopy(targets), copy.deepcopy(weights)
    def _create_ds(self):
        # Build two extra copies of the data with disjoint index ranges
        # (100+, 200+) so the concatenated frame has unique row labels.
        # NOTE(review): the first copy holds data * 2; presumably only the
        # index layout matters for the fold test — values differ on purpose.
        tmp_data_for_hepds = self.data_for_hepds * 2
        tmp_data_for_hepds.set_index(
            [list(range(100, 100 + len(tmp_data_for_hepds)))], inplace=True
        )
        tmp_data_for_hepds3 = copy.deepcopy(tmp_data_for_hepds)
        tmp_data_for_hepds3.set_index(
            [list(range(200, 200 + len(tmp_data_for_hepds3)))], inplace=True
        )
        data_tmp = pd.concat(
            [tmp_data_for_hepds, self.data_for_hepds, tmp_data_for_hepds3], axis=0
        )
        ds_tmp = HEPDataStorage(
            data_tmp,
            target=3 * list(self.target_for_hepds),
            sample_weights=np.concatenate([self.weights_for_hepds for _ in range(3)]),
            # index=self.truth_index, # NO index, because it is saved sorted
            data_name=self.truth_name,
            data_name_addition=self.truth_name_addition,
        )
        # Split into 3 folds without shuffling; the test set of fold 1
        # (second element of the (train, test) pair) is the middle third,
        # i.e. exactly the original data.
        ds_tmp.make_folds(3, shuffle=False)
        ds_tmp = ds_tmp.get_fold(1)
        return ds_tmp[1]
class TestHEPDataStorageROOT(TestHEPDataStorageMixin, TestCase):
    # Runs the mixin's battery with the data described by a ROOT-file dict.
    def _generate_data(self, data, targets, weights):
        import inspect
        import os
        # Directory of this test module; the pre-built ROOT fixture
        # ds1.root is expected to live next to it.
        root_data_folder = os.path.dirname(
            os.path.abspath(inspect.getfile(inspect.currentframe()))
        )
        self.temp_file_path_root = root_data_folder + "/ds1.root"
        # data['root_w'] = weights
        # tmp_file = tempfile.NamedTemporaryFile(suffix='.root', delete=False)
        # to_root(data.loc[data.index], tmp_file.name, key='DecayTree')
        self.truth_data_type = "root"
        # Weights are read from the 'root_w' branch inside the file rather
        # than passed as an array.
        root_dict = {
            "filenames": self.temp_file_path_root,
            "treename": "DecayTree",
            "branches": ["a", "b", "c", "d"],
        }
        return root_dict, copy.deepcopy(targets), "root_w"
    def _create_ds(self):
        return HEPDataStorage(
            self.data_for_hepds,
            target=self.target_for_hepds,
            sample_weights=self.weights_for_hepds,
            index=self.truth_index,
            data_name=self.truth_name,
            data_name_addition=self.truth_name_addition,
        )
    # def _tearDown(self):
    #     import os
    #     os.remove(self.temp_file_path_root)
# Run the full suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"pandas.DataFrame",
"copy.deepcopy",
"numpy.average",
"raredecay.tools.data_storage.HEPDataStorage",
"numpy.testing.assert_almost_equal",
"rep.data.storage.LabeledDataStorage",
"numpy.array",
"pandas.util.testing.assert_almost_equal",
"inspect.currentframe",
"pandas.concat",
"... | [((14144, 14159), 'unittest.main', 'unittest.main', ([], {}), '()\n', (14157, 14159), False, 'import unittest\n'), ((595, 803), 'raredecay.tools.data_storage.HEPDataStorage', 'HEPDataStorage', (['self.data_for_hepds'], {'target': 'self.target_for_hepds', 'sample_weights': 'self.weights_for_hepds', 'index': 'self.truth_index', 'data_name': 'self.truth_name', 'data_name_addition': 'self.truth_name_addition'}), '(self.data_for_hepds, target=self.target_for_hepds,\n sample_weights=self.weights_for_hepds, index=self.truth_index,\n data_name=self.truth_name, data_name_addition=self.truth_name_addition)\n', (609, 803), False, 'from raredecay.tools.data_storage import HEPDataStorage\n'), ((922, 1111), 'raredecay.tools.data_storage.HEPDataStorage', 'HEPDataStorage', (['self.data_for_hepds2'], {'target': 'self.target_for_hepds2', 'sample_weights': 'self.weights_for_hepds2', 'data_name': 'self.truth_name2', 'data_name_addition': 'self.truth_name_addition2'}), '(self.data_for_hepds2, target=self.target_for_hepds2,\n sample_weights=self.weights_for_hepds2, data_name=self.truth_name2,\n data_name_addition=self.truth_name_addition2)\n', (936, 1111), False, 'from raredecay.tools.data_storage import HEPDataStorage\n'), ((1994, 2014), 'copy.deepcopy', 'copy.deepcopy', (['index'], {}), '(index)\n', (2007, 2014), False, 'import copy\n'), ((2978, 3018), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 2, 3, 4, 0.25]'], {}), '([1, 1, 1, 1, 1, 2, 3, 4, 0.25])\n', (2986, 3018), True, 'import numpy as np\n'), ((3432, 3456), 'numpy.array', 'np.array', (['[1.5, 10, 0.3]'], {}), '([1.5, 10, 0.3])\n', (3440, 3456), True, 'import numpy as np\n'), ((4502, 4565), 'numpy.testing.assert_almost_equal', 'nptest.assert_almost_equal', (['self.truth_weights', 'self.ds.weights'], {}), '(self.truth_weights, self.ds.weights)\n', (4528, 4565), True, 'import numpy.testing as nptest\n'), ((4815, 4880), 'numpy.testing.assert_almost_equal', 'nptest.assert_almost_equal', (['self.truth_weights2', 
'self.ds2.weights'], {}), '(self.truth_weights2, self.ds2.weights)\n', (4841, 4880), True, 'import numpy.testing as nptest\n'), ((5462, 5521), 'numpy.testing.assert_almost_equal', 'nptest.assert_almost_equal', (['self.truth_index', 'self.ds.index'], {}), '(self.truth_index, self.ds.index)\n', (5488, 5521), True, 'import numpy.testing as nptest\n'), ((5631, 5690), 'numpy.testing.assert_almost_equal', 'nptest.assert_almost_equal', (['self.truth_index', 'self.ds.index'], {}), '(self.truth_index, self.ds.index)\n', (5657, 5690), True, 'import numpy.testing as nptest\n'), ((6006, 6039), 'copy.deepcopy', 'copy.deepcopy', (['self.truth_columns'], {}), '(self.truth_columns)\n', (6019, 6039), False, 'import copy\n'), ((7420, 7525), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [2, 1, 5], 'b': [12, 11, 15], 'c': [22, 21, 25], 'd': [32, 31, 35]}"], {'index': 'index'}), "({'a': [2, 1, 5], 'b': [12, 11, 15], 'c': [22, 21, 25], 'd': [\n 32, 31, 35]}, index=index)\n", (7432, 7525), True, 'import pandas as pd\n'), ((7976, 8039), 'numpy.testing.assert_almost_equal', 'nptest.assert_almost_equal', (['self.truth_targets', 'self.ds.targets'], {}), '(self.truth_targets, self.ds.targets)\n', (8002, 8039), True, 'import numpy.testing as nptest\n'), ((8723, 8792), 'pandas.concat', 'pd.concat', (['(self.truth_df, self.truth_df2)'], {'axis': '(0)', 'ignore_index': '(True)'}), '((self.truth_df, self.truth_df2), axis=0, ignore_index=True)\n', (8732, 8792), True, 'import pandas as pd\n'), ((8839, 8896), 'numpy.concatenate', 'np.concatenate', (['(self.truth_targets, self.truth_targets2)'], {}), '((self.truth_targets, self.truth_targets2))\n', (8853, 8896), True, 'import numpy as np\n'), ((8921, 8978), 'numpy.concatenate', 'np.concatenate', (['(self.truth_weights, self.truth_weights2)'], {}), '((self.truth_weights, self.truth_weights2))\n', (8935, 8978), True, 'import numpy as np\n'), ((8988, 9032), 'pandas.util.testing.assert_almost_equal', 'pdtest.assert_almost_equal', (['truth_data', 'data'], 
{}), '(truth_data, data)\n', (9014, 9032), True, 'import pandas.util.testing as pdtest\n'), ((9041, 9091), 'numpy.testing.assert_almost_equal', 'nptest.assert_almost_equal', (['truth_targets', 'targets'], {}), '(truth_targets, targets)\n', (9067, 9091), True, 'import numpy.testing as nptest\n'), ((9100, 9150), 'numpy.testing.assert_almost_equal', 'nptest.assert_almost_equal', (['truth_weights', 'weights'], {}), '(truth_weights, weights)\n', (9126, 9150), True, 'import numpy.testing as nptest\n'), ((9479, 9578), 'rep.data.storage.LabeledDataStorage', 'LabeledDataStorage', (['self.truth_df'], {'target': 'self.truth_targets', 'sample_weight': 'self.truth_weights'}), '(self.truth_df, target=self.truth_targets, sample_weight=\n self.truth_weights)\n', (9497, 9578), False, 'from rep.data.storage import LabeledDataStorage\n'), ((11235, 11419), 'raredecay.tools.data_storage.HEPDataStorage', 'HEPDataStorage', (['self.data_for_hepds'], {'target': 'self.target_for_hepds', 'sample_weights': 'self.weights_for_hepds', 'data_name': 'self.truth_name', 'data_name_addition': 'self.truth_name_addition'}), '(self.data_for_hepds, target=self.target_for_hepds,\n sample_weights=self.weights_for_hepds, data_name=self.truth_name,\n data_name_addition=self.truth_name_addition)\n', (11249, 11419), False, 'from raredecay.tools.data_storage import HEPDataStorage\n'), ((12105, 12138), 'copy.deepcopy', 'copy.deepcopy', (['tmp_data_for_hepds'], {}), '(tmp_data_for_hepds)\n', (12118, 12138), False, 'import copy\n'), ((12285, 12370), 'pandas.concat', 'pd.concat', (['[tmp_data_for_hepds, self.data_for_hepds, tmp_data_for_hepds3]'], {'axis': '(0)'}), '([tmp_data_for_hepds, self.data_for_hepds, tmp_data_for_hepds3],\n axis=0)\n', (12294, 12370), True, 'import pandas as pd\n'), ((13721, 13929), 'raredecay.tools.data_storage.HEPDataStorage', 'HEPDataStorage', (['self.data_for_hepds'], {'target': 'self.target_for_hepds', 'sample_weights': 'self.weights_for_hepds', 'index': 'self.truth_index', 
'data_name': 'self.truth_name', 'data_name_addition': 'self.truth_name_addition'}), '(self.data_for_hepds, target=self.target_for_hepds,\n sample_weights=self.weights_for_hepds, index=self.truth_index,\n data_name=self.truth_name, data_name_addition=self.truth_name_addition)\n', (13735, 13929), False, 'from raredecay.tools.data_storage import HEPDataStorage\n'), ((1455, 1484), 'copy.deepcopy', 'copy.deepcopy', (['self.truth_df2'], {}), '(self.truth_df2)\n', (1468, 1484), False, 'import copy\n'), ((1498, 1532), 'copy.deepcopy', 'copy.deepcopy', (['self.truth_targets2'], {}), '(self.truth_targets2)\n', (1511, 1532), False, 'import copy\n'), ((1546, 1580), 'copy.deepcopy', 'copy.deepcopy', (['self.truth_weights2'], {}), '(self.truth_weights2)\n', (1559, 1580), False, 'import copy\n'), ((1655, 1686), 'numpy.average', 'np.average', (['self.truth_weights2'], {}), '(self.truth_weights2)\n', (1665, 1686), True, 'import numpy as np\n'), ((2174, 2202), 'copy.deepcopy', 'copy.deepcopy', (['self.truth_df'], {}), '(self.truth_df)\n', (2187, 2202), False, 'import copy\n'), ((2216, 2249), 'copy.deepcopy', 'copy.deepcopy', (['self.truth_targets'], {}), '(self.truth_targets)\n', (2229, 2249), False, 'import copy\n'), ((2263, 2296), 'copy.deepcopy', 'copy.deepcopy', (['self.truth_weights'], {}), '(self.truth_weights)\n', (2276, 2296), False, 'import copy\n'), ((2426, 2456), 'numpy.average', 'np.average', (['self.truth_weights'], {}), '(self.truth_weights)\n', (2436, 2456), True, 'import numpy as np\n'), ((3035, 3053), 'copy.deepcopy', 'copy.deepcopy', (['obj'], {}), '(obj)\n', (3048, 3053), False, 'import copy\n'), ((3834, 3853), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (3847, 3853), False, 'import copy\n'), ((3855, 3877), 'copy.deepcopy', 'copy.deepcopy', (['targets'], {}), '(targets)\n', (3868, 3877), False, 'import copy\n'), ((3879, 3901), 'copy.deepcopy', 'copy.deepcopy', (['weights'], {}), '(weights)\n', (3892, 3901), False, 'import copy\n'), ((4243, 4262), 
'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (4256, 4262), False, 'import copy\n'), ((4264, 4286), 'copy.deepcopy', 'copy.deepcopy', (['targets'], {}), '(targets)\n', (4277, 4286), False, 'import copy\n'), ((4288, 4310), 'copy.deepcopy', 'copy.deepcopy', (['weights'], {}), '(weights)\n', (4301, 4310), False, 'import copy\n'), ((6269, 6297), 'copy.deepcopy', 'copy.deepcopy', (['self.truth_df'], {}), '(self.truth_df)\n', (6282, 6297), False, 'import copy\n'), ((10879, 10898), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (10892, 10898), False, 'import copy\n'), ((10900, 10922), 'copy.deepcopy', 'copy.deepcopy', (['targets'], {}), '(targets)\n', (10913, 10922), False, 'import copy\n'), ((10924, 10946), 'copy.deepcopy', 'copy.deepcopy', (['weights'], {}), '(weights)\n', (10937, 10946), False, 'import copy\n'), ((11123, 11142), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (11136, 11142), False, 'import copy\n'), ((11144, 11166), 'copy.deepcopy', 'copy.deepcopy', (['targets'], {}), '(targets)\n', (11157, 11166), False, 'import copy\n'), ((11168, 11190), 'copy.deepcopy', 'copy.deepcopy', (['weights'], {}), '(weights)\n', (11181, 11190), False, 'import copy\n'), ((11803, 11822), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (11816, 11822), False, 'import copy\n'), ((11824, 11846), 'copy.deepcopy', 'copy.deepcopy', (['targets'], {}), '(targets)\n', (11837, 11846), False, 'import copy\n'), ((11848, 11870), 'copy.deepcopy', 'copy.deepcopy', (['weights'], {}), '(weights)\n', (11861, 11870), False, 'import copy\n'), ((13646, 13668), 'copy.deepcopy', 'copy.deepcopy', (['targets'], {}), '(targets)\n', (13659, 13668), False, 'import copy\n'), ((13126, 13148), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (13146, 13148), False, 'import inspect\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import requests
import numpy as np
import json
import time
from multiprocessing import Pool
# Base URL of the account service under test (alternative hosts kept for reference).
urlBase = 'https://login.commodity.llc'
# 172.16.58.3
# urlBase = 'http://172.16.17.3236:5000'
# urlBase = 'http://localhost:5000'
# REST endpoint for account creation (v2 variant kept for reference).
api = '/api/v1/accounts'
# api = '/api/v2/accounts'
url = urlBase + api
def JTest(name=None):
    """Build a JSON payload for the account-creation endpoint.

    Args:
        name: account name to use; when None, a pseudo-random unique name
            of the form 'j1-<digits>' is generated.

    Returns:
        dict: request body with the fixed key material filled in.
    """
    payload = {'account': {
        'name': 'qa3',
        'owner_key': '<KEY>',
        'active_key': '<KEY>',
        'memo_key': '<KEY>',
        'refcode': '',
        'referrer': ''}}
    # Idiomatic None test instead of isinstance(name, type(None)).
    if name is None:
        name = 'j1-' + str(np.random.randint(100000000000000000))
    payload['account']['name'] = name
    return payload
def TestStates(jTest):
    """POST the payload to the accounts endpoint and return the parsed JSON."""
    response = requests.post(url, json=jTest, timeout=(300, 600))
    return json.loads(response.text)
def TestStatesAll():
    """Drive one account through every transaction state, printing progress."""
    jTest = JTest()

    def base_error_is(reply, expected):
        # True once the service reports the given base-error state.
        return "error" in reply and reply["error"]["base"][0] == expected

    print('Test Started')
    while "account" not in TestStates(jTest):
        pass
    print('Test Account Creation Started')
    while True:
        reply = TestStates(jTest)
        print(reply)
        if base_error_is(reply, "Account init"):
            break
    print('Test Account in queue')
    while not base_error_is(TestStates(jTest), "Account run"):
        pass
    print('Test Account is in the current worker process')
    while not base_error_is(TestStates(jTest), "Account exists"):
        pass
    print('Test Account Created')
    print("Test Successful, All transaction states are reproduced")
def TestV1():
    """Fire a single account-creation request and return the service reply."""
    payload = JTest()
    print('Test Started')
    return TestStates(payload)
def Bombard(jTest):
    """POST one account-creation payload and time it.

    Returns a (success, finish_time, request_seconds) tuple; on exception
    the request duration is reported as 0.
    """
    start = time.time()
    try:
        response = requests.post(url, json=jTest, timeout=(300, 600))
        elapsed = time.time() - start
        # print('time = ', time.time() - tic)
        parsed = json.loads(response.text)
        if 'account' in parsed:
            print('name:', parsed[
                'account']['name'], elapsed, time.time() - start)
            return (True, time.time(), elapsed)
        print('FAILED:', response.text, elapsed, time.time() - start)
        return (False, time.time(), elapsed)
    except Exception as e:
        print('exception Type:', type(e))
        print('exception Args:', e.args)
        print('e', e)
        return (False, time.time(), 0)
def Bombards(count, numberOfProcesses):
    """Fire `count` account-creation requests through a pool of workers.

    Args:
        count: number of requests (one fresh payload each).
        numberOfProcesses: size of the multiprocessing pool.

    Returns:
        list of (success, finish_time, request_seconds) tuples from Bombard.
    """
    # One fresh payload per request (comprehension replaces the append loop).
    jTests = [JTest() for _ in range(count)]
    p = Pool(processes=numberOfProcesses)
    print('starting pool map')
    tic = time.time()
    try:
        r = p.map(Bombard, jTests)
        print('done pool map')
    finally:
        p.close()
        p.join()  # wait for the workers to exit so resources are released
        print('closed pool')
    toc = time.time() - tic
    # Guard against ZeroDivisionError when called with count == 0.
    average = toc / count if count else 0.0
    print('time Total=', toc, 'count=', count, 'average=', average)
    return r
class Testcases(unittest.TestCase):
    # Smoke test against the live service: a created account must echo
    # back the expected active key.
    def test_account_creation(self):
        text = TestV1()
        self.assertEqual(
            text["account"]["active_key"],
            '<KEY>')
if __name__ == "__main__":
    # TestStatesAll()
    # TestV1()
    # Instantiating a TestCase never executes it; hand control to the
    # unittest runner so Testcases.test_account_creation actually runs.
    unittest.main()
| [
"json.loads",
"time.time",
"numpy.random.randint",
"multiprocessing.Pool",
"requests.post"
] | [((787, 837), 'requests.post', 'requests.post', (['url'], {'json': 'jTest', 'timeout': '(300, 600)'}), '(url, json=jTest, timeout=(300, 600))\n', (800, 837), False, 'import requests\n'), ((867, 883), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (877, 883), False, 'import json\n'), ((1983, 1994), 'time.time', 'time.time', ([], {}), '()\n', (1992, 1994), False, 'import time\n'), ((2999, 3032), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'numberOfProcesses'}), '(processes=numberOfProcesses)\n', (3003, 3032), False, 'from multiprocessing import Pool\n'), ((3074, 3085), 'time.time', 'time.time', ([], {}), '()\n', (3083, 3085), False, 'import time\n'), ((2016, 2066), 'requests.post', 'requests.post', (['url'], {'json': 'jTest', 'timeout': '(300, 600)'}), '(url, json=jTest, timeout=(300, 600))\n', (2029, 2066), False, 'import requests\n'), ((2212, 2228), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (2222, 2228), False, 'import json\n'), ((3230, 3241), 'time.time', 'time.time', ([], {}), '()\n', (3239, 3241), False, 'import time\n'), ((2084, 2095), 'time.time', 'time.time', ([], {}), '()\n', (2093, 2095), False, 'import time\n'), ((640, 677), 'numpy.random.randint', 'np.random.randint', (['(100000000000000000)'], {}), '(100000000000000000)\n', (657, 677), True, 'import numpy as np\n'), ((2389, 2400), 'time.time', 'time.time', ([], {}), '()\n', (2398, 2400), False, 'import time\n'), ((2586, 2597), 'time.time', 'time.time', ([], {}), '()\n', (2595, 2597), False, 'import time\n'), ((2793, 2804), 'time.time', 'time.time', ([], {}), '()\n', (2802, 2804), False, 'import time\n'), ((2344, 2355), 'time.time', 'time.time', ([], {}), '()\n', (2353, 2355), False, 'import time\n'), ((2503, 2514), 'time.time', 'time.time', ([], {}), '()\n', (2512, 2514), False, 'import time\n')] |
"""
KKZ 1994 algorithm
See: A new initialization technique for generalized Lloyd iteration
https://ieeexplore.ieee.org/abstract/document/329844/
"""
import numpy as np
from initialisations.base import Initialisation
from kmeans import distance_table
class KKZ(Initialisation):
    """KKZ 1994 initialisation algorithm"""

    def find_centers(self):
        """Pick the point with the largest norm first, then repeatedly add
        the point farthest from its nearest already-chosen center."""
        # L2/Euclidean norm, as suggested by the R kkz() documentation
        magnitudes = np.linalg.norm(self._data, axis=1)
        centers = np.array([self._data[np.argmax(magnitudes)]])
        for _ in range(self._num_clusters - 1):
            dists = distance_table(self._data, centers)
            nearest = dists.min(axis=1)
            farthest = np.argmax(nearest)
            centers = np.append(centers, [self._data[farthest]], axis=0)
        return centers
def generate(data, num_clusters):
    """The common interface"""
    return KKZ(data, num_clusters).find_centers()
| [
"numpy.argmax",
"kmeans.distance_table",
"numpy.append",
"numpy.min",
"numpy.array",
"numpy.linalg.norm"
] | [((469, 503), 'numpy.linalg.norm', 'np.linalg.norm', (['self._data'], {'axis': '(1)'}), '(self._data, axis=1)\n', (483, 503), True, 'import numpy as np\n'), ((569, 586), 'numpy.array', 'np.array', (['[first]'], {}), '([first])\n', (577, 586), True, 'import numpy as np\n'), ((532, 548), 'numpy.argmax', 'np.argmax', (['norms'], {}), '(norms)\n', (541, 548), True, 'import numpy as np\n'), ((666, 702), 'kmeans.distance_table', 'distance_table', (['self._data', 'codebook'], {}), '(self._data, codebook)\n', (680, 702), False, 'from kmeans import distance_table\n'), ((722, 747), 'numpy.min', 'np.min', (['distances'], {'axis': '(1)'}), '(distances, axis=1)\n', (728, 747), True, 'import numpy as np\n'), ((767, 790), 'numpy.argmax', 'np.argmax', (['mins'], {'axis': '(0)'}), '(mins, axis=0)\n', (776, 790), True, 'import numpy as np\n'), ((849, 883), 'numpy.append', 'np.append', (['codebook', '[nxt]'], {'axis': '(0)'}), '(codebook, [nxt], axis=0)\n', (858, 883), True, 'import numpy as np\n')] |
from datetime import datetime
import numpy as np
import pytz
def pivot_time(input_datetime=None):
    """Return the nearest floor t00z (UTC midnight) for the given datetime.

    Without an argument, the pivot time for the current UTC datetime is
    returned.
    """
    if input_datetime is None:
        moment = nearest_cycle_date()
    else:
        moment = localize_datetime(input_datetime).astimezone(pytz.utc)
    return localize_datetime(datetime(moment.year, moment.month, moment.day))
def nearest_cycle_date(input_datetime=None, period=6):
    """Return the most recent cycle start at or before ``input_datetime``.

    The cycle hours are the multiples of ``period`` (default 6, i.e. the
    00/06/12/18z cycles); the result is a UTC-aware datetime. Without an
    argument, the current UTC time is used.
    """
    if input_datetime is None:
        input_datetime = localize_datetime(datetime.utcnow())
    # Floor division avoids the float round-trip of np.floor(); int() keeps
    # the result usable as an hour even for a non-integer period.
    current_cycle = int(period * (input_datetime.hour // period))
    return pytz.timezone('UTC').localize(
        datetime(input_datetime.year, input_datetime.month,
                 input_datetime.day, current_cycle))
def localize_datetime(d):
    """Return *d* as a timezone-aware datetime (naive inputs become UTC)."""
    aware = d.tzinfo is not None and d.tzinfo.utcoffset(d) is not None
    if aware:
        return d
    return pytz.timezone('UTC').localize(d)
| [
"datetime.datetime.utcnow",
"numpy.floor",
"pytz.timezone",
"datetime.datetime"
] | [((488, 559), 'datetime.datetime', 'datetime', (['input_datetime.year', 'input_datetime.month', 'input_datetime.day'], {}), '(input_datetime.year, input_datetime.month, input_datetime.day)\n', (496, 559), False, 'from datetime import datetime\n'), ((843, 933), 'datetime.datetime', 'datetime', (['input_datetime.year', 'input_datetime.month', 'input_datetime.day', 'current_cycle'], {}), '(input_datetime.year, input_datetime.month, input_datetime.day,\n current_cycle)\n', (851, 933), False, 'from datetime import datetime\n'), ((701, 718), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (716, 718), False, 'from datetime import datetime\n'), ((753, 791), 'numpy.floor', 'np.floor', (['(input_datetime.hour / period)'], {}), '(input_datetime.hour / period)\n', (761, 791), True, 'import numpy as np\n'), ((804, 824), 'pytz.timezone', 'pytz.timezone', (['"""UTC"""'], {}), "('UTC')\n", (817, 824), False, 'import pytz\n'), ((1078, 1098), 'pytz.timezone', 'pytz.timezone', (['"""UTC"""'], {}), "('UTC')\n", (1091, 1098), False, 'import pytz\n')] |
import hugectr
from mpi4py import MPI
# --- Solver: 1024-sample batches, warmup + decayed learning rate, 1 GPU ---
solver = hugectr.CreateSolver(
                              max_eval_batches = 1,
                              batchsize_eval = 1024,
                              batchsize = 1024,
                              lr = 0.01,
                              end_lr = 0.0001,
                              warmup_steps = 8000,
                              decay_start = 48000,
                              decay_steps = 24000,
                              vvgpu = [[0]],
                              repeat_dataset = True,
                              i64_input_key = True
)
# --- Data: Parquet reader over 4 categorical slots of 10001 values each ---
reader = hugectr.DataReaderParams(
                                  data_reader_type = hugectr.DataReaderType_t.Parquet,
                                  source = ["./multi_cross/data/train/_file_list.txt"],
                                  eval_source = "./multi_cross/data/test/_file_list.txt",
                                  check_type = hugectr.Check_t.Sum,
                                  slot_size_array = [10001, 10001, 10001, 10001]
)
optimizer = hugectr.CreateOptimizer(
                                    optimizer_type = hugectr.Optimizer_t.Adam,
                                    update_type = hugectr.Update_t.Local,
                                    beta1 = 0.9,
                                    beta2 = 0.999,
                                    epsilon = 0.0000001
)
model = hugectr.Model(solver, reader, optimizer)
num_gpus = 1
# Embedding workspace: 4 slots * 10001 rows * vec_size 16 * 4 bytes,
# converted to MB, plus 10 MB of headroom.
workspace_size_per_gpu_in_mb = (
    int(
        40004
        * 16
        * 4
        / 1000000
    )
    + 10
)
# --- Network: embedding + dense tower ending in a 3-label multi-loss ---
model.add(
    hugectr.Input(
        label_dim=3,
        label_name="label",
        dense_dim=3,
        dense_name="dense",
        data_reader_sparse_param_array=[
            hugectr.DataReaderSparseParam(
                "data1",
                [1,1,1,1],
                False,
                4,
            )
        ],
    )
)
model.add(
    hugectr.SparseEmbedding(
        embedding_type=hugectr.Embedding_t.LocalizedSlotSparseEmbeddingHash,
        workspace_size_per_gpu_in_mb=workspace_size_per_gpu_in_mb,
        embedding_vec_size=16,
        combiner="mean",
        sparse_embedding_name="sparse_embedding1",
        bottom_name="data1",
        optimizer=optimizer,
    )
)
model.add(
    hugectr.DenseLayer(
        layer_type=hugectr.Layer_t.InnerProduct,
        bottom_names=["dense"],
        top_names=["fc1"],
        num_output=16,
    )
)
model.add(
    hugectr.DenseLayer(
        layer_type=hugectr.Layer_t.ReLU,
        bottom_names=["fc1"],
        top_names=["relu1"],
    )
)
model.add(
    hugectr.DenseLayer(
        layer_type=hugectr.Layer_t.Interaction,
        bottom_names=["relu1", "sparse_embedding1"],
        top_names=["interaction1"],
    )
)
model.add(
    hugectr.DenseLayer(
        layer_type=hugectr.Layer_t.InnerProduct,
        bottom_names=["interaction1"],
        top_names=["fc4"],
        num_output=32,
    )
)
model.add(
    hugectr.DenseLayer(
        layer_type=hugectr.Layer_t.ReLU,
        bottom_names=["fc4"],
        top_names=["relu4"],
    )
)
model.add(
    hugectr.DenseLayer(
        layer_type=hugectr.Layer_t.InnerProduct,
        bottom_names=["relu4"],
        top_names=["fc8"],
        num_output=3,
    )
)
model.add(
    hugectr.DenseLayer(
        layer_type=hugectr.Layer_t.MultiCrossEntropyLoss,
        bottom_names=["fc8", "label"],
        top_names=["loss"],
        target_weight_vec=[0.2,0.4,0.4]
    )
)
# Train for 1000 iterations, snapshot the model, and export predictions so
# the inference pass below can be validated against them.
model.compile()
model.summary()
model.graph_to_json(graph_config_file='/dump_infer/multi_cross_entropy_loss.json')
model.fit(max_iter = 1001, display = 100, eval_interval = 1000, snapshot = 1000, snapshot_prefix = "/dump_infer/multi_cross_entropy_loss")
model.export_predictions("/dump_infer/multi_cross_entropy_loss_pred_" + str(1000), "/dump_infer/multi_cross_entropy_loss_label_" + str(1000))
from hugectr.inference import InferenceParams, CreateInferenceSession
from mpi4py import MPI
import hugectr
import pandas as pd
import numpy as np
inference_params = InferenceParams(
model_name="multi_cross_entropy_loss",
max_batchsize=1024,
hit_rate_threshold=1.0,
dense_model_file="/dump_infer/multi_cross_entropy_loss_dense_1000.model",
sparse_model_files=["/dump_infer/multi_cross_entropy_loss0_sparse_1000.model"],
device_id=0,
use_gpu_embedding_cache=True,
cache_size_percentage=0.5,
use_mixed_precision=False,
i64_input_key=True,
)
inference_session = CreateInferenceSession(
'/dump_infer/multi_cross_entropy_loss.json', inference_params
)
preds = inference_session.predict(
num_batches=1,
source="./multi_cross/data/test/_file_list.txt",
data_reader_type=hugectr.DataReaderType_t.Parquet,
check_type=hugectr.Check_t.Sum,
slot_size_array=[10001, 10001, 10001, 10001],
)
ground_truth = np.loadtxt("/dump_infer/multi_cross_entropy_loss_pred_1000")
predictions = preds.flatten()
diff = predictions-ground_truth
mse = np.mean(diff*diff)
if mse > 1e-3:
raise RuntimeError("Too large mse between multi_cross_entropy_loss inference and training: {}".format(mse))
sys.exit(1)
else:
print("multi_cross_entropy_loss inference results are consistent with those during training, mse: {}".format(mse)) | [
"hugectr.CreateSolver",
"hugectr.inference.CreateInferenceSession",
"hugectr.DenseLayer",
"hugectr.SparseEmbedding",
"hugectr.inference.InferenceParams",
"hugectr.CreateOptimizer",
"hugectr.DataReaderSparseParam",
"hugectr.Model",
"numpy.mean",
"numpy.loadtxt",
"hugectr.DataReaderParams"
] | [((48, 269), 'hugectr.CreateSolver', 'hugectr.CreateSolver', ([], {'max_eval_batches': '(1)', 'batchsize_eval': '(1024)', 'batchsize': '(1024)', 'lr': '(0.01)', 'end_lr': '(0.0001)', 'warmup_steps': '(8000)', 'decay_start': '(48000)', 'decay_steps': '(24000)', 'vvgpu': '[[0]]', 'repeat_dataset': '(True)', 'i64_input_key': '(True)'}), '(max_eval_batches=1, batchsize_eval=1024, batchsize=\n 1024, lr=0.01, end_lr=0.0001, warmup_steps=8000, decay_start=48000,\n decay_steps=24000, vvgpu=[[0]], repeat_dataset=True, i64_input_key=True)\n', (68, 269), False, 'import hugectr\n'), ((338, 611), 'hugectr.DataReaderParams', 'hugectr.DataReaderParams', ([], {'data_reader_type': 'hugectr.DataReaderType_t.Parquet', 'source': "['./multi_cross/data/train/_file_list.txt']", 'eval_source': '"""./multi_cross/data/test/_file_list.txt"""', 'check_type': 'hugectr.Check_t.Sum', 'slot_size_array': '[10001, 10001, 10001, 10001]'}), "(data_reader_type=hugectr.DataReaderType_t.Parquet,\n source=['./multi_cross/data/train/_file_list.txt'], eval_source=\n './multi_cross/data/test/_file_list.txt', check_type=hugectr.Check_t.\n Sum, slot_size_array=[10001, 10001, 10001, 10001])\n", (362, 611), False, 'import hugectr\n'), ((642, 785), 'hugectr.CreateOptimizer', 'hugectr.CreateOptimizer', ([], {'optimizer_type': 'hugectr.Optimizer_t.Adam', 'update_type': 'hugectr.Update_t.Local', 'beta1': '(0.9)', 'beta2': '(0.999)', 'epsilon': '(1e-07)'}), '(optimizer_type=hugectr.Optimizer_t.Adam,\n update_type=hugectr.Update_t.Local, beta1=0.9, beta2=0.999, epsilon=1e-07)\n', (665, 785), False, 'import hugectr\n'), ((826, 866), 'hugectr.Model', 'hugectr.Model', (['solver', 'reader', 'optimizer'], {}), '(solver, reader, optimizer)\n', (839, 866), False, 'import hugectr\n'), ((3461, 3856), 'hugectr.inference.InferenceParams', 'InferenceParams', ([], {'model_name': '"""multi_cross_entropy_loss"""', 'max_batchsize': '(1024)', 'hit_rate_threshold': '(1.0)', 'dense_model_file': 
'"""/dump_infer/multi_cross_entropy_loss_dense_1000.model"""', 'sparse_model_files': "['/dump_infer/multi_cross_entropy_loss0_sparse_1000.model']", 'device_id': '(0)', 'use_gpu_embedding_cache': '(True)', 'cache_size_percentage': '(0.5)', 'use_mixed_precision': '(False)', 'i64_input_key': '(True)'}), "(model_name='multi_cross_entropy_loss', max_batchsize=1024,\n hit_rate_threshold=1.0, dense_model_file=\n '/dump_infer/multi_cross_entropy_loss_dense_1000.model',\n sparse_model_files=[\n '/dump_infer/multi_cross_entropy_loss0_sparse_1000.model'], device_id=0,\n use_gpu_embedding_cache=True, cache_size_percentage=0.5,\n use_mixed_precision=False, i64_input_key=True)\n", (3476, 3856), False, 'from hugectr.inference import InferenceParams, CreateInferenceSession\n'), ((3895, 3984), 'hugectr.inference.CreateInferenceSession', 'CreateInferenceSession', (['"""/dump_infer/multi_cross_entropy_loss.json"""', 'inference_params'], {}), "('/dump_infer/multi_cross_entropy_loss.json',\n inference_params)\n", (3917, 3984), False, 'from hugectr.inference import InferenceParams, CreateInferenceSession\n'), ((4254, 4314), 'numpy.loadtxt', 'np.loadtxt', (['"""/dump_infer/multi_cross_entropy_loss_pred_1000"""'], {}), "('/dump_infer/multi_cross_entropy_loss_pred_1000')\n", (4264, 4314), True, 'import numpy as np\n'), ((4383, 4403), 'numpy.mean', 'np.mean', (['(diff * diff)'], {}), '(diff * diff)\n', (4390, 4403), True, 'import numpy as np\n'), ((1350, 1644), 'hugectr.SparseEmbedding', 'hugectr.SparseEmbedding', ([], {'embedding_type': 'hugectr.Embedding_t.LocalizedSlotSparseEmbeddingHash', 'workspace_size_per_gpu_in_mb': 'workspace_size_per_gpu_in_mb', 'embedding_vec_size': '(16)', 'combiner': '"""mean"""', 'sparse_embedding_name': '"""sparse_embedding1"""', 'bottom_name': '"""data1"""', 'optimizer': 'optimizer'}), "(embedding_type=hugectr.Embedding_t.\n LocalizedSlotSparseEmbeddingHash, workspace_size_per_gpu_in_mb=\n workspace_size_per_gpu_in_mb, embedding_vec_size=16, 
combiner='mean',\n sparse_embedding_name='sparse_embedding1', bottom_name='data1',\n optimizer=optimizer)\n", (1373, 1644), False, 'import hugectr\n'), ((1707, 1829), 'hugectr.DenseLayer', 'hugectr.DenseLayer', ([], {'layer_type': 'hugectr.Layer_t.InnerProduct', 'bottom_names': "['dense']", 'top_names': "['fc1']", 'num_output': '(16)'}), "(layer_type=hugectr.Layer_t.InnerProduct, bottom_names=[\n 'dense'], top_names=['fc1'], num_output=16)\n", (1725, 1829), False, 'import hugectr\n'), ((1881, 1979), 'hugectr.DenseLayer', 'hugectr.DenseLayer', ([], {'layer_type': 'hugectr.Layer_t.ReLU', 'bottom_names': "['fc1']", 'top_names': "['relu1']"}), "(layer_type=hugectr.Layer_t.ReLU, bottom_names=['fc1'],\n top_names=['relu1'])\n", (1899, 1979), False, 'import hugectr\n'), ((2024, 2160), 'hugectr.DenseLayer', 'hugectr.DenseLayer', ([], {'layer_type': 'hugectr.Layer_t.Interaction', 'bottom_names': "['relu1', 'sparse_embedding1']", 'top_names': "['interaction1']"}), "(layer_type=hugectr.Layer_t.Interaction, bottom_names=[\n 'relu1', 'sparse_embedding1'], top_names=['interaction1'])\n", (2042, 2160), False, 'import hugectr\n'), ((2204, 2333), 'hugectr.DenseLayer', 'hugectr.DenseLayer', ([], {'layer_type': 'hugectr.Layer_t.InnerProduct', 'bottom_names': "['interaction1']", 'top_names': "['fc4']", 'num_output': '(32)'}), "(layer_type=hugectr.Layer_t.InnerProduct, bottom_names=[\n 'interaction1'], top_names=['fc4'], num_output=32)\n", (2222, 2333), False, 'import hugectr\n'), ((2385, 2483), 'hugectr.DenseLayer', 'hugectr.DenseLayer', ([], {'layer_type': 'hugectr.Layer_t.ReLU', 'bottom_names': "['fc4']", 'top_names': "['relu4']"}), "(layer_type=hugectr.Layer_t.ReLU, bottom_names=['fc4'],\n top_names=['relu4'])\n", (2403, 2483), False, 'import hugectr\n'), ((2528, 2649), 'hugectr.DenseLayer', 'hugectr.DenseLayer', ([], {'layer_type': 'hugectr.Layer_t.InnerProduct', 'bottom_names': "['relu4']", 'top_names': "['fc8']", 'num_output': '(3)'}), "(layer_type=hugectr.Layer_t.InnerProduct, 
bottom_names=[\n 'relu4'], top_names=['fc8'], num_output=3)\n", (2546, 2649), False, 'import hugectr\n'), ((2701, 2864), 'hugectr.DenseLayer', 'hugectr.DenseLayer', ([], {'layer_type': 'hugectr.Layer_t.MultiCrossEntropyLoss', 'bottom_names': "['fc8', 'label']", 'top_names': "['loss']", 'target_weight_vec': '[0.2, 0.4, 0.4]'}), "(layer_type=hugectr.Layer_t.MultiCrossEntropyLoss,\n bottom_names=['fc8', 'label'], top_names=['loss'], target_weight_vec=[\n 0.2, 0.4, 0.4])\n", (2719, 2864), False, 'import hugectr\n'), ((1177, 1239), 'hugectr.DataReaderSparseParam', 'hugectr.DataReaderSparseParam', (['"""data1"""', '[1, 1, 1, 1]', '(False)', '(4)'], {}), "('data1', [1, 1, 1, 1], False, 4)\n", (1206, 1239), False, 'import hugectr\n')] |
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from matplotlib.ticker import AutoMinorLocator
import numpy as np
# =================================================================================
def ecg_plot(ecg):
fs = 128
Time=np.linspace(0, len(ecg)/fs, num=len(ecg))
fig, ax = plt.subplots(figsize=(16,5))
ax.plot(Time,ecg,'-', lw=1.0, color='k')
ax.set_xticks(np.arange(0,10,0.2),)
plt.xticks( rotation='vertical')
ax.set_yticks(np.arange(-1,1,0.03))
ax.minorticks_on()
ax.xaxis.set_minor_locator(AutoMinorLocator(5))
ax.yaxis.set_minor_locator(AutoMinorLocator(5))
ax.grid(which='major', linestyle='-', linewidth='0.5', color='red')
ax.grid(which='minor', linestyle='-', linewidth='0.5', color=(1, 0.7, 0.7))
ax.set_ylim(-0.3, 0.4)
ax.set_xlim(0, 10)
plt.ylabel('ECG0(mV)')
plt.xlabel('time(s)')
# plt.title('Abnormal Record')
plt.show() | [
"matplotlib.pyplot.show",
"matplotlib.ticker.AutoMinorLocator",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xlabel"
] | [((329, 358), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(16, 5)'}), '(figsize=(16, 5))\n', (341, 358), True, 'import matplotlib.pyplot as plt\n'), ((450, 481), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""vertical"""'}), "(rotation='vertical')\n", (460, 481), True, 'import matplotlib.pyplot as plt\n'), ((862, 884), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ECG0(mV)"""'], {}), "('ECG0(mV)')\n", (872, 884), True, 'import matplotlib.pyplot as plt\n'), ((889, 910), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time(s)"""'], {}), "('time(s)')\n", (899, 910), True, 'import matplotlib.pyplot as plt\n'), ((950, 960), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (958, 960), True, 'import matplotlib.pyplot as plt\n'), ((422, 443), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(0.2)'], {}), '(0, 10, 0.2)\n', (431, 443), True, 'import numpy as np\n'), ((503, 525), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(0.03)'], {}), '(-1, 1, 0.03)\n', (512, 525), True, 'import numpy as np\n'), ((581, 600), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', (['(5)'], {}), '(5)\n', (597, 600), False, 'from matplotlib.ticker import AutoMinorLocator\n'), ((633, 652), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', (['(5)'], {}), '(5)\n', (649, 652), False, 'from matplotlib.ticker import AutoMinorLocator\n')] |
import sys
import argparse
from yolo import YOLO, detect_video
from PIL import Image, ImageDraw, ImageFont
import glob
import os
from convertannotation import convert_annotation
import cv2
from datetime import datetime
import numpy as np
def bb_intersection_over_union(boxA, boxB):
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
iou = interArea / float(boxAArea + boxBArea - interArea)
return iou
def convert_center_width_height_to_x_y(box):
x_min = box['center_x'] - int(box['width']/2)
x_max = box['center_x'] + int(box['width']/2)
y_min = box['center_y'] - int(box['height']/2)
y_max = box['center_y'] + int(box['height']/2)
return {
'x_min': x_min,
'x_max': x_max,
'y_min': y_min,
'y_max': y_max,
}
def detect_img(yolo, img):
box_heli, box_arrow = yolo.detect_image(img)
result = {'time': datetime.timestamp(
datetime.now()), 'helipad': None, 'arrow': None}
if (len(box_heli) > 0):
width = box_heli[2] - box_heli[0]
height = box_heli[3] - box_heli[1]
center_x = (box_heli[2] + box_heli[0])/2
center_y = (box_heli[3] + box_heli[1])/2
result['helipad'] = {'center_x': center_x,
'center_y': center_y, 'width': width, 'height': height}
if (len(box_arrow) > 0):
width = box_arrow[2] - box_arrow[0]
height = box_arrow[3] - box_arrow[1]
center_x = (box_arrow[2] + box_arrow[0])/2
center_y = (box_arrow[3] + box_arrow[1])/2
result['arrow'] = {'center_x': center_x,
'center_y': center_y, 'width': width, 'height': height}
return result
def draw_box(image, result):
draw = ImageDraw.Draw(image)
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
if result['helipad'] is not None:
color = 'red'
helipad_box = convert_center_width_height_to_x_y(result['helipad'])
draw.rectangle(((helipad_box['x_min'], helipad_box['y_min']),
(helipad_box['x_max'], helipad_box['y_max'])), outline=color, width = 3)
label = 'helipad'
label_size = draw.textsize(label, font)
if helipad_box['y_min'] - label_size[1] >= 0:
text_origin = np.array([helipad_box['x_min'], helipad_box['y_min'] - label_size[1]])
else:
text_origin = np.array([helipad_box['x_min'], helipad_box['y_min'] + 1])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=color)
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
if result['arrow'] is not None:
color = 'blue'
arrow_box = convert_center_width_height_to_x_y(result['arrow'])
draw.rectangle(((arrow_box['x_min'], arrow_box['y_min']),
(arrow_box['x_max'], arrow_box['y_max'])), width=3, outline=color)
label = 'arrow'
label_size = draw.textsize(label, font)
if arrow_box['y_min'] - label_size[1] >= 0:
text_origin = np.array([arrow_box['x_min'], arrow_box['y_min'] - label_size[1]])
else:
text_origin = np.array([arrow_box['x_min'], arrow_box['y_min'] + 1])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=color)
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
if __name__ == '__main__':
yolo = YOLO()
file_names = glob.glob('./video/aaa/*.jpg')
fourcc = cv2.VideoWriter_fourcc(*'MPEG')
video = cv2.VideoWriter('./video/output2.avi', fourcc, 20, (640, 480))
for file_name in file_names:
image = Image.open(file_name)
result = detect_img(yolo, image)
draw_box(image, result)
video.write(cv2.cvtColor(np.array(image.copy()), cv2.COLOR_RGB2BGR))
video.release()
| [
"cv2.VideoWriter_fourcc",
"numpy.floor",
"PIL.Image.open",
"numpy.array",
"glob.glob",
"cv2.VideoWriter",
"PIL.ImageDraw.Draw",
"datetime.datetime.now",
"yolo.YOLO"
] | [((1972, 1993), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (1986, 1993), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3814, 3820), 'yolo.YOLO', 'YOLO', ([], {}), '()\n', (3818, 3820), False, 'from yolo import YOLO, detect_video\n'), ((3838, 3868), 'glob.glob', 'glob.glob', (['"""./video/aaa/*.jpg"""'], {}), "('./video/aaa/*.jpg')\n", (3847, 3868), False, 'import glob\n'), ((3882, 3913), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MPEG'"], {}), "(*'MPEG')\n", (3904, 3913), False, 'import cv2\n'), ((3926, 3988), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""./video/output2.avi"""', 'fourcc', '(20)', '(640, 480)'], {}), "('./video/output2.avi', fourcc, 20, (640, 480))\n", (3941, 3988), False, 'import cv2\n'), ((4038, 4059), 'PIL.Image.open', 'Image.open', (['file_name'], {}), '(file_name)\n', (4048, 4059), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1164, 1178), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1176, 1178), False, 'from datetime import datetime\n'), ((2616, 2686), 'numpy.array', 'np.array', (["[helipad_box['x_min'], helipad_box['y_min'] - label_size[1]]"], {}), "([helipad_box['x_min'], helipad_box['y_min'] - label_size[1]])\n", (2624, 2686), True, 'import numpy as np\n'), ((2727, 2785), 'numpy.array', 'np.array', (["[helipad_box['x_min'], helipad_box['y_min'] + 1]"], {}), "([helipad_box['x_min'], helipad_box['y_min'] + 1])\n", (2735, 2785), True, 'import numpy as np\n'), ((3422, 3488), 'numpy.array', 'np.array', (["[arrow_box['x_min'], arrow_box['y_min'] - label_size[1]]"], {}), "([arrow_box['x_min'], arrow_box['y_min'] - label_size[1]])\n", (3430, 3488), True, 'import numpy as np\n'), ((3529, 3583), 'numpy.array', 'np.array', (["[arrow_box['x_min'], arrow_box['y_min'] + 1]"], {}), "([arrow_box['x_min'], arrow_box['y_min'] + 1])\n", (3537, 3583), True, 'import numpy as np\n'), ((2082, 2118), 'numpy.floor', 'np.floor', (['(0.03 * image.size[1] + 0.5)'], {}), '(0.03 * 
image.size[1] + 0.5)\n', (2090, 2118), True, 'import numpy as np\n')] |
"""
Gripper for Kinova's Jaco robot arm (has three fingers).
"""
import numpy as np
from robosuite.models.grippers.gripper_model import GripperModel
from robosuite.utils.mjcf_utils import xml_path_completion
class JacoThreeFingerGripperBase(GripperModel):
"""
Gripper for Kinova's Jaco robot arm (has three fingers).
Args:
idn (int or str): Number or some other unique identification string for this gripper instance
"""
def __init__(self, idn=0):
super().__init__(xml_path_completion("grippers/jaco_three_finger_gripper.xml"), idn=idn)
def format_action(self, action):
return action
@property
def init_qpos(self):
return np.array([0.5, 0, 0.5, 0, 0.5, 0])
@property
def _important_geoms(self):
return {
"left_finger": [
"index_proximal_collision",
"index_distal_collision",
"index_tip_collision",
"pinky_proximal_collision",
"pinky_distal_collision",
"pinky_tip_collision",
"index_tip_collision",
"pinky_pad_collision",
],
"right_finger": [
"thumb_proximal_collision",
"thumb_distal_collision",
"thumb_tip_collision",
"thumb_pad_collision",
],
"left_fingerpad": ["index_pad_collision", "pinky_pad_collision"],
"right_fingerpad": ["thumb_pad_collision"],
}
class JacoThreeFingerGripper(JacoThreeFingerGripperBase):
"""
Modifies JacoThreeFingerGripperBase to only take one action.
"""
def format_action(self, action):
"""
Maps continuous action into binary output
-1 => open, 1 => closed
Args:
action (np.array): gripper-specific action
Raises:
AssertionError: [Invalid action dimension size]
"""
assert len(action) == self.dof
self.current_action = np.clip(self.current_action - self.speed * np.sign(action), -1.0, 1.0)
return self.current_action
@property
def speed(self):
return 0.005
@property
def dof(self):
return 1
class JacoThreeFingerDexterousGripper(JacoThreeFingerGripperBase):
"""
Dexterous variation of the Jaco gripper in which all finger are actuated independently
"""
def format_action(self, action):
"""
Maps continuous action into binary output
all -1 => open, all 1 => closed
Args:
action (np.array): gripper-specific action
Raises:
AssertionError: [Invalid action dimension size]
"""
assert len(action) == self.dof
self.current_action = np.clip(self.current_action - self.speed * np.sign(action), -1.0, 1.0)
return self.current_action
@property
def speed(self):
return 0.005
@property
def dof(self):
return 3
| [
"numpy.array",
"numpy.sign",
"robosuite.utils.mjcf_utils.xml_path_completion"
] | [((693, 727), 'numpy.array', 'np.array', (['[0.5, 0, 0.5, 0, 0.5, 0]'], {}), '([0.5, 0, 0.5, 0, 0.5, 0])\n', (701, 727), True, 'import numpy as np\n'), ((506, 567), 'robosuite.utils.mjcf_utils.xml_path_completion', 'xml_path_completion', (['"""grippers/jaco_three_finger_gripper.xml"""'], {}), "('grippers/jaco_three_finger_gripper.xml')\n", (525, 567), False, 'from robosuite.utils.mjcf_utils import xml_path_completion\n'), ((2061, 2076), 'numpy.sign', 'np.sign', (['action'], {}), '(action)\n', (2068, 2076), True, 'import numpy as np\n'), ((2819, 2834), 'numpy.sign', 'np.sign', (['action'], {}), '(action)\n', (2826, 2834), True, 'import numpy as np\n')] |
import numpy as np
from string import Template
from collections import deque
from xml.dom.minidom import parseString as parse_xml
from .. import entities as entities_mod
from ..arc import arc_center
from ...resources import get_resource
from ...constants import log
from ...constants import res_path as res
from ... import util
_template_svg = Template(get_resource('svg.xml.template'))
try:
from svg.path import parse_path
except BaseException:
log.warning('SVG path loading unavailable!')
def svg_to_path(file_obj, file_type=None):
"""
Load an SVG file into a Path2D object.
Parameters
-----------
file_obj: open file object
file_type: unused
Returns
-----------
loaded: dict with kwargs for Path2D constructor
"""
# first, we grab all of the path strings from the xml file
xml = parse_xml(file_obj.read())
paths = [p.attributes['d'].value for p in xml.getElementsByTagName('path')]
return _svg_path_convert(paths)
def _svg_path_convert(paths):
"""
Convert an SVG path string into a Path2D object
Parameters
-------------
paths: list of strings
Returns
-------------
drawing: loaded, dict with kwargs for Path2D constructor
"""
def complex_to_float(values):
return np.array([[i.real, i.imag] for i in values])
def load_line(svg_line):
points = complex_to_float([svg_line.point(0.0),
svg_line.point(1.0)])
if not starting:
points[0] = vertices[-1]
entities.append(entities_mod.Line(np.arange(2) + len(vertices)))
vertices.extend(points)
def load_arc(svg_arc):
points = complex_to_float([svg_arc.start,
svg_arc.point(.5),
svg_arc.end])
if not starting:
points[0] = vertices[-1]
entities.append(entities_mod.Arc(np.arange(3) + len(vertices)))
vertices.extend(points)
def load_quadratic(svg_quadratic):
points = complex_to_float([svg_quadratic.start,
svg_quadratic.control,
svg_quadratic.end])
if not starting:
points[0] = vertices[-1]
entities.append(entities_mod.Bezier(np.arange(3) + len(vertices)))
vertices.extend(points)
def load_cubic(svg_cubic):
points = complex_to_float([svg_cubic.start,
svg_cubic.control1,
svg_cubic.control2,
svg_cubic.end])
if not starting:
points[0] = vertices[-1]
entities.append(entities_mod.Bezier(np.arange(4) +
len(vertices)))
vertices.extend(points)
entities = deque()
vertices = deque()
loaders = {'Arc': load_arc,
'Line': load_line,
'CubicBezier': load_cubic,
'QuadraticBezier': load_quadratic}
for svg_string in paths:
starting = True
for svg_entity in parse_path(svg_string):
type_name = svg_entity.__class__.__name__
if type_name in loaders:
loaders[type_name](svg_entity)
loaded = {'entities': np.array(entities),
'vertices': np.array(vertices)}
return loaded
def export_svg(drawing,
return_path=False,
layers=None,
**kwargs):
"""
Export a Path2D object into an SVG file.
Parameters
-----------
drawing : Path2D
Source geometry
return_path : bool
If True return only path string
layers : None, or [str]
Only export specified layers
Returns
-----------
as_svg: str, XML formatted as SVG
"""
if not util.is_instance_named(drawing, 'Path2D'):
raise ValueError('drawing must be Path2D object!')
points = drawing.vertices.view(np.ndarray).copy()
def circle_to_svgpath(center, radius, reverse):
radius_str = format(radius, res.export)
path_str = ' M ' + format(center[0] - radius, res.export) + ','
path_str += format(center[1], res.export)
path_str += ' a ' + radius_str + ',' + radius_str
path_str += ',0,1,' + str(int(reverse)) + ','
path_str += format(2 * radius, res.export) + ',0'
path_str += ' a ' + radius_str + ',' + radius_str
path_str += ',0,1,' + str(int(reverse)) + ','
path_str += format(-2 * radius, res.export) + ',0 Z'
return path_str
def svg_arc(arc, reverse):
"""
arc string: (rx ry x-axis-rotation large-arc-flag sweep-flag x y)+
large-arc-flag: greater than 180 degrees
sweep flag: direction (cw/ccw)
"""
arc_idx = arc.points[::((reverse * -2) + 1)]
vertices = points[arc_idx]
vertex_start, vertex_mid, vertex_end = vertices
center_info = arc_center(vertices)
C, R, angle = (center_info['center'],
center_info['radius'],
center_info['span'])
if arc.closed:
return circle_to_svgpath(C, R, reverse)
large_flag = str(int(angle > np.pi))
sweep_flag = str(int(np.cross(vertex_mid - vertex_start,
vertex_end - vertex_start) > 0.0))
arc_str = move_to(arc_idx[0])
arc_str += 'A {},{} 0 {}, {} {},{}'.format(R,
R,
large_flag,
sweep_flag,
vertex_end[0],
vertex_end[1])
return arc_str
def move_to(vertex_id):
x_ex = format(points[vertex_id][0], res.export)
y_ex = format(points[vertex_id][1], res.export)
move_str = ' M ' + x_ex + ',' + y_ex
return move_str
def svg_discrete(entity, reverse):
"""
Use an entities discrete representation to export a
curve as a polyline
"""
discrete = entity.discrete(points)
# if entity contains no geometry return
if len(discrete) == 0:
return ''
# are we reversing the entity
if reverse:
discrete = discrete[::-1]
# the format string for the SVG path
template = ' M {},{} ' + (' L {},{}' * (len(discrete) - 1))
# apply the data from the discrete curve
result = template.format(*discrete.reshape(-1))
return result
def convert_path(path,
reverse=False,
close=True):
"""
Convert a list of entity indices to SVG.
Parameters
----------------
path : [int]
List of entity indices
reverse : bool
Reverse exported path
close : bool
If True, connect last vertex to first
Returns
-------------
as_svg : str
SVG path string of input path
"""
# if we are only exporting some layers check here
if layers is not None:
# only export if every entity is on layer whitelist
if not all(drawing.layers[i] in layers for i in path):
return ''
path = path[::(reverse * -2) + 1]
converted = []
for i, entity_id in enumerate(path):
# the entity object
entity = drawing.entities[entity_id]
# the class name of the entity
etype = entity.__class__.__name__
if etype in converters:
# export the exact version of the entity
converted.append(converters[etype](entity,
reverse))
else:
# just export the polyline version of the entity
converted.append(svg_discrete(entity,
reverse))
# remove leading and trailing whitespace
as_svg = ' '.join(converted) + ' '
return as_svg
# only converters where we want to do something
# other than export a curve as a polyline
converters = {'Arc': svg_arc}
converted = []
for index, path in enumerate(drawing.paths):
# holes are determined by winding
# trimesh makes all paths clockwise
reverse = not (index in drawing.root)
converted.append(convert_path(path,
reverse=reverse,
close=True))
# entities which haven't been included in a closed path
converted.append(convert_path(drawing.dangling,
reverse=False,
close=False))
# append list of converted into a string
path_str = ''.join(converted).strip()
# return path string without XML wrapping
if return_path:
return path_str
# format as XML
if 'stroke_width' in kwargs:
stroke_width = float(kwargs['stroke_width'])
else:
stroke_width = drawing.extents.max() / 800.0
subs = {'PATH_STRING': path_str,
'MIN_X': points[:, 0].min(),
'MIN_Y': points[:, 1].min(),
'WIDTH': drawing.extents[0],
'HEIGHT': drawing.extents[1],
'STROKE': stroke_width}
result = _template_svg.substitute(subs)
return result
| [
"numpy.cross",
"numpy.array",
"numpy.arange",
"svg.path.parse_path",
"collections.deque"
] | [((2848, 2855), 'collections.deque', 'deque', ([], {}), '()\n', (2853, 2855), False, 'from collections import deque\n'), ((2871, 2878), 'collections.deque', 'deque', ([], {}), '()\n', (2876, 2878), False, 'from collections import deque\n'), ((1294, 1338), 'numpy.array', 'np.array', (['[[i.real, i.imag] for i in values]'], {}), '([[i.real, i.imag] for i in values])\n', (1302, 1338), True, 'import numpy as np\n'), ((3117, 3139), 'svg.path.parse_path', 'parse_path', (['svg_string'], {}), '(svg_string)\n', (3127, 3139), False, 'from svg.path import parse_path\n'), ((3305, 3323), 'numpy.array', 'np.array', (['entities'], {}), '(entities)\n', (3313, 3323), True, 'import numpy as np\n'), ((3351, 3369), 'numpy.array', 'np.array', (['vertices'], {}), '(vertices)\n', (3359, 3369), True, 'import numpy as np\n'), ((1586, 1598), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (1595, 1598), True, 'import numpy as np\n'), ((1933, 1945), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (1942, 1945), True, 'import numpy as np\n'), ((2311, 2323), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (2320, 2323), True, 'import numpy as np\n'), ((2725, 2737), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (2734, 2737), True, 'import numpy as np\n'), ((5280, 5342), 'numpy.cross', 'np.cross', (['(vertex_mid - vertex_start)', '(vertex_end - vertex_start)'], {}), '(vertex_mid - vertex_start, vertex_end - vertex_start)\n', (5288, 5342), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import startup
import os
import numpy as np
import scipy.io
from util.point_cloud import point_cloud_distance
from util.simple_dataset import Dataset3D
from util.app_config import config as app_config
from util.tools import partition_range, to_np_object
from util.quaternion import quaternion_rotate
from util.euler import ypr_from_campos
import torch
from torch.utils.tensorboard import SummaryWriter
from models import model_pc_to as model_pc
from util.system import setup_environment
from run.ShapeRecords import ShapeRecords
import pickle
import pdb
def compute_distance(cfg, source_np, target_np):
"""
compute projection from source to target
"""
num_parts = cfg.pc_eval_chamfer_num_parts
partition = partition_range(source_np.shape[0], num_parts)
min_dist_np = np.zeros((0,))
idx_np = np.zeros((0,))
source_pc = torch.from_numpy(source_np).cuda()
target_pc = torch.from_numpy(target_np).cuda()
for k in range(num_parts):
r = partition[k, :]
src = source_pc[r[0]:r[1]]
_, min_dist, min_idx = point_cloud_distance(src, target_pc)
min_dist_0_np = min_dist.cpu().numpy()
idx_0_np = min_idx.cpu().numpy()
min_dist_np = np.concatenate((min_dist_np, min_dist_0_np), axis=0)
idx_np = np.concatenate((idx_np, idx_0_np), axis=0)
return min_dist_np, idx_np
def get_group(pos):
divs = 2
scale = divs/2
yaw, pitch, roll = ypr_from_campos(pos[0], pos[1], pos[2])
yaw = yaw + np.pi
# get everything from 0 to 2*pi
yaw = yaw%(2*np.pi)+0.00000001
pitch = pitch%(2*np.pi)+0.00000001
roll = roll%(2*np.pi) + 0.00000001
q1 = np.ceil(scale*yaw/np.pi)-1
q2 = np.ceil(scale*pitch/np.pi)-1
q3 = np.ceil(scale*roll/np.pi)-1
return q1*np.square(divs)+q2*divs+q3
def run_eval():
divs = 2
cfg = app_config
exp_dir = cfg.checkpoint_dir
num_views = cfg.num_views
eval_unsup = cfg.eval_unsupervised_shape
dataset_folder = cfg.inp_dir
gt_dir = os.path.join(cfg.gt_pc_dir, cfg.synth_set)
# g = tf.Graph()
# with g.as_default():
# source_pc = tf.placeholder(dtype=tf.float64, shape=[None, 3])
# target_pc = tf.placeholder(dtype=tf.float64, shape=[None, 3])
# quat_tf = tf.placeholder(dtype=tf.float64, shape=[1, 4])
# _, min_dist, min_idx = point_cloud_distance(source_pc, target_pc)
# source_pc_2 = tf.placeholder(dtype=tf.float64, shape=[1, None, 3])
# rotated_pc = quaternion_rotate(source_pc_2, quat_tf)
# sess = tf.Session(config=config)
# sess.run(tf.global_variables_initializer())
# sess.run(tf.local_variables_initializer())
save_pred_name = "{}_{}".format(cfg.save_predictions_dir, cfg.eval_split)
save_dir = os.path.join(exp_dir, cfg.save_predictions_dir)
if eval_unsup:
reference_rotation = scipy.io.loadmat("{}/final_reference_rotation.mat".format(exp_dir))["rotation"]
dataset = ShapeRecords(dataset_folder, cfg, 'test')
if cfg.models_list:
model_names = parse_lines(cfg.models_list)
else:
model_names = dataset.file_names
num_models = len(model_names)
# making groups for samples and views according to 8 groups of yaw, pitch, roll
chamfer_dict = {}
for j in range(np.power(divs, 3)):
chamfer_dict[j] = np.zeros((0,2))
for k in range(num_models):
sample = dataset.__getitem__(k)
print("{}/{}".format(k, num_models))
print(model_names[k])
gt_filename = "{}/{}.mat".format(gt_dir, model_names[k]).replace('_features.p', '')
mat_filename = "{}/{}_pc.pkl".format(save_dir, model_names[k])
if not os.path.isfile(gt_filename) or not os.path.isfile(mat_filename):
continue
with open(mat_filename, 'rb') as handle:
data = pickle.load(handle)
all_pcs = np.squeeze(data["points"])
if "num_points" in data:
all_pcs_nums = np.squeeze(data["num_points"])
has_number = True
else:
has_number = False
obj = scipy.io.loadmat(gt_filename)
Vgt = obj["points"]
for i in range(num_views):
chamfer_dists_current = np.zeros((2), dtype=np.float64)
pred = all_pcs[i, :, :]
if has_number:
pred = pred[0:all_pcs_nums[i], :]
if eval_unsup:
pred = np.expand_dims(pred, 0)
pred = quaternion_rotate(torch.from_numpy(pred).cuda(),
torch.from_numpy(reference_rotation).cuda()).cpu().numpy()
pred = np.squeeze(pred)
pred_to_gt, idx_np = compute_distance(cfg, pred, Vgt)
gt_to_pred, _ = compute_distance(cfg, Vgt, pred)
chamfer_dists_current[0] = np.mean(pred_to_gt)
chamfer_dists_current[1] = np.mean(gt_to_pred)
is_nan = np.isnan(pred_to_gt)
assert (not np.any(is_nan))
campos = sample['cam_pos'][i]
g = get_group(campos)
chamfer_dict[g] = np.concatenate((chamfer_dict[g], np.expand_dims(chamfer_dists_current, 0)))
print(i, ":", chamfer_dists_current)
# current_mean = np.mean(chamfer_dists_current, 0)
# print("total:", current_mean)
for key in chamfer_dict:
print(key, np.mean(chamfer_dict[key],0)*100)
# scipy.io.savemat(os.path.join(exp_dir, "chamfer_{}.mat".format(save_pred_name)),
# {"chamfer": chamfer_dists,
# "model_names": to_np_object(model_names)})
#
# file = open(os.path.join(exp_dir, "chamfer_{}.txt".format(save_pred_name)), "w")
# file.write("{} {}\n".format(final[0], final[1]))
# file.close()
def eval():
    """Set up the environment and the test dataset, then run the evaluation loop.

    NOTE: intentionally shadows the builtin ``eval``; the name is kept because
    ``main`` calls it.
    """
    cfg = app_config
    setup_environment(cfg)
    dataset_folder = cfg.inp_dir
    # NOTE(review): `dataset`/`dataset_loader` are never passed to run_eval(),
    # which takes no arguments -- presumably it rebuilds them from `cfg`.
    # Construction is kept in case ShapeRecords has required side effects;
    # confirm and drop if not.  Dead locals (`device`, `train_dir`,
    # `split_name`) from the original were removed.
    dataset = ShapeRecords(dataset_folder, cfg, 'test')
    dataset_loader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=cfg.batch_size, shuffle=cfg.shuffle_dataset,
                                                 num_workers=4, drop_last=True)
    run_eval()
def test_experiment():
    """Smoke test: load one sample and convert its camera position to Euler angles."""
    cfg = app_config
    dataset_folder = cfg.inp_dir
    dataset = ShapeRecords(dataset_folder, cfg, 'test')
    dataset_loader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=cfg.batch_size, shuffle=cfg.shuffle_dataset,
                                                 num_workers=4, drop_last=True)
    sample = dataset.__getitem__(1)
    campos = sample['cam_pos']
    # Bug fix: the original referenced an undefined name `pos` here (NameError);
    # the camera position is stored in `campos`.
    yaw, pitch, roll = ypr_from_campos(campos[0], campos[1], campos[2])
    # Shift yaw from [-pi, pi) into [0, 2*pi).
    yaw = yaw + np.pi
def main():
    """Entry point: run the full evaluation pipeline."""
    eval()
    #test_experiment()
if __name__ == '__main__':
    # tf.app.run()  # legacy TensorFlow entry point, superseded by a direct call
    main()
| [
"run.ShapeRecords.ShapeRecords",
"util.euler.ypr_from_campos",
"numpy.isnan",
"os.path.isfile",
"pickle.load",
"numpy.mean",
"os.path.join",
"util.point_cloud.point_cloud_distance",
"torch.utils.data.DataLoader",
"numpy.power",
"util.system.setup_environment",
"numpy.ceil",
"numpy.square",
... | [((755, 801), 'util.tools.partition_range', 'partition_range', (['source_np.shape[0]', 'num_parts'], {}), '(source_np.shape[0], num_parts)\n', (770, 801), False, 'from util.tools import partition_range, to_np_object\n'), ((820, 834), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (828, 834), True, 'import numpy as np\n'), ((848, 862), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (856, 862), True, 'import numpy as np\n'), ((1459, 1498), 'util.euler.ypr_from_campos', 'ypr_from_campos', (['pos[0]', 'pos[1]', 'pos[2]'], {}), '(pos[0], pos[1], pos[2])\n', (1474, 1498), False, 'from util.euler import ypr_from_campos\n'), ((2032, 2074), 'os.path.join', 'os.path.join', (['cfg.gt_pc_dir', 'cfg.synth_set'], {}), '(cfg.gt_pc_dir, cfg.synth_set)\n', (2044, 2074), False, 'import os\n'), ((2789, 2836), 'os.path.join', 'os.path.join', (['exp_dir', 'cfg.save_predictions_dir'], {}), '(exp_dir, cfg.save_predictions_dir)\n', (2801, 2836), False, 'import os\n'), ((2981, 3022), 'run.ShapeRecords.ShapeRecords', 'ShapeRecords', (['dataset_folder', 'cfg', '"""test"""'], {}), "(dataset_folder, cfg, 'test')\n", (2993, 3022), False, 'from run.ShapeRecords import ShapeRecords\n'), ((5848, 5870), 'util.system.setup_environment', 'setup_environment', (['cfg'], {}), '(cfg)\n', (5865, 5870), False, 'from util.system import setup_environment\n'), ((6039, 6080), 'run.ShapeRecords.ShapeRecords', 'ShapeRecords', (['dataset_folder', 'cfg', '"""test"""'], {}), "(dataset_folder, cfg, 'test')\n", (6051, 6080), False, 'from run.ShapeRecords import ShapeRecords\n'), ((6102, 6230), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'cfg.batch_size', 'shuffle': 'cfg.shuffle_dataset', 'num_workers': '(4)', 'drop_last': '(True)'}), '(dataset, batch_size=cfg.batch_size, shuffle=cfg\n .shuffle_dataset, num_workers=4, drop_last=True)\n', (6129, 6230), False, 'import torch\n'), ((6432, 6473), 'run.ShapeRecords.ShapeRecords', 'ShapeRecords', 
(['dataset_folder', 'cfg', '"""test"""'], {}), "(dataset_folder, cfg, 'test')\n", (6444, 6473), False, 'from run.ShapeRecords import ShapeRecords\n'), ((6495, 6623), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'cfg.batch_size', 'shuffle': 'cfg.shuffle_dataset', 'num_workers': '(4)', 'drop_last': '(True)'}), '(dataset, batch_size=cfg.batch_size, shuffle=cfg\n .shuffle_dataset, num_workers=4, drop_last=True)\n', (6522, 6623), False, 'import torch\n'), ((6808, 6847), 'util.euler.ypr_from_campos', 'ypr_from_campos', (['pos[0]', 'pos[1]', 'pos[2]'], {}), '(pos[0], pos[1], pos[2])\n', (6823, 6847), False, 'from util.euler import ypr_from_campos\n'), ((1090, 1126), 'util.point_cloud.point_cloud_distance', 'point_cloud_distance', (['src', 'target_pc'], {}), '(src, target_pc)\n', (1110, 1126), False, 'from util.point_cloud import point_cloud_distance\n'), ((1237, 1289), 'numpy.concatenate', 'np.concatenate', (['(min_dist_np, min_dist_0_np)'], {'axis': '(0)'}), '((min_dist_np, min_dist_0_np), axis=0)\n', (1251, 1289), True, 'import numpy as np\n'), ((1307, 1349), 'numpy.concatenate', 'np.concatenate', (['(idx_np, idx_0_np)'], {'axis': '(0)'}), '((idx_np, idx_0_np), axis=0)\n', (1321, 1349), True, 'import numpy as np\n'), ((1681, 1709), 'numpy.ceil', 'np.ceil', (['(scale * yaw / np.pi)'], {}), '(scale * yaw / np.pi)\n', (1688, 1709), True, 'import numpy as np\n'), ((1717, 1747), 'numpy.ceil', 'np.ceil', (['(scale * pitch / np.pi)'], {}), '(scale * pitch / np.pi)\n', (1724, 1747), True, 'import numpy as np\n'), ((1755, 1784), 'numpy.ceil', 'np.ceil', (['(scale * roll / np.pi)'], {}), '(scale * roll / np.pi)\n', (1762, 1784), True, 'import numpy as np\n'), ((3310, 3327), 'numpy.power', 'np.power', (['divs', '(3)'], {}), '(divs, 3)\n', (3318, 3327), True, 'import numpy as np\n'), ((3356, 3372), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {}), '((0, 2))\n', (3364, 3372), True, 'import numpy as np\n'), ((3907, 3933), 'numpy.squeeze', 
'np.squeeze', (["data['points']"], {}), "(data['points'])\n", (3917, 3933), True, 'import numpy as np\n'), ((5894, 5919), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5917, 5919), False, 'import torch\n'), ((879, 906), 'torch.from_numpy', 'torch.from_numpy', (['source_np'], {}), '(source_np)\n', (895, 906), False, 'import torch\n'), ((930, 957), 'torch.from_numpy', 'torch.from_numpy', (['target_np'], {}), '(target_np)\n', (946, 957), False, 'import torch\n'), ((3869, 3888), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (3880, 3888), False, 'import pickle\n'), ((3994, 4024), 'numpy.squeeze', 'np.squeeze', (["data['num_points']"], {}), "(data['num_points'])\n", (4004, 4024), True, 'import numpy as np\n'), ((4244, 4273), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.float64'}), '(2, dtype=np.float64)\n', (4252, 4273), True, 'import numpy as np\n'), ((4844, 4863), 'numpy.mean', 'np.mean', (['pred_to_gt'], {}), '(pred_to_gt)\n', (4851, 4863), True, 'import numpy as np\n'), ((4903, 4922), 'numpy.mean', 'np.mean', (['gt_to_pred'], {}), '(gt_to_pred)\n', (4910, 4922), True, 'import numpy as np\n'), ((4945, 4965), 'numpy.isnan', 'np.isnan', (['pred_to_gt'], {}), '(pred_to_gt)\n', (4953, 4965), True, 'import numpy as np\n'), ((1798, 1813), 'numpy.square', 'np.square', (['divs'], {}), '(divs)\n', (1807, 1813), True, 'import numpy as np\n'), ((3714, 3741), 'os.path.isfile', 'os.path.isfile', (['gt_filename'], {}), '(gt_filename)\n', (3728, 3741), False, 'import os\n'), ((3749, 3777), 'os.path.isfile', 'os.path.isfile', (['mat_filename'], {}), '(mat_filename)\n', (3763, 3777), False, 'import os\n'), ((4441, 4464), 'numpy.expand_dims', 'np.expand_dims', (['pred', '(0)'], {}), '(pred, 0)\n', (4455, 4464), True, 'import numpy as np\n'), ((4660, 4676), 'numpy.squeeze', 'np.squeeze', (['pred'], {}), '(pred)\n', (4670, 4676), True, 'import numpy as np\n'), ((4990, 5004), 'numpy.any', 'np.any', (['is_nan'], {}), '(is_nan)\n', (4996, 
5004), True, 'import numpy as np\n'), ((5402, 5431), 'numpy.mean', 'np.mean', (['chamfer_dict[key]', '(0)'], {}), '(chamfer_dict[key], 0)\n', (5409, 5431), True, 'import numpy as np\n'), ((5160, 5200), 'numpy.expand_dims', 'np.expand_dims', (['chamfer_dists_current', '(0)'], {}), '(chamfer_dists_current, 0)\n', (5174, 5200), True, 'import numpy as np\n'), ((4506, 4528), 'torch.from_numpy', 'torch.from_numpy', (['pred'], {}), '(pred)\n', (4522, 4528), False, 'import torch\n'), ((4578, 4614), 'torch.from_numpy', 'torch.from_numpy', (['reference_rotation'], {}), '(reference_rotation)\n', (4594, 4614), False, 'import torch\n')] |
# --- Do not remove these libs ---
import numpy as np
# --------------------------------
import talib.abstract as ta
from pandas import DataFrame
import freqtrade.vendor.qtpylib.indicators as qtpylib
from freqtrade.strategy.interface import IStrategy
def bollinger_bands(stock_price, window_size, num_of_std):
    """Return the rolling mean and the lower Bollinger band of a price series.

    The lower band sits ``num_of_std`` rolling standard deviations below the
    rolling mean computed over ``window_size`` samples.
    """
    rolling_mean = stock_price.rolling(window=window_size).mean()
    band_offset = num_of_std * stock_price.rolling(window=window_size).std()
    return rolling_mean, rolling_mean - band_offset
class CombinedBinHAndCluc(IStrategy):
    """Hybrid strategy merging the "BinH" dip-buy and "Cluc" Bollinger entries.

    Backtesting notes from the original author:
      - best performance was reached with ``max_open_trades`` = 2 (on average,
        for any market), so it pays off more to raise ``stake_amount`` than
        ``max_open_trades``
      - in a persistently green market (like JAN 2018) the best performance is
        reached with ``max_open_trades`` = 2 and ``minimal_roi`` = 0.01
    """

    minimal_roi = {
        "0": 0.02
    }

    stoploss = -0.15

    ticker_interval = '5m'

    def populate_indicators(self, dataframe: DataFrame) -> DataFrame:
        """Attach every indicator used by the entry and exit rules."""
        # Custom 40-period Bollinger (mean + lower band only) for the BinH entry.
        bin_mid, bin_lower = bollinger_bands(dataframe['close'], window_size=40, num_of_std=2)
        dataframe['mid'] = np.nan_to_num(bin_mid)
        dataframe['lower'] = np.nan_to_num(bin_lower)
        # Candle-geometry helper columns.
        dataframe['bbdelta'] = (dataframe['mid'] - dataframe['lower']).abs()
        dataframe['pricedelta'] = (dataframe['open'] - dataframe['close']).abs()
        dataframe['closedelta'] = (dataframe['close'] - dataframe['close'].shift()).abs()
        dataframe['tail'] = (dataframe['close'] - dataframe['low']).abs()
        # RSI plus an EMA-smoothed RSI (ta.EMA reads a 'close' column).
        dataframe['rsi'] = ta.RSI(dataframe, timeperiod=5)
        rsi_as_close = DataFrame(dataframe['rsi']).rename(columns={'rsi': 'close'})
        dataframe['emarsi'] = ta.EMA(rsi_as_close, timeperiod=5)
        dataframe['macd'] = ta.MACD(dataframe)['macd']
        dataframe['adx'] = ta.ADX(dataframe)
        # Standard 20-period Bollinger bands on the typical price for the Cluc entry.
        qt_bollinger = qtpylib.bollinger_bands(qtpylib.typical_price(dataframe), window=20, stds=2)
        dataframe['bb_lowerband'] = qt_bollinger['lower']
        dataframe['bb_middleband'] = qt_bollinger['mid']
        dataframe['bb_upperband'] = qt_bollinger['upper']
        # NOTE(review): despite the column name, this EMA uses timeperiod=50 --
        # preserved as-is to keep behaviour identical; confirm with the author.
        dataframe['ema100'] = ta.EMA(dataframe, timeperiod=50)
        return dataframe

    def populate_buy_trend(self, dataframe: DataFrame) -> DataFrame:
        """Mark entries where either the BinH or the Cluc condition fires."""
        bin_entry = (
            dataframe['lower'].shift().gt(0)
            & dataframe['bbdelta'].gt(dataframe['close'] * 0.008)
            & dataframe['closedelta'].gt(dataframe['close'] * 0.0175)
            & dataframe['tail'].lt(dataframe['bbdelta'] * 0.25)
            & dataframe['close'].lt(dataframe['lower'].shift())
            & dataframe['close'].le(dataframe['close'].shift())
        )
        cluc_entry = (
            (dataframe['close'] < dataframe['ema100'])
            & (dataframe['close'] < 0.985 * dataframe['bb_lowerband'])
            & (dataframe['volume'] < (dataframe['volume'].rolling(window=30).mean().shift(1) * 20))
        )
        dataframe.loc[bin_entry | cluc_entry, 'buy'] = 1
        return dataframe

    def populate_sell_trend(self, dataframe: DataFrame) -> DataFrame:
        """Exit as soon as the close climbs back above the Bollinger mid band."""
        sell_signal = dataframe['close'] > dataframe['bb_middleband']
        dataframe.loc[sell_signal, 'sell'] = 1
        return dataframe
| [
"pandas.DataFrame",
"numpy.nan_to_num",
"talib.abstract.EMA",
"talib.abstract.RSI",
"talib.abstract.ADX",
"freqtrade.vendor.qtpylib.indicators.typical_price",
"talib.abstract.MACD"
] | [((1235, 1253), 'numpy.nan_to_num', 'np.nan_to_num', (['mid'], {}), '(mid)\n', (1248, 1253), True, 'import numpy as np\n'), ((1283, 1303), 'numpy.nan_to_num', 'np.nan_to_num', (['lower'], {}), '(lower)\n', (1296, 1303), True, 'import numpy as np\n'), ((1653, 1684), 'talib.abstract.RSI', 'ta.RSI', (['dataframe'], {'timeperiod': '(5)'}), '(dataframe, timeperiod=5)\n', (1659, 1684), True, 'import talib.abstract as ta\n'), ((1795, 1825), 'talib.abstract.EMA', 'ta.EMA', (['rsiframe'], {'timeperiod': '(5)'}), '(rsiframe, timeperiod=5)\n', (1801, 1825), True, 'import talib.abstract as ta\n'), ((1841, 1859), 'talib.abstract.MACD', 'ta.MACD', (['dataframe'], {}), '(dataframe)\n', (1848, 1859), True, 'import talib.abstract as ta\n'), ((1928, 1945), 'talib.abstract.ADX', 'ta.ADX', (['dataframe'], {}), '(dataframe)\n', (1934, 1945), True, 'import talib.abstract as ta\n'), ((2237, 2269), 'talib.abstract.EMA', 'ta.EMA', (['dataframe'], {'timeperiod': '(50)'}), '(dataframe, timeperiod=50)\n', (2243, 2269), True, 'import talib.abstract as ta\n'), ((1990, 2022), 'freqtrade.vendor.qtpylib.indicators.typical_price', 'qtpylib.typical_price', (['dataframe'], {}), '(dataframe)\n', (2011, 2022), True, 'import freqtrade.vendor.qtpylib.indicators as qtpylib\n'), ((1704, 1731), 'pandas.DataFrame', 'DataFrame', (["dataframe['rsi']"], {}), "(dataframe['rsi'])\n", (1713, 1731), False, 'from pandas import DataFrame\n')] |
import math
import numpy as np
import torch
from torch import nn
class BufferList(nn.Module):
    """Container that registers a sequence of tensors as module buffers.

    Mirrors the interface of ``nn.ParameterList`` for non-parameter data:
    supports construction from an iterable, ``extend``, ``len`` and iteration.
    """

    def __init__(self, buffers=None):
        super(BufferList, self).__init__()
        if buffers is not None:
            self.extend(buffers)

    def extend(self, buffers):
        """Append the given tensors, naming each by its running index.

        Returns ``self`` so calls can be chained.
        """
        start = len(self)
        for idx, buf in enumerate(buffers, start=start):
            self.register_buffer(str(idx), buf)
        return self

    def __len__(self):
        return len(self._buffers)

    def __iter__(self):
        return iter(self._buffers.values())
class AnchorGenerator(nn.Module):
    """Produce 1-D (temporal) anchor windows over a feature map.

    Base anchors of the configured ``sizes`` are shifted along the time axis
    every ``anchor_stride`` steps.
    """

    def __init__(
        self,
        sizes=(4, 8, 16),
        anchor_stride=8
    ):
        super(AnchorGenerator, self).__init__()
        base = generate_anchors(anchor_stride, sizes).float()
        self.stride = anchor_stride
        self.cell_anchors = BufferList([base])

    def num_anchors_per_location(self):
        """Number of anchors placed at every stride position, per anchor set."""
        return [len(cell) for cell in self.cell_anchors]

    def grid_anchors(self, time_width):
        """Shift the base anchors along the time axis up to ``time_width``."""
        all_anchors = []
        for base_anchors in self.cell_anchors:
            shifts = torch.arange(
                0, time_width + 1, step=self.stride,
                dtype=torch.float32, device=base_anchors.device
            )
            # Broadcast every shift against every base anchor endpoint, then
            # flatten back to (num_positions * num_anchors, 2) windows.
            shifted = shifts.view(-1, 1, 1) + base_anchors.view(1, -1, 1)
            all_anchors.append(shifted.reshape(-1, 2))
        return all_anchors

    def forward(self, rel_feats):
        # rel_feats: N x C x T -- the anchors depend only on the temporal extent T.
        return self.grid_anchors(rel_feats.shape[2])
def generate_anchors(
    stride=8, sizes=(4, 8, 16)
):
    """Return base anchor windows for the given sizes.

    ``sizes`` are expressed in absolute units and are normalised by ``stride``
    before enumeration by ``_generate_anchors``.
    """
    return _generate_anchors(
        stride,
        # `np.float` was a deprecated alias of the builtin `float` and was
        # removed in NumPy 1.24; use `float` directly.
        np.array(sizes, dtype=float) / stride
    )
def _generate_anchors(stride, sizes):
    """Generate anchor (reference) windows by enumerating the given scale
    factors around a reference window of length ``stride``.

    Returns a torch tensor of (start, end) pairs.
    """
    # dtype=float replaces the deprecated `np.float` alias removed in NumPy 1.24.
    anchor = np.array([0, stride], dtype=float)
    anchors = _scale_enum(anchor, sizes)
    return torch.from_numpy(anchors)
def _scale_enum(anchor, sizes):
    """Enumerate one anchor window per scale factor around ``anchor``.

    ``anchor`` is a (centre, width) pair; each scale multiplies the width.
    """
    ctr = anchor[0]
    width = anchor[1]
    scaled_widths = width * sizes
    return _mkanchors(scaled_widths, ctr)
def _mkanchors(ws, ctr):
"""Given a vector of widths (ws) around a center (ctr),
output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
anchors = np.hstack(
(
ctr - 0.5 * ws,
ctr + 0.5 * ws,
)
)
return anchors
def make_anchor_generator(cfg):
    """Build the temporal AnchorGenerator from the RELPN.DPN config section."""
    sizes = cfg.RELPN.DPN.ANCHOR_SIZES
    stride = cfg.RELPN.DPN.ANCHOR_STRIDE
    assert len(stride) == 1, "should have a single ANCHOR_STRIDE"
    # NOTE(review): `stride` is still a length-1 sequence here, but
    # AnchorGenerator uses its stride as a scalar step -- confirm whether
    # callers expect `stride[0]` to be passed instead.
    return AnchorGenerator(sizes, stride)
if __name__=='__main__':
    # Smoke test: batch of 2 feature maps (N x C x T) with 60 time steps,
    # four anchor sizes placed every 7.5 steps along the time axis.
    rel_feats = torch.randn(2,4,60) # NxCxT
    anchor_sizes = (15, 30, 45, 60)
    anchor_stride = 7.5
    anchor_generator = AnchorGenerator(anchor_sizes, anchor_stride)
    anchors = anchor_generator(rel_feats)
    print(anchors)
print(anchor_generator.num_anchors_per_location()[0]) | [
"torch.randn",
"numpy.hstack",
"numpy.array",
"torch.arange",
"torch.from_numpy"
] | [((2043, 2080), 'numpy.array', 'np.array', (['[0, stride]'], {'dtype': 'np.float'}), '([0, stride], dtype=np.float)\n', (2051, 2080), True, 'import numpy as np\n'), ((2133, 2158), 'torch.from_numpy', 'torch.from_numpy', (['anchors'], {}), '(anchors)\n', (2149, 2158), False, 'import torch\n'), ((2549, 2592), 'numpy.hstack', 'np.hstack', (['(ctr - 0.5 * ws, ctr + 0.5 * ws)'], {}), '((ctr - 0.5 * ws, ctr + 0.5 * ws))\n', (2558, 2592), True, 'import numpy as np\n'), ((2999, 3020), 'torch.randn', 'torch.randn', (['(2)', '(4)', '(60)'], {}), '(2, 4, 60)\n', (3010, 3020), False, 'import torch\n'), ((1260, 1349), 'torch.arange', 'torch.arange', (['(0)', '(time_width + 1)'], {'step': 'self.stride', 'dtype': 'torch.float32', 'device': 'device'}), '(0, time_width + 1, step=self.stride, dtype=torch.float32,\n device=device)\n', (1272, 1349), False, 'import torch\n'), ((1814, 1845), 'numpy.array', 'np.array', (['sizes'], {'dtype': 'np.float'}), '(sizes, dtype=np.float)\n', (1822, 1845), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import seaborn as sb
import numpy as np
def set_figure(size, subplots=(1,1), context='paper', style='darkgrid',font_scale = 1, l=0.2, w=0.1, h=0.1, b=0.1):
    """Create a seaborn-styled figure with a grid of axes and adjusted margins.

    Args:
        size: figure size in inches, passed to ``plt.subplots``.
        subplots: (rows, cols) grid shape.
        context, style, font_scale: forwarded to ``sb.set``.
        l, w, h, b: left margin, width spacing, height spacing, bottom margin.

    Returns the (figure, axes) pair.
    """
    sb.set(context=context, style=style, font_scale=font_scale)
    n_rows, n_cols = subplots
    fig, ax = plt.subplots(n_rows, n_cols, figsize=size)
    plt.subplots_adjust(left=l, wspace=w, hspace=h, bottom=b)
    return fig, ax
def plot_logistic_loss(alpha, font_scale):
    """Plot the smoothed (logistic) quantile loss with its gradient and Hessian.

    Solid curves use the shift log((1-alpha)/alpha) so the loss minimum aligns
    with quantile level ``alpha``; dashed curves repeat the same quantities
    with shift = 0 for comparison.  Saved to ``figs/quantile_loss.pdf``.
    """
    N = 200
    size = (4,3)
    # Error grid epsilon_tau in [-10, 10) with 0.1 spacing.
    x = (np.arange(N)-N/2)/10
    # Horizontal shift that places the softplus kink at quantile level alpha.
    shift = np.log((1 - alpha) / alpha)
    # Smoothed pinball loss and its first and second derivatives w.r.t. x.
    softplus = np.log(np.exp(x)+np.exp(-shift)) + (alpha-1)*x
    loss_grad_i = (np.exp(x + shift) / (1 + np.exp(x + shift)) + alpha - 1)
    hessian_i = np.exp(x + shift) / ((1 + np.exp(x + shift)) ** 2)
    # Same quantities without the shift (plotted dashed as a reference).
    shift = 0 #- np.log((1 - alpha) / alpha)
    softplus_s = np.log(np.exp(x)+np.exp(-shift)) + (alpha-1)*x
    loss_grad_i_s = (np.exp(x + shift) / (1 + np.exp(x + shift)) + alpha - 1)
    hessian_i_s = np.exp(x + shift) / ((1 + np.exp(x + shift)) ** 2)
    colors = plt.get_cmap('plasma',12)
    sb.set(context='paper',style='dark', font_scale=font_scale)
    plt.figure(figsize=size, tight_layout=True)
    ax1 = plt.subplot(111)
    # Loss curves on the left axis.
    l1 = plt.plot(x,softplus, c=colors(2))
    l1_s = plt.plot(x, softplus_s, c=colors(2), linestyle='--')
    # Derivative curves share the x axis but use a second y scale; twinx()
    # makes ax2 the current axes, so the following plt.plot calls land on it.
    ax2 = ax1.twinx()
    l2 = plt.plot(x, loss_grad_i, c=colors(5), label=r'$\tilde{\mathcal{l}}^{\ \prime}_q$')
    l3 = plt.plot(x, hessian_i, c=colors(7), label=r'$\tilde{\mathcal{l}}^{\ \prime\prime}_q$')
    l2_s = plt.plot(x, loss_grad_i_s, c=colors(5), linestyle='--', label=r'$\tilde{\mathcal{l}}^{\ \prime}_q$')
    l3_s = plt.plot(x, hessian_i_s, c=colors(7), linestyle='--', label=r'$\tilde{\mathcal{l}}^{\ \prime\prime}_q$')
    # One legend entry per quantity (solid variants only).
    ax1.legend(l1+l2+l3, [r'$\tilde{\mathcal{l}}_q$',r'$\tilde{\mathcal{l}}^{\ \prime}_q$',r'$\tilde{\mathcal{l}}^{\ \prime\prime}_q$'], loc=0, fontsize='small')
    ax1.set_ylabel(r'$\tilde{\mathcal{l}}_q$')
    ax2.set_ylabel(r"$\tilde{\mathcal{l}}^{\ \prime}_q, \tilde{\mathcal{l}}^{\ \prime\prime}_q$")
    # Reference lines: vertical at zero error, horizontal at zero derivative.
    ax1.vlines(0,ax1.get_ylim()[0],ax1.get_ylim()[1]+0.1,linestyles='--',color='gray')
    ax2.hlines(0,ax2.get_xlim()[0],ax2.get_xlim()[1],linestyles='--',color='gray')
    ax1.set_xlabel(r'$\epsilon_{\tau}$')
    plt.savefig('figs/quantile_loss.pdf')
def plot_crsp(results, size, font_scale):
    """Plot per-step CRPS and quantile-crossing curves for every model.

    Top panel: CRPS versus forecast step; bottom panel: fraction of quantile
    crossings.  Saved to ``figs/mean_crps.pdf``.
    """
    name = 'mean_crps'
    fig, ax = set_figure(size=size, subplots=(2, 1), w=0.3, h=0, b=0.2, font_scale=font_scale)
    # Top panel: CRPS as a function of the step ahead.
    plt.sca(ax[0])
    for model, res in results.items():
        plt.plot(res['crps_t'], label=model)
    plt.legend(fontsize='small')
    plt.ylabel(r'$Qs \quad [kW]$')
    # Bottom panel: average number of quantile crossings per step.
    plt.sca(ax[1])
    for model, res in results.items():
        plt.plot(res['crosses'], label=model)
    plt.legend(fontsize='small')
    plt.ylabel(r'$\overline{\chi} \quad [-]$')
    plt.xlabel('step ahead [h]')
    plt.savefig('figs/{}.pdf'.format(name))
def plot_reliability(results,size,font_scale):
    """Plot one reliability (calibration) diagram per model in *results*.

    Each panel shows, for every forecast step, the empirical coverage of the
    predicted quantiles against the nominal levels; the identity line marks
    perfect calibration.  Saved to ``figs/reliability.pdf``.

    Args:
        results: dict mapping model name -> dict whose 'reliability' entry is
            an array of shape (n_quantiles, n_steps).
        size: figure size forwarded to ``set_figure``.
        font_scale: seaborn font scaling factor.
    """
    name = 'reliability'
    n_res = len(results)
    f,ax = set_figure(size=size,subplots=(1,n_res),w=0.05, h=0,b=0.2,font_scale=font_scale)
    z = 0  # panel (column) index
    # Nominal quantile levels, inferred from the first entry's array shape.
    n_q = results[list(results.keys())[0]]['reliability'].shape[0]
    alphas = np.linspace(1 / n_q, 1 - 1 / n_q, n_q)
    for k,v in results.items():
        plt.sca(ax[z])
        n = v['reliability'].shape[1]  # number of forecast steps
        cm = plt.get_cmap('viridis',n)  # one colour per step ahead
        plt.plot(alphas,alphas)  # identity line = perfect calibration
        for i in range(n):
            d = v['reliability'][:, i]
            plt.plot(alphas, d, label=k, c = cm(i), alpha=0.8)
        plt.xticks(rotation=90)
        ax[z].set_title(k)
        ax[z].set_xticks(alphas)
        ax[z].set_xticklabels(['{:.2f}'.format(alpha) for alpha in alphas])
        ax[z].set_xlabel(r'$\alpha$ [-]')
        if z > 0:
            # Only the leftmost panel keeps y tick labels (limits are shared).
            ax[z].set_yticklabels([])
        z += 1
    uniform_lims(ax, 'y')
    plt.sca(ax[0])
    ax[0].set_ylabel(r'$r_{\tau_i}$ [-]')
    # NOTE(review): `cm` leaks from the last loop iteration, and the colorbar
    # hard-codes a 1..24 step range -- confirm all models share 24 steps.
    add_colorbar(cm, 1, 24, 'step ahead [h]')
    plt.savefig('figs/{}.pdf'.format(name))
def get_dists_from_xy(x, y):
    """Euclidean distance from the point (x, y) to its projection on the diagonal.

    The projection point is p = ((x+y)/2, (x+y)/2), the nearest point on the
    identity line.  Works element-wise on scalars and numpy arrays alike.
    """
    proj = (x + y) / 2
    return ((proj - x) ** 2 + (proj - y) ** 2) ** 0.5
def plot_reliability_tilted(results, size, font_scale):
    """Plot the tilted reliability deviation (nominal minus empirical coverage).

    One panel per model; a value of zero means perfect calibration at that
    quantile level.  Saved to ``figs/reliability_tilted.pdf``.
    """
    name = 'reliability_tilted'
    fig, ax = set_figure(size=size, subplots=(1, len(results)), w=0.05, h=0, b=0.2, font_scale=font_scale)
    # Nominal quantile levels, inferred from the first entry's array shape.
    first_key = next(iter(results))
    n_q = results[first_key]['reliability'].shape[0]
    alphas = np.linspace(1 / n_q, 1 - 1 / n_q, n_q)
    for col, (model, res) in enumerate(results.items()):
        plt.sca(ax[col])
        n_steps = res['reliability'].shape[1]
        cm = plt.get_cmap('viridis', n_steps)  # one colour per step ahead
        for step in range(n_steps):
            deviation = alphas - res['reliability'][:, step]
            plt.plot(alphas, deviation, label=model, c=cm(step), alpha=0.8)
        plt.xticks(rotation=90)
        ax[col].set_title(model)
        ax[col].set_xticks(alphas)
        ax[col].set_xticklabels(['{:.2f}'.format(a) for a in alphas])
        ax[col].set_xlabel(r'$\alpha$ [-]')
        if col > 0:
            # Only the leftmost panel keeps y tick labels (limits are shared).
            ax[col].set_yticklabels([])
    uniform_lims(ax, 'y')
    plt.sca(ax[0])
    ax[0].set_ylabel(r'$\hat{\alpha}$ [-]')
    add_colorbar(cm, 1, 24, 'step ahead [h]')
    plt.savefig('figs/{}.pdf'.format(name))
def plot_reliability_diff(results,size,font_scale):
    """Plot, per non-'mimo' model, the calibration improvement over 'mimo'.

    For each quantile level the plotted value is
    ``|alpha - r_mimo| - |alpha - r_model|``: positive means the model is
    better calibrated than the 'mimo' baseline at that level.  Saved to
    ``figs/reliability_diff_tilted.pdf``.
    """
    name = 'reliability_diff_tilted'
    n_res = len(results)
    # One panel per model, excluding the 'mimo' baseline itself.
    f,ax = set_figure(size=size,subplots=(1,n_res-1),w=0.05, h=0,b=0.2,font_scale=font_scale)
    z = 0  # panel index
    n_q = results[list(results.keys())[0]]['reliability'].shape[0]
    alphas = np.linspace(1 / n_q, 1 - 1 / n_q, n_q)
    for k,v in results.items():
        if k=='mimo':
            continue  # the baseline gets no panel of its own
        plt.sca(ax[z])
        n = v['reliability'].shape[1]  # number of forecast steps
        cm = plt.get_cmap('viridis',n)
        for i in range(n):
            # Absolute calibration errors of the model and of the baseline.
            d = np.abs(alphas - v['reliability'][:, i])
            d_mimo = np.abs(alphas - results['mimo']['reliability'][:, i])
            plt.plot(alphas, d_mimo-d, label=k, c = cm(i), alpha=0.8)
        plt.xticks(rotation=90)
        ax[z].set_title(k)
        ax[z].set_xticks(alphas)
        ax[z].set_xticklabels(['{:.2f}'.format(alpha) for alpha in alphas])
        ax[z].set_xlabel(r'$\alpha$ [-]')
        # Zero line: no difference versus the baseline.
        ax[z].hlines(0,0,1,linestyles='--',color='grey')
        if z > 0:
            ax[z].set_yticklabels([])
        z += 1
    uniform_lims(ax, 'y')
    plt.sca(ax[0])
    ax[0].set_ylabel(r'$Rs$ [-]')
    add_colorbar(cm, 1, 24, 'step ahead [h]')
    plt.savefig('figs/{}.pdf'.format(name))
def plot_QS(results, size, font_scale):
    """Plot the quantile (pinball) score per nominal level, one panel per model.

    Saved to ``figs/QS.pdf``.
    """
    name = 'QS'
    fig, ax = set_figure(size=size, subplots=(1, len(results)), w=0.05, h=0, b=0.2, font_scale=font_scale)
    first_key = next(iter(results))
    n_q = results[first_key]['reliability'].shape[0]
    alphas = np.linspace(1 / n_q, 1 - 1 / n_q, n_q)
    for col, (model, res) in enumerate(results.items()):
        plt.sca(ax[col])
        n_steps = res['skill'].shape[1]
        cm = plt.get_cmap('viridis', n_steps)  # one colour per step ahead
        for step in range(n_steps):
            plt.plot(alphas, res['skill'][:, step], label=model, c=cm(step), alpha=0.8)
        plt.xticks(rotation=90)
        ax[col].set_title(model)
        ax[col].set_xticks(alphas)
        ax[col].set_xticklabels(['{:.2f}'.format(a) for a in alphas])
        ax[col].set_xlabel(r'$\alpha$ [-]')
        if col > 0:
            # Only the leftmost panel keeps y tick labels (limits are shared).
            ax[col].set_yticklabels([])
    uniform_lims(ax, 'y')
    plt.sca(ax[0])
    ax[0].set_ylabel(r'$\bar{l}_q$ [kW]')
    add_colorbar(cm, 1, 24, 'step ahead [h]')
    plt.savefig('figs/{}.pdf'.format(name))
def plot_QS_diff(results,size,font_scale):
    """Plot, per non-'mimo' model, the quantile-score improvement over 'mimo'.

    The plotted value is ``QS_mimo - QS_model`` at every nominal level:
    positive means the model beats the baseline.  Saved to ``figs/QS_diff.pdf``.
    """
    name = 'QS_diff'
    n_res = len(results)
    # One panel per model, excluding the 'mimo' baseline itself.
    f,ax = set_figure(size=size,subplots=(1,n_res-1),w=0.05, h=0,b=0.2,font_scale=font_scale)
    z = 0  # panel index
    n_q = results[list(results.keys())[0]]['reliability'].shape[0]
    alphas = np.linspace(1 / n_q, 1 - 1 / n_q, n_q)
    for k,v in results.items():
        if k=='mimo':
            continue  # the baseline gets no panel of its own
        plt.sca(ax[z])
        n = v['skill'].shape[1]  # number of forecast steps
        cm = plt.get_cmap('viridis',n)
        for i in range(n):
            d_mimo = results['mimo']['skill'][:, i]
            d = v['skill'][:, i]
            plt.plot(alphas, d_mimo-d, label=k, c = cm(i), alpha=0.8)
        plt.xticks(rotation=90)
        ax[z].set_title(k)
        ax[z].set_xticks(alphas)
        ax[z].set_xticklabels(['{:.2f}'.format(alpha) for alpha in alphas])
        ax[z].set_xlabel(r'$\alpha$ [-]')
        # Zero line: no difference versus the baseline.
        ax[z].hlines(0, 0, 1, linestyle='--', color='grey')
        if z>0:
            ax[z].set_yticklabels([])
        z += 1
    uniform_lims(ax,'y')
    plt.sca(ax[0])
    ax[0].set_ylabel(r'$\Delta \bar{l}_q$ [kW]')
    add_colorbar(cm, 1, 24, 'step ahead [h]')
    plt.savefig('figs/{}.pdf'.format(name))
def uniform_lims(ax, coord):
    """Give every axes in *ax* the same 'x' or 'y' limits.

    The shared range spans from the smallest lower bound to the largest upper
    bound across all axes.  Any *coord* other than 'x' or 'y' is silently
    ignored (matching the original behaviour).

    Args:
        ax: sequence of matplotlib Axes.
        coord: 'x' or 'y'.
    """
    if coord not in ('x', 'y'):
        return
    # Deduplicated from the original copy-pasted x/y branches: dispatch on the
    # getter/setter name instead of duplicating the whole loop per axis.
    lims = np.vstack([np.array(getattr(a, 'get_{}lim'.format(coord))()) for a in ax])
    shared = (np.min(lims[:, 0]), np.max(lims[:, 1]))
    for a in ax:
        getattr(a, 'set_{}lim'.format(coord))(shared)
def add_colorbar(cm, scale_min, scale_max, label):
    """Attach a vertical colorbar for colormap *cm* in the right-hand margin.

    The figure margins are shrunk to make room; the colorbar ticks are
    relabelled with six integer values spread over [scale_min, scale_max].
    """
    plt.subplots_adjust(bottom=0.1, right=0.85, top=0.9)
    cbar_axes = plt.axes([0.88, 0.1, 0.025, 0.8])
    mappable = plt.cm.ScalarMappable(cmap=cm)
    cbar = plt.colorbar(mappable=mappable, cax=cbar_axes)
    cbar.set_label(label)
    tick_values = np.linspace(scale_min, scale_max, 6, dtype=int)
    cbar.ax.set_yticklabels(['{}'.format(v) for v in tick_values], rotation=90)
| [
"numpy.abs",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.exp",
"matplotlib.pyplot.cm.ScalarMappable",
"matplotlib.pyplot.colorbar",
"numpy.linspace",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"seaborn.set",
"matplotlib.pyplot.get_cmap",
"matpl... | [((194, 253), 'seaborn.set', 'sb.set', ([], {'context': 'context', 'style': 'style', 'font_scale': 'font_scale'}), '(context=context, style=style, font_scale=font_scale)\n', (200, 253), True, 'import seaborn as sb\n'), ((267, 319), 'matplotlib.pyplot.subplots', 'plt.subplots', (['subplots[0]', 'subplots[1]'], {'figsize': 'size'}), '(subplots[0], subplots[1], figsize=size)\n', (279, 319), True, 'import matplotlib.pyplot as plt\n'), ((323, 380), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': 'l', 'wspace': 'w', 'hspace': 'h', 'bottom': 'b'}), '(left=l, wspace=w, hspace=h, bottom=b)\n', (342, 380), True, 'import matplotlib.pyplot as plt\n'), ((516, 543), 'numpy.log', 'np.log', (['((1 - alpha) / alpha)'], {}), '((1 - alpha) / alpha)\n', (522, 543), True, 'import numpy as np\n'), ((1020, 1046), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""plasma"""', '(12)'], {}), "('plasma', 12)\n", (1032, 1046), True, 'import matplotlib.pyplot as plt\n'), ((1051, 1111), 'seaborn.set', 'sb.set', ([], {'context': '"""paper"""', 'style': '"""dark"""', 'font_scale': 'font_scale'}), "(context='paper', style='dark', font_scale=font_scale)\n", (1057, 1111), True, 'import seaborn as sb\n'), ((1115, 1158), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'size', 'tight_layout': '(True)'}), '(figsize=size, tight_layout=True)\n', (1125, 1158), True, 'import matplotlib.pyplot as plt\n'), ((1169, 1185), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1180, 1185), True, 'import matplotlib.pyplot as plt\n'), ((2257, 2294), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figs/quantile_loss.pdf"""'], {}), "('figs/quantile_loss.pdf')\n", (2268, 2294), True, 'import matplotlib.pyplot as plt\n'), ((2546, 2574), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '"""small"""'}), "(fontsize='small')\n", (2556, 2574), True, 'import matplotlib.pyplot as plt\n'), ((2579, 2609), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""$Qs \\\\quad [kW]$"""'], {}), "('$Qs \\\\quad [kW]$')\n", (2589, 2609), True, 'import matplotlib.pyplot as plt\n'), ((2710, 2738), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '"""small"""'}), "(fontsize='small')\n", (2720, 2738), True, 'import matplotlib.pyplot as plt\n'), ((2743, 2787), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\overline{\\\\chi} \\\\quad [-]$"""'], {}), "('$\\\\overline{\\\\chi} \\\\quad [-]$')\n", (2753, 2787), True, 'import matplotlib.pyplot as plt\n'), ((2790, 2818), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""step ahead [h]"""'], {}), "('step ahead [h]')\n", (2800, 2818), True, 'import matplotlib.pyplot as plt\n'), ((3144, 3182), 'numpy.linspace', 'np.linspace', (['(1 / n_q)', '(1 - 1 / n_q)', 'n_q'], {}), '(1 / n_q, 1 - 1 / n_q, n_q)\n', (3155, 3182), True, 'import numpy as np\n'), ((3788, 3802), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[0]'], {}), '(ax[0])\n', (3795, 3802), True, 'import matplotlib.pyplot as plt\n'), ((4325, 4363), 'numpy.linspace', 'np.linspace', (['(1 / n_q)', '(1 - 1 / n_q)', 'n_q'], {}), '(1 / n_q, 1 - 1 / n_q, n_q)\n', (4336, 4363), True, 'import numpy as np\n'), ((4946, 4960), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[0]'], {}), '(ax[0])\n', (4953, 4960), True, 'import matplotlib.pyplot as plt\n'), ((5395, 5433), 'numpy.linspace', 'np.linspace', (['(1 / n_q)', '(1 - 1 / n_q)', 'n_q'], {}), '(1 / n_q, 1 - 1 / n_q, n_q)\n', (5406, 5433), True, 'import numpy as np\n'), ((6205, 6219), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[0]'], {}), '(ax[0])\n', (6212, 6219), True, 'import matplotlib.pyplot as plt\n'), ((6607, 6645), 'numpy.linspace', 'np.linspace', (['(1 / n_q)', '(1 - 1 / n_q)', 'n_q'], {}), '(1 / n_q, 1 - 1 / n_q, n_q)\n', (6618, 6645), True, 'import numpy as np\n'), ((7206, 7220), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[0]'], {}), '(ax[0])\n', (7213, 7220), True, 'import matplotlib.pyplot as plt\n'), ((7628, 7666), 'numpy.linspace', 'np.linspace', (['(1 / n_q)', '(1 - 1 / 
n_q)', 'n_q'], {}), '(1 / n_q, 1 - 1 / n_q, n_q)\n', (7639, 7666), True, 'import numpy as np\n'), ((8389, 8403), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[0]'], {}), '(ax[0])\n', (8396, 8403), True, 'import matplotlib.pyplot as plt\n'), ((9121, 9173), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.1)', 'right': '(0.85)', 'top': '(0.9)'}), '(bottom=0.1, right=0.85, top=0.9)\n', (9140, 9173), True, 'import matplotlib.pyplot as plt\n'), ((9184, 9217), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.88, 0.1, 0.025, 0.8]'], {}), '([0.88, 0.1, 0.025, 0.8])\n', (9192, 9217), True, 'import matplotlib.pyplot as plt\n'), ((9227, 9257), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': 'cm'}), '(cmap=cm)\n', (9248, 9257), True, 'import matplotlib.pyplot as plt\n'), ((9267, 9301), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'mappable': 'sm', 'cax': 'cax'}), '(mappable=sm, cax=cax)\n', (9279, 9301), True, 'import matplotlib.pyplot as plt\n'), ((698, 715), 'numpy.exp', 'np.exp', (['(x + shift)'], {}), '(x + shift)\n', (704, 715), True, 'import numpy as np\n'), ((955, 972), 'numpy.exp', 'np.exp', (['(x + shift)'], {}), '(x + shift)\n', (961, 972), True, 'import numpy as np\n'), ((2488, 2502), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[0]'], {}), '(ax[0])\n', (2495, 2502), True, 'import matplotlib.pyplot as plt\n'), ((2511, 2541), 'matplotlib.pyplot.plot', 'plt.plot', (["v['crps_t']"], {'label': 'k'}), "(v['crps_t'], label=k)\n", (2519, 2541), True, 'import matplotlib.pyplot as plt\n'), ((2651, 2665), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[1]'], {}), '(ax[1])\n', (2658, 2665), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2705), 'matplotlib.pyplot.plot', 'plt.plot', (["v['crosses']"], {'label': 'k'}), "(v['crosses'], label=k)\n", (2682, 2705), True, 'import matplotlib.pyplot as plt\n'), ((3223, 3237), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[z]'], {}), '(ax[z])\n', (3230, 3237), True, 'import 
matplotlib.pyplot as plt\n'), ((3289, 3315), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""', 'n'], {}), "('viridis', n)\n", (3301, 3315), True, 'import matplotlib.pyplot as plt\n'), ((3323, 3347), 'matplotlib.pyplot.plot', 'plt.plot', (['alphas', 'alphas'], {}), '(alphas, alphas)\n', (3331, 3347), True, 'import matplotlib.pyplot as plt\n'), ((3484, 3507), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (3494, 3507), True, 'import matplotlib.pyplot as plt\n'), ((4404, 4418), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[z]'], {}), '(ax[z])\n', (4411, 4418), True, 'import matplotlib.pyplot as plt\n'), ((4470, 4496), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""', 'n'], {}), "('viridis', n)\n", (4482, 4496), True, 'import matplotlib.pyplot as plt\n'), ((4642, 4665), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (4652, 4665), True, 'import matplotlib.pyplot as plt\n'), ((5517, 5531), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[z]'], {}), '(ax[z])\n', (5524, 5531), True, 'import matplotlib.pyplot as plt\n'), ((5583, 5609), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""', 'n'], {}), "('viridis', n)\n", (5595, 5609), True, 'import matplotlib.pyplot as plt\n'), ((5845, 5868), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (5855, 5868), True, 'import matplotlib.pyplot as plt\n'), ((6686, 6700), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[z]'], {}), '(ax[z])\n', (6693, 6700), True, 'import matplotlib.pyplot as plt\n'), ((6746, 6772), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""', 'n'], {}), "('viridis', n)\n", (6758, 6772), True, 'import matplotlib.pyplot as plt\n'), ((6903, 6926), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (6913, 6926), True, 'import matplotlib.pyplot as plt\n'), ((7750, 7764), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[z]'], {}), 
'(ax[z])\n', (7757, 7764), True, 'import matplotlib.pyplot as plt\n'), ((7810, 7836), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""', 'n'], {}), "('viridis', n)\n", (7822, 7836), True, 'import matplotlib.pyplot as plt\n'), ((8026, 8049), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (8036, 8049), True, 'import matplotlib.pyplot as plt\n'), ((483, 495), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (492, 495), True, 'import numpy as np\n'), ((5652, 5691), 'numpy.abs', 'np.abs', (["(alphas - v['reliability'][:, i])"], {}), "(alphas - v['reliability'][:, i])\n", (5658, 5691), True, 'import numpy as np\n'), ((5713, 5766), 'numpy.abs', 'np.abs', (["(alphas - results['mimo']['reliability'][:, i])"], {}), "(alphas - results['mimo']['reliability'][:, i])\n", (5719, 5766), True, 'import numpy as np\n'), ((566, 575), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (572, 575), True, 'import numpy as np\n'), ((576, 590), 'numpy.exp', 'np.exp', (['(-shift)'], {}), '(-shift)\n', (582, 590), True, 'import numpy as np\n'), ((625, 642), 'numpy.exp', 'np.exp', (['(x + shift)'], {}), '(x + shift)\n', (631, 642), True, 'import numpy as np\n'), ((724, 741), 'numpy.exp', 'np.exp', (['(x + shift)'], {}), '(x + shift)\n', (730, 741), True, 'import numpy as np\n'), ((819, 828), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (825, 828), True, 'import numpy as np\n'), ((829, 843), 'numpy.exp', 'np.exp', (['(-shift)'], {}), '(-shift)\n', (835, 843), True, 'import numpy as np\n'), ((880, 897), 'numpy.exp', 'np.exp', (['(x + shift)'], {}), '(x + shift)\n', (886, 897), True, 'import numpy as np\n'), ((981, 998), 'numpy.exp', 'np.exp', (['(x + shift)'], {}), '(x + shift)\n', (987, 998), True, 'import numpy as np\n'), ((9404, 9451), 'numpy.linspace', 'np.linspace', (['scale_min', 'scale_max', '(6)'], {'dtype': 'int'}), '(scale_min, scale_max, 6, dtype=int)\n', (9415, 9451), True, 'import numpy as np\n'), ((650, 667), 'numpy.exp', 
'np.exp', (['(x + shift)'], {}), '(x + shift)\n', (656, 667), True, 'import numpy as np\n'), ((905, 922), 'numpy.exp', 'np.exp', (['(x + shift)'], {}), '(x + shift)\n', (911, 922), True, 'import numpy as np\n'), ((8710, 8727), 'numpy.vstack', 'np.vstack', (['x_lims'], {}), '(x_lims)\n', (8719, 8727), True, 'import numpy as np\n'), ((8741, 8758), 'numpy.vstack', 'np.vstack', (['x_lims'], {}), '(x_lims)\n', (8750, 8758), True, 'import numpy as np\n'), ((8957, 8974), 'numpy.vstack', 'np.vstack', (['y_lims'], {}), '(y_lims)\n', (8966, 8974), True, 'import numpy as np\n'), ((8988, 9005), 'numpy.vstack', 'np.vstack', (['y_lims'], {}), '(y_lims)\n', (8997, 9005), True, 'import numpy as np\n')] |
import numpy as np
import stopit
import sys
from spider.featurization_base import FeaturizationTransformerPrimitiveBase
# Type aliases used to parameterise the D3M primitive base class below:
# each element is expected to be an array-like audio time series.
Inputs = list
Outputs = list
class AudioSlicer(FeaturizationTransformerPrimitiveBase[Inputs, Outputs]):
    """DARPA D3M audio slicer: cuts each audio time series into fixed-length clips."""

    def __init__(
            self,
            sampling_rate=44100,
            frame_length=3.0,
            overlap=0.0,
            pad=True
            ):
        """
        DARPA D3M Audio Slicer Primitive

        Arguments:
            - sampling_rate: integer-valued uniform sampling rate of the
                incoming audio data
            - frame_length: float-valued duration in seconds that defines the
                length of the sliced clips
            - overlap: float-valued duration in seconds that defines the step
                size taken along the time series during adjacent clip
                extraction
            - pad: boolean value indicating whether a trailing clip shorter
                than frame_length should be padded with zeros to a duration
                of exactly frame_length

        Raises:
            ValueError: if overlap >= frame_length (the window could never
                advance past the requested overlap).
        """
        # Fail fast before storing any state.  (Original message was missing
        # the space before 'seconds'.)
        if overlap >= frame_length:
            raise ValueError(
                'AudioSlicer: Consecutive audio frames of length ' +
                str(frame_length) + ' seconds cannot facilitate ' +
                str(overlap) + ' seconds of overlap.'
            )
        self.sampling_rate = sampling_rate
        self.frame_length = frame_length
        self.overlap = overlap
        self.pad = pad
        self.samples_per_clip = int(frame_length * sampling_rate)
        # Distance between clip start points; clamp to >= 1 sample so the
        # slicing loop always makes progress.
        self.step = max(int((frame_length - overlap) * sampling_rate), 1)

    def produce(self, inputs, timeout=None, iterations=None):
        """Slice every input series into clips.

        Returns a list with one 2D array per input (one row per clip).
        Raises ValueError for inputs with more than two channels and
        TimeoutError when the stopit time limit expires.
        """
        with stopit.ThreadingTimeout(timeout) as timer:
            features = []
            for i, datum in enumerate(inputs):
                # Handle multi-channel audio: average stereo down to mono,
                # reject anything with more than two channels.
                if datum.ndim > 2 or (datum.ndim == 2 and datum.shape[0] > 2):
                    raise ValueError(
                        'Time series datum ' + str(i) + ' found with ' +
                        'incompatible shape ' + str(datum.shape) + '.'
                    )
                elif datum.ndim == 2:
                    datum = datum.mean(axis=0)

                # Iterate through the audio extracting clips.  Fixes from the
                # original: Python-2-only xrange (NameError on Python 3)
                # replaced by range, and the loop variable renamed so it no
                # longer shadows the enumerate index i used above.
                clips = []
                for start in range(0, len(datum), self.step):
                    if start + self.samples_per_clip <= len(datum):
                        clips.append(datum[start : start + self.samples_per_clip])
                    elif self.pad:
                        clips.append(
                            np.concatenate([
                                datum[start:],
                                np.zeros(
                                    self.samples_per_clip - len(datum[start:]))
                            ])
                        )
                features.append(np.array(clips))

        if timer.state == timer.EXECUTED:
            return features
        else:
            raise TimeoutError('AudioSlicer exceeded time limit')
| [
"stopit.ThreadingTimeout",
"numpy.array"
] | [((1671, 1703), 'stopit.ThreadingTimeout', 'stopit.ThreadingTimeout', (['timeout'], {}), '(timeout)\n', (1694, 1703), False, 'import stopit\n'), ((2862, 2877), 'numpy.array', 'np.array', (['clips'], {}), '(clips)\n', (2870, 2877), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 4 18:14:29 2016
@author: becker
"""
import numpy as np
import scipy.linalg as linalg
import scipy.sparse as sparse
from simfempy.fems import femsys, cr1
from simfempy.tools import barycentric, npext
#=================================================================#
class CR1sys(femsys.Femsys):
    """Vector-valued Crouzeix-Raviart (CR1) nonconforming finite element system.

    Wraps the scalar cr1.CR1 element for ncomp solution components.  The
    degrees of freedom are face-based and interleaved: the dof for component
    icomp of face k sits at index icomp + ncomp*k in every vector/matrix.
    """
    def __init__(self, ncomp, mesh=None):
        # Build the scalar CR1 element and hand it to the generic system base.
        super().__init__(cr1.CR1(mesh=mesh), ncomp, mesh)
    def setMesh(self, mesh):
        super().setMesh(mesh)
    def tonode(self, u):
        """Interpolate the face-based dofs of each component to mesh nodes.

        Returns an interleaved array of length ncomp*nnodes.
        """
        ncomp, nnodes = self.ncomp, self.mesh.nnodes
        unodes = np.zeros(ncomp*nnodes)
        for i in range(ncomp):
            unodes[i::ncomp] = self.fem.tonode(u[i::ncomp])
        return unodes
    def interpolateBoundary(self, colors, f, lumped=False):
        """Interpolate boundary functions f per component on faces of the given colors.

        f maps a color to a list of ncomp callables (one per component).
        Returns an array of shape (nfaces, ncomp), or None for empty input.
        Callables taking normal components (nx, ny, nz) are detected by
        inspecting their signature.
        """
        # fs={col:f[col] for col in colors if col in f.keys()}
        if len(colors) == 0 or len(f) == 0: return
        # print(f"{f=}")
        if isinstance(next(iter(f.values())), list):
            import inspect
            fct = next(iter(f.values()))[0]
            # print(f"{str(inspect.signature(fct))=}")
            # Functions expecting the face normal get the 6-argument
            # vectorized signature (x, y, z, nx, ny, nz).
            if 'nx' in str(inspect.signature(fct)):
                return np.vstack([self.fem.interpolateBoundary(colors, {col:np.vectorize(f[col][icomp], signature='(n),(n),(n),(n),(n),(n)->(n)') for col in colors if col in f.keys()},lumped) for icomp in range(self.ncomp)]).T
            else:
                return np.vstack([self.fem.interpolateBoundary(colors, {col:np.vectorize(f[col][icomp]) for col in colors if col in f.keys()},lumped) for icomp in range(self.ncomp)]).T
        else:
            raise ValueError(f"don't know how to handle {type(next(iter(f.values())))=}")
    def matrixBoundary(self, A, bdrydata, method):
        """Eliminate Dirichlet dofs from the system matrix A.

        Rows needed later for boundary-flux post-processing are saved in
        bdrydata.Asaved.  With method == 'strong' the Dirichlet rows/columns
        are zeroed and unit diagonal entries inserted; otherwise a symmetric
        elimination keeping the Dirichlet diagonal block is used.  Returns
        the modified matrix.
        """
        facesdirflux, facesinner, facesdirall, colorsdir = bdrydata.facesdirflux, bdrydata.facesinner, bdrydata.facesdirall, bdrydata.colorsdir
        x, y, z = self.mesh.pointsf.T
        nfaces, ncomp = self.mesh.nfaces, self.ncomp
        for color, faces in facesdirflux.items():
            # Save the matrix rows of these faces (all components) before
            # elimination so normal fluxes can be reconstructed afterwards.
            ind = np.repeat(ncomp * faces, ncomp)
            for icomp in range(ncomp): ind[icomp::ncomp] += icomp
            nb = faces.shape[0]
            help = sparse.dok_matrix((ncomp *nb, ncomp * nfaces))
            for icomp in range(ncomp):
                for i in range(nb): help[icomp + ncomp * i, icomp + ncomp * faces[i]] = 1
            bdrydata.Asaved[color] = help.dot(A)
        # Interleaved dof index arrays for interior and Dirichlet faces.
        indin = np.repeat(ncomp * facesinner, ncomp)
        for icomp in range(ncomp): indin[icomp::ncomp] += icomp
        inddir = np.repeat(ncomp * facesdirall, ncomp)
        for icomp in range(ncomp): inddir[icomp::ncomp] += icomp
        # Coupling block (interior rows, Dirichlet columns) needed by
        # vectorBoundary to move known values to the right-hand side.
        bdrydata.A_inner_dir = A[indin, :][:, inddir]
        if method == 'strong':
            help = np.ones((ncomp * nfaces))
            help[inddir] = 0
            help = sparse.dia_matrix((help, 0), shape=(ncomp * nfaces, ncomp * nfaces))
            A = help.dot(A.dot(help))
            help = np.zeros((ncomp * nfaces))
            help[inddir] = 1.0
            help = sparse.dia_matrix((help, 0), shape=(ncomp * nfaces, ncomp * nfaces))
            A += help
        else:
            bdrydata.A_dir_dir = A[inddir, :][:, inddir]
            help = np.ones((ncomp * nfaces))
            help[inddir] = 0
            help = sparse.dia_matrix((help, 0), shape=(ncomp * nfaces, ncomp * nfaces))
            help2 = np.zeros((ncomp * nfaces))
            help2[inddir] = 1
            help2 = sparse.dia_matrix((help2, 0), shape=(ncomp * nfaces, ncomp * nfaces))
            A = help.dot(A.dot(help)) + help2.dot(A.dot(help2))
        return A
    def formBoundary(self, b, bdrydata, method):
        """Zero the Dirichlet entries of the residual/form vector b in place."""
        facesdirall, ncomp = bdrydata.facesdirall, self.ncomp
        inddir = np.repeat(ncomp * facesdirall, ncomp)
        for icomp in range(ncomp): inddir[icomp::ncomp] += icomp
        b[inddir] = 0
    def vectorBoundary(self, b, bdryfct, bdrydata, method):
        """Apply Dirichlet boundary data to the right-hand side b.

        bdryfct maps a color to a list of ncomp callables evaluated at the
        face points.  Requires the blocks saved by matrixBoundary
        (A_inner_dir and, for weak elimination, A_dir_dir).  Mutates and
        returns b; the pre-elimination entries are kept in bdrydata.bsaved.
        """
        facesdirflux, facesinner, facesdirall, colorsdir = bdrydata.facesdirflux, bdrydata.facesinner, bdrydata.facesdirall, bdrydata.colorsdir
        x, y, z = self.mesh.pointsf.T
        nfaces, ncomp = self.mesh.nfaces, self.ncomp
        for color, faces in facesdirflux.items():
            ind = np.repeat(ncomp * faces, ncomp)
            for icomp in range(ncomp): ind[icomp::ncomp] += icomp
            bdrydata.bsaved[color] = b[ind]
        indin = np.repeat(ncomp * facesinner, ncomp)
        for icomp in range(ncomp): indin[icomp::ncomp] += icomp
        inddir = np.repeat(ncomp * facesdirall, ncomp)
        for icomp in range(ncomp): inddir[icomp::ncomp] += icomp
        # Assemble the Dirichlet values into a full-length helper vector.
        help = np.zeros_like(b)
        for color in colorsdir:
            faces = self.mesh.bdrylabels[color]
            if color in bdryfct:
                # dirichlets = bdryfct[color](x[faces], y[faces], z[faces])
                dirichlets = np.vstack([f(x[faces], y[faces], z[faces]) for f in bdryfct[color]])
                for icomp in range(ncomp):
                    help[icomp + ncomp * faces] = dirichlets[icomp]
        # Move the known Dirichlet contribution to the right-hand side.
        b[indin] -= bdrydata.A_inner_dir * help[inddir]
        if method == 'strong':
            b[inddir] = help[inddir]
            # print(f"{b[inddir]=}")
        else:
            b[inddir] = bdrydata.A_dir_dir * help[inddir]
        return b
    # for color in colorsdir:
    #     faces = self.mesh.bdrylabels[color]
    #     if color in bdryfct.keys():
    #         dirichlets = bdryfct[color](x[faces], y[faces], z[faces])
    #         for icomp in range(ncomp):
    #             u[icomp + ncomp * faces] = dirichlets[icomp]
    #     else:
    #         for icomp in range(ncomp):
    #             u[icomp + ncomp * faces] = 0
    # b[indin] -= bdrydata.A_inner_dir * u[inddir]
    # if self.fem.dirichletmethod == 'strong':
    #     b[inddir] = u[inddir]
    # else:
    #     b[inddir] = bdrydata.A_dir_dir * u[inddir]
    # # print(f"{b=}")
    # return b, u, bdrydata
    def computeRhsBoundary(self, b, colors, bdryfct):
        """Add Neumann boundary contributions to the right-hand side b.

        bdryfct maps a color to one callable (xf, yf, zf, nx, ny, nz)
        returning one value per component.  Mutates and returns b.
        """
        for color in colors:
            if not color in bdryfct or not bdryfct[color]: continue
            faces = self.mesh.bdrylabels[color]
            normalsS = self.mesh.normals[faces]
            dS = linalg.norm(normalsS,axis=1)
            xf, yf, zf = self.mesh.pointsf[faces].T
            nx, ny, nz = normalsS.T / dS
            neumanns = bdryfct[color](xf, yf, zf, nx, ny, nz)
            for i in range(self.ncomp):
                bS = dS * neumanns[i]
                indices = i + self.ncomp * faces
                b[indices] += bS
        return b
    def computeMatrixDivergence(self):
        """Assemble the (ncells x ncomp*nfaces) divergence matrix B, B v ~ div(v) per cell."""
        nfaces, ncells, ncomp, dV = self.mesh.nfaces, self.mesh.ncells, self.ncomp, self.mesh.dV
        nloc, cellgrads, foc = self.fem.nloc, self.fem.cellgrads, self.mesh.facesOfCells
        rowsB = np.repeat(np.arange(ncells), ncomp * nloc).ravel()
        colsB = ncomp*np.repeat(foc, ncomp).reshape(ncells * nloc, ncomp) + np.arange(ncomp)
        # Basis-gradient components weighted by the cell volumes.
        mat = np.einsum('nkl,n->nkl', cellgrads[:, :, :ncomp], dV)
        B = sparse.coo_matrix((mat.ravel(), (rowsB, colsB.ravel())),shape=(ncells, nfaces * ncomp)).tocsr()
        return B
    def computeFormDivGrad(self, dv, dp, v, p):
        """Accumulate the mixed divergence/gradient forms in place:
        dv -= grad-form of p (per component), dp += div(v) per cell."""
        ncomp, dV, cellgrads, foc = self.ncomp, self.mesh.dV, self.fem.cellgrads, self.mesh.facesOfCells
        for icomp in range(ncomp):
            r = np.einsum('n,ni->ni', -dV*p, cellgrads[:,:,icomp])
            np.add.at(dv[icomp::ncomp], foc, r)
            dp += np.einsum('n,ni,ni->n', dV, cellgrads[:,:,icomp], v[icomp::ncomp][foc])
    def computeMatrixLaplace(self, mucell):
        """Assemble the vector Laplacian: the scalar CR1 diffusion matrix
        replicated on each component's diagonal block."""
        return self.matrix2systemdiagonal(self.fem.computeMatrixDiffusion(mucell), self.ncomp).tocsr()
    # OLD VERSION: twice slower
    # import time
    # t0 = time.time()
    # nfaces, ncells, ncomp, dV = self.mesh.nfaces, self.mesh.ncells, self.ncomp, self.mesh.dV
    # nloc, rows, cols, cellgrads = self.fem.nloc, self.rowssys, self.colssys, self.fem.cellgrads
    # mat = np.zeros(shape=rows.shape, dtype=float).reshape(ncells, ncomp * nloc, ncomp * nloc)
    # for icomp in range(ncomp):
    #     mat[:, icomp::ncomp, icomp::ncomp] += (np.einsum('nkj,nlj->nkl', cellgrads, cellgrads).T * dV * mucell).T
    # A = sparse.coo_matrix((mat.ravel(), (rows, cols)), shape=(ncomp*nfaces, ncomp*nfaces)).tocsr()
    # t1 = time.time()
    # B = self.fem.computeMatrixDiffusion(mucell)
    # B = self.matrix2systemdiagonal(B, self.ncomp).tocsr()
    # t2 = time.time()
    # print(f"{(t2-t1)/(t1-t0)=}")
    # if not np.allclose(A.A,B.A):
    #     raise ValueError(f"{A.diagonal()=} {B.diagonal()=}")
    # return A
    def computeFormLaplace(self, mu, dv, v):
        """Accumulate the Laplace form mu*grad(v):grad(phi) into dv, per component, in place."""
        ncomp, dV, cellgrads, foc = self.ncomp, self.mesh.dV, self.fem.cellgrads, self.mesh.facesOfCells
        for icomp in range(ncomp):
            r = np.einsum('n,nil,njl,nj->ni', dV*mu, cellgrads, cellgrads, v[icomp::ncomp][foc])
            np.add.at(dv[icomp::ncomp], foc, r)
    def computeRhsNitscheDiffusion(self, b, diffcoff, colorsdir, udir, ncomp, coeff=1):
        """Add Nitsche Dirichlet contributions to b, delegating per component to the scalar element."""
        for icomp in range(ncomp):
            self.fem.computeRhsNitscheDiffusion(b[icomp::ncomp], diffcoff, colorsdir, udir[:,icomp], coeff)
    def computeMatrixNitscheDiffusion(self, diffcoff, colorsdir, ncomp, coeff=1):
        """Return the Nitsche boundary matrix replicated on each component's diagonal block."""
        A = self.fem.computeMatrixNitscheDiffusion(diffcoff, colorsdir, coeff)
        return self.matrix2systemdiagonal(A, ncomp)
    def computeFormNitscheDiffusion(self, du, u, diffcoff, colorsdir, ncomp):
        """Accumulate the Nitsche boundary form into du, per component, in place."""
        for icomp in range(ncomp):
            self.fem.computeFormNitscheDiffusion(du[icomp::ncomp], u[icomp::ncomp], diffcoff, colorsdir)
    def computeMatrixElasticity(self, mucell, lamcell):
        """Assemble the linear elasticity stiffness matrix with Lame coefficients
        per cell (mucell, lamcell), plus the CR1 Korn stabilization term."""
        nfaces, ncells, ncomp, dV = self.mesh.nfaces, self.mesh.ncells, self.ncomp, self.mesh.dV
        nloc, rows, cols, cellgrads = self.fem.nloc, self.rowssys, self.colssys, self.fem.cellgrads
        mat = np.zeros(shape=rows.shape, dtype=float).reshape(ncells, ncomp * nloc, ncomp * nloc)
        for i in range(ncomp):
            for j in range(self.ncomp):
                # lambda div-div term, mu grad^T term, mu grad term.
                mat[:, i::ncomp, j::ncomp] += (np.einsum('nk,nl->nkl', cellgrads[:, :, i], cellgrads[:, :, j]).T * dV * lamcell).T
                mat[:, i::ncomp, j::ncomp] += (np.einsum('nk,nl->nkl', cellgrads[:, :, j], cellgrads[:, :, i]).T * dV * mucell).T
                mat[:, i::ncomp, i::ncomp] += (np.einsum('nk,nl->nkl', cellgrads[:, :, j], cellgrads[:, :, j]).T * dV * mucell).T
        A = sparse.coo_matrix((mat.ravel(), (rows, cols)), shape=(ncomp*nfaces, ncomp*nfaces)).tocsr()
        A += self.computeMatrixKorn(mucell)
        return A
    def computeMatrixKorn(self, mucell):
        """Assemble the interior-face jump stabilization matrix needed for a
        discrete Korn inequality with nonconforming CR1 elements.

        Couples the dofs of the two cells adjacent to each interior face;
        mucell may be a scalar or a per-cell array.
        """
        ncomp = self.ncomp
        dimension, dV, ndofs = self.mesh.dimension, self.mesh.dV, self.nunknowns()
        nloc, dofspercell, nall = self.nlocal(), self.dofspercell(), ncomp*ndofs
        # Cells on either side of each interior face.
        ci0 = self.mesh.cellsOfInteriorFaces[:,0]
        ci1 = self.mesh.cellsOfInteriorFaces[:,1]
        assert np.all(ci1>=0)
        normalsS = self.mesh.normals[self.mesh.innerfaces]
        dS = linalg.norm(normalsS, axis=1)
        faces = self.mesh.faces[self.mesh.innerfaces]
        # Local positions of the shared face in each adjacent cell, mapped
        # to the corresponding global face (dof) indices.
        ind0 = npext.positionin(faces, self.mesh.simplices[ci0])
        ind1 = npext.positionin(faces, self.mesh.simplices[ci1])
        fi0 = np.take_along_axis(self.mesh.facesOfCells[ci0], ind0, axis=1)
        fi1 = np.take_along_axis(self.mesh.facesOfCells[ci1], ind1, axis=1)
        d = self.mesh.dimension
        massloc = barycentric.crbdryothers(d)
        if isinstance(mucell,(int,float)):
            scale = mucell*dS/(dV[ci0]+ dV[ci1])
        else:
            scale = (mucell[ci0] + mucell[ci1]) * dS / (dV[ci0] + dV[ci1])
        scale *= 8
        A = sparse.coo_matrix((nall, nall))
        mat = np.einsum('n,kl->nkl', dS*scale, massloc).reshape(-1)
        for icomp in range(ncomp):
            # Interleaved dof indices of component icomp on either side.
            d0 = ncomp*fi0+icomp
            d1 = ncomp*fi1+icomp
            rows0 = d0.repeat(nloc-1)
            cols0 = np.tile(d0,nloc-1).reshape(-1)
            rows1 = d1.repeat(nloc-1)
            cols1 = np.tile(d1,nloc-1).reshape(-1)
            # print(f"{mat.shape=}")
            # print(f"{rows0.shape=}")
            # print(f"{cols0.shape=}")
            # print(f"{fi0.shape=}")
            # print(f"{fi1.shape=}")
            # Penalize the jump: + on same-side pairs, - on cross terms.
            A += sparse.coo_matrix((mat, (rows0, cols0)), shape=(nall, nall))
            A += sparse.coo_matrix((-mat, (rows0, cols1)), shape=(nall, nall))
            A += sparse.coo_matrix((-mat, (rows1, cols0)), shape=(nall, nall))
            A += sparse.coo_matrix((mat, (rows1, cols1)), shape=(nall, nall))
        return A
    def computeBdryNormalFlux(self, u, colors, bdrydata):
        """Post-process the normal flux of the solution u through each boundary color.

        Uses the matrix rows and rhs entries saved before Dirichlet
        elimination (bdrydata.Asaved / bdrydata.bsaved).  Returns an array
        of shape (len(colors), ncomp).
        """
        flux, omega = np.zeros(shape=(len(colors),self.ncomp)), np.zeros(len(colors))
        for i,color in enumerate(colors):
            faces = self.mesh.bdrylabels[color]
            normalsS = self.mesh.normals[faces]
            dS = linalg.norm(normalsS, axis=1)
            omega[i] = np.sum(dS)
            bs, As = bdrydata.bsaved[color], bdrydata.Asaved[color]
            # Residual of the saved (un-eliminated) rows gives the flux.
            res = bs - As * u
            for icomp in range(self.ncomp):
                flux[i, icomp] = np.sum(res[icomp::self.ncomp])
        return flux
# ------------------------------------- #
if __name__ == '__main__':
    # Smoke-test/demo: run the FEM self-test on a backward-facing-step mesh
    # and plot the mesh and resulting point data side by side.
    from simfempy.meshes import testmeshes
    from simfempy.meshes import plotmesh
    import matplotlib.pyplot as plt
    import matplotlib.gridspec as gridspec
    mesh = testmeshes.backwardfacingstep(h=0.2)
    fem = CR1sys(ncomp=2, mesh=mesh)
    # fem.test() presumably returns a solution vector — defined in the base class.
    u = fem.test()
    point_data = fem.getPointData(u)
    fig = plt.figure(figsize=(10, 8))
    # Two side-by-side panels: mesh with boundaries, then the solution.
    outer = gridspec.GridSpec(1, 2, wspace=0.2, hspace=0.2)
    plotmesh.meshWithBoundaries(mesh, fig=fig, outer=outer[0])
    plotmesh.meshWithData(mesh, point_data=point_data, title="P1 Test", alpha=1, fig=fig, outer=outer[1])
    plt.show()
| [
"numpy.sum",
"numpy.einsum",
"simfempy.meshes.testmeshes.backwardfacingstep",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.tile",
"numpy.add.at",
"simfempy.tools.npext.positionin",
"numpy.zeros_like",
"simfempy.tools.barycentric.crbdryothers",
"simfempy.fems.cr1.CR1",
"si... | [((13520, 13556), 'simfempy.meshes.testmeshes.backwardfacingstep', 'testmeshes.backwardfacingstep', ([], {'h': '(0.2)'}), '(h=0.2)\n', (13549, 13556), False, 'from simfempy.meshes import testmeshes\n'), ((13660, 13687), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (13670, 13687), True, 'import matplotlib.pyplot as plt\n'), ((13700, 13747), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {'wspace': '(0.2)', 'hspace': '(0.2)'}), '(1, 2, wspace=0.2, hspace=0.2)\n', (13717, 13747), True, 'import matplotlib.gridspec as gridspec\n'), ((13752, 13810), 'simfempy.meshes.plotmesh.meshWithBoundaries', 'plotmesh.meshWithBoundaries', (['mesh'], {'fig': 'fig', 'outer': 'outer[0]'}), '(mesh, fig=fig, outer=outer[0])\n', (13779, 13810), False, 'from simfempy.meshes import plotmesh\n'), ((13815, 13920), 'simfempy.meshes.plotmesh.meshWithData', 'plotmesh.meshWithData', (['mesh'], {'point_data': 'point_data', 'title': '"""P1 Test"""', 'alpha': '(1)', 'fig': 'fig', 'outer': 'outer[1]'}), "(mesh, point_data=point_data, title='P1 Test', alpha=1,\n fig=fig, outer=outer[1])\n", (13836, 13920), False, 'from simfempy.meshes import plotmesh\n'), ((13921, 13931), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13929, 13931), True, 'import matplotlib.pyplot as plt\n'), ((601, 625), 'numpy.zeros', 'np.zeros', (['(ncomp * nnodes)'], {}), '(ncomp * nnodes)\n', (609, 625), True, 'import numpy as np\n'), ((2445, 2481), 'numpy.repeat', 'np.repeat', (['(ncomp * facesinner)', 'ncomp'], {}), '(ncomp * facesinner, ncomp)\n', (2454, 2481), True, 'import numpy as np\n'), ((2563, 2600), 'numpy.repeat', 'np.repeat', (['(ncomp * facesdirall)', 'ncomp'], {}), '(ncomp * facesdirall, ncomp)\n', (2572, 2600), True, 'import numpy as np\n'), ((3747, 3784), 'numpy.repeat', 'np.repeat', (['(ncomp * facesdirall)', 'ncomp'], {}), '(ncomp * facesdirall, ncomp)\n', (3756, 3784), True, 'import numpy as np\n'), ((4393, 4429), 
'numpy.repeat', 'np.repeat', (['(ncomp * facesinner)', 'ncomp'], {}), '(ncomp * facesinner, ncomp)\n', (4402, 4429), True, 'import numpy as np\n'), ((4511, 4548), 'numpy.repeat', 'np.repeat', (['(ncomp * facesdirall)', 'ncomp'], {}), '(ncomp * facesdirall, ncomp)\n', (4520, 4548), True, 'import numpy as np\n'), ((4629, 4645), 'numpy.zeros_like', 'np.zeros_like', (['b'], {}), '(b)\n', (4642, 4645), True, 'import numpy as np\n'), ((7022, 7074), 'numpy.einsum', 'np.einsum', (['"""nkl,n->nkl"""', 'cellgrads[:, :, :ncomp]', 'dV'], {}), "('nkl,n->nkl', cellgrads[:, :, :ncomp], dV)\n", (7031, 7074), True, 'import numpy as np\n'), ((11043, 11059), 'numpy.all', 'np.all', (['(ci1 >= 0)'], {}), '(ci1 >= 0)\n', (11049, 11059), True, 'import numpy as np\n'), ((11130, 11159), 'scipy.linalg.norm', 'linalg.norm', (['normalsS'], {'axis': '(1)'}), '(normalsS, axis=1)\n', (11141, 11159), True, 'import scipy.linalg as linalg\n'), ((11229, 11278), 'simfempy.tools.npext.positionin', 'npext.positionin', (['faces', 'self.mesh.simplices[ci0]'], {}), '(faces, self.mesh.simplices[ci0])\n', (11245, 11278), False, 'from simfempy.tools import barycentric, npext\n'), ((11294, 11343), 'simfempy.tools.npext.positionin', 'npext.positionin', (['faces', 'self.mesh.simplices[ci1]'], {}), '(faces, self.mesh.simplices[ci1])\n', (11310, 11343), False, 'from simfempy.tools import barycentric, npext\n'), ((11358, 11419), 'numpy.take_along_axis', 'np.take_along_axis', (['self.mesh.facesOfCells[ci0]', 'ind0'], {'axis': '(1)'}), '(self.mesh.facesOfCells[ci0], ind0, axis=1)\n', (11376, 11419), True, 'import numpy as np\n'), ((11434, 11495), 'numpy.take_along_axis', 'np.take_along_axis', (['self.mesh.facesOfCells[ci1]', 'ind1'], {'axis': '(1)'}), '(self.mesh.facesOfCells[ci1], ind1, axis=1)\n', (11452, 11495), True, 'import numpy as np\n'), ((11546, 11573), 'simfempy.tools.barycentric.crbdryothers', 'barycentric.crbdryothers', (['d'], {}), '(d)\n', (11570, 11573), False, 'from simfempy.tools import barycentric, 
npext\n'), ((11786, 11817), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(nall, nall)'], {}), '((nall, nall))\n', (11803, 11817), True, 'import scipy.sparse as sparse\n'), ((414, 432), 'simfempy.fems.cr1.CR1', 'cr1.CR1', ([], {'mesh': 'mesh'}), '(mesh=mesh)\n', (421, 432), False, 'from simfempy.fems import femsys, cr1\n'), ((2055, 2086), 'numpy.repeat', 'np.repeat', (['(ncomp * faces)', 'ncomp'], {}), '(ncomp * faces, ncomp)\n', (2064, 2086), True, 'import numpy as np\n'), ((2204, 2251), 'scipy.sparse.dok_matrix', 'sparse.dok_matrix', (['(ncomp * nb, ncomp * nfaces)'], {}), '((ncomp * nb, ncomp * nfaces))\n', (2221, 2251), True, 'import scipy.sparse as sparse\n'), ((2770, 2793), 'numpy.ones', 'np.ones', (['(ncomp * nfaces)'], {}), '(ncomp * nfaces)\n', (2777, 2793), True, 'import numpy as np\n'), ((2844, 2912), 'scipy.sparse.dia_matrix', 'sparse.dia_matrix', (['(help, 0)'], {'shape': '(ncomp * nfaces, ncomp * nfaces)'}), '((help, 0), shape=(ncomp * nfaces, ncomp * nfaces))\n', (2861, 2912), True, 'import scipy.sparse as sparse\n'), ((2970, 2994), 'numpy.zeros', 'np.zeros', (['(ncomp * nfaces)'], {}), '(ncomp * nfaces)\n', (2978, 2994), True, 'import numpy as np\n'), ((3047, 3115), 'scipy.sparse.dia_matrix', 'sparse.dia_matrix', (['(help, 0)'], {'shape': '(ncomp * nfaces, ncomp * nfaces)'}), '((help, 0), shape=(ncomp * nfaces, ncomp * nfaces))\n', (3064, 3115), True, 'import scipy.sparse as sparse\n'), ((3228, 3251), 'numpy.ones', 'np.ones', (['(ncomp * nfaces)'], {}), '(ncomp * nfaces)\n', (3235, 3251), True, 'import numpy as np\n'), ((3302, 3370), 'scipy.sparse.dia_matrix', 'sparse.dia_matrix', (['(help, 0)'], {'shape': '(ncomp * nfaces, ncomp * nfaces)'}), '((help, 0), shape=(ncomp * nfaces, ncomp * nfaces))\n', (3319, 3370), True, 'import scipy.sparse as sparse\n'), ((3391, 3415), 'numpy.zeros', 'np.zeros', (['(ncomp * nfaces)'], {}), '(ncomp * nfaces)\n', (3399, 3415), True, 'import numpy as np\n'), ((3468, 3537), 'scipy.sparse.dia_matrix', 
'sparse.dia_matrix', (['(help2, 0)'], {'shape': '(ncomp * nfaces, ncomp * nfaces)'}), '((help2, 0), shape=(ncomp * nfaces, ncomp * nfaces))\n', (3485, 3537), True, 'import scipy.sparse as sparse\n'), ((4235, 4266), 'numpy.repeat', 'np.repeat', (['(ncomp * faces)', 'ncomp'], {}), '(ncomp * faces, ncomp)\n', (4244, 4266), True, 'import numpy as np\n'), ((6262, 6291), 'scipy.linalg.norm', 'linalg.norm', (['normalsS'], {'axis': '(1)'}), '(normalsS, axis=1)\n', (6273, 6291), True, 'import scipy.linalg as linalg\n'), ((6991, 7007), 'numpy.arange', 'np.arange', (['ncomp'], {}), '(ncomp)\n', (7000, 7007), True, 'import numpy as np\n'), ((7404, 7458), 'numpy.einsum', 'np.einsum', (['"""n,ni->ni"""', '(-dV * p)', 'cellgrads[:, :, icomp]'], {}), "('n,ni->ni', -dV * p, cellgrads[:, :, icomp])\n", (7413, 7458), True, 'import numpy as np\n'), ((7467, 7502), 'numpy.add.at', 'np.add.at', (['dv[icomp::ncomp]', 'foc', 'r'], {}), '(dv[icomp::ncomp], foc, r)\n', (7476, 7502), True, 'import numpy as np\n'), ((7521, 7594), 'numpy.einsum', 'np.einsum', (['"""n,ni,ni->n"""', 'dV', 'cellgrads[:, :, icomp]', 'v[icomp::ncomp][foc]'], {}), "('n,ni,ni->n', dV, cellgrads[:, :, icomp], v[icomp::ncomp][foc])\n", (7530, 7594), True, 'import numpy as np\n'), ((8926, 9013), 'numpy.einsum', 'np.einsum', (['"""n,nil,njl,nj->ni"""', '(dV * mu)', 'cellgrads', 'cellgrads', 'v[icomp::ncomp][foc]'], {}), "('n,nil,njl,nj->ni', dV * mu, cellgrads, cellgrads, v[icomp::ncomp\n ][foc])\n", (8935, 9013), True, 'import numpy as np\n'), ((9019, 9054), 'numpy.add.at', 'np.add.at', (['dv[icomp::ncomp]', 'foc', 'r'], {}), '(dv[icomp::ncomp], foc, r)\n', (9028, 9054), True, 'import numpy as np\n'), ((12371, 12431), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(mat, (rows0, cols0))'], {'shape': '(nall, nall)'}), '((mat, (rows0, cols0)), shape=(nall, nall))\n', (12388, 12431), True, 'import scipy.sparse as sparse\n'), ((12449, 12510), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(-mat, (rows0, cols1))'], 
{'shape': '(nall, nall)'}), '((-mat, (rows0, cols1)), shape=(nall, nall))\n', (12466, 12510), True, 'import scipy.sparse as sparse\n'), ((12528, 12589), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(-mat, (rows1, cols0))'], {'shape': '(nall, nall)'}), '((-mat, (rows1, cols0)), shape=(nall, nall))\n', (12545, 12589), True, 'import scipy.sparse as sparse\n'), ((12607, 12667), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(mat, (rows1, cols1))'], {'shape': '(nall, nall)'}), '((mat, (rows1, cols1)), shape=(nall, nall))\n', (12624, 12667), True, 'import scipy.sparse as sparse\n'), ((12984, 13013), 'scipy.linalg.norm', 'linalg.norm', (['normalsS'], {'axis': '(1)'}), '(normalsS, axis=1)\n', (12995, 13013), True, 'import scipy.linalg as linalg\n'), ((13037, 13047), 'numpy.sum', 'np.sum', (['dS'], {}), '(dS)\n', (13043, 13047), True, 'import numpy as np\n'), ((9986, 10025), 'numpy.zeros', 'np.zeros', ([], {'shape': 'rows.shape', 'dtype': 'float'}), '(shape=rows.shape, dtype=float)\n', (9994, 10025), True, 'import numpy as np\n'), ((11832, 11875), 'numpy.einsum', 'np.einsum', (['"""n,kl->nkl"""', '(dS * scale)', 'massloc'], {}), "('n,kl->nkl', dS * scale, massloc)\n", (11841, 11875), True, 'import numpy as np\n'), ((13223, 13253), 'numpy.sum', 'np.sum', (['res[icomp::self.ncomp]'], {}), '(res[icomp::self.ncomp])\n', (13229, 13253), True, 'import numpy as np\n'), ((1142, 1164), 'inspect.signature', 'inspect.signature', (['fct'], {}), '(fct)\n', (1159, 1164), False, 'import inspect\n'), ((6874, 6891), 'numpy.arange', 'np.arange', (['ncells'], {}), '(ncells)\n', (6883, 6891), True, 'import numpy as np\n'), ((12045, 12066), 'numpy.tile', 'np.tile', (['d0', '(nloc - 1)'], {}), '(d0, nloc - 1)\n', (12052, 12066), True, 'import numpy as np\n'), ((12134, 12155), 'numpy.tile', 'np.tile', (['d1', '(nloc - 1)'], {}), '(d1, nloc - 1)\n', (12141, 12155), True, 'import numpy as np\n'), ((6937, 6958), 'numpy.repeat', 'np.repeat', (['foc', 'ncomp'], {}), '(foc, ncomp)\n', (6946, 
6958), True, 'import numpy as np\n'), ((10188, 10251), 'numpy.einsum', 'np.einsum', (['"""nk,nl->nkl"""', 'cellgrads[:, :, i]', 'cellgrads[:, :, j]'], {}), "('nk,nl->nkl', cellgrads[:, :, i], cellgrads[:, :, j])\n", (10197, 10251), True, 'import numpy as np\n'), ((10319, 10382), 'numpy.einsum', 'np.einsum', (['"""nk,nl->nkl"""', 'cellgrads[:, :, j]', 'cellgrads[:, :, i]'], {}), "('nk,nl->nkl', cellgrads[:, :, j], cellgrads[:, :, i])\n", (10328, 10382), True, 'import numpy as np\n'), ((10449, 10512), 'numpy.einsum', 'np.einsum', (['"""nk,nl->nkl"""', 'cellgrads[:, :, j]', 'cellgrads[:, :, j]'], {}), "('nk,nl->nkl', cellgrads[:, :, j], cellgrads[:, :, j])\n", (10458, 10512), True, 'import numpy as np\n'), ((1243, 1312), 'numpy.vectorize', 'np.vectorize', (['f[col][icomp]'], {'signature': '"""(n),(n),(n),(n),(n),(n)->(n)"""'}), "(f[col][icomp], signature='(n),(n),(n),(n),(n),(n)->(n)')\n", (1255, 1312), True, 'import numpy as np\n'), ((1488, 1515), 'numpy.vectorize', 'np.vectorize', (['f[col][icomp]'], {}), '(f[col][icomp])\n', (1500, 1515), True, 'import numpy as np\n')] |
# 2D Convolution (Image Filtering)
# coding: utf-8
import numpy as np
from cv2 import cv2
import matplotlib.pyplot as plt
# Load the demo image.  cv2.imread does NOT raise on a missing/unreadable
# file — it silently returns None, and the script would then die inside
# cvtColor with a cryptic OpenCV assertion; fail early with a clear message.
img = cv2.imread(r'pictures\opencv.png')
if img is None:
    raise FileNotFoundError(r"Could not read image 'pictures\opencv.png'")
# OpenCV loads images in BGR channel order; convert to RGB for matplotlib.
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# 5x5 averaging (box) kernel: every entry is 1/25, so each output pixel is
# the mean of its 5x5 neighbourhood.
kernel = np.ones((5, 5), np.float32)/25
# ddepth = -1: The same with the original image
dst = cv2.filter2D(img, -1, kernel)
# Show original and filtered images side by side, hiding axis ticks.
plt.subplot(121), plt.imshow(img), plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(dst), plt.title('Averaging')
plt.xticks([]), plt.yticks([])
plt.show()
# As for one-dimensional signals, images also can be filtered with various low-pass filters (LPF), high-pass filters (HPF), etc. A LPF helps in removing noise, or blurring the image. A HPF filters helps in finding edges in an image.
# OpenCV provides a function, cv2.filter2D()
# Filtering with the above kernel results in the following being performed: for each pixel, a 5x5 window is centered on this pixel, all pixels falling within this window are summed up, and the result is then divided by 25. This equates to computing the average of the pixel values inside that window. This operation is performed for all the pixels in the image to produce the output filtered image.
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"cv2.cv2.filter2D",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"numpy.ones",
"cv2.cv2.imread",
"matplotlib.pyplot.xticks",
"cv2.cv2.cvtColor"
] | [((129, 163), 'cv2.cv2.imread', 'cv2.imread', (['"""pictures\\\\opencv.png"""'], {}), "('pictures\\\\opencv.png')\n", (139, 163), False, 'from cv2 import cv2\n'), ((170, 206), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (182, 206), False, 'from cv2 import cv2\n'), ((302, 331), 'cv2.cv2.filter2D', 'cv2.filter2D', (['img', '(-1)', 'kernel'], {}), '(img, -1, kernel)\n', (314, 331), False, 'from cv2 import cv2\n'), ((510, 520), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (518, 520), True, 'import matplotlib.pyplot as plt\n'), ((217, 244), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.float32'], {}), '((5, 5), np.float32)\n', (224, 244), True, 'import numpy as np\n'), ((333, 349), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (344, 349), True, 'import matplotlib.pyplot as plt\n'), ((351, 366), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (361, 366), True, 'import matplotlib.pyplot as plt\n'), ((368, 389), 'matplotlib.pyplot.title', 'plt.title', (['"""Original"""'], {}), "('Original')\n", (377, 389), True, 'import matplotlib.pyplot as plt\n'), ((390, 404), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (400, 404), True, 'import matplotlib.pyplot as plt\n'), ((406, 420), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (416, 420), True, 'import matplotlib.pyplot as plt\n'), ((421, 437), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (432, 437), True, 'import matplotlib.pyplot as plt\n'), ((439, 454), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dst'], {}), '(dst)\n', (449, 454), True, 'import matplotlib.pyplot as plt\n'), ((456, 478), 'matplotlib.pyplot.title', 'plt.title', (['"""Averaging"""'], {}), "('Averaging')\n", (465, 478), True, 'import matplotlib.pyplot as plt\n'), ((479, 493), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (489, 493), True, 'import matplotlib.pyplot 
as plt\n'), ((495, 509), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (505, 509), True, 'import matplotlib.pyplot as plt\n')] |
'''
Unit test of the COTAT plus driver using same data as top-level
Fortran FRUIT unit tests
Created on Oct 19, 2016
@author: thomasriddick
'''
import unittest
import numpy as np
import os
import re
import textwrap
import subprocess
from subprocess import CalledProcessError
from Dynamic_HD_Scripts.base import field
from Dynamic_HD_Scripts.tools import cotat_plus_driver
from Dynamic_HD_Scripts import context as scripts_context
from Dynamic_HD_Script_Tests.context import data_dir,valgrind_path
class Test(unittest.TestCase):
"""Unit test object"""
show_output = False
input_fine_river_directions_test_data = np.array([[-1,-1,-1, -1,0,4, 2,2,2, 2,2,2, 3,2,2],
[-1,-1,-1, -1,-1,-1, 0,4,4, 4,4,1, 4,4,1],
[-1,-1,-1, -1,-1,9, 8,8,8, 1,7,7, 4,4,4],
[-1,-1,-1, -1,-1,0, 4,4,4, 6,8,5, 8,4,8],
[-1,0,4, 4,4,5, 7,1,8, 4,6,7, 6,5,4],
[-1,0,4, 4,5,7, 4,4,1, 9,6,8, 1,6,2],
[-1,-1,0, 4,6,8, 4,4,6, 6,6,7, 4,2,5],
[-1,-1,-1, 7,6,8, 4,1,3, 9,9,8, 8,7,4],
[-1,0,0, 4,6,8, 4,4,5, 1,5,5, 9,1,7],
[0,8,7, 7,7,7, 7,4,4, 6,7,4, 4,1,2],
[8,8,8, 8,1,2, 6,7,5, 9,8,6, 8,7,4],
[9,2,7, 4,4,2, 9,8,7, 9,8,4, 9,8,8],
[6,6,8, 4,8,1, 2,1,8, 3,8,2, 3,5,8],
[4,6,8, 7,8,7, 4,4,3, 3,2,6, 6,5,8],
[9,8,8, 7,8,5, 8,9,8, 6,6,8, 9,8,7]],
dtype=np.int64)
input_fine_total_cumulative_flow_test_data = np.array([[1,1,1, 1,1,1, 1,1,1, 1,1,1, 1,1,1],
[1,1,1, 1,1,1, 52,48,45, 42,11,6, 4,3,2],
[1,1,1, 1,1,1, 1,1,1, 1,29,9, 8,5,2],
[1,1,1, 1,1,8, 6,5,4, 1,22,1, 2,1,1],
[1,55,54, 53,52,1, 1,1,2, 1,2,20, 1,3,1],
[1,3,2, 1,1,51, 3,1,1, 1,16,17, 1,1,2],
[1,1,3, 1,1,47, 3,2,1, 2,4,15, 7,1,3],
[1,1,1, 1,1,42, 1,1,1, 1,1,1, 1,5,1],
[1,35,5, 2,2,39, 3,1,1, 24,1,1, 1,1,1],
[2,32,2, 2,1,1, 33,26,25, 1,22,14, 13,1,1],
[1,31,1, 1,1,1, 1,6,1, 1,5,1, 3,8,5],
[1,1,29, 15,13,2, 1,1,2, 1,3,1, 1,1,3],
[1,3,13, 1,12,3, 1,1,1, 1,1,1, 1,1,2],
[1,4,7, 1,5,6, 5,1,3, 1,2,11, 12,17,1],
[1,1,1, 1,1,1, 1,1,1, 4,8,9, 1,1,1]],
dtype=np.int64)
small_grid_expected_result = np.array([[-1,6,0,4,4],
[0,4,4,8,5],
[0,7,4,8,7],
[8,4,7,4,7],
[8,7,7,6,5]],dtype=np.int64)
directory = None
def setUp(self):
"""Unit test setup. Creates a temporary directory for results if necessary"""
#create files
if False:
self.directory = os.path.expanduser('~')+ '/temp'
else:
self.directory = data_dir + '/temp'
try:
os.stat(self.directory)
except:
os.mkdir(self.directory)
self.cotat_params_file_path = os.path.join(self.directory,'cotat_plus_params_temp.nl')
def testUsingSmallGrid(self):
"""
Test using a small 5 by 5 grid
Same data was used in FRUIT unit testing
"""
input_fine_river_directions_test_field = field.makeField(self.input_fine_river_directions_test_data,
field_type='RiverDirections',
grid_type='LatLong',nlat=15,nlong=15)
input_fine_total_cumulative_flow_test_field = field.makeField(self.input_fine_total_cumulative_flow_test_data,
field_type='CumulativeFlow',
grid_type='LatLong',nlat=15,nlong=15)
cotat_params_text =\
"""
&cotat_parameters
MUFP = 1.5
area_threshold = 9
run_check_for_sinks = .True.
/
"""
with open(self.cotat_params_file_path,'w') as f:
f.write(textwrap.dedent(cotat_params_text))
output_course_river_directions = \
cotat_plus_driver.run_cotat_plus(fine_rdirs_field=input_fine_river_directions_test_field,
fine_total_cumulative_flow_field=\
input_fine_total_cumulative_flow_test_field,
cotat_plus_parameters_filepath=self.cotat_params_file_path,
course_grid_type='LatLong',nlat=5,nlong=5)
np.testing.assert_array_equal(output_course_river_directions.get_data(),
self.small_grid_expected_result,
"Running scaling code over small 5 by 5 grid doesn't"
" produce expected results")
    @unittest.skip("Valgrind not working at present")
    def testForMemoryLeaksWithValgrind(self):
        """Run valgrind to check no new memory leaks are occurring"""
        # Runs the Fortran project executable under valgrind and parses the
        # "definitely lost"/"indirectly lost" byte counts out of its report.
        try:
            valgrind_output = subprocess.check_output([valgrind_path,'--leak-check=full',
                                                       scripts_context.fortran_project_executable_path],
                                                      stderr=subprocess.STDOUT,
                                                      cwd=scripts_context.fortran_project_include_path)
        # NOTE(review): unqualified name — presumably `from subprocess import
        # CalledProcessError` appears in the (not visible) import block; confirm.
        except CalledProcessError as cperror:
            raise RuntimeError("Failure in called process {0}; return code {1}; output:\n{2}".format(cperror.cmd,
                                                                                                      cperror.returncode,
                                                                                                      cperror.output))
        # NOTE(review): on Python 3, check_output returns bytes, so searching it
        # with a str pattern would raise TypeError — this looks like Python 2
        # era code; confirm whether the output should be decoded first.
        direct_mem_loss_match = re.search(r'definitely lost: ([,0-9]*)',valgrind_output)
        indirect_mem_loss_match = re.search(r'indirectly lost: ([,0-9]*)',valgrind_output)
        if self.show_output:
            print(valgrind_output)
        # Valgrind prints counts with thousands separators (e.g. "1,024");
        # strip the commas before converting.
        direct_mem_loss = int(direct_mem_loss_match.group(1).replace(',',''))
        indirect_mem_loss = int(indirect_mem_loss_match.group(1).replace(',',''))
        # 80 byte loss is a known problem that occurs sometimes related to using valgrind in python
        self.assertTrue((direct_mem_loss == 0 or direct_mem_loss == 80),"Direct memory leak detected")
        # 68 byte loss is a known problem that occurs sometimes related to using valgrind in python
        self.assertTrue((indirect_mem_loss == 0 or indirect_mem_loss == 68),"Indirect memory leak detected")
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    # Run all tests in this module via unittest's CLI runner.
    unittest.main()
| [
"unittest.main",
"Dynamic_HD_Scripts.base.field.makeField",
"os.path.expanduser",
"os.mkdir",
"textwrap.dedent",
"os.stat",
"subprocess.check_output",
"unittest.skip",
"Dynamic_HD_Scripts.tools.cotat_plus_driver.run_cotat_plus",
"numpy.array",
"re.search",
"os.path.join"
] | [((629, 1432), 'numpy.array', 'np.array', (['[[-1, -1, -1, -1, 0, 4, 2, 2, 2, 2, 2, 2, 3, 2, 2], [-1, -1, -1, -1, -1, -1,\n 0, 4, 4, 4, 4, 1, 4, 4, 1], [-1, -1, -1, -1, -1, 9, 8, 8, 8, 1, 7, 7, 4,\n 4, 4], [-1, -1, -1, -1, -1, 0, 4, 4, 4, 6, 8, 5, 8, 4, 8], [-1, 0, 4, 4,\n 4, 5, 7, 1, 8, 4, 6, 7, 6, 5, 4], [-1, 0, 4, 4, 5, 7, 4, 4, 1, 9, 6, 8,\n 1, 6, 2], [-1, -1, 0, 4, 6, 8, 4, 4, 6, 6, 6, 7, 4, 2, 5], [-1, -1, -1,\n 7, 6, 8, 4, 1, 3, 9, 9, 8, 8, 7, 4], [-1, 0, 0, 4, 6, 8, 4, 4, 5, 1, 5,\n 5, 9, 1, 7], [0, 8, 7, 7, 7, 7, 7, 4, 4, 6, 7, 4, 4, 1, 2], [8, 8, 8, 8,\n 1, 2, 6, 7, 5, 9, 8, 6, 8, 7, 4], [9, 2, 7, 4, 4, 2, 9, 8, 7, 9, 8, 4, \n 9, 8, 8], [6, 6, 8, 4, 8, 1, 2, 1, 8, 3, 8, 2, 3, 5, 8], [4, 6, 8, 7, 8,\n 7, 4, 4, 3, 3, 2, 6, 6, 5, 8], [9, 8, 8, 7, 8, 5, 8, 9, 8, 6, 6, 8, 9, \n 8, 7]]'], {'dtype': 'np.int64'}), '([[-1, -1, -1, -1, 0, 4, 2, 2, 2, 2, 2, 2, 3, 2, 2], [-1, -1, -1, -\n 1, -1, -1, 0, 4, 4, 4, 4, 1, 4, 4, 1], [-1, -1, -1, -1, -1, 9, 8, 8, 8,\n 1, 7, 7, 4, 4, 4], [-1, -1, -1, -1, -1, 0, 4, 4, 4, 6, 8, 5, 8, 4, 8],\n [-1, 0, 4, 4, 4, 5, 7, 1, 8, 4, 6, 7, 6, 5, 4], [-1, 0, 4, 4, 5, 7, 4, \n 4, 1, 9, 6, 8, 1, 6, 2], [-1, -1, 0, 4, 6, 8, 4, 4, 6, 6, 6, 7, 4, 2, 5\n ], [-1, -1, -1, 7, 6, 8, 4, 1, 3, 9, 9, 8, 8, 7, 4], [-1, 0, 0, 4, 6, 8,\n 4, 4, 5, 1, 5, 5, 9, 1, 7], [0, 8, 7, 7, 7, 7, 7, 4, 4, 6, 7, 4, 4, 1, \n 2], [8, 8, 8, 8, 1, 2, 6, 7, 5, 9, 8, 6, 8, 7, 4], [9, 2, 7, 4, 4, 2, 9,\n 8, 7, 9, 8, 4, 9, 8, 8], [6, 6, 8, 4, 8, 1, 2, 1, 8, 3, 8, 2, 3, 5, 8],\n [4, 6, 8, 7, 8, 7, 4, 4, 3, 3, 2, 6, 6, 5, 8], [9, 8, 8, 7, 8, 5, 8, 9,\n 8, 6, 6, 8, 9, 8, 7]], dtype=np.int64)\n', (637, 1432), True, 'import numpy as np\n'), ((2101, 2914), 'numpy.array', 'np.array', (['[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 52, 48, \n 45, 42, 11, 6, 4, 3, 2], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 29, 9, 8, 5, 2],\n [1, 1, 1, 1, 1, 8, 6, 5, 4, 1, 22, 1, 2, 1, 1], [1, 55, 54, 53, 52, 1, \n 1, 1, 2, 1, 2, 20, 1, 3, 1], [1, 3, 2, 1, 1, 51, 3, 1, 1, 1, 16, 17, 
1,\n 1, 2], [1, 1, 3, 1, 1, 47, 3, 2, 1, 2, 4, 15, 7, 1, 3], [1, 1, 1, 1, 1,\n 42, 1, 1, 1, 1, 1, 1, 1, 5, 1], [1, 35, 5, 2, 2, 39, 3, 1, 1, 24, 1, 1,\n 1, 1, 1], [2, 32, 2, 2, 1, 1, 33, 26, 25, 1, 22, 14, 13, 1, 1], [1, 31,\n 1, 1, 1, 1, 1, 6, 1, 1, 5, 1, 3, 8, 5], [1, 1, 29, 15, 13, 2, 1, 1, 2, \n 1, 3, 1, 1, 1, 3], [1, 3, 13, 1, 12, 3, 1, 1, 1, 1, 1, 1, 1, 1, 2], [1,\n 4, 7, 1, 5, 6, 5, 1, 3, 1, 2, 11, 12, 17, 1], [1, 1, 1, 1, 1, 1, 1, 1, \n 1, 4, 8, 9, 1, 1, 1]]'], {'dtype': 'np.int64'}), '([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1,\n 52, 48, 45, 42, 11, 6, 4, 3, 2], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 29, 9, \n 8, 5, 2], [1, 1, 1, 1, 1, 8, 6, 5, 4, 1, 22, 1, 2, 1, 1], [1, 55, 54, \n 53, 52, 1, 1, 1, 2, 1, 2, 20, 1, 3, 1], [1, 3, 2, 1, 1, 51, 3, 1, 1, 1,\n 16, 17, 1, 1, 2], [1, 1, 3, 1, 1, 47, 3, 2, 1, 2, 4, 15, 7, 1, 3], [1, \n 1, 1, 1, 1, 42, 1, 1, 1, 1, 1, 1, 1, 5, 1], [1, 35, 5, 2, 2, 39, 3, 1, \n 1, 24, 1, 1, 1, 1, 1], [2, 32, 2, 2, 1, 1, 33, 26, 25, 1, 22, 14, 13, 1,\n 1], [1, 31, 1, 1, 1, 1, 1, 6, 1, 1, 5, 1, 3, 8, 5], [1, 1, 29, 15, 13, \n 2, 1, 1, 2, 1, 3, 1, 1, 1, 3], [1, 3, 13, 1, 12, 3, 1, 1, 1, 1, 1, 1, 1,\n 1, 2], [1, 4, 7, 1, 5, 6, 5, 1, 3, 1, 2, 11, 12, 17, 1], [1, 1, 1, 1, 1,\n 1, 1, 1, 1, 4, 8, 9, 1, 1, 1]], dtype=np.int64)\n', (2109, 2914), True, 'import numpy as np\n'), ((3643, 3760), 'numpy.array', 'np.array', (['[[-1, 6, 0, 4, 4], [0, 4, 4, 8, 5], [0, 7, 4, 8, 7], [8, 4, 7, 4, 7], [8, 7,\n 7, 6, 5]]'], {'dtype': 'np.int64'}), '([[-1, 6, 0, 4, 4], [0, 4, 4, 8, 5], [0, 7, 4, 8, 7], [8, 4, 7, 4, \n 7], [8, 7, 7, 6, 5]], dtype=np.int64)\n', (3651, 3760), True, 'import numpy as np\n'), ((6318, 6366), 'unittest.skip', 'unittest.skip', (['"""Valgrind not working at present"""'], {}), "('Valgrind not working at present')\n", (6331, 6366), False, 'import unittest\n'), ((8175, 8190), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8188, 8190), False, 'import unittest\n'), ((4341, 4398), 'os.path.join', 'os.path.join', 
(['self.directory', '"""cotat_plus_params_temp.nl"""'], {}), "(self.directory, 'cotat_plus_params_temp.nl')\n", (4353, 4398), False, 'import os\n'), ((4596, 4730), 'Dynamic_HD_Scripts.base.field.makeField', 'field.makeField', (['self.input_fine_river_directions_test_data'], {'field_type': '"""RiverDirections"""', 'grid_type': '"""LatLong"""', 'nlat': '(15)', 'nlong': '(15)'}), "(self.input_fine_river_directions_test_data, field_type=\n 'RiverDirections', grid_type='LatLong', nlat=15, nlong=15)\n", (4611, 4730), False, 'from Dynamic_HD_Scripts.base import field\n'), ((4908, 5046), 'Dynamic_HD_Scripts.base.field.makeField', 'field.makeField', (['self.input_fine_total_cumulative_flow_test_data'], {'field_type': '"""CumulativeFlow"""', 'grid_type': '"""LatLong"""', 'nlat': '(15)', 'nlong': '(15)'}), "(self.input_fine_total_cumulative_flow_test_data, field_type\n ='CumulativeFlow', grid_type='LatLong', nlat=15, nlong=15)\n", (4923, 5046), False, 'from Dynamic_HD_Scripts.base import field\n'), ((5548, 5842), 'Dynamic_HD_Scripts.tools.cotat_plus_driver.run_cotat_plus', 'cotat_plus_driver.run_cotat_plus', ([], {'fine_rdirs_field': 'input_fine_river_directions_test_field', 'fine_total_cumulative_flow_field': 'input_fine_total_cumulative_flow_test_field', 'cotat_plus_parameters_filepath': 'self.cotat_params_file_path', 'course_grid_type': '"""LatLong"""', 'nlat': '(5)', 'nlong': '(5)'}), "(fine_rdirs_field=\n input_fine_river_directions_test_field,\n fine_total_cumulative_flow_field=\n input_fine_total_cumulative_flow_test_field,\n cotat_plus_parameters_filepath=self.cotat_params_file_path,\n course_grid_type='LatLong', nlat=5, nlong=5)\n", (5580, 5842), False, 'from Dynamic_HD_Scripts.tools import cotat_plus_driver\n'), ((7310, 7366), 're.search', 're.search', (['"""definitely lost: ([,0-9]*)"""', 'valgrind_output'], {}), "('definitely lost: ([,0-9]*)', valgrind_output)\n", (7319, 7366), False, 'import re\n'), ((7401, 7457), 're.search', 're.search', (['"""indirectly lost: 
([,0-9]*)"""', 'valgrind_output'], {}), "('indirectly lost: ([,0-9]*)', valgrind_output)\n", (7410, 7457), False, 'import re\n'), ((4226, 4249), 'os.stat', 'os.stat', (['self.directory'], {}), '(self.directory)\n', (4233, 4249), False, 'import os\n'), ((6527, 6722), 'subprocess.check_output', 'subprocess.check_output', (["[valgrind_path, '--leak-check=full', scripts_context.\n fortran_project_executable_path]"], {'stderr': 'subprocess.STDOUT', 'cwd': 'scripts_context.fortran_project_include_path'}), "([valgrind_path, '--leak-check=full',\n scripts_context.fortran_project_executable_path], stderr=subprocess.\n STDOUT, cwd=scripts_context.fortran_project_include_path)\n", (6550, 6722), False, 'import subprocess\n'), ((4106, 4129), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (4124, 4129), False, 'import os\n'), ((4278, 4302), 'os.mkdir', 'os.mkdir', (['self.directory'], {}), '(self.directory)\n', (4286, 4302), False, 'import os\n'), ((5457, 5491), 'textwrap.dedent', 'textwrap.dedent', (['cotat_params_text'], {}), '(cotat_params_text)\n', (5472, 5491), False, 'import textwrap\n')] |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
def bilinear_interp_np(input,
                       out_h,
                       out_w,
                       out_size=None,
                       actual_shape=None,
                       align_corners=True,
                       align_mode=0):
    """NumPy reference implementation of bilinear interpolation.

    Resizes ``input`` of shape [N, C, H, W] to [N, C, out_h, out_w].
    ``out_size`` overrides ``out_h``/``out_w``; ``actual_shape`` in turn
    overrides ``out_size``.  ``align_mode == 0`` combined with
    ``align_corners == False`` selects half-pixel source coordinates.
    """
    if out_size is not None:
        out_h, out_w = out_size[0], out_size[1]
    if actual_shape is not None:
        out_h, out_w = actual_shape[0], actual_shape[1]
    batch_size, channel, in_h, in_w = input.shape

    half_pixel = align_mode == 0 and not align_corners

    def scale_ratio(in_dim, out_dim):
        # Mirror the op's ratio convention for the two alignment schemes;
        # a degenerate (size-1) output dimension uses ratio 0.
        if out_dim <= 1:
            return 0.0
        if align_corners:
            return (in_dim - 1.0) / (out_dim - 1.0)
        return 1.0 * in_dim / out_dim

    def src_index(ratio, dst_idx):
        # Map a destination index to a clamped source index plus the
        # fractional weight toward the next source row/column.  With
        # half-pixel coordinates the weight may be slightly negative at
        # the left/top border, matching the original truncate-then-clamp.
        pos = ratio * (dst_idx + 0.5) - 0.5 if half_pixel else ratio * dst_idx
        idx = max(0, int(pos))
        return idx, pos - idx

    ratio_h = scale_ratio(in_h, out_h)
    ratio_w = scale_ratio(in_w, out_w)

    out = np.zeros((batch_size, channel, out_h, out_w))
    for i in range(out_h):
        h, hlam = src_index(ratio_h, i)
        hstep = 1 if h < in_h - 1 else 0
        for j in range(out_w):
            w, wlam = src_index(ratio_w, j)
            wstep = 1 if w < in_w - 1 else 0
            # Blend horizontally along the top and bottom source rows,
            # then blend the two rows vertically.
            top = (1.0 - wlam) * input[:, :, h, w] + \
                  wlam * input[:, :, h, w + wstep]
            bottom = (1.0 - wlam) * input[:, :, h + hstep, w] + \
                     wlam * input[:, :, h + hstep, w + wstep]
            out[:, :, i, j] = (1.0 - hlam) * top + hlam * bottom
    return out.astype(input.dtype)
class TestBilinearInterpOp(OpTest):
    """Checks the ``bilinear_interp`` operator's output against the NumPy
    reference implementation ``bilinear_interp_np``."""

    def setUp(self):
        self.out_size = None
        self.actual_shape = None
        self.init_test_case()
        self.op_type = "bilinear_interp"
        x = np.random.random(self.input_shape).astype("float32")
        # A positive scale factor overrides the explicit output size.
        if self.scale > 0:
            out_h = int(self.input_shape[2] * self.scale)
            out_w = int(self.input_shape[3] * self.scale)
        else:
            out_h, out_w = self.out_h, self.out_w
        expected = bilinear_interp_np(x, out_h, out_w, self.out_size,
                                       self.actual_shape, self.align_corners,
                                       self.align_mode)
        self.inputs = {'X': x}
        if self.out_size is not None:
            self.inputs['OutSize'] = self.out_size
        self.attrs = {
            'out_h': self.out_h,
            'out_w': self.out_w,
            'scale': self.scale,
            'interp_method': self.interp_method,
            'align_corners': self.align_corners,
            'align_mode': self.align_mode,
        }
        self.outputs = {'Out': expected}

    def test_check_output(self):
        self.check_output(is_interp=True)

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [1, 19, 32, 32]
        self.out_h = 2
        self.out_w = 2
        self.scale = 0.
        # The OutSize input takes precedence over out_h/out_w above.
        self.out_size = np.array([512, 512]).astype("int32")
        self.align_corners = False
        self.align_mode = 1
if __name__ == "__main__":
    # Run the operator test suite via unittest's CLI runner.
    unittest.main()
| [
"unittest.main",
"numpy.random.random",
"numpy.zeros",
"numpy.array"
] | [((1641, 1686), 'numpy.zeros', 'np.zeros', (['(batch_size, channel, out_h, out_w)'], {}), '((batch_size, channel, out_h, out_w))\n', (1649, 1686), True, 'import numpy as np\n'), ((4489, 4504), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4502, 4504), False, 'import unittest\n'), ((3102, 3136), 'numpy.random.random', 'np.random.random', (['self.input_shape'], {}), '(self.input_shape)\n', (3118, 3136), True, 'import numpy as np\n'), ((4356, 4376), 'numpy.array', 'np.array', (['[512, 512]'], {}), '([512, 512])\n', (4364, 4376), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.