code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
import nlopt as nl
from .types import Folds
from dora.regressors.gp import predict
from dora.regressors.gp import linalg
from dora.regressors.gp import types
import sklearn.cluster as skcluster
import scipy.interpolate as interp
from scipy.spatial import Delaunay
import scipy.stats as stats
from scipy.linalg import solve_triangular
import scipy.linalg as la
import copy
# Compute the log marginal likelihood
def negative_log_marginal_likelihood(Y, L, alpha):
    """Return the negative log marginal likelihood of a GP fit.

    Arguments:
        Y: (n,) observed targets.
        L: (n, n) lower Cholesky factor of the covariance matrix K.
        alpha: (n,) solution of K alpha = Y.
    """
    n_points = L.shape[0]
    data_fit = np.dot(Y.ravel(), alpha.ravel())
    # log det K = 2 * sum(log(diag(L))) for a Cholesky factor L.
    complexity = 2.0 * np.log(np.diag(L)).sum()
    normaliser = n_points * np.log(2.0 * np.pi)
    return 0.5 * (data_fit + complexity + normaliser)
# neCompute the leave one out neg log prob
def negative_log_prob_cross_val(Y, L, alpha):
    """Return the negative leave-one-out log predictive probability.

    Implements the LOO-CV estimator (Rasmussen & Williams, GPML,
    eqs. 5.10-5.12): for each point, mu_i = y_i - alpha_i / Kinv_ii and
    sigma_i^2 = 1 / Kinv_ii, where Kinv = K^{-1}.

    Arguments:
        Y: (n,) observed targets.
        L: (n, n) lower Cholesky factor of K.
        alpha: (n,) solution of K alpha = Y.
    """
    n = L.shape[0]
    Kinv = np.linalg.solve(L.T, solve_triangular(L, np.eye(n), lower=True))
    logprob = 0
    for i in range(n):
        Kinvii = Kinv[i][i]
        mu_i = Y[i] - alpha[i]/Kinvii
        sig2i = 1/Kinvii
        # BUG FIX: scipy's `scale` parameter is the standard deviation,
        # not the variance; the original passed the variance sig2i directly.
        logprob += stats.norm.logpdf(Y[i], loc=mu_i, scale=np.sqrt(sig2i))
    return -float(logprob)
# The inverse of opt_config_copys_to_vector - gets called a lot
def unpack(theta, unpackinfo):
    """Rebuild nested parameter structures from a flat vector.

    Inverse of `pack`: each (indices, shape) tuple in `unpackinfo` selects
    a slice of `theta` and restores its original shape; a shape of ()
    yields a plain scalar. Called very frequently, so kept compact.
    """
    restore = lambda idx, shape: theta[idx].reshape(shape) if shape != () else theta[idx][0]
    return [[restore(idx, shape) for idx, shape in group] for group in unpackinfo]
def make_folds(X, y, target_size, method='random'):
    """Split (X, y) into folds of roughly `target_size` points each.

    Arguments:
        X: (n, d) input locations.
        y: (n, ...) targets.
        target_size: desired number of points per fold.
        method: 'random' (uniform shuffle), 'cluster' (hard k-means
            assignment) or 'rcluster' (randomised assignment from
            interpolated cluster-membership probabilities).

    Returns:
        A Folds container with per-fold X, Y and flat_y lists.

    Raises:
        NameError: if `method` is not one of the recognised strategies.
    """
    n_Y = y.shape[0]
    # At least one fold; the second term guards against target_size > n_Y.
    n_folds = int(n_Y/target_size) + int(target_size>n_Y)
    if method == 'random':
        fold_assignment = np.random.permutation(n_Y)%n_folds
    elif method == 'cluster':
        # Thanks scikit
        print('Clustering [sklearn.cluster] inputs')
        clusterer = skcluster.MiniBatchKMeans(n_clusters=n_folds, batch_size=1000)
        fold_assignment = clusterer.fit_predict(X)
    elif method == 'rcluster':
        print('Clustering [sklearn.cluster] inputs')
        clusters = skcluster.MiniBatchKMeans(n_clusters=n_folds,
                batch_size=1000, compute_labels=True).fit(X)
        Xcluster = clusters.cluster_centers_
        print('Interpolating probability')
        n_X = X.shape[0]
        assign_prob = np.zeros((n_folds, n_X))
        # Triangulate the cluster centres so membership indicators can be
        # linearly interpolated over the input points.
        tris = Delaunay(Xcluster)
        base_labels = clusters.labels_
        for i in range(n_folds):
            indicator = np.zeros(n_folds)
            indicator[i] = 1.
            row = interp.LinearNDInterpolator(tris, indicator,
                    fill_value=-1)(X)
            # Points outside the convex hull (marked -1) fall back to the
            # hard cluster label.
            row[row<0] = base_labels[row<0] == i
            assign_prob[i] = row
        # now use these as selection probabilities
        assign_prob = np.cumsum(assign_prob, axis=0)
        rvec = np.random.random(n_X)
        fold_assignment = np.sum(rvec[np.newaxis, :] <assign_prob, axis=0)
        # verify fold assignment?
        # pl.scatter(X[:, 0], X[:, 1], c=fold_assignment)
        # pl.show()
        # exit()
    else:
        raise NameError('Unrecognised fold method:'+method)
    fold_inds = np.unique(fold_assignment)
    folds = Folds(n_folds, [], [], [])  # might contain lists in the multitask case
    where = lambda y, v: y[np.where(v)[0]]
    for f in fold_inds:
        folds.X.append(where(X, fold_assignment==f))
        folds.Y.append(where(y, fold_assignment==f))
        folds.flat_y.append(where(y, fold_assignment==f))
    return folds
# Extended to allow lists of arrays of opt_config_copyeters for sigma, signal and noise so we can use multiple kernels etc
# unlike unpack, this doesn't need to be super efficient - its only called once
def pack(theta, noisepar):
    """Flatten kernel and noise parameter structures into one vector.

    Returns the concatenated flat parameter vector plus the `unpackinfo`
    needed by `unpack` to reverse the operation. Unlike `unpack`, this is
    only called once per optimisation, so clarity beats speed.
    """
    unpackinfo = [[], []]
    flat_pieces = []
    offset = 0
    for slot, group in enumerate((theta, noisepar)):
        for value in group:
            arr = np.array(value)
            flat = arr.ravel()
            flat_pieces.append(flat)
            end = offset + flat.shape[0]
            # Index lists (not ranges) for compatibility with python3.4.
            unpackinfo[slot].append((list(range(offset, end)), arr.shape))
            offset = end
    return np.concatenate(flat_pieces), unpackinfo
def condition(X, y, kernelFn, hyper_opt_config_copys):
    """Condition a GP on data, returning a RegressionParams bundle.

    `hyper_opt_config_copys` is a (kernel_hypers, noise_std) pair; the
    kernel hyperparameters are closed over into a two-argument kernel.
    """
    assert len(y.shape) == 1  # y must be flat, shape (n,)
    h_kernel, noise_std = hyper_opt_config_copys
    def kernel(x1, x2):
        return kernelFn(x1, x2, h_kernel)
    noise = predict.noise_vector(X, noise_std)
    chol = linalg.cholesky(X, kernel, noise)
    weights = predict.alpha(y, chol)
    return types.RegressionParams(X, chol, weights, kernel, y, noise_std)
def chol_up(L, Sn, Snn, Snn_noise_std_vec):
    """Incrementally extend a Cholesky factor with appended points.

    Given L = chol(K) and the cross/self covariances Sn, Snn of the new
    points (plus their noise std vector), return the lower Cholesky
    factor of the enlarged covariance matrix.
    """
    Ln = la.solve_triangular(L, Sn, lower=True).T
    zeros_top = np.zeros(Ln.shape).T
    noise = np.diag(Snn_noise_std_vec ** 2)
    # jitchol adds jitter if the Schur complement is near-singular.
    Lnn = linalg.jitchol(Snn + noise - Ln.dot(Ln.T))
    upper = np.concatenate((L, zeros_top), axis=1)
    lower = np.concatenate((Ln, Lnn), axis=1)
    return np.concatenate((upper, lower), axis=0)
def chol_up_insert(L, V12, V23, V22, Snn_noise_std_vec, insertionID):
    """Update a Cholesky factor when rows/columns are inserted mid-matrix.

    L is the lower Cholesky factor of the original covariance. V12 and V23
    are the cross-covariances of the inserted block with the parts before
    and after `insertionID`, and V22 is its self-covariance. Works on the
    upper factor R = L.T in blocks and returns the new lower factor.
    """
    R = L.T
    N = R.shape[0]    # original matrix size
    n = V22.shape[0]  # number of inserted points
    noise = np.diag(Snn_noise_std_vec ** 2)
    # Blocks of the existing upper factor, split at the insertion point.
    R11 = R[:insertionID, :insertionID]
    R33 = R[insertionID:, insertionID:]
    S11 = R11
    S12 = la.solve_triangular(R11.T, V12, lower=True)
    S13 = R[:insertionID, insertionID:]
    S22 = linalg.jitchol(V22+noise - S12.T.dot(S12)).T
    if V23.shape[1] != 0:  # The data is being inserted between columns
        S23 = la.solve_triangular(S22.T, (V23-S12.T.dot(S13)), lower=True)
        S33 = linalg.jitchol(R33.T.dot(R33)-S23.T.dot(S23)).T
    else:  # the data is being appended at the end of the matrix
        S23 = np.zeros((n, 0))
        S33 = np.zeros((0, 0))
    # Zero blocks below the diagonal of the new upper factor.
    On1 = np.zeros((n, insertionID))
    On2 = np.zeros((N-insertionID, insertionID))
    On3 = np.zeros((N-insertionID, n))
    top = np.concatenate((S11, S12, S13), axis=1)
    middle = np.concatenate((On1, S22, S23), axis=1)
    bottom = np.concatenate((On2, On3, S33), axis=1)
    # Transpose back to a lower factor.
    return np.concatenate((top, middle, bottom), axis=0).T
def chol_down(L, remIDList):
    """Downdate a lower Cholesky factor by removing rows/columns.

    Arguments:
        L: lower Cholesky factor of the covariance matrix.
        remIDList: indices of the rows/columns to remove (array-like;
            mutated in place by the index shifting below).
    """
    # This works but it might potentially be slower than the naive approach of
    # recomputing the cholesky decomposition from scratch.
    # The jitchol line can apparently be replaced with a chol that exploits the
    # structure of the problem according to Osbourne's Thesis (as
    # cholupdate does).
    remIDList = np.sort(remIDList)
    for i in range(len(remIDList)):
        remID = remIDList[i]
        S = L.T  # work on the upper factor
        n = S.shape[0]
        On = np.zeros((n-(remID+1), remID))
        # Incremental cholesky downdate
        top = np.concatenate((S[:remID, :remID], S[:(remID), (remID+1):]), axis=1)
        S23 = S[remID, (remID+1):][np.newaxis, :]
        S23TS23 = S23.T.dot(S23)
        S33TS33 = S[(remID+1):, (remID+1):].T.dot(S[(remID+1):, (remID+1):])
        R33 = linalg.jitchol(S23TS23+S33TS33).T
        bottom = np.concatenate((On, R33), axis=1)
        L = np.concatenate((top, bottom), axis=0).T
        # Removing a row shifts every later index down by one.
        remIDList -= 1
    return L
def add_data(newX, newY, regressor, query=None, insertionID=None):
    """Add observations to a conditioned GP regressor in place.

    Arguments:
        newX: (m, d) new input locations.
        newY: (m,) new targets.
        regressor: types.RegressionParams instance, updated in place.
        query: optional types.QueryParams whose cross-covariance K_xxs is
            updated alongside the regressor.
        insertionID: row index at which to insert the new data; None
            appends at the end.
    """
    assert(isinstance(regressor, types.RegressionParams))
    assert(not query or isinstance(query, types.QueryParams))
    assert(len(newX.shape) == 2)
    assert(len(newY.shape) == 1)
    # BUG FIX: the original tested `if not(insertionID)`, which is also
    # true for insertionID == 0 and so appended data that should have been
    # inserted at the front. Test explicitly for None.
    if insertionID is None:  # No insertionID provided. Append to the end.
        # Compute the new rows and columns of the covariance matrix
        Kxn = regressor.kernel(regressor.X, newX)
        Knn = regressor.kernel(newX, newX)
        nn_noise_std = predict.noise_vector(newX, regressor.noise_std)
        # Update the regression parameters
        regressor.X = np.vstack((regressor.X, newX))
        regressor.y = np.hstack((regressor.y, newY))
        regressor.L = chol_up(regressor.L, Kxn, Knn,
                              nn_noise_std)
        # sadly, this is still expensive. However osborne's thesis appendix B
        # can be used to speed up this step too. Maybe by a factor of 2.
        regressor.alpha = predict.alpha(regressor.y, regressor.L)
        # Optionally update the query
        if query is not None:
            Kxsn = regressor.kernel(newX, query.Xs)
            query.K_xxs = np.vstack((query.K_xxs, Kxsn))
    else:
        # Compute the new rows and columns of the covariance matrix
        Kx1n = regressor.kernel(regressor.X[:insertionID, :], newX)
        Knx2 = regressor.kernel(newX, regressor.X[insertionID:, :])
        Knn = regressor.kernel(newX, newX)
        nn_noise_std = predict.noise_vector(newX, regressor.noise_std)
        regressor.X = np.vstack((regressor.X[:insertionID, :], newX,
                                  regressor.X[insertionID:, :]))
        regressor.y = np.hstack((regressor.y[:insertionID], newY,
                                  regressor.y[insertionID:]))
        regressor.L = chol_up_insert(regressor.L, Kx1n, Knx2, Knn,
                                     nn_noise_std, insertionID)
        # sadly, this is still expensive. However osborne's thesis appendix B
        # can be used to speed up this step too. Maybe by a factor of 2.
        regressor.alpha = predict.alpha(regressor.y, regressor.L)
        if query is not None:
            Kxsn = regressor.kernel(newX, query.Xs)
            query.K_xxs = np.vstack((query.K_xxs[:insertionID, :], Kxsn,
                                      query.K_xxs[insertionID:, :]))
def remove_data(regressor, remID, query=None):
    """Remove the observations at `remID` from a conditioned GP, in place.

    The Cholesky factor is recomputed from scratch; the incremental
    chol_down path is deliberately left disabled below.
    """
    assert(isinstance(regressor, types.RegressionParams))
    assert(not query or isinstance(query, types.QueryParams))
    regressor.X = np.delete(regressor.X, remID, axis=0)
    regressor.y = np.delete(regressor.y, remID, axis=0)
    # regressor.L = chol_down(regressor.L, remID)
    new_noise = predict.noise_vector(regressor.X, regressor.noise_std)
    regressor.L = linalg.cholesky(regressor.X, regressor.kernel, new_noise)
    regressor.alpha = predict.alpha(regressor.y, regressor.L)
    # Optionally update the query
    if query is not None:
        # Drop the matching rows of the query cross-covariance too.
        query.K_xxs = np.delete(query.K_xxs, remID, axis=0)
def learn(X, Y, cov_fn, optParams, optCrition='logMarg', verbose=False,
          returnLogMarg=False):
    """Learn GP hyperparameters on a single dataset.

    Arguments:
        X, Y: training inputs and targets.
        cov_fn: covariance function cov_fn(x1, x2, sigma).
        optParams: optimiser configuration passed to optimise_hypers.
        optCrition: 'logMarg' or 'crossVal' objective.
        returnLogMarg: also return the (positive) final objective value.
        verbose: print the objective during optimisation.

    Raises:
        ValueError: if `optCrition` is not a recognised criterion.
    """
    # Normal criterion with all the data
    def criterion(sigma, noise):
        k = lambda x1, x2: cov_fn(x1, x2, sigma)
        X_noise = predict.noise_vector(X, noise)
        L = linalg.cholesky(X, k, X_noise)
        a = predict.alpha(Y, L)
        if optCrition == 'logMarg':
            val = negative_log_marginal_likelihood(Y, L, a)
        elif optCrition == 'crossVal':
            val = negative_log_prob_cross_val(Y, L, a)
        else:
            # BUG FIX: an unknown criterion previously crashed later with
            # an UnboundLocalError; fail fast with a clear message.
            raise ValueError('Unknown optCrition: ' + optCrition)
        if verbose is True:
            print('['+str(val)+'] ', sigma, noise)
        return val
    sigma, noise, optval = optimise_hypers(criterion, optParams)
    if verbose:
        print('[', optval, ']:', sigma, noise)
    if returnLogMarg:
        return sigma, noise, -optval
    else:
        return sigma, noise
def learn_folds(folds, cov_fn, optParams, optCrition='logMarg', verbose=False):
    """Learn GP hyperparameters jointly across multiple folds.

    Same as `learn`, but the objective is summed over all folds.

    Raises:
        ValueError: if `optCrition` is not a recognised criterion.
    """
    # todo: distribute computation!
    def criterion(sigma, noise):
        k = lambda x1, x2: cov_fn(x1, x2, sigma)
        val = 0
        for f in range(folds.n_folds):
            Xf = folds.X[f]
            Yf = folds.flat_y[f]
            Xf_noise = predict.noise_vector(Xf, noise)
            Lf = linalg.cholesky(Xf, k, Xf_noise)
            af = predict.alpha(Yf, Lf)
            if optCrition == 'logMarg':
                val += negative_log_marginal_likelihood(Yf, Lf, af)
            elif optCrition == 'crossVal':
                val += negative_log_prob_cross_val(Yf, Lf, af)
            else:
                # BUG FIX: an unknown criterion previously left `val`
                # partially accumulated or crashed downstream; fail fast.
                raise ValueError('Unknown optCrition: ' + optCrition)
        if verbose is True:
            print('['+str(val)+'] ', sigma, noise)
        return val
    sigma, noise, optval = optimise_hypers(criterion, optParams)
    if verbose:
        print('[', optval, ']:', sigma, noise)
    return sigma, noise
def optimise_hypers(criterion, optParams):
    """Minimise `criterion` over the packed hyperparameters with NLopt.

    Returns:
        (sigma, noise_sigma, opt_val): unpacked optimal kernel and noise
        parameters and the final objective value.
    """
    # NLopt objective; `grad` is unused because BOBYQA is derivative-free.
    # `unpackinfo` is bound lazily and is defined by the pack() call below,
    # before the lambda is first invoked.
    objective = lambda theta, grad: criterion(*unpack(theta, unpackinfo))
    theta_low, _ = pack(optParams.sigma.lowerBound, optParams.noise.lowerBound)
    theta_0, unpackinfo = pack(optParams.sigma.initialVal, optParams.noise.initialVal)
    theta_high, _ = pack(optParams.sigma.upperBound, optParams.noise.upperBound)
    nParams = theta_0.shape[0]
    # BUG FIX: the original configured a BOBYQA optimizer first and then,
    # in the global case, replaced it with a fresh MLSL optimizer that
    # never received the bounds, objective or walltime - MLSL cannot run
    # without them. Construct the optimizer first, then configure it.
    if optParams.global_opt is True:
        # Global MLSL search with BOBYQA as the local refinement stage.
        opt = nl.opt(nl.G_MLSL_LDS, nParams)
        local_opt = nl.opt(nl.LN_BOBYQA, nParams)
        local_opt.set_ftol_rel(1e-4)
        opt.set_local_optimizer(local_opt)
    else:
        opt = nl.opt(nl.LN_BOBYQA, nParams)
        opt.set_ftol_rel(1e-6)
    opt.set_lower_bounds(theta_low)
    opt.set_upper_bounds(theta_high)
    opt.set_min_objective(objective)
    opt.set_maxtime(optParams.walltime)
    assert( (theta_low<=theta_0).all())
    assert( (theta_high>=theta_0).all())
    theta_opt = opt.optimize(theta_0)
    sigma, noise_sigma = unpack(theta_opt, unpackinfo)
    opt_val = opt.last_optimum_value()
    return sigma, noise_sigma, opt_val
def batch_start(opt_config, initial_values):
    """Set the initial values of the optimiser parameters.

    Arguments:
        opt_config: an OptConfig instance (never mutated - deep copies
            are returned).
        initial_values: a flat list/array of initial parameter values, or
            a sequence of such value sets for a batch of starts.

    Returns:
        A single OptConfig copy, or a list of copies when
        `initial_values` holds one value set per start.
    """
    if not hasattr(initial_values[0], '__iter__'):
        # A single flat value set -> one configuration.
        single = copy.deepcopy(opt_config)
        single.sigma.initialVal = initial_values
        return single
    batch_config = []
    for value in initial_values:
        cfg = copy.deepcopy(opt_config)
        cfg.sigma.initialVal = value
        batch_config.append(cfg)
    return batch_config
| [
"numpy.hstack",
"numpy.log",
"numpy.array",
"dora.regressors.gp.types.RegressionParams",
"scipy.stats.norm.logpdf",
"copy.deepcopy",
"nlopt.opt",
"numpy.random.random",
"numpy.delete",
"numpy.sort",
"numpy.where",
"dora.regressors.gp.linalg.cholesky",
"scipy.linalg.solve_triangular",
"nump... | [((2972, 2998), 'numpy.unique', 'np.unique', (['fold_assignment'], {}), '(fold_assignment)\n', (2981, 2998), True, 'import numpy as np\n'), ((4096, 4129), 'numpy.concatenate', 'np.concatenate', (['aopt_config_copys'], {}), '(aopt_config_copys)\n', (4110, 4129), True, 'import numpy as np\n'), ((4408, 4442), 'dora.regressors.gp.predict.noise_vector', 'predict.noise_vector', (['X', 'noise_std'], {}), '(X, noise_std)\n', (4428, 4442), False, 'from dora.regressors.gp import predict\n'), ((4451, 4491), 'dora.regressors.gp.linalg.cholesky', 'linalg.cholesky', (['X', 'kernel', 'noise_vector'], {}), '(X, kernel, noise_vector)\n', (4466, 4491), False, 'from dora.regressors.gp import linalg\n'), ((4504, 4523), 'dora.regressors.gp.predict.alpha', 'predict.alpha', (['y', 'L'], {}), '(y, L)\n', (4517, 4523), False, 'from dora.regressors.gp import predict\n'), ((4535, 4592), 'dora.regressors.gp.types.RegressionParams', 'types.RegressionParams', (['X', 'L', 'alpha', 'kernel', 'y', 'noise_std'], {}), '(X, L, alpha, kernel, y, noise_std)\n', (4557, 4592), False, 'from dora.regressors.gp import types\n'), ((4765, 4796), 'numpy.diag', 'np.diag', (['(Snn_noise_std_vec ** 2)'], {}), '(Snn_noise_std_vec ** 2)\n', (4772, 4796), True, 'import numpy as np\n'), ((4858, 4889), 'numpy.concatenate', 'np.concatenate', (['(L, On)'], {'axis': '(1)'}), '((L, On), axis=1)\n', (4872, 4889), True, 'import numpy as np\n'), ((4903, 4936), 'numpy.concatenate', 'np.concatenate', (['(Ln, Lnn)'], {'axis': '(1)'}), '((Ln, Lnn), axis=1)\n', (4917, 4936), True, 'import numpy as np\n'), ((4948, 4985), 'numpy.concatenate', 'np.concatenate', (['(top, bottom)'], {'axis': '(0)'}), '((top, bottom), axis=0)\n', (4962, 4985), True, 'import numpy as np\n'), ((5123, 5154), 'numpy.diag', 'np.diag', (['(Snn_noise_std_vec ** 2)'], {}), '(Snn_noise_std_vec ** 2)\n', (5130, 5154), True, 'import numpy as np\n'), ((5259, 5302), 'scipy.linalg.solve_triangular', 'la.solve_triangular', (['R11.T', 'V12'], {'lower': 
'(True)'}), '(R11.T, V12, lower=True)\n', (5278, 5302), True, 'import scipy.linalg as la\n'), ((5743, 5769), 'numpy.zeros', 'np.zeros', (['(n, insertionID)'], {}), '((n, insertionID))\n', (5751, 5769), True, 'import numpy as np\n'), ((5780, 5820), 'numpy.zeros', 'np.zeros', (['(N - insertionID, insertionID)'], {}), '((N - insertionID, insertionID))\n', (5788, 5820), True, 'import numpy as np\n'), ((5829, 5859), 'numpy.zeros', 'np.zeros', (['(N - insertionID, n)'], {}), '((N - insertionID, n))\n', (5837, 5859), True, 'import numpy as np\n'), ((5869, 5908), 'numpy.concatenate', 'np.concatenate', (['(S11, S12, S13)'], {'axis': '(1)'}), '((S11, S12, S13), axis=1)\n', (5883, 5908), True, 'import numpy as np\n'), ((5922, 5961), 'numpy.concatenate', 'np.concatenate', (['(On1, S22, S23)'], {'axis': '(1)'}), '((On1, S22, S23), axis=1)\n', (5936, 5961), True, 'import numpy as np\n'), ((5975, 6014), 'numpy.concatenate', 'np.concatenate', (['(On2, On3, S33)'], {'axis': '(1)'}), '((On2, On3, S33), axis=1)\n', (5989, 6014), True, 'import numpy as np\n'), ((6428, 6446), 'numpy.sort', 'np.sort', (['remIDList'], {}), '(remIDList)\n', (6435, 6446), True, 'import numpy as np\n'), ((9617, 9654), 'numpy.delete', 'np.delete', (['regressor.X', 'remID'], {'axis': '(0)'}), '(regressor.X, remID, axis=0)\n', (9626, 9654), True, 'import numpy as np\n'), ((9673, 9710), 'numpy.delete', 'np.delete', (['regressor.y', 'remID'], {'axis': '(0)'}), '(regressor.y, remID, axis=0)\n', (9682, 9710), True, 'import numpy as np\n'), ((9782, 9836), 'dora.regressors.gp.predict.noise_vector', 'predict.noise_vector', (['regressor.X', 'regressor.noise_std'], {}), '(regressor.X, regressor.noise_std)\n', (9802, 9836), False, 'from dora.regressors.gp import predict\n'), ((9855, 9915), 'dora.regressors.gp.linalg.cholesky', 'linalg.cholesky', (['regressor.X', 'regressor.kernel', 'noise_vector'], {}), '(regressor.X, regressor.kernel, noise_vector)\n', (9870, 9915), False, 'from dora.regressors.gp import linalg\n'), 
((9938, 9977), 'dora.regressors.gp.predict.alpha', 'predict.alpha', (['regressor.y', 'regressor.L'], {}), '(regressor.y, regressor.L)\n', (9951, 9977), False, 'from dora.regressors.gp import predict\n'), ((12357, 12386), 'nlopt.opt', 'nl.opt', (['nl.LN_BOBYQA', 'nParams'], {}), '(nl.LN_BOBYQA, nParams)\n', (12363, 12386), True, 'import nlopt as nl\n'), ((998, 1044), 'scipy.stats.norm.logpdf', 'stats.norm.logpdf', (['Y[i]'], {'loc': 'mu_i', 'scale': 'sig2i'}), '(Y[i], loc=mu_i, scale=sig2i)\n', (1015, 1044), True, 'import scipy.stats as stats\n'), ((4682, 4720), 'scipy.linalg.solve_triangular', 'la.solve_triangular', (['L', 'Sn'], {'lower': '(True)'}), '(L, Sn, lower=True)\n', (4701, 4720), True, 'import scipy.linalg as la\n'), ((4732, 4750), 'numpy.zeros', 'np.zeros', (['Ln.shape'], {}), '(Ln.shape)\n', (4740, 4750), True, 'import numpy as np\n'), ((5685, 5701), 'numpy.zeros', 'np.zeros', (['(n, 0)'], {}), '((n, 0))\n', (5693, 5701), True, 'import numpy as np\n'), ((5716, 5732), 'numpy.zeros', 'np.zeros', (['(0, 0)'], {}), '((0, 0))\n', (5724, 5732), True, 'import numpy as np\n'), ((6026, 6071), 'numpy.concatenate', 'np.concatenate', (['(top, middle, bottom)'], {'axis': '(0)'}), '((top, middle, bottom), axis=0)\n', (6040, 6071), True, 'import numpy as np\n'), ((6564, 6598), 'numpy.zeros', 'np.zeros', (['(n - (remID + 1), remID)'], {}), '((n - (remID + 1), remID))\n', (6572, 6598), True, 'import numpy as np\n'), ((6649, 6715), 'numpy.concatenate', 'np.concatenate', (['(S[:remID, :remID], S[:remID, remID + 1:])'], {'axis': '(1)'}), '((S[:remID, :remID], S[:remID, remID + 1:]), axis=1)\n', (6663, 6715), True, 'import numpy as np\n'), ((6943, 6976), 'numpy.concatenate', 'np.concatenate', (['(On, R33)'], {'axis': '(1)'}), '((On, R33), axis=1)\n', (6957, 6976), True, 'import numpy as np\n'), ((7580, 7627), 'dora.regressors.gp.predict.noise_vector', 'predict.noise_vector', (['newX', 'regressor.noise_std'], {}), '(newX, regressor.noise_std)\n', (7600, 7627), False, 'from 
dora.regressors.gp import predict\n'), ((7699, 7729), 'numpy.vstack', 'np.vstack', (['(regressor.X, newX)'], {}), '((regressor.X, newX))\n', (7708, 7729), True, 'import numpy as np\n'), ((7752, 7782), 'numpy.hstack', 'np.hstack', (['(regressor.y, newY)'], {}), '((regressor.y, newY))\n', (7761, 7782), True, 'import numpy as np\n'), ((8057, 8096), 'dora.regressors.gp.predict.alpha', 'predict.alpha', (['regressor.y', 'regressor.L'], {}), '(regressor.y, regressor.L)\n', (8070, 8096), False, 'from dora.regressors.gp import predict\n'), ((8555, 8602), 'dora.regressors.gp.predict.noise_vector', 'predict.noise_vector', (['newX', 'regressor.noise_std'], {}), '(newX, regressor.noise_std)\n', (8575, 8602), False, 'from dora.regressors.gp import predict\n'), ((8625, 8702), 'numpy.vstack', 'np.vstack', (['(regressor.X[:insertionID, :], newX, regressor.X[insertionID:, :])'], {}), '((regressor.X[:insertionID, :], newX, regressor.X[insertionID:, :]))\n', (8634, 8702), True, 'import numpy as np\n'), ((8758, 8829), 'numpy.hstack', 'np.hstack', (['(regressor.y[:insertionID], newY, regressor.y[insertionID:])'], {}), '((regressor.y[:insertionID], newY, regressor.y[insertionID:]))\n', (8767, 8829), True, 'import numpy as np\n'), ((9164, 9203), 'dora.regressors.gp.predict.alpha', 'predict.alpha', (['regressor.y', 'regressor.L'], {}), '(regressor.y, regressor.L)\n', (9177, 9203), False, 'from dora.regressors.gp import predict\n'), ((10061, 10098), 'numpy.delete', 'np.delete', (['query.K_xxs', 'remID'], {'axis': '(0)'}), '(query.K_xxs, remID, axis=0)\n', (10070, 10098), True, 'import numpy as np\n'), ((10346, 10376), 'dora.regressors.gp.predict.noise_vector', 'predict.noise_vector', (['X', 'noise'], {}), '(X, noise)\n', (10366, 10376), False, 'from dora.regressors.gp import predict\n'), ((10389, 10419), 'dora.regressors.gp.linalg.cholesky', 'linalg.cholesky', (['X', 'k', 'X_noise'], {}), '(X, k, X_noise)\n', (10404, 10419), False, 'from dora.regressors.gp import linalg\n'), ((10432, 
10451), 'dora.regressors.gp.predict.alpha', 'predict.alpha', (['Y', 'L'], {}), '(Y, L)\n', (10445, 10451), False, 'from dora.regressors.gp import predict\n'), ((12588, 12618), 'nlopt.opt', 'nl.opt', (['nl.G_MLSL_LDS', 'nParams'], {}), '(nl.G_MLSL_LDS, nParams)\n', (12594, 12618), True, 'import nlopt as nl\n'), ((12639, 12668), 'nlopt.opt', 'nl.opt', (['nl.LN_BOBYQA', 'nParams'], {}), '(nl.LN_BOBYQA, nParams)\n', (12645, 12668), True, 'import nlopt as nl\n'), ((13763, 13788), 'copy.deepcopy', 'copy.deepcopy', (['opt_config'], {}), '(opt_config)\n', (13776, 13788), False, 'import copy\n'), ((825, 834), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (831, 834), True, 'import numpy as np\n'), ((1476, 1502), 'numpy.random.permutation', 'np.random.permutation', (['n_Y'], {}), '(n_Y)\n', (1497, 1502), True, 'import numpy as np\n'), ((1638, 1700), 'sklearn.cluster.MiniBatchKMeans', 'skcluster.MiniBatchKMeans', ([], {'n_clusters': 'n_folds', 'batch_size': '(1000)'}), '(n_clusters=n_folds, batch_size=1000)\n', (1663, 1700), True, 'import sklearn.cluster as skcluster\n'), ((3727, 3742), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (3735, 3742), True, 'import numpy as np\n'), ((6892, 6925), 'dora.regressors.gp.linalg.jitchol', 'linalg.jitchol', (['(S23TS23 + S33TS33)'], {}), '(S23TS23 + S33TS33)\n', (6906, 6925), False, 'from dora.regressors.gp import linalg\n'), ((6989, 7026), 'numpy.concatenate', 'np.concatenate', (['(top, bottom)'], {'axis': '(0)'}), '((top, bottom), axis=0)\n', (7003, 7026), True, 'import numpy as np\n'), ((8244, 8274), 'numpy.vstack', 'np.vstack', (['(query.K_xxs, Kxsn)'], {}), '((query.K_xxs, Kxsn))\n', (8253, 8274), True, 'import numpy as np\n'), ((9313, 9390), 'numpy.vstack', 'np.vstack', (['(query.K_xxs[:insertionID, :], Kxsn, query.K_xxs[insertionID:, :])'], {}), '((query.K_xxs[:insertionID, :], Kxsn, query.K_xxs[insertionID:, :]))\n', (9322, 9390), True, 'import numpy as np\n'), ((11360, 11391), 'dora.regressors.gp.predict.noise_vector', 
'predict.noise_vector', (['Xf', 'noise'], {}), '(Xf, noise)\n', (11380, 11391), False, 'from dora.regressors.gp import predict\n'), ((11409, 11441), 'dora.regressors.gp.linalg.cholesky', 'linalg.cholesky', (['Xf', 'k', 'Xf_noise'], {}), '(Xf, k, Xf_noise)\n', (11424, 11441), False, 'from dora.regressors.gp import linalg\n'), ((11459, 11480), 'dora.regressors.gp.predict.alpha', 'predict.alpha', (['Yf', 'Lf'], {}), '(Yf, Lf)\n', (11472, 11480), False, 'from dora.regressors.gp import predict\n'), ((13602, 13627), 'copy.deepcopy', 'copy.deepcopy', (['opt_config'], {}), '(opt_config)\n', (13615, 13627), False, 'import copy\n'), ((576, 586), 'numpy.diag', 'np.diag', (['L'], {}), '(L)\n', (583, 586), True, 'import numpy as np\n'), ((627, 646), 'numpy.log', 'np.log', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (633, 646), True, 'import numpy as np\n'), ((2113, 2137), 'numpy.zeros', 'np.zeros', (['(n_folds, n_X)'], {}), '((n_folds, n_X))\n', (2121, 2137), True, 'import numpy as np\n'), ((2153, 2171), 'scipy.spatial.Delaunay', 'Delaunay', (['Xcluster'], {}), '(Xcluster)\n', (2161, 2171), False, 'from scipy.spatial import Delaunay\n'), ((2610, 2640), 'numpy.cumsum', 'np.cumsum', (['assign_prob'], {'axis': '(0)'}), '(assign_prob, axis=0)\n', (2619, 2640), True, 'import numpy as np\n'), ((2657, 2678), 'numpy.random.random', 'np.random.random', (['n_X'], {}), '(n_X)\n', (2673, 2678), True, 'import numpy as np\n'), ((2705, 2754), 'numpy.sum', 'np.sum', (['(rvec[np.newaxis, :] < assign_prob)'], {'axis': '(0)'}), '(rvec[np.newaxis, :] < assign_prob, axis=0)\n', (2711, 2754), True, 'import numpy as np\n'), ((3109, 3120), 'numpy.where', 'np.where', (['v'], {}), '(v)\n', (3117, 3120), True, 'import numpy as np\n'), ((2268, 2285), 'numpy.zeros', 'np.zeros', (['n_folds'], {}), '(n_folds)\n', (2276, 2285), True, 'import numpy as np\n'), ((1855, 1942), 'sklearn.cluster.MiniBatchKMeans', 'skcluster.MiniBatchKMeans', ([], {'n_clusters': 'n_folds', 'batch_size': '(1000)', 'compute_labels': 
'(True)'}), '(n_clusters=n_folds, batch_size=1000,\n compute_labels=True)\n', (1880, 1942), True, 'import sklearn.cluster as skcluster\n'), ((2334, 2393), 'scipy.interpolate.LinearNDInterpolator', 'interp.LinearNDInterpolator', (['tris', 'indicator'], {'fill_value': '(-1)'}), '(tris, indicator, fill_value=-1)\n', (2361, 2393), True, 'import scipy.interpolate as interp\n')] |
import os
import argparse
import importlib
from natsort import natsorted
from tqdm import tqdm, trange
from collections import Counter
import numpy as np
from imageio import imwrite
from scipy.spatial.transform import Rotation
from lib.misc.pano_lsd_align import rotatePanorama, panoEdgeDetection
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from lib.config import config, update_config, infer_exp_id
from lib import dataset
def eval_metric(pred, gt, dmax):
    """Compute standard depth-estimation metrics between pred and gt.

    Both tensors are clamped into [0.01, dmax] before comparison.
    Returns a dict of python floats: mre, mae, rmse, rmse_log, log10 and
    the delta_1/2/3 threshold accuracies.
    """
    gt = gt.clamp(0.01, dmax)
    pred = pred.clamp(0.01, dmax)
    abs_err = (gt - pred).abs()
    log_diff = gt.log10() - pred.log10()
    ratio = torch.max(pred / gt, gt / pred)
    metrics = {
        'mre': (abs_err / gt).mean().item(),
        'mae': abs_err.mean().item(),
        'rmse': ((gt - pred) ** 2).mean().sqrt().item(),
        'rmse_log': (log_diff ** 2).mean().sqrt().item(),
        'log10': log_diff.abs().mean().item(),
    }
    for key, thresh in (('delta_1', 1.25), ('delta_2', 1.25 ** 2),
                        ('delta_3', 1.25 ** 3)):
        metrics[key] = (ratio < thresh).float().mean().item()
    return metrics
if __name__ == '__main__':
    # Parse args & config
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--cfg', required=True)
    parser.add_argument('--pth')
    parser.add_argument('--out')
    parser.add_argument('--vis_dir')
    parser.add_argument('--clip', default=10, type=float)
    parser.add_argument('--y', action='store_true')
    parser.add_argument('--pitch', default=0, type=float)
    parser.add_argument('--roll', default=0, type=float)
    parser.add_argument('opts',
                        help='Modify config options using the command-line',
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()
    update_config(config, args)
    device = 'cuda' if config.cuda else 'cpu'
    # Infer the checkpoint path from the experiment id when not given.
    if not args.pth:
        from glob import glob
        exp_id = infer_exp_id(args.cfg)
        exp_ckpt_root = os.path.join(config.ckpt_root, exp_id)
        # natsorted so ep10 sorts after ep9; pick the latest epoch.
        args.pth = natsorted(glob(os.path.join(exp_ckpt_root, 'ep*pth')))[-1]
        print(f'No pth given, inferring the trained pth: {args.pth}')
    # Infer the output .npz path from the checkpoint path when not given.
    if not args.out:
        out = [os.path.splitext(args.pth)[0]]
        if args.pitch > 0:
            out.append(f'.pitch{args.pitch:.0f}')
        if args.roll > 0:
            out.append(f'.roll{args.roll:.0f}')
        args.out = ''.join(out + ['.npz'])
        print(f'No out given, inferring the output path: {args.out}')
    # Ask for confirmation before overwriting existing results (--y skips).
    if os.path.isfile(args.out) and not args.y:
        print(f'{args.out} is existed:')
        print(dict(np.load(args.out)))
        print('Re-write this results ?', end=' ')
        input()
    # Init dataset
    DatasetClass = getattr(dataset, config.dataset.name)
    config.dataset.valid_kwargs.update(config.dataset.common_kwargs)
    config.dataset.valid_kwargs['fix_pitch'] = args.pitch
    config.dataset.valid_kwargs['fix_roll'] = args.roll
    valid_dataset = DatasetClass(**config.dataset.valid_kwargs)
    # Init network
    model_file = importlib.import_module(config.model.file)
    model_class = getattr(model_file, config.model.modelclass)
    net = model_class(**config.model.kwargs).to(device)
    net.load_state_dict(torch.load(args.pth))
    net.eval()
    # Run evaluation
    evaluation_metric = Counter()
    for batch in tqdm(valid_dataset):
        # Add batch dim and move to gpu
        color = batch['x'][None].to(device)
        depth = batch['depth'][None].to(device)
        mask = (depth > 0)
        # feed forward
        with torch.no_grad():
            pred_depth = net.infer(color)
        # infer() may return a dict of extra visualisations plus 'depth';
        # NOTE(review): viz_dict is only bound on this path, yet it is read
        # in the --vis_dir branch below - presumably infer() always returns
        # a dict in that configuration; confirm against the model.
        if not torch.is_tensor(pred_depth):
            viz_dict = pred_depth
            pred_depth = viz_dict.pop('depth')
        pred_depth = pred_depth.clamp(0.01)
        if args.pitch:
            # Rotate the prediction back to undo the pitched input panorama.
            vp = Rotation.from_rotvec([-args.pitch * np.pi / 180, 0, 0]).as_matrix()
            pred_depth = pred_depth.squeeze()[...,None].cpu().numpy()
            pred_depth = rotatePanorama(pred_depth, vp, order=0)[...,0]
            pred_depth = torch.from_numpy(pred_depth[None,None]).to(depth.device)
        if args.roll:
            vp = Rotation.from_rotvec([0, -args.roll * np.pi / 180, 0]).as_matrix()
            pred_depth = pred_depth.squeeze()[...,None].cpu().numpy()
            pred_depth = rotatePanorama(pred_depth, vp, order=0)[...,0]
            pred_depth = torch.from_numpy(pred_depth[None,None]).to(depth.device)
        if args.vis_dir:
            # Dump rgb/gt-depth/predicted-depth images for inspection.
            fname = batch['fname'].strip()
            os.makedirs(args.vis_dir, exist_ok=True)
            rgb = (batch['x'].permute(1,2,0) * 255).cpu().numpy().astype(np.uint8)
            dep = pred_depth.squeeze().mul(512).cpu().numpy().astype(np.uint16)
            dep[~mask.squeeze().cpu().numpy()] = 0
            gtdep = depth.squeeze().mul(512).cpu().numpy().astype(np.uint16)
            imwrite(os.path.join(args.vis_dir, fname + '.rgb' + '.jpg'), rgb)
            imwrite(os.path.join(args.vis_dir, fname + '.rgb' + '.png'), gtdep)
            imwrite(os.path.join(args.vis_dir, fname + '.depth' + '.png'), dep)
            for k, v in viz_dict.items():
                if v.dtype == np.uint8 or v.dtype == np.uint16:
                    imwrite(os.path.join(args.vis_dir, fname + '.' + k + '.png'), v)
                else:
                    raise NotImplementedError
        evaluation_metric['N'] += 1
        for metric, v in eval_metric(pred_depth[mask], depth[mask], args.clip).items():
            evaluation_metric[metric] += v
    # Average the accumulated metrics over the number of samples.
    N = evaluation_metric.pop('N')
    for metric, v in evaluation_metric.items():
        evaluation_metric[metric] = v / N
    for metric, v in evaluation_metric.items():
        print(f'{metric:20s} {v:.4f}')
    np.savez(args.out, **evaluation_metric)
| [
"scipy.spatial.transform.Rotation.from_rotvec",
"lib.misc.pano_lsd_align.rotatePanorama",
"torch.max",
"torch.from_numpy",
"lib.config.update_config",
"numpy.savez",
"argparse.ArgumentParser",
"lib.config.infer_exp_id",
"lib.config.config.dataset.valid_kwargs.update",
"importlib.import_module",
... | [((871, 902), 'torch.max', 'torch.max', (['(pred / gt)', '(gt / pred)'], {}), '(pred / gt, gt / pred)\n', (880, 902), False, 'import torch\n'), ((1298, 1377), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (1321, 1377), False, 'import argparse\n'), ((1962, 1989), 'lib.config.update_config', 'update_config', (['config', 'args'], {}), '(config, args)\n', (1975, 1989), False, 'from lib.config import config, update_config, infer_exp_id\n'), ((2948, 3012), 'lib.config.config.dataset.valid_kwargs.update', 'config.dataset.valid_kwargs.update', (['config.dataset.common_kwargs'], {}), '(config.dataset.common_kwargs)\n', (2982, 3012), False, 'from lib.config import config, update_config, infer_exp_id\n'), ((3228, 3270), 'importlib.import_module', 'importlib.import_module', (['config.model.file'], {}), '(config.model.file)\n', (3251, 3270), False, 'import importlib\n'), ((3497, 3506), 'collections.Counter', 'Counter', ([], {}), '()\n', (3504, 3506), False, 'from collections import Counter\n'), ((3524, 3543), 'tqdm.tqdm', 'tqdm', (['valid_dataset'], {}), '(valid_dataset)\n', (3528, 3543), False, 'from tqdm import tqdm, trange\n'), ((5928, 5967), 'numpy.savez', 'np.savez', (['args.out'], {}), '(args.out, **evaluation_metric)\n', (5936, 5967), True, 'import numpy as np\n'), ((2105, 2127), 'lib.config.infer_exp_id', 'infer_exp_id', (['args.cfg'], {}), '(args.cfg)\n', (2117, 2127), False, 'from lib.config import config, update_config, infer_exp_id\n'), ((2152, 2190), 'os.path.join', 'os.path.join', (['config.ckpt_root', 'exp_id'], {}), '(config.ckpt_root, exp_id)\n', (2164, 2190), False, 'import os\n'), ((2680, 2704), 'os.path.isfile', 'os.path.isfile', (['args.out'], {}), '(args.out)\n', (2694, 2704), False, 'import os\n'), ((3414, 3434), 'torch.load', 'torch.load', (['args.pth'], {}), '(args.pth)\n', (3424, 3434), False, 'import 
torch\n'), ((3741, 3756), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3754, 3756), False, 'import torch\n'), ((3815, 3842), 'torch.is_tensor', 'torch.is_tensor', (['pred_depth'], {}), '(pred_depth)\n', (3830, 3842), False, 'import torch\n'), ((4713, 4753), 'os.makedirs', 'os.makedirs', (['args.vis_dir'], {'exist_ok': '(True)'}), '(args.vis_dir, exist_ok=True)\n', (4724, 4753), False, 'import os\n'), ((2377, 2403), 'os.path.splitext', 'os.path.splitext', (['args.pth'], {}), '(args.pth)\n', (2393, 2403), False, 'import os\n'), ((2781, 2798), 'numpy.load', 'np.load', (['args.out'], {}), '(args.out)\n', (2788, 2798), True, 'import numpy as np\n'), ((4173, 4212), 'lib.misc.pano_lsd_align.rotatePanorama', 'rotatePanorama', (['pred_depth', 'vp'], {'order': '(0)'}), '(pred_depth, vp, order=0)\n', (4187, 4212), False, 'from lib.misc.pano_lsd_align import rotatePanorama, panoEdgeDetection\n'), ((4503, 4542), 'lib.misc.pano_lsd_align.rotatePanorama', 'rotatePanorama', (['pred_depth', 'vp'], {'order': '(0)'}), '(pred_depth, vp, order=0)\n', (4517, 4542), False, 'from lib.misc.pano_lsd_align import rotatePanorama, panoEdgeDetection\n'), ((5065, 5116), 'os.path.join', 'os.path.join', (['args.vis_dir', "(fname + '.rgb' + '.jpg')"], {}), "(args.vis_dir, fname + '.rgb' + '.jpg')\n", (5077, 5116), False, 'import os\n'), ((5143, 5194), 'os.path.join', 'os.path.join', (['args.vis_dir', "(fname + '.rgb' + '.png')"], {}), "(args.vis_dir, fname + '.rgb' + '.png')\n", (5155, 5194), False, 'import os\n'), ((5223, 5276), 'os.path.join', 'os.path.join', (['args.vis_dir', "(fname + '.depth' + '.png')"], {}), "(args.vis_dir, fname + '.depth' + '.png')\n", (5235, 5276), False, 'import os\n'), ((2225, 2262), 'os.path.join', 'os.path.join', (['exp_ckpt_root', '"""ep*pth"""'], {}), "(exp_ckpt_root, 'ep*pth')\n", (2237, 2262), False, 'import os\n'), ((4010, 4065), 'scipy.spatial.transform.Rotation.from_rotvec', 'Rotation.from_rotvec', (['[-args.pitch * np.pi / 180, 0, 0]'], {}), 
'([-args.pitch * np.pi / 180, 0, 0])\n', (4030, 4065), False, 'from scipy.spatial.transform import Rotation\n'), ((4245, 4285), 'torch.from_numpy', 'torch.from_numpy', (['pred_depth[None, None]'], {}), '(pred_depth[None, None])\n', (4261, 4285), False, 'import torch\n'), ((4341, 4395), 'scipy.spatial.transform.Rotation.from_rotvec', 'Rotation.from_rotvec', (['[0, -args.roll * np.pi / 180, 0]'], {}), '([0, -args.roll * np.pi / 180, 0])\n', (4361, 4395), False, 'from scipy.spatial.transform import Rotation\n'), ((4575, 4615), 'torch.from_numpy', 'torch.from_numpy', (['pred_depth[None, None]'], {}), '(pred_depth[None, None])\n', (4591, 4615), False, 'import torch\n'), ((5417, 5469), 'os.path.join', 'os.path.join', (['args.vis_dir', "(fname + '.' + k + '.png')"], {}), "(args.vis_dir, fname + '.' + k + '.png')\n", (5429, 5469), False, 'import os\n')] |
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.dummy import DummyClassifier
from sklearn.dummy import DummyRegressor
def _check_predict_proba(clf, X, y):
    """Sanity-check shape and row sums of ``clf.predict_proba(X)``."""
    proba = clf.predict_proba(X)
    n_samples = len(X)
    n_classes = len(np.unique(y))
    assert_equal(proba.shape[0], n_samples)
    assert_equal(proba.shape[1], n_classes)
    # Each row is a probability distribution over classes and must sum to one.
    assert_array_equal(proba.sum(axis=1), np.ones(n_samples))
def test_most_frequent_strategy():
    """The dummy classifier must always predict the modal class (here: 1)."""
    y = [1, 2, 1, 1]
    X = [[0]] * len(y)  # features are ignored by the dummy classifier
    clf = DummyClassifier(strategy="most_frequent", random_state=0)
    clf.fit(X, y)
    assert_array_equal(clf.predict(X), np.ones(len(X)))
    _check_predict_proba(clf, X, y)
def test_stratified_strategy():
    """Predictions should roughly match the empirical class frequencies."""
    y = [1, 2, 1, 1, 2]
    X = [[0]] * len(y)  # ignored
    clf = DummyClassifier(strategy="stratified", random_state=0)
    clf.fit(X, y)
    X = [[0]] * 1000
    predictions = clf.predict(X)
    frequencies = np.bincount(predictions) / float(len(X))
    # Training set is 3/5 class 1 and 2/5 class 2.
    assert_almost_equal(frequencies[1], 3. / 5, decimal=1)
    assert_almost_equal(frequencies[2], 2. / 5, decimal=1)
    _check_predict_proba(clf, X, y)
def test_uniform_strategy():
    """With the uniform strategy each class should appear about half the time."""
    y = [1, 2, 1, 1]
    X = [[0]] * len(y)  # ignored
    clf = DummyClassifier(strategy="uniform", random_state=0)
    clf.fit(X, y)
    X = [[0]] * 1000
    predictions = clf.predict(X)
    frequencies = np.bincount(predictions) / float(len(X))
    assert_almost_equal(frequencies[1], 0.5, decimal=1)
    assert_almost_equal(frequencies[2], 0.5, decimal=1)
    _check_predict_proba(clf, X, y)
def test_string_labels():
    """String targets are supported; the most frequent label wins."""
    y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
    X = [[0]] * len(y)
    clf = DummyClassifier(strategy="most_frequent")
    clf.fit(X, y)
    assert_array_equal(clf.predict(X), ["paris"] * len(y))
def test_classifier_exceptions():
    """An unknown strategy raises on fit and on (unfitted) prediction."""
    clf = DummyClassifier(strategy="unknown")
    assert_raises(ValueError, clf.fit, [], [])
    # Predicting with the unfitted classifier fails the same way.
    for predict_fn in (clf.predict, clf.predict_proba):
        assert_raises(ValueError, predict_fn, [])
def test_regressor():
    """DummyRegressor predicts the mean of the training targets."""
    y = [1, 2, 1, 1]
    X = [[0]] * len(y)  # ignored
    reg = DummyRegressor()
    reg.fit(X, y)
    expected_mean = 5. / 4
    assert_array_equal(reg.predict(X), [expected_mean] * len(X))
def test_regressor_exceptions():
    """Predicting before ``fit`` raises ValueError."""
    reg = DummyRegressor()
    assert_raises(ValueError, reg.predict, [])
| [
"sklearn.utils.testing.assert_raises",
"numpy.unique",
"sklearn.dummy.DummyRegressor",
"sklearn.dummy.DummyClassifier",
"numpy.bincount",
"sklearn.utils.testing.assert_almost_equal"
] | [((631, 688), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'strategy': '"""most_frequent"""', 'random_state': '(0)'}), "(strategy='most_frequent', random_state=0)\n", (646, 688), False, 'from sklearn.dummy import DummyClassifier\n'), ((896, 950), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'strategy': '"""stratified"""', 'random_state': '(0)'}), "(strategy='stratified', random_state=0)\n", (911, 950), False, 'from sklearn.dummy import DummyClassifier\n'), ((1067, 1112), 'sklearn.utils.testing.assert_almost_equal', 'assert_almost_equal', (['p[1]', '(3.0 / 5)'], {'decimal': '(1)'}), '(p[1], 3.0 / 5, decimal=1)\n', (1086, 1112), False, 'from sklearn.utils.testing import assert_almost_equal\n'), ((1116, 1161), 'sklearn.utils.testing.assert_almost_equal', 'assert_almost_equal', (['p[2]', '(2.0 / 5)'], {'decimal': '(1)'}), '(p[2], 2.0 / 5, decimal=1)\n', (1135, 1161), False, 'from sklearn.utils.testing import assert_almost_equal\n'), ((1288, 1339), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'strategy': '"""uniform"""', 'random_state': '(0)'}), "(strategy='uniform', random_state=0)\n", (1303, 1339), False, 'from sklearn.dummy import DummyClassifier\n'), ((1456, 1497), 'sklearn.utils.testing.assert_almost_equal', 'assert_almost_equal', (['p[1]', '(0.5)'], {'decimal': '(1)'}), '(p[1], 0.5, decimal=1)\n', (1475, 1497), False, 'from sklearn.utils.testing import assert_almost_equal\n'), ((1502, 1543), 'sklearn.utils.testing.assert_almost_equal', 'assert_almost_equal', (['p[2]', '(0.5)'], {'decimal': '(1)'}), '(p[2], 0.5, decimal=1)\n', (1521, 1543), False, 'from sklearn.utils.testing import assert_almost_equal\n'), ((1695, 1736), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'strategy': '"""most_frequent"""'}), "(strategy='most_frequent')\n", (1710, 1736), False, 'from sklearn.dummy import DummyClassifier\n'), ((1855, 1890), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'strategy': '"""unknown"""'}), 
"(strategy='unknown')\n", (1870, 1890), False, 'from sklearn.dummy import DummyClassifier\n'), ((1895, 1937), 'sklearn.utils.testing.assert_raises', 'assert_raises', (['ValueError', 'clf.fit', '[]', '[]'], {}), '(ValueError, clf.fit, [], [])\n', (1908, 1937), False, 'from sklearn.utils.testing import assert_raises\n'), ((1943, 1985), 'sklearn.utils.testing.assert_raises', 'assert_raises', (['ValueError', 'clf.predict', '[]'], {}), '(ValueError, clf.predict, [])\n', (1956, 1985), False, 'from sklearn.utils.testing import assert_raises\n'), ((1990, 2038), 'sklearn.utils.testing.assert_raises', 'assert_raises', (['ValueError', 'clf.predict_proba', '[]'], {}), '(ValueError, clf.predict_proba, [])\n', (2003, 2038), False, 'from sklearn.utils.testing import assert_raises\n'), ((2124, 2140), 'sklearn.dummy.DummyRegressor', 'DummyRegressor', ([], {}), '()\n', (2138, 2140), False, 'from sklearn.dummy import DummyRegressor\n'), ((2262, 2278), 'sklearn.dummy.DummyRegressor', 'DummyRegressor', ([], {}), '()\n', (2276, 2278), False, 'from sklearn.dummy import DummyRegressor\n'), ((2283, 2325), 'sklearn.utils.testing.assert_raises', 'assert_raises', (['ValueError', 'reg.predict', '[]'], {}), '(ValueError, reg.predict, [])\n', (2296, 2325), False, 'from sklearn.utils.testing import assert_raises\n'), ((1027, 1046), 'numpy.bincount', 'np.bincount', (['y_pred'], {}), '(y_pred)\n', (1038, 1046), True, 'import numpy as np\n'), ((1416, 1435), 'numpy.bincount', 'np.bincount', (['y_pred'], {}), '(y_pred)\n', (1427, 1435), True, 'import numpy as np\n'), ((450, 462), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (459, 462), True, 'import numpy as np\n')] |
import numpy as np
from ..hrf import spm_hrf
def _put(a, ind, v, mode='raise'):
"""np.put, not in-place."""
arr = a.copy()
np.put(arr, ind, v, mode)
return arr
def detection_power(X, tr, contrasts=None, weights=None):
    """Estimate detection power of a design matrix.

    Parameters
    ----------
    X : array, shape=(n_acq,n_cond)
        Design matrix (i.e. boxcars convolved with HRF).
    tr : float
        Repetition time (in seconds).
    contrasts : array, shape=(n_cond,n_cond)
        Binary matrix marking pairwise contrasts to evaluate; only the
        strictly lower triangle is used. If None, all pairwise contrasts.
    weights : array, shape=(n_contrasts)
        Weight of each contrast (single-event contrasts first, then
        pairwise) in the averaged detection power.

    Returns
    -------
    R_tot : float
        Detection power.

    References
    ----------
    [1] <NAME>., & <NAME>. (2004). Efficiency, power, and entropy in event-related
    fmri with multiple trial types: Part i: Theory. NeuroImage, 21(1), 387-400.
    [2] <NAME>. (2004). Efficiency, power, and entropy in event-related fMRI with multiple
    trial types. Part II: design of experiments. NeuroImage, 21(1), 401–413.
    """
    n_cond = X.shape[-1]

    ## Inverse Fisher information matrix.
    finv = np.linalg.inv(X.T @ X)

    ## Normalization by the HRF energy.
    hrf = spm_hrf(tr)
    hh = hrf @ hrf

    zeros = np.zeros(n_cond)
    powers = []

    ## Single-event contrasts.
    for i in range(n_cond):
        D = _put(zeros, i, 1)
        powers.append(1. / (np.squeeze(D @ finv @ D.T) * hh))

    ## Pairwise contrasts (strictly lower triangle only).
    if contrasts is None:
        contrasts = np.ones((n_cond, n_cond))
    contrasts = np.tril(contrasts, k=-1)
    for j, i in np.column_stack(np.where(contrasts)):
        D = _put(zeros, [i, j], [1, -1])
        powers.append(1. / (np.squeeze(D @ finv @ D.T) * hh))

    ## Weighted harmonic-style average of the individual powers.
    R_tot = 1. / np.average(1. / np.asarray(powers), weights=weights)
    return R_tot
def design_efficiency(X, q, k, contrasts=None, weights=None):
"""Estimate design efficiency of design matrix.
Parameters
----------
X : array, shape=(n_acq,n_cond)
Design matrix (i.e. boxcars convolved with HRF).
q : int
Number of conditions.
k : int
Size of HRF window.
contrasts : array, shape=(n_cond,n_cond)
Binary matrix denoting pairwise contrasts to estimate. Only
reads values from lower diagonal. If None, defaults to all
pairwise contrasts.
weights : array, shape=(n_contrasts)
Weight of each contrast (event and pairwise) in average
detection power estimate.
Returns
-------
C_tot : float
Estimation efficiency.
References
----------
[1] <NAME>., & <NAME>. (2004). Efficiency, power, and entropy in event-related
fmri with multiple trial types: Part i: Theory. NeuroImage, 21(1), 387-400.
[2] <NAME>. (2004). Efficiency, power, and entropy in event-related fMRI with multiple
trial types. Part II: design of experiments. NeuroImage, 21(1), 401–413.
"""
assert isinstance(q, int) and isinstance(k, int)
## Compute (inverse) Fisher information matrix.
try: finv = np.linalg.inv(X.T @ X)
except np.linalg.LinAlgError: finv = np.linalg.pinv(X.T @ X)
## Preallocate space.
tmp = np.zeros(q)
K = np.identity(k)
C = []
## Event contrasts.
for i in range(q):
L = np.kron(_put(tmp, i, 1), K)
C = np.append( C, np.trace(L @ finv @ L.T) )
## Pairwise contrasts.
if contrasts is None: contrasts = np.tril(np.ones((q,q)), k=-1)
contrasts = np.tril(contrasts, k=-1)
for j, i in np.column_stack(np.where(contrasts)):
L = np.kron(_put(tmp, [i,j], [1,-1]), K)
C = np.append( C, np.trace(L @ finv @ L.T) )
## Compute average.
C_tot = 1. / np.average(C, weights=weights)
return C_tot | [
"numpy.identity",
"numpy.trace",
"numpy.ones",
"numpy.linalg.pinv",
"numpy.average",
"numpy.put",
"numpy.where",
"numpy.squeeze",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.tril"
] | [((136, 161), 'numpy.put', 'np.put', (['arr', 'ind', 'v', 'mode'], {}), '(arr, ind, v, mode)\n', (142, 161), True, 'import numpy as np\n'), ((1347, 1369), 'numpy.linalg.inv', 'np.linalg.inv', (['(X.T @ X)'], {}), '(X.T @ X)\n', (1360, 1369), True, 'import numpy as np\n'), ((1484, 1505), 'numpy.zeros', 'np.zeros', (['X.shape[-1]'], {}), '(X.shape[-1])\n', (1492, 1505), True, 'import numpy as np\n'), ((1800, 1824), 'numpy.tril', 'np.tril', (['contrasts'], {'k': '(-1)'}), '(contrasts, k=-1)\n', (1807, 1824), True, 'import numpy as np\n'), ((3492, 3503), 'numpy.zeros', 'np.zeros', (['q'], {}), '(q)\n', (3500, 3503), True, 'import numpy as np\n'), ((3512, 3526), 'numpy.identity', 'np.identity', (['k'], {}), '(k)\n', (3523, 3526), True, 'import numpy as np\n'), ((3803, 3827), 'numpy.tril', 'np.tril', (['contrasts'], {'k': '(-1)'}), '(contrasts, k=-1)\n', (3810, 3827), True, 'import numpy as np\n'), ((1748, 1783), 'numpy.ones', 'np.ones', (['(X.shape[-1], X.shape[-1])'], {}), '((X.shape[-1], X.shape[-1]))\n', (1755, 1783), True, 'import numpy as np\n'), ((1857, 1876), 'numpy.where', 'np.where', (['contrasts'], {}), '(contrasts)\n', (1865, 1876), True, 'import numpy as np\n'), ((2033, 2069), 'numpy.average', 'np.average', (['(1.0 / R)'], {'weights': 'weights'}), '(1.0 / R, weights=weights)\n', (2043, 2069), True, 'import numpy as np\n'), ((3367, 3389), 'numpy.linalg.inv', 'np.linalg.inv', (['(X.T @ X)'], {}), '(X.T @ X)\n', (3380, 3389), True, 'import numpy as np\n'), ((3860, 3879), 'numpy.where', 'np.where', (['contrasts'], {}), '(contrasts)\n', (3868, 3879), True, 'import numpy as np\n'), ((4034, 4064), 'numpy.average', 'np.average', (['C'], {'weights': 'weights'}), '(C, weights=weights)\n', (4044, 4064), True, 'import numpy as np\n'), ((3431, 3454), 'numpy.linalg.pinv', 'np.linalg.pinv', (['(X.T @ X)'], {}), '(X.T @ X)\n', (3445, 3454), True, 'import numpy as np\n'), ((3656, 3680), 'numpy.trace', 'np.trace', (['(L @ finv @ L.T)'], {}), '(L @ finv @ L.T)\n', (3664, 
3680), True, 'import numpy as np\n'), ((3765, 3780), 'numpy.ones', 'np.ones', (['(q, q)'], {}), '((q, q))\n', (3772, 3780), True, 'import numpy as np\n'), ((3957, 3981), 'numpy.trace', 'np.trace', (['(L @ finv @ L.T)'], {}), '(L @ finv @ L.T)\n', (3965, 3981), True, 'import numpy as np\n'), ((1639, 1665), 'numpy.squeeze', 'np.squeeze', (['(D @ finv @ D.T)'], {}), '(D @ finv @ D.T)\n', (1649, 1665), True, 'import numpy as np\n'), ((1948, 1974), 'numpy.squeeze', 'np.squeeze', (['(D @ finv @ D.T)'], {}), '(D @ finv @ D.T)\n', (1958, 1974), True, 'import numpy as np\n')] |
"""
NE method: naively combine AttrPure and DeepWalk (AttrComb)
by <NAME> 2018
"""
import numpy as np
from . import node2vec
from .utils import dim_reduction
class ATTRCOMB(object):
    """Naively combine attribute embeddings (AttrPure) with structural NRL
    embeddings (e.g. DeepWalk), by concatenation, elementwise mean, or
    elementwise max. Resulting vectors are exposed via ``self.vectors``.
    """

    def __init__(self, graph, dim, comb_method='concat', comb_with='deepWalk', number_walks=10, walk_length=80, window=10, workers=8):
        # graph: project graph object exposing get_attr_mat(), look_up_dict,
        # look_back_list — assumed, confirm against callers.
        self.g = graph
        self.dim = dim
        self.number_walks = number_walks
        self.walk_length = walk_length
        self.window = window
        self.workers = workers

        print("Learning representation...")
        self.vectors = {}
        print('attr naively combined method ', comb_method, '=====================')

        if comb_method == 'concat':
            print('comb_method == concat by default; dim/2 from attr and dim/2 from nrl.............')
            attr_embeddings = self.train_attr(dim=int(self.dim/2))
            nrl_embeddings = self.train_nrl(dim=int(self.dim/2), comb_with='deepWalk')
            embeddings = np.concatenate((attr_embeddings, nrl_embeddings), axis=1)
            print('shape of embeddings', embeddings.shape)
        elif comb_method == 'elementwise-mean':
            print('comb_method == elementwise-mean.............')
            attr_embeddings = self.train_attr(dim=self.dim)
            nrl_embeddings = self.train_nrl(dim=self.dim, comb_with='deepWalk')  # we may try deepWalk, node2vec, line and etc...
            embeddings = np.add(attr_embeddings, nrl_embeddings)/2.0
            print('shape of embeddings', embeddings.shape)
        elif comb_method == 'elementwise-max':
            print('comb_method == elementwise-max.............')
            attr_embeddings = self.train_attr(dim=self.dim)
            nrl_embeddings = self.train_nrl(dim=self.dim, comb_with='deepWalk')  # we may try deepWalk, node2vec, line and etc...
            # Vectorized elementwise max; replaces an O(n*dim) Python double loop.
            embeddings = np.maximum(attr_embeddings, nrl_embeddings)
            print('shape of embeddings', embeddings.shape)
        else:
            print('error, no comb_method was found....')
            exit(0)

        for key, ind in self.g.look_up_dict.items():
            self.vectors[key] = embeddings[ind]

    def train_attr(self, dim):
        """Embed node attributes via dimensionality reduction (SVD by default)."""
        X = self.g.get_attr_mat()
        X_compressed = dim_reduction(X, dim=dim, method='svd')  # svd or pca for dim reduction
        print('X_compressed shape: ', X_compressed.shape)
        return np.array(X_compressed)  # n*dim matrix, each row corresponding to node ID stored in graph.look_back_list

    def train_nrl(self, dim, comb_with):
        """Train a structural embedding (deepWalk or node2vec) of size ``dim``."""
        print('attr naively combined with ', comb_with, '=====================')
        if comb_with == 'deepWalk':
            model = node2vec.Node2vec(graph=self.g, dim=dim, path_length=self.walk_length,  # do not use self.dim here
                                      num_paths=self.number_walks, workers=self.workers, window=self.window, dw=True)
        elif comb_with == 'node2vec':
            model = node2vec.Node2vec(graph=self.g, path_length=80, num_paths=self.number_walks,
                                      dim=dim, workers=4, p=0.8, q=0.8, window=10)
        else:
            print('error, no comb_with was found....')
            print('to do.... line, grarep, and etc...')
            exit(0)
        # Re-order embeddings to follow the graph's canonical node order.
        nrl_embeddings = [model.vectors[key] for key in self.g.look_back_list]
        return np.array(nrl_embeddings)

    def save_embeddings(self, filename):
        """Write '<node> <v1> ... <vdim>' lines preceded by a '<n> <dim>' header."""
        with open(filename, 'w') as fout:  # context manager guarantees the handle is closed
            node_num = len(self.vectors.keys())
            fout.write("{} {}\n".format(node_num, self.dim))
            for node, vec in self.vectors.items():
                fout.write("{} {}\n".format(node,
                                            ' '.join([str(x) for x in vec])))
| [
"numpy.array",
"numpy.zeros",
"numpy.add",
"numpy.concatenate"
] | [((2801, 2823), 'numpy.array', 'np.array', (['X_compressed'], {}), '(X_compressed)\n', (2809, 2823), True, 'import numpy as np\n'), ((983, 1040), 'numpy.concatenate', 'np.concatenate', (['(attr_embeddings, nrl_embeddings)'], {'axis': '(1)'}), '((attr_embeddings, nrl_embeddings), axis=1)\n', (997, 1040), True, 'import numpy as np\n'), ((3457, 3481), 'numpy.array', 'np.array', (['nrl_embeddings'], {}), '(nrl_embeddings)\n', (3465, 3481), True, 'import numpy as np\n'), ((3856, 3880), 'numpy.array', 'np.array', (['nrl_embeddings'], {}), '(nrl_embeddings)\n', (3864, 3880), True, 'import numpy as np\n'), ((1430, 1469), 'numpy.add', 'np.add', (['attr_embeddings', 'nrl_embeddings'], {}), '(attr_embeddings, nrl_embeddings)\n', (1436, 1469), True, 'import numpy as np\n'), ((1861, 1929), 'numpy.zeros', 'np.zeros', ([], {'shape': '(attr_embeddings.shape[0], attr_embeddings.shape[1])'}), '(shape=(attr_embeddings.shape[0], attr_embeddings.shape[1]))\n', (1869, 1929), True, 'import numpy as np\n')] |
import inspect
from logging import getLogger
from time import time
from typing import Any, Callable
import numpy as np
import torch
import yaml
from ptflops import get_model_complexity_info
from tqdm import tqdm
from .format import format_num, format_time
from .machine_info import get_machine_info
# Module-level logger used by all measurement helpers below.
logger = getLogger("torch-benchmark")
# Sentinel meaning "measurement unavailable"; NaN compares unequal to itself,
# which `_is_valid` relies on.
_INVALID = float("nan")
def _is_valid(val):
return val == val
def get_call_arg_names(module_or_fn):
    """Positional-arg names of a callable, or of ``forward`` (minus ``self``)
    when given a ``torch.nn.Module``."""
    if isinstance(module_or_fn, torch.nn.Module):
        forward_args = inspect.getfullargspec(module_or_fn.forward)[0]
        return forward_args[1:]  # drop the implicit ``self``
    return inspect.getfullargspec(module_or_fn)[0]
def measure_flops(model, sample, print_details=False):
    """Estimate model FLOPs with ptflops; return NaN on failure."""
    flops = _INVALID

    def input_constructor(*args, **kwrags):
        # ptflops passes a dummy resolution; feed the real sample instead,
        # keyed by the model's first forward-argument name.
        nonlocal sample
        return {get_call_arg_names(model)[0]: sample}

    try:
        flops, _ = get_model_complexity_info(
            model,
            (1,),  # dummy shape, superseded by input_constructor
            print_per_layer_stat=print_details,
            as_strings=False,
            input_constructor=input_constructor,
            verbose=print_details,
        )
        flops = int(flops)
    except Exception as e:  # pragma: no cover
        logger.error(f"Unable to measure model FLOPs due to error: {e}")
    return flops
def get_device(model):
    """Device of the model's first parameter (assumes a single-device model)."""
    first_param = next(model.parameters())
    return first_param.device
def measure_params(model):
    """Count trainable parameters; return NaN when the model exposes none."""
    num_params = _INVALID
    try:
        trainable_sizes = (p.numel() for p in model.parameters() if p.requires_grad)
        num_params = sum(trainable_sizes)
    except AttributeError as e:
        logger.error(f"Unable to measure model params due to error: {e}")
    return num_params
def measure_allocated_memory(
    model,
    sample,
    model_device,
    transfer_to_device_fn=torch.Tensor.to,
    print_details=False,
):
    """Run one inference and report CUDA memory (pre, post, peak) in bytes."""
    assert model_device.type == "cuda"
    torch.cuda.reset_peak_memory_stats(device=model_device)
    pre_mem = torch.cuda.memory_allocated(device=model_device)

    device_output = model(transfer_to_device_fn(sample, model_device))
    transfer_to_device_fn(device_output, "cpu")

    if print_details:
        logger.info(torch.cuda.memory_summary(device=model_device, abbreviated=True))

    post_mem = torch.cuda.memory_allocated(device=model_device)
    max_mem = torch.cuda.max_memory_allocated(device=model_device)
    return pre_mem, post_mem, max_mem
def warm_up(
    model,
    sample,
    model_device,
    transfer_to_device_fn=torch.Tensor.to,
    num_runs=10,
    batch_size: int = None,
):
    """Run ``num_runs`` throwaway inferences to warm caches before timing."""
    progress = tqdm(range(num_runs), desc=f"Warming up with batch_size={batch_size}")
    for _ in progress:
        device_output = model(transfer_to_device_fn(sample, model_device))
        transfer_to_device_fn(device_output, "cpu")
def measure_detailed_inference_timing(
    model, sample, model_device, transfer_to_device_fn=torch.Tensor.to
):
    """Profile one inference with the autograd profiler and log the breakdown."""
    use_cuda = model_device.type == "cuda"
    try:
        with torch.autograd.profiler.profile(
            use_cuda=use_cuda, profile_memory=True
        ) as prof:
            transfer_to_device_fn(
                model(transfer_to_device_fn(sample, model_device)),
                "cpu",
            )
        detailed_timing = prof.key_averages().table(sort_by="self_cpu_time_total")
        logger.info(detailed_timing)
    except Exception as e:
        logger.error(
            f"Caught exception while attempting to measure detailed model inference: {e}"
        )
def measure_repeated_inference_timing(
    model,
    sample,
    model_device,
    transfer_to_device_fn=torch.Tensor.to,
    num_runs=100,
    batch_size: int = None,
):
    """Time ``num_runs`` full inference round-trips and summarise them.

    Four phases are timed per run: CPU->device transfer, on-device inference,
    device->CPU transfer, and the total. The transfer splits are only reported
    for CUDA devices (on CPU the transfers are effectively no-ops).

    Returns a dict mapping phase title -> {"metrics": ..., "human_readable": ...}.
    """
    t_c2d = []  # CPU -> device transfer durations (seconds)
    t_inf = []  # on-device inference durations
    t_d2c = []  # device -> CPU transfer durations
    t_tot = []  # end-to-end durations
    for _ in tqdm(
        range(num_runs), desc=f"Measuring inference for batch_size={batch_size}"
    ):
        start_on_cpu = time()
        device_sample = transfer_to_device_fn(sample, model_device)
        start_on_device = time()
        device_result = model(device_sample)
        stop_on_device = time()
        transfer_to_device_fn(device_result, "cpu")
        stop_on_cpu = time()
        t_c2d.append(start_on_device - start_on_cpu)
        t_inf.append(stop_on_device - start_on_device)
        t_d2c.append(stop_on_cpu - stop_on_device)
        t_tot.append(stop_on_cpu - start_on_cpu)
    results_dict = {}
    times_and_titles = [(t_inf, "on_device_inference")]
    if model_device.type == "cuda":
        # Transfer timings are only meaningful when a real device copy happens.
        times_and_titles.extend(
            [
                (t_c2d, "cpu_to_gpu"),
                (t_d2c, "gpu_to_cpu"),
                (t_tot, "total"),
            ]
        )
    for s_per_batch, title in times_and_titles:
        s_per_batch = np.array(s_per_batch)
        batches_per_s = 1 / s_per_batch
        # Raw statistics for programmatic consumers.
        metrics = {
            "batches_per_second_mean": float(batches_per_s.mean()),
            "batches_per_second_std": float(batches_per_s.std()),
            "batches_per_second_min": float(batches_per_s.min()),
            "batches_per_second_max": float(batches_per_s.max()),
            "seconds_per_batch_mean": float(s_per_batch.mean()),
            "seconds_per_batch_std": float(s_per_batch.std()),
            "seconds_per_batch_min": float(s_per_batch.min()),
            "seconds_per_batch_max": float(s_per_batch.max()),
        }
        # "mean +/- std [min, max]" strings for log output.
        human_readable = {
            "batches_per_second": f"{format_num(batches_per_s.mean())} +/- {format_num(batches_per_s.std())} [{format_num(batches_per_s.min())}, {format_num(batches_per_s.max())}]",
            "batch_latency": f"{format_time(s_per_batch.mean())} +/- {format_time(s_per_batch.std())} [{format_time(s_per_batch.min())}, {format_time(s_per_batch.max())}]",
        }
        results_dict[title] = {"metrics": metrics, "human_readable": human_readable}
    return results_dict
def measure_energy(
    model,
    sample,
    model_device,
    transfer_to_device_fn=torch.Tensor.to,
    num_runs=100,
    batch_size: int = None,
    include_transfer_costs=True,
    print_fn=logger.info,
):
    """Estimate per-inference energy (joules), averaged over ``num_runs``.

    Currently only supported on NVIDIA Jetson devices (via the project's
    ``jetson_power`` helper); returns NaN when no backend is available.
    """
    inference_joules = _INVALID

    def test_with_transfer():
        # One full round-trip: host -> device, inference, device -> host.
        nonlocal model, sample
        transfer_to_device_fn(
            model(transfer_to_device_fn(sample, model_device)),
            "cpu",
        )

    def test_without_transfer():
        nonlocal model, sample
        model(sample)

    if include_transfer_costs:
        test_fn = test_with_transfer
    else:
        test_fn = test_without_transfer
        # Move the sample once up front since the test fn won't transfer it.
        sample = sample.to(model_device)

    # # Try carbon-tracker: The library is still too young
    # try:
    #     from carbontracker import tracker

    #     # Check if components are available (TODO: find a less brittle implementation for this)
    #     pids = tracker.CarbonTracker._get_pids(None)
    #     components = tracker.component.create_components(
    #         components="all", pids=pids, devices_by_pid=False
    #     )
    #     if not any([cmp for cmp in components if cmp.available()]):
    #         raise Exception("Valid CarbonTracker device not available")

    #     tracker = tracker.CarbonTracker(epochs=1, verbose=print_details)
    #     tracker.epoch_start()
    #     test_fn()
    #     tracker.epoch_end()
    #     # Grab results from logger
    # except Exception:
    #     pass

    # Try jetson power
    try:
        from .jetson_power import PowerEstimator

        p_est = PowerEstimator(print_fn=print_fn)
        # index 0 is total energy, index 1 is energy over idle consumption:
        meas = []
        for _ in tqdm(
            range(num_runs), desc=f"Measuring energy for batch_size={batch_size}"
        ):
            # estimate_fn_power returns millijoules; convert to joules.
            meas.append(p_est.estimate_fn_power(test_fn)[0] / 1000)
        inference_joules = float(np.array(meas).mean())
    except Exception:
        # Best-effort: silently fall through to the NaN sentinel below.
        pass

    if not _is_valid(inference_joules):
        logger.error(
            "Unable to measure energy consumption. Device must be a NVIDIA Jetson."
        )

    return inference_joules
def fmt(d: dict):
    """Render a dict as a YAML string for human-readable log output."""
    rendered = yaml.dump(d)
    return rendered
def benchmark(
    model: torch.nn.Module,
    sample: torch.Tensor,
    num_runs: int = 100,
    print_details=False,
    get_device_fn: Callable[[Any], torch.device] = get_device,
    transfer_to_device_fn=torch.Tensor.to,
    sample_with_batch_size1: Any = None,
    batch_size: int = None,
    print_fn=logger.info,
    warm_up_fn=warm_up,
):
    """Run the full benchmark suite on ``model`` with input ``sample``.

    Collects machine info, parameter count, FLOPs, and — for batch size 1 and
    the sample's batch size — memory (CUDA only), timing, and energy (Jetson
    only). Results are returned as a nested dict and logged via ``print_fn``.
    The model is put in eval mode during measurement and restored afterwards.
    """
    results = {}
    batch_size = batch_size or sample.shape[0]
    sample = transfer_to_device_fn(sample, "cpu")

    # Prepare sample with batch size 1 (random data matching trailing dims
    # unless an explicit batch-size-1 sample was supplied).
    if sample_with_batch_size1:
        sample1 = sample_with_batch_size1
    else:
        sample1_shape = (1, *sample.shape[1:])
        sample1 = torch.randn(sample1_shape)

    # Remember training state so it can be restored at the end.
    prevously_training = getattr(model, "training", False)
    if hasattr(model, "eval"):
        model.eval()

    # Get machine info
    machine_info = get_machine_info()
    results["machine_info"] = machine_info
    print_fn(fmt({"Machine info": machine_info}))

    model_device = get_device_fn(model)
    assert isinstance(
        model_device, torch.device
    ), "model_device should be a `torch.device`"
    results["device"] = model_device.type
    print_fn(f"Model device: {model_device}")

    # Measure params (skipped from results when measurement failed -> NaN)
    params = measure_params(model)
    if _is_valid(params):
        results["params"] = params
        print_fn(f"Model parameters: {params} ({format_num(params)})")

    # Measure FLOPs
    warm_up_fn(
        model,
        sample1,
        model_device,
        transfer_to_device_fn,
        num_runs=1,
        batch_size=1,
    )
    flops = measure_flops(model, sample1, print_details)
    if _is_valid(flops):
        results["flops"] = flops
        print_fn(f"Model FLOPs: {flops} ({format_num(flops)})")

    # Measure inference timing
    memory = {}
    timing = {}
    energy = {}
    with torch.no_grad():
        # Measure once per distinct batch size (1 and the requested size).
        for bs in sorted(set([1, batch_size])):
            s = sample1 if bs == 1 else sample

            # Measure Allocated Memory
            if model_device.type == "cuda":
                pre_mem, post_mem, max_mem = measure_allocated_memory(
                    model, s, model_device, transfer_to_device_fn, print_details
                )
                memory[f"batch_size_{bs}"] = {
                    "pre_inference_bytes": pre_mem,
                    "max_inference_bytes": max_mem,
                    "post_inference_bytes": post_mem,
                    "pre_inference": format_num(pre_mem, bytes=True),
                    "max_inference": format_num(max_mem, bytes=True),
                    "post_inference": format_num(post_mem, bytes=True),
                }
                print_fn(
                    fmt(
                        {
                            f"Memory results (batch_size={bs})": memory[
                                f"batch_size_{bs}"
                            ]
                        }
                    )
                )
            else:
                logger.warning(
                    "Measurement of allocated memory is only available on CUDA devices"
                )

            # Inference timing
            warm_up_fn(
                model,
                s,
                model_device,
                transfer_to_device_fn,
                num_runs=max(1, num_runs // 10),
                batch_size=bs,
            )
            if print_details:
                measure_detailed_inference_timing(model, s, model_device)
            timing[f"batch_size_{bs}"] = measure_repeated_inference_timing(
                model,
                s,
                model_device,
                transfer_to_device_fn,
                num_runs,
                bs,
            )
            print_fn(
                fmt({f"Timing results (batch_size={bs})": timing[f"batch_size_{bs}"]})
            )

            # Energy measurement (NaN unless on a supported device)
            energy_joules = measure_energy(
                model,
                s,
                model_device,
                transfer_to_device_fn,
                num_runs=max(1, num_runs // 10),
                batch_size=bs,
                include_transfer_costs=True,
                print_fn=print_fn,
            )
            if _is_valid(energy_joules):
                energy_kwh = energy_joules / 3.6e6
                energy[f"batch_size_{bs}"] = {
                    "joules": energy_joules,
                    "kWh": energy_kwh,
                }
                print_fn(
                    fmt(
                        {
                            f"Energy results (batch_size={bs})": energy[
                                f"batch_size_{bs}"
                            ]
                        }
                    )
                )

    results["timing"] = timing
    if memory:
        results["memory"] = memory
    if energy:
        results["energy"] = energy

    # Restore the model's original training mode.
    if prevously_training:
        model.train()

    return results
| [
"logging.getLogger",
"yaml.dump",
"torch.cuda.memory_allocated",
"inspect.getfullargspec",
"torch.autograd.profiler.profile",
"numpy.array",
"torch.no_grad",
"torch.cuda.reset_peak_memory_stats",
"torch.cuda.max_memory_allocated",
"ptflops.get_model_complexity_info",
"time.time",
"torch.randn"... | [((311, 339), 'logging.getLogger', 'getLogger', (['"""torch-benchmark"""'], {}), "('torch-benchmark')\n", (320, 339), False, 'from logging import getLogger\n'), ((1789, 1844), 'torch.cuda.reset_peak_memory_stats', 'torch.cuda.reset_peak_memory_stats', ([], {'device': 'model_device'}), '(device=model_device)\n', (1823, 1844), False, 'import torch\n'), ((1859, 1907), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', ([], {'device': 'model_device'}), '(device=model_device)\n', (1886, 1907), False, 'import torch\n'), ((2142, 2190), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', ([], {'device': 'model_device'}), '(device=model_device)\n', (2169, 2190), False, 'import torch\n'), ((2205, 2257), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {'device': 'model_device'}), '(device=model_device)\n', (2236, 2257), False, 'import torch\n'), ((7837, 7849), 'yaml.dump', 'yaml.dump', (['d'], {}), '(d)\n', (7846, 7849), False, 'import yaml\n'), ((576, 612), 'inspect.getfullargspec', 'inspect.getfullargspec', (['module_or_fn'], {}), '(module_or_fn)\n', (598, 612), False, 'import inspect\n'), ((846, 1007), 'ptflops.get_model_complexity_info', 'get_model_complexity_info', (['model', '(1,)'], {'print_per_layer_stat': 'print_details', 'as_strings': '(False)', 'input_constructor': 'input_constructor', 'verbose': 'print_details'}), '(model, (1,), print_per_layer_stat=print_details,\n as_strings=False, input_constructor=input_constructor, verbose=\n print_details)\n', (871, 1007), False, 'from ptflops import get_model_complexity_info\n'), ((3692, 3698), 'time.time', 'time', ([], {}), '()\n', (3696, 3698), False, 'from time import time\n'), ((3793, 3799), 'time.time', 'time', ([], {}), '()\n', (3797, 3799), False, 'from time import time\n'), ((3870, 3876), 'time.time', 'time', ([], {}), '()\n', (3874, 3876), False, 'from time import time\n'), ((3951, 3957), 'time.time', 'time', ([], {}), '()\n', (3955, 3957), False, 
'from time import time\n'), ((4537, 4558), 'numpy.array', 'np.array', (['s_per_batch'], {}), '(s_per_batch)\n', (4545, 4558), True, 'import numpy as np\n'), ((8503, 8529), 'torch.randn', 'torch.randn', (['sample1_shape'], {}), '(sample1_shape)\n', (8514, 8529), False, 'import torch\n'), ((9661, 9676), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9674, 9676), False, 'import torch\n'), ((2060, 2124), 'torch.cuda.memory_summary', 'torch.cuda.memory_summary', ([], {'device': 'model_device', 'abbreviated': '(True)'}), '(device=model_device, abbreviated=True)\n', (2085, 2124), False, 'import torch\n'), ((2791, 2885), 'torch.autograd.profiler.profile', 'torch.autograd.profiler.profile', ([], {'use_cuda': "(model_device.type == 'cuda')", 'profile_memory': '(True)'}), "(use_cuda=model_device.type == 'cuda',\n profile_memory=True)\n", (2822, 2885), False, 'import torch\n'), ((513, 557), 'inspect.getfullargspec', 'inspect.getfullargspec', (['module_or_fn.forward'], {}), '(module_or_fn.forward)\n', (535, 557), False, 'import inspect\n'), ((7562, 7576), 'numpy.array', 'np.array', (['meas'], {}), '(meas)\n', (7570, 7576), True, 'import numpy as np\n')] |
import os
import sys
import numpy as np
import matplotlib.pylab as plt
from agent import Qlearner, SARSAlearner
from environment import Game
def plot_agent_reward(rewards):
    """Plot the agent's cumulative reward against training episode number."""
    cumulative_rewards = np.cumsum(rewards)
    plt.plot(cumulative_rewards)
    plt.title('Agent Cumulative Reward vs. Iteration')
    plt.ylabel('Reward')
    plt.xlabel('Episode')
    plt.show()
class PlayGame():
    """Drive tic-tac-toe games between a human and a reinforcement-learning agent."""

    def __init__(self, agentType, numOfEpisodes, alpha=0.5, gamma=0.9, epsilon=0.1):
        self.numOfEpisodes = numOfEpisodes
        # 'q' selects Q-learning; anything else falls back to SARSA.
        if agentType == "q":
            self.agent = Qlearner(alpha, gamma, epsilon)
        else:
            self.agent = SARSAlearner(alpha, gamma, epsilon)

    def userPlayAgent(self):
        """Loop human-vs-agent games until the user declines a rematch."""
        while True:
            Game(self.agent).start()
            playAgain = input("Would you like to play again? ('y', 'n'): ")
            if playAgain == 'n':
                print("See you later!")
                break
            print()
            print("Okay lets play again!")
            print()

    def teachAgent(self):
        """Train the agent by self-play; intelligence depends on episode count."""
        for iteration in range(1, self.numOfEpisodes + 1):
            Game(self.agent).start(training=True)
            if iteration % 10000 == 0:
                print("Training round: " + str(iteration))
        plot_agent_reward(self.agent.rewards)
def getUserValues():
    """Prompt for agent type and training length, train the agent, then let the user play."""
    print("Welcome to Tic-Tac-Toe")
    # Keep asking until a valid agent type is entered.
    while True:
        print()
        agentType = input("Please input Agent Type (qlearning or sarsa) 'q' or 's': ")
        if agentType in ('q', 's'):
            print()
            if agentType == 'q':
                print('You entered Q-learning!')
            else:
                print('You entered Sarsa!')
            break
        print("Invalid agent type: " + agentType)
    print()
    print("For smart agent enter four hundred thousand (400000): ")
    print()
    numOfEpisodes = int(input("Please enter the number of episodes you want to train agent: "))
    game = PlayGame(agentType, numOfEpisodes)
    game.teachAgent()
    print("Done Teaching!")
    game.userPlayAgent()
if __name__ == "__main__":
    getUserValues()
| [
"agent.Qlearner",
"matplotlib.pylab.title",
"environment.Game",
"matplotlib.pylab.xlabel",
"matplotlib.pylab.show",
"numpy.cumsum",
"agent.SARSAlearner",
"matplotlib.pylab.ylabel"
] | [((251, 301), 'matplotlib.pylab.title', 'plt.title', (['"""Agent Cumulative Reward vs. Iteration"""'], {}), "('Agent Cumulative Reward vs. Iteration')\n", (260, 301), True, 'import matplotlib.pylab as plt\n'), ((306, 326), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Reward"""'], {}), "('Reward')\n", (316, 326), True, 'import matplotlib.pylab as plt\n'), ((331, 352), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (341, 352), True, 'import matplotlib.pylab as plt\n'), ((357, 367), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (365, 367), True, 'import matplotlib.pylab as plt\n'), ((227, 245), 'numpy.cumsum', 'np.cumsum', (['rewards'], {}), '(rewards)\n', (236, 245), True, 'import numpy as np\n'), ((569, 600), 'agent.Qlearner', 'Qlearner', (['alpha', 'gamma', 'epsilon'], {}), '(alpha, gamma, epsilon)\n', (577, 600), False, 'from agent import Qlearner, SARSAlearner\n'), ((638, 673), 'agent.SARSAlearner', 'SARSAlearner', (['alpha', 'gamma', 'epsilon'], {}), '(alpha, gamma, epsilon)\n', (650, 673), False, 'from agent import Qlearner, SARSAlearner\n'), ((743, 759), 'environment.Game', 'Game', (['self.agent'], {}), '(self.agent)\n', (747, 759), False, 'from environment import Game\n'), ((1226, 1242), 'environment.Game', 'Game', (['self.agent'], {}), '(self.agent)\n', (1230, 1242), False, 'from environment import Game\n')] |
"""This script is responsible for creating the main sketch dataset from the JSON files.
Some basic filtering is applied in order to exclude empty sketches. However, as this dataset
is intended to capture the original data, further filtering is left to scripts such as
`sketchgraphs.pipeline.make_sequence_dataset` which process dataset for learning.
"""
import argparse
import collections
import glob
import gzip
import itertools
import json
import multiprocessing as mp
import tarfile
import traceback
import os
import numpy as np
import tqdm
import zstandard as zstd
from sketchgraphs.data.sketch import Sketch
from sketchgraphs.data import flat_array
def _load_json(path):
open_ = gzip.open if path.endswith('gz') else open
with open_(path) as fh:
return json.load(fh)
def filter_sketch(sketch: Sketch):
"""Basic filtering which excludes empty sketches, or sketches with no constraints."""
return len(sketch.constraints) == 0 or len(sketch.entities) == 0
def parse_sketch_id(filename):
basename = os.path.basename(filename)
while '.' in basename:
basename, _ = os.path.splitext(basename)
document_id, part_id = basename.split('_')
return document_id, int(part_id)
def load_json_tarball(path):
"""Loads a json tarball as an iterable of sketches.
Parameters
----------
path : str
A path to the location of a single shard
Returns
-------
iterable of `Sketch`
An iterable of `Sketch` representing all the sketches present in the tarball.
"""
with open(path, 'rb') as base_file:
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(base_file) as tarball:
with tarfile.open(fileobj=tarball, mode='r|') as directory:
while True:
json_file = directory.next()
if json_file is None:
break
if not json_file.isfile():
continue
document_id, part_id = parse_sketch_id(json_file.name)
data = directory.extractfile(json_file).read()
if len(data) == 0:
# skip empty files
continue
try:
sketches_json = json.loads(data)
except json.JSONDecodeError as exc:
raise ValueError('Error decoding JSON for document {0} part {1}.'.format(document_id, part_id))
for i, sketch_json in enumerate(sketches_json):
yield (document_id, part_id, i), Sketch.from_fs_json(sketch_json)
def _worker(paths_queue, processed_sketches, max_sketches, sketch_counter):
num_filtered = 0
num_invalid = 0
while max_sketches is None or sketch_counter.value < max_sketches:
paths = paths_queue.get()
if paths is None:
break
sketches = []
for path in paths:
sketch_list = _load_json(path)
for sketch_json in sketch_list:
try:
sketch = Sketch.from_fs_json(sketch_json)
except Exception as err:
num_invalid += 1
print('Error processing sketch in file {0}'.format(path))
traceback.print_exception(type(err), err, err.__traceback__)
if filter_sketch(sketch):
num_filtered += 1
continue
sketches.append(sketch)
offsets, data = flat_array.raw_list_flat(sketches)
processed_sketches.put((offsets, data))
with sketch_counter.get_lock():
sketch_counter.value += len(sketches)
processed_sketches.put({
'num_filtered': num_filtered,
'num_invalid': num_invalid
})
def process(paths, threads, max_sketches=None):
path_queue = mp.Queue()
sketch_queue = mp.Queue()
sketch_counter = mp.Value('q', 0)
# Enqueue all the objects
print('Enqueueing files to process.')
paths_it = iter(paths)
while True:
path_chunk = list(itertools.islice(paths_it, 128))
if len(path_chunk) == 0:
break
path_queue.put_nowait(path_chunk)
workers = []
for _ in range(threads or mp.cpu_count()):
workers.append(
mp.Process(
target=_worker,
args=(path_queue, sketch_queue, max_sketches, sketch_counter)))
for worker in workers:
path_queue.put_nowait(None)
worker.start()
active_workers = len(workers)
offsets_arrays = []
data_arrays = []
statistics = collections.Counter()
# Read-in data
with tqdm.tqdm(total=len(paths)) as pbar:
while active_workers > 0:
result = sketch_queue.get()
if isinstance(result, dict):
statistics += collections.Counter(result)
active_workers -= 1
continue
offsets, data = result
offsets_arrays.append(offsets)
data_arrays.append(data)
pbar.update(128)
# Finalize workers
for worker in workers:
worker.join()
# Merge final flat array
all_offsets, all_data = flat_array.merge_raw_list(offsets_arrays, data_arrays)
total_sketches = len(all_offsets) - 1
del offsets_arrays
del data_arrays
# Pack as required
flat_data = flat_array.pack_list_flat(all_offsets, all_data)
del all_offsets
del all_data
print('Done processing data.\nProcessed sketches: {0}'.format(total_sketches))
print('Filtered sketches: {0}'.format(statistics['num_filtered']))
print('Invalid sketches: {0}'.format(statistics['num_invalid']))
return flat_data
def gather_sorted_paths(patterns):
if isinstance(patterns, str):
patterns = [patterns]
out = []
for pattern in patterns:
out.extend(glob.glob(pattern))
out.sort()
return out
def main():
parser = argparse.ArgumentParser(description='Process json files to create sketch dataset')
parser.add_argument('--glob_pattern', required=True, action='append',
help='Glob pattern(s) for json / json.gz files.')
parser.add_argument('--output_path', required=True, help='Path for output file.')
parser.add_argument('--max_files', type=int, help='Max number of json files to consider.')
parser.add_argument('--max_sketches', type=int, help='Maximum number of sketches to consider.')
parser.add_argument('--num_threads', type=int, default=0, help='Number of multiprocessing workers.')
args = parser.parse_args()
print('Globbing for sketch files to include.')
paths = gather_sorted_paths(args.glob_pattern)
print('Found %i files.' % len(paths))
if args.max_files is not None:
paths = paths[:args.max_files]
result = process(paths, args.num_threads, args.max_sketches)
print('Saving data to {0}'.format(args.output_path))
np.save(args.output_path, result)
if __name__ == '__main__':
main()
| [
"tarfile.open",
"multiprocessing.Process",
"multiprocessing.cpu_count",
"sketchgraphs.data.flat_array.raw_list_flat",
"numpy.save",
"argparse.ArgumentParser",
"multiprocessing.Value",
"sketchgraphs.data.sketch.Sketch.from_fs_json",
"zstandard.ZstdDecompressor",
"glob.glob",
"json.loads",
"os.p... | [((1039, 1065), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (1055, 1065), False, 'import os\n'), ((3922, 3932), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (3930, 3932), True, 'import multiprocessing as mp\n'), ((3952, 3962), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (3960, 3962), True, 'import multiprocessing as mp\n'), ((3984, 4000), 'multiprocessing.Value', 'mp.Value', (['"""q"""', '(0)'], {}), "('q', 0)\n", (3992, 4000), True, 'import multiprocessing as mp\n'), ((4682, 4703), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (4701, 4703), False, 'import collections\n'), ((5282, 5336), 'sketchgraphs.data.flat_array.merge_raw_list', 'flat_array.merge_raw_list', (['offsets_arrays', 'data_arrays'], {}), '(offsets_arrays, data_arrays)\n', (5307, 5336), False, 'from sketchgraphs.data import flat_array\n'), ((5462, 5510), 'sketchgraphs.data.flat_array.pack_list_flat', 'flat_array.pack_list_flat', (['all_offsets', 'all_data'], {}), '(all_offsets, all_data)\n', (5487, 5510), False, 'from sketchgraphs.data import flat_array\n'), ((6032, 6119), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process json files to create sketch dataset"""'}), "(description=\n 'Process json files to create sketch dataset')\n", (6055, 6119), False, 'import argparse\n'), ((7028, 7061), 'numpy.save', 'np.save', (['args.output_path', 'result'], {}), '(args.output_path, result)\n', (7035, 7061), True, 'import numpy as np\n'), ((781, 794), 'json.load', 'json.load', (['fh'], {}), '(fh)\n', (790, 794), False, 'import json\n'), ((1115, 1141), 'os.path.splitext', 'os.path.splitext', (['basename'], {}), '(basename)\n', (1131, 1141), False, 'import os\n'), ((1608, 1631), 'zstandard.ZstdDecompressor', 'zstd.ZstdDecompressor', ([], {}), '()\n', (1629, 1631), True, 'import zstandard as zstd\n'), ((3570, 3604), 'sketchgraphs.data.flat_array.raw_list_flat', 'flat_array.raw_list_flat', 
(['sketches'], {}), '(sketches)\n', (3594, 3604), False, 'from sketchgraphs.data import flat_array\n'), ((4143, 4174), 'itertools.islice', 'itertools.islice', (['paths_it', '(128)'], {}), '(paths_it, 128)\n', (4159, 4174), False, 'import itertools\n'), ((4319, 4333), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (4331, 4333), True, 'import multiprocessing as mp\n'), ((4372, 4465), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_worker', 'args': '(path_queue, sketch_queue, max_sketches, sketch_counter)'}), '(target=_worker, args=(path_queue, sketch_queue, max_sketches,\n sketch_counter))\n', (4382, 4465), True, 'import multiprocessing as mp\n'), ((5955, 5973), 'glob.glob', 'glob.glob', (['pattern'], {}), '(pattern)\n', (5964, 5973), False, 'import glob\n'), ((1704, 1744), 'tarfile.open', 'tarfile.open', ([], {'fileobj': 'tarball', 'mode': '"""r|"""'}), "(fileobj=tarball, mode='r|')\n", (1716, 1744), False, 'import tarfile\n'), ((4916, 4943), 'collections.Counter', 'collections.Counter', (['result'], {}), '(result)\n', (4935, 4943), False, 'import collections\n'), ((3124, 3156), 'sketchgraphs.data.sketch.Sketch.from_fs_json', 'Sketch.from_fs_json', (['sketch_json'], {}), '(sketch_json)\n', (3143, 3156), False, 'from sketchgraphs.data.sketch import Sketch\n'), ((2313, 2329), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (2323, 2329), False, 'import json\n'), ((2631, 2663), 'sketchgraphs.data.sketch.Sketch.from_fs_json', 'Sketch.from_fs_json', (['sketch_json'], {}), '(sketch_json)\n', (2650, 2663), False, 'from sketchgraphs.data.sketch import Sketch\n')] |
import numpy as np
def gaussPDF(data, mu, sigma):
"""
This function computes the Probability Density Function (PDF) of a
multivariate Gaussian represented by means and covariance matrix.
Inputs -----------------------------------------------------------------
o data: D x N array representing N datapoints of D dimensions.
o mu: D x K array representing the centers of the K GMM components.
o sigma: D x D x K array representing the covariance matrices of the
K GMM components.
Outputs ----------------------------------------------------------------
o prob: 1 x N array representing the probabilities for the
N datapoints.
Copyright (c) 2006 <NAME>, LASA Lab, EPFL, CH-1015 Lausanne,
Switzerland, http://lasa.epfl.ch
Ported to python by <NAME> & <NAME>
<EMAIL>
August 12, 2017
"""
nbVar, nbdata = data.shape
data = data.T - np.tile(mu.T, [nbdata,1])
prob = np.sum((data/sigma)*data, axis=1);
prob = np.exp(-0.5*prob) / np.sqrt((2*np.pi)**nbVar *
(np.abs(np.linalg.det(sigma))+
np.finifo(np.float64).min))
return prob
| [
"numpy.tile",
"numpy.linalg.det",
"numpy.exp",
"numpy.sum",
"numpy.finifo"
] | [((1045, 1080), 'numpy.sum', 'np.sum', (['(data / sigma * data)'], {'axis': '(1)'}), '(data / sigma * data, axis=1)\n', (1051, 1080), True, 'import numpy as np\n'), ((1008, 1034), 'numpy.tile', 'np.tile', (['mu.T', '[nbdata, 1]'], {}), '(mu.T, [nbdata, 1])\n', (1015, 1034), True, 'import numpy as np\n'), ((1091, 1110), 'numpy.exp', 'np.exp', (['(-0.5 * prob)'], {}), '(-0.5 * prob)\n', (1097, 1110), True, 'import numpy as np\n'), ((1185, 1205), 'numpy.linalg.det', 'np.linalg.det', (['sigma'], {}), '(sigma)\n', (1198, 1205), True, 'import numpy as np\n'), ((1248, 1269), 'numpy.finifo', 'np.finifo', (['np.float64'], {}), '(np.float64)\n', (1257, 1269), True, 'import numpy as np\n')] |
import numpy as np
from scipy import ndimage
from sHAM import nu_CWS
import gc
def find_index_first_dense(list_weights):
i = 0
for w in list_weights:
if len(w.shape)==2:
return i
i += 1
def idx_matrix_to_matrix(idx_matrix,centers):
return centers[idx_matrix.reshape(-1,1)].reshape(idx_matrix.shape)
def centroid_gradient_matrix(idx_matrix,gradient,cluster):
return ndimage.sum(gradient,idx_matrix,index=range(cluster)).reshape(cluster,1)
#STOCHASTIC COMPRESSION FUNCTIONS
def generate_intervals(W, n_intervals):
intervals = []
values_dict = {}
for i in range(n_intervals):
lower_extreme = np.quantile(W, i/n_intervals)
upper_extreme = np.quantile(W, (i+1)/n_intervals)
intervals.append((lower_extreme, upper_extreme))
values_dict[i] = lower_extreme
#The last extreme must also be included
values_dict[len(values_dict)]= intervals[-1][1]
return values_dict , intervals
def get_interval(w, intervals):
interval = None
for i in intervals:
if w >= i[0] and w < i[1]:
interval = i
break
if not interval:
interval = intervals[-1]
return interval
def binarize(w, intervals, indices_dict):
[v,V] = get_interval(w, intervals)
return indices_dict[V] if np.random.uniform() <= (w-v)/(V-v) else indices_dict[v]
def stochastic_compression(W, b, dtype=np.uint8):
n_intervals = (2**b) - 1
values_dict, intervals = generate_intervals(W, n_intervals)
indices_dict = {v: k for k,v in values_dict.items()}
vect_bin = np.vectorize(binarize)
vect_bin.excluded.add(1)
vect_bin.excluded.add(2)
return values_dict, vect_bin(W, intervals, indices_dict).astype(dtype)
#END STOCHASTIC COMPRESSION FUNCTIONS
class nu_PWS(nu_CWS.nu_CWS):
def __init__(self, model, bits_for_dense_layers, index_first_dense, apply_compression_bias=False, div=None):
self.model = model
self.bits = bits_for_dense_layers
self.clusters = [ 2**i for i in bits_for_dense_layers]
self.index_first_dense = index_first_dense
if div:
self.div=div
else:
self.div = 1 if apply_compression_bias else 2
def apply_stochastic(self, list_trainable=None, untrainable_per_layers=None):
if not list_trainable:
list_weights = self.model.get_weights()
else:
list_weights=[]
for w in (list_trainable):
list_weights.append(w.numpy())
d = self.index_first_dense
dtypelist = [ "uint8" if i <= 8 else "uint16" for i in self.bits]
result = [stochastic_compression(list_weights[i], self.bits[(i-d)//self.div], dtypelist[(i-d)//self.div]) for i in range (d, len(list_weights), self.div)]
values = [ v for (v , _) in result]
self.centers = []
i = 0
for d in values:
vect = np.zeros(shape=(self.clusters[i], 1), dtype="float32")
for key, v in d.items():
vect[key] = v
self.centers.append(vect)
i = i+1
self.idx_layers = [ m for (_ , m) in result]
if not list_trainable:
self.untrainable_per_layers = 0
self.model.set_weights(self.recompose_weight(list_weights))
else:
self.untrainable_per_layers = untrainable_per_layers
self.model.set_weights(self.recompose_weight(list_weights, True, untrainable_per_layers))
gc.collect()
def recompose_weight(self, list_weights, trainable_vars=False, untrainable_per_layers=None):
if not trainable_vars:
d = self.index_first_dense
return list_weights[:d]+[(idx_matrix_to_matrix(self.idx_layers[(i-d)//self.div], self.centers[(i-d)//self.div])) if i%self.div==0 else (list_weights[i]) for i in range(d,len(list_weights))]
else:
div = self.div + untrainable_per_layers
list_weights = self.trainable_to_weights(self.model.get_weights(), list_weights, untrainable_per_layers)
d = find_index_first_dense(list_weights)
return list_weights[:d]+[(idx_matrix_to_matrix(self.idx_layers[(i-d)//div], self.centers[(i-d)//div])) if i%div==0 else (list_weights[i]) for i in range(d,len(list_weights))]
| [
"numpy.quantile",
"numpy.zeros",
"gc.collect",
"numpy.random.uniform",
"numpy.vectorize"
] | [((1588, 1610), 'numpy.vectorize', 'np.vectorize', (['binarize'], {}), '(binarize)\n', (1600, 1610), True, 'import numpy as np\n'), ((660, 691), 'numpy.quantile', 'np.quantile', (['W', '(i / n_intervals)'], {}), '(W, i / n_intervals)\n', (671, 691), True, 'import numpy as np\n'), ((714, 751), 'numpy.quantile', 'np.quantile', (['W', '((i + 1) / n_intervals)'], {}), '(W, (i + 1) / n_intervals)\n', (725, 751), True, 'import numpy as np\n'), ((3522, 3534), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3532, 3534), False, 'import gc\n'), ((1316, 1335), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1333, 1335), True, 'import numpy as np\n'), ((2935, 2989), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.clusters[i], 1)', 'dtype': '"""float32"""'}), "(shape=(self.clusters[i], 1), dtype='float32')\n", (2943, 2989), True, 'import numpy as np\n')] |
#%%
import os
from pyteomics import mzid, mzml
import pandas as pd
import numpy as np
import glob
"""
Identically as how we did with the training data set, we randomly divided the test files into different
folders, then we generated different data frames and stored all of them in one single hdf file as our
validation daata set
"""
#%%
os.chdir('./test')
mzid_files=glob.glob('*.mzid')
indexed_mzid = mzid.chain.from_iterable(mzid_files,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid = []
for entry in(indexed_mzid):
all_mzid.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid)
mzid_df = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra)
spectra_df = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df = pd.merge(mzid_df,spectra_df,how='left',on=['file','id'])
merged_df = merged_df[['id','seq','mz','intensities']]
#%%
hdf_test = pd.HDFStore('/home/ubuntu/data/jiahao/files/test.hdf5', mode='w')
#%%
hdf_test.put(value=merged_df, key="df")
#%%
os.chdir('./test_1')
mzid_files_1 = glob.glob('*.mzid')
indexed_mzid_1 = mzid.chain.from_iterable(mzid_files_1,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_1 = []
for entry in(indexed_mzid_1):
all_mzid_1.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_1)
mzid_df_1 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_1 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_1.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_1)
spectra_df_1 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_1 = pd.merge(mzid_df_1,spectra_df_1,how='left',on=['file','id'])
merged_df_1 = merged_df_1[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_1, key="df1")
#%%
os.chdir('./test_2')
mzid_files_2 = glob.glob('*.mzid')
indexed_mzid_2 = mzid.chain.from_iterable(mzid_files_2,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_2 = []
for entry in(indexed_mzid_2):
all_mzid_2.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_2)
mzid_df_2 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_2 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_2.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_2)
spectra_df_2 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_2 = pd.merge(mzid_df_2,spectra_df_2,how='left',on=['file','id'])
merged_df_2 = merged_df_2[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_2, key="df2")
#%%
os.chdir('./test_4')
mzid_files_4 = glob.glob('*.mzid')
indexed_mzid_4 = mzid.chain.from_iterable(mzid_files_4,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_4 = []
for entry in(indexed_mzid_4):
all_mzid_4.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_4)
mzid_df_4 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_4 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_4.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_4)
spectra_df_4 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_4 = pd.merge(mzid_df_4,spectra_df_4,how='left',on=['file','id'])
merged_df_4 = merged_df_4[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_4, key="df4")
#%%
os.chdir('./test_5')
mzid_files_5 = glob.glob('*.mzid')
indexed_mzid_5 = mzid.chain.from_iterable(mzid_files_5,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_5 = []
for entry in(indexed_mzid_5):
all_mzid_5.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_5)
mzid_df_5 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_5 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_5.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_5)
spectra_df_5 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_5 = pd.merge(mzid_df_5,spectra_df_5,how='left',on=['file','id'])
merged_df_5 = merged_df_5[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_5, key="df5")
#%%
os.chdir('./test_6')
mzid_files_6 = glob.glob('*.mzid')
indexed_mzid_6 = mzid.chain.from_iterable(mzid_files_6,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_6 = []
for entry in(indexed_mzid_6):
all_mzid_6.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_6)
mzid_df_6 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_6 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_6.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_6)
spectra_df_6 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_6 = pd.merge(mzid_df_6,spectra_df_6,how='left',on=['file','id'])
merged_df_6 = merged_df_6[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_6, key="df6")
# %%
os.chdir('./test_7')
mzid_files_7 = glob.glob('*.mzid')
indexed_mzid_7 = mzid.chain.from_iterable(mzid_files_7,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_7 = []
for entry in(indexed_mzid_7):
all_mzid_7.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_7)
mzid_df_7 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_7 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_7.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_7)
spectra_df_7 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_7 = pd.merge(mzid_df_7,spectra_df_7,how='left',on=['file','id'])
merged_df_7 = merged_df_7[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_7, key="df7")
# %%
os.chdir('./test_8')
mzid_files_8 = glob.glob('*.mzid')
indexed_mzid_8 = mzid.chain.from_iterable(mzid_files_8,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_8 = []
for entry in(indexed_mzid_8):
all_mzid_8.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_8)
mzid_df_8 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_8 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_7.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_8)
spectra_df_8 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
merged_df_8 = pd.merge(mzid_df_8,spectra_df_8,how='left',on=['file','id'])
merged_df_8 = merged_df_8[['id','seq','mz','intensities']]
# %%
hdf.put(value=merged_df_8, key="df8")
| [
"pyteomics.mzml.MzML",
"numpy.unique",
"pandas.merge",
"pyteomics.mzid.chain.from_iterable",
"os.chdir",
"numpy.array",
"pandas.DataFrame",
"pandas.HDFStore",
"glob.glob"
] | [((339, 357), 'os.chdir', 'os.chdir', (['"""./test"""'], {}), "('./test')\n", (347, 357), False, 'import os\n'), ((370, 389), 'glob.glob', 'glob.glob', (['"""*.mzid"""'], {}), "('*.mzid')\n", (379, 389), False, 'import glob\n'), ((405, 457), 'pyteomics.mzid.chain.from_iterable', 'mzid.chain.from_iterable', (['mzid_files'], {'use_index': '(True)'}), '(mzid_files, use_index=True)\n', (429, 457), False, 'from pyteomics import mzid, mzml\n'), ((1029, 1098), 'pandas.DataFrame', 'pd.DataFrame', (["{'file': file_location, 'id': spectrum_ids, 'seq': seq}"], {}), "({'file': file_location, 'id': spectrum_ids, 'seq': seq})\n", (1041, 1098), True, 'import pandas as pd\n'), ((1304, 1328), 'numpy.unique', 'np.unique', (['file_location'], {}), '(file_location)\n', (1313, 1328), True, 'import numpy as np\n'), ((1571, 1661), 'pandas.DataFrame', 'pd.DataFrame', (["{'file': mzml_location, 'id': ids, 'mz': mz, 'intensities': intensities}"], {}), "({'file': mzml_location, 'id': ids, 'mz': mz, 'intensities':\n intensities})\n", (1583, 1661), True, 'import pandas as pd\n'), ((1689, 1749), 'pandas.merge', 'pd.merge', (['mzid_df', 'spectra_df'], {'how': '"""left"""', 'on': "['file', 'id']"}), "(mzid_df, spectra_df, how='left', on=['file', 'id'])\n", (1697, 1749), True, 'import pandas as pd\n'), ((1818, 1883), 'pandas.HDFStore', 'pd.HDFStore', (['"""/home/ubuntu/data/jiahao/files/test.hdf5"""'], {'mode': '"""w"""'}), "('/home/ubuntu/data/jiahao/files/test.hdf5', mode='w')\n", (1829, 1883), True, 'import pandas as pd\n'), ((1934, 1954), 'os.chdir', 'os.chdir', (['"""./test_1"""'], {}), "('./test_1')\n", (1942, 1954), False, 'import os\n'), ((1971, 1990), 'glob.glob', 'glob.glob', (['"""*.mzid"""'], {}), "('*.mzid')\n", (1980, 1990), False, 'import glob\n'), ((2008, 2062), 'pyteomics.mzid.chain.from_iterable', 'mzid.chain.from_iterable', (['mzid_files_1'], {'use_index': '(True)'}), '(mzid_files_1, use_index=True)\n', (2032, 2062), False, 'from pyteomics import mzid, mzml\n'), ((2644, 
2713), 'pandas.DataFrame', 'pd.DataFrame', (["{'file': file_location, 'id': spectrum_ids, 'seq': seq}"], {}), "({'file': file_location, 'id': spectrum_ids, 'seq': seq})\n", (2656, 2713), True, 'import pandas as pd\n'), ((2921, 2945), 'numpy.unique', 'np.unique', (['file_location'], {}), '(file_location)\n', (2930, 2945), True, 'import numpy as np\n'), ((3194, 3284), 'pandas.DataFrame', 'pd.DataFrame', (["{'file': mzml_location, 'id': ids, 'mz': mz, 'intensities': intensities}"], {}), "({'file': mzml_location, 'id': ids, 'mz': mz, 'intensities':\n intensities})\n", (3206, 3284), True, 'import pandas as pd\n'), ((3314, 3378), 'pandas.merge', 'pd.merge', (['mzid_df_1', 'spectra_df_1'], {'how': '"""left"""', 'on': "['file', 'id']"}), "(mzid_df_1, spectra_df_1, how='left', on=['file', 'id'])\n", (3322, 3378), True, 'import pandas as pd\n'), ((3488, 3508), 'os.chdir', 'os.chdir', (['"""./test_2"""'], {}), "('./test_2')\n", (3496, 3508), False, 'import os\n'), ((3525, 3544), 'glob.glob', 'glob.glob', (['"""*.mzid"""'], {}), "('*.mzid')\n", (3534, 3544), False, 'import glob\n'), ((3562, 3616), 'pyteomics.mzid.chain.from_iterable', 'mzid.chain.from_iterable', (['mzid_files_2'], {'use_index': '(True)'}), '(mzid_files_2, use_index=True)\n', (3586, 3616), False, 'from pyteomics import mzid, mzml\n'), ((4198, 4267), 'pandas.DataFrame', 'pd.DataFrame', (["{'file': file_location, 'id': spectrum_ids, 'seq': seq}"], {}), "({'file': file_location, 'id': spectrum_ids, 'seq': seq})\n", (4210, 4267), True, 'import pandas as pd\n'), ((4475, 4499), 'numpy.unique', 'np.unique', (['file_location'], {}), '(file_location)\n', (4484, 4499), True, 'import numpy as np\n'), ((4748, 4838), 'pandas.DataFrame', 'pd.DataFrame', (["{'file': mzml_location, 'id': ids, 'mz': mz, 'intensities': intensities}"], {}), "({'file': mzml_location, 'id': ids, 'mz': mz, 'intensities':\n intensities})\n", (4760, 4838), True, 'import pandas as pd\n'), ((4868, 4932), 'pandas.merge', 'pd.merge', (['mzid_df_2', 
'spectra_df_2'], {'how': '"""left"""', 'on': "['file', 'id']"}), "(mzid_df_2, spectra_df_2, how='left', on=['file', 'id'])\n", (4876, 4932), True, 'import pandas as pd\n'), ((5044, 5064), 'os.chdir', 'os.chdir', (['"""./test_4"""'], {}), "('./test_4')\n", (5052, 5064), False, 'import os\n'), ((5081, 5100), 'glob.glob', 'glob.glob', (['"""*.mzid"""'], {}), "('*.mzid')\n", (5090, 5100), False, 'import glob\n'), ((5118, 5172), 'pyteomics.mzid.chain.from_iterable', 'mzid.chain.from_iterable', (['mzid_files_4'], {'use_index': '(True)'}), '(mzid_files_4, use_index=True)\n', (5142, 5172), False, 'from pyteomics import mzid, mzml\n'), ((5754, 5823), 'pandas.DataFrame', 'pd.DataFrame', (["{'file': file_location, 'id': spectrum_ids, 'seq': seq}"], {}), "({'file': file_location, 'id': spectrum_ids, 'seq': seq})\n", (5766, 5823), True, 'import pandas as pd\n'), ((6031, 6055), 'numpy.unique', 'np.unique', (['file_location'], {}), '(file_location)\n', (6040, 6055), True, 'import numpy as np\n'), ((6304, 6394), 'pandas.DataFrame', 'pd.DataFrame', (["{'file': mzml_location, 'id': ids, 'mz': mz, 'intensities': intensities}"], {}), "({'file': mzml_location, 'id': ids, 'mz': mz, 'intensities':\n intensities})\n", (6316, 6394), True, 'import pandas as pd\n'), ((6424, 6488), 'pandas.merge', 'pd.merge', (['mzid_df_4', 'spectra_df_4'], {'how': '"""left"""', 'on': "['file', 'id']"}), "(mzid_df_4, spectra_df_4, how='left', on=['file', 'id'])\n", (6432, 6488), True, 'import pandas as pd\n'), ((6599, 6619), 'os.chdir', 'os.chdir', (['"""./test_5"""'], {}), "('./test_5')\n", (6607, 6619), False, 'import os\n'), ((6636, 6655), 'glob.glob', 'glob.glob', (['"""*.mzid"""'], {}), "('*.mzid')\n", (6645, 6655), False, 'import glob\n'), ((6673, 6727), 'pyteomics.mzid.chain.from_iterable', 'mzid.chain.from_iterable', (['mzid_files_5'], {'use_index': '(True)'}), '(mzid_files_5, use_index=True)\n', (6697, 6727), False, 'from pyteomics import mzid, mzml\n'), ((7309, 7378), 'pandas.DataFrame', 
'pd.DataFrame', (["{'file': file_location, 'id': spectrum_ids, 'seq': seq}"], {}), "({'file': file_location, 'id': spectrum_ids, 'seq': seq})\n", (7321, 7378), True, 'import pandas as pd\n'), ((7586, 7610), 'numpy.unique', 'np.unique', (['file_location'], {}), '(file_location)\n', (7595, 7610), True, 'import numpy as np\n'), ((7859, 7949), 'pandas.DataFrame', 'pd.DataFrame', (["{'file': mzml_location, 'id': ids, 'mz': mz, 'intensities': intensities}"], {}), "({'file': mzml_location, 'id': ids, 'mz': mz, 'intensities':\n intensities})\n", (7871, 7949), True, 'import pandas as pd\n'), ((7979, 8043), 'pandas.merge', 'pd.merge', (['mzid_df_5', 'spectra_df_5'], {'how': '"""left"""', 'on': "['file', 'id']"}), "(mzid_df_5, spectra_df_5, how='left', on=['file', 'id'])\n", (7987, 8043), True, 'import pandas as pd\n'), ((8154, 8174), 'os.chdir', 'os.chdir', (['"""./test_6"""'], {}), "('./test_6')\n", (8162, 8174), False, 'import os\n'), ((8191, 8210), 'glob.glob', 'glob.glob', (['"""*.mzid"""'], {}), "('*.mzid')\n", (8200, 8210), False, 'import glob\n'), ((8228, 8282), 'pyteomics.mzid.chain.from_iterable', 'mzid.chain.from_iterable', (['mzid_files_6'], {'use_index': '(True)'}), '(mzid_files_6, use_index=True)\n', (8252, 8282), False, 'from pyteomics import mzid, mzml\n'), ((8864, 8933), 'pandas.DataFrame', 'pd.DataFrame', (["{'file': file_location, 'id': spectrum_ids, 'seq': seq}"], {}), "({'file': file_location, 'id': spectrum_ids, 'seq': seq})\n", (8876, 8933), True, 'import pandas as pd\n'), ((9141, 9165), 'numpy.unique', 'np.unique', (['file_location'], {}), '(file_location)\n', (9150, 9165), True, 'import numpy as np\n'), ((9414, 9504), 'pandas.DataFrame', 'pd.DataFrame', (["{'file': mzml_location, 'id': ids, 'mz': mz, 'intensities': intensities}"], {}), "({'file': mzml_location, 'id': ids, 'mz': mz, 'intensities':\n intensities})\n", (9426, 9504), True, 'import pandas as pd\n'), ((9534, 9598), 'pandas.merge', 'pd.merge', (['mzid_df_6', 'spectra_df_6'], {'how': 
'"""left"""', 'on': "['file', 'id']"}), "(mzid_df_6, spectra_df_6, how='left', on=['file', 'id'])\n", (9542, 9598), True, 'import pandas as pd\n'), ((9709, 9729), 'os.chdir', 'os.chdir', (['"""./test_7"""'], {}), "('./test_7')\n", (9717, 9729), False, 'import os\n'), ((9746, 9765), 'glob.glob', 'glob.glob', (['"""*.mzid"""'], {}), "('*.mzid')\n", (9755, 9765), False, 'import glob\n'), ((9783, 9837), 'pyteomics.mzid.chain.from_iterable', 'mzid.chain.from_iterable', (['mzid_files_7'], {'use_index': '(True)'}), '(mzid_files_7, use_index=True)\n', (9807, 9837), False, 'from pyteomics import mzid, mzml\n'), ((10419, 10488), 'pandas.DataFrame', 'pd.DataFrame', (["{'file': file_location, 'id': spectrum_ids, 'seq': seq}"], {}), "({'file': file_location, 'id': spectrum_ids, 'seq': seq})\n", (10431, 10488), True, 'import pandas as pd\n'), ((10696, 10720), 'numpy.unique', 'np.unique', (['file_location'], {}), '(file_location)\n', (10705, 10720), True, 'import numpy as np\n'), ((10969, 11059), 'pandas.DataFrame', 'pd.DataFrame', (["{'file': mzml_location, 'id': ids, 'mz': mz, 'intensities': intensities}"], {}), "({'file': mzml_location, 'id': ids, 'mz': mz, 'intensities':\n intensities})\n", (10981, 11059), True, 'import pandas as pd\n'), ((11089, 11153), 'pandas.merge', 'pd.merge', (['mzid_df_7', 'spectra_df_7'], {'how': '"""left"""', 'on': "['file', 'id']"}), "(mzid_df_7, spectra_df_7, how='left', on=['file', 'id'])\n", (11097, 11153), True, 'import pandas as pd\n'), ((11264, 11284), 'os.chdir', 'os.chdir', (['"""./test_8"""'], {}), "('./test_8')\n", (11272, 11284), False, 'import os\n'), ((11301, 11320), 'glob.glob', 'glob.glob', (['"""*.mzid"""'], {}), "('*.mzid')\n", (11310, 11320), False, 'import glob\n'), ((11338, 11392), 'pyteomics.mzid.chain.from_iterable', 'mzid.chain.from_iterable', (['mzid_files_8'], {'use_index': '(True)'}), '(mzid_files_8, use_index=True)\n', (11362, 11392), False, 'from pyteomics import mzid, mzml\n'), ((11974, 12043), 'pandas.DataFrame', 
'pd.DataFrame', (["{'file': file_location, 'id': spectrum_ids, 'seq': seq}"], {}), "({'file': file_location, 'id': spectrum_ids, 'seq': seq})\n", (11986, 12043), True, 'import pandas as pd\n'), ((12251, 12275), 'numpy.unique', 'np.unique', (['file_location'], {}), '(file_location)\n', (12260, 12275), True, 'import numpy as np\n'), ((12524, 12614), 'pandas.DataFrame', 'pd.DataFrame', (["{'file': mzml_location, 'id': ids, 'mz': mz, 'intensities': intensities}"], {}), "({'file': mzml_location, 'id': ids, 'mz': mz, 'intensities':\n intensities})\n", (12536, 12614), True, 'import pandas as pd\n'), ((12620, 12684), 'pandas.merge', 'pd.merge', (['mzid_df_8', 'spectra_df_8'], {'how': '"""left"""', 'on': "['file', 'id']"}), "(mzid_df_8, spectra_df_8, how='left', on=['file', 'id'])\n", (12628, 12684), True, 'import pandas as pd\n'), ((1160, 1188), 'numpy.array', 'np.array', (["entry['m/z array']"], {}), "(entry['m/z array'])\n", (1168, 1188), True, 'import numpy as np\n'), ((1207, 1241), 'numpy.array', 'np.array', (["entry['intensity array']"], {}), "(entry['intensity array'])\n", (1215, 1241), True, 'import numpy as np\n'), ((1360, 1375), 'pyteomics.mzml.MzML', 'mzml.MzML', (['file'], {}), '(file)\n', (1369, 1375), False, 'from pyteomics import mzid, mzml\n'), ((2775, 2803), 'numpy.array', 'np.array', (["entry['m/z array']"], {}), "(entry['m/z array'])\n", (2783, 2803), True, 'import numpy as np\n'), ((2822, 2856), 'numpy.array', 'np.array', (["entry['intensity array']"], {}), "(entry['intensity array'])\n", (2830, 2856), True, 'import numpy as np\n'), ((2977, 2992), 'pyteomics.mzml.MzML', 'mzml.MzML', (['file'], {}), '(file)\n', (2986, 2992), False, 'from pyteomics import mzid, mzml\n'), ((4329, 4357), 'numpy.array', 'np.array', (["entry['m/z array']"], {}), "(entry['m/z array'])\n", (4337, 4357), True, 'import numpy as np\n'), ((4376, 4410), 'numpy.array', 'np.array', (["entry['intensity array']"], {}), "(entry['intensity array'])\n", (4384, 4410), True, 'import numpy as 
np\n'), ((4531, 4546), 'pyteomics.mzml.MzML', 'mzml.MzML', (['file'], {}), '(file)\n', (4540, 4546), False, 'from pyteomics import mzid, mzml\n'), ((5885, 5913), 'numpy.array', 'np.array', (["entry['m/z array']"], {}), "(entry['m/z array'])\n", (5893, 5913), True, 'import numpy as np\n'), ((5932, 5966), 'numpy.array', 'np.array', (["entry['intensity array']"], {}), "(entry['intensity array'])\n", (5940, 5966), True, 'import numpy as np\n'), ((6087, 6102), 'pyteomics.mzml.MzML', 'mzml.MzML', (['file'], {}), '(file)\n', (6096, 6102), False, 'from pyteomics import mzid, mzml\n'), ((7440, 7468), 'numpy.array', 'np.array', (["entry['m/z array']"], {}), "(entry['m/z array'])\n", (7448, 7468), True, 'import numpy as np\n'), ((7487, 7521), 'numpy.array', 'np.array', (["entry['intensity array']"], {}), "(entry['intensity array'])\n", (7495, 7521), True, 'import numpy as np\n'), ((7642, 7657), 'pyteomics.mzml.MzML', 'mzml.MzML', (['file'], {}), '(file)\n', (7651, 7657), False, 'from pyteomics import mzid, mzml\n'), ((8995, 9023), 'numpy.array', 'np.array', (["entry['m/z array']"], {}), "(entry['m/z array'])\n", (9003, 9023), True, 'import numpy as np\n'), ((9042, 9076), 'numpy.array', 'np.array', (["entry['intensity array']"], {}), "(entry['intensity array'])\n", (9050, 9076), True, 'import numpy as np\n'), ((9197, 9212), 'pyteomics.mzml.MzML', 'mzml.MzML', (['file'], {}), '(file)\n', (9206, 9212), False, 'from pyteomics import mzid, mzml\n'), ((10550, 10578), 'numpy.array', 'np.array', (["entry['m/z array']"], {}), "(entry['m/z array'])\n", (10558, 10578), True, 'import numpy as np\n'), ((10597, 10631), 'numpy.array', 'np.array', (["entry['intensity array']"], {}), "(entry['intensity array'])\n", (10605, 10631), True, 'import numpy as np\n'), ((10752, 10767), 'pyteomics.mzml.MzML', 'mzml.MzML', (['file'], {}), '(file)\n', (10761, 10767), False, 'from pyteomics import mzid, mzml\n'), ((12105, 12133), 'numpy.array', 'np.array', (["entry['m/z array']"], {}), "(entry['m/z 
array'])\n", (12113, 12133), True, 'import numpy as np\n'), ((12152, 12186), 'numpy.array', 'np.array', (["entry['intensity array']"], {}), "(entry['intensity array'])\n", (12160, 12186), True, 'import numpy as np\n'), ((12307, 12322), 'pyteomics.mzml.MzML', 'mzml.MzML', (['file'], {}), '(file)\n', (12316, 12322), False, 'from pyteomics import mzid, mzml\n')] |
#
# Copyright © 2018 United States Government as represented by the Administrator of the
# National Aeronautics and Space Administration. All Rights Reserved.
#
import time
import numpy as np
import sunpy.map
import sunpy.io
import json
import astropy.units as u
import pandas as pd
from scipy import stats
from scipy.spatial import ConvexHull
from astropy.utils.data import download_file
import urllib
from datetime import datetime, timedelta
import requests
from pyquaternion import Quaternion
# Number of samples along each angular axis of the lemniscate mesh
# (the mesh becomes n x n after the meshgrid below).
n = 21
# Parametric domain of the lemniscate surface: theta in [0, pi/2],
# phi in [0, 2*pi]; meshgrid turns the 1-D samples into 2-D grids.
theta = np.linspace(0, np.pi / 2, n)
phi = np.linspace(0, 2 * np.pi, n)
theta, phi = np.meshgrid(theta, phi)
# Reference distance (AU) used for the aspect ratio of the lemniscate silhouette
AU_REFERENCE_CUBE = 1
# Half-width of the plotting grid, in pixels; scales solar distances to grid units
GRID_HALF_WIDTH = 800
# function takes care of updating all of the points for the different plots
def plot_update(radial, angular, long, lat):
    """Build the rotated lemniscate point cloud for the given CME parameters.

    Parameters
    ----------
    radial : float
        Apex distance of the lemniscate, in solar radii.
    angular : float
        Full angular width of the lemniscate, in degrees.
    long, lat : float
        Longitude and latitude (degrees) passed to ``rotation``.

    Returns
    -------
    numpy.ndarray
        The rotated (x, y, z) mesh, each component an n x n grid.
    """
    # data calculation section for width and distance interaction with figure
    # scalars of the lemniscate
    # c3 is not stored because it is always 1
    # Convert the apex distance to grid pixels: km per solar radius
    # times (grid pixels per km at AU_REFERENCE_CUBE AU).
    lem_distance_c_straight_pixel = (radial * u.solRad).to(u.km) * (
            GRID_HALF_WIDTH / (AU_REFERENCE_CUBE * u.AU).to(u.km))
    c_one = lem_distance_c_straight_pixel
    # Tangent of the half-angle sets the transverse scale.
    c_two = c_one * np.tan(((angular / 2) * u.deg))
    # Parametric surface of revolution over the module-level (theta, phi) grids.
    x_mod = c_one * np.cos(theta)
    y_mod = c_two * np.cos(theta) * np.sin(theta) * np.cos(phi)
    z_mod = c_two * np.cos(theta) * np.sin(theta) * np.sin(phi)
    # data calculation for latitude and longitude interaction with figure
    v = [x_mod, y_mod, z_mod]
    return rotation(long, lat, v, n)
# Builds a parametric sphere mesh for satellite 3-D visuals.
# NOTE: Not Used
def functions_sphere(radius, smooth, distance):
    """Return an (x, y, z) sphere surface of the given radius.

    The sphere is sampled on a ``smooth`` x ``smooth`` parametric grid
    and shifted along the x axis by ``distance``.
    """
    azimuth, polar = np.meshgrid(np.linspace(0, 2 * np.pi, smooth),
                                np.linspace(0, np.pi, smooth))
    sin_polar = np.sin(polar)
    x = radius * np.cos(azimuth) * sin_polar + distance
    y = radius * np.sin(azimuth) * sin_polar
    z = radius * np.cos(polar)
    return np.array([x, y, z])
# Rotates a 3-D mesh: first about z by `lo` degrees, then about y by `la`.
def rotation(lo, la, v, smooth):
    """Rotate the mesh ``v`` = [x, y, z] and return it re-gridded.

    ``lo`` and ``la`` are rotation angles in degrees; ``smooth`` is the
    side length of each square output grid.
    """
    yaw = Quaternion(axis=[0.0, 0.0, 1.0], degrees=lo)
    pitch = Quaternion(axis=[0.0, 1.0, 0.0], degrees=la)
    matrix = (pitch * yaw).rotation_matrix
    # Flatten the three component grids into an (N, 3) point list.
    points = np.array(list(zip(np.ravel(v[0]), np.ravel(v[1]), np.ravel(v[2]))))
    rotated = points @ matrix
    # Re-shape each rotated component back into a smooth x smooth grid.
    return np.array([np.reshape(component, (smooth, smooth))
                     for component in zip(*rotated)])
# function for gamma correction
def gamma_correction(image_data, gamma):
    """Apply power-law (gamma) correction to an image in place.

    Every pixel value is replaced by ``value ** gamma``.  The previous
    implementation looped per pixel in Python and swapped the row and
    column bounds (it iterated rows up to the column count), which
    mis-indexed or crashed on non-square images; the vectorized form
    handles any 2-D shape, runs in C speed, and still mutates
    ``image_data`` in place like the original per-element assignment.

    Parameters
    ----------
    image_data : numpy.ndarray
        2-D image array; modified in place.
    gamma : float
        Exponent of the power law.

    Returns
    -------
    numpy.ndarray
        The same (now corrected) ``image_data`` array.
    """
    # In-place assignment preserves the original's mutate-and-return contract.
    image_data[...] = image_data ** gamma
    return image_data
# grabs a list of url links upon a date
# 1:STEREO B
# 2:<NAME> C2 zeus
# 3:SOHO LASCO C3 zeus
# 4:STEREO A
def extract_images(date, start_time, end_time, satellite):
    """Query the iSWA service for coronagraph image files in a time window.

    Parameters
    ----------
    date : datetime.datetime
        Base date; its hour and minute are overwritten from ``start_time``.
    start_time : str
        Start of the window as ``"HH:MM"``.
    end_time : int or float
        Window length in hours, added to the start.
    satellite : int
        Feed selector: 1=STEREO-B Cor2, 2=SOHO C2, 3=SOHO C3,
        4=STEREO-A Cor2.

    Returns
    -------
    numpy.ndarray
        Array of file records from the feed, or a 1-element
        uninitialized array when the lookup fails.
    """
    # Fold the "HH:MM" start time into the base date.
    start_time = start_time.split(':')
    date = date.replace(hour=int(start_time[0]), minute=int(start_time[1]))
    # Format the window endpoints as separate date and time strings.
    frm = (datetime.strftime(date, '%Y-%m-%d %H:%M:%S.%f')).split(' ')
    end_date = date + timedelta(hours=end_time)
    to = datetime.strftime(end_date, '%Y-%m-%d %H:%M:%S').split(' ')
    # One query URL per supported instrument feed.
    sats = {
        1: 'https://iswa.gsfc.nasa.gov/IswaSystemWebApp/SwpcCATFits?time.min=' + frm[0] + 'T' + frm[1] +
           '&time.max=' + to[0] + 'T' + to[1] + '.0&feed=Stereo-B%20Cor2',
        2: 'https://iswa.gsfc.nasa.gov/IswaSystemWebApp/SwpcCATFits?time.min=' + frm[0] + 'T' + frm[1] +
           '&time.max=' + to[0] + 'T' + to[1] + '.0&feed=SOHO%20C2',
        3: 'https://iswa.gsfc.nasa.gov/IswaSystemWebApp/SwpcCATFits?time.min=' + frm[0] + 'T' + frm[1] +
           '&time.max=' + to[0] + 'T' + to[1] + '.0&feed=SOHO%20C3',
        4: 'https://iswa.gsfc.nasa.gov/IswaSystemWebApp/SwpcCATFits?time.min=' + frm[0] + 'T' + frm[1] +
           '&time.max=' + to[0] + 'T' + to[1] + '.0&feed=Stereo-A%20Cor2',
    }
    instrument = {
        1: 'Stereo-B Cor2',
        2: 'SOHO C2',
        3: 'SOHO C3',
        4: 'Stereo-A Cor2'
    }
    # check if image directory is available, if not return empty array
    try:
        file_link = sats.get(satellite, 'IMPROPER NUMBER, RANGE OF [1,6]')
        files = json.loads(requests.get(file_link).text)
        link_dir = list()
        for file in files[instrument.get(satellite)]['files']:
            link_dir.append(file)
        return np.array(link_dir)
    # NOTE(review): requests raises requests.exceptions.RequestException
    # on network failure, not urllib.error.URLError, so this handler may
    # never fire -- confirm the intended fallback behavior.
    except urllib.error.URLError:
        return np.empty(1)
# normalizes values to a range of [0,255]
def byte_scale(values):
    """Linearly rescale ``values`` so its min/max map onto [0, 255]."""
    low = values.min()
    span = values.max() - low
    return 255 * ((values - low) / span)
# Computes an exposure-normalized, clipped difference of two camera frames.
def difference_image(current, previous, sat, current_exposure, previous_exposure, sat_id, current_off, previous_off):
    """Return the clipped, byte-scaled difference of two frames.

    Each frame is offset-subtracted and divided by its exposure time;
    for non-LASCO instruments both frames are additionally byte-scaled
    before differencing.  The difference is clipped to [-sat, sat],
    rescaled to [0, 255], and reshaped back into a square image.
    """
    norm_current = (np.ravel(current) - current_off) / current_exposure
    norm_previous = (np.ravel(previous) - previous_off) / previous_exposure
    if sat_id == 'LASCO':
        diff = norm_current - norm_previous
    else:
        diff = byte_scale(norm_current) - byte_scale(norm_previous)
    scaled = byte_scale(np.clip(diff, -sat, sat))
    side = int(np.sqrt(np.size(scaled)))
    return np.reshape(scaled, [side, side])
# NOTE: in order for SOHO LASCO sub map in sunpy to work properly
# NOTE: I had to edit the Dataset formating within the sunpy library
# NOTE: Location of change: sunpy/map/sources/soho.py line 118
# NOTE: Added: if 'T' not in self.meta['date-obs']:
# takes in two urls and downloads both FITS images
# then rotates, interpolates, corrects, and differance the two
# then returns a new sunpy map, and the rotated and interpolated fits files
def new_map(current_file, previous_file, sat):
    """Load two FITS frames and build their difference map.

    Returns a tuple ``(difference_map, current, previous)`` where the
    last two elements are the rotated/resampled input maps.
    """
    # header_safe rotates, resamples, and patches LASCO headers.
    current = header_safe(sunpy.map.Map(current_file))
    previous = header_safe(sunpy.map.Map(previous_file))
    # Per-frame camera offsets, read from the FITS headers.
    current_offset = current.meta['offset']
    previous_offset = previous.meta['offset']
    # Exposure times as plain numbers (the time unit is divided out).
    current_exposure = current.exposure_time / u.s
    previous_exposure = previous.exposure_time / u.s
    # Build a new map from the difference image, reusing the current header.
    return sunpy.map.Map(difference_image(current.data, previous.data, sat, current_exposure, previous_exposure,
                                          current.instrument, current_offset, previous_offset),
                         current.meta), current, previous
# returns length between the center of the picture and
# the pixel farthest right of it
# also returns the radius of the sun within the pictures perspective
def pic_wcs_length(observer, wx, wy, r_x=256, r_y=256):
    """Return the physical length (km) between two picture pixels.

    Converts the reference pixel ``(r_x, r_y)`` and the target pixel
    ``(wx, wy)`` to world coordinates, measures their angular
    separation, and scales by the observer's distance from the Sun.
    Reference coordinates outside [0, 256] fall back to the map's own
    reference pixel.
    """
    fallback_x, fallback_y = observer.reference_pixel
    ref_x = r_x * u.pix if 0 <= r_x <= 256 else fallback_x
    ref_y = r_y * u.pix if 0 <= r_y <= 256 else fallback_y
    # World coordinates of both pixels.
    cc = observer.pixel_to_world(ref_x, ref_y, 1)
    ca = observer.pixel_to_world(wx * u.pix, wy * u.pix, 1)
    # Sun-observer distance, read via 'dsun_obs'.
    p_r = pic_wcs_solar(observer, all=1)
    # Angular separation between the two world points.
    separation = np.sqrt((ca.Tx - cc.Tx) ** 2 + (ca.Ty - cc.Ty) ** 2)
    # Straight-line distance subtended by that angle, in km.
    return (np.tan(separation) * p_r).to(u.km)
# Extracts observer position info: (lon, lat, radius) when all == 0,
# just the radius when all == 1, otherwise the full coordinate object.
def pic_wcs_solar(observer, all=0):
    """Return position data for ``observer`` relative to the Sun.

    ``all=0`` -> (longitude, latitude, radius) tuple;
    ``all=1`` -> radius only;
    any other value -> the observer's coordinate object itself.
    """
    if all == 0:
        coord = observer.observer_coordinate
        return coord.lon, coord.lat, coord.radius
    if all == 1:
        return observer.observer_coordinate.radius
    return observer.observer_coordinate
# edits LASCO FITS files so they don't produce a lot of warning messages
# also speeds up calculation by having the satellite not
# continue to access the .get_earth function
def header_safe(current):
    """Patch missing LASCO observer keywords, then rotate and resample.

    For LASCO maps missing 'hgln_obs', the observer position is filled
    in once from Earth's ephemeris at the observation time.  Every map
    is then rotated (cubic, recentered) and resampled to 256 x 256.
    """
    if current.instrument == 'LASCO':
        if 'hgln_obs' not in current.meta:
            temp_earth = sunpy.coordinates.get_earth(time=current.date)
            # NOTE(review): hgln_obs is written as the string '0' while the
            # other two keywords get Quantity values -- confirm intended.
            current.meta['hgln_obs'] = '0'
            current.meta['hglt_obs'] = temp_earth.lat
            current.meta['dsun_obs'] = temp_earth.radius
    return current.rotate(order=3, recenter=True).resample((256, 256) * u.pix, 'linear')
# calculates json data for lemniscate plotting
def calc_plot_json(observer, sat_id, radial, angular, long, lat):
    """Project the lemniscate silhouette into the observer's image frame.

    Builds the rotated lemniscate mesh, takes the convex hull of its
    (x, z) silhouette, scales and translates the hull into the 256x256
    picture frame, and returns the hull vertices serialized as JSON.
    """
    # sat_id 0 flips the observer latitude sign relative to the other feeds.
    if sat_id == 0:
        v = plot_update(radial, angular, -long, lat)
        v = rotation(observer.observer_coordinate.lon / u.deg + 90, -observer.observer_coordinate.lat / u.deg, v, n)
    else:
        v = plot_update(radial, angular, -long, lat)
        v = rotation(observer.observer_coordinate.lon / u.deg + 90, observer.observer_coordinate.lat / u.deg, v, n)
    # 2-D silhouette of the mesh in the (x, z) plane.
    points = np.array(list(zip(np.ravel(v[0]), np.ravel(v[2]))))
    hull = ConvexHull(points, qhull_options='QbB')
    # sun center point on the picture
    # obtains CRVAL1 = r_x and CRVAL = r_y
    sun_x_center, sun_y_center = observer.reference_pixel
    # used to determine how far the lemniscate must be moved
    # due to the picture not having the sun in the exact center
    # 128 = have of the dimensions of the plot
    x_translate = (sun_x_center / u.pix) - 128
    y_translate = (sun_y_center / u.pix) - 128
    # determines aspect ratio for distance from satellite
    pic_width_ratio = pic_wcs_length(observer, 256, 128, r_x=128, r_y=128)
    lem_distance_c_ratio = (AU_REFERENCE_CUBE * u.AU).to(u.km) / pic_width_ratio
    # Scale the hull vertices and shift them toward the true sun center.
    hull = np.array(((points[hull.vertices, 0] * lem_distance_c_ratio + x_translate),
               (points[hull.vertices, 1] * lem_distance_c_ratio + y_translate)))
    return json.dumps(hull.tolist())
# calculates json data for image plotting
def calc_image_json(image_data, gamma, stretch_top, stretch_bot):
    """Clip, gamma-correct, and JSON-serialize an image array."""
    # Clip to the slider-selected stretch range.
    clipped = np.clip(image_data, stretch_top, stretch_bot)
    # Apply the gamma slider value.
    corrected = gamma_correction(clipped, gamma)
    return json.dumps(corrected.tolist())
# returns plot data
def return_plot(observer, sat_id, radial, angular, long, lat):
    """Compute the lemniscate hull JSON and decode it into a numpy array."""
    hull_json = calc_plot_json(observer, sat_id, radial, angular, long, lat)
    return np.array(pd.read_json(hull_json))
# returns image data
def return_image(image_data, gamma, stretch_top, stretch_bot):
    """Process the image through calc_image_json and decode it back to an array."""
    encoded = calc_image_json(image_data, gamma, stretch_top, stretch_bot)
    return np.array(json.loads(encoded))
| [
"numpy.clip",
"numpy.sqrt",
"numpy.tan",
"numpy.reshape",
"numpy.size",
"requests.get",
"datetime.timedelta",
"scipy.spatial.ConvexHull",
"numpy.array",
"numpy.linspace",
"numpy.empty",
"numpy.cos",
"numpy.sin",
"numpy.meshgrid",
"pyquaternion.Quaternion",
"datetime.datetime.strftime",... | [((559, 587), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi / 2)', 'n'], {}), '(0, np.pi / 2, n)\n', (570, 587), True, 'import numpy as np\n'), ((594, 622), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'n'], {}), '(0, 2 * np.pi, n)\n', (605, 622), True, 'import numpy as np\n'), ((636, 659), 'numpy.meshgrid', 'np.meshgrid', (['theta', 'phi'], {}), '(theta, phi)\n', (647, 659), True, 'import numpy as np\n'), ((1729, 1762), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'smooth'], {}), '(0, 2 * np.pi, smooth)\n', (1740, 1762), True, 'import numpy as np\n'), ((1773, 1802), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'smooth'], {}), '(0, np.pi, smooth)\n', (1784, 1802), True, 'import numpy as np\n'), ((1820, 1843), 'numpy.meshgrid', 'np.meshgrid', (['theta', 'phi'], {}), '(theta, phi)\n', (1831, 1843), True, 'import numpy as np\n'), ((1987, 2006), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (1995, 2006), True, 'import numpy as np\n'), ((2082, 2126), 'pyquaternion.Quaternion', 'Quaternion', ([], {'axis': '[0.0, 0.0, 1.0]', 'degrees': 'lo'}), '(axis=[0.0, 0.0, 1.0], degrees=lo)\n', (2092, 2126), False, 'from pyquaternion import Quaternion\n'), ((2136, 2180), 'pyquaternion.Quaternion', 'Quaternion', ([], {'axis': '[0.0, 1.0, 0.0]', 'degrees': 'la'}), '(axis=[0.0, 1.0, 0.0], degrees=la)\n', (2146, 2180), False, 'from pyquaternion import Quaternion\n'), ((7305, 7357), 'numpy.sqrt', 'np.sqrt', (['((ca.Tx - cc.Tx) ** 2 + (ca.Ty - cc.Ty) ** 2)'], {}), '((ca.Tx - cc.Tx) ** 2 + (ca.Ty - cc.Ty) ** 2)\n', (7312, 7357), True, 'import numpy as np\n'), ((9122, 9161), 'scipy.spatial.ConvexHull', 'ConvexHull', (['points'], {'qhull_options': '"""QbB"""'}), "(points, qhull_options='QbB')\n", (9132, 9161), False, 'from scipy.spatial import ConvexHull\n'), ((9798, 9939), 'numpy.array', 'np.array', (['(points[hull.vertices, 0] * lem_distance_c_ratio + x_translate, points[hull\n .vertices, 1] * 
lem_distance_c_ratio + y_translate)'], {}), '((points[hull.vertices, 0] * lem_distance_c_ratio + x_translate, \n points[hull.vertices, 1] * lem_distance_c_ratio + y_translate))\n', (9806, 9939), True, 'import numpy as np\n'), ((10156, 10201), 'numpy.clip', 'np.clip', (['image_data', 'stretch_top', 'stretch_bot'], {}), '(image_data, stretch_top, stretch_bot)\n', (10163, 10201), True, 'import numpy as np\n'), ((1276, 1303), 'numpy.tan', 'np.tan', (['(angular / 2 * u.deg)'], {}), '(angular / 2 * u.deg)\n', (1282, 1303), True, 'import numpy as np\n'), ((1329, 1342), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1335, 1342), True, 'import numpy as np\n'), ((1395, 1406), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1401, 1406), True, 'import numpy as np\n'), ((1459, 1470), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1465, 1470), True, 'import numpy as np\n'), ((1934, 1945), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1940, 1945), True, 'import numpy as np\n'), ((1963, 1974), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1969, 1974), True, 'import numpy as np\n'), ((3084, 3109), 'datetime.timedelta', 'timedelta', ([], {'hours': 'end_time'}), '(hours=end_time)\n', (3093, 3109), False, 'from datetime import datetime, timedelta\n'), ((4384, 4402), 'numpy.array', 'np.array', (['link_dir'], {}), '(link_dir)\n', (4392, 4402), True, 'import numpy as np\n'), ((5233, 5257), 'numpy.clip', 'np.clip', (['diff', '(-sat)', 'sat'], {}), '(diff, -sat, sat)\n', (5240, 5257), True, 'import numpy as np\n'), ((1379, 1392), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1385, 1392), True, 'import numpy as np\n'), ((1443, 1456), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1449, 1456), True, 'import numpy as np\n'), ((1878, 1889), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1884, 1889), True, 'import numpy as np\n'), ((1918, 1931), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1924, 1931), True, 'import numpy as np\n'), ((3001, 3048), 
'datetime.datetime.strftime', 'datetime.strftime', (['date', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(date, '%Y-%m-%d %H:%M:%S.%f')\n", (3018, 3048), False, 'from datetime import datetime, timedelta\n'), ((3120, 3168), 'datetime.datetime.strftime', 'datetime.strftime', (['end_date', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(end_date, '%Y-%m-%d %H:%M:%S')\n", (3137, 3168), False, 'from datetime import datetime, timedelta\n'), ((4454, 4465), 'numpy.empty', 'np.empty', (['(1)'], {}), '(1)\n', (4462, 4465), True, 'import numpy as np\n'), ((1363, 1376), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1369, 1376), True, 'import numpy as np\n'), ((1427, 1440), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1433, 1440), True, 'import numpy as np\n'), ((1862, 1875), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1868, 1875), True, 'import numpy as np\n'), ((2274, 2288), 'numpy.ravel', 'np.ravel', (['v[0]'], {}), '(v[0])\n', (2282, 2288), True, 'import numpy as np\n'), ((2290, 2304), 'numpy.ravel', 'np.ravel', (['v[1]'], {}), '(v[1])\n', (2298, 2304), True, 'import numpy as np\n'), ((2306, 2320), 'numpy.ravel', 'np.ravel', (['v[2]'], {}), '(v[2])\n', (2314, 2320), True, 'import numpy as np\n'), ((4214, 4237), 'requests.get', 'requests.get', (['file_link'], {}), '(file_link)\n', (4226, 4237), False, 'import requests\n'), ((7419, 7438), 'numpy.tan', 'np.tan', (['magnitude_c'], {}), '(magnitude_c)\n', (7425, 7438), True, 'import numpy as np\n'), ((9077, 9091), 'numpy.ravel', 'np.ravel', (['v[0]'], {}), '(v[0])\n', (9085, 9091), True, 'import numpy as np\n'), ((9093, 9107), 'numpy.ravel', 'np.ravel', (['v[2]'], {}), '(v[2])\n', (9101, 9107), True, 'import numpy as np\n'), ((2401, 2432), 'numpy.reshape', 'np.reshape', (['x', '(smooth, smooth)'], {}), '(x, (smooth, smooth))\n', (2411, 2432), True, 'import numpy as np\n'), ((4908, 4925), 'numpy.ravel', 'np.ravel', (['current'], {}), '(current)\n', (4916, 4925), True, 'import numpy as np\n'), ((4982, 5000), 'numpy.ravel', 
'np.ravel', (['previous'], {}), '(previous)\n', (4990, 5000), True, 'import numpy as np\n'), ((5300, 5313), 'numpy.size', 'np.size', (['clip'], {}), '(clip)\n', (5307, 5313), True, 'import numpy as np\n'), ((5329, 5342), 'numpy.size', 'np.size', (['clip'], {}), '(clip)\n', (5336, 5342), True, 'import numpy as np\n'), ((5075, 5092), 'numpy.ravel', 'np.ravel', (['current'], {}), '(current)\n', (5083, 5092), True, 'import numpy as np\n'), ((5155, 5173), 'numpy.ravel', 'np.ravel', (['previous'], {}), '(previous)\n', (5163, 5173), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import os
import datetime
# Origin date used as day zero when converting dates to day offsets.
TODAY = datetime.date(2020, 1, 1)
def diff_from_origin_date(d):
    """Convert ``d`` to the number of days elapsed since TODAY.

    The value 0 (the fill value used for missing dates) is passed
    through unchanged.
    """
    if d == 0:
        return 0
    as_date = pd.Timestamp(d).to_pydatetime().date()
    delta = as_date - TODAY
    return delta.days
def preprocess(df):
    """Encode lab status and convert date columns to day offsets.

    'Lab Status' is mapped to 0/1/2 (Negative ID / Positive ID /
    Unverified); unmapped labels fall back to 2.  Date columns are
    parsed and converted to integer day offsets from TODAY.
    """
    df['Lab Status'] = df['Lab Status'].map({
        'Negative ID': 0,
        'Positive ID': 1,
        'Unverified': 2
    }).fillna(value=2)
    # errors='coerce' turns unparseable dates into NaT.
    df['Detection Date'] = pd.to_datetime(df['Detection Date'], errors='coerce')
    df['Submission Date'] = pd.to_datetime(df['Submission Date'], errors='coerce')
    # NOTE(review): fillna(0) is relied on to replace NaT so that
    # diff_from_origin_date's `d == 0` check catches missing dates --
    # confirm this holds for the pandas version in use.
    df = df.fillna(0)
    df['Detection Date'] = np.vectorize(diff_from_origin_date)(df['Detection Date'])
    df['Submission Date'] = np.vectorize(diff_from_origin_date)(df['Submission Date'])
    return df
def get_file_name(files):
    """Strip the extension from a file name, keeping any directory part."""
    root, _extension = os.path.splitext(files)
    return root
def load_and_process():
    """Load the MCM data set and image-ID spreadsheet, preprocessed.

    Returns
    -------
    tuple
        ``(data, image_id)`` DataFrames: ``data`` has encoded statuses
        and day-offset dates; ``image_id`` has its last column dropped
        and file extensions stripped from 'FileName'.
    """
    data = pd.read_excel('2021_MCM_Problem_C_Data/2021MCMProblemC_DataSet.xlsx', index_col=0)
    image_id = pd.read_excel('2021_MCM_Problem_C_Data/2021MCM_ProblemC_ Images_by_GlobalID.xlsx', index_col=1)
    data = preprocess(data)
    # Drop the trailing (unused) column of the image sheet.
    image_id = image_id.iloc[:, :-1]
    image_id['FileName'] = np.vectorize(get_file_name)(image_id['FileName'])
    return data, image_id
| [
"os.path.splitext",
"datetime.date",
"pandas.read_excel",
"pandas.Timestamp",
"numpy.vectorize",
"pandas.to_datetime"
] | [((75, 100), 'datetime.date', 'datetime.date', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (88, 100), False, 'import datetime\n'), ((435, 488), 'pandas.to_datetime', 'pd.to_datetime', (["df['Detection Date']"], {'errors': '"""coerce"""'}), "(df['Detection Date'], errors='coerce')\n", (449, 488), True, 'import pandas as pd\n'), ((517, 571), 'pandas.to_datetime', 'pd.to_datetime', (["df['Submission Date']"], {'errors': '"""coerce"""'}), "(df['Submission Date'], errors='coerce')\n", (531, 571), True, 'import pandas as pd\n'), ((883, 969), 'pandas.read_excel', 'pd.read_excel', (['"""2021_MCM_Problem_C_Data/2021MCMProblemC_DataSet.xlsx"""'], {'index_col': '(0)'}), "('2021_MCM_Problem_C_Data/2021MCMProblemC_DataSet.xlsx',\n index_col=0)\n", (896, 969), True, 'import pandas as pd\n'), ((981, 1085), 'pandas.read_excel', 'pd.read_excel', (['"""2021_MCM_Problem_C_Data/2021MCM_ProblemC_ Images_by_GlobalID.xlsx"""'], {'index_col': '(1)'}), "(\n '2021_MCM_Problem_C_Data/2021MCM_ProblemC_ Images_by_GlobalID.xlsx',\n index_col=1)\n", (994, 1085), True, 'import pandas as pd\n'), ((621, 656), 'numpy.vectorize', 'np.vectorize', (['diff_from_origin_date'], {}), '(diff_from_origin_date)\n', (633, 656), True, 'import numpy as np\n'), ((707, 742), 'numpy.vectorize', 'np.vectorize', (['diff_from_origin_date'], {}), '(diff_from_origin_date)\n', (719, 742), True, 'import numpy as np\n'), ((819, 842), 'os.path.splitext', 'os.path.splitext', (['files'], {}), '(files)\n', (835, 842), False, 'import os\n'), ((1170, 1197), 'numpy.vectorize', 'np.vectorize', (['get_file_name'], {}), '(get_file_name)\n', (1182, 1197), True, 'import numpy as np\n'), ((173, 188), 'pandas.Timestamp', 'pd.Timestamp', (['d'], {}), '(d)\n', (185, 188), True, 'import pandas as pd\n')] |
import pickle
import sys
from pathlib import Path
import numpy as np
import toml
# Apply an uncertainty cut to a pickled SN data set and write the
# filtered set (plus a name list) next to the input file.
# Usage: <script> <pickle_file> <uncertainty_cut> <snemo_version>
file_name = Path(sys.argv[1])
uncert_cut = float(sys.argv[2])
snemo_version = int(sys.argv[3])
# The host-mass column sits one slot past the last SNEMO component.
mass_index = snemo_version + 1
file_directory = Path.cwd()/file_name.parent
# Close the input file deterministically instead of leaking the handle.
with open(file_name, 'rb') as in_file:
    data = pickle.load(in_file)
print(data['n_sne'])
# Flag each SN: keep it only if every component uncertainty is below
# the cut and every component value is within +/-5.
count = 0
data_index = 0
passed_cut = []
for i, j in zip(data['obs_mBx1c_cov'], data['obs_mBx1c']):
    data_index += 1
    # cuts it down to 205 objects
    if np.all(np.sqrt(np.diag(i)[1:mass_index]) < uncert_cut) and np.all(np.abs(j[1:mass_index]) < 5):
        passed_cut.append(True)
        count += 1
        # remove outliers from <2018-12-04 data
        # if count in [9, 21, 61, 64, 68, 96]:
        #     print(data_index)
        #     passed_cut[-1] = False  # update passed cut to so to remove outliers
    else:
        passed_cut.append(False)
print(count)
print('passed cut', sum(map(int, passed_cut)))
# Filter every per-SN array down to the surviving objects.
data['n_sne'] = count
data['sn_set_inds'] = data['sn_set_inds'][passed_cut]
data['z_helio'] = data['z_helio'][passed_cut]
data['z_CMB'] = data['z_CMB'][passed_cut]
data['obs_mBx1c'] = data['obs_mBx1c'][passed_cut]
# Center the host-mass column on the sample mean (the repeated prints
# are deliberate so the shift is impossible to miss in the log).
mean = np.mean(data['obs_mBx1c'][:,mass_index])
print('\n\nSHIFTING MASS BY MEAN OF SMAPLE: ', mean, '\n\n')
print('\n\nSHIFTING MASS BY MEAN OF SMAPLE: ', mean, '\n\n')
data['obs_mBx1c'][:,mass_index] = data['obs_mBx1c'][:,mass_index] - mean
print('\n\nSHIFTING MASS BY MEAN OF SMAPLE: ', mean, '\n\n')
print('\n\nSHIFTING MASS BY MEAN OF SMAPLE: ', mean, '\n\n')
data['obs_mBx1c_cov'] = data['obs_mBx1c_cov'][passed_cut]
print('\n\nADDING 0.1 IN QUAD TO MASS COV!\n\n')
print('\n\nADDING 0.1 IN QUAD TO MASS COV!\n\n')
# Add 0.1 in quadrature to the host-mass variance term.
data['obs_mBx1c_cov'][:, mass_index, mass_index] = np.sqrt(data['obs_mBx1c_cov'][:, mass_index, mass_index]**2 + 0.1**2)
print('\n\nADDING 0.1 IN QUAD TO MASS COV!\n\n')
print('\n\nADDING 0.1 IN QUAD TO MASS COV!\n\n')
# Collapse everything into a single SN set with no age priors.
data['n_sn_set'] = 1
# BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
# the alias referred to the builtin int, which is used directly here.
data['sn_set_inds'] = np.zeros(count, dtype=int)
data['age_gaus_mean'] = np.zeros((0, count, 0))
data['age_gaus_std'] = np.zeros((0, count, 0))
data['age_gaus_A'] = np.zeros((0, count, 0))
# Write a human-readable summary (.txt) and the filtered data (.pkl).
output_name = file_directory/f'{file_name.stem}_err_lt{uncert_cut}.pkl'
with open(output_name.with_suffix('.txt'), 'w') as f:
    print(toml.dumps({'count': count,
                 'names': data['names'][passed_cut]}),
          file=f)
print('names: ', data['names'][passed_cut])
with open(output_name.with_suffix('.pkl'), 'wb') as f:
    pickle.dump(data, f)
"numpy.mean",
"numpy.abs",
"numpy.sqrt",
"pickle.dump",
"pathlib.Path",
"pathlib.Path.cwd",
"toml.dumps",
"numpy.diag",
"numpy.zeros"
] | [((95, 112), 'pathlib.Path', 'Path', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (99, 112), False, 'from pathlib import Path\n'), ((1170, 1211), 'numpy.mean', 'np.mean', (["data['obs_mBx1c'][:, mass_index]"], {}), "(data['obs_mBx1c'][:, mass_index])\n", (1177, 1211), True, 'import numpy as np\n'), ((1735, 1808), 'numpy.sqrt', 'np.sqrt', (["(data['obs_mBx1c_cov'][:, mass_index, mass_index] ** 2 + 0.1 ** 2)"], {}), "(data['obs_mBx1c_cov'][:, mass_index, mass_index] ** 2 + 0.1 ** 2)\n", (1742, 1808), True, 'import numpy as np\n'), ((1947, 1976), 'numpy.zeros', 'np.zeros', (['count'], {'dtype': 'np.int'}), '(count, dtype=np.int)\n', (1955, 1976), True, 'import numpy as np\n'), ((2001, 2024), 'numpy.zeros', 'np.zeros', (['(0, count, 0)'], {}), '((0, count, 0))\n', (2009, 2024), True, 'import numpy as np\n'), ((2048, 2071), 'numpy.zeros', 'np.zeros', (['(0, count, 0)'], {}), '((0, count, 0))\n', (2056, 2071), True, 'import numpy as np\n'), ((2093, 2116), 'numpy.zeros', 'np.zeros', (['(0, count, 0)'], {}), '((0, count, 0))\n', (2101, 2116), True, 'import numpy as np\n'), ((226, 236), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (234, 236), False, 'from pathlib import Path\n'), ((2464, 2484), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (2475, 2484), False, 'import pickle\n'), ((2254, 2318), 'toml.dumps', 'toml.dumps', (["{'count': count, 'names': data['names'][passed_cut]}"], {}), "({'count': count, 'names': data['names'][passed_cut]})\n", (2264, 2318), False, 'import toml\n'), ((551, 574), 'numpy.abs', 'np.abs', (['j[1:mass_index]'], {}), '(j[1:mass_index])\n', (557, 574), True, 'import numpy as np\n'), ((500, 510), 'numpy.diag', 'np.diag', (['i'], {}), '(i)\n', (507, 510), True, 'import numpy as np\n')] |
import pydicom, sys
from colorama import Fore, Style, init
import numpy as np

# Initialise colorama so the ANSI colour codes below work on every platform.
init()

dataset = pydicom.read_file(sys.argv[1])

# Optional second CLI argument selects a single (1-based) frame to dump;
# otherwise every frame is dumped.
frame = None
if len(sys.argv) > 2:
    frame = int(sys.argv[2]) - 1
    print("Dumping frame "+str(frame))

print(dataset.Rows)
print(dataset.Columns)
print(dataset.NumberOfFrames)

# Expected size of the 1-bit-packed pixel buffer, rounded up and padded
# to an even byte count (DICOM requires even-length values).
total_bytes = int(dataset.Rows * dataset.Columns * dataset.NumberOfFrames / 8)
if total_bytes % 8:
    total_bytes = total_bytes + 1
total_bytes = total_bytes + (total_bytes % 2)
print("Total pixels expected: %i" % total_bytes)
print("Total pixels actual: %i" % len(dataset.PixelData))

frames = [frame] if frame is not None else range(dataset.NumberOfFrames)


def _dump(frame_index, bit_at):
    # Render one frame as X/. characters, alternating the colour with the
    # parity of the byte each bit came from.
    print("Frame %i" % frame_index)
    for row in range(dataset.Rows):
        for col in range(dataset.Columns):
            pixel = frame_index * dataset.Rows * dataset.Columns + row * dataset.Columns + col
            sys.stdout.write(Fore.RED if (pixel // 8) % 2 else Fore.WHITE)
            sys.stdout.write("X" if bit_at(pixel) else ".")
        print("")


# First pass: bits decoded by numpy's unpackbits (MSB-first within each byte).
unpacked = np.unpackbits(np.frombuffer(dataset.PixelData, dtype=np.uint8))
print("With numpy unpackbits:")
for f in frames:
    _dump(f, lambda p: unpacked[p])

# Second pass: bits decoded by hand (LSB-first shift) for comparison.
print("\nWith manual unpacking:")
for f in frames:
    _dump(f, lambda p: (dataset.PixelData[p // 8] >> (p % 8)) & 1)

print(Style.RESET_ALL)
| [
"numpy.frombuffer",
"pydicom.read_file",
"colorama.init",
"sys.stdout.write"
] | [((71, 77), 'colorama.init', 'init', ([], {}), '()\n', (75, 77), False, 'from colorama import Fore, Style, init\n'), ((83, 113), 'pydicom.read_file', 'pydicom.read_file', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (100, 113), False, 'import pydicom, sys\n'), ((660, 702), 'numpy.frombuffer', 'np.frombuffer', (['d.PixelData'], {'dtype': 'np.uint8'}), '(d.PixelData, dtype=np.uint8)\n', (673, 702), True, 'import numpy as np\n'), ((927, 953), 'sys.stdout.write', 'sys.stdout.write', (['Fore.RED'], {}), '(Fore.RED)\n', (943, 953), False, 'import pydicom, sys\n'), ((974, 1002), 'sys.stdout.write', 'sys.stdout.write', (['Fore.WHITE'], {}), '(Fore.WHITE)\n', (990, 1002), False, 'import pydicom, sys\n'), ((1043, 1064), 'sys.stdout.write', 'sys.stdout.write', (['"""X"""'], {}), "('X')\n", (1059, 1064), False, 'import pydicom, sys\n'), ((1085, 1106), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (1101, 1106), False, 'import pydicom, sys\n'), ((1412, 1438), 'sys.stdout.write', 'sys.stdout.write', (['Fore.RED'], {}), '(Fore.RED)\n', (1428, 1438), False, 'import pydicom, sys\n'), ((1459, 1487), 'sys.stdout.write', 'sys.stdout.write', (['Fore.WHITE'], {}), '(Fore.WHITE)\n', (1475, 1487), False, 'import pydicom, sys\n'), ((1551, 1572), 'sys.stdout.write', 'sys.stdout.write', (['"""X"""'], {}), "('X')\n", (1567, 1572), False, 'import pydicom, sys\n'), ((1594, 1615), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (1610, 1615), False, 'import pydicom, sys\n')] |
import numpy as np
from self_supervised.env_wrapper.pixel_wrapper import PixelNormalizedBoxEnvWrapper
def pixel_rollout(
        env: "PixelNormalizedBoxEnvWrapper",
        agent,
        max_path_length=np.inf,
        render=False,
        render_kwargs=None,
):
    """
    Roll out ``agent`` in ``env`` for up to ``max_path_length`` steps.

    The following value for the following keys will be a 2D array, with the
    first dimension corresponding to the time dimension.
     - observations
     - actions
     - rewards
     - next_observations (returned as a tuple: state array, stacked pixel obs)
     - terminals

    The next two elements will be lists of dictionaries, with the index into
    the list being the index into the time
     - agent_infos
     - env_infos
    """
    # Bug fix: env.render(**None) used to raise when render=True with the
    # default render_kwargs.
    if render_kwargs is None:
        render_kwargs = {}
    observations = []
    pixel_obs = []
    actions = []
    rewards = []
    terminals = []
    agent_infos = []
    env_infos = []
    o = env.reset()['state_obs']
    agent.reset()
    next_o = None
    path_length = 0
    if render:
        env.render(**render_kwargs)
    while path_length < max_path_length:
        a, agent_info = agent.get_action(o)
        next_o, r, d, env_info = env.step(a)
        observations.append(next_o['state_obs'])
        pixel_obs.append(next_o['pixel_obs'])
        rewards.append(r)
        terminals.append(d)
        actions.append(a)
        agent_infos.append(agent_info)
        env_infos.append(env_info)
        path_length += 1
        # Only honour the done flag when no explicit path length was given.
        if max_path_length == np.inf and d:
            break
        o = next_o['state_obs']
        if render:
            env.render(**render_kwargs)
    actions = np.array(actions)
    if len(actions.shape) == 1:
        actions = np.expand_dims(actions, 1)
    observations = np.array(observations)
    next_o = next_o['state_obs']
    if len(observations.shape) == 1:
        observations = np.expand_dims(observations, 1)
        # Bug fix: next_o was already unwrapped above; the original indexed it
        # with ['state_obs'] a second time, which crashed for 1-D states.
        next_o = np.array([next_o])
    # next_observations[t] is the state reached after action t; the final
    # entry is the terminal state.
    next_observations = np.vstack(
        (
            observations[1:, :],
            np.expand_dims(next_o, 0)
        )
    )
    pixel_obs = np.stack(pixel_obs, axis=0)
    return dict(
        observations=observations,
        actions=actions,
        rewards=np.array(rewards).reshape(-1, 1),
        next_observations=(next_observations, pixel_obs),
        terminals=np.array(terminals).reshape(-1, 1),
        agent_infos=agent_infos,
        env_infos=env_infos,
    )
| [
"numpy.array",
"numpy.expand_dims",
"numpy.stack"
] | [((1525, 1542), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (1533, 1542), True, 'import numpy as np\n'), ((1639, 1661), 'numpy.array', 'np.array', (['observations'], {}), '(observations)\n', (1647, 1661), True, 'import numpy as np\n'), ((1984, 2011), 'numpy.stack', 'np.stack', (['pixel_obs'], {'axis': '(0)'}), '(pixel_obs, axis=0)\n', (1992, 2011), True, 'import numpy as np\n'), ((1593, 1619), 'numpy.expand_dims', 'np.expand_dims', (['actions', '(1)'], {}), '(actions, 1)\n', (1607, 1619), True, 'import numpy as np\n'), ((1755, 1786), 'numpy.expand_dims', 'np.expand_dims', (['observations', '(1)'], {}), '(observations, 1)\n', (1769, 1786), True, 'import numpy as np\n'), ((1804, 1835), 'numpy.array', 'np.array', (["[next_o['state_obs']]"], {}), "([next_o['state_obs']])\n", (1812, 1835), True, 'import numpy as np\n'), ((1926, 1951), 'numpy.expand_dims', 'np.expand_dims', (['next_o', '(0)'], {}), '(next_o, 0)\n', (1940, 1951), True, 'import numpy as np\n'), ((2105, 2122), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (2113, 2122), True, 'import numpy as np\n'), ((2215, 2234), 'numpy.array', 'np.array', (['terminals'], {}), '(terminals)\n', (2223, 2234), True, 'import numpy as np\n')] |
import numpy as np
class FeaturesExtractor:
    """Base class for feature extractors: stores the feature-vector length."""

    def __init__(self):
        # Length of the feature vector; subclasses set it via _set_n_features.
        self.__count = None

    def features(self, patterns):
        """Extract a feature vector from *patterns*; overridden by subclasses."""
        pass

    def n_features(self):
        """Return the number of features this extractor produces."""
        return self.__count

    def _set_n_features(self, n):
        """Record the feature-vector length (called from subclass __init__)."""
        self.__count = n
class AllPeaksExtractor(FeaturesExtractor):
    """One feature per distinct peak position seen across the sample patterns:
    the Gaussian-smoothed intensity of a pattern sampled at that position."""

    def __init__(self, sample_patterns, intensity_threshold=0.5, gaussion_height=10, gaussian_width=30):
        FeaturesExtractor.__init__(self)
        self.__threshold = intensity_threshold
        self.__height = gaussion_height
        self.__width = gaussian_width
        # Union of all (deduplicated) peak positions over the sample patterns.
        self.__all_peaks = self.peak_union(sample_patterns)
        self._set_n_features(len(self.__all_peaks))

    def features(self, pattern):
        """Return the smoothed intensities of *pattern* at every known peak."""
        peaks = _peaks_above_threshold(pattern, self.__threshold)
        gaussians = [AllPeaksExtractor.__gaussian2d(p[0], p[1], 1.0 * p[2], self.__height, self.__width)
                     for p in peaks]

        def intensity(X, Y):
            # Renamed from `sum` to avoid shadowing the builtin.
            total = np.zeros_like(X, dtype='f')
            for g in gaussians:
                total += g(X, Y)
            return total

        intensities = intensity(self.__all_peaks[:, 0], self.__all_peaks[:, 1])
        return intensities.tolist()

    def peak_union(self, patterns):
        """Union of thresholded peak coordinates across *patterns*, merging
        peaks that fall within a small neighbourhood of each other."""
        peaks_set = set()
        for pattern in patterns:
            spots = _peaks_above_threshold(pattern, self.__threshold)
            peaks = set([(p[0], p[1]) for p in spots])
            peaks_set = AllPeaksExtractor.__merge_peak_sets(peaks_set, peaks)
        return np.array(list(peaks_set))

    @staticmethod
    def __merge_peak_sets(set1, set2, r_nb=3):
        """Merge set2 into set1, skipping peaks within radius r_nb of one
        already present."""
        # make sure set2 is smaller
        if len(set1) < len(set2):
            set1, set2 = set2, set1
        for p2 in set2:
            not_in_set1 = True
            for n2 in AllPeaksExtractor.__neighbors(p2, r_nb):
                if n2 in set1:
                    not_in_set1 = False
                    break
            if not_in_set1:
                set1.add(p2)
        return set1

    @staticmethod
    def __neighbors(p, r=1):
        """Yield p and every integer point within Euclidean distance r of p."""
        yield p
        # BUG FIX: `xrange` is Python 2 only (NameError on Python 3);
        # `range` iterates identically here.
        for x in range(-r, r + 1):
            ymax = int(np.floor(np.sqrt(r**2 - x**2)))
            for y in range(-ymax, ymax + 1):
                yield (p[0] + x, p[1] + y)

    @staticmethod
    def __gaussian2d(x0, y0, i, height=1, width=5):
        """Return g(X, Y): a separable 2-D Gaussian of amplitude i*height and
        standard deviation *width*, centred at (x0, y0)."""
        sigma = width
        A = i * height

        def gx(X):
            X0 = np.ones(X.shape, dtype='f') * x0
            return (X - X0) * (X - X0) / (2 * sigma ** 2)

        def gy(Y):
            Y0 = np.ones(Y.shape, dtype='f') * y0
            return (Y - Y0) * (Y - Y0) / (2 * sigma ** 2)

        def g(X, Y):
            return A * np.exp(-(gx(X) + gy(Y)))
        return g
class PeaksNumberExtractor(FeaturesExtractor):
    """Single feature: the number of peaks above the intensity threshold."""

    def __init__(self, intensity_threshold=0.0):
        FeaturesExtractor.__init__(self)
        self.__threshold = intensity_threshold
        self._set_n_features(1)

    def features(self, pattern):
        """Return a one-element list holding the thresholded peak count."""
        count = len(_peaks_above_threshold(pattern, self.__threshold))
        return [float(count)]
class MaxPeaksExtractor(FeaturesExtractor):
    """Two features: the (x, y) position of the strongest peak in a pattern."""

    def __init__(self):
        FeaturesExtractor.__init__(self)

    def features(self, pattern):
        """Return [x, y] of the highest-intensity peak in *pattern*."""
        # max() replaces the original descending sort + [0]: both select the
        # first peak with the largest intensity, but max() is O(n).
        max_peak = max(_peaks_above_threshold(pattern, 0.0), key=lambda p: p[2])
        return [max_peak[0], max_peak[1]]

    # NOTE: an unused private __gaussian2d helper (copy-pasted verbatim from
    # AllPeaksExtractor and never called) has been removed as dead code.
class CombinedExtractor(FeaturesExtractor):
    """Concatenate the feature vectors of several wrapped extractors."""

    def __init__(self, extractors):
        FeaturesExtractor.__init__(self)
        self.__parts = extractors

    def features(self, pattern):
        """Return every wrapped extractor's features, concatenated in order."""
        combined = []
        for part in self.__parts:
            combined.extend(part.features(pattern))
        return combined
def _peaks_above_threshold(pattern, threshold):
if len(pattern) > 0:
imax = max([p[2] for p in pattern])
ith = threshold * imax
return [(p[0], p[1], p[2]/imax) for p in pattern if p[2] >= ith]
else:
return [[0.0, 0.0, 0.0]]
| [
"numpy.sqrt",
"numpy.zeros_like",
"numpy.ones"
] | [((992, 1019), 'numpy.zeros_like', 'np.zeros_like', (['X'], {'dtype': '"""f"""'}), "(X, dtype='f')\n", (1005, 1019), True, 'import numpy as np\n'), ((2426, 2453), 'numpy.ones', 'np.ones', (['X.shape'], {'dtype': '"""f"""'}), "(X.shape, dtype='f')\n", (2433, 2453), True, 'import numpy as np\n'), ((2553, 2580), 'numpy.ones', 'np.ones', (['Y.shape'], {'dtype': '"""f"""'}), "(Y.shape, dtype='f')\n", (2560, 2580), True, 'import numpy as np\n'), ((3535, 3562), 'numpy.ones', 'np.ones', (['X.shape'], {'dtype': '"""f"""'}), "(X.shape, dtype='f')\n", (3542, 3562), True, 'import numpy as np\n'), ((3662, 3689), 'numpy.ones', 'np.ones', (['Y.shape'], {'dtype': '"""f"""'}), "(Y.shape, dtype='f')\n", (3669, 3689), True, 'import numpy as np\n'), ((2161, 2185), 'numpy.sqrt', 'np.sqrt', (['(r ** 2 - x ** 2)'], {}), '(r ** 2 - x ** 2)\n', (2168, 2185), True, 'import numpy as np\n')] |
import os
import copy
import datetime
import warnings
from matplotlib import pyplot as plt
import matplotlib as mpl
import seaborn as sns
import pandas as pd
import numpy as np
import math
from datetime import datetime
import random
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
import os
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'
import numpy as np
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, Flatten, Embedding, Dropout, PReLU,ReLU
from keras.layers import Bidirectional, SpatialDropout1D, CuDNNGRU,CuDNNLSTM, Conv1D,Conv2D,MaxPool2D,Reshape
from keras.layers import GlobalAvgPool1D, GlobalMaxPool1D, concatenate,GlobalMaxPooling1D,GlobalAveragePooling1D
from keras.regularizers import l2,l1
from keras.layers.normalization import BatchNormalization
from keras.engine import Layer
from keras.layers.core import Flatten
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau,EarlyStopping
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation,BatchNormalization
from keras.regularizers import l1,l2
from keras.preprocessing.text import Tokenizer
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau,EarlyStopping,Callback
import gc
from tqdm import tqdm_notebook
class _Data_Preprocess:
def __init__(self):
self.int8_max = np.iinfo(np.int8).max
self.int8_min = np.iinfo(np.int8).min
self.int16_max = np.iinfo(np.int16).max
self.int16_min = np.iinfo(np.int16).min
self.int32_max = np.iinfo(np.int32).max
self.int32_min = np.iinfo(np.int32).min
self.int64_max = np.iinfo(np.int64).max
self.int64_min = np.iinfo(np.int64).min
self.float16_max = np.finfo(np.float16).max
self.float16_min = np.finfo(np.float16).min
self.float32_max = np.finfo(np.float32).max
self.float32_min = np.finfo(np.float32).min
self.float64_max = np.finfo(np.float64).max
self.float64_min = np.finfo(np.float64).min
'''
function: _get_type(self,min_val, max_val, types)
get the correct types that our columns can trans to
'''
def _get_type(self, min_val, max_val, types):
if types == 'int':
if max_val <= self.int8_max and min_val >= self.int8_min:
return np.int8
elif max_val <= self.int16_max <= max_val and min_val >= self.int16_min:
return np.int16
elif max_val <= self.int32_max and min_val >= self.int32_min:
return np.int32
return None
elif types == 'float':
if max_val <= self.float16_max and min_val >= self.float16_min:
return np.float16
if max_val <= self.float32_max and min_val >= self.float32_min:
return np.float32
if max_val <= self.float64_max and min_val >= self.float64_min:
return np.float64
return None
'''
function: _memory_process(self,df)
column data types trans, to save more memory
'''
def _memory_process(self, df):
init_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
print('Original data occupies {} GB memory.'.format(init_memory))
df_cols = df.columns
for col in tqdm_notebook(df_cols):
try:
if 'float' in str(df[col].dtypes):
max_val = df[col].max()
min_val = df[col].min()
trans_types = self._get_type(min_val, max_val, 'float')
if trans_types is not None:
df[col] = df[col].astype(trans_types)
elif 'int' in str(df[col].dtypes):
max_val = df[col].max()
min_val = df[col].min()
trans_types = self._get_type(min_val, max_val, 'int')
if trans_types is not None:
df[col] = df[col].astype(trans_types)
except:
print(' Can not do any process for column, {}.'.format(col))
afterprocess_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
print('After processing, the data occupies {} GB memory.'.format(afterprocess_memory))
return df
# ---- Load raw tables and downcast numeric dtypes to save memory ------------
memory_preprocess = _Data_Preprocess()
user_app_actived = pd.read_csv('../../data/original_data/user_app_actived.csv',names=['uId', 'appId'])
x_train = pd.read_csv('../../data/original_data/age_train.csv',names=['uId','age_group'],dtype={'uId':np.int32, 'age_group':np.int8})
# NOTE(review): this path is '../data' while every other read uses '../../data' — confirm.
x_test = pd.read_csv('../data/original_data/age_test.csv',names=['uId'],dtype={'uId':np.int32})
usage_list = pd.read_csv('../../data/processed_data/usage_app_info.csv')
usage_appId = pd.read_csv('../../data/processed_data/usage_appId.csv')
train = pd.read_csv('../../data/features/base_train.csv')
test = pd.read_csv('../../data/features/base_test.csv')
train=memory_preprocess._memory_process(train)
test=memory_preprocess._memory_process(test)
print(test.info())
gc.collect()
# ---- Merge activation-derived feature table onto train/test ----------------
actived_features_all = pd.read_csv('../../data/features/actived_features_all.csv')
actived_features_all=memory_preprocess._memory_process(actived_features_all)
train = pd.merge(train, actived_features_all, how='left', on='uId').fillna(0)
test = pd.merge(test, actived_features_all, how='left', on='uId').fillna(0)
del actived_features_all
gc.collect()
# ---- Merge RNN hidden-state features (first unnamed column is the uId) -----
act_use_rnn_hide_train=pd.read_csv('../../data/features/act_use_rnn_hide_train.csv')
act_use_rnn_hide_train=memory_preprocess._memory_process(act_use_rnn_hide_train)
act_use_rnn_hide_train.rename(columns={'Unnamed: 0': 'uId'}, inplace=True)
train = pd.merge(train, act_use_rnn_hide_train, how='left', on='uId').fillna(0)
del act_use_rnn_hide_train
act_use_rnn_hide_test=pd.read_csv('../../data/features/act_use_rnn_hide_test.csv')
act_use_rnn_hide_test=memory_preprocess._memory_process(act_use_rnn_hide_test)
act_use_rnn_hide_test.rename(columns={'Unnamed: 0': 'uId'}, inplace=True)
test = pd.merge(test, act_use_rnn_hide_test, how='left', on='uId').fillna(0)
print(test.info())
del act_use_rnn_hide_test
gc.collect()
# ---- Build per-user app lists: activated apps ('#'-separated string) plus
# ---- usage apps (stringified Python lists parsed via literal_eval) ---------
user_app_actived['app_list'] = user_app_actived.appId.str.split('#')
import ast
from tqdm import tqdm
usage_train = []
for idx in tqdm(usage_list.appId):
    usage_train.append(ast.literal_eval(idx))
usage_list['app_list'] = usage_train
user_app_actived.drop('appId',axis=1,inplace=True)
usage_list.drop('appId',axis=1,inplace=True)
user_app_actived = pd.merge(user_app_actived, usage_list, how='left', on='uId')
result = []
# Concatenate the two lists per user; users without usage data (NaN
# app_list_y) keep only their activated apps.
for index,row in tqdm(user_app_actived.iterrows()):
    try:
        result.append(row['app_list_x'] + row['app_list_y'])
    except:
        result.append(row['app_list_x'])
user_app_actived['app_list'] = result
user_app_actived.drop(['app_list_x','app_list_y'],axis=1,inplace =True)
x_train = pd.merge(x_train, user_app_actived, how='left', on='uId')
x_test = pd.merge(x_test, user_app_actived, how='left', on='uId')
# Labels shifted to 0-based for to_categorical below.
y_train = x_train.age_group - 1
x_train = x_train.drop('age_group',axis=1)
del user_app_actived
del usage_list
del usage_train
gc.collect()
# ---- Align the dense feature frames to the uId order of x_train/x_test -----
train_uId = x_train.uId.tolist()
test_uId = x_test.uId.tolist()
test.index = test.uId.tolist()
train.index = train.uId.tolist()
test = test.loc[test_uId,:]
train = train.loc[train_uId,:]
# ---- Vocabulary: activated-app ids plus the 10k most frequent usage apps ---
appId = pd.read_csv('../../data/processed_data/appId.csv')
usage_appId = pd.read_csv('../../data/processed_data/usage_appId_top_num100000.csv')
usage_appId = usage_appId[-10000:]
usage_appId['id'] = np.arange(0,10000)
all_appid = list(set(appId.appId.tolist() + usage_appId.appId.tolist()))
app_dict = dict(zip(all_appid,np.arange(len(all_appid))))
# Keep only in-vocabulary apps and join to space-separated strings so the
# app lists can be fed to CountVectorizer as documents.
x_train = [[x for x in apps if x in app_dict] for apps in x_train.app_list]
x_test = [[x for x in apps if x in app_dict] for apps in x_test.app_list]
x_train = [" ".join(app) for app in x_train]
x_test = [" ".join(app) for app in x_test]
c_vec1 = CountVectorizer(lowercase=False,ngram_range=(1,1),dtype=np.int8)
c_vec1.fit(x_train + x_test)
x_train = c_vec1.transform(x_train).toarray()
x_test = c_vec1.transform(x_test).toarray()
gc.collect()
# ---- Scale the dense features to [0, 1] and append them to the bag-of-apps -
train.drop(['uId','age_group'],axis=1,inplace=True)
test.drop('uId',axis=1,inplace=True)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
from sklearn.preprocessing import StandardScaler,MinMaxScaler
train = train.replace([np.inf, -np.inf], np.nan).fillna(0)
test = test.replace([np.inf, -np.inf], np.nan).fillna(0)
scaler = MinMaxScaler()
scaler.fit(pd.concat([train,test],axis=0))
train = scaler.transform(train)
test = scaler.transform(test)
train = memory_preprocess._memory_process(pd.DataFrame(train))
test = memory_preprocess._memory_process(pd.DataFrame(test))
gc.collect()
x_train = np.hstack((x_train,train.values))
x_test = np.hstack((x_test,test.values))
from keras.utils.np_utils import to_categorical
# One-hot encode the 6 age groups for the softmax classifier.
y_train = to_categorical(y_train, num_classes=None)
def mlp_v3():
    """Build and compile the age-group MLP: 1024-256-128 ReLU stack with 20%
    dropout after each hidden layer, softmax over the 6 classes."""
    layer_stack = [
        Dense(1024, input_shape=(13, 400,)),
        Activation('relu'),
        Dropout(0.2),
        Dense(256),
        Activation('relu'),
        Dropout(0.2),
        Dense(128),
        Activation('relu'),
        Dropout(0.2),
        Dense(6),
        Activation('softmax'),
    ]
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    model.compile(loss='categorical_crossentropy',
                  optimizer='Nadam',
                  metrics=['accuracy'])
    return model
# ---- 5-fold CV: train the MLP per fold, collect out-of-fold train
# ---- probabilities and fold-averaged test probabilities --------------------
from sklearn.model_selection import train_test_split, StratifiedKFold
kfold = StratifiedKFold(n_splits=5, random_state=10, shuffle=False)
# y_test accumulates test-set probabilities averaged over the 5 folds;
# y_val holds the out-of-fold prediction for every training row.
y_test = np.zeros((x_test.shape[0],6))
y_val = np.zeros((x_train.shape[0],6))
for i, (train_index, valid_index) in enumerate(kfold.split(x_train, np.argmax(y_train,axis=1))):
    X_train, X_val, Y_train, Y_val = x_train[train_index],x_train[valid_index], y_train[train_index], y_train[valid_index]
    # Checkpoint the best-on-validation weights and restore them below
    # before predicting.
    filepath="weights_best2.h5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=2, save_best_only=True, mode='min')
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.6, patience=1, min_lr=0.0001, verbose=2)
    # NOTE(review): earlystopping is created but not added to `callbacks` —
    # confirm whether early stopping was meant to be active.
    earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=2, verbose=2, mode='auto')
    callbacks = [checkpoint, reduce_lr]
    model = mlp_v3()
    if i == 0:print(model.summary())
    model.fit(X_train, Y_train, batch_size=128, epochs=5, validation_data=(X_val, Y_val), verbose=1, callbacks=callbacks,
              )
    model.load_weights(filepath)
    y_val[valid_index] = model.predict(X_val, batch_size=128, verbose=1)
    y_test += np.array(model.predict(x_test, batch_size=128, verbose=1))/5
# Persist the out-of-fold train and averaged test probabilities, uId-indexed.
y_val = pd.DataFrame(y_val,index=train_uId)
y_val.to_csv('../../data/prob_file/act_all_train_mlp.csv')
y_test = pd.DataFrame(y_test,index=test_uId)
y_test.to_csv('../../data/prob_file/act_all_test_mlp.csv')
"pandas.read_csv",
"numpy.hstack",
"numpy.iinfo",
"sklearn.model_selection.StratifiedKFold",
"keras.layers.Activation",
"keras.layers.Dense",
"numpy.arange",
"sklearn.feature_extraction.text.CountVectorizer",
"keras.callbacks.EarlyStopping",
"pandas.DataFrame",
"sklearn.preprocessing.MinMaxScale... | [((4591, 4679), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/original_data/user_app_actived.csv"""'], {'names': "['uId', 'appId']"}), "('../../data/original_data/user_app_actived.csv', names=['uId',\n 'appId'])\n", (4602, 4679), True, 'import pandas as pd\n'), ((4686, 4818), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/original_data/age_train.csv"""'], {'names': "['uId', 'age_group']", 'dtype': "{'uId': np.int32, 'age_group': np.int8}"}), "('../../data/original_data/age_train.csv', names=['uId',\n 'age_group'], dtype={'uId': np.int32, 'age_group': np.int8})\n", (4697, 4818), True, 'import pandas as pd\n'), ((4820, 4914), 'pandas.read_csv', 'pd.read_csv', (['"""../data/original_data/age_test.csv"""'], {'names': "['uId']", 'dtype': "{'uId': np.int32}"}), "('../data/original_data/age_test.csv', names=['uId'], dtype={\n 'uId': np.int32})\n", (4831, 4914), True, 'import pandas as pd\n'), ((4921, 4980), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/processed_data/usage_app_info.csv"""'], {}), "('../../data/processed_data/usage_app_info.csv')\n", (4932, 4980), True, 'import pandas as pd\n'), ((4996, 5052), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/processed_data/usage_appId.csv"""'], {}), "('../../data/processed_data/usage_appId.csv')\n", (5007, 5052), True, 'import pandas as pd\n'), ((5064, 5113), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/features/base_train.csv"""'], {}), "('../../data/features/base_train.csv')\n", (5075, 5113), True, 'import pandas as pd\n'), ((5122, 5170), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/features/base_test.csv"""'], {}), "('../../data/features/base_test.csv')\n", (5133, 5170), True, 'import pandas as pd\n'), ((5286, 5298), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5296, 5298), False, 'import gc\n'), ((5325, 5384), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/features/actived_features_all.csv"""'], {}), 
"('../../data/features/actived_features_all.csv')\n", (5336, 5384), True, 'import pandas as pd\n'), ((5646, 5658), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5656, 5658), False, 'import gc\n'), ((5685, 5746), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/features/act_use_rnn_hide_train.csv"""'], {}), "('../../data/features/act_use_rnn_hide_train.csv')\n", (5696, 5746), True, 'import pandas as pd\n'), ((6039, 6099), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/features/act_use_rnn_hide_test.csv"""'], {}), "('../../data/features/act_use_rnn_hide_test.csv')\n", (6050, 6099), True, 'import pandas as pd\n'), ((6381, 6393), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6391, 6393), False, 'import gc\n'), ((6531, 6553), 'tqdm.tqdm', 'tqdm', (['usage_list.appId'], {}), '(usage_list.appId)\n', (6535, 6553), False, 'from tqdm import tqdm\n'), ((6762, 6822), 'pandas.merge', 'pd.merge', (['user_app_actived', 'usage_list'], {'how': '"""left"""', 'on': '"""uId"""'}), "(user_app_actived, usage_list, how='left', on='uId')\n", (6770, 6822), True, 'import pandas as pd\n'), ((7141, 7198), 'pandas.merge', 'pd.merge', (['x_train', 'user_app_actived'], {'how': '"""left"""', 'on': '"""uId"""'}), "(x_train, user_app_actived, how='left', on='uId')\n", (7149, 7198), True, 'import pandas as pd\n'), ((7209, 7265), 'pandas.merge', 'pd.merge', (['x_test', 'user_app_actived'], {'how': '"""left"""', 'on': '"""uId"""'}), "(x_test, user_app_actived, how='left', on='uId')\n", (7217, 7265), True, 'import pandas as pd\n'), ((7399, 7411), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7409, 7411), False, 'import gc\n'), ((7618, 7668), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/processed_data/appId.csv"""'], {}), "('../../data/processed_data/appId.csv')\n", (7629, 7668), True, 'import pandas as pd\n'), ((7684, 7754), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/processed_data/usage_appId_top_num100000.csv"""'], {}), 
"('../../data/processed_data/usage_appId_top_num100000.csv')\n", (7695, 7754), True, 'import pandas as pd\n'), ((7812, 7831), 'numpy.arange', 'np.arange', (['(0)', '(10000)'], {}), '(0, 10000)\n', (7821, 7831), True, 'import numpy as np\n'), ((8222, 8289), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'lowercase': '(False)', 'ngram_range': '(1, 1)', 'dtype': 'np.int8'}), '(lowercase=False, ngram_range=(1, 1), dtype=np.int8)\n', (8237, 8289), False, 'from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n'), ((8410, 8422), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8420, 8422), False, 'import gc\n'), ((8785, 8799), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (8797, 8799), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler\n'), ((9037, 9049), 'gc.collect', 'gc.collect', ([], {}), '()\n', (9047, 9049), False, 'import gc\n'), ((9063, 9097), 'numpy.hstack', 'np.hstack', (['(x_train, train.values)'], {}), '((x_train, train.values))\n', (9072, 9097), True, 'import numpy as np\n'), ((9107, 9139), 'numpy.hstack', 'np.hstack', (['(x_test, test.values)'], {}), '((x_test, test.values))\n', (9116, 9139), True, 'import numpy as np\n'), ((9201, 9242), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['y_train'], {'num_classes': 'None'}), '(y_train, num_classes=None)\n', (9215, 9242), False, 'from keras.utils.np_utils import to_categorical\n'), ((9967, 10026), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(5)', 'random_state': '(10)', 'shuffle': '(False)'}), '(n_splits=5, random_state=10, shuffle=False)\n', (9982, 10026), False, 'from sklearn.model_selection import train_test_split, StratifiedKFold\n'), ((10037, 10067), 'numpy.zeros', 'np.zeros', (['(x_test.shape[0], 6)'], {}), '((x_test.shape[0], 6))\n', (10045, 10067), True, 'import numpy as np\n'), ((10076, 10107), 'numpy.zeros', 'np.zeros', (['(x_train.shape[0], 6)'], {}), 
'((x_train.shape[0], 6))\n', (10084, 10107), True, 'import numpy as np\n'), ((11134, 11170), 'pandas.DataFrame', 'pd.DataFrame', (['y_val'], {'index': 'train_uId'}), '(y_val, index=train_uId)\n', (11146, 11170), True, 'import pandas as pd\n'), ((11240, 11276), 'pandas.DataFrame', 'pd.DataFrame', (['y_test'], {'index': 'test_uId'}), '(y_test, index=test_uId)\n', (11252, 11276), True, 'import pandas as pd\n'), ((8812, 8844), 'pandas.concat', 'pd.concat', (['[train, test]'], {'axis': '(0)'}), '([train, test], axis=0)\n', (8821, 8844), True, 'import pandas as pd\n'), ((8953, 8972), 'pandas.DataFrame', 'pd.DataFrame', (['train'], {}), '(train)\n', (8965, 8972), True, 'import pandas as pd\n'), ((9016, 9034), 'pandas.DataFrame', 'pd.DataFrame', (['test'], {}), '(test)\n', (9028, 9034), True, 'import pandas as pd\n'), ((9273, 9285), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9283, 9285), False, 'from keras.models import Sequential\n'), ((10380, 10474), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""val_loss"""', 'verbose': '(2)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(filepath, monitor='val_loss', verbose=2, save_best_only=\n True, mode='min')\n", (10395, 10474), False, 'from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping, Callback\n'), ((10487, 10578), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.6)', 'patience': '(1)', 'min_lr': '(0.0001)', 'verbose': '(2)'}), "(monitor='val_loss', factor=0.6, patience=1, min_lr=0.0001,\n verbose=2)\n", (10504, 10578), False, 'from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping, Callback\n'), ((10596, 10687), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0.0001)', 'patience': '(2)', 'verbose': '(2)', 'mode': '"""auto"""'}), "(monitor='val_loss', min_delta=0.0001, patience=2, verbose=2,\n mode='auto')\n", 
(10609, 10687), False, 'from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping, Callback\n'), ((3538, 3560), 'tqdm.tqdm_notebook', 'tqdm_notebook', (['df_cols'], {}), '(df_cols)\n', (3551, 3560), False, 'from tqdm import tqdm_notebook\n'), ((5472, 5531), 'pandas.merge', 'pd.merge', (['train', 'actived_features_all'], {'how': '"""left"""', 'on': '"""uId"""'}), "(train, actived_features_all, how='left', on='uId')\n", (5480, 5531), True, 'import pandas as pd\n'), ((5550, 5608), 'pandas.merge', 'pd.merge', (['test', 'actived_features_all'], {'how': '"""left"""', 'on': '"""uId"""'}), "(test, actived_features_all, how='left', on='uId')\n", (5558, 5608), True, 'import pandas as pd\n'), ((5914, 5975), 'pandas.merge', 'pd.merge', (['train', 'act_use_rnn_hide_train'], {'how': '"""left"""', 'on': '"""uId"""'}), "(train, act_use_rnn_hide_train, how='left', on='uId')\n", (5922, 5975), True, 'import pandas as pd\n'), ((6263, 6322), 'pandas.merge', 'pd.merge', (['test', 'act_use_rnn_hide_test'], {'how': '"""left"""', 'on': '"""uId"""'}), "(test, act_use_rnn_hide_test, how='left', on='uId')\n", (6271, 6322), True, 'import pandas as pd\n'), ((6579, 6600), 'ast.literal_eval', 'ast.literal_eval', (['idx'], {}), '(idx)\n', (6595, 6600), False, 'import ast\n'), ((9301, 9335), 'keras.layers.Dense', 'Dense', (['(1024)'], {'input_shape': '(13, 400)'}), '(1024, input_shape=(13, 400))\n', (9306, 9335), False, 'from keras.layers import Dense, Dropout, Activation, BatchNormalization\n'), ((9352, 9370), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9362, 9370), False, 'from keras.layers import Dense, Dropout, Activation, BatchNormalization\n'), ((9387, 9399), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (9394, 9399), False, 'from keras.layers import Dense, Dropout, Activation, BatchNormalization\n'), ((9418, 9428), 'keras.layers.Dense', 'Dense', (['(256)'], {}), '(256)\n', (9423, 9428), False, 'from keras.layers import 
Dense, Dropout, Activation, BatchNormalization\n'), ((9445, 9463), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9455, 9463), False, 'from keras.layers import Dense, Dropout, Activation, BatchNormalization\n'), ((9480, 9492), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (9487, 9492), False, 'from keras.layers import Dense, Dropout, Activation, BatchNormalization\n'), ((9550, 9560), 'keras.layers.Dense', 'Dense', (['(128)'], {}), '(128)\n', (9555, 9560), False, 'from keras.layers import Dense, Dropout, Activation, BatchNormalization\n'), ((9577, 9595), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9587, 9595), False, 'from keras.layers import Dense, Dropout, Activation, BatchNormalization\n'), ((9612, 9624), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (9619, 9624), False, 'from keras.layers import Dense, Dropout, Activation, BatchNormalization\n'), ((9686, 9694), 'keras.layers.Dense', 'Dense', (['(6)'], {}), '(6)\n', (9691, 9694), False, 'from keras.layers import Dense, Dropout, Activation, BatchNormalization\n'), ((9711, 9732), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (9721, 9732), False, 'from keras.layers import Dense, Dropout, Activation, BatchNormalization\n'), ((10176, 10202), 'numpy.argmax', 'np.argmax', (['y_train'], {'axis': '(1)'}), '(y_train, axis=1)\n', (10185, 10202), True, 'import numpy as np\n'), ((1548, 1565), 'numpy.iinfo', 'np.iinfo', (['np.int8'], {}), '(np.int8)\n', (1556, 1565), True, 'import numpy as np\n'), ((1595, 1612), 'numpy.iinfo', 'np.iinfo', (['np.int8'], {}), '(np.int8)\n', (1603, 1612), True, 'import numpy as np\n'), ((1643, 1661), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (1651, 1661), True, 'import numpy as np\n'), ((1692, 1710), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (1700, 1710), True, 'import numpy as np\n'), ((1741, 1759), 'numpy.iinfo', 
'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (1749, 1759), True, 'import numpy as np\n'), ((1790, 1808), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (1798, 1808), True, 'import numpy as np\n'), ((1839, 1857), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (1847, 1857), True, 'import numpy as np\n'), ((1888, 1906), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (1896, 1906), True, 'import numpy as np\n'), ((1939, 1959), 'numpy.finfo', 'np.finfo', (['np.float16'], {}), '(np.float16)\n', (1947, 1959), True, 'import numpy as np\n'), ((1992, 2012), 'numpy.finfo', 'np.finfo', (['np.float16'], {}), '(np.float16)\n', (2000, 2012), True, 'import numpy as np\n'), ((2045, 2065), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (2053, 2065), True, 'import numpy as np\n'), ((2098, 2118), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (2106, 2118), True, 'import numpy as np\n'), ((2151, 2171), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (2159, 2171), True, 'import numpy as np\n'), ((2204, 2224), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (2212, 2224), True, 'import numpy as np\n')] |
#!/usr/bin/env python
'''
This is a test script to do interpolation with the gain
measurement data.
By inspecting the results, InterpolatedUnivariateSpline
gives the smoother results. Therefore, this algorithm
will be adopted.
'''
from scipy.interpolate import Rbf, InterpolatedUnivariateSpline
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
if __name__ == '__main__':
'''
Test interpolation with numpy and scipy.
'''
df_gain_lookup = pd.read_csv('../preamp_calib_data/caen_measurement.csv')
x_meas = np.array(df_gain_lookup['High Gain'])
y_meas = np.array(df_gain_lookup['Gain (ch/pC)'])
x_plot = np.linspace(x_meas.min(), x_meas.max(), 101)
rbf = Rbf(x_meas, y_meas)
y_rbf = rbf(x_plot)
ius = InterpolatedUnivariateSpline(x_meas, y_meas)
y_ius = ius(x_plot)
plt.plot(x_meas, y_meas, 'bo', label='measured')
plt.plot(x_plot, y_rbf, 'g', label='RBF smoothing', alpha=.75)
plt.plot(x_plot, y_ius, 'y', label='spline', alpha=.9)
plt.legend()
plt.ylabel('gain (ADC/pC)')
plt.xlabel('preamp gain setting')
plt.savefig('adc_per_charge_vs_preamp_gain.png')
| [
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"scipy.interpolate.InterpolatedUnivariateSpline",
"scipy.interpolate.Rbf",
"matplotlib.pyplot.legend"
] | [((314, 335), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (328, 335), False, 'import matplotlib\n'), ((517, 573), 'pandas.read_csv', 'pd.read_csv', (['"""../preamp_calib_data/caen_measurement.csv"""'], {}), "('../preamp_calib_data/caen_measurement.csv')\n", (528, 573), True, 'import pandas as pd\n'), ((588, 625), 'numpy.array', 'np.array', (["df_gain_lookup['High Gain']"], {}), "(df_gain_lookup['High Gain'])\n", (596, 625), True, 'import numpy as np\n'), ((639, 679), 'numpy.array', 'np.array', (["df_gain_lookup['Gain (ch/pC)']"], {}), "(df_gain_lookup['Gain (ch/pC)'])\n", (647, 679), True, 'import numpy as np\n'), ((753, 772), 'scipy.interpolate.Rbf', 'Rbf', (['x_meas', 'y_meas'], {}), '(x_meas, y_meas)\n', (756, 772), False, 'from scipy.interpolate import Rbf, InterpolatedUnivariateSpline\n'), ((808, 852), 'scipy.interpolate.InterpolatedUnivariateSpline', 'InterpolatedUnivariateSpline', (['x_meas', 'y_meas'], {}), '(x_meas, y_meas)\n', (836, 852), False, 'from scipy.interpolate import Rbf, InterpolatedUnivariateSpline\n'), ((882, 930), 'matplotlib.pyplot.plot', 'plt.plot', (['x_meas', 'y_meas', '"""bo"""'], {'label': '"""measured"""'}), "(x_meas, y_meas, 'bo', label='measured')\n", (890, 930), True, 'import matplotlib.pyplot as plt\n'), ((935, 998), 'matplotlib.pyplot.plot', 'plt.plot', (['x_plot', 'y_rbf', '"""g"""'], {'label': '"""RBF smoothing"""', 'alpha': '(0.75)'}), "(x_plot, y_rbf, 'g', label='RBF smoothing', alpha=0.75)\n", (943, 998), True, 'import matplotlib.pyplot as plt\n'), ((1002, 1057), 'matplotlib.pyplot.plot', 'plt.plot', (['x_plot', 'y_ius', '"""y"""'], {'label': '"""spline"""', 'alpha': '(0.9)'}), "(x_plot, y_ius, 'y', label='spline', alpha=0.9)\n", (1010, 1057), True, 'import matplotlib.pyplot as plt\n'), ((1061, 1073), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1071, 1073), True, 'import matplotlib.pyplot as plt\n'), ((1078, 1105), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""gain 
(ADC/pC)"""'], {}), "('gain (ADC/pC)')\n", (1088, 1105), True, 'import matplotlib.pyplot as plt\n'), ((1110, 1143), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""preamp gain setting"""'], {}), "('preamp gain setting')\n", (1120, 1143), True, 'import matplotlib.pyplot as plt\n'), ((1149, 1197), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""adc_per_charge_vs_preamp_gain.png"""'], {}), "('adc_per_charge_vs_preamp_gain.png')\n", (1160, 1197), True, 'import matplotlib.pyplot as plt\n')] |
# pylint: disable=import-error, no-name-in-module
import os
import sys
import numpy as np
import scipy.io
import wfdb
from data import Subject
def __load_file(file_path: str, file_name: str) -> []:
"""
Args:
file_path: E.g path to the folder on the server
file_name: Name of the file
Returns: Array with raw time series data.
Description
----
Loading raw data to work with from a file. (e.g. a single night)
Without the corresponding annotations.
Params
----
"""
file_input = scipy.io.loadmat(f"{file_path}{file_name}/{file_name}")
return file_input['val']
def __load_annotations(path: str, column_name: str) -> dict:
"""
Args:
path: path to the annotations file (e.g. "//srv39.itd-intern.de/
MachineLearningDatasets/")
column_name: name of the column in array (e.g. "arousal")
Returns: dict of annotations with position (e.g. [96000: 'W'])
Description
----
Loading annotations corresponding to the raw data. (e.g. for a single night)
Without the corresponding raw data.
Params
----
"""
annotations = wfdb.rdann(path, column_name)
clean_annotations = np.asarray(annotations.aux_note)
sample_annotations = annotations.sample
annotations_dict = dict()
for i, element in enumerate(sample_annotations):
annotations_dict[element] = clean_annotations[i]
return annotations_dict
def load_data(file_path=f"//srv11.itd-intern.de/MachineLearningDatasets"
f"/medical/physionet_challenge/training/",
frequency=200,
sections=30,
channel_names=None,
channel_types=None,
offset=0):
"""
Args:
file_path: E.g path to the folder on the server
frequency: Sampling frequency
sections: E.g. divided into 30s sections
channel_names: name for each channel. Has a not None default
inside the function.
channel_types: Type for each channel. Has a not None default
inside the function.
offset: Ignore the first x files.
Returns: A Subject object
Description
----
Loading transform raw data and the corresponding annotations into a
>>Subject<<. This simplifies the handling of different sources.
Params
----
"""
if channel_names is None:
channel_names = ['F3-M2', 'F4-M1', 'C3-M2', 'C4-M1', 'O1-M2', 'O2-M1',
'E1-M2', 'Chin1-Chin2', 'ABD', 'CHEST',
'AIRFLOW', 'SaO2', 'ECG']
if channel_types is None:
channel_types = ['eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg',
'misc', 'eeg', 'eeg', 'misc', 'eeg']
try:
file_list = os.listdir(file_path)
except FileNotFoundError:
sys.exit("\n[ParameterError]: Can not find files on the server. "
"Are you sure you connected to the filesystem?")
file_list.sort()
# sort the file_list to ensure that the data is in the same order on
# every system
try:
file = str(file_list[offset])
except IndexError:
sys.exit("\n[IndexError]: There is no file left to load.")
subject = Subject.Subject(subject_name=file,
subject_raw_units=__load_file(file_path, file),
subject_raw_units_labels=
__load_annotations(f"{file_path}{file}/{file}",
"arousal"),
subject_sample_frequency=frequency,
subject_sections=sections,
subject_channel_names=channel_names,
subject_channel_type=channel_types)
return subject
| [
"os.listdir",
"numpy.asarray",
"wfdb.rdann",
"sys.exit"
] | [((1145, 1174), 'wfdb.rdann', 'wfdb.rdann', (['path', 'column_name'], {}), '(path, column_name)\n', (1155, 1174), False, 'import wfdb\n'), ((1199, 1231), 'numpy.asarray', 'np.asarray', (['annotations.aux_note'], {}), '(annotations.aux_note)\n', (1209, 1231), True, 'import numpy as np\n'), ((2790, 2811), 'os.listdir', 'os.listdir', (['file_path'], {}), '(file_path)\n', (2800, 2811), False, 'import os\n'), ((2850, 2974), 'sys.exit', 'sys.exit', (['"""\n[ParameterError]: Can not find files on the server. Are you sure you connected to the filesystem?"""'], {}), '(\n """\n[ParameterError]: Can not find files on the server. Are you sure you connected to the filesystem?"""\n )\n', (2858, 2974), False, 'import sys\n'), ((3175, 3236), 'sys.exit', 'sys.exit', (['"""\n[IndexError]: There is no file left to load."""'], {}), '("""\n[IndexError]: There is no file left to load.""")\n', (3183, 3236), False, 'import sys\n')] |
#!/usr/bin/python3
# number of output figures = 1
import numpy as np
import helper.basis
from helper.figure import Figure
p = 3
l = 3
b = helper.basis.HierarchicalBSpline(p)
fig = Figure.create(figsize=(3, 2))
ax = fig.gca()
h = 2**(-l)
for i in range(1 - (p+1)//2, 2):
color = "C{}".format(i % 9)
x = i*h
lb, ub = x - (p+1)/2 * h, x + (p+1)/2 * h
xx = np.linspace(lb, ub, 200)
yy = (2 - i) * b.evaluate(l, i, xx)
ax.plot(xx, yy, "-", color=color, clip_on=False)
ax.text(x, max(yy)+0.05,
r"${} \bspl{{{},{}}}{{{}}}$".format(2-i, "l'", i, "p"),
ha="center", va="bottom", color=color)
i = 1
b = helper.basis.ModifiedHierarchicalBSpline(p)
xx = np.linspace(*b.getSupport(l, i), 200)
yy = b.evaluate(l, i, xx)
ax.plot(xx, yy, "k--", clip_on=False)
ax.text(i*h-0.02, 1.2,
r"$\bspl[{}]{{{},{}}}{{{}}}$".format(r"\modified", "l'", 1, "p"),
ha="left", va="bottom", color="k")
maxY = 2.1
ax.plot([0, 0], [0, maxY], "k-", clip_on=False)
I = np.arange(-3, 2+(p+1)//2)
ax.set_xticks(h * I)
ax.set_xticklabels([r"$\gp{{{},{}}}$".format("l'", i) for i in I])
ax.set_ylim(0, maxY)
ax.set_yticks([0, 1, 2])
fig.save()
| [
"helper.figure.Figure.create",
"numpy.linspace",
"numpy.arange"
] | [((184, 213), 'helper.figure.Figure.create', 'Figure.create', ([], {'figsize': '(3, 2)'}), '(figsize=(3, 2))\n', (197, 213), False, 'from helper.figure import Figure\n'), ((989, 1020), 'numpy.arange', 'np.arange', (['(-3)', '(2 + (p + 1) // 2)'], {}), '(-3, 2 + (p + 1) // 2)\n', (998, 1020), True, 'import numpy as np\n'), ((366, 390), 'numpy.linspace', 'np.linspace', (['lb', 'ub', '(200)'], {}), '(lb, ub, 200)\n', (377, 390), True, 'import numpy as np\n')] |
"""
This script serves to do recurrence analysis on the sv-gene pairs identified
We do the following things:
1. From the top 100 of each SV type (so top 400), which genes are there? Which are the top 15 most recurrent?
2. For these genes, also check which other mutations are found in these genes in different patients.
3. Then also check which genes are recurrent if we ignore the top 100, and just look across all positive SV-gne pairs.
"""
import sys
import numpy as np
import random
from scipy import stats
from statsmodels.sandbox.stats.multicomp import multipletests
import matplotlib.pyplot as plt
import os
import matplotlib
matplotlib.use('Agg')
outDir = sys.argv[1]
finalOutDir = outDir + '/figure4/'
if not os.path.exists(finalOutDir):
os.makedirs(finalOutDir)
finalOutDirFullFigure = outDir + '/figS5/'
if not os.path.exists(finalOutDirFullFigure):
os.makedirs(finalOutDirFullFigure)
#load the sv-gene pairs
positivePairs = np.loadtxt(outDir + '/linkedSVGenePairs/nonCoding_geneSVPairs.txt_pathogenicPairsFeatures.txt', dtype='object')
print(positivePairs.shape)
topPairs = dict()
topPairGenes = dict() #all unique genes
svTypes = ['DEL', 'DUP', 'INV', 'ITX']
for svType in svTypes:
svPairs = np.loadtxt(outDir + '/featureImportance/pairLabels_top100_' + svType + '.txt', dtype='object')
rankedPairs = []
ind = len(svPairs)
for pair in svPairs:
splitPair = pair.split('_')
topPairGenes[splitPair[0]] = 0
rankedPairs.append([pair, ind])
ind -= 1
topPairs[svType] = rankedPairs
degPairs = np.loadtxt(outDir + '/tadDisruptionsZScores/zScores.txt', dtype='object')
#format: gene as key, as values, the first is the number of times we found the gene,
#the second how many were dels, then dups, invs, itx.
splitPairs = dict()
genes = dict()
for pair in positivePairs: #get stats for all pairs
splitPair = pair[0].split('_')
if splitPair[0] + '_' + splitPair[7] not in splitPairs:
splitPairs[splitPair[0] + '_' + splitPair[7]] = []
splitPairs[splitPair[0] + '_' + splitPair[7]].append(pair)
if splitPair[0] not in genes:
#count, cross-patient count, nc: del, dup, inv, itx, snv, cnv amp, cnv del, sv del, sv dup, sv inv, sv itx
genes[splitPair[0]] = [0]*13
genes[splitPair[0]].append([]) #add the patient names here
genes[splitPair[0]].append(0) #negative dels
genes[splitPair[0]].append(0) #negative dups
genes[splitPair[0]].append(0) #negative invs
genes[splitPair[0]].append(0) #negative itx
genes[splitPair[0]][0] += 1
if splitPair[12] == 'DEL':
genes[splitPair[0]][1] += 1
elif splitPair[12] == 'DUP':
genes[splitPair[0]][2] += 1
elif splitPair[12] == 'INV':
genes[splitPair[0]][3] += 1
elif splitPair[12] == 'ITX':
genes[splitPair[0]][4] += 1
patient = splitPair[7]
genes[splitPair[0]][13].append(patient)
#convert to numpy array for easy ranking
recurrentGenes = []
for gene in genes:
#count how many unique patients affect the gene for recurrence
uniquePatients = np.unique(genes[gene][13])
data = [gene] + [len(uniquePatients)] + genes[gene]
recurrentGenes.append(data)
recurrentGenes = np.array(recurrentGenes, dtype='object')
#sort
sortedGenes = recurrentGenes[np.argsort(recurrentGenes[:,1])[::-1]]
sortedGenesTop = []
for gene in sortedGenes:
if gene[0] not in topPairGenes:
continue
sortedGenesTop.append(gene)
sortedGenesTop = np.array(sortedGenesTop, dtype='object')
#make a matrix in which we show visually which genes are affected in which patients
#this matrix is genes x patients
uniquePatients = dict()
top = 15 #making the matrix only for the top X genes
ind = 0
for gene in sortedGenesTop:
if ind >= top:
continue
patients = gene[15]
for patient in patients:
if patient not in uniquePatients:
uniquePatients[patient] = 0
uniquePatients[patient] += 1
ind += 1
#make a matrix of genes by patients
recurrenceMatrix = np.zeros([top, len(uniquePatients)])
ind = 0
patientOrder = dict() #order of patients in the matrix
for patientInd in range(0, len(uniquePatients)):
patient = list(uniquePatients.keys())[patientInd]
patientOrder[patient] = patientInd
for gene in sortedGenesTop:
if ind >= top:
continue
patients = gene[15]
for patient in patients:
patientInd = patientOrder[patient]
recurrenceMatrix[ind, patientInd] += 1
ind += 1
print(recurrenceMatrix)
#make a grid plot, showing the different SV types that the patients have
#color the genes with -/+ direction, see if it correlates with the SV types.
fig, ax = plt.subplots()
for row in range(0, recurrenceMatrix.shape[0]):
if row < recurrenceMatrix.shape[0]-1:
ax.axhline(row+0.5, linestyle='--', color='k', linewidth=0.5)
for col in range(0, recurrenceMatrix.shape[1]):
if col < recurrenceMatrix.shape[1]-1:
ax.axvline(col+0.5, linestyle='--', color='k', linewidth=0.5)
if recurrenceMatrix[row,col] > 0:
#get the sv type to see which symbol to assign
gene = sortedGenesTop[row, 0]
patient = list(uniquePatients.keys())[col]
pairs = splitPairs[gene + '_' + patient]
#generate some random offsets to avoid overlapping data
offsetsX = random.sample(range(-30,30), len(pairs))
offsetsX = [i / float(100) for i in offsetsX]
offsetsY = random.sample(range(-30,30), len(pairs))
offsetsY = [i / float(100) for i in offsetsY]
ind = 0
for pair in pairs:
splitPair = pair[0].split('_')
svType = splitPair[12]
markerType = '.'
if svType == 'DEL':
markerType = '.'
elif svType == 'DUP':
markerType = 's'
elif svType == 'INV':
markerType = '^'
elif svType == 'ITX':
markerType = '*'
#also get up/down color
if patient + '_' + gene in degPairs[:,0]:
#get the z-score of the pair.
degPairInfo = degPairs[degPairs[:,0] == patient + '_' + gene][0]
color = 'red'
if float(degPairInfo[5]) > 1.5:
color = 'red'
elif float(degPairInfo[5]) < -1.5:
color = 'blue'
else:
color = 'grey'
else:
continue #this is a pair with likely coding mutations, skip it
plt.scatter(col + offsetsY[ind], offsetsX[ind] + (recurrenceMatrix.shape[0] - row -1), marker=markerType, edgecolor=color,
facecolor='none', s=35)
ind += 1
#the genes are swapped around to show the most recurrent on top, so reverse thelabels as well
plt.yticks(range(0, recurrenceMatrix.shape[0]), sortedGenesTop[0:top,0][::-1])
plt.xticks(range(0, recurrenceMatrix.shape[1]), list(uniquePatients.keys()), rotation=90)
#plt.grid()
plt.tight_layout()
plt.savefig(finalOutDir + '/recurrence_top400.svg')
plt.clf()
#Next, we are interested in patients with alternative mutations.
#So here, for each gene, first show how many patients have an SNV, CNV, or SV
#keep in mind that a duplication could be non-coding if it is in the same patient
#this will later become obvious in the visualization
#load the patient-gene mutation pairs
mutationDir = outDir + '/patientGeneMutationPairs/'
snvPatients = np.load(mutationDir + 'snvPatients.npy', allow_pickle=True, encoding='latin1').item()
svPatientsDel = np.load(mutationDir + 'svPatientsDel.npy', allow_pickle=True, encoding='latin1').item()
svPatientsDup = np.load(mutationDir + 'svPatientsDup.npy', allow_pickle=True, encoding='latin1').item()
svPatientsInv = np.load(mutationDir + 'svPatientsInv.npy', allow_pickle=True, encoding='latin1').item()
svPatientsItx = np.load(mutationDir + 'svPatientsItx.npy', allow_pickle=True, encoding='latin1').item()
cnvPatientsDel = np.load(mutationDir + 'cnvPatientsDel.npy', allow_pickle=True, encoding='latin1').item()
cnvPatientsAmp = np.load(mutationDir + 'cnvPatientsAmp.npy', allow_pickle=True, encoding='latin1').item()
#also show the non-coding SVs that do not lead to expression changes
allPairs = np.loadtxt(outDir + '/linkedSVGenePairs/nonCoding_geneSVPairs.txt_', dtype='object')
for pair in allPairs:
splitPair = pair[0].split('_')
gene = splitPair[0]
patient = splitPair[7]
sortedGeneInd = np.where(sortedGenes[:,0] == gene)[0]
if gene in snvPatients[patient]:
sortedGenes[sortedGeneInd, 5] += 1
if gene in cnvPatientsDel[patient]:
sortedGenes[sortedGeneInd, 6] += 1
if gene in cnvPatientsAmp[patient]:
sortedGenes[sortedGeneInd, 7] += 1
if gene in svPatientsDel[patient]:
sortedGenes[sortedGeneInd, 8] += 1
if gene in svPatientsDup[patient]:
sortedGenes[sortedGeneInd, 9] += 1
if gene in svPatientsInv[patient]:
sortedGenes[sortedGeneInd, 10] += 1
if gene in svPatientsItx[patient]:
sortedGenes[sortedGeneInd, 11] += 1
#for the current pair, only add it if it is not in the positive set.
if pair[0] not in positivePairs[:,0]:
#then check the type of SV, and add it to the right gene.
svType = splitPair[12]
if svType == 'DEL':
sortedGenes[sortedGeneInd, 16] += 1
elif svType == 'DUP':
sortedGenes[sortedGeneInd, 17] += 1
elif svType == 'INV':
sortedGenes[sortedGeneInd, 18] += 1
elif svType == 'ITX':
sortedGenes[sortedGeneInd, 19] += 1
print(sortedGenesTop[0:15,:])
#show these data in a bar plot.
#for each type of mutation, add to the stacked bar chart.
#fig,ax = plt.subplots()
geneInd = 0
ymax = 0
for gene in sortedGenes:
if gene[0] not in sortedGenesTop[0:15,0]:
continue
print(gene)
plt.bar(geneInd, gene[5], color='#ffcc00ff')
plt.bar(geneInd, gene[6], bottom=gene[5], color='#9955ffff')
plt.bar(geneInd, gene[7], bottom=gene[5]+gene[6], color='#ff6600b5')
plt.bar(geneInd, gene[8], bottom=gene[5]+gene[6]+gene[7], color='#0000ffb4')
plt.bar(geneInd, gene[9], bottom=gene[5]+gene[6]+gene[7]+gene[8], color='#d40000c6')
plt.bar(geneInd, gene[10], bottom=gene[5]+gene[6]+gene[7]+gene[8]+gene[9], color='#ff00ccb8')
plt.bar(geneInd, gene[11], bottom=gene[5]+gene[6]+gene[7]+gene[8]+gene[9]+gene[10], color='#808080ff')
if gene[5]+gene[6]+gene[7]+gene[8]+gene[9]+gene[10]+gene[11] > ymax:
ymax = gene[5]+gene[6]+gene[7]+gene[8]+gene[9]+gene[10]+gene[11] + 1
geneInd += 1
plt.ylim(0,ymax+1)
plt.tight_layout()
plt.savefig(finalOutDir + '/recurrence_bars.svg')
plt.clf()
exit()
###Also make the full recurrence plot for all patients.
#this is quick and dirty, should have been a re-usable function.
#load the sv-gene pairs
topPairs = dict()
topPairGenes = dict() #all unique genes
svTypes = ['DEL', 'DUP', 'INV', 'ITX']
degPairs = np.loadtxt(outDir + '/tadDisruptionsZScores/zScores.txt', dtype='object')
#format: gene as key, as values, the first is the number of times we found the gene,
#the second how many were dels, then dups, invs, itx.
splitPairs = dict()
genes = dict()
for pair in positivePairs: #get stats for all pairs
splitPair = pair[0].split('_')
if splitPair[0] + '_' + splitPair[7] not in splitPairs:
splitPairs[splitPair[0] + '_' + splitPair[7]] = []
splitPairs[splitPair[0] + '_' + splitPair[7]].append(pair)
if splitPair[0] not in genes:
#count, cross-patient count, nc: del, dup, inv, itx, snv, cnv amp, cnv del, sv del, sv dup, sv inv, sv itx
genes[splitPair[0]] = [0]*13
genes[splitPair[0]].append([]) #add the patient names here
genes[splitPair[0]].append(0) #negative dels
genes[splitPair[0]].append(0) #negative dups
genes[splitPair[0]].append(0) #negative invs
genes[splitPair[0]].append(0) #negative itx
genes[splitPair[0]][0] += 1
if splitPair[12] == 'DEL':
genes[splitPair[0]][1] += 1
elif splitPair[12] == 'DUP':
genes[splitPair[0]][2] += 1
elif splitPair[12] == 'INV':
genes[splitPair[0]][3] += 1
elif splitPair[12] == 'ITX':
genes[splitPair[0]][4] += 1
patient = splitPair[7]
genes[splitPair[0]][13].append(patient)
#convert to numpy array for easy ranking
recurrentGenes = []
for gene in genes:
#count how many unique patients affect the gene for recurrence
uniquePatients = np.unique(genes[gene][13])
data = [gene] + [len(uniquePatients)] + genes[gene]
recurrentGenes.append(data)
recurrentGenes = np.array(recurrentGenes, dtype='object')
#sort
sortedGenes = recurrentGenes[np.argsort(recurrentGenes[:,1])[::-1]]
sortedGenesTop = []
for gene in sortedGenes:
#if gene[0] not in topPairGenes:
# continue
sortedGenesTop.append(gene)
sortedGenesTop = np.array(sortedGenesTop, dtype='object')
#make a matrix in which we show visually which genes are affected in which patients
#this matrix is genes x patients
uniquePatients = dict()
top = 50 #making the matrix only for the top X genes
ind = 0
for gene in sortedGenesTop:
if ind >= top:
continue
patients = gene[15]
for patient in patients:
if patient not in uniquePatients:
uniquePatients[patient] = 0
uniquePatients[patient] += 1
ind += 1
#make a matrix of genes by patients
recurrenceMatrix = np.zeros([top, len(uniquePatients)])
ind = 0
patientOrder = dict() #order of patients in the matrix
for patientInd in range(0, len(uniquePatients)):
patient = list(uniquePatients.keys())[patientInd]
patientOrder[patient] = patientInd
for gene in sortedGenesTop:
if ind >= top:
continue
patients = gene[15]
for patient in patients:
patientInd = patientOrder[patient]
recurrenceMatrix[ind, patientInd] += 1
ind += 1
print(recurrenceMatrix)
#make a grid plot, showing the different SV types that the patients have
#color the genes with -/+ direction, see if it correlates with the SV types.
fig, ax = plt.subplots(figsize=(20,10))
for row in range(0, recurrenceMatrix.shape[0]):
if row < recurrenceMatrix.shape[0]-1:
ax.axhline(row+0.5, linestyle='--', color='k', linewidth=0.5)
for col in range(0, recurrenceMatrix.shape[1]):
if col < recurrenceMatrix.shape[1]-1:
ax.axvline(col+0.5, linestyle='--', color='k', linewidth=0.5)
if recurrenceMatrix[row,col] > 0:
#get the sv type to see which symbol to assign
gene = sortedGenesTop[row, 0]
patient = list(uniquePatients.keys())[col]
pairs = splitPairs[gene + '_' + patient]
#generate some random offsets to avoid overlapping data
offsetsX = random.sample(range(-30,30), len(pairs))
offsetsX = [i / float(100) for i in offsetsX]
offsetsY = random.sample(range(-30,30), len(pairs))
offsetsY = [i / float(100) for i in offsetsY]
ind = 0
for pair in pairs:
splitPair = pair[0].split('_')
svType = splitPair[12]
markerType = '.'
if svType == 'DEL':
markerType = '.'
elif svType == 'DUP':
markerType = 's'
elif svType == 'INV':
markerType = '^'
elif svType == 'ITX':
markerType = '*'
#also get up/down color
if patient + '_' + gene in degPairs[:,0]:
#get the z-score of the pair.
degPairInfo = degPairs[degPairs[:,0] == patient + '_' + gene][0]
color = 'red'
if float(degPairInfo[5]) > 1.5:
color = 'red'
elif float(degPairInfo[5]) < -1.5:
color = 'blue'
else:
color = 'grey'
else:
continue #this is a pair with likely coding mutations, skip it
plt.scatter(col + offsetsY[ind], offsetsX[ind] + (recurrenceMatrix.shape[0] - row -1), marker=markerType, edgecolor=color,
facecolor='none', s=35)
ind += 1
#the genes are swapped around to show the most recurrent on top, so reverse thelabels as well
plt.yticks(range(0, recurrenceMatrix.shape[0]), sortedGenesTop[0:top,0][::-1])
plt.xticks(range(0, recurrenceMatrix.shape[1]), list(uniquePatients.keys()), rotation=90)
#plt.grid()
plt.tight_layout()
plt.savefig(finalOutDirFullFigure + '/recurrence_allPatients.svg')
plt.clf()
exit()
| [
"os.path.exists",
"matplotlib.pyplot.savefig",
"numpy.unique",
"os.makedirs",
"matplotlib.use",
"numpy.where",
"matplotlib.pyplot.clf",
"numpy.argsort",
"numpy.array",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"numpy.l... | [((662, 683), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (676, 683), False, 'import matplotlib\n'), ((982, 1101), 'numpy.loadtxt', 'np.loadtxt', (["(outDir +\n '/linkedSVGenePairs/nonCoding_geneSVPairs.txt_pathogenicPairsFeatures.txt')"], {'dtype': '"""object"""'}), "(outDir +\n '/linkedSVGenePairs/nonCoding_geneSVPairs.txt_pathogenicPairsFeatures.txt',\n dtype='object')\n", (992, 1101), True, 'import numpy as np\n'), ((1579, 1652), 'numpy.loadtxt', 'np.loadtxt', (["(outDir + '/tadDisruptionsZScores/zScores.txt')"], {'dtype': '"""object"""'}), "(outDir + '/tadDisruptionsZScores/zScores.txt', dtype='object')\n", (1589, 1652), True, 'import numpy as np\n'), ((3182, 3222), 'numpy.array', 'np.array', (['recurrentGenes'], {'dtype': '"""object"""'}), "(recurrentGenes, dtype='object')\n", (3190, 3222), True, 'import numpy as np\n'), ((3448, 3488), 'numpy.array', 'np.array', (['sortedGenesTop'], {'dtype': '"""object"""'}), "(sortedGenesTop, dtype='object')\n", (3456, 3488), True, 'import numpy as np\n'), ((4629, 4643), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4641, 4643), True, 'import matplotlib.pyplot as plt\n'), ((6681, 6699), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6697, 6699), True, 'import matplotlib.pyplot as plt\n'), ((6701, 6752), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(finalOutDir + '/recurrence_top400.svg')"], {}), "(finalOutDir + '/recurrence_top400.svg')\n", (6712, 6752), True, 'import matplotlib.pyplot as plt\n'), ((6754, 6763), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6761, 6763), True, 'import matplotlib.pyplot as plt\n'), ((7965, 8054), 'numpy.loadtxt', 'np.loadtxt', (["(outDir + '/linkedSVGenePairs/nonCoding_geneSVPairs.txt_')"], {'dtype': '"""object"""'}), "(outDir + '/linkedSVGenePairs/nonCoding_geneSVPairs.txt_', dtype=\n 'object')\n", (7975, 8054), True, 'import numpy as np\n'), ((10197, 10218), 'matplotlib.pyplot.ylim', 'plt.ylim', 
(['(0)', '(ymax + 1)'], {}), '(0, ymax + 1)\n', (10205, 10218), True, 'import matplotlib.pyplot as plt\n'), ((10217, 10235), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10233, 10235), True, 'import matplotlib.pyplot as plt\n'), ((10237, 10286), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(finalOutDir + '/recurrence_bars.svg')"], {}), "(finalOutDir + '/recurrence_bars.svg')\n", (10248, 10286), True, 'import matplotlib.pyplot as plt\n'), ((10288, 10297), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (10295, 10297), True, 'import matplotlib.pyplot as plt\n'), ((10572, 10645), 'numpy.loadtxt', 'np.loadtxt', (["(outDir + '/tadDisruptionsZScores/zScores.txt')"], {'dtype': '"""object"""'}), "(outDir + '/tadDisruptionsZScores/zScores.txt', dtype='object')\n", (10582, 10645), True, 'import numpy as np\n'), ((12177, 12217), 'numpy.array', 'np.array', (['recurrentGenes'], {'dtype': '"""object"""'}), "(recurrentGenes, dtype='object')\n", (12185, 12217), True, 'import numpy as np\n'), ((12446, 12486), 'numpy.array', 'np.array', (['sortedGenesTop'], {'dtype': '"""object"""'}), "(sortedGenesTop, dtype='object')\n", (12454, 12486), True, 'import numpy as np\n'), ((13639, 13669), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (13651, 13669), True, 'import matplotlib.pyplot as plt\n'), ((15748, 15766), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15764, 15766), True, 'import matplotlib.pyplot as plt\n'), ((15768, 15834), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(finalOutDirFullFigure + '/recurrence_allPatients.svg')"], {}), "(finalOutDirFullFigure + '/recurrence_allPatients.svg')\n", (15779, 15834), True, 'import matplotlib.pyplot as plt\n'), ((15836, 15845), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (15843, 15845), True, 'import matplotlib.pyplot as plt\n'), ((754, 781), 'os.path.exists', 'os.path.exists', (['finalOutDir'], {}), 
'(finalOutDir)\n', (768, 781), False, 'import os\n'), ((785, 809), 'os.makedirs', 'os.makedirs', (['finalOutDir'], {}), '(finalOutDir)\n', (796, 809), False, 'import os\n'), ((862, 899), 'os.path.exists', 'os.path.exists', (['finalOutDirFullFigure'], {}), '(finalOutDirFullFigure)\n', (876, 899), False, 'import os\n'), ((903, 937), 'os.makedirs', 'os.makedirs', (['finalOutDirFullFigure'], {}), '(finalOutDirFullFigure)\n', (914, 937), False, 'import os\n'), ((1260, 1358), 'numpy.loadtxt', 'np.loadtxt', (["(outDir + '/featureImportance/pairLabels_top100_' + svType + '.txt')"], {'dtype': '"""object"""'}), "(outDir + '/featureImportance/pairLabels_top100_' + svType +\n '.txt', dtype='object')\n", (1270, 1358), True, 'import numpy as np\n'), ((3047, 3073), 'numpy.unique', 'np.unique', (['genes[gene][13]'], {}), '(genes[gene][13])\n', (3056, 3073), True, 'import numpy as np\n'), ((9486, 9530), 'matplotlib.pyplot.bar', 'plt.bar', (['geneInd', 'gene[5]'], {'color': '"""#ffcc00ff"""'}), "(geneInd, gene[5], color='#ffcc00ff')\n", (9493, 9530), True, 'import matplotlib.pyplot as plt\n'), ((9533, 9593), 'matplotlib.pyplot.bar', 'plt.bar', (['geneInd', 'gene[6]'], {'bottom': 'gene[5]', 'color': '"""#9955ffff"""'}), "(geneInd, gene[6], bottom=gene[5], color='#9955ffff')\n", (9540, 9593), True, 'import matplotlib.pyplot as plt\n'), ((9596, 9666), 'matplotlib.pyplot.bar', 'plt.bar', (['geneInd', 'gene[7]'], {'bottom': '(gene[5] + gene[6])', 'color': '"""#ff6600b5"""'}), "(geneInd, gene[7], bottom=gene[5] + gene[6], color='#ff6600b5')\n", (9603, 9666), True, 'import matplotlib.pyplot as plt\n'), ((9667, 9752), 'matplotlib.pyplot.bar', 'plt.bar', (['geneInd', 'gene[8]'], {'bottom': '(gene[5] + gene[6] + gene[7])', 'color': '"""#0000ffb4"""'}), "(geneInd, gene[8], bottom=gene[5] + gene[6] + gene[7], color='#0000ffb4'\n )\n", (9674, 9752), True, 'import matplotlib.pyplot as plt\n'), ((9746, 9840), 'matplotlib.pyplot.bar', 'plt.bar', (['geneInd', 'gene[9]'], {'bottom': '(gene[5] + 
gene[6] + gene[7] + gene[8])', 'color': '"""#d40000c6"""'}), "(geneInd, gene[9], bottom=gene[5] + gene[6] + gene[7] + gene[8],\n color='#d40000c6')\n", (9753, 9840), True, 'import matplotlib.pyplot as plt\n'), ((9833, 9938), 'matplotlib.pyplot.bar', 'plt.bar', (['geneInd', 'gene[10]'], {'bottom': '(gene[5] + gene[6] + gene[7] + gene[8] + gene[9])', 'color': '"""#ff00ccb8"""'}), "(geneInd, gene[10], bottom=gene[5] + gene[6] + gene[7] + gene[8] +\n gene[9], color='#ff00ccb8')\n", (9840, 9938), True, 'import matplotlib.pyplot as plt\n'), ((9929, 10045), 'matplotlib.pyplot.bar', 'plt.bar', (['geneInd', 'gene[11]'], {'bottom': '(gene[5] + gene[6] + gene[7] + gene[8] + gene[9] + gene[10])', 'color': '"""#808080ff"""'}), "(geneInd, gene[11], bottom=gene[5] + gene[6] + gene[7] + gene[8] +\n gene[9] + gene[10], color='#808080ff')\n", (9936, 10045), True, 'import matplotlib.pyplot as plt\n'), ((12041, 12067), 'numpy.unique', 'np.unique', (['genes[gene][13]'], {}), '(genes[gene][13])\n', (12050, 12067), True, 'import numpy as np\n'), ((3262, 3294), 'numpy.argsort', 'np.argsort', (['recurrentGenes[:, 1]'], {}), '(recurrentGenes[:, 1])\n', (3272, 3294), True, 'import numpy as np\n'), ((7157, 7235), 'numpy.load', 'np.load', (["(mutationDir + 'snvPatients.npy')"], {'allow_pickle': '(True)', 'encoding': '"""latin1"""'}), "(mutationDir + 'snvPatients.npy', allow_pickle=True, encoding='latin1')\n", (7164, 7235), True, 'import numpy as np\n'), ((7262, 7347), 'numpy.load', 'np.load', (["(mutationDir + 'svPatientsDel.npy')"], {'allow_pickle': '(True)', 'encoding': '"""latin1"""'}), "(mutationDir + 'svPatientsDel.npy', allow_pickle=True, encoding='latin1'\n )\n", (7269, 7347), True, 'import numpy as np\n'), ((7367, 7452), 'numpy.load', 'np.load', (["(mutationDir + 'svPatientsDup.npy')"], {'allow_pickle': '(True)', 'encoding': '"""latin1"""'}), "(mutationDir + 'svPatientsDup.npy', allow_pickle=True, encoding='latin1'\n )\n", (7374, 7452), True, 'import numpy as np\n'), ((7472, 7557), 
'numpy.load', 'np.load', (["(mutationDir + 'svPatientsInv.npy')"], {'allow_pickle': '(True)', 'encoding': '"""latin1"""'}), "(mutationDir + 'svPatientsInv.npy', allow_pickle=True, encoding='latin1'\n )\n", (7479, 7557), True, 'import numpy as np\n'), ((7577, 7662), 'numpy.load', 'np.load', (["(mutationDir + 'svPatientsItx.npy')"], {'allow_pickle': '(True)', 'encoding': '"""latin1"""'}), "(mutationDir + 'svPatientsItx.npy', allow_pickle=True, encoding='latin1'\n )\n", (7584, 7662), True, 'import numpy as np\n'), ((7685, 7771), 'numpy.load', 'np.load', (["(mutationDir + 'cnvPatientsDel.npy')"], {'allow_pickle': '(True)', 'encoding': '"""latin1"""'}), "(mutationDir + 'cnvPatientsDel.npy', allow_pickle=True, encoding=\n 'latin1')\n", (7692, 7771), True, 'import numpy as np\n'), ((7792, 7878), 'numpy.load', 'np.load', (["(mutationDir + 'cnvPatientsAmp.npy')"], {'allow_pickle': '(True)', 'encoding': '"""latin1"""'}), "(mutationDir + 'cnvPatientsAmp.npy', allow_pickle=True, encoding=\n 'latin1')\n", (7799, 7878), True, 'import numpy as np\n'), ((8179, 8214), 'numpy.where', 'np.where', (['(sortedGenes[:, 0] == gene)'], {}), '(sortedGenes[:, 0] == gene)\n', (8187, 8214), True, 'import numpy as np\n'), ((12257, 12289), 'numpy.argsort', 'np.argsort', (['recurrentGenes[:, 1]'], {}), '(recurrentGenes[:, 1])\n', (12267, 12289), True, 'import numpy as np\n'), ((6232, 6383), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(col + offsetsY[ind])', '(offsetsX[ind] + (recurrenceMatrix.shape[0] - row - 1))'], {'marker': 'markerType', 'edgecolor': 'color', 'facecolor': '"""none"""', 's': '(35)'}), "(col + offsetsY[ind], offsetsX[ind] + (recurrenceMatrix.shape[0] -\n row - 1), marker=markerType, edgecolor=color, facecolor='none', s=35)\n", (6243, 6383), True, 'import matplotlib.pyplot as plt\n'), ((15299, 15450), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(col + offsetsY[ind])', '(offsetsX[ind] + (recurrenceMatrix.shape[0] - row - 1))'], {'marker': 'markerType', 'edgecolor': 'color', 
'facecolor': '"""none"""', 's': '(35)'}), "(col + offsetsY[ind], offsetsX[ind] + (recurrenceMatrix.shape[0] -\n row - 1), marker=markerType, edgecolor=color, facecolor='none', s=35)\n", (15310, 15450), True, 'import matplotlib.pyplot as plt\n')] |
# Exploratory analysis of the UCI "Sonar, Mines vs. Rocks" data set:
# downloads the CSV, profiles each column's type, computes summary
# statistics and percentiles for column 3, tallies the class labels in
# column 60, and draws a normal probability (Q-Q) plot for column 3.
__author__ = "<NAME>"
import sys, urllib, scipy, pylab
import numpy as np
from urllib.request import urlopen
import matplotlib.pyplot as plot
from itertools import groupby
from collections import OrderedDict #Interestingly, without this import the "label types" list comes out as [M, R] instead of [R, M]!!!
import scipy.stats as stats
source_url = ("https://archive.ics.uci.edu/ml/machine-learning-databases/undocumented/connectionist-bench/sonar/sonar.all-data")
my_data = urlopen(source_url)
# attList becomes a list of rows, each row a list of byte-string fields.
attList = []
for line in my_data:
    row = line.strip().split(b",") #Without the b prefix this raises a TypeError: the HTTP response yields bytes, so we must split on a bytes literal.
    attList.append(row)
row_length = len(attList)
col_length = len(attList[0])
#print(row_length)
#print(col_length)
# Profile every column: count how many entries parse as a number, how many
# are non-empty strings, and how many are empty/other.
print("Col\t\t" + "Number\t\t" + "String\t\t" + "Other\t\t")
count = [0] * 3
col_count = 0
for j in range(col_length):
    for i in range(row_length):
        try:
            a = float(attList[i][j])
            if isinstance (a, float):
                count[0] += 1
        except ValueError:
            # Not numeric: distinguish non-empty strings from empty fields.
            if len (attList[i][j])>0:
                count[1] += 1
            else:
                count[2] += 1
    print(str(col_count) + "\t\t" + str(count[0]) + "\t\t" + str(count[1]) + "\t\t" + str(count[2]) + "\n")
    count = [0] * 3
    col_count +=1
#print(count)
# Summary statistics for one numeric column (column index 3).
col_for_calc = 3
col3_data = []
for row in attList:
    col3_data.append(float(row[col_for_calc]))
myArray = np.array(col3_data)
mean_col3 = myArray.mean()
s_dev_col3 = myArray.std()
#print(myArray)
print("For column 3:\n" + "Mean: " + str(mean_col3) + "\nStandart Deviation: " + str(s_dev_col3))
# Quartile boundaries (0%, 25%, 50%, 75%, 100%).
perc_eq = 4
percentile_col3 = []
for k in range(perc_eq+1):
    percentile_col3.append(np.percentile(myArray, k*100/perc_eq))
print("Boundaries for 4 Equal Percentiles: "+ str(percentile_col3))
# Decile boundaries (0%, 10%, ..., 100%).
perc_eq = 10
percentile_col3 = []
for k in range(perc_eq+1):
    percentile_col3.append(np.percentile(myArray, k*100/perc_eq))
print("Boundaries for 10 Equal Percentiles: "+ str(percentile_col3))
# Column 60 holds the class label (presumably 'R'/'M' given the [R, M]
# ordering note above -- TODO confirm against the data set description).
col_for_calc = 60
data_col60 = []
for row in attList:
    data_col60.append(row[col_for_calc].decode('ascii'))
label_types = list(set(data_col60))
copy = label_types  # NOTE(review): 'copy' shadows the stdlib module name; rename if the copy module is ever needed here.
print("Categorical Levels and Their Iterations:\n" + str(copy))
# NOTE(review): groupby counts *consecutive* runs, so these counts are only
# correct if the file is already grouped by class -- verify, or sort first.
print([len(list(group)) for key, group in groupby(data_col60)])
scipy.stats.probplot (col3_data, dist = "norm", plot = pylab)
"""scipy.stats.probplot(x, sparams=(), dist='norm', fit=True, plot=None) Generates a probability plot of sample data
against the quantiles of a specified theoretical distribution (the normal distribution by default). probplot optionally calculates a best-fit line for the data and
plots the results using Matplotlib or a given plot function."""
pylab.show()
| [
"itertools.groupby",
"numpy.array",
"numpy.percentile",
"scipy.stats.probplot",
"urllib.request.urlopen",
"pylab.show"
] | [((491, 510), 'urllib.request.urlopen', 'urlopen', (['source_url'], {}), '(source_url)\n', (498, 510), False, 'from urllib.request import urlopen\n'), ((1495, 1514), 'numpy.array', 'np.array', (['col3_data'], {}), '(col3_data)\n', (1503, 1514), True, 'import numpy as np\n'), ((2398, 2454), 'scipy.stats.probplot', 'scipy.stats.probplot', (['col3_data'], {'dist': '"""norm"""', 'plot': 'pylab'}), "(col3_data, dist='norm', plot=pylab)\n", (2418, 2454), False, 'import sys, urllib, scipy, pylab\n'), ((2809, 2821), 'pylab.show', 'pylab.show', ([], {}), '()\n', (2819, 2821), False, 'import sys, urllib, scipy, pylab\n'), ((1780, 1821), 'numpy.percentile', 'np.percentile', (['myArray', '(k * 100 / perc_eq)'], {}), '(myArray, k * 100 / perc_eq)\n', (1793, 1821), True, 'import numpy as np\n'), ((1982, 2023), 'numpy.percentile', 'np.percentile', (['myArray', '(k * 100 / perc_eq)'], {}), '(myArray, k * 100 / perc_eq)\n', (1995, 2023), True, 'import numpy as np\n'), ((2373, 2392), 'itertools.groupby', 'groupby', (['data_col60'], {}), '(data_col60)\n', (2380, 2392), False, 'from itertools import groupby\n')] |
#!/usr/bin/env python3
import os
import random
import torch
import numpy as np
from faker import Faker
from loguru import logger
from transformers import GPT2LMHeadModel, GPT2Tokenizer
# Pick the weight source from the MODEL_NAME environment variable.
MODEL_NAME = os.environ.get('MODEL_NAME', 'gpt2')
_model_key = MODEL_NAME.lower()
if _model_key == 'gpt2':
    # Plain pretrained GPT2 pulled straight from the Hugging Face hub.
    logger.debug('***** Running basic GPT2 pretrained weights *****')
    WEIGHTS_DIR = MODEL_NAME  # Just use the pretrained weights on hugging faces
elif _model_key == '4chan':
    # Fine-tuned weights; the docker container downloads them to ../weights.
    logger.debug('***** Running GPT2 trained on 3.5 years of 4Chan /pol posts (WARNING: HIGHLY OFFENSIVE OUTPUTS - YOU HAVE BEEN WARNED!!!) *****')
    WEIGHTS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../weights'))
else:
    raise ValueError('Only supported models are original gpt2 or 4chan model!')
MAX_LENGTH = int(10000)  # Hardcoded max length to avoid infinite loop
cities = {
'Arlington': 'Tarrant County',
'Austin': 'Travis County',
'Corpus Christi': 'Nueces County',
'Dallas': 'Collin County',
'El Paso': 'El Paso County',
'Fort Worth': 'Denton County',
'Garland': 'Collin County',
'Houston': 'Fort Bend County',
'Irving': 'Dallas County',
'Laredo': 'Webb County',
'Lubbock': 'Lubbock County',
'Plano': 'Collin County',
'San Antonio': 'Bexar County'
}
gop_members = [
'<NAME>', '<NAME>', '<NAME> Jr.', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>'
]
firstNames = ['Hannah', 'Olivia', 'Marcia', 'Sarah', 'Tara', 'Brooke', 'Wanda', 'Andrea', 'Julie']
lastNames = ['Morgan', 'Walker', 'Lewis', 'Butler', 'Jones', 'Barnes', 'Martin', 'Wright', 'Foster']
info_location = [
'A friend saw them', 'I work at the clinic', 'I know his secretary', 'He told me at the club', 'The police report', 'His wife told me'
]
# TX IPs gathered from here: https://www.xmyip.com/ip-addresses/united--states/texas
ips = [
"66.136.125.", # Abilene
"64.46.160.", # Addison
"65.254.4.", # Addison
"24.27.72.", # Allen
"65.65.132.", # Alpine
"64.243.53.", # Alvarado
"50.175.228.", # Alvin
"50.175.229.", # Alvin
"50.26.131.", # Amarillo
"12.163.172.", # Angleton
"23.117.126.", # Arlington
"68.93.254.", # Arlington
"8.34.145.", # Austin
"12.204.50.", # Austin
"24.153.156.", # Austin
"24.155.228.", # Austin
"50.94.23.", # Austin
"66.193.112.", # Austin
"66.193.113.", # Austin
"24.173.59.", # Beaumont
"63.174.138.", # Beaumont
"66.135.178.", # Bedford
"66.169.186.", # Bedford
"38.88.154.", # Bellaire
"38.110.200.", # Bellaire
"24.219.225.", # Benbrook
"64.40.218.", # Brownwood
"64.202.78.", # Carrollton
"66.193.242.", # Cedar Park
"24.32.117.", # Clarksville
"4.12.222.", # College Station
"50.24.101.", # College Station
"50.15.108.", # Conroe
"50.21.240.", # Conroe
"50.162.52.", # Conroe
"64.158.39.", # Conroe
"66.136.21.", # Conroe
"66.170.212.", # Conroe
"64.194.96.", # Copperas Cove
"67.67.45.", # Coppell
"38.30.130.", # Corpus Christi
"67.63.164.", # Corpus Christi
"4.26.150.", # Dallas
"4.68.19.", # Dallas
"4.71.196.", # Dallas
"12.13.48.", # Dallas
"12.21.88.", # Dallas
"12.41.199.", # Dallas
"12.53.23.", # Dallas
"12.56.225.", # Dallas
"12.96.170.", # Dallas
"12.132.16.", # Dallas
"12.134.216.", # Dallas
"12.135.64.", # Dallas
"12.148.133.", # Dallas
"12.167.120.", # Dallas
"12.182.130.", # Dallas
"12.201.56.", # Dallas
"12.209.171.", # Dallas
"12.209.212.", # Dallas
"12.210.242.", # Dallas
"12.214.237.", # Dallas
"12.233.59.", # Dallas
"15.249.0.", # Dallas
"17.253.118.", # Dallas
"67.216.80.", # Dallas
"67.216.81.", # Dallas
"67.216.82.", # Dallas
"67.216.83.", # Dallas
"67.216.84.", # Dallas
"67.216.85.", # Dallas
"67.216.86.", # Dallas
"67.216.87.", # Dallas
"67.216.88.", # Dallas
"67.216.89.", # Dallas
"67.216.90.", # Dallas
"67.216.91.", # Dallas
"67.216.92.", # Dallas
"67.216.93.", # Dallas
"67.216.94.", # Dallas
"67.216.95.", # Dallas
"23.19.108.", # Dallas
"23.50.48.", # Dallas
"23.119.13.", # Dallas
"23.119.14.", # Dallas
"23.119.15.", # Dallas
"64.197.59.", # Dallas
"24.242.248.", # Dallas
"23.33.244.", # Dallas
"23.33.245.", # Dallas
"23.33.246.", # Dallas
"23.33.247.", # Dallas
"23.95.39.", # Dallas
"23.216.55.", # Dallas
"23.218.192.", # Dallas
"24.153.219.", # Dallas
"24.162.85.", # Dallas
"24.175.0.", # Dallas
"24.219.28.", # Dallas
"24.219.165.", # Dallas
"24.227.251.", # Dallas
"32.144.6.", # Dallas
"32.144.7.", # Dallas
"32.144.40.", # Dallas
"32.145.187.", # Dallas
"32.149.78.", # Dallas
"32.149.122.", # Dallas
"32.149.194.", # Dallas
"32.149.195.", # Dallas
"32.149.196.", # Dallas
"32.149.197.", # Dallas
"32.153.78.", # Dallas
"32.153.79.", # Dallas
"32.153.80.", # Dallas
"32.153.81.", # Dallas
"32.153.82.", # Dallas
"32.153.83.", # Dallas
"32.153.84.", # Dallas
"32.153.85.", # Dallas
"32.153.86.", # Dallas
"32.153.87.", # Dallas
"32.153.88.", # Dallas
"32.153.89.", # Dallas
"32.153.90.", # Dallas
"32.153.91.", # Dallas
"32.153.92.", # Dallas
"32.153.93.", # Dallas
"32.153.94.", # Dallas
"32.153.95.", # Dallas
"32.153.96.", # Dallas
"32.153.97.", # Dallas
"32.153.98.", # Dallas
"32.155.162.", # Dallas
"32.156.36.", # Dallas
"32.156.168.", # Dallas
"32.159.17.", # Dallas
"63.133.167.", # Dallas
"66.155.134.", # Dallas
"66.155.135.", # Dallas
"68.109.248.", # Dallas
"64.56.170.", # Dallas
"32.153.104.", # Dallas
"32.168.139.", # Dallas
"68.90.101.", # Dallas
"38.107.254.", # Dallas
"40.139.103.", # Dallas
"50.58.239.", # Dallas
"50.84.221.", # Dallas
"54.182.134.", # Dallas
"54.192.4.", # Dallas
"63.25.84.", # Dallas
"63.97.48.", # Dallas
"63.97.88.", # Dallas
"63.133.145.", # Dallas
"63.158.21.", # Dallas
"63.234.233.", # Dallas
"63.253.117.", # Dallas
"64.68.223.", # Dallas
"64.125.5.", # Dallas
"64.130.250.", # Dallas
"64.145.92.", # Dallas
"64.152.237.", # Dallas
"64.195.173.", # Dallas
"64.201.132.", # Dallas
"64.205.163.", # Dallas
"64.245.210.", # Dallas
"65.44.75.", # Dallas
"65.69.15.", # Dallas
"65.71.67.", # Dallas
"65.99.215.", # Dallas
"66.106.98.", # Dallas
"65.118.54.", # Dallas
"65.152.83.", # Dallas
"65.227.224.", # Dallas
"66.226.240.", # Dallas
"66.253.55.", # Dallas
"67.48.192.", # Dallas
"67.79.58.", # Dallas
"67.110.83.", # Dallas
"67.192.56.", # Dallas
"68.95.146.", # Dallas
"68.107.253.", # Dallas
"24.206.145.", # Denton
"24.219.171.", # Denton
"47.184.118.", # Denton
"47.184.119.", # Denton
"47.184.120.", # Denton
"47.184.121.", # Denton
"68.116.255.", # Denton
"67.61.107.", # DeSoto
"50.30.93." # Edinburg
"67.10.46.", # Edinburg
"67.10.91.", # Edinburg
"24.242.98.", # El Paso
"65.117.156.", # Euless
"67.78.56.", # Farmers Branch
"47.185.148.", # Flower Mound
"47.187.133.", # Flower Mound
"63.22.204.", # Flower Mound
"12.251.72.", # Fort Stockton
"12.90.92.", # Fort Worth
"12.184.253.", # Fort Worth
"12.184.254.", # Fort Worth
"12.203.146.", # Fort Worth
"12.203.147.", # Fort Worth
"12.210.27.", # Fort Worth
"12.232.221.", # Fort Worth
"24.182.108.", # Fort Worth
"24.219.224.", # Fort Worth
"24.219.163.", # Fort Worth
"47.32.223.", # Fort Worth
"50.11.19.", # Fort Worth
"50.58.27.", # Fort Worth
"63.163.54.", # Fort Worth
"68.94.54.", # Fort Worth
"68.113.154.", # Fort Worth
"50.207.209.", # Friendswood
"24.155.190.", # Frisco
"45.21.225.", # Frisco
"47.184.185.", # Garland
"47.186.248.", # Garland
"47.187.226.", # Garland
"4.13.110.", # Georgetown
"66.112.246.", # Georgetown
"47.184.202.", # Grapevine
"63.133.160.", # Grapevine
"64.134.76.", # Grapevine
"66.169.188.", # Haltom City
"66.169.189.", # Haltom City
"8.23.67.", # Houston
"12.8.32.", # Houston
"12.8.38.", # Houston
"12.43.39.", # Houston
"12.68.245.", # Houston
"12.155.35.", # Houston
"12.195.152.", # Houston
"12.198.216.", # Houston
"12.219.120.", # Houston
"16.35.45.", # Houston
"16.35.199.", # Houston
"16.160.30.", # Houston
"16.186.156.", # Houston
"24.206.72.", # Houston
"24.206.149.", # Houston
"24.206.173.", # Houston
"24.238.235.", # Houston
"34.9.77.", # Houston
"34.131.207.", # Houston
"38.100.150.", # Houston
"45.17.135.", # Houston
"45.33.171.", # Houston
"50.24.234.", # Houston
"50.162.2.", # Houston
"50.162.44.", # Houston
"50.175.75.", # Houston
"50.206.107.", # Houston
"63.123.77.", # Houston
"63.145.123.", # Houston
"63.215.186.", # Houston
"63.236.223.", # Houston
"64.199.54.", # Houston
"64.211.171.", # Houston
"65.16.135.", # Houston
"65.122.33.", # Houston
"65.124.92.", # Houston
"65.175.33.", # Houston
"65.183.47.", # Houston
"65.201.78.", # Houston
"66.3.44.", # Houston
"66.3.45.", # Houston
"66.3.46.", # Houston
"66.67.94.", # Houston
"66.78.229.", # Houston
"66.78.230.", # Houston
"66.78.231.", # Houston
"66.140.130.", # Houston
"66.161.197.", # Houston
"68.88.232.", # Houston
"66.171.6.", # Huntsville
"68.91.35.", # Hurst
"50.84.165.", # Irving
"50.84.181.", # Irving
"63.94.213.", # Irving
"64.129.174.", # Irving
"64.195.138.", # Irving
"64.195.139.", # Irving
"64.195.140.", # Irving
"64.195.141.", # Irving
"64.195.142.", # Irving
"64.195.143.", # Irving
"66.25.22.", # Irving
"66.218.96.", # Irving
"12.109.21.", # Katy
"64.29.184.", # Katy
"64.244.179.", # Keller
"67.76.51.", # Keller
"24.243.227.", # Killeen
"24.32.224.", # Kingwood
"66.68.164.", # Kyle
"66.220.139.", # La Grange
"68.88.193.", # Lancaster
"64.6.50.", # Laredo
"24.28.20.", # Leander
"47.187.76.", # Lewisville
"24.204.52.", # Longview
"66.185.67.", # Longview
"12.2.116.", # Lubbock
"12.38.125.", # Lubbock
"50.94.139.", # Lubbock
"67.22.223.", # Lubbock
"38.114.200.", # Lufkin
"47.219.200.", # Lufkin
"12.218.97.", # McAllen
"24.243.98.", # McAllen
"67.10.39.", # McAllen
"24.243.150.", # McAllen
"24.243.151.", # McAllen
"24.243.152.", # McAllen
"38.103.227.", # McAllen
"67.10.80.", # McAllen
"47.182.27.", # McKinney
"66.190.64.", # Mineral Wells
"50.30.144.", # Missouri City
"66.76.77.", # Normangee
"24.32.137.", # Odessa
"50.252.46.", # Pasadena
"67.219.174.", # Perryton
"24.242.89.", # Pflugerville
"67.10.20.", # Pharr
"12.97.234.", # Plano
"12.190.83.", # Plano
"23.62.225.", # Plano
"24.173.213.", # Plano
"47.185.248.", # Plano
"50.84.81.", # Plano
"50.84.110.", # Plano
"65.42.136.", # Plano
"65.69.239.", # Plano
"65.71.223.", # Plano
"66.138.79.", # Plano
"66.140.20.", # Plano
"66.140.197.", # Plano
"66.141.151.", # Plano
"66.143.7.", # Plano
"67.65.12.", # Plano
"67.66.13.", # Plano
"68.20.41.", # Plano
"68.20.53.", # Plano
"68.22.119.", # Plano
"68.72.56.", # Plano
"68.88.24.", # Plano
"68.88.169.", # Plano
"68.90.204.", # Plano
"68.93.19.", # Plano
"68.93.208.", # Plano
"23.113.179.", # Richardson
"23.123.121.", # Richardson
"23.126.17.", # Richardson
"24.27.103.", # Richardson
"45.23.148.", # Richardson
"47.186.44.", # Richardson
"47.186.233.", # Richardson
"50.84.237.", # Richardson
"63.199.94.", # Richardson
"63.201.89.", # Richardson
"63.203.212.", # Richardson
"63.203.213.", # Richardson
"63.204.90.", # Richardson
"63.204.168.", # Richardson
"63.207.220.", # Richardson
"64.109.192.", # Richardson
"64.123.188.", # Richardson
"64.148.35.", # Richardson
"64.149.192.", # Richardson
"64.217.8.", # Richardson
"64.218.64.", # Richardson
"64.252.212.", # Richardson
"64.252.213.", # Richardson
"64.252.214.", # Richardson
"64.252.215.", # Richardson
"64.252.216.", # Richardson
"64.252.217.", # Richardson
"64.252.218.", # Richardson
"64.252.219.", # Richardson
"64.252.220.", # Richardson
"64.252.221.", # Richardson
"64.252.222.", # Richardson
"64.252.223.", # Richardson
"64.252.224.", # Richardson
"64.252.225.", # Richardson
"64.252.226.", # Richardson
"64.252.227.", # Richardson
"64.252.228.", # Richardson
"64.252.229.", # Richardson
"64.252.230.", # Richardson
"64.252.231.", # Richardson
"64.252.232.", # Richardson
"64.252.233.", # Richardson
"64.252.234.", # Richardson
"64.252.235.", # Richardson
"64.252.236.", # Richardson
"64.252.237.", # Richardson
"64.252.238.", # Richardson
"65.64.221.", # Richardson
"65.64.222.", # Richardson
"65.64.223.", # Richardson
"65.65.49.", # Richardson
"65.65.133.", # Richardson
"65.68.3.", # Richardson
"65.68.4.", # Richardson
"65.69.103.", # Richardson
"65.70.92.", # Richardson
"65.70.203.", # Richardson
"66.73.64.", # Richardson
"66.73.74.", # Richardson
"66.136.184.", # Richardson
"66.136.185.", # Richardson
"66.136.186.", # Richardson
"66.136.187.", # Richardson
"66.137.185.", # Richardson
"66.138.90.", # Richardson
"66.138.5.", # Richardson
"66.142.202.", # Richardson
"66.226.197.", # Richardson
"67.38.82.", # Richardson
"67.39.101.", # Richardson
"67.64.87.", # Richardson
"67.67.134.", # Richardson
"67.115.107.", # Richardson
"67.117.108.", # Richardson
"67.121.40.", # Richardson
"67.122.104.", # Richardson
"67.123.146.", # Richardson
"67.127.68.", # Richardson
"68.23.31.", # Richardson
"68.72.0.", # Richardson
"68.72.114.", # Richardson
"68.72.157.", # Richardson
"68.72.158.", # Richardson
"68.89.77.", # Richardson
"68.91.19.", # Richardson
"68.94.48.", # Richardson
"68.95.210.", # Richardson
"68.122.157.", # Richardson
"23.139.64.", # Rio Grande City
"63.174.141.", # Rocksprings
"66.235.81.", # Rosenberg
"52.144.99.", # Round Rock
"47.184.162.", # Sachse
"8.9.196.", # San Antonio
"12.7.34.", # San Antonio
"12.7.35.", # San Antonio
"12.27.88.", # San Antonio
"12.190.120.", # San Antonio
"12.207.43.", # San Antonio
"12.211.20.", # San Antonio
"15.105.28.", # San Antonio
"15.105.182.", # San Antonio
"15.109.33.", # San Antonio
"15.109.99.", # San Antonio
"15.110.110.", # San Antonio
"15.114.210.", # San Antonio
"15.115.59.", # San Antonio
"15.116.44.", # San Antonio
"15.117.166.", # San Antonio
"15.118.122.", # San Antonio
"15.118.179.", # San Antonio
"15.118.251.", # San Antonio
"15.120.12.", # San Antonio
"15.120.71.", # San Antonio
"15.120.150.", # San Antonio
"15.120.172.", # San Antonio
"15.121.102.", # San Antonio
"15.122.12.", # San Antonio
"15.122.23.", # San Antonio
"15.126.8.", # San Antonio
"15.127.180.", # San Antonio
"15.128.234.", # San Antonio
"15.128.235.", # San Antonio
"15.128.254.", # San Antonio
"15.129.7.", # San Antonio
"15.129.118.", # San Antonio
"15.131.196.", # San Antonio
"15.131.197.", # San Antonio
"15.131.198.", # San Antonio
"15.131.199.", # San Antonio
"15.131.200.", # San Antonio
"15.132.18.", # San Antonio
"15.132.71.", # San Antonio
"15.132.72.", # San Antonio
"15.133.222.", # San Antonio
"15.134.233.", # San Antonio
"15.134.234.", # San Antonio
"15.135.133.", # San Antonio
"15.135.219.", # San Antonio
"15.136.111.", # San Antonio
"15.137.122.", # San Antonio
"15.137.172.", # San Antonio
"15.138.0.", # San Antonio
"15.138.1.", # San Antonio
"15.140.41.", # San Antonio
"15.141.27.", # San Antonio
"15.142.164.", # San Antonio
"15.143.78.", # San Antonio
"15.143.175.", # San Antonio
"15.145.145.", # San Antonio
"15.145.242.", # San Antonio
"15.146.97.", # San Antonio
"15.149.7.", # San Antonio
"15.149.217.", # San Antonio
"15.149.233.", # San Antonio
"15.150.12.", # San Antonio
"15.150.168.", # San Antonio
"15.150.169.", # San Antonio
"15.152.9.", # San Antonio
"15.153.121.", # San Antonio
"15.153.133.", # San Antonio
"15.154.136.", # San Antonio
"15.154.137.", # San Antonio
"15.155.5.", # San Antonio
"15.155.249.", # San Antonio
"15.156.247.", # San Antonio
"15.156.248.", # San Antonio
"15.157.163.", # San Antonio
"15.158.33.", # San Antonio
"15.158.179.", # San Antonio
"15.159.219.", # San Antonio
"15.160.97.", # San Antonio
"15.160.98.", # San Antonio
"15.160.99.", # San Antonio
"15.160.200.", # San Antonio
"15.160.201.", # San Antonio
"15.160.202.", # San Antonio
"15.161.146.", # San Antonio
"15.161.233.", # San Antonio
"15.162.156.", # San Antonio
"15.162.246.", # San Antonio
"15.162.247.", # San Antonio
"15.162.248.", # San Antonio
"15.162.249.", # San Antonio
"15.162.231.", # San Antonio
"15.165.19.", # San Antonio
"15.165.122.", # San Antonio
"15.167.62.", # San Antonio
"15.168.166.", # San Antonio
"15.169.34.", # San Antonio
"15.169.145.", # San Antonio
"15.169.231.", # San Antonio
"15.170.39.", # San Antonio
"15.170.117.", # San Antonio
"15.173.25.", # San Antonio
"15.173.118.", # San Antonio
"15.173.231.", # San Antonio
"15.174.40.", # San Antonio
"15.176.53.", # San Antonio
"15.176.79.", # San Antonio
"15.176.80.", # San Antonio
"15.176.81.", # San Antonio
"15.176.129.", # San Antonio
"15.177.123.", # San Antonio
"15.176.164.", # San Antonio
"15.177.176.", # San Antonio
"15.177.254.", # San Antonio
"15.178.149.", # San Antonio
"15.180.1.", # San Antonio
"15.180.224.", # San Antonio
"15.181.151.", # San Antonio
"15.181.152.", # San Antonio
"15.181.177.", # San Antonio
"15.183.87.", # San Antonio
"15.183.211.", # San Antonio
"15.184.201.", # San Antonio
"15.188.81.", # San Antonio
"15.188.237.", # San Antonio
"15.189.87.", # San Antonio
"15.189.88.", # San Antonio
"15.190.90.", # San Antonio
"15.190.132.", # San Antonio
"15.191.51.", # San Antonio
"15.191.124.", # San Antonio
"15.193.69.", # San Antonio
"15.193.70.", # San Antonio
"15.193.183.", # San Antonio
"15.193.203.", # San Antonio
"15.204.130.", # San Antonio
"15.204.186.", # San Antonio
"15.208.102.", # San Antonio
"15.209.138.", # San Antonio
"15.211.169.", # San Antonio
"15.213.214.", # San Antonio
"15.213.241.", # San Antonio
"15.214.133.", # San Antonio
"15.214.237.", # San Antonio
"15.216.72.", # San Antonio
"15.216.199.", # San Antonio
"15.219.34.", # San Antonio
"12.219.40.", # San Antonio
"15.221.80.", # San Antonio
"15.224.59.", # San Antonio
"15.224.247.", # San Antonio
"15.225.148.", # San Antonio
"15.226.90.", # San Antonio
"15.227.75.", # San Antonio
"15.227.214.", # San Antonio
"15.234.104.", # San Antonio
"15.235.202.", # San Antonio
"15.235.203.", # San Antonio
"15.236.92.", # San Antonio
"15.237.79.", # San Antonio
"15.239.64.", # San Antonio
"15.243.228.", # San Antonio
"15.243.229.", # San Antonio
"15.243.241.", # San Antonio
"15.244.168.", # San Antonio
"15.248.37.", # San Antonio
"15.248.238.", # San Antonio
"15.250.151.", # San Antonio
"15.251.2.", # San Antonio
"15.251.230.", # San Antonio
"15.252.43.", # San Antonio
"15.252.185.", # San Antonio
"15.255.94.", # San Antonio
"15.255.200.", # San Antonio
"15.255.204.", # San Antonio
"40.141.126.", # San Antonio
"50.84.228.", # San Antonio
"50.95.50.", # San Antonio
"52.239.178.", # San Antonio
"64.129.98.", # San Antonio
"64.215.241.", # San Antonio
"67.65.14.", # San Antonio
"67.155.93.", # San Antonio
"68.98.252.", # San Antonio
"24.155.227.", # San Marcos
"45.21.35." # Schertz
"64.134.224.", # San Marcos
"66.90.132.", # San Marcos
"38.65.97.", # Schertz
"45.21.35.", # Schertz
"67.11.166.", # Schertz
"67.78.77.", # Seguin
"67.179.27.", # Seguin
"47.182.60.", # Sherman
"64.22.112.", # Spring
"65.174.248.", # Stafford
"67.21.188.", # Stephenville
"12.205.32.", # Sugar Land
"50.162.51.", # Sugar Land
"50.171.38.", # Sugar Land
"64.61.53.", # Sugar Land
"24.162.122.", # Temple
"24.119.145.", # Texarkana
"23.125.229.", # Tyler
"66.76.117.", # Tyler
"66.76.230.", # Tyler
"67.216.244.", # Tyler
"68.69.62.", # Tyler
"24.32.200.", # Vernon
"66.76.84.", # Victoria
"23.123.184.", # Waco
"65.65.52.", # Waco
"12.94.58.", # Weatherford
"66.69.161.", # Wichita Falls
"50.56.36.", # Windcrest
]
# random element from each list
def sign_up_page():
    """Stub -- not implemented yet; always raises ``NotImplementedError``."""
    raise NotImplementedError()
def set_random_seed(seed, n_gpu):
    """Make sampling reproducible.

    Seeds torch's and numpy's global RNGs with ``seed``; when ``n_gpu`` is
    positive, also seeds every CUDA device.
    """
    torch.manual_seed(seed)
    np.random.seed(seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def adjust_seq_length_to_model(length, max_sequence_length):
    """Clamp a requested generation length to what the model supports.

    A negative ``length`` means "use the model maximum" (or the module-wide
    ``MAX_LENGTH`` cap when the model reports no positive limit); a positive
    ``length`` is capped at ``max_sequence_length`` when the model has one.
    """
    if max_sequence_length > 0:
        if length < 0 or length > max_sequence_length:
            length = max_sequence_length  # No generation bigger than model size
    elif length < 0:
        length = MAX_LENGTH  # avoid infinite loop
    return length
def generate_text(prompt_text: str, k=50, p=0.9, seq_length=150, seed=None, temperature=1.0, num_return_sequences=1):
    """Create synthetic text sequences using a pretrained model.

    Args:
        prompt_text: Text the generated sequences should continue from.
        k: Top-k sampling cutoff.
        p: Nucleus (top-p) sampling cutoff.
        seq_length: Number of new tokens to generate (clamped to the model's limit).
        seed: RNG seed; a random one is drawn when None.
        temperature: Softmax temperature for sampling.
        num_return_sequences: How many independent sequences to generate.

    Returns:
        list[str]: ``num_return_sequences`` strings, each beginning with ``prompt_text``.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = 0 if device == 'cpu' else torch.cuda.device_count()
    repetition_penalty = 1.0  # Primarily used for CTRL model, so hardcoding this value
    stop_token = "<EOS>"
    seed = seed if seed is not None else np.random.randint(0, 1000000)
    set_random_seed(seed, n_gpu)

    # Initialize the model and tokenizer
    model_class, tokenizer_class = (GPT2LMHeadModel, GPT2Tokenizer)
    tokenizer = tokenizer_class.from_pretrained(WEIGHTS_DIR)
    model = model_class.from_pretrained(WEIGHTS_DIR)
    model.to(device)

    seq_length = adjust_seq_length_to_model(seq_length, max_sequence_length=model.config.max_position_embeddings)
    encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=True, return_tensors="pt")
    encoded_prompt = encoded_prompt.to(device)

    # An empty prompt means the model samples unconditionally.
    if encoded_prompt.size()[-1] == 0:
        input_ids = None
    else:
        input_ids = encoded_prompt

    output_sequences = model.generate(
        input_ids=input_ids,
        max_length=seq_length + len(encoded_prompt[0]),
        temperature=temperature,
        top_k=k,
        top_p=p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        num_return_sequences=num_return_sequences,
    )

    # Remove the batch dimension when returning multiple sequences
    if len(output_sequences.shape) > 2:
        output_sequences.squeeze_()

    generated_sequences = []
    for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
        generated_sequence = generated_sequence.tolist()
        # Decode text
        text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
        # Truncate at the stop token only when it is actually present.
        # BUGFIX: str.find returns -1 when the token is absent, and the old
        # `text[: text.find(stop_token) ...]` slice became text[:-1], silently
        # dropping the final character of every generated sequence.
        if stop_token:
            stop_index = text.find(stop_token)
            if stop_index != -1:
                text = text[:stop_index]
        # Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing
        total_sequence = (
            prompt_text + text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)):]
        )
        generated_sequences.append(total_sequence)

    return generated_sequences
def create_anonymous_form_batch(prompt_text='Dear <NAME>,', batch_size=5):
    """Build ``batch_size`` form submissions combining generated text with fake Texan identities."""
    # Used for fake name generation
    faker = Faker(['en_US', 'es_MX'])
    sequences = generate_text(prompt_text, num_return_sequences=batch_size)
    # NOTE(review): two entries in the module-level `ips` list ("50.30.93." and
    # "45.21.35.") are missing a trailing comma, so Python implicitly
    # concatenates them with the next literal, producing malformed addresses --
    # fix the list upstream.
    batch = []
    for text in sequences[:batch_size]:
        city, county = random.choice(list(cities.items()))
        batch.append({
            'textarea-1': text,
            'text-1': random.choice(info_location),
            'text-6': 'Dr. ' + faker.name(),
            'text-2': city,
            'text-3': 'Texas',
            'text-4': faker.zipcode_in_state('TX'),
            'text-5': county,
            'hidden-1': random.choice(ips) + str(random.randint(0, 255)),
            'checkbox-1[]': 'no',
        })
    return batch
def _test_form_generator():
    """Smoke test: build a small batch addressed to a random GOP member and log it."""
    target = random.choice(gop_members)
    form_batch = create_anonymous_form_batch(f'Dear {target},', batch_size=3)
    logger.info(form_batch)
# Script entry point: run the smoke test when executed directly.
if __name__ == "__main__":
    _test_form_generator()
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"random.choice",
"loguru.logger.debug",
"loguru.logger.info",
"os.environ.get",
"torch.cuda.device_count",
"faker.Faker",
"os.path.dirname",
"numpy.random.randint",
"torch.cuda.is_available",
"numpy.random.seed",
"random.randint"
] | [((202, 238), 'os.environ.get', 'os.environ.get', (['"""MODEL_NAME"""', '"""gpt2"""'], {}), "('MODEL_NAME', 'gpt2')\n", (216, 238), False, 'import os\n'), ((277, 342), 'loguru.logger.debug', 'logger.debug', (['"""***** Running basic GPT2 pretrained weights *****"""'], {}), "('***** Running basic GPT2 pretrained weights *****')\n", (289, 342), False, 'from loguru import logger\n'), ((21550, 21570), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (21564, 21570), True, 'import numpy as np\n'), ((21575, 21598), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (21592, 21598), False, 'import torch\n'), ((24632, 24657), 'faker.Faker', 'Faker', (["['en_US', 'es_MX']"], {}), "(['en_US', 'es_MX'])\n", (24637, 24657), False, 'from faker import Faker\n'), ((25494, 25517), 'loguru.logger.info', 'logger.info', (['form_batch'], {}), '(form_batch)\n', (25505, 25517), False, 'from loguru import logger\n'), ((545, 698), 'loguru.logger.debug', 'logger.debug', (['"""***** Running GPT2 trained on 3.5 years of 4Chan /pol posts (WARNING: HIGHLY OFFENSIVE OUTPUTS - YOU HAVE BEEN WARNED!!!) *****"""'], {}), "(\n '***** Running GPT2 trained on 3.5 years of 4Chan /pol posts (WARNING: HIGHLY OFFENSIVE OUTPUTS - YOU HAVE BEEN WARNED!!!) 
*****'\n )\n", (557, 698), False, 'from loguru import logger\n'), ((21625, 21657), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (21651, 21657), False, 'import torch\n'), ((22318, 22343), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (22341, 22343), False, 'import torch\n'), ((22500, 22529), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000000)'], {}), '(0, 1000000)\n', (22517, 22529), True, 'import numpy as np\n'), ((22242, 22267), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (22265, 22267), False, 'import torch\n'), ((24941, 24969), 'random.choice', 'random.choice', (['info_location'], {}), '(info_location)\n', (24954, 24969), False, 'import random\n'), ((25388, 25414), 'random.choice', 'random.choice', (['gop_members'], {}), '(gop_members)\n', (25401, 25414), False, 'import random\n'), ((736, 761), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (751, 761), False, 'import os\n'), ((25179, 25197), 'random.choice', 'random.choice', (['ips'], {}), '(ips)\n', (25192, 25197), False, 'import random\n'), ((25204, 25226), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (25218, 25226), False, 'import random\n')] |
"""
(C) Copyright 2019 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import warnings
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.impute import SimpleImputer as skImputer
from ..utils.stat_utils import which_columns_are_binary
from causallib.estimation import Matching
# TODO: Entire module might be redundant, now that scikit-learn supports missing values
# in its preprocessing: https://scikit-learn.org/stable/whats_new/v0.20.html#highlights
# The only support now needed is:
# 1) Transforming from numpy-array to pandas DataFrame in a pipeline, before specifying a causal model.
# 2) Possible generic support for causallib's additional `a` parameter, along with `X` and `y`.
class StandardScaler(BaseEstimator, TransformerMixin):
    """NaN-tolerant standardization of continuous columns.

    Applies ``X = (X - X.mean()) / X.std()`` to every non-binary column of a
    DataFrame, leaving binary columns untouched and (optionally) ignoring NaNs
    when estimating the statistics.
    """

    def __init__(self, with_mean=True, with_std=True, ignore_nans=True):
        """
        Args:
            with_mean (bool): Whether to center the data before scaling.
            with_std (bool): Whether to scale the data to unit variance.
            ignore_nans (bool): Whether to ignore NaNs during calculation.
        """
        self.with_mean = with_mean
        self.with_std = with_std
        self.ignore_nans = ignore_nans

    def fit(self, X, y=None):
        """Estimate the per-column mean and std used for later scaling.

        Args:
            X (pd.DataFrame): Data whose continuous columns are profiled along
                the features axis (axis=0).
            y: Passthrough for ``Pipeline`` compatibility.

        Returns:
            StandardScaler: A fitted standard-scaler.
        """
        cont_cols = self._get_relevant_features(X)
        self._feature_mask_ = cont_cols
        # When centering/scaling is disabled, use identity statistics so that
        # transform() remains a no-op for that component.
        self.mean_ = (X.loc[:, self._feature_mask_].mean(skipna=self.ignore_nans)
                      if self.with_mean else pd.Series(0, index=cont_cols))
        self.scale_ = (X.loc[:, self._feature_mask_].std(skipna=self.ignore_nans)
                       if self.with_std else pd.Series(1, index=cont_cols))
        return self

    def transform(self, X, y='deprecated'):
        """Center and scale the continuous columns of ``X``.

        Args:
            X (pd.DataFrame): array-like, shape [n_samples, n_features].
            y: Deprecated; kept only for ``Pipeline`` compatibility.

        Returns:
            pd.DataFrame: Scaled dataset.
        """
        # Mirrors the (pre-0.21) scikit-learn deprecation behaviour for `y`:
        if not isinstance(y, str) or y != 'deprecated':
            warnings.warn("The parameter y on transform() is deprecated since 0.19 and will be removed in 0.21",
                          DeprecationWarning)
        X = X.copy()  # type: pd.DataFrame
        if self.with_mean:
            X.loc[:, self._feature_mask_] -= self.mean_
        if self.with_std:
            X.loc[:, self._feature_mask_] /= self.scale_
        return X

    def inverse_transform(self, X):
        """Map scaled data back to the original representation.

        Args:
            X (pd.DataFrame): array-like, shape [n_samples, n_features].

        Returns:
            pd.DataFrame: Un-scaled dataset.
        """
        X = X.copy()  # type: pd.DataFrame
        # Invert in the opposite order of transform(): re-scale, then re-center.
        if self.with_std:
            X.loc[:, self._feature_mask_] *= self.scale_
        if self.with_mean:
            X.loc[:, self._feature_mask_] += self.mean_
        return X

    @staticmethod
    def _get_relevant_features(X):
        """Select the columns the transformation should operate on.

        Args:
            X (pd.DataFrame): array-like, shape [n_samples, n_features].

        Returns:
            pd.Index: Names of the non-binary (continuous) columns.
        """
        # FIXME utilize sklearn.utils.multiclass.type_of_target()
        return X.columns[~which_columns_are_binary(X)]
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""
Scales features to 0-1, allowing for NaNs.
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
"""
def __init__(self, only_binary_features=True, ignore_nans=True):
"""
Args:
only_binary_features (bool): Whether to apply only on binary features or across all.
ignore_nans (bool): Whether to ignore NaNs during calculation.
"""
self.only_binary_features = only_binary_features
self.ignore_nans = ignore_nans
def fit(self, X, y=None):
"""
Compute the minimum and maximum to be used for later scaling.
Args:
X (pd.DataFrame): array-like, shape [n_samples, n_features] The data used to compute the mean and standard
deviation used for later scaling along the features axis (axis=0).
y: Passthrough for ``Pipeline`` compatibility.
Returns:
MinMaxScaler: a fitted MinMaxScaler
"""
feature_mask = self._get_relevant_features(X)
self._feature_mask_ = feature_mask
self.min_ = X.min(skipna=self.ignore_nans)[feature_mask]
self.max_ = X.max(skipna=self.ignore_nans)[feature_mask]
self.scale_ = self.max_ - self.min_
# if feature_mask.size != X.shape[1]:
# self.scale_[~feature_mask] = 1
# self.min_[~feature_mask] = 0
# self.max_[~feature_mask] = 1
return self
def inverse_transform(self, X):
"""
Scaling chosen features of X to the range of 0 - 1.
Args:
X (pd.DataFrame): array-like, shape [n_samples, n_features] Input data that will be transformed.
Returns:
pd.DataFrame: array-like, shape [n_samples, n_features]. Transformed data.
"""
# No warning for y, since there's no y variable.
# This correpsonds to function signature in scikit-learn's code base
X = X.copy() # type: pd.DataFrame
X.loc[:, self._feature_mask_] *= self.scale_
X.loc[:, self._feature_mask_] += self.min_
return X
def transform(self, X):
"""
Undo the scaling of X according to feature_range.
Args:
X (pd.DataFrame): array-like, shape [n_samples, n_features] Input data that will be transformed.
Returns:
pd.DataFrame: array-like, shape [n_samples, n_features]. Transformed data.
"""
X = X.copy() # type: pd.DataFrame
X.loc[:, self._feature_mask_] -= self.min_
X.loc[:, self._feature_mask_] /= self.scale_
return X
def _get_relevant_features(self, X):
"""
Returns a binary mask specifying the features to operate on (either all features or binary features if
self.only_binary_features is True.
Args:
X (pd.DataFrame): array-like, shape [n_samples, n_features] The data used to compute the mean and standard
deviation used for later scaling along the features axis (axis=0).
Returns:
pd.Index: a binary mask specifying which features to apply the transformation on.
"""
if self.only_binary_features:
feature_mask = which_columns_are_binary(X)
else:
feature_mask = np.ones(X.shape[1], dtype=bool)
return feature_mask
class Imputer(skImputer):
def transform(self, X):
X_transformed = super().transform(X.values)
X_transformed = pd.DataFrame(
X_transformed, index=X.index, columns=X.columns)
return X_transformed
class PropensityTransformer(BaseEstimator, TransformerMixin):
def __init__(self, learner, include_covariates=False):
"""Transform covariates by adding/replacing with the propensity score.
Args:
learner (sklearn.estimator) : A learner implementing `fit` and
`predict_proba` to use for predicting the propensity score.
include_covariates (bool) : Whether to return the original
covariates alongside the "propensity" column.
"""
self.include_covariates = include_covariates
self.learner = learner
def fit(self, X, a):
self.learner.fit(X, a)
return self
def transform(self, X, treatment_values=None):
"""Append propensity or replace covariates with propensity.
Args:
X (pd.DataFrame): A DataFrame of samples to transform. This will be
input to the learner trained by fit. If the columns are
different, the results will not be valid.
treatment_values (Any | None): A desired value/s to extract
propensity to (i.e. probabilities to what treatment value
should be calculated). If not specified, then the maximal
treatment value is chosen. This is since the usual case is of
treatment (A=1) control (A=0) setting.
Returns:
pd.DataFrame : DataFrame with a "propensity" column.
If "include_covariates" is `True`, it will include all of the
original features plus "propensity", else it will only have the
"propensity" column.
"""
treatment_values = 1 if treatment_values is None else treatment_values
res = self.learner.predict_proba(X)[:, treatment_values]
res = pd.DataFrame(res, index=X.index, columns=["propensity"])
if self.include_covariates:
res = X.join(res)
return res
class MatchingTransformer(object):
def __init__(
self,
propensity_transform=None,
caliper=None,
with_replacement=True,
n_neighbors=1,
matching_mode="both",
metric="mahalanobis",
knn_backend="sklearn",
):
"""Transform data by removing poorly matched samples.
Args:
propensity_transform (causallib.transformers.PropensityTransformer):
an object for data preprocessing which adds the propensity
score as a feature (default: None)
caliper (float) : maximal distance for a match to be accepted. If
not defined, all matches will be accepted. If defined, some
samples may not be matched and their outcomes will not be
estimated. (default: None)
with_replacement (bool): whether samples can be used multiple times
for matching. If set to False, the matching process will optimize
the linear sum of distances between pairs of treatment and
control samples and only `min(N_treatment, N_control)` samples
will be estimated. Matching with no replacement does not make
use of the `fit` data and is therefore not implemented for
out-of-sample data (default: True)
n_neighbors (int) : number of nearest neighbors to include in match.
Must be 1 if `with_replacement` is `False.` If larger than 1, the
estimate is calculated using the `regress_agg_function` or
`classify_agg_function` across the `n_neighbors`. Note that when
the `caliper` variable is set, some samples will have fewer than
`n_neighbors` matches. (default: 1).
matching_mode (str) : Direction of matching: `treatment_to_control`,
`control_to_treatment` or `both` to indicate which set should
be matched to which. All sets are cross-matched in `match`
and when `with_replacement` is `False` all matching modes
coincide. With replacement there is a difference.
metric (str) : Distance metric string for calculating distance
between samples. Note: if an external built `knn_backend`
object with a different metric is supplied, `metric` needs to
be changed to reflect that, because `Matching` will set its
inverse covariance matrix if "mahalanobis" is set. (default:
"mahalanobis", also supported: "euclidean")
knn_backend (str or callable) : Backend to use for nearest neighbor
search. Options are "sklearn" or a callable which returns an
object implementing `fit`, `kneighbors` and `set_params`
like the sklearn `NearestNeighbors` object. (default: "sklearn").
"""
self.matching = Matching(
propensity_transform=propensity_transform,
caliper=caliper,
with_replacement=with_replacement,
n_neighbors=n_neighbors,
matching_mode=matching_mode,
metric=metric,
knn_backend=knn_backend,
)
def fit(self, X, a, y):
"""Fit data to transform
This function loads the data for matching and must be called before
`transform`. For convenience, consider using `fit_transform`.
Args:
X (pd.DataFrame): DataFrame of shape (n,m) containing m covariates
for n samples.
a (pd.Series): Series of shape (n,) containing discrete treatment
values for the n samples.
y (pd.Series): Series of shape (n,) containing outcomes for
the n samples.
Returns:
self (MatchingTransformer) : Fitted object
"""
self.matching.fit(X, a, y)
return self
def transform(self, X, a, y):
"""Transform data by restricting it to samples which are matched
Following a matching process, not all of the samples will find matches.
Transforming the data by only allowing samples in treatment that have
close matches in control, or in control that have close matches in
treatment can make other causal methods more effective. This function
will call `match` on the underlying Matching object.
The attribute `matching_mode` changes the behavior of this function.
If set to `control_to_treatment` each control will attempt to find a
match among the treated, hence the transformed data will have a maximum
size of N_c + min(N_c,N_t).
If set to `treatment_to_control`, each treatment will attempt to find a
match among the control and the transformed data will have a maximum
size of N_t + min(N_c,N_t).
If set to `both`, both matching operations will be executed and if a
sample succeeds in either direction it will be included, hence the
maximum size of the transformed data will be `len(X)`.
If `with_replacement` is `False`, `matching_mode` does not change the
behavior. There will be up to `min(N_c,N_t)` samples in
the returned DataFrame, regardless.
Args:
X (pd.DataFrame): DataFrame of shape (n,m) containing m covariates
for n samples.
a (pd.Series): Series of shape (n,) containing discrete treatment
values for the n samples.
y (pd.Series): Series of shape (n,) containing outcomes for
the n samples.
Raises:
NotImplementedError: Raised if a value of attribute `matching_mode`
other than the supported values is set.
Returns:
Xm (pd.DataFrame): Covariates of samples that were matched
am (pd.Series): Treatment values of samples that were matched
ym (pd.Series): Outcome values of samples that were matched
"""
self.matching.match(X, a, use_cached_result=True)
matched_sample_indices = self.find_indices_of_matched_samples(X, a)
X = X.loc[matched_sample_indices]
a = a.loc[matched_sample_indices]
y = y.loc[matched_sample_indices]
return X, a, y
def find_indices_of_matched_samples(self, X, a):
"""Find indices of samples which matched successfully.
Given a DataFrame of samples `X` and treatment assignments `a`, return
a list of indices of samples which matched successfully.
Args:
X (pd.DataFrame): Covariates of samples
a (pd.Series): Treatment assignments
Returns:
pd.Series: indices of matched samples to be passed to `X.loc`
"""
matching_weights = self.matching.matches_to_weights()
matches_mask = self._filter_matching_weights_by_mode(matching_weights)
return matches_mask
def _filter_matching_weights_by_mode(self, matching_weights):
if self.matching.matching_mode == "control_to_treatment":
matches_mask = matching_weights.control_to_treatment
elif self.matching.matching_mode == "treatment_to_control":
matches_mask = matching_weights.treatment_to_control
elif self.matching.matching_mode == "both":
matches_mask = matching_weights.sum(axis=1)
else:
raise NotImplementedError("Matching mode {} not supported".format(
self.matching.matching_mode))
matches_mask = matches_mask.astype(bool)
return matches_mask
def fit_transform(self, X, a, y):
"""Match data and return matched subset.
This is a convenience method, calling `fit` and `transform` at once.
For details, see documentation of each function.
Args:
X (pd.DataFrame): DataFrame of shape (n,m) containing m covariates
for n samples.
a (pd.Series): Series of shape (n,) containing discrete treatment
values for the n samples.
y (pd.Series): Series of shape (n,) containing outcomes for
the n samples.
Returns:
Xm (pd.DataFrame): Covariates of samples that were matched
am (pd.Series): Treatment values of samples that were matched
ym (pd.Series): Outcome values of samples that were matched
"""
self.fit(X, a, y)
return self.transform(X, a, y)
def set_params(self, **kwargs):
"""Set parameters of matching engine. Supported parameters are:
Keyword Args:
propensity_transform (causallib.transformers.PropensityTransformer):
an object for data preprocessing which adds the propensity
score as a feature (default: None)
caliper (float) : maximal distance for a match to be accepted
(default: None)
with_replacement (bool): whether samples can be used multiple times
for matching (default: True)
n_neighbors (int) : number of nearest neighbors to include in match.
Must be 1 if `with_replacement` is False (default: 1).
matching_mode (str) : Direction of matching: `treatment_to_control`,
`control_to_treatment` or `both` to indicate which set should
be matched to which. All sets are cross-matched in `match`
and without replacement there is no difference in outcome,
but with replacement there is a difference and it impacts
the results of `transform`.
metric (str) : Distance metric string for calculating
distance between samples (default: "mahalanobis",
also supported: "euclidean")
knn_backend (str or callable) : Backend to use for nearest neighbor
search. Options are "sklearn" or a callable which returns an
object implementing `fit`, `kneighbors` and `set_params` like
the sklearn `NearestNeighbors` object. (default: "sklearn").
Returns:
self: (MatchingTransformer) object with new parameters set
"""
supported_params = [
"propensity_transform",
"caliper",
"n_neighbors",
"metric",
"with_replacement",
"matching_mode",
"knn_backend",
]
for key, value in kwargs.items():
if key in supported_params:
self.matching.__setattr__(key, value)
else:
warnings.warn(
"Received unsupported parameter: {}. Nothing done.".format(key))
return self
| [
"pandas.Series",
"numpy.ones",
"causallib.estimation.Matching",
"pandas.DataFrame",
"warnings.warn"
] | [((8827, 8888), 'pandas.DataFrame', 'pd.DataFrame', (['X_transformed'], {'index': 'X.index', 'columns': 'X.columns'}), '(X_transformed, index=X.index, columns=X.columns)\n', (8839, 8888), True, 'import pandas as pd\n'), ((10764, 10820), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {'index': 'X.index', 'columns': "['propensity']"}), "(res, index=X.index, columns=['propensity'])\n", (10776, 10820), True, 'import pandas as pd\n'), ((13885, 14090), 'causallib.estimation.Matching', 'Matching', ([], {'propensity_transform': 'propensity_transform', 'caliper': 'caliper', 'with_replacement': 'with_replacement', 'n_neighbors': 'n_neighbors', 'matching_mode': 'matching_mode', 'metric': 'metric', 'knn_backend': 'knn_backend'}), '(propensity_transform=propensity_transform, caliper=caliper,\n with_replacement=with_replacement, n_neighbors=n_neighbors,\n matching_mode=matching_mode, metric=metric, knn_backend=knn_backend)\n', (13893, 14090), False, 'from causallib.estimation import Matching\n'), ((2611, 2650), 'pandas.Series', 'pd.Series', (['(0)'], {'index': 'continuous_features'}), '(0, index=continuous_features)\n', (2620, 2650), True, 'import pandas as pd\n'), ((2820, 2859), 'pandas.Series', 'pd.Series', (['(1)'], {'index': 'continuous_features'}), '(1, index=continuous_features)\n', (2829, 2859), True, 'import pandas as pd\n'), ((3573, 3703), 'warnings.warn', 'warnings.warn', (['"""The parameter y on transform() is deprecated since 0.19 and will be removed in 0.21"""', 'DeprecationWarning'], {}), "(\n 'The parameter y on transform() is deprecated since 0.19 and will be removed in 0.21'\n , DeprecationWarning)\n", (3586, 3703), False, 'import warnings\n'), ((8635, 8666), 'numpy.ones', 'np.ones', (['X.shape[1]'], {'dtype': 'bool'}), '(X.shape[1], dtype=bool)\n', (8642, 8666), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_SimulateJumpDiffMertonKou [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_SimulateJumpDiffMertonKou&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=NormalDoubleExpJumps).
# ## Prepare the environment
# +
# Extend the import path so the legacy ARPM helper modules below resolve.
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from numpy import arange
import matplotlib.pyplot as plt
from matplotlib.pyplot import subplots, title, plot
plt.style.use('seaborn')
from ARPM_utils import save_plot
from JumpDiffusionMerton import JumpDiffusionMerton
from JumpDiffusionKou import JumpDiffusionKou
# initial parameters
tau = 1 # horizon
dt = 1 / 252 # time increment (presumably one trading day -- TODO confirm)
t = arange(0, tau + dt, dt) # time vector
j_ = 15 # number of simulated processes
# -
# ## Simulate jump diffusion
# ## arithmetic Brownian motion component
# +
mu_m = -1 # drift
sigma_m = 0.5 # diffusion
# Poisson process component
lambda_m = 5 # intensity
mu_p = 1 # drift of log-jump
sigma_p = 0.2 # diffusion of log-jump
# Simulated Merton paths; plot(t, x_m.T) below implies shape (j_, len(t)).
x_m = JumpDiffusionMerton(mu_m, sigma_m, lambda_m, mu_p, sigma_p, t, j_)
# -
# ## Simulate double-exponential
# +
mu_k = 0 # deterministic drift
sigma_k = 0.2 # Gaussian component
lambda_k = 4.25 # Poisson process intensity
p = .5 # probability of up-jump
e1 = 0.2 # parameter of up-jump
e2 = 0.3 # parameter of down-jump
# Simulated Kou (double-exponential jump) paths, same layout as x_m.
x_k = JumpDiffusionKou(mu_k, sigma_k, lambda_k, p, e1, e2, t, j_)
# -
# ## Generate figure
# +
# Two stacked panels: Merton paths on top, Kou paths below.
f, ax = subplots(2, 1)
plt.sca(ax[0])
plot(t, x_m.T)
title('Merton jump-diffusion')
plt.sca(ax[1])
plot(t, x_k.T)
title('double exponential')
plt.tight_layout();
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
| [
"JumpDiffusionKou.JumpDiffusionKou",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.sca",
"JumpDiffusionMerton.JumpDiffusionMerton",
"matplotlib.pyplot.tight_layout",
"os.path.abspath",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((882, 906), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (895, 906), True, 'import matplotlib.pyplot as plt\n'), ((1116, 1139), 'numpy.arange', 'arange', (['(0)', '(tau + dt)', 'dt'], {}), '(0, tau + dt, dt)\n', (1122, 1139), False, 'from numpy import arange\n'), ((1454, 1520), 'JumpDiffusionMerton.JumpDiffusionMerton', 'JumpDiffusionMerton', (['mu_m', 'sigma_m', 'lambda_m', 'mu_p', 'sigma_p', 't', 'j_'], {}), '(mu_m, sigma_m, lambda_m, mu_p, sigma_p, t, j_)\n', (1473, 1520), False, 'from JumpDiffusionMerton import JumpDiffusionMerton\n'), ((1785, 1844), 'JumpDiffusionKou.JumpDiffusionKou', 'JumpDiffusionKou', (['mu_k', 'sigma_k', 'lambda_k', 'p', 'e1', 'e2', 't', 'j_'], {}), '(mu_k, sigma_k, lambda_k, p, e1, e2, t, j_)\n', (1801, 1844), False, 'from JumpDiffusionKou import JumpDiffusionKou\n'), ((1884, 1898), 'matplotlib.pyplot.subplots', 'subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (1892, 1898), False, 'from matplotlib.pyplot import subplots, title, plot\n'), ((1899, 1913), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[0]'], {}), '(ax[0])\n', (1906, 1913), True, 'import matplotlib.pyplot as plt\n'), ((1914, 1928), 'matplotlib.pyplot.plot', 'plot', (['t', 'x_m.T'], {}), '(t, x_m.T)\n', (1918, 1928), False, 'from matplotlib.pyplot import subplots, title, plot\n'), ((1929, 1959), 'matplotlib.pyplot.title', 'title', (['"""Merton jump-diffusion"""'], {}), "('Merton jump-diffusion')\n", (1934, 1959), False, 'from matplotlib.pyplot import subplots, title, plot\n'), ((1961, 1975), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[1]'], {}), '(ax[1])\n', (1968, 1975), True, 'import matplotlib.pyplot as plt\n'), ((1976, 1990), 'matplotlib.pyplot.plot', 'plot', (['t', 'x_k.T'], {}), '(t, x_k.T)\n', (1980, 1990), False, 'from matplotlib.pyplot import subplots, title, plot\n'), ((1991, 2018), 'matplotlib.pyplot.title', 'title', (['"""double exponential"""'], {}), "('double exponential')\n", (1996, 2018), False, 'from matplotlib.pyplot 
import subplots, title, plot\n'), ((2019, 2037), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2035, 2037), True, 'import matplotlib.pyplot as plt\n'), ((730, 768), 'os.path.abspath', 'path.abspath', (['"""../../functions-legacy"""'], {}), "('../../functions-legacy')\n", (742, 768), True, 'import os.path as path\n')] |
import numpy as np
import os
import math
import cv2 as cv
import random
from collections import OrderedDict
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
from colorspacious import cspace_converter
# Colormap registry; unused in this script (leftover from the matplotlib
# colormap example this file appears adapted from).
cmaps = OrderedDict()
# Leftover debug helper: dump every .npy file in a folder.
# folder = 'debug'
# npy_list = os.listdir(folder)
# for npy_file in npy_list:
#     var = np.load(os.path.join(folder, npy_file))
#     if '.' in npy_file:
#         print('{}: {}'.format(npy_file, var))
# Prior (anchor) boxes, one box per row as (center_y, center_x, height, width)
# -- the column order is fixed by the center2point(...) call further below.
anchor = np.load('./priorbox_data.npy')
anchor = np.reshape(anchor, (-1,4))
print('anchor shape = {}'.format(anchor.shape))
# Fix: this line prints sample rows, not the shape -- label it accordingly.
print('anchor sample = {}'.format(anchor[:5,:]))
def gen_map(length=36):
    """Build a wheel of ``length`` RGB colours (rounded down to a multiple of 6).

    The channel layout follows the six-sextant HSV->RGB table with s = v = 1,
    although the fractional term ``f`` is not clamped to [0, 1] here, so some
    channel values fall outside the usual colour range -- presumably tolerated
    by the downstream cv.polylines call (TODO confirm intent).

    Returns:
        ndarray of shape (length, 3), one RGB triple per row.
    """
    length = (length // 6) * 6
    sextant = length // 6
    # Saturation / value fixed at 1; p is the all-zero channel.
    s = np.ones((1, sextant))
    v = np.ones((1, sextant))
    p = np.zeros((1, sextant))
    # Per-sextant fractional term, laid out one sextant per row: (6, sextant).
    f = np.reshape(np.array([(x / 60) - (x % 6) for x in range(length)]), [6, -1])
    q = 1. - f
    t = np.reshape(f, [6, -1])
    print('s={},f={},p={},q={},t={}, v={}'.format(s.shape, f.shape, p.shape, q.shape, t.shape, v.shape))
    print('v={},t={},p={}'.format(v, t[0, :], p))
    # One (R, G, B) channel triple per hue sextant, in wheel order.
    sextant_channels = [
        [v, [t[0, :]], p],
        [[q[1, :]], v, p],
        [p, v, [t[2, :]]],
        [p, [q[3, :]], v],
        [[t[4, :]], p, v],
        [v, p, [q[5, :]]],
    ]
    rgb = np.stack([np.stack(ch, axis=-1) for ch in sextant_channels], axis=0)
    return np.reshape(rgb, [length, 3])
def center2point(center_y, center_x, height, width):
    """Return the 4 corners of a rotated box centred at (center_x, center_y).

    The rotation angle is derived from the direction of the box centre
    relative to the image centre (0.5, 0.5), normalised into (0, 2*pi].

    Returns:
        ndarray of shape (4, 2) with (x, y) corner coordinates, in the same
        normalised units as the inputs.
    """
    # Angle of the centre offset; the epsilon keeps the division finite.
    theta = math.atan((center_y - 0.5) / (center_x - 0.5 + 1e-20))
    if center_x <= 0.5:
        theta = math.pi - theta
    else:
        theta = -theta
    if theta <= 0.0:
        theta += 2.0 * math.pi
    cos_t, sin_t = math.cos(theta), math.sin(theta)
    rot = np.array([[cos_t, -sin_t],
                    [sin_t, cos_t]])
    # NOTE: height and width are deliberately swapped before building corners
    # (x half-extent uses `height`, y half-extent uses `width`), matching the
    # original implementation.
    half_x, half_y = height / 2, width / 2
    corners = np.array([[-half_x, -half_y],
                        [half_x, -half_y],
                        [half_x, half_y],
                        [-half_x, half_y]])
    return np.matmul(corners, rot) + [center_x, center_y]
# Draw every 4th of the last 40 anchor boxes on a 512x512 canvas.
image = np.zeros((512,512,3))
# Alternative anchor slices kept from earlier debugging sessions:
# anchor = anchor[3:1444:4,:]
# anchor = anchor[3+1444:1844:4,:]
anchor = anchor[-40::4,:]
color = gen_map()
for i in range(anchor.shape[0]):
    yxhw = anchor[i,:]  # (center_y, center_x, height, width) per center2point's signature
    points = center2point( yxhw[0], yxhw[1], yxhw[2], yxhw[3])
    # Scale to pixels and shape into the (N, 1, 2) int layout polylines expects.
    # Fix: the original converted to int twice; one .astype(int) suffices.
    points = np.multiply(points, 512).astype(int)
    points = np.reshape(points, [-1,1,2])
    # Walk the colour wheel (stride 7 mod 36) and alternate 1px/2px thickness.
    cv.polylines(image,[points],True,color[(i*7)%36,:],1 + i%2)
cv.imshow('anchor', image)
cv.waitKey()
| [
"collections.OrderedDict",
"numpy.multiply",
"numpy.reshape",
"numpy.ones",
"cv2.polylines",
"numpy.less_equal",
"cv2.imshow",
"math.cos",
"numpy.array",
"numpy.zeros",
"numpy.stack",
"numpy.matmul",
"math.sin",
"numpy.load",
"cv2.waitKey",
"math.atan"
] | [((242, 255), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (253, 255), False, 'from collections import OrderedDict\n'), ((475, 505), 'numpy.load', 'np.load', (['"""./priorbox_data.npy"""'], {}), "('./priorbox_data.npy')\n", (482, 505), True, 'import numpy as np\n'), ((515, 542), 'numpy.reshape', 'np.reshape', (['anchor', '(-1, 4)'], {}), '(anchor, (-1, 4))\n', (525, 542), True, 'import numpy as np\n'), ((2300, 2323), 'numpy.zeros', 'np.zeros', (['(512, 512, 3)'], {}), '((512, 512, 3))\n', (2308, 2323), True, 'import numpy as np\n'), ((2719, 2745), 'cv2.imshow', 'cv.imshow', (['"""anchor"""', 'image'], {}), "('anchor', image)\n", (2728, 2745), True, 'import cv2 as cv\n'), ((2746, 2758), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (2756, 2758), True, 'import cv2 as cv\n'), ((725, 750), 'numpy.ones', 'np.ones', (['(1, length // 6)'], {}), '((1, length // 6))\n', (732, 750), True, 'import numpy as np\n'), ((759, 784), 'numpy.ones', 'np.ones', (['(1, length // 6)'], {}), '((1, length // 6))\n', (766, 784), True, 'import numpy as np\n'), ((793, 832), 'numpy.array', 'np.array', (['[(x / 60 - x % 6) for x in h]'], {}), '([(x / 60 - x % 6) for x in h])\n', (801, 832), True, 'import numpy as np\n'), ((843, 865), 'numpy.reshape', 'np.reshape', (['f', '[6, -1]'], {}), '(f, [6, -1])\n', (853, 865), True, 'import numpy as np\n'), ((874, 900), 'numpy.zeros', 'np.zeros', (['(1, length // 6)'], {}), '((1, length // 6))\n', (882, 900), True, 'import numpy as np\n'), ((934, 956), 'numpy.reshape', 'np.reshape', (['t', '[6, -1]'], {}), '(t, [6, -1])\n', (944, 956), True, 'import numpy as np\n'), ((1608, 1662), 'math.atan', 'math.atan', (['((center_y - 0.5) / (center_x - 0.5 + 1e-20))'], {}), '((center_y - 0.5) / (center_x - 0.5 + 1e-20))\n', (1617, 1662), False, 'import math\n'), ((1985, 2020), 'numpy.reshape', 'np.reshape', (['rotation_matrix', '(2, 2)'], {}), '(rotation_matrix, (2, 2))\n', (1995, 2020), True, 'import numpy as np\n'), ((2069, 2196), 
'numpy.stack', 'np.stack', (['[[-width / 2, -height / 2], [width / 2, -height / 2], [width / 2, height / \n 2], [-width / 2, height / 2]]'], {'axis': '(0)'}), '([[-width / 2, -height / 2], [width / 2, -height / 2], [width / 2, \n height / 2], [-width / 2, height / 2]], axis=0)\n', (2077, 2196), True, 'import numpy as np\n'), ((2658, 2726), 'cv2.polylines', 'cv.polylines', (['image', '[points]', '(True)', 'color[i * 7 % 36, :]', '(1 + i % 2)'], {}), '(image, [points], True, color[i * 7 % 36, :], 1 + i % 2)\n', (2670, 2726), True, 'import cv2 as cv\n'), ((1179, 1215), 'numpy.stack', 'np.stack', (['[v, [t[0, :]], p]'], {'axis': '(-1)'}), '([v, [t[0, :]], p], axis=-1)\n', (1187, 1215), True, 'import numpy as np\n'), ((1229, 1265), 'numpy.stack', 'np.stack', (['[[q[1, :]], v, p]'], {'axis': '(-1)'}), '([[q[1, :]], v, p], axis=-1)\n', (1237, 1265), True, 'import numpy as np\n'), ((1279, 1315), 'numpy.stack', 'np.stack', (['[p, v, [t[2, :]]]'], {'axis': '(-1)'}), '([p, v, [t[2, :]]], axis=-1)\n', (1287, 1315), True, 'import numpy as np\n'), ((1329, 1365), 'numpy.stack', 'np.stack', (['[p, [q[3, :]], v]'], {'axis': '(-1)'}), '([p, [q[3, :]], v], axis=-1)\n', (1337, 1365), True, 'import numpy as np\n'), ((1379, 1415), 'numpy.stack', 'np.stack', (['[[t[4, :]], p, v]'], {'axis': '(-1)'}), '([[t[4, :]], p, v], axis=-1)\n', (1387, 1415), True, 'import numpy as np\n'), ((1429, 1465), 'numpy.stack', 'np.stack', (['[v, p, [q[5, :]]]'], {'axis': '(-1)'}), '([v, p, [q[5, :]]], axis=-1)\n', (1437, 1465), True, 'import numpy as np\n'), ((1485, 1506), 'numpy.stack', 'np.stack', (['rgb'], {'axis': '(0)'}), '(rgb, axis=0)\n', (1493, 1506), True, 'import numpy as np\n'), ((1700, 1728), 'numpy.less_equal', 'np.less_equal', (['center_x', '(0.5)'], {}), '(center_x, 0.5)\n', (1713, 1728), True, 'import numpy as np\n'), ((1772, 1797), 'numpy.less_equal', 'np.less_equal', (['angle', '(0.0)'], {}), '(angle, 0.0)\n', (1785, 1797), True, 'import numpy as np\n'), ((2214, 2248), 'numpy.matmul', 
'np.matmul', (['points', 'rotation_matrix'], {}), '(points, rotation_matrix)\n', (2223, 2248), True, 'import numpy as np\n'), ((1860, 1875), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (1868, 1875), False, 'import math\n'), ((1921, 1936), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (1929, 1936), False, 'import math\n'), ((1937, 1952), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (1945, 1952), False, 'import math\n'), ((2563, 2587), 'numpy.multiply', 'np.multiply', (['points', '(512)'], {}), '(points, 512)\n', (2574, 2587), True, 'import numpy as np\n'), ((1878, 1893), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (1886, 1893), False, 'import math\n')] |
"""
Train a logistic regression model.
Input:
data:
Output:
params: parameters of logistic regression model
"""
import os
import pandas as pd
from bokeh.plotting import figure
import torch
import torch.nn as nn
import torch.utils.data
from torch.nn.utils import weight_norm
from torch.nn import ReLU
import numpy as np
import streamlit as st
import yaml
import pickle
from pathlib import Path, PurePath
import argparse
from datetime import datetime
from collections import OrderedDict
# Directory two levels above this file (presumably the server root -- verify layout).
serverdir = Path(os.path.realpath(__file__)).parent.parent
# Four levels above this file (presumably the repository root -- verify layout).
he_for_medical_data = serverdir.parent.parent
def generate_random_data(num_data_samp, data_dim):
    """Generate a random dataset with sigmoid-thresholded labels for logistic regression.

    A ground-truth weight vector ``a`` (entries in [5, 6)) defines a noisy
    linear score per sample; the label is the rounded sigmoid of that score.

    Args:
        num_data_samp: number of samples to draw.
        data_dim: number of features per sample.

    Returns:
        dict with
            "x": ndarray (num_data_samp, data_dim), features uniform in [-5, 5).
            "y": ndarray (num_data_samp, 1), hard labels in {0.0, 1.0}.
    """
    a = np.random.rand(data_dim) + 5              # ground-truth weights
    x_noise = 0.1 * np.random.randn(num_data_samp, 1)
    x = 10 * np.random.rand(num_data_samp, data_dim) - 5
    # Noisy linear score, vectorized (replaces the original per-row Python loop).
    score = np.matmul(x, a).reshape(-1, 1) + x_noise
    y_float = 1 / (1 + np.exp(-score))            # sigmoid
    y = np.rint(y_float)                           # threshold at 0.5
    return {"x": x, "y": y}
class poly(nn.Module):
    """Trainable polynomial activation.

    Applies ``sum_k c_k * x**d_k`` elementwise, where the exponents ``d_k``
    come from ``degreelist`` and the coefficients ``c_k`` are learnable
    parameters initialised to 1.
    """

    def __init__(self, degreelist):
        super().__init__()
        self.degreelist = degreelist
        ones = torch.ones(len(degreelist), dtype=torch.float32)
        self.register_parameter(
            "coefficients", torch.nn.Parameter(ones, requires_grad=True)
        )

    def forward(self, x):
        # Stack the powers along a new trailing axis: (*x.shape, len(degreelist)).
        powers = torch.stack([x ** d for d in self.degreelist], dim=-1)
        # Coefficient-weighted sum over that axis collapses back to x.shape.
        return (powers * self.coefficients).sum(-1)
class fully_conn(nn.Module):
    """Fully connected feed-forward network.

    Submodules are named ``weightedLinear{i}`` with an activation
    (``relu{i}`` or ``poly{i}``) after every layer except the last -- the
    naming matches the original implementation so saved state_dicts keep
    loading.

    Args:
        input_size: number of input features.
        layers: list of layer widths; the last entry is the output size.
        activation: "relu" or "poly".
        degrees: required when ``activation == "poly"``; the degree list
            handed to :class:`poly`. Each activation gets its own instance,
            so each has independent trainable coefficients.

    Raises:
        ValueError: for an unknown activation, or "poly" without ``degrees``.
    """

    def __init__(self, input_size, layers, activation, degrees=None):
        super(fully_conn, self).__init__()
        # Fail fast: the original silently built a net with no activations
        # for an unknown string, and crashed inside poly() when degrees=None.
        if activation not in ("relu", "poly"):
            raise ValueError(
                f"unknown activation {activation!r}; expected 'relu' or 'poly'")
        if activation == "poly" and degrees is None:
            raise ValueError("degrees must be given when activation='poly'")

        def make_activation():
            # Fresh module per layer: poly carries trainable coefficients.
            return ReLU() if activation == "relu" else poly(degrees)

        widths = [input_size] + list(layers)
        numlayer = len(layers)
        network = []
        for i in range(numlayer):
            linear = weight_norm(nn.Linear(widths[i], widths[i + 1]))
            network.append((f"weightedLinear{i}", linear))
            if i < numlayer - 1:  # no activation after the output layer
                network.append((f"{activation}{i}", make_activation()))
        self.nnet = nn.Sequential(OrderedDict(network))

    def forward(self, x):
        """Return raw (pre-sigmoid) logits for input ``x``."""
        logits = self.nnet(x)
        return logits

    def predict(self, x):
        """Return sigmoid probabilities for input ``x``."""
        return torch.sigmoid(self.forward(x))
class logreg(nn.Module):
    """Logistic regression as a single weight-normalised linear layer.

    ``forward`` returns raw logits; ``predict`` the sigmoid probabilities.
    """

    def __init__(self, input_size, classes):
        super(logreg, self).__init__()
        # Weight normalisation is applied to the "weight" parameter only.
        self.logistic_reg = weight_norm(nn.Linear(input_size, classes), name="weight")

    def forward(self, x):
        logits = self.logistic_reg(x)
        return logits

    def predict(self, x):
        return torch.sigmoid(self.forward(x))
def train(config, train_data, model, optimizer_state=None):
    """
    Training loop for mortality models.

    config: dict with "num_epochs", "batch_size" and "learning_rate".
    train_data: dict of torch tensors under "train_x", "train_y",
        "test_x" and "test_y".
    model: torch.nn.Module producing logits.
    optimizer_state: optional Adam state dict to resume from.

    Returns (model, optimizer, loss_values, placeholderpath) where
    loss_values is a list of metric dicts sampled every 50 steps and
    placeholderpath is a streamlit placeholder used for status text.
    """
    num_epochs = config["num_epochs"]
    batch_size = config["batch_size"]
    lr = config["learning_rate"]
    train_x = train_data["train_x"]
    train_y = train_data["train_y"]
    test_x = train_data["test_x"]
    test_y = train_data["test_y"]
    train_tensors = torch.utils.data.TensorDataset(train_x, train_y)
    train_loader = torch.utils.data.DataLoader(train_tensors,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               )
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    if optimizer_state is not None:
        optimizer.load_state_dict(optimizer_state)
    loss_values = []
    pd_loss_value = pd.DataFrame(columns=["loss", "test_loss", "step"])
    step = 0  # renamed from `round`, which shadowed the builtin
    placeholderpath = st.empty()
    placeholdergraph = st.empty()
    placeholder = st.empty()
    for epoch in range(num_epochs):
        for (x, y) in train_loader:
            outputs = model(x)
            optimizer.zero_grad()
            loss = torch.nn.functional.binary_cross_entropy_with_logits(outputs, y)
            loss.backward()
            optimizer.step()
            if step % 50 == 0:
                # Periodically evaluate on the held-out set and refresh the UI.
                pred = model(test_x)
                test_loss = torch.nn.functional.binary_cross_entropy_with_logits(pred, test_y)
                print(f"epoch: {epoch}/{num_epochs}; step: {step}; loss: {loss}; test_loss: {test_loss}")
                lossdict = {"epoch": epoch,
                            "step": step,
                            "loss": loss.detach().numpy(),
                            "test_loss": test_loss.detach().numpy(),
                            }
                loss_values.append(lossdict)
                # DataFrame.append was deprecated and removed in pandas >= 2.0;
                # concatenate a one-row frame instead.
                pd_loss_value = pd.concat([pd_loss_value, pd.DataFrame([lossdict])],
                                          ignore_index=True)
                p = figure(title="Loss/test loss")
                p.line(pd_loss_value.step, pd_loss_value.loss, line_width=2, color="firebrick", legend="loss")
                p.line(pd_loss_value.step, pd_loss_value.test_loss, line_width=2, legend="test_loss")
                placeholdergraph.bokeh_chart(p)
                placeholder.table(pd_loss_value)
            step += 1
    return model, optimizer, loss_values, placeholderpath
def convert_mortality_data(train_dict, test=False):
    """Convert a mortality data dictionary to torch tensors, in place.

    Accepts either a dict that already holds split frames under
    "train_x"/"train_y"/"test_x"/"test_y", or one holding full frames under
    "train"/"test" with an "expire" target column. Targets gain a trailing
    singleton dimension; "num_features" is added from the test matrix.
    When ``test`` is truthy the training split is left untouched.
    Returns the same (mutated) dict.
    """
    def _features(frame):
        return torch.Tensor(frame.values)

    def _target(series):
        return torch.Tensor(series.values).unsqueeze_(1)

    #Hack for now
    if "test_x" in train_dict.keys():
        # Pre-split data: just tensorise each part.
        if test == False:
            train_dict["train_x"] = _features(train_dict["train_x"])
            train_dict["train_y"] = _target(train_dict["train_y"])
        train_dict["test_x"] = _features(train_dict["test_x"])
        train_dict["test_y"] = _target(train_dict["test_y"])
    else:
        # Whole frames: split the "expire" target column off each set.
        if test == False:
            trainset = train_dict.pop("train")
            train_dict["train_x"] = _features(trainset.drop(columns=["expire"]))
            train_dict["train_y"] = _target(trainset.expire)
        testset = train_dict.pop("test")
        train_dict["test_x"] = _features(testset.drop(columns=["expire"]))
        train_dict["test_y"] = _target(testset.expire)
    train_dict["num_features"] = train_dict["test_x"].shape[1]
    return train_dict
def main(modeldir = None, datadir = None, continuetrain = None, test = False):
    """Load configs and data, build the model, then train or run test predictions.

    modeldir: directory name under <serverdir>/model_params containing
        configs.yaml and (optionally) saved checkpoints; outputs go here too.
    datadir: directory name whose train_dict.pkl holds the data splits.
    continuetrain: when True, resume from the newest "model_*" checkpoint.
    test: when True, only predict on the test split and pickle the results.

    Raises ValueError for any config/data/model loading problem.
    """
    #Get all parsed arguments
    # NOTE(review): serverdir and he_for_medical_data are module-level path
    # objects defined elsewhere in this file — confirm their definitions.
    modeldir = serverdir.joinpath("model_params",modeldir)
    data_pickle = he_for_medical_data.joinpath("data",datadir,"train_dict.pkl") #moved data path
    #Load the training configs
    cfgs = modeldir.joinpath("configs.yaml")
    try:
        with open(cfgs) as f:
            configs = yaml.load(f,Loader = yaml.FullLoader)
    except FileNotFoundError as e:
        raise ValueError("There was a problem finding configs.yaml.")
    except Exception as e:
        raise ValueError(f"There was an exception: {e}")
    #Load the data
    try:
        with open(data_pickle,'rb') as f:
            data_dict = pickle.load(f)
    except Exception as e:
        raise ValueError(f"There was an exception raised when trying to load the data: {e}")
    #Turn data into torch.tensor. For the future: can remove this to processing pipeline.
    try:
        train_data = convert_mortality_data(data_dict, test = test)
    except Exception as e:
        raise ValueError(f"There was an issue with the data format: {e}")
    #Put together the model either nn or logreg
    modeltype = configs["modeltype"]
    if modeltype == "nn":
        try:
            layers = configs["layers"]
            activation = configs["activation"]
            degrees = configs["degrees"]
            input_size = train_data["num_features"]
            model = fully_conn(input_size,
                        layers,
                        activation,
                        degrees=degrees,
                        )
        except Exception as e:
            raise ValueError(f"The model couldn't load: {e}")
    if modeltype == "logreg":
        try:
            layers = configs["layers"]
            input_size = train_data["num_features"]
            model = logreg(input_size, layers)
        except Exception as e:
            raise ValueError(f"The model couldn't load: {e}")
    #Initialize model with pretrained params to continue training or test ...
    if continuetrain == True or test == True:
        # Pick the most recently created checkpoint whose name starts with "model".
        list_of_paths = modeldir.glob("*")
        paths = sorted(list_of_paths, key=lambda p: p.stat().st_ctime)
        paths.reverse()
        for path in paths:
            if path.name[0:5] == "model":
                latest_path = path
                break
        checkpoint = torch.load(latest_path)
        model_state = checkpoint["model_state_dict"]
        optimizer_state = checkpoint["optimizer_state_dict"]
        model.load_state_dict(model_state)
    else:
        optimizer_state = None
    #Predict only
    if test == True:
        test_x = train_data["test_x"]
        test_y = train_data["test_y"].squeeze().numpy()
        st.write("Model loaded. Now making predictions...")
        y = model.predict(test_x).squeeze().detach().numpy()
        # Save true labels next to the predictions, shape (n, 2).
        predictions = np.stack([test_y, y], axis=-1)
        now = datetime.now().strftime("%d-%m-%Y-%H_%M_%S")
        st.write("Saving predictions alongside true values...")
        file = modeldir/f"predictions_{now}"
        with open(file, "wb") as f:
            pickle.dump(predictions, f)
        st.write(f"Saved to {file}.")
    else:
        #Train the model
        print("Training the model...")
        trained_model, optimizer, loss_values , placeholderpath = train(configs,
                                                                        train_data,
                                                                        model,
                                                                        optimizer_state=optimizer_state,
                                                                        )
        now = datetime.now().strftime("%d-%m-%Y-%H_%M_%S")
        loss_values_file = modeldir.joinpath(f"loss_values_{now}.pkl")
        with open(loss_values_file, "wb") as f:
            pickle.dump(loss_values,f)
        model_file = modeldir.joinpath(f"model_{now}.pkl")
        placeholderpath.text(f"Finished training. Saving model parameters to {model_file}")
        # Persist both model and optimizer state so training can be resumed later.
        d = {"model_state_dict": trained_model.state_dict(),
             "optimizer_state_dict": optimizer.state_dict()
             }
        torch.save(d,model_file)
def run():
    """Parse command-line options and return them as a kwargs dict for main().

    Note: the description/help strings below embed line-continuation
    whitespace; keep their layout intact or the displayed text changes.
    """
    #Set up stdin parser
    parser = argparse.ArgumentParser(description = "Training for a logistic \
    regression, or fully connected nn model with optional polynomial or relu\
    acivations.")
    parser.add_argument("--modeldir",
        metavar = "-M",
        type = str,
        default = "log_reg_mortality",
        help = "Relative directory name in directory 'model_params' \
        containing config.yml file for training and building \
        the model (in the case of a nn). This is where the \
        model will be saved.",
        )
    parser.add_argument("--datadir",
        metavar = "-D",
        type = str,
        default = "mortality_risk",
        help = "Directory in server/data with train/test dictionary.",
        )
    parser.add_argument("--continuetrain",
        action = "store_true",
        help = "Add this flag to pick up training at the last model checkpoint",
        )
    parser.add_argument("--test",
        action = "store_true",
        help = "Add this flag for testing on data under 'test' key in \
        path input for --datadir"
        )
    # vars() turns the Namespace into a plain dict usable as **kwargs.
    return vars(parser.parse_args())
if __name__ == "__main__":
    #Set up stdin parser
    # Parse CLI flags and forward them straight into main().
    kwargs = run()
    main(**kwargs)
| [
"torch.nn.ReLU",
"numpy.random.rand",
"bokeh.plotting.figure",
"yaml.load",
"torch.pow",
"streamlit.empty",
"argparse.ArgumentParser",
"numpy.exp",
"numpy.stack",
"numpy.dot",
"numpy.rint",
"pandas.DataFrame",
"collections.OrderedDict",
"numpy.ones",
"torch.Tensor",
"streamlit.write",
... | [((958, 967), 'numpy.exp', 'np.exp', (['b'], {}), '(b)\n', (964, 967), True, 'import numpy as np\n'), ((998, 1014), 'numpy.rint', 'np.rint', (['y_float'], {}), '(y_float)\n', (1005, 1014), True, 'import numpy as np\n'), ((4421, 4469), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['train_x', 'train_y'], {}), '(train_x, train_y)\n', (4451, 4469), False, 'import torch\n'), ((4488, 4567), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_tensors'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(train_tensors, batch_size=batch_size, shuffle=True)\n', (4515, 4567), False, 'import torch\n'), ((4897, 4948), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['loss', 'test_loss', 'step']"}), "(columns=['loss', 'test_loss', 'step'])\n", (4909, 4948), True, 'import pandas as pd\n'), ((4986, 4996), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (4994, 4996), True, 'import streamlit as st\n'), ((5020, 5030), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (5028, 5030), True, 'import streamlit as st\n'), ((5049, 5059), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (5057, 5059), True, 'import streamlit as st\n'), ((11975, 12147), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Training for a logistic regression, or fully connected nn model with optional polynomial or relu acivations."""'}), "(description=\n 'Training for a logistic regression, or fully connected nn model with optional polynomial or relu acivations.'\n )\n", (11998, 12147), False, 'import argparse\n'), ((724, 748), 'numpy.random.rand', 'np.random.rand', (['data_dim'], {}), '(data_dim)\n', (738, 748), True, 'import numpy as np\n'), ((770, 803), 'numpy.random.randn', 'np.random.randn', (['num_data_samp', '(1)'], {}), '(num_data_samp, 1)\n', (785, 803), True, 'import numpy as np\n'), ((1317, 1345), 'numpy.ones', 'np.ones', (['p'], {'dtype': 'np.float32'}), '(p, dtype=np.float32)\n', (1324, 1345), True, 'import numpy as 
np\n'), ((3713, 3743), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'classes'], {}), '(input_size, classes)\n', (3722, 3743), True, 'import torch.nn as nn\n'), ((3772, 3806), 'torch.nn.utils.weight_norm', 'weight_norm', (['linear'], {'name': '"""weight"""'}), "(linear, name='weight')\n", (3783, 3806), False, 'from torch.nn.utils import weight_norm\n'), ((6977, 7018), 'torch.Tensor', 'torch.Tensor', (["train_dict['test_x'].values"], {}), "(train_dict['test_x'].values)\n", (6989, 7018), False, 'import torch\n'), ((10109, 10132), 'torch.load', 'torch.load', (['latest_path'], {}), '(latest_path)\n', (10119, 10132), False, 'import torch\n'), ((10473, 10524), 'streamlit.write', 'st.write', (['"""Model loaded. Now making predictions..."""'], {}), "('Model loaded. Now making predictions...')\n", (10481, 10524), True, 'import streamlit as st\n'), ((10608, 10638), 'numpy.stack', 'np.stack', (['[test_y, y]'], {'axis': '(-1)'}), '([test_y, y], axis=-1)\n', (10616, 10638), True, 'import numpy as np\n'), ((10707, 10762), 'streamlit.write', 'st.write', (['"""Saving predictions alongside true values..."""'], {}), "('Saving predictions alongside true values...')\n", (10715, 10762), True, 'import streamlit as st\n'), ((10892, 10921), 'streamlit.write', 'st.write', (['f"""Saved to {file}."""'], {}), "(f'Saved to {file}.')\n", (10900, 10921), True, 'import streamlit as st\n'), ((11900, 11925), 'torch.save', 'torch.save', (['d', 'model_file'], {}), '(d, model_file)\n', (11910, 11925), False, 'import torch\n'), ((531, 557), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (547, 557), False, 'import os\n'), ((814, 853), 'numpy.random.rand', 'np.random.rand', (['num_data_samp', 'data_dim'], {}), '(num_data_samp, data_dim)\n', (828, 853), True, 'import numpy as np\n'), ((1380, 1397), 'torch.tensor', 'torch.tensor', (['arr'], {}), '(arr)\n', (1392, 1397), False, 'import torch\n'), ((1514, 1529), 'torch.pow', 'torch.pow', (['x', 'n'], {}), '(x, n)\n', (1523, 
1529), False, 'import torch\n'), ((3406, 3426), 'collections.OrderedDict', 'OrderedDict', (['network'], {}), '(network)\n', (3417, 3426), False, 'from collections import OrderedDict\n'), ((5216, 5280), 'torch.nn.functional.binary_cross_entropy_with_logits', 'torch.nn.functional.binary_cross_entropy_with_logits', (['outputs', 'y'], {}), '(outputs, y)\n', (5268, 5280), False, 'import torch\n'), ((6810, 6852), 'torch.Tensor', 'torch.Tensor', (["train_dict['train_x'].values"], {}), "(train_dict['train_x'].values)\n", (6822, 6852), False, 'import torch\n'), ((8082, 8118), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (8091, 8118), False, 'import yaml\n'), ((8404, 8418), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8415, 8418), False, 'import pickle\n'), ((10856, 10883), 'pickle.dump', 'pickle.dump', (['predictions', 'f'], {}), '(predictions, f)\n', (10867, 10883), False, 'import pickle\n'), ((11580, 11607), 'pickle.dump', 'pickle.dump', (['loss_values', 'f'], {}), '(loss_values, f)\n', (11591, 11607), False, 'import pickle\n'), ((5434, 5500), 'torch.nn.functional.binary_cross_entropy_with_logits', 'torch.nn.functional.binary_cross_entropy_with_logits', (['pred', 'test_y'], {}), '(pred, test_y)\n', (5486, 5500), False, 'import torch\n'), ((6084, 6114), 'bokeh.plotting.figure', 'figure', ([], {'title': '"""Loss/test loss"""'}), "(title='Loss/test loss')\n", (6090, 6114), False, 'from bokeh.plotting import figure\n'), ((7050, 7091), 'torch.Tensor', 'torch.Tensor', (["train_dict['test_y'].values"], {}), "(train_dict['test_y'].values)\n", (7062, 7091), False, 'import torch\n'), ((7537, 7572), 'torch.Tensor', 'torch.Tensor', (['testset.expire.values'], {}), '(testset.expire.values)\n', (7549, 7572), False, 'import torch\n'), ((10654, 10668), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10666, 10668), False, 'from datetime import datetime\n'), ((11404, 11418), 'datetime.datetime.now', 'datetime.now', 
([], {}), '()\n', (11416, 11418), False, 'from datetime import datetime\n'), ((876, 898), 'numpy.dot', 'np.dot', (['a', 'x[row, ...]'], {}), '(a, x[row, ...])\n', (882, 898), True, 'import numpy as np\n'), ((2338, 2370), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'layers[0]'], {}), '(input_size, layers[0])\n', (2347, 2370), True, 'import torch.nn as nn\n'), ((2499, 2505), 'torch.nn.ReLU', 'ReLU', ([], {}), '()\n', (2503, 2505), False, 'from torch.nn import ReLU\n'), ((6889, 6931), 'torch.Tensor', 'torch.Tensor', (["train_dict['train_y'].values"], {}), "(train_dict['train_y'].values)\n", (6901, 6931), False, 'import torch\n'), ((7327, 7363), 'torch.Tensor', 'torch.Tensor', (['trainset.expire.values'], {}), '(trainset.expire.values)\n', (7339, 7363), False, 'import torch\n'), ((2641, 2676), 'torch.nn.Linear', 'nn.Linear', (['layers[i]', 'layers[i + 1]'], {}), '(layers[i], layers[i + 1])\n', (2650, 2676), True, 'import torch.nn as nn\n'), ((2753, 2759), 'torch.nn.ReLU', 'ReLU', ([], {}), '()\n', (2757, 2759), False, 'from torch.nn import ReLU\n'), ((3140, 3175), 'torch.nn.Linear', 'nn.Linear', (['layers[i]', 'layers[i + 1]'], {}), '(layers[i], layers[i + 1])\n', (3149, 3175), True, 'import torch.nn as nn\n')] |
import datetime
import numpy as np
import os
import pandas as pd
import tempfile
import tarfile
from zipfile import ZipFile

# ETL script: expand the 2006 NOAA daily climate archive and the WBAN station
# list, clean and join them, export noaa.csv, and plot station locations to
# stations.pdf.

station_zip = 'wbanmasterlist.psv.zip'
climate_tar = '2006_daily.tar.gz'
# Both archives must be present in the current working directory.
assert station_zip in os.listdir(), station_zip
assert climate_tar in os.listdir(), climate_tar

with tempfile.TemporaryDirectory() as tmp:
    print('expanding', station_zip, 'and', climate_tar, 'into', tmp)
    # NOTE(review): extractall trusts member paths inside the archives; only
    # run this on archives from a trusted source.
    with ZipFile(station_zip) as stations:
        stations.extractall(tmp)
    with tarfile.open(climate_tar) as climate:
        climate.extractall(tmp)
    climate_dir = os.path.join(tmp, '2006_daily')
    stations_file = os.path.join(tmp, 'wbanmasterlist.psv')
    print('reading in raw data')
    stations = pd.read_csv(stations_file, sep='|', header=0)
    climates = [os.path.join(climate_dir, f) for f in os.listdir(climate_dir)]
    daily_dfs = [pd.read_csv(f, header=0, encoding='ISO-8859-1')
                 for f in climates]

for df in daily_dfs:
    # Raw headers carry stray whitespace.
    df.columns = df.columns.str.strip()
climate = pd.concat(daily_dfs, ignore_index=True)

print('cleaning climate data')
# Convert YearMonthDay into fractional days since 2006-01-01.
climate['time'] = pd.to_datetime(climate['YearMonthDay'], format='%Y%m%d')
climate['time'] = (climate['time'] - pd.to_datetime('2006-01-01')
                   ) / datetime.timedelta(days=1)
oldcols = ['Wban Number', 'Avg Temp', 'Avg Dew Pt',
           'Pressure Avg Sea Level', 'Wind Avg Speed', 'time']
newcols = ['wban', 'temp', 'dew', 'pressure', 'wind', 'time']
climate = climate.rename(columns=dict(zip(oldcols, newcols)))
climate = climate[newcols]
print('climate NAs per column')
print(len(climate) - climate.count())

print('cleaning station data')
oldcols = ['WBAN_ID', 'LOCATION']
newcols = ['wban', 'loc']
stations = stations.rename(columns=dict(zip(oldcols, newcols)))
stations = stations[newcols]
print('stations NAs per column')
print(len(stations) - stations.count())
stations = stations.dropna()

missing = len(set(climate.wban) - set(stations.dropna().wban))
tot = len(set(climate.wban))
print('stations in climate missing location data', missing, 'of', tot)

joined = climate.merge(stations, on='wban', how='left')
tot = len(joined)
numerics = ['temp', 'dew', 'pressure', 'wind', 'time']
for name in numerics:
    # Strip every non-digit / non-dot character before parsing.
    # BUG FIX: pandas >= 2.0 defaults str.replace to literal matching, so the
    # pattern must be flagged as a regex or the cleanup is silently skipped.
    cleanstr = joined[name].astype('str').str.replace(r'[^\d\.]', '', regex=True)
    joined[name] = pd.to_numeric(cleanstr)
joined = joined.dropna()
print('left-joined climate/stations data has',
      len(joined), 'of', tot, 'clean rows')

print('parsing lattitude/longitude')
import re
# Two observed location formats:
#   deg<sep>min<sep>sec N/S ... deg<sep>min<sep>sec E/W   (p1)
#   deg<sep>min         N/S ... deg<sep>min         E/W   (p2)
p1 = re.compile(
    r"""(\d+)\D(\d+)\D+(\d+)\D*(N|S)\W+(\d+)\D(\d+)\D(\d+)\D*(E|W)""")
p2 = re.compile(r"""(\d+)\D+(\d+)\D+(N|S)\W+(\d+)\D+(\d+)\D+(E|W)""")
lats = []
lons = []
badparse = 0
for i in joined['loc']:
    m = p1.match(i)
    if m:
        # Degrees/minutes/seconds -> decimal degrees; S and W are negative.
        lat = float(m.group(1)) + float(m.group(2)) / \
            60 + float(m.group(3)) / 60 / 60
        lat *= 1 if m.group(4) == 'N' else -1
        lon = float(m.group(5)) + float(m.group(6)) / \
            60 + float(m.group(7)) / 60 / 60
        lon *= 1 if m.group(8) == 'E' else -1
        lats.append(lat)
        lons.append(lon)
        continue
    m = p2.match(i)
    if m:
        # Degrees/minutes -> decimal degrees.
        lat = float(m.group(1)) + float(m.group(2)) / 60
        lat *= 1 if m.group(3) == 'N' else -1
        lon = float(m.group(4)) + float(m.group(5)) / 60
        lon *= 1 if m.group(6) == 'E' else -1
        lats.append(lat)
        lons.append(lon)
        continue
    if i == """47123'56"N 120*12'24"W""":
        # this entry probably had 1 as a typo for *
        # other values are unreasonable for US lat/lon
        lats.append(47 + 23 / 60 + 56 / 3600)
        lons.append(-1 * (120 + 12 / 60 + 24 / 3600))
        continue
    print('NO MATCH', i)
    lats.append(np.nan)
    lons.append(np.nan)
    badparse += 1
joined['lat'] = lats
joined['lon'] = lons
joined = joined.drop('loc', axis=1)
print('# of bad lat/lon parses:', badparse)

fname = 'noaa.csv'
print('exporting cleaned noaa dataset to', fname)
joined.to_csv(fname, index=False)

fname = 'stations.pdf'
print('plotting stations with weather data on', fname)
import matplotlib as mpl
mpl.use('Agg')  # headless backend; must be selected before importing pyplot
# https://stackoverflow.com/questions/44488167
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import pandas as pd
lat = np.array(lats)
lon = np.array(lons)
# determine range to print based on min, max lat and lon of the data
margin = 2  # buffer to add to the range
lat_min, lon_min = 24.344974, -126.592164
lat_max, lon_max = 52.735210, -54.079873
lat_min = min(min(lat) - margin, lat_min)
lat_max = max(max(lat) + margin, lat_max)
lon_min = min(min(lon) - margin, lon_min)
lon_max = max(max(lon) + margin, lon_max)
# create map using BASEMAP
m = Basemap(llcrnrlon=lon_min,
        llcrnrlat=lat_min,
        urcrnrlon=lon_max,
        urcrnrlat=lat_max,
        lat_0=(lat_max - lat_min) / 2,
        lon_0=(lon_max - lon_min) / 2,
        projection='merc',
        resolution='h',
        area_thresh=10000.,
        )
m.drawcoastlines()
m.drawcountries()
m.drawstates()
m.drawmapboundary(fill_color='#46bcec')
m.fillcontinents(color='white', lake_color='#46bcec')
# convert lat and lon to map projection coordinates
lonss, latss = m(lon, lat)
# plot points as red dots
m.scatter(lonss, latss, marker='.', color='r', zorder=5, s=1)
plt.savefig(fname, format='pdf', bbox_inches='tight')
| [
"tempfile.TemporaryDirectory",
"os.listdir",
"matplotlib.pyplot.savefig",
"tarfile.open",
"pandas.read_csv",
"re.compile",
"matplotlib.use",
"zipfile.ZipFile",
"os.path.join",
"numpy.array",
"mpl_toolkits.basemap.Basemap",
"pandas.to_numeric",
"datetime.timedelta",
"pandas.concat",
"pand... | [((1121, 1177), 'pandas.to_datetime', 'pd.to_datetime', (["climate['YearMonthDay']"], {'format': '"""%Y%m%d"""'}), "(climate['YearMonthDay'], format='%Y%m%d')\n", (1135, 1177), True, 'import pandas as pd\n'), ((2521, 2611), 're.compile', 're.compile', (['"""(\\\\d+)\\\\D(\\\\d+)\\\\D+(\\\\d+)\\\\D*(N|S)\\\\W+(\\\\d+)\\\\D(\\\\d+)\\\\D(\\\\d+)\\\\D*(E|W)"""'], {}), "(\n '(\\\\d+)\\\\D(\\\\d+)\\\\D+(\\\\d+)\\\\D*(N|S)\\\\W+(\\\\d+)\\\\D(\\\\d+)\\\\D(\\\\d+)\\\\D*(E|W)')\n", (2531, 2611), False, 'import re\n'), ((2609, 2677), 're.compile', 're.compile', (['"""(\\\\d+)\\\\D+(\\\\d+)\\\\D+(N|S)\\\\W+(\\\\d+)\\\\D+(\\\\d+)\\\\D+(E|W)"""'], {}), "('(\\\\d+)\\\\D+(\\\\d+)\\\\D+(N|S)\\\\W+(\\\\d+)\\\\D+(\\\\d+)\\\\D+(E|W)')\n", (2619, 2677), False, 'import re\n'), ((4117, 4131), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (4124, 4131), True, 'import matplotlib as mpl\n'), ((4279, 4293), 'numpy.array', 'np.array', (['lats'], {}), '(lats)\n', (4287, 4293), True, 'import numpy as np\n'), ((4300, 4314), 'numpy.array', 'np.array', (['lons'], {}), '(lons)\n', (4308, 4314), True, 'import numpy as np\n'), ((4709, 4919), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'llcrnrlon': 'lon_min', 'llcrnrlat': 'lat_min', 'urcrnrlon': 'lon_max', 'urcrnrlat': 'lat_max', 'lat_0': '((lat_max - lat_min) / 2)', 'lon_0': '((lon_max - lon_min) / 2)', 'projection': '"""merc"""', 'resolution': '"""h"""', 'area_thresh': '(10000.0)'}), "(llcrnrlon=lon_min, llcrnrlat=lat_min, urcrnrlon=lon_max, urcrnrlat=\n lat_max, lat_0=(lat_max - lat_min) / 2, lon_0=(lon_max - lon_min) / 2,\n projection='merc', resolution='h', area_thresh=10000.0)\n", (4716, 4919), False, 'from mpl_toolkits.basemap import Basemap\n'), ((5334, 5387), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "(fname, format='pdf', bbox_inches='tight')\n", (5345, 5387), True, 'import matplotlib.pyplot as plt\n'), ((221, 233), 'os.listdir', 
'os.listdir', ([], {}), '()\n', (231, 233), False, 'import os\n'), ((269, 281), 'os.listdir', 'os.listdir', ([], {}), '()\n', (279, 281), False, 'import os\n'), ((301, 330), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (328, 330), False, 'import tempfile\n'), ((581, 612), 'os.path.join', 'os.path.join', (['tmp', '"""2006_daily"""'], {}), "(tmp, '2006_daily')\n", (593, 612), False, 'import os\n'), ((633, 672), 'os.path.join', 'os.path.join', (['tmp', '"""wbanmasterlist.psv"""'], {}), "(tmp, 'wbanmasterlist.psv')\n", (645, 672), False, 'import os\n'), ((722, 767), 'pandas.read_csv', 'pd.read_csv', (['stations_file'], {'sep': '"""|"""', 'header': '(0)'}), "(stations_file, sep='|', header=0)\n", (733, 767), True, 'import pandas as pd\n'), ((1031, 1070), 'pandas.concat', 'pd.concat', (['daily_dfs'], {'ignore_index': '(True)'}), '(daily_dfs, ignore_index=True)\n', (1040, 1070), True, 'import pandas as pd\n'), ((1267, 1293), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1285, 1293), False, 'import datetime\n'), ((2324, 2347), 'pandas.to_numeric', 'pd.to_numeric', (['cleanstr'], {}), '(cleanstr)\n', (2337, 2347), True, 'import pandas as pd\n'), ((417, 437), 'zipfile.ZipFile', 'ZipFile', (['station_zip'], {}), '(station_zip)\n', (424, 437), False, 'from zipfile import ZipFile\n'), ((493, 518), 'tarfile.open', 'tarfile.open', (['climate_tar'], {}), '(climate_tar)\n', (505, 518), False, 'import tarfile\n'), ((784, 812), 'os.path.join', 'os.path.join', (['climate_dir', 'f'], {}), '(climate_dir, f)\n', (796, 812), False, 'import os\n'), ((864, 911), 'pandas.read_csv', 'pd.read_csv', (['f'], {'header': '(0)', 'encoding': '"""ISO-8859-1"""'}), "(f, header=0, encoding='ISO-8859-1')\n", (875, 911), True, 'import pandas as pd\n'), ((1215, 1243), 'pandas.to_datetime', 'pd.to_datetime', (['"""2006-01-01"""'], {}), "('2006-01-01')\n", (1229, 1243), True, 'import pandas as pd\n'), ((822, 845), 'os.listdir', 
'os.listdir', (['climate_dir'], {}), '(climate_dir)\n', (832, 845), False, 'import os\n')] |
import os
import glob
import numpy as np
from skimage.io import imread
# from PIL import imageio
# from PIL import Image
# import image
#.io import imread
import random
import tensorflow as tf
# from keras.utils.np_utils import to_categorical
class DataLoader:
    """Batch loader for PNG images laid out as <split_dir>/<label>/<file>.png.

    The label of every image is the name of its parent directory; it is
    returned one-hot encoded. Images are read with skimage and flattened to a
    1-D vector of height_of_image * width_of_image pixels.
    """

    def __init__(self, train_images_dir, val_images_dir, test_images_dir, train_batch_size, val_batch_size,
                 test_batch_size, height_of_image, width_of_image, num_channels, num_classes):
        self.train_paths = glob.glob(os.path.join(train_images_dir, "**/*.png"), recursive=True)
        self.val_paths = glob.glob(os.path.join(val_images_dir, "**/*.png"), recursive=True)
        self.test_paths = glob.glob(os.path.join(test_images_dir, "**/*.png"), recursive=True)
        self.train_batch_size = train_batch_size
        self.val_batch_size = val_batch_size
        self.test_batch_size = test_batch_size
        self.num_classes = num_classes
        # Flattened image length. The original hard-coded 784 (28x28) here
        # while ignoring the height/width constructor arguments.
        self.flat_size = height_of_image * width_of_image

    def load_image(self, path):
        """Read one image and return (flat_pixels, one_hot_label)."""
        image = imread(path)
        image = image.reshape(self.flat_size)
        # The class label is encoded as the containing folder's name.
        target = int(os.path.basename(os.path.dirname(path)))
        label = np.eye(self.num_classes)[target]
        return image, label

    def batch_data_loader(self, batch_size, file_paths, index):
        """Load batch number `index` (0-based) of size `batch_size` from file_paths.

        Raises IndexError if the requested slice runs past the path list.
        """
        images = []
        labels = []
        for i in range(int(index * batch_size), int((index + 1) * batch_size)):
            image, label = self.load_image(file_paths[i])
            images.append(image)
            labels.append(label)
        return images, labels

    def on_epoch_end(self):
        """Reshuffle the training paths between epochs."""
        np.random.shuffle(self.train_paths)

    def train_data_loader(self, index):
        return self.batch_data_loader(self.train_batch_size, self.train_paths, index)

    def val_data_loader(self, index):
        return self.batch_data_loader(self.val_batch_size, self.val_paths, index)

    def test_data_loader(self, index):
        return self.batch_data_loader(self.test_batch_size, self.test_paths, index)
| [
"numpy.eye",
"os.path.join",
"os.path.dirname",
"skimage.io.imread",
"numpy.random.shuffle"
] | [((1004, 1016), 'skimage.io.imread', 'imread', (['path'], {}), '(path)\n', (1010, 1016), False, 'from skimage.io import imread\n'), ((2451, 2486), 'numpy.random.shuffle', 'np.random.shuffle', (['self.train_paths'], {}), '(self.train_paths)\n', (2468, 2486), True, 'import numpy as np\n'), ((501, 543), 'os.path.join', 'os.path.join', (['train_images_dir', '"""**/*.png"""'], {}), "(train_images_dir, '**/*.png')\n", (513, 543), False, 'import os\n'), ((596, 636), 'os.path.join', 'os.path.join', (['val_images_dir', '"""**/*.png"""'], {}), "(val_images_dir, '**/*.png')\n", (608, 636), False, 'import os\n'), ((690, 731), 'os.path.join', 'os.path.join', (['test_images_dir', '"""**/*.png"""'], {}), "(test_images_dir, '**/*.png')\n", (702, 731), False, 'import os\n'), ((1197, 1221), 'numpy.eye', 'np.eye', (['self.num_classes'], {}), '(self.num_classes)\n', (1203, 1221), True, 'import numpy as np\n'), ((1158, 1179), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (1173, 1179), False, 'import os\n')] |
import numpy as np
import tensorflow as tf
def inception_score(model, images, labels):
    """Top-1 accuracy of `model` on `images` against one-hot `labels`.

    Note: despite the name, this computes plain classification accuracy
    (fraction of samples whose argmax prediction matches the argmax label),
    not the Inception Score from the GAN literature.

    model: object exposing predict(images) -> per-class score array.
    images: batch of inputs understood by model.predict.
    labels: one-hot (or score) array, one row per sample.

    Returns a float in [0, 1]; 0.0 for empty input (the original raised
    ZeroDivisionError).
    """
    if len(labels) == 0:
        return 0.0
    # Vectorised replacement for the original per-sample Python loop.
    predicted = np.argmax(model.predict(images), axis=1)
    golden = np.argmax(labels, axis=1)
    return float(np.mean(predicted == golden))
| [
"numpy.argmax"
] | [((216, 236), 'numpy.argmax', 'np.argmax', (['labels[i]'], {}), '(labels[i])\n', (225, 236), True, 'import numpy as np\n'), ((263, 293), 'numpy.argmax', 'np.argmax', (['predicted_labels[i]'], {}), '(predicted_labels[i])\n', (272, 293), True, 'import numpy as np\n')] |
import math
import numpy as np
import cv2
from pathlib import Path
import typing
import geometry as geom
class MappingPoints:
    def __init__(self, settings_map):
        """Build the frame/map/geo perspective transforms from a settings dict.

        settings_map keys used here: 'frame_points', 'map_points',
        'coord_points', 'parking_lines', 'VIDEO_SOURCE', 'MAP_SOURCE' and the
        optional 'interceptor' offset subtracted from the frame points.
        Also loads previously detected car boxes from data/car_boxes.obj.
        """
        import pickle, json
        if settings_map.get('interceptor'):
            # Shift the reference frame points by the interceptor offset.
            settings_map['frame_points'] = (np.array(settings_map['frame_points']) -
                                            np.array(settings_map['interceptor'])).tolist()
        self.parking_lines = settings_map.get('parking_lines')
        self.frame_points = np.int32(settings_map['frame_points'])
        self.map_points = np.int32(settings_map['map_points'])
        self.coord_points = np.array(settings_map['coord_points'])
        self.VIDEO_SOURCE = f"data/{settings_map['VIDEO_SOURCE']}"
        self.MAP_SOURCE = f"data/{settings_map['MAP_SOURCE']}"
        # Homographies between the three coordinate systems:
        # camera frame <-> schematic map <-> geo ("coord") space.
        self.transform_frame_to_map = cv2.getPerspectiveTransform(np.float32(self.frame_points),
                                                                  np.float32(self.map_points))
        self.transform_frame_to_coord = cv2.getPerspectiveTransform(np.float32(self.frame_points),
                                                                    np.float32(self.coord_points))
        self.transform_coord_to_map = cv2.getPerspectiveTransform(np.float32(self.coord_points),
                                                                  np.float32(self.map_points))
        self.transform_coord_to_frame = cv2.getPerspectiveTransform(np.float32(self.coord_points),
                                                                    np.float32(self.frame_points))
        # self.colors = [tuple(int(i) for i in np.random.choice(range(40, 256, 32), size=3)) for _ in
        #                range(len(self.parking_lines))]
        # self.colors = [(100, 100, 255), (255, 100, 255), (100, 255, 255), (255, 255, 100)]
        # One draw colour per parking line.
        self.colors = [(100, 100, 255)] * len(self.parking_lines)
        # with open('data/saved_data.obj', 'rb') as f:
        #     self.parked_car_info = pickle.load(f)
        # self.car_boxes = self.get_car_boxes(self.parked_car_info)  # extract detected car data
        # with open('data/car_boxes.obj', 'wb') as f:
        #     pickle.dump(self.car_boxes, f)
        # NOTE(review): pickle.load on this cache file trusts its contents;
        # only load files produced by this application.
        with open('data/car_boxes.obj', 'rb') as f:
            self.car_boxes = pickle.load(f)
        # Derived state, populated later by the search/projection methods.
        self.cam_point = None
        self.car_points = None
        self.m_in_degree = None
        self.parking_dict = None
        self.car_coords = None
        # Camera height; starts in metres, converted to degrees in search_cam_xy.
        self.cam_height = 40
        self.angles = []
def rectangle_car_boxes(self, frame):
for parking_area in self.car_boxes:
y1, x1, y2, x2 = parking_area
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 1)
return frame
def render_points(self, frame_layer, data):
if data['coord'] is not None and len(data['coord']) > 0:
data_coord = [(int(p[0]), int(p[1])) for p in data['coord']]
for p in data_coord:
cv2.circle(frame_layer, p, radius=5, color=data['color'], thickness=data['thick'])
def search_cam_xy(self):
points = np.array([[0, 0], [0, 100], [100, 0], [100, 100]])
points = geom.get_point_in_other(self.transform_frame_to_coord, points)
self.cam_point = geom.intersection_array(points)
self.m_in_degree = math.cos(self.cam_point[1]) * 111300
self.cam_height = self.cam_height / self.m_in_degree
@staticmethod
def get_car_boxes(r):
return np.array([box for i, box in enumerate(r['rois']) if r['class_ids'][i] in [3, 8, 6]])
def get_car_points_from_boxes(self):
cars = []
for y1, x1, y2, x2 in self.car_boxes:
pa = geom.get_point_in_other(self.transform_frame_to_coord, np.array([[(x1 + x2) / 2, y2]]))[0]
alpha = geom.search_angle_between_points(pa, self.cam_point, self.cam_height)
y = y2 - (y2 - y1) * math.tan(alpha)
self.angles += [alpha]
cars += [[(x1 + x2) / 2, y]]
return np.array(cars)
def parking_lines_coord(self):
self.parking_lines = np.array([geom.get_point_in_other(self.transform_frame_to_coord, np.array(pl))
for pl in self.parking_lines])
    def car_points_coord(self):
        # Project the detected car points from frame space into geo coordinates.
        self.car_coords = geom.get_point_in_other(self.transform_frame_to_coord, self.car_points)
    def perpendicular_dict(self):
        """Assign each detected car to the parking line it sits on.

        For every parking line, drops a perpendicular from each car point
        onto the line; a car is matched when the perpendicular length is
        numerically zero (< 1e-5) and the foot of the perpendicular lies
        between the line endpoints' x values.  Builds ``self.parking_dict``:
        one dict per line holding matched car indices ('cars'),
        intersection points ('inter') and distances ('lens') as ndarrays.
        """
        parking_lines_ab = geom.get_lines_from_array(self.parking_lines)
        parking_dict = []
        for i, (pl, pl_p) in enumerate(zip(parking_lines_ab, self.parking_lines)):
            park = {'park': i, 'cars': [], 'inter': [], 'lens': []}
            for j, car in enumerate(self.car_coords):
                point, leng = geom.get_perpendicular_to_line_from_point(pl, car)
                # keep the car only if it lies on the line and projects
                # inside the segment's x-range
                if leng < 1e-5 and pl_p[0][0] < point[0] < pl_p[1][0]:
                    park['cars'] += [j]
                    park['inter'] += [point]
                    park['lens'] += [leng]
            park['cars'], park['inter'], park['lens'] = np.array(park['cars']), \
                                                         np.array(park['inter']), \
                                                         np.array(park['lens'])
            parking_dict += [park]
        self.parking_dict = parking_dict
    def parking_sort_len(self):
        """Order cars along each parking line and estimate free slots.

        For every line, cars are sorted by distance from the line's start;
        gaps between neighbours (and the line ends) are normalized by their
        median, and gaps wide enough to fit additional cars yield candidate
        free-slot points stored in ``pdict['free']``.
        """
        for pdict, pl in zip(self.parking_dict, self.parking_lines):
            # Distances along the line, measured from the line's start point
            lens = np.array([geom.get_lenght(pl[0], p) for p in np.vstack((pdict['inter'], pl[1]))])
            ind_sort_lens = np.argsort(lens)  # indices that sort the distances
            lens = lens[ind_sort_lens]  # sort the distances
            lens = np.append(lens[0], lens[1:] - lens[:-1])  # gaps between neighbouring cars and the line ends
            ind_sort_dict = ind_sort_lens[:-1]  # sort order for the cars themselves
            # Apply the ordering to the per-line records
            pdict['cars'], pdict['inter'], pdict['lens'] = pdict['cars'][ind_sort_dict], \
                                                           pdict['inter'][ind_sort_dict], \
                                                           pdict['lens'][ind_sort_dict]
            median = np.median(lens[1:-1])
            median_lens = lens / median
            # smallest normalized interior gap, padded by 10% — used as the
            # minimum slot size below
            minimal = np.min(median_lens[1:-1]) * 1.1
            points_to_add = []
            for i, ml in enumerate(median_lens):
                result_point = pdict['inter'][np.min([i, len(pdict['inter']) - 1])]
                if i == len(pdict['inter']):
                    # final gap: walk forward from the last car towards the line end
                    direction = 1
                    count = int(ml / minimal - 0.3)
                    len_to_calc = ml / (count + 0.5) * median
                else:
                    # interior gap: walk backwards from the car bounding it
                    direction = -1
                    count = int(ml / minimal + 0.2) - 1
                    if count < 0:
                        continue
                    len_to_calc = ml / (count + 1) * median
                if count > 0:
                    for _ in range(count):
                        result_point = geom.get_point_on_line(result_point, pl, direction * len_to_calc)
                        points_to_add += [result_point]
            pdict['free'] = np.array(points_to_add)
    def render(self):
        """Run the detection pipeline once and display frame + map overlays.

        Loads the map image and the first video frame, locates the camera,
        projects car detections and parking lines into a shared coordinate
        space, finds free slots, draws overlays on both the camera frame and
        the map, then shows them stacked vertically until 'q' is pressed.
        """
        # Grab the images
        image_map = cv2.imread(self.MAP_SOURCE)
        map_copy = image_map.copy()
        video_capture = cv2.VideoCapture(self.VIDEO_SOURCE)
        _, image_frame = video_capture.read()
        frame_copy = image_frame.copy()
        # Auxiliary data
        self.search_cam_xy()  # locate the camera position
        self.car_points = self.get_car_points_from_boxes()  # derive the car ground points
        self.car_points_coord()
        self.parking_lines_coord()  # convert parking-line pixels into coordinate space
        self.perpendicular_dict()  # drop perpendiculars from cars onto parking lines
        self.parking_sort_len()  # order cars along each line and find free slots
        frame_pl = [geom.get_point_in_other(self.transform_coord_to_frame, pl) for pl in self.parking_lines]
        frame_free = [geom.get_point_in_other(self.transform_coord_to_frame, pd['free']) for pd in self.parking_dict]
        map_cars = geom.get_point_in_other(self.transform_frame_to_map, self.car_points)
        map_pl = [geom.get_point_in_other(self.transform_coord_to_map, pl) for pl in self.parking_lines]
        map_free = [geom.get_point_in_other(self.transform_coord_to_map, pd['free']) for pd in self.parking_dict]
        color_free = (0, 255, 0)
        # Draw onto the camera frame
        frame_copy = self.rectangle_car_boxes(frame_copy)  # draw detection boxes
        for park, line, color, ff in zip(self.parking_dict, frame_pl, self.colors, frame_free):
            # for park, line, color in zip(self.parking_dict, frame_pl, self.colors):
            self.render_points(frame_copy, {'coord': self.car_points[park['cars']], 'color': color, 'thick': 3})
            self.render_points(frame_copy, {'coord': ff, 'color': color_free, 'thick': 3})
            self.render_line_with_check(color, frame_copy, line)
        # Draw onto the map
        for park, line, color, mf in zip(self.parking_dict, map_pl, self.colors, map_free):
            # for park, line, color in zip(self.parking_dict, map_pl, self.colors):
            self.render_points(map_copy, {'coord': map_cars[park['cars']], 'color': color, 'thick': 2})
            self.render_points(map_copy, {'coord': mf, 'color': color_free, 'thick': 2})
            self.render_line_with_check(color, map_copy, line)
        # Blend overlays with the originals and visualize
        image_frame, image_map = [cv2.addWeighted(c, 0.8, i, 0.2, 0.0) for c, i in
                                  [[frame_copy, image_frame], [map_copy, image_map]]]
        frame_with_map = np.concatenate((image_frame, image_map), axis=0)
        cv2.imshow('Detecting...', frame_with_map)
        # cv2.waitKey(0)
        while True:
            # Press 'q' to quit.
            key = cv2.waitKey(1) & 0xFF
            if key in [ord('q'), ord('й')]:
                break
        video_capture.release()
        cv2.destroyAllWindows()
def render_line_with_check(self, color, frame_copy, line):
line = np.int32(line)
cv2.line(frame_copy, line[0], line[1], color=color, thickness=2)
# Script entry point: load the JSON configuration and run the detector once.
if __name__ == '__main__':
    import json
    file_name = 'data/setting.json'
    with open(file_name, 'r') as f:
        settings = json.load(f)
    MappingPoints(settings).render()
| [
"cv2.rectangle",
"numpy.int32",
"cv2.imshow",
"geometry.get_lines_from_array",
"numpy.array",
"math.cos",
"numpy.argsort",
"cv2.destroyAllWindows",
"geometry.get_point_in_other",
"math.tan",
"geometry.search_angle_between_points",
"cv2.line",
"cv2.addWeighted",
"geometry.get_perpendicular_... | [((506, 544), 'numpy.int32', 'np.int32', (["settings_map['frame_points']"], {}), "(settings_map['frame_points'])\n", (514, 544), True, 'import numpy as np\n'), ((571, 607), 'numpy.int32', 'np.int32', (["settings_map['map_points']"], {}), "(settings_map['map_points'])\n", (579, 607), True, 'import numpy as np\n'), ((636, 674), 'numpy.array', 'np.array', (["settings_map['coord_points']"], {}), "(settings_map['coord_points'])\n", (644, 674), True, 'import numpy as np\n'), ((3111, 3161), 'numpy.array', 'np.array', (['[[0, 0], [0, 100], [100, 0], [100, 100]]'], {}), '([[0, 0], [0, 100], [100, 0], [100, 100]])\n', (3119, 3161), True, 'import numpy as np\n'), ((3179, 3241), 'geometry.get_point_in_other', 'geom.get_point_in_other', (['self.transform_frame_to_coord', 'points'], {}), '(self.transform_frame_to_coord, points)\n', (3202, 3241), True, 'import geometry as geom\n'), ((3267, 3298), 'geometry.intersection_array', 'geom.intersection_array', (['points'], {}), '(points)\n', (3290, 3298), True, 'import geometry as geom\n'), ((4013, 4027), 'numpy.array', 'np.array', (['cars'], {}), '(cars)\n', (4021, 4027), True, 'import numpy as np\n'), ((4301, 4372), 'geometry.get_point_in_other', 'geom.get_point_in_other', (['self.transform_frame_to_coord', 'self.car_points'], {}), '(self.transform_frame_to_coord, self.car_points)\n', (4324, 4372), True, 'import geometry as geom\n'), ((4435, 4480), 'geometry.get_lines_from_array', 'geom.get_lines_from_array', (['self.parking_lines'], {}), '(self.parking_lines)\n', (4460, 4480), True, 'import geometry as geom\n'), ((7327, 7354), 'cv2.imread', 'cv2.imread', (['self.MAP_SOURCE'], {}), '(self.MAP_SOURCE)\n', (7337, 7354), False, 'import cv2\n'), ((7415, 7450), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.VIDEO_SOURCE'], {}), '(self.VIDEO_SOURCE)\n', (7431, 7450), False, 'import cv2\n'), ((8197, 8266), 'geometry.get_point_in_other', 'geom.get_point_in_other', (['self.transform_frame_to_map', 
'self.car_points'], {}), '(self.transform_frame_to_map, self.car_points)\n', (8220, 8266), True, 'import geometry as geom\n'), ((9772, 9820), 'numpy.concatenate', 'np.concatenate', (['(image_frame, image_map)'], {'axis': '(0)'}), '((image_frame, image_map), axis=0)\n', (9786, 9820), True, 'import numpy as np\n'), ((9829, 9871), 'cv2.imshow', 'cv2.imshow', (['"""Detecting..."""', 'frame_with_map'], {}), "('Detecting...', frame_with_map)\n", (9839, 9871), False, 'import cv2\n'), ((10103, 10126), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (10124, 10126), False, 'import cv2\n'), ((10206, 10220), 'numpy.int32', 'np.int32', (['line'], {}), '(line)\n', (10214, 10220), True, 'import numpy as np\n'), ((10229, 10293), 'cv2.line', 'cv2.line', (['frame_copy', 'line[0]', 'line[1]'], {'color': 'color', 'thickness': '(2)'}), '(frame_copy, line[0], line[1], color=color, thickness=2)\n', (10237, 10293), False, 'import cv2\n'), ((10431, 10443), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10440, 10443), False, 'import pickle, json\n'), ((871, 900), 'numpy.float32', 'np.float32', (['self.frame_points'], {}), '(self.frame_points)\n', (881, 900), True, 'import numpy as np\n'), ((968, 995), 'numpy.float32', 'np.float32', (['self.map_points'], {}), '(self.map_points)\n', (978, 995), True, 'import numpy as np\n'), ((1065, 1094), 'numpy.float32', 'np.float32', (['self.frame_points'], {}), '(self.frame_points)\n', (1075, 1094), True, 'import numpy as np\n'), ((1164, 1193), 'numpy.float32', 'np.float32', (['self.coord_points'], {}), '(self.coord_points)\n', (1174, 1193), True, 'import numpy as np\n'), ((1261, 1290), 'numpy.float32', 'np.float32', (['self.coord_points'], {}), '(self.coord_points)\n', (1271, 1290), True, 'import numpy as np\n'), ((1358, 1385), 'numpy.float32', 'np.float32', (['self.map_points'], {}), '(self.map_points)\n', (1368, 1385), True, 'import numpy as np\n'), ((1455, 1484), 'numpy.float32', 'np.float32', (['self.coord_points'], {}), 
'(self.coord_points)\n', (1465, 1484), True, 'import numpy as np\n'), ((1554, 1583), 'numpy.float32', 'np.float32', (['self.frame_points'], {}), '(self.frame_points)\n', (1564, 1583), True, 'import numpy as np\n'), ((2300, 2314), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2311, 2314), False, 'import pickle, json\n'), ((2667, 2723), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x1, y1)', '(x2, y2)', '(0, 0, 255)', '(1)'], {}), '(frame, (x1, y1), (x2, y2), (0, 0, 255), 1)\n', (2680, 2723), False, 'import cv2\n'), ((3326, 3353), 'math.cos', 'math.cos', (['self.cam_point[1]'], {}), '(self.cam_point[1])\n', (3334, 3353), False, 'import math\n'), ((3803, 3872), 'geometry.search_angle_between_points', 'geom.search_angle_between_points', (['pa', 'self.cam_point', 'self.cam_height'], {}), '(pa, self.cam_point, self.cam_height)\n', (3835, 3872), True, 'import geometry as geom\n'), ((5610, 5626), 'numpy.argsort', 'np.argsort', (['lens'], {}), '(lens)\n', (5620, 5626), True, 'import numpy as np\n'), ((5750, 5790), 'numpy.append', 'np.append', (['lens[0]', '(lens[1:] - lens[:-1])'], {}), '(lens[0], lens[1:] - lens[:-1])\n', (5759, 5790), True, 'import numpy as np\n'), ((6255, 6276), 'numpy.median', 'np.median', (['lens[1:-1]'], {}), '(lens[1:-1])\n', (6264, 6276), True, 'import numpy as np\n'), ((7232, 7255), 'numpy.array', 'np.array', (['points_to_add'], {}), '(points_to_add)\n', (7240, 7255), True, 'import numpy as np\n'), ((7971, 8029), 'geometry.get_point_in_other', 'geom.get_point_in_other', (['self.transform_coord_to_frame', 'pl'], {}), '(self.transform_coord_to_frame, pl)\n', (7994, 8029), True, 'import geometry as geom\n'), ((8082, 8148), 'geometry.get_point_in_other', 'geom.get_point_in_other', (['self.transform_coord_to_frame', "pd['free']"], {}), "(self.transform_coord_to_frame, pd['free'])\n", (8105, 8148), True, 'import geometry as geom\n'), ((8285, 8341), 'geometry.get_point_in_other', 'geom.get_point_in_other', (['self.transform_coord_to_map', 'pl'], 
{}), '(self.transform_coord_to_map, pl)\n', (8308, 8341), True, 'import geometry as geom\n'), ((8392, 8456), 'geometry.get_point_in_other', 'geom.get_point_in_other', (['self.transform_coord_to_map', "pd['free']"], {}), "(self.transform_coord_to_map, pd['free'])\n", (8415, 8456), True, 'import geometry as geom\n'), ((9612, 9648), 'cv2.addWeighted', 'cv2.addWeighted', (['c', '(0.8)', 'i', '(0.2)', '(0.0)'], {}), '(c, 0.8, i, 0.2, 0.0)\n', (9627, 9648), False, 'import cv2\n'), ((2981, 3068), 'cv2.circle', 'cv2.circle', (['frame_layer', 'p'], {'radius': '(5)', 'color': "data['color']", 'thickness': "data['thick']"}), "(frame_layer, p, radius=5, color=data['color'], thickness=data[\n 'thick'])\n", (2991, 3068), False, 'import cv2\n'), ((4742, 4792), 'geometry.get_perpendicular_to_line_from_point', 'geom.get_perpendicular_to_line_from_point', (['pl', 'car'], {}), '(pl, car)\n', (4783, 4792), True, 'import geometry as geom\n'), ((5048, 5070), 'numpy.array', 'np.array', (["park['cars']"], {}), "(park['cars'])\n", (5056, 5070), True, 'import numpy as np\n'), ((5130, 5153), 'numpy.array', 'np.array', (["park['inter']"], {}), "(park['inter'])\n", (5138, 5153), True, 'import numpy as np\n'), ((5213, 5235), 'numpy.array', 'np.array', (["park['lens']"], {}), "(park['lens'])\n", (5221, 5235), True, 'import numpy as np\n'), ((6339, 6364), 'numpy.min', 'np.min', (['median_lens[1:-1]'], {}), '(median_lens[1:-1])\n', (6345, 6364), True, 'import numpy as np\n'), ((9975, 9989), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (9986, 9989), False, 'import cv2\n'), ((3747, 3778), 'numpy.array', 'np.array', (['[[(x1 + x2) / 2, y2]]'], {}), '([[(x1 + x2) / 2, y2]])\n', (3755, 3778), True, 'import numpy as np\n'), ((3906, 3921), 'math.tan', 'math.tan', (['alpha'], {}), '(alpha)\n', (3914, 3921), False, 'import math\n'), ((4158, 4170), 'numpy.array', 'np.array', (['pl'], {}), '(pl)\n', (4166, 4170), True, 'import numpy as np\n'), ((5510, 5535), 'geometry.get_lenght', 'geom.get_lenght', 
(['pl[0]', 'p'], {}), '(pl[0], p)\n', (5525, 5535), True, 'import geometry as geom\n'), ((282, 320), 'numpy.array', 'np.array', (["settings_map['frame_points']"], {}), "(settings_map['frame_points'])\n", (290, 320), True, 'import numpy as np\n'), ((367, 404), 'numpy.array', 'np.array', (["settings_map['interceptor']"], {}), "(settings_map['interceptor'])\n", (375, 404), True, 'import numpy as np\n'), ((5545, 5579), 'numpy.vstack', 'np.vstack', (["(pdict['inter'], pl[1])"], {}), "((pdict['inter'], pl[1]))\n", (5554, 5579), True, 'import numpy as np\n'), ((7081, 7146), 'geometry.get_point_on_line', 'geom.get_point_on_line', (['result_point', 'pl', '(direction * len_to_calc)'], {}), '(result_point, pl, direction * len_to_calc)\n', (7103, 7146), True, 'import geometry as geom\n')] |
"""
Module for eigenvalue analysis.
"""
import logging
from math import ceil, pi
import numpy as np
import scipy.io
from scipy.linalg import solve
from andes.io.txt import dump_data
from andes.plot import set_latex, set_style
from andes.routines.base import BaseRoutine
from andes.shared import div, matrix, plt, sparse, spdiag, spmatrix
from andes.utils.misc import elapsed
from andes.variables.report import report_info
logger = logging.getLogger(__name__)
DPI = None
class EIG(BaseRoutine):
"""
Eigenvalue analysis routine
"""
    def __init__(self, system, config):
        """Set up the routine: register config options and result holders.

        Parameters
        ----------
        system
            ANDES system instance to analyze.
        config
            Routine configuration; ``plot`` and ``tol`` options are added.
        """
        super().__init__(system=system, config=config)
        self.config.add(plot=0, tol=1e-6)
        self.config.add_extra("_help",
                              plot="show plot after computation",
                              tol="numerical tolerance to treat eigenvalues as zeros")
        self.config.add_extra("_alt", plot=(0, 1))
        # internal flags and storage
        self.As = None  # state matrix after removing the ones associated with zero T consts
        self.Asc = None  # the original complete As without reordering
        self.mu = None  # eigenvalues
        self.N = None  # right eigenvectors
        self.W = None  # left eigenvectors
        self.pfactors = None  # participation factors (see ``calc_pfactor``)
        # --- related to states with zero time constants (zs) ---
        self.zstate_idx = np.array([], dtype=int)  # indices of zero-T states
        self.nz_counts = None  # number of states with nonzero time constants
        # --- statistics --
        self.n_positive = 0
        self.n_zeros = 0
        self.n_negative = 0
        self.x_name = []  # names of the retained state variables
    def calc_As(self, dense=True):
        r"""
        Return state matrix and store to ``self.As``.

        Notes
        -----
        For systems in the mass-matrix formulation,

        .. math ::

            T \dot{x} = f(x, y) \\
            0 = g(x, y)

        Assume `T` is non-singular, the state matrix is calculated from

        .. math ::

            A_s = T^{-1} (f_x - f_y * g_y^{-1} * g_x)

        Parameters
        ----------
        dense : bool
            True to return a dense matrix, False to return a sparse one.

        Returns
        -------
        kvxopt.matrix
            state matrix
        """
        dae = self.system.dae
        # identify states with zero time constants first; they are
        # eliminated from the reduced matrix below
        self.find_zero_states()
        self.x_name = np.array(dae.x_name)
        self.As = self._reduce(dae.fx, dae.fy,
                               dae.gx, dae.gy, dae.Tf,
                               dense=dense)
        if len(self.zstate_idx) > 0:
            # keep the complete matrix, then reduce again with the zero-T
            # states reordered to the end and treated as algebraic variables
            self.Asc = self.As
            self.As = self._reduce(*self._reorder())
        return self.As
    def _reduce(self, fx, fy, gx, gy, Tf, dense=True):
        """
        Reduce algebraic equations (or states associated with zero time constants).

        Computes ``T^{-1} * (fx - fy * gy^{-1} * gx)``.

        Parameters
        ----------
        fx, fy, gx, gy
            Jacobian sub-matrices of the DAE.
        Tf
            Vector of time constants; zero entries are substituted by 1 so
            the inversion is safe (those rows belong to states that are
            eliminated elsewhere).
        dense : bool
            True to return the dense product; False to wrap it as sparse.

        Returns
        -------
        spmatrix
            The reduced state matrix
        """
        gyx = matrix(gx)
        # solve gy * X = gx; result ends up in gyx (presumably an in-place
        # solve — see the routine solver's ``linsolve``)
        self.solver.linsolve(gy, gyx)
        # replace zero time constants by 1 to avoid division by zero
        Tfnz = Tf + np.ones_like(Tf) * np.equal(Tf, 0.0)
        iTf = spdiag((1 / Tfnz).tolist())
        if dense:
            return iTf * (fx - fy * gyx)
        else:
            return sparse(iTf * (fx - fy * gyx))
    def _reorder(self):
        """
        reorder As by moving rows and cols associated with zero time constants to the end.

        Builds a permutation matrix that swaps each zero-T state index with
        an index past ``nz_counts``, applies it to ``self.As`` from both
        sides, truncates ``x_name`` to the retained states, and returns the
        partitioned blocks.

        Returns `fx`, `fy`, `gx`, `gy`, `Tf`.
        """
        dae = self.system.dae
        rows = np.arange(dae.n, dtype=int)
        cols = np.arange(dae.n, dtype=int)
        vals = np.ones(dae.n, dtype=float)
        swaps = []
        bidx = self.nz_counts
        # NOTE(review): the loop only scans the first ``len(zstate_idx)``
        # indices — it assumes zero-T states appear in that leading range;
        # confirm this holds for all orderings of ``dae.Tf``.
        for ii in range(dae.n - self.nz_counts):
            if ii in self.zstate_idx:
                while (bidx in self.zstate_idx):
                    bidx += 1
                cols[ii] = bidx
                rows[bidx] = ii
                swaps.append((ii, bidx))
        # swap the variable names to match the permuted matrix
        for fr, bk in swaps:
            bk_name = self.x_name[bk]
            self.x_name[fr] = bk_name
        self.x_name = self.x_name[:self.nz_counts]
        # compute the permutation matrix for `As` containing non-states
        perm = spmatrix(matrix(vals), matrix(rows), matrix(cols))
        As_perm = perm * sparse(self.As) * perm
        self.As_perm = As_perm
        # partition: leading block = true states, trailing block = zero-T
        # states treated as algebraic variables
        nfx = As_perm[:self.nz_counts, :self.nz_counts]
        nfy = As_perm[:self.nz_counts, self.nz_counts:]
        ngx = As_perm[self.nz_counts:, :self.nz_counts]
        ngy = As_perm[self.nz_counts:, self.nz_counts:]
        nTf = np.delete(self.system.dae.Tf, self.zstate_idx)
        return nfx, nfy, ngx, ngy, nTf
def calc_eig(self, As=None):
"""
Calculate eigenvalues and right eigen vectors.
This function is a wrapper to ``np.linalg.eig``.
Results are returned but not stored to ``EIG``.
Returns
-------
np.array(dtype=complex)
eigenvalues
np.array()
right eigenvectors
"""
if As is None:
As = self.As
# `mu`: eigenvalues, `N`: right eigenvectors with each column corr. to one eigvalue
mu, N = np.linalg.eig(As)
return mu, N
def _store_stats(self):
"""
Count and store the number of eigenvalues with positive, zero,
and negative real parts using ``self.mu``.
"""
mu_real = self.mu.real
self.n_positive = np.count_nonzero(mu_real > self.config.tol)
self.n_zeros = np.count_nonzero(abs(mu_real) <= self.config.tol)
self.n_negative = np.count_nonzero(mu_real < self.config.tol)
return True
    def calc_pfactor(self, As=None):
        """
        Compute participation factor of states in eigenvalues.

        Each row in the participation factor correspond to one state,
        and each column correspond to one mode.

        Parameters
        ----------
        As : np.array or None
            State matrix to process. If None, use ``self.As``.

        Returns
        -------
        np.array(dtype=complex)
            eigenvalues
        np.array
            participation factor matrix (rounded to 5 decimals)
        np.array
            right eigenvector matrix ``N`` (one column per mode)
        np.array
            left eigenvector matrix ``W``
        """
        mu, N = self.calc_eig(As)
        n_state = len(mu)
        # --- calculate the left eigenvectors and store to ``W`` ---
        # by biorthogonality `W.T @ N = I`, the left eigenvectors form
        # `inv(N)^T`; solving `N @ WT = I` avoids an explicit inverse
        Weye = np.eye(n_state)
        WT = solve(N, Weye, overwrite_b=True)
        W = WT.T
        # --- calculate participation factor ---
        pfactor = np.abs(W) * np.abs(N)
        b = np.ones(n_state)
        W_abs = b @ pfactor  # per-column sums of |W|*|N|
        pfactor = pfactor.T
        # --- normalize participation factor ---
        # NOTE(review): `W_abs` was computed as column sums of the
        # pre-transpose matrix while `item` indexes columns of the
        # transposed one — confirm the orientation matches the docstring.
        for item in range(n_state):
            pfactor[:, item] /= W_abs[item]
        pfactor = np.round(pfactor, 5)
        return mu, pfactor, N, W
def summary(self):
"""
Print out a summary to ``logger.info``.
"""
out = list()
out.append('')
out.append('-> Eigenvalue Analysis:')
out_str = '\n'.join(out)
logger.info(out_str)
def find_zero_states(self):
"""
Find the indices of states associated with zero time constants in ``x``.
"""
system = self.system
self.zstate_idx = np.array([], dtype=int)
if sum(system.dae.Tf != 0) != len(system.dae.Tf):
self.zstate_idx = np.where(system.dae.Tf == 0)[0]
logger.info("%d states are associated with zero time constants. ", len(self.zstate_idx))
logger.debug([system.dae.x_name[i] for i in self.zstate_idx])
self.nz_counts = system.dae.n - len(self.zstate_idx)
    def _pre_check(self):
        """
        Helper function for pre-computation checks.

        Requires a converged power flow; initializes the time-domain
        simulation (and takes one implicit step) when it has not been
        initialized yet.

        Returns
        -------
        bool
            True when the analysis may proceed.
        """
        system = self.system
        status = True
        if system.PFlow.converged is False:
            logger.warning('Power flow not solved. Eig analysis will not continue.')
            status = False
        if system.TDS.initialized is False:
            system.TDS.init()
            system.TDS.itm_step()
        # NOTE(review): the empty-model check below only runs when TDS is
        # already initialized — a freshly initialized, model-less case is
        # not caught here; confirm this is intended.
        elif system.dae.n == 0:
            logger.error('No dynamic model. Eig analysis will not continue.')
            status = False
        return status
    def run(self, **kwargs):
        """
        Run small-signal stability analysis.

        Performs pre-checks, builds the state matrix, computes eigenvalues
        and participation factors, stores statistics, and writes the report
        plus optional outputs (state-matrix export, plot).

        Returns
        -------
        bool
            True on success; False (with ``system.exit_code`` incremented)
            when the pre-checks fail.
        """
        succeed = False
        system = self.system
        if not self._pre_check():
            system.exit_code += 1
            return False
        self.summary()
        t1, s = elapsed()
        self.calc_As()
        self.mu, self.pfactors, self.N, self.W = self.calc_pfactor()
        self._store_stats()
        t2, s = elapsed(t1)
        self.exec_time = t2 - t1
        logger.info(self.stats())
        logger.info('Eigenvalue analysis finished in {:s}.'.format(s))
        if not self.system.files.no_output:
            self.report()
            if system.options.get('state_matrix'):
                self.export_mat()
        if self.config.plot:
            self.plot()
        succeed = True
        # NOTE(review): `succeed` is set True unconditionally just above,
        # so this branch is currently unreachable; kept for symmetry with
        # the early-return failure path.
        if not succeed:
            system.exit_code += 1
        return succeed
def stats(self):
"""
Return statistics of results in a string.
"""
out = list()
out.append(' Positive %6g' % self.n_positive)
out.append(' Zeros %6g' % self.n_zeros)
out.append(' Negative %6g' % self.n_negative)
return '\n'.join(out)
    def plot(self, mu=None, fig=None, ax=None,
             left=-6, right=0.5, ymin=-8, ymax=8, damping=0.05,
             line_width=0.5, dpi=DPI, figsize=None, base_color='black',
             show=True, latex=True, style='default'):
        """
        Plot utility for eigenvalues in the S domain.

        Parameters
        ----------
        mu : array, optional
            an array of complex eigenvalues
        fig : figure handl, optional
            existing matplotlib figure handle
        ax : axis handle, optional
            existing axis handle
        left : int, optional
            left tick for the x-axis, by default -6
        right : float, optional
            right tick, by default 0.5
        ymin : int, optional
            bottom tick, by default -8
        ymax : int, optional
            top tick, by default 8
        damping : float, optional
            damping value for which the dash plots are drawn
        line_width : float, optional
            default line width, by default 0.5
        dpi : int, optional
            figure dpi
        figsize : [type], optional
            default figure size, by default None
        base_color : str, optional
            base color for negative eigenvalues
        show : bool, optional
            True to show figure after plot, by default True
        latex : bool, optional
            True to use latex, by default True
        style : str, optional
            matplotlib style name passed to ``set_style``, by default
            'default'

        Returns
        -------
        figure
            matplotlib figure object
        axis
            matplotlib axis object
        """
        set_style(style)
        if mu is None:
            mu = self.mu
        mu_real = mu.real
        mu_imag = mu.imag
        # split eigenvalues into zero / positive / negative real-part
        # groups using the same tolerance as the statistics
        p_mu_real, p_mu_imag = list(), list()
        z_mu_real, z_mu_imag = list(), list()
        n_mu_real, n_mu_imag = list(), list()
        for re, im in zip(mu_real, mu_imag):
            if abs(re) <= self.config.tol:
                z_mu_real.append(re)
                z_mu_imag.append(im)
            elif re > self.config.tol:
                p_mu_real.append(re)
                p_mu_imag.append(im)
            elif re < -self.config.tol:
                n_mu_real.append(re)
                n_mu_imag.append(im)
        if latex:
            set_latex()
        if fig is None or ax is None:
            fig = plt.figure(dpi=dpi, figsize=figsize)
            ax = plt.gca()
        # zeros as hollow green circles; stable/unstable as black/red crosses
        ax.scatter(z_mu_real, z_mu_imag, marker='o', s=40, linewidth=0.5, facecolors='none', edgecolors='green')
        ax.scatter(n_mu_real, n_mu_imag, marker='x', s=40, linewidth=0.5, color=base_color)
        ax.scatter(p_mu_real, p_mu_imag, marker='x', s=40, linewidth=0.5, color='red')
        # axes lines
        ax.axhline(linewidth=0.5, color='grey', linestyle='--')
        ax.axvline(linewidth=0.5, color='grey', linestyle='--')
        # TODO: Improve the damping and range
        # --- plot 5% damping lines ---
        xin = np.arange(left, 0, 0.01)
        yneg = xin / damping
        ypos = - xin / damping
        ax.plot(xin, yneg, color='grey', linewidth=line_width, linestyle='--')
        ax.plot(xin, ypos, color='grey', linewidth=line_width, linestyle='--')
        # --- damping lines end ---
        if latex:
            ax.set_xlabel('Real [$s^{-1}$]')
            ax.set_ylabel('Imaginary [$s^{-1}$]')
        else:
            ax.set_xlabel('Real [s -1]')
            ax.set_ylabel('Imaginary [s -1]')
        ax.set_xlim(left=left, right=right)
        ax.set_ylim(ymin, ymax)
        if show is True:
            plt.show()
        return fig, ax
    def export_mat(self):
        """
        Export state matrix to a ``<CaseName>_As.mat`` file with the variable name ``As``, where
        ``<CaseName>`` is the test case name.

        State variable names are stored in variables ``x_name`` and ``x_tex_name``.

        Returns
        -------
        bool
            True if successful
        """
        system = self.system
        # Asc is the complete, un-reordered matrix (None when no zero-T states)
        out = {'As': self.As,
               'Asc': self.Asc,
               'x_name': np.array(system.dae.x_name, dtype=object),
               'x_tex_name': np.array(system.dae.x_tex_name, dtype=object),
               }
        scipy.io.savemat(system.files.mat, mdict=out)
        logger.info('State matrix saved to "%s"', system.files.mat)
        return True
    def post_process(self):
        """
        Post processing of eigenvalues.

        Returns
        -------
        tuple
            ``(freq, ufreq, damping, numeral)``: damped frequency [Hz],
            undamped frequency [Hz], damping in percent, and a display
            label per eigenvalue ('*' marks a zero real part within
            tolerance, '**' a positive one).
        """
        # --- statistics ---
        n_states = len(self.mu)
        mu_real = self.mu.real
        numeral = [''] * n_states
        for idx, item in enumerate(range(n_states)):
            if abs(mu_real[idx]) <= self.config.tol:
                marker = '*'
            elif mu_real[idx] > self.config.tol:
                marker = '**'
            else:
                marker = ''
            numeral[idx] = '#' + str(idx + 1) + marker
        # compute frequency, un-damped frequency and damping
        freq = np.zeros(n_states)
        ufreq = np.zeros(n_states)
        damping = np.zeros(n_states)
        for idx, item in enumerate(self.mu):
            if item.imag == 0:
                # purely real eigenvalue: no oscillation to characterize
                freq[idx] = 0
                ufreq[idx] = 0
                damping[idx] = 0
            else:
                ufreq[idx] = abs(item) / 2 / pi
                freq[idx] = abs(item.imag / 2 / pi)
                damping[idx] = -div(item.real, abs(item)) * 100
        return freq, ufreq, damping, numeral
    def report(self, x_name=None, **kwargs):
        """
        Save eigenvalue analysis reports.

        Builds the text report sections (info header, statistics,
        per-eigenvalue data, and participation factors paginated 7 modes
        per block) and writes them with ``dump_data``.

        Parameters
        ----------
        x_name : array-like or None
            State-variable names; defaults to ``self.x_name``.

        Returns
        -------
        None
        """
        if x_name is None:
            x_name = self.x_name
        n_states = len(self.mu)
        mu_real = self.mu.real
        mu_imag = self.mu.imag
        freq, ufreq, damping, numeral = self.post_process()
        # obtain most associated variables (state with the largest
        # participation factor for each mode row)
        var_assoc = []
        for prow in range(n_states):
            temp_row = self.pfactors[prow, :]
            name_idx = list(temp_row).index(max(temp_row))
            var_assoc.append(x_name[name_idx])
        text, header, rowname, data = list(), list(), list(), list()
        # opening info section
        text.append(report_info(self.system))
        header.append(None)
        rowname.append(None)
        data.append(None)
        text.append('')
        text.append('EIGENVALUE ANALYSIS REPORT')
        header.append([])
        rowname.append([])
        data.append([])
        text.append('STATISTICS\n')
        header.append([''])
        rowname.append(['Positives', 'Zeros', 'Negatives'])
        data.append((self.n_positive, self.n_zeros, self.n_negative))
        text.append('EIGENVALUE DATA\n')
        header.append([
            'Most Associated',
            'Real',
            'Imag.',
            'Damped Freq.',
            'Frequency',
            'Damping [%]'])
        rowname.append(numeral)
        data.append(
            [var_assoc,
             list(mu_real),
             list(mu_imag),
             freq,
             ufreq,
             damping])
        n_cols = 7  # columns per block
        n_block = int(ceil(n_states / n_cols))
        # participation factors are skipped for very large systems to keep
        # the report readable
        if n_block <= 100:
            for idx in range(n_block):
                start = n_cols * idx
                end = n_cols * (idx + 1)
                text.append('PARTICIPATION FACTORS [{}/{}]\n'.format(
                    idx + 1, n_block))
                header.append(numeral[start:end])
                rowname.append(x_name)
                data.append(self.pfactors[start:end, :])
        dump_data(text, header, rowname, data, self.system.files.eig)
        logger.info('Report saved to "%s".', self.system.files.eig)
| [
"logging.getLogger",
"andes.shared.plt.show",
"numpy.equal",
"numpy.count_nonzero",
"numpy.array",
"andes.shared.plt.figure",
"numpy.arange",
"numpy.where",
"numpy.delete",
"andes.shared.matrix",
"numpy.round",
"numpy.abs",
"numpy.eye",
"numpy.ones",
"numpy.linalg.eig",
"andes.variable... | [((435, 462), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (452, 462), False, 'import logging\n'), ((1396, 1419), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (1404, 1419), True, 'import numpy as np\n'), ((2175, 2195), 'numpy.array', 'np.array', (['dae.x_name'], {}), '(dae.x_name)\n', (2183, 2195), True, 'import numpy as np\n'), ((2754, 2764), 'andes.shared.matrix', 'matrix', (['gx'], {}), '(gx)\n', (2760, 2764), False, 'from andes.shared import div, matrix, plt, sparse, spdiag, spmatrix\n'), ((3258, 3285), 'numpy.arange', 'np.arange', (['dae.n'], {'dtype': 'int'}), '(dae.n, dtype=int)\n', (3267, 3285), True, 'import numpy as np\n'), ((3301, 3328), 'numpy.arange', 'np.arange', (['dae.n'], {'dtype': 'int'}), '(dae.n, dtype=int)\n', (3310, 3328), True, 'import numpy as np\n'), ((3344, 3371), 'numpy.ones', 'np.ones', (['dae.n'], {'dtype': 'float'}), '(dae.n, dtype=float)\n', (3351, 3371), True, 'import numpy as np\n'), ((4341, 4387), 'numpy.delete', 'np.delete', (['self.system.dae.Tf', 'self.zstate_idx'], {}), '(self.system.dae.Tf, self.zstate_idx)\n', (4350, 4387), True, 'import numpy as np\n'), ((4951, 4968), 'numpy.linalg.eig', 'np.linalg.eig', (['As'], {}), '(As)\n', (4964, 4968), True, 'import numpy as np\n'), ((5225, 5268), 'numpy.count_nonzero', 'np.count_nonzero', (['(mu_real > self.config.tol)'], {}), '(mu_real > self.config.tol)\n', (5241, 5268), True, 'import numpy as np\n'), ((5368, 5411), 'numpy.count_nonzero', 'np.count_nonzero', (['(mu_real < self.config.tol)'], {}), '(mu_real < self.config.tol)\n', (5384, 5411), True, 'import numpy as np\n'), ((6196, 6211), 'numpy.eye', 'np.eye', (['n_state'], {}), '(n_state)\n', (6202, 6211), True, 'import numpy as np\n'), ((6225, 6257), 'scipy.linalg.solve', 'solve', (['N', 'Weye'], {'overwrite_b': '(True)'}), '(N, Weye, overwrite_b=True)\n', (6230, 6257), False, 'from scipy.linalg import solve\n'), ((6377, 6393), 'numpy.ones', 
'np.ones', (['n_state'], {}), '(n_state)\n', (6384, 6393), True, 'import numpy as np\n'), ((6598, 6618), 'numpy.round', 'np.round', (['pfactor', '(5)'], {}), '(pfactor, 5)\n', (6606, 6618), True, 'import numpy as np\n'), ((7094, 7117), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (7102, 7117), True, 'import numpy as np\n'), ((8345, 8354), 'andes.utils.misc.elapsed', 'elapsed', ([], {}), '()\n', (8352, 8354), False, 'from andes.utils.misc import elapsed\n'), ((8492, 8503), 'andes.utils.misc.elapsed', 'elapsed', (['t1'], {}), '(t1)\n', (8499, 8503), False, 'from andes.utils.misc import elapsed\n'), ((10843, 10859), 'andes.plot.set_style', 'set_style', (['style'], {}), '(style)\n', (10852, 10859), False, 'from andes.plot import set_latex, set_style\n'), ((12197, 12221), 'numpy.arange', 'np.arange', (['left', '(0)', '(0.01)'], {}), '(left, 0, 0.01)\n', (12206, 12221), True, 'import numpy as np\n'), ((14205, 14223), 'numpy.zeros', 'np.zeros', (['n_states'], {}), '(n_states)\n', (14213, 14223), True, 'import numpy as np\n'), ((14240, 14258), 'numpy.zeros', 'np.zeros', (['n_states'], {}), '(n_states)\n', (14248, 14258), True, 'import numpy as np\n'), ((14277, 14295), 'numpy.zeros', 'np.zeros', (['n_states'], {}), '(n_states)\n', (14285, 14295), True, 'import numpy as np\n'), ((16813, 16874), 'andes.io.txt.dump_data', 'dump_data', (['text', 'header', 'rowname', 'data', 'self.system.files.eig'], {}), '(text, header, rowname, data, self.system.files.eig)\n', (16822, 16874), False, 'from andes.io.txt import dump_data\n'), ((2996, 3025), 'andes.shared.sparse', 'sparse', (['(iTf * (fx - fy * gyx))'], {}), '(iTf * (fx - fy * gyx))\n', (3002, 3025), False, 'from andes.shared import div, matrix, plt, sparse, spdiag, spmatrix\n'), ((3981, 3993), 'andes.shared.matrix', 'matrix', (['vals'], {}), '(vals)\n', (3987, 3993), False, 'from andes.shared import div, matrix, plt, sparse, spdiag, spmatrix\n'), ((3995, 4007), 'andes.shared.matrix', 'matrix', 
(['rows'], {}), '(rows)\n', (4001, 4007), False, 'from andes.shared import div, matrix, plt, sparse, spdiag, spmatrix\n'), ((4009, 4021), 'andes.shared.matrix', 'matrix', (['cols'], {}), '(cols)\n', (4015, 4021), False, 'from andes.shared import div, matrix, plt, sparse, spdiag, spmatrix\n'), ((6343, 6352), 'numpy.abs', 'np.abs', (['W'], {}), '(W)\n', (6349, 6352), True, 'import numpy as np\n'), ((6355, 6364), 'numpy.abs', 'np.abs', (['N'], {}), '(N)\n', (6361, 6364), True, 'import numpy as np\n'), ((11520, 11531), 'andes.plot.set_latex', 'set_latex', ([], {}), '()\n', (11529, 11531), False, 'from andes.plot import set_latex, set_style\n'), ((11589, 11625), 'andes.shared.plt.figure', 'plt.figure', ([], {'dpi': 'dpi', 'figsize': 'figsize'}), '(dpi=dpi, figsize=figsize)\n', (11599, 11625), False, 'from andes.shared import div, matrix, plt, sparse, spdiag, spmatrix\n'), ((11643, 11652), 'andes.shared.plt.gca', 'plt.gca', ([], {}), '()\n', (11650, 11652), False, 'from andes.shared import div, matrix, plt, sparse, spdiag, spmatrix\n'), ((12807, 12817), 'andes.shared.plt.show', 'plt.show', ([], {}), '()\n', (12815, 12817), False, 'from andes.shared import div, matrix, plt, sparse, spdiag, spmatrix\n'), ((13313, 13354), 'numpy.array', 'np.array', (['system.dae.x_name'], {'dtype': 'object'}), '(system.dae.x_name, dtype=object)\n', (13321, 13354), True, 'import numpy as np\n'), ((13385, 13430), 'numpy.array', 'np.array', (['system.dae.x_tex_name'], {'dtype': 'object'}), '(system.dae.x_tex_name, dtype=object)\n', (13393, 13430), True, 'import numpy as np\n'), ((15446, 15470), 'andes.variables.report.report_info', 'report_info', (['self.system'], {}), '(self.system)\n', (15457, 15470), False, 'from andes.variables.report import report_info\n'), ((16379, 16402), 'math.ceil', 'ceil', (['(n_states / n_cols)'], {}), '(n_states / n_cols)\n', (16383, 16402), False, 'from math import ceil, pi\n'), ((2824, 2840), 'numpy.ones_like', 'np.ones_like', (['Tf'], {}), '(Tf)\n', (2836, 
2840), True, 'import numpy as np\n'), ((2843, 2860), 'numpy.equal', 'np.equal', (['Tf', '(0.0)'], {}), '(Tf, 0.0)\n', (2851, 2860), True, 'import numpy as np\n'), ((4048, 4063), 'andes.shared.sparse', 'sparse', (['self.As'], {}), '(self.As)\n', (4054, 4063), False, 'from andes.shared import div, matrix, plt, sparse, spdiag, spmatrix\n'), ((7208, 7236), 'numpy.where', 'np.where', (['(system.dae.Tf == 0)'], {}), '(system.dae.Tf == 0)\n', (7216, 7236), True, 'import numpy as np\n')] |
import os
import sys
import numpy as np
import tensorflow as tf
import tensors_saver
tensors_saver.set_out_path(sys.argv[1])
x = np.array([
[1, -2.5, 0.4],
[0.2, -0.341, 0.7],
[-2.3, -12.5, 8.4],
[1.9, 1.2, 1.4],
[-0.23, -1.6, 1.4]
])
y = np.array([
[0.1, 0.5, 0.4],
[0.2, 0.1, 0.7],
[0.8, 0.05, 0.15],
[0.3, 0.6, 0.1],
[0.7, 0.1, 0.2]
])
x_node = tf.Variable(x, dtype=tf.float32)
y_node = tf.Variable(y, dtype=tf.float32)
cross_node = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_node, logits=x_node)
loss_node = tf.reduce_mean(cross_node)
dx_node = tf.gradients(loss_node, x_node)[0]
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
tf_loss = sess.run(loss_node)
tf_dx = sess.run(dx_node)
tensors_saver.add(tf_loss)
tensors_saver.add(tf_dx)
| [
"tensorflow.Variable",
"tensorflow.Session",
"tensors_saver.add",
"tensorflow.global_variables_initializer",
"numpy.array",
"tensorflow.gradients",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.reduce_mean",
"tensors_saver.set_out_path"
] | [((87, 126), 'tensors_saver.set_out_path', 'tensors_saver.set_out_path', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (113, 126), False, 'import tensors_saver\n'), ((132, 239), 'numpy.array', 'np.array', (['[[1, -2.5, 0.4], [0.2, -0.341, 0.7], [-2.3, -12.5, 8.4], [1.9, 1.2, 1.4], [\n -0.23, -1.6, 1.4]]'], {}), '([[1, -2.5, 0.4], [0.2, -0.341, 0.7], [-2.3, -12.5, 8.4], [1.9, 1.2,\n 1.4], [-0.23, -1.6, 1.4]])\n', (140, 239), True, 'import numpy as np\n'), ((263, 365), 'numpy.array', 'np.array', (['[[0.1, 0.5, 0.4], [0.2, 0.1, 0.7], [0.8, 0.05, 0.15], [0.3, 0.6, 0.1], [0.7,\n 0.1, 0.2]]'], {}), '([[0.1, 0.5, 0.4], [0.2, 0.1, 0.7], [0.8, 0.05, 0.15], [0.3, 0.6, \n 0.1], [0.7, 0.1, 0.2]])\n', (271, 365), True, 'import numpy as np\n'), ((393, 425), 'tensorflow.Variable', 'tf.Variable', (['x'], {'dtype': 'tf.float32'}), '(x, dtype=tf.float32)\n', (404, 425), True, 'import tensorflow as tf\n'), ((435, 467), 'tensorflow.Variable', 'tf.Variable', (['y'], {'dtype': 'tf.float32'}), '(y, dtype=tf.float32)\n', (446, 467), True, 'import tensorflow as tf\n'), ((481, 550), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'y_node', 'logits': 'x_node'}), '(labels=y_node, logits=x_node)\n', (520, 550), True, 'import tensorflow as tf\n'), ((563, 589), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_node'], {}), '(cross_node)\n', (577, 589), True, 'import tensorflow as tf\n'), ((643, 655), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (653, 655), True, 'import tensorflow as tf\n'), ((663, 696), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (694, 696), True, 'import tensorflow as tf\n'), ((769, 795), 'tensors_saver.add', 'tensors_saver.add', (['tf_loss'], {}), '(tf_loss)\n', (786, 795), False, 'import tensors_saver\n'), ((796, 820), 'tensors_saver.add', 'tensors_saver.add', (['tf_dx'], {}), '(tf_dx)\n', (813, 820), False, 'import tensors_saver\n'), ((600, 631), 
'tensorflow.gradients', 'tf.gradients', (['loss_node', 'x_node'], {}), '(loss_node, x_node)\n', (612, 631), True, 'import tensorflow as tf\n')] |
import numpy as np
from datashader.composite import add, saturate, over, source
src = np.array([[0x00000000, 0x00ffffff, 0xffffffff],
[0x7dff0000, 0x7d00ff00, 0x7d0000ff],
[0xffff0000, 0xff000000, 0x3a3b3c3d]], dtype='uint32')
clear = np.uint32(0)
clear_white = np.uint32(0x00ffffff)
white = np.uint32(0xffffffff)
blue = np.uint32(0xffff0000)
half_blue = np.uint32(0x7dff0000)
half_purple = np.uint32(0x7d7d007d)
def test_source():
o = src.copy()
o[0, :2] = clear
np.testing.assert_equal(source(src, clear), o)
o[0, :2] = clear_white
np.testing.assert_equal(source(src, clear_white), o)
o[0, :2] = half_blue
np.testing.assert_equal(source(src, half_blue), o)
def test_over():
o = src.copy()
o[0, 1] = 0
np.testing.assert_equal(over(src, clear), o)
np.testing.assert_equal(over(src, clear_white), o)
o = np.array([[0xffffffff, 0xffffffff, 0xffffffff],
[0xffff8282, 0xff82ff82, 0xff8282ff],
[0xffff0000, 0xff000000, 0xffd2d2d2]])
np.testing.assert_equal(over(src, white), o)
o = np.array([[0xffff0000, 0xffff0000, 0xffffffff],
[0xffff0000, 0xff827d00, 0xff82007d],
[0xffff0000, 0xff000000, 0xffd20d0d]])
np.testing.assert_equal(over(src, blue), o)
o = np.array([[0x7dff0000, 0x7dff0000, 0xffffffff],
[0xbcff0000, 0xbc56a800, 0xbc5600a8],
[0xffff0000, 0xff000000, 0x9ab51616]])
np.testing.assert_equal(over(src, half_blue), o)
o = np.array([[0x7d7d007d, 0x7d7d007d, 0xffffffff],
[0xbcd3002a, 0xbc2aa82a, 0xbc2a00d3],
[0xffff0000, 0xff000000, 0x9a641664]])
np.testing.assert_equal(over(src, half_purple), o)
def test_add():
o = src.copy()
o[0, 1] = 0
np.testing.assert_equal(add(src, clear), o)
np.testing.assert_equal(add(src, clear_white), o)
o = np.array([[0xffffffff, 0xffffffff, 0xffffffff],
[0xffffffff, 0xffffffff, 0xffffffff],
[0xffffffff, 0xffffffff, 0xffffffff]])
np.testing.assert_equal(add(src, white), o)
o = np.array([[0xffff0000, 0xffff0000, 0xffffffff],
[0xffff0000, 0xffff7d00, 0xffff007d],
[0xffff0000, 0xffff0000, 0xffff0d0d]])
np.testing.assert_equal(add(src, blue), o)
o = np.array([[0x7dff0000, 0x7dff0000, 0xffffffff],
[0xfaff0000, 0xfa7f7f00, 0xfa7f007f],
[0xffff0000, 0xff7d0000, 0xb7c01313]])
np.testing.assert_equal(add(src, half_blue), o)
o = np.array([[0x7d7d007d, 0x7d7d007d, 0xffffffff],
[0xfabe003e, 0xfa3e7f3e, 0xfa3e00be],
[0xffff003d, 0xff3d003d, 0xb7681368]])
np.testing.assert_equal(add(src, half_purple), o)
def test_saturate():
o = src.copy()
o[0, 1] = 0
np.testing.assert_equal(saturate(src, clear), o)
np.testing.assert_equal(saturate(src, clear_white), o)
o = np.full((3, 3), white, dtype='uint32')
np.testing.assert_equal(saturate(src, white), o)
o = np.full((3, 3), blue, dtype='uint32')
np.testing.assert_equal(saturate(src, blue), o)
o = np.array([[0x7dff0000, 0x7dff0000, 0xffff8282],
[0xfaff0000, 0xfa7f7f00, 0xfa7f007f],
[0xffff0000, 0xff7d0000, 0xb7c01313]])
np.testing.assert_equal(saturate(src, half_blue), o)
o = np.array([[0x7d7d007d, 0x7d7d007d, 0xffbf82bf],
[0xfabe003e, 0xfa3e7f3e, 0xfa3e00be],
[0xffbf003d, 0xff3d003d, 0xb7681368]])
np.testing.assert_equal(saturate(src, half_purple), o)
| [
"datashader.composite.source",
"datashader.composite.over",
"numpy.array",
"numpy.uint32",
"datashader.composite.add",
"numpy.full",
"datashader.composite.saturate"
] | [((88, 220), 'numpy.array', 'np.array', (['[[0, 16777215, 4294967295], [2113863680, 2097217280, 2097152255], [\n 4294901760, 4278190080, 976960573]]'], {'dtype': '"""uint32"""'}), "([[0, 16777215, 4294967295], [2113863680, 2097217280, 2097152255],\n [4294901760, 4278190080, 976960573]], dtype='uint32')\n", (96, 220), True, 'import numpy as np\n'), ((270, 282), 'numpy.uint32', 'np.uint32', (['(0)'], {}), '(0)\n', (279, 282), True, 'import numpy as np\n'), ((297, 316), 'numpy.uint32', 'np.uint32', (['(16777215)'], {}), '(16777215)\n', (306, 316), True, 'import numpy as np\n'), ((327, 348), 'numpy.uint32', 'np.uint32', (['(4294967295)'], {}), '(4294967295)\n', (336, 348), True, 'import numpy as np\n'), ((356, 377), 'numpy.uint32', 'np.uint32', (['(4294901760)'], {}), '(4294901760)\n', (365, 377), True, 'import numpy as np\n'), ((390, 411), 'numpy.uint32', 'np.uint32', (['(2113863680)'], {}), '(2113863680)\n', (399, 411), True, 'import numpy as np\n'), ((426, 447), 'numpy.uint32', 'np.uint32', (['(2105344125)'], {}), '(2105344125)\n', (435, 447), True, 'import numpy as np\n'), ((890, 1019), 'numpy.array', 'np.array', (['[[4294967295, 4294967295, 4294967295], [4294935170, 4286775170, 4286743295],\n [4294901760, 4278190080, 4292006610]]'], {}), '([[4294967295, 4294967295, 4294967295], [4294935170, 4286775170, \n 4286743295], [4294901760, 4278190080, 4292006610]])\n', (898, 1019), True, 'import numpy as np\n'), ((1108, 1237), 'numpy.array', 'np.array', (['[[4294901760, 4294901760, 4294967295], [4294901760, 4286741760, 4286709885],\n [4294901760, 4278190080, 4291955981]]'], {}), '([[4294901760, 4294901760, 4294967295], [4294901760, 4286741760, \n 4286709885], [4294901760, 4278190080, 4291955981]])\n', (1116, 1237), True, 'import numpy as np\n'), ((1325, 1454), 'numpy.array', 'np.array', (['[[2113863680, 2113863680, 4294967295], [3170828288, 3159795712, 3159752872],\n [4294901760, 4278190080, 2595558934]]'], {}), '([[2113863680, 2113863680, 4294967295], [3170828288, 
3159795712, \n 3159752872], [4294901760, 4278190080, 2595558934]])\n', (1333, 1454), True, 'import numpy as np\n'), ((1547, 1676), 'numpy.array', 'np.array', (['[[2105344125, 2105344125, 4294967295], [3167944746, 3156912170, 3156869331],\n [4294901760, 4278190080, 2590250596]]'], {}), '([[2105344125, 2105344125, 4294967295], [3167944746, 3156912170, \n 3156869331], [4294901760, 4278190080, 2590250596]])\n', (1555, 1676), True, 'import numpy as np\n'), ((1926, 2055), 'numpy.array', 'np.array', (['[[4294967295, 4294967295, 4294967295], [4294967295, 4294967295, 4294967295],\n [4294967295, 4294967295, 4294967295]]'], {}), '([[4294967295, 4294967295, 4294967295], [4294967295, 4294967295, \n 4294967295], [4294967295, 4294967295, 4294967295]])\n', (1934, 2055), True, 'import numpy as np\n'), ((2143, 2272), 'numpy.array', 'np.array', (['[[4294901760, 4294901760, 4294967295], [4294901760, 4294933760, 4294901885],\n [4294901760, 4294901760, 4294905101]]'], {}), '([[4294901760, 4294901760, 4294967295], [4294901760, 4294933760, \n 4294901885], [4294901760, 4294901760, 4294905101]])\n', (2151, 2272), True, 'import numpy as np\n'), ((2359, 2488), 'numpy.array', 'np.array', (['[[2113863680, 2113863680, 4294967295], [4211015680, 4202659584, 4202627199],\n [4294901760, 4286382080, 3082818323]]'], {}), '([[2113863680, 2113863680, 4294967295], [4211015680, 4202659584, \n 4202627199], [4294901760, 4286382080, 3082818323]])\n', (2367, 2488), True, 'import numpy as np\n'), ((2580, 2709), 'numpy.array', 'np.array', (['[[2105344125, 2105344125, 4294967295], [4206755902, 4198399806, 4198367422],\n [4294901821, 4282187837, 3077051240]]'], {}), '([[2105344125, 2105344125, 4294967295], [4206755902, 4198399806, \n 4198367422], [4294901821, 4282187837, 3077051240]])\n', (2588, 2709), True, 'import numpy as np\n'), ((2973, 3011), 'numpy.full', 'np.full', (['(3, 3)', 'white'], {'dtype': '"""uint32"""'}), "((3, 3), white, dtype='uint32')\n", (2980, 3011), True, 'import numpy as np\n'), ((3073, 
3110), 'numpy.full', 'np.full', (['(3, 3)', 'blue'], {'dtype': '"""uint32"""'}), "((3, 3), blue, dtype='uint32')\n", (3080, 3110), True, 'import numpy as np\n'), ((3171, 3300), 'numpy.array', 'np.array', (['[[2113863680, 2113863680, 4294935170], [4211015680, 4202659584, 4202627199],\n [4294901760, 4286382080, 3082818323]]'], {}), '([[2113863680, 2113863680, 4294935170], [4211015680, 4202659584, \n 4202627199], [4294901760, 4286382080, 3082818323]])\n', (3179, 3300), True, 'import numpy as np\n'), ((3397, 3526), 'numpy.array', 'np.array', (['[[2105344125, 2105344125, 4290740927], [4206755902, 4198399806, 4198367422],\n [4290707517, 4282187837, 3077051240]]'], {}), '([[2105344125, 2105344125, 4290740927], [4206755902, 4198399806, \n 4198367422], [4290707517, 4282187837, 3077051240]])\n', (3405, 3526), True, 'import numpy as np\n'), ((537, 555), 'datashader.composite.source', 'source', (['src', 'clear'], {}), '(src, clear)\n', (543, 555), False, 'from datashader.composite import add, saturate, over, source\n'), ((615, 639), 'datashader.composite.source', 'source', (['src', 'clear_white'], {}), '(src, clear_white)\n', (621, 639), False, 'from datashader.composite import add, saturate, over, source\n'), ((697, 719), 'datashader.composite.source', 'source', (['src', 'half_blue'], {}), '(src, half_blue)\n', (703, 719), False, 'from datashader.composite import add, saturate, over, source\n'), ((806, 822), 'datashader.composite.over', 'over', (['src', 'clear'], {}), '(src, clear)\n', (810, 822), False, 'from datashader.composite import add, saturate, over, source\n'), ((855, 877), 'datashader.composite.over', 'over', (['src', 'clear_white'], {}), '(src, clear_white)\n', (859, 877), False, 'from datashader.composite import add, saturate, over, source\n'), ((1079, 1095), 'datashader.composite.over', 'over', (['src', 'white'], {}), '(src, white)\n', (1083, 1095), False, 'from datashader.composite import add, saturate, over, source\n'), ((1297, 1312), 
'datashader.composite.over', 'over', (['src', 'blue'], {}), '(src, blue)\n', (1301, 1312), False, 'from datashader.composite import add, saturate, over, source\n'), ((1514, 1534), 'datashader.composite.over', 'over', (['src', 'half_blue'], {}), '(src, half_blue)\n', (1518, 1534), False, 'from datashader.composite import add, saturate, over, source\n'), ((1736, 1758), 'datashader.composite.over', 'over', (['src', 'half_purple'], {}), '(src, half_purple)\n', (1740, 1758), False, 'from datashader.composite import add, saturate, over, source\n'), ((1844, 1859), 'datashader.composite.add', 'add', (['src', 'clear'], {}), '(src, clear)\n', (1847, 1859), False, 'from datashader.composite import add, saturate, over, source\n'), ((1892, 1913), 'datashader.composite.add', 'add', (['src', 'clear_white'], {}), '(src, clear_white)\n', (1895, 1913), False, 'from datashader.composite import add, saturate, over, source\n'), ((2115, 2130), 'datashader.composite.add', 'add', (['src', 'white'], {}), '(src, white)\n', (2118, 2130), False, 'from datashader.composite import add, saturate, over, source\n'), ((2332, 2346), 'datashader.composite.add', 'add', (['src', 'blue'], {}), '(src, blue)\n', (2335, 2346), False, 'from datashader.composite import add, saturate, over, source\n'), ((2548, 2567), 'datashader.composite.add', 'add', (['src', 'half_blue'], {}), '(src, half_blue)\n', (2551, 2567), False, 'from datashader.composite import add, saturate, over, source\n'), ((2769, 2790), 'datashader.composite.add', 'add', (['src', 'half_purple'], {}), '(src, half_purple)\n', (2772, 2790), False, 'from datashader.composite import add, saturate, over, source\n'), ((2881, 2901), 'datashader.composite.saturate', 'saturate', (['src', 'clear'], {}), '(src, clear)\n', (2889, 2901), False, 'from datashader.composite import add, saturate, over, source\n'), ((2934, 2960), 'datashader.composite.saturate', 'saturate', (['src', 'clear_white'], {}), '(src, clear_white)\n', (2942, 2960), False, 'from 
datashader.composite import add, saturate, over, source\n'), ((3040, 3060), 'datashader.composite.saturate', 'saturate', (['src', 'white'], {}), '(src, white)\n', (3048, 3060), False, 'from datashader.composite import add, saturate, over, source\n'), ((3139, 3158), 'datashader.composite.saturate', 'saturate', (['src', 'blue'], {}), '(src, blue)\n', (3147, 3158), False, 'from datashader.composite import add, saturate, over, source\n'), ((3360, 3384), 'datashader.composite.saturate', 'saturate', (['src', 'half_blue'], {}), '(src, half_blue)\n', (3368, 3384), False, 'from datashader.composite import add, saturate, over, source\n'), ((3586, 3612), 'datashader.composite.saturate', 'saturate', (['src', 'half_purple'], {}), '(src, half_purple)\n', (3594, 3612), False, 'from datashader.composite import add, saturate, over, source\n')] |
import os
import os.path as op
import mne
import numpy as np
import pickle
from brain_utils import get_multiple_indices
import config as cfg
resolution = 3
spacing = "ico%d" % resolution
dataset_name = "camcan"
save_dir = "~/data/%s/" % dataset_name
save_dir = op.expanduser(save_dir)
subjects_dir = cfg.get_subjects_dir(dataset_name)
os.environ['SUBJECTS_DIR'] = subjects_dir
labels_path = subjects_dir + "fsaverage/label/"
fname = "aparca2009s"
labels_fname = subjects_dir + "fsaverage/label/%s.pkl" % fname
labels_raw = mne.read_labels_from_annot("fsaverage", "aparc.a2009s",
subjects_dir=subjects_dir)
labels_dict = {}
annot_dict = {}
for l in labels_raw:
labels_dict[l.name] = l
ll = l.morph(subject_to="fsaverage", grade=resolution)
annot_dict[ll.name] = ll
labels_dict_fname = save_dir + "label/%s-%s.pkl" % (fname, spacing)
with open(labels_dict_fname, "wb") as ff:
pickle.dump(annot_dict, ff)
fname = "aparca2009s"
labels_fname = subjects_dir + "fsaverage/label/%s-%s.pkl" % (fname, spacing)
labels_raw = mne.read_labels_from_annot("fsaverage", "aparc.a2009s",
subjects_dir=subjects_dir)
annot_dict_full = {}
for l in labels_raw:
annot_dict_full[l.name] = l
if resolution == 4:
sulci_names = ["S_front_middle",
"S_precentral-sup-part",
"S_oc_sup_and_transversal",
"S_orbital-H_Shaped",
"S_circular_insula_inf"]
gyri_names = ["G_rectus",
"G_front_inf-Opercular",
"G_oc-temp_lat-fusifor",
"G_parietal_sup",
"G_front_inf-Orbital"]
mix_names = ["S_oc_sup_and_transversal",
"G_front_inf-Opercular",
"S_precentral-sup-part",
"S_front_middle",
"G_pariet_inf-Supramar",
"S_occipital_ant",
"S_orbital_lateral",
"G_temp_sup-G_T_transv",
"G_and_S_paracentral"
]
else:
sulci_names = ["S_intrapariet_and_P_trans",
"G_pariet_inf-Supramar",
"S_front_sup"]
gyri_names = ["G_occipital_middle"
]
mix_names = sulci_names + gyri_names
label_lists = [sulci_names, gyri_names, mix_names]
label_types = ["sulci", "gyri", "any"]
for hemi in ["lh", "rh"]:
hemi_bool = int(hemi == "rh")
subject = "fsaverage"
vert_fname = save_dir + "vertno/%s-%s-filtered-%s-vrt.npy" %\
(subject, spacing, hemi)
vertno = np.load(vert_fname)
colors = [(1, 0, 0), (0, 0, 1), (0, 1, 1), (1, 1, 0),
(1, 0, 1), (0.5, 0.2, 0.8), (0.2, 0.9, 0.3),
(1., 0.5, 0.2), (0.5, 1, 0.9), (1, 0.5, 0.2)]
for label_type, label_names in zip(label_types, label_lists):
n_labels = len(label_names)
labels = np.zeros((n_labels, len(vertno)))
for i, name in enumerate(label_names):
ll = annot_dict[name + "-%s" % hemi]
indices = get_multiple_indices(ll.vertices, vertno)
labels[i, indices] = 1
labels_fname = save_dir + "label/labels-%s-%s-%s.npy" % \
(label_type, spacing, hemi)
np.save(labels_fname, labels)
| [
"pickle.dump",
"brain_utils.get_multiple_indices",
"mne.read_labels_from_annot",
"numpy.save",
"config.get_subjects_dir",
"numpy.load",
"os.path.expanduser"
] | [((264, 287), 'os.path.expanduser', 'op.expanduser', (['save_dir'], {}), '(save_dir)\n', (277, 287), True, 'import os.path as op\n'), ((304, 338), 'config.get_subjects_dir', 'cfg.get_subjects_dir', (['dataset_name'], {}), '(dataset_name)\n', (324, 338), True, 'import config as cfg\n'), ((529, 616), 'mne.read_labels_from_annot', 'mne.read_labels_from_annot', (['"""fsaverage"""', '"""aparc.a2009s"""'], {'subjects_dir': 'subjects_dir'}), "('fsaverage', 'aparc.a2009s', subjects_dir=\n subjects_dir)\n", (555, 616), False, 'import mne\n'), ((1079, 1166), 'mne.read_labels_from_annot', 'mne.read_labels_from_annot', (['"""fsaverage"""', '"""aparc.a2009s"""'], {'subjects_dir': 'subjects_dir'}), "('fsaverage', 'aparc.a2009s', subjects_dir=\n subjects_dir)\n", (1105, 1166), False, 'import mne\n'), ((937, 964), 'pickle.dump', 'pickle.dump', (['annot_dict', 'ff'], {}), '(annot_dict, ff)\n', (948, 964), False, 'import pickle\n'), ((2602, 2621), 'numpy.load', 'np.load', (['vert_fname'], {}), '(vert_fname)\n', (2609, 2621), True, 'import numpy as np\n'), ((3261, 3290), 'numpy.save', 'np.save', (['labels_fname', 'labels'], {}), '(labels_fname, labels)\n', (3268, 3290), True, 'import numpy as np\n'), ((3070, 3111), 'brain_utils.get_multiple_indices', 'get_multiple_indices', (['ll.vertices', 'vertno'], {}), '(ll.vertices, vertno)\n', (3090, 3111), False, 'from brain_utils import get_multiple_indices\n')] |
import time
import torch
import wandb
import random
import itertools
import numpy as np
from tqdm import tqdm
from collections import defaultdict
from torchsummary import summary
from mdlearn.utils import (
parse_args,
log_checkpoint,
log_latent_visualization,
resume_checkpoint,
get_torch_optimizer,
)
from mdlearn.nn.models.aae.point_3d_aae import AAE3d
from mdlearn.data.utils import train_valid_split
from mdlearn.data.datasets.point_cloud import PointCloudDataset
from config import Point3dAAEConfig
def main(cfg: Point3dAAEConfig):
# Create directory for new run, or use old directory if resuming from a checkpoint
exist_ok = cfg.resume_checkpoint is not None
cfg.output_path.mkdir(exist_ok=exist_ok)
checkpoint_path = cfg.output_path / "checkpoints"
checkpoint_path.mkdir(exist_ok=exist_ok)
# Create plot directory
plot_path = cfg.output_path / "plots"
plot_path.mkdir(exist_ok=exist_ok)
# Copy training data to output directory to not slow down other
# training processes using the same data.
# cfg.input_path = shutil.copy(cfg.input_path, cfg.output_path)
# Set random seed
torch.manual_seed(cfg.seed)
np.random.seed(cfg.seed)
random.seed(cfg.seed)
torch.set_num_threads(cfg.num_data_workers)
# Load training and validation data
dataset = PointCloudDataset(
path=cfg.input_path,
num_points=cfg.num_points,
num_features=cfg.num_features,
dataset_name=cfg.dataset_name,
scalar_dset_names=cfg.scalar_dset_names,
seed=cfg.seed,
cms_transform=cfg.cms_transform,
scalar_requires_grad=cfg.scalar_requires_grad,
in_memory=cfg.in_memory,
)
train_loader, valid_loader = train_valid_split(
dataset,
cfg.split_pct,
batch_size=cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=cfg.num_data_workers,
drop_last=True,
pin_memory=True,
persistent_workers=True,
prefetch_factor=cfg.prefetch_factor,
)
# Hardware
device = torch.device(
"cuda:0" if torch.cuda.is_available() and not cfg.ignore_gpu else "cpu"
)
# Create model
model = AAE3d(
cfg.num_points,
cfg.num_features,
cfg.latent_dim,
cfg.encoder_bias,
cfg.encoder_relu_slope,
cfg.encoder_filters,
cfg.encoder_kernels,
cfg.decoder_bias,
cfg.decoder_relu_slope,
cfg.decoder_affine_widths,
cfg.discriminator_bias,
cfg.discriminator_relu_slope,
cfg.discriminator_affine_widths,
)
model = model.to(device)
if cfg.wandb:
cfg.wandb.init(cfg, model, cfg.output_path)
# Diplay model
print(model)
summary(model, (3 + cfg.num_features, cfg.num_points))
disc_optimizer = get_torch_optimizer(
cfg.disc_optimizer.name,
cfg.disc_optimizer.hparams,
model.discriminator.parameters(),
)
ae_optimizer = get_torch_optimizer(
cfg.ae_optimizer.name,
cfg.ae_optimizer.hparams,
itertools.chain(model.encoder.parameters(), model.decoder.parameters()),
)
# Optionally initialize model with pre-trained weights
if cfg.init_weights is not None:
checkpoint = torch.load(cfg.init_weights, map_location="cpu")
model.load_state_dict(checkpoint["model_state_dict"])
print(f"Loaded model from {cfg.init_weights}")
# Optionally resume training from a checkpoint
if cfg.resume_checkpoint is not None:
start_epoch = resume_checkpoint(
cfg.resume_checkpoint,
model,
{"disc_optimizer": disc_optimizer, "ae_optimizer": ae_optimizer},
)
print(f"Resume training at epoch {start_epoch} from {cfg.resume_checkpoint}")
else:
start_epoch = 0
# Start training
for epoch in range(start_epoch, cfg.epochs):
train_start = time.time()
# Training
model.train()
avg_train_disc_loss, avg_train_ae_loss = train(
train_loader, model, disc_optimizer, ae_optimizer, device
)
print(
"====> Epoch: {} Train:\tAvg Disc loss: {:.4f}\tAvg AE loss: {:.4f}\tTime: {:.4f}".format(
epoch, avg_train_disc_loss, avg_train_ae_loss, time.time() - train_start
)
)
valid_start = time.time()
# Validation
model.eval()
with torch.no_grad():
avg_valid_recon_loss, latent_vectors, scalars = validate(
valid_loader, model, device
)
print(
"====> Epoch: {} Valid:\tAvg recon loss: {:.4f}\tTime: {:.4f}\n".format(
epoch, avg_valid_recon_loss, time.time() - valid_start
)
)
print("Total time: {:.4f}".format(time.time() - train_start))
metrics = {
"train_disc_loss": avg_train_disc_loss,
"train_ae_loss": avg_train_ae_loss,
"valid_recon_loss": avg_valid_recon_loss,
}
# Visualize latent space
if epoch % cfg.plot_log_every == 0:
html_strings = log_latent_visualization(
latent_vectors,
scalars,
plot_path,
epoch,
cfg.plot_n_samples,
cfg.plot_method,
)
if cfg.wandb:
for name, html_string in html_strings.items():
metrics[name] = wandb.Html(html_string, inject=False)
if cfg.wandb:
wandb.log(metrics)
if epoch % cfg.checkpoint_log_every == 0:
log_checkpoint(
checkpoint_path / f"checkpoint-epoch-{epoch}.pt",
epoch,
model,
{"disc_optimizer": disc_optimizer, "ae_optimizer": ae_optimizer},
)
def train(train_loader, model: AAE3d, disc_optimizer, ae_optimizer, device):
avg_disc_loss, avg_ae_loss = 0.0, 0.0
# Create prior noise buffer array
noise = torch.FloatTensor(cfg.batch_size, cfg.latent_dim).to(device)
for batch in tqdm(train_loader):
x = batch["X"].to(device, non_blocking=True)
# Encoder/Discriminator forward
# Get latent vectors
z = model.encode(x)
# Get prior noise
noise.normal_(mean=cfg.noise_mu, std=cfg.noise_std)
# Get discriminator logits
real_logits = model.discriminate(noise)
fake_logits = model.discriminate(z)
# Discriminator loss
critic_loss = model.critic_loss(real_logits, fake_logits)
gp_loss = model.gp_loss(noise, z)
disc_loss = critic_loss + cfg.lambda_gp * gp_loss
# Discriminator backward
disc_optimizer.zero_grad()
model.discriminator.zero_grad()
disc_loss.backward(retain_graph=True)
disc_optimizer.step()
# Decoder forward
recon_x = model.decode(z)
recon_loss = model.recon_loss(x, recon_x)
# Discriminator forward
fake_logit = model.discriminate(z)
decoder_loss = model.decoder_loss(fake_logit)
ae_loss = decoder_loss + cfg.lambda_rec * recon_loss
# AE backward
ae_optimizer.zero_grad()
model.decoder.zero_grad()
model.encoder.zero_grad()
ae_loss.backward()
# Collect loss
avg_disc_loss += disc_loss.item()
avg_ae_loss += ae_loss.item()
avg_disc_loss /= len(train_loader)
avg_ae_loss /= len(train_loader)
return avg_disc_loss, avg_ae_loss
def validate(valid_loader, model: AAE3d, device):
scalars = defaultdict(list)
latent_vectors = []
avg_ae_loss = 0.0
for batch in valid_loader:
x = batch["X"].to(device)
z = model.encode(x)
recon_x = model.decode(z)
avg_ae_loss += model.recon_loss(x, recon_x).item()
# Collect latent vectors for visualization
latent_vectors.append(z.cpu().numpy())
for name in cfg.scalar_dset_names:
scalars[name].append(batch[name].cpu().numpy())
avg_ae_loss /= len(valid_loader)
latent_vectors = np.concatenate(latent_vectors)
scalars = {name: np.concatenate(scalar) for name, scalar in scalars.items()}
return avg_ae_loss, latent_vectors, scalars
if __name__ == "__main__":
args = parse_args()
cfg = Point3dAAEConfig.from_yaml(args.config)
main(cfg)
| [
"mdlearn.utils.log_checkpoint",
"wandb.log",
"mdlearn.utils.resume_checkpoint",
"torch.cuda.is_available",
"mdlearn.data.utils.train_valid_split",
"torch.set_num_threads",
"numpy.random.seed",
"numpy.concatenate",
"torchsummary.summary",
"config.Point3dAAEConfig.from_yaml",
"mdlearn.utils.parse_... | [((1160, 1187), 'torch.manual_seed', 'torch.manual_seed', (['cfg.seed'], {}), '(cfg.seed)\n', (1177, 1187), False, 'import torch\n'), ((1192, 1216), 'numpy.random.seed', 'np.random.seed', (['cfg.seed'], {}), '(cfg.seed)\n', (1206, 1216), True, 'import numpy as np\n'), ((1221, 1242), 'random.seed', 'random.seed', (['cfg.seed'], {}), '(cfg.seed)\n', (1232, 1242), False, 'import random\n'), ((1248, 1291), 'torch.set_num_threads', 'torch.set_num_threads', (['cfg.num_data_workers'], {}), '(cfg.num_data_workers)\n', (1269, 1291), False, 'import torch\n'), ((1347, 1652), 'mdlearn.data.datasets.point_cloud.PointCloudDataset', 'PointCloudDataset', ([], {'path': 'cfg.input_path', 'num_points': 'cfg.num_points', 'num_features': 'cfg.num_features', 'dataset_name': 'cfg.dataset_name', 'scalar_dset_names': 'cfg.scalar_dset_names', 'seed': 'cfg.seed', 'cms_transform': 'cfg.cms_transform', 'scalar_requires_grad': 'cfg.scalar_requires_grad', 'in_memory': 'cfg.in_memory'}), '(path=cfg.input_path, num_points=cfg.num_points,\n num_features=cfg.num_features, dataset_name=cfg.dataset_name,\n scalar_dset_names=cfg.scalar_dset_names, seed=cfg.seed, cms_transform=\n cfg.cms_transform, scalar_requires_grad=cfg.scalar_requires_grad,\n in_memory=cfg.in_memory)\n', (1364, 1652), False, 'from mdlearn.data.datasets.point_cloud import PointCloudDataset\n'), ((1748, 1979), 'mdlearn.data.utils.train_valid_split', 'train_valid_split', (['dataset', 'cfg.split_pct'], {'batch_size': 'cfg.batch_size', 'shuffle': 'cfg.shuffle', 'num_workers': 'cfg.num_data_workers', 'drop_last': '(True)', 'pin_memory': '(True)', 'persistent_workers': '(True)', 'prefetch_factor': 'cfg.prefetch_factor'}), '(dataset, cfg.split_pct, batch_size=cfg.batch_size,\n shuffle=cfg.shuffle, num_workers=cfg.num_data_workers, drop_last=True,\n pin_memory=True, persistent_workers=True, prefetch_factor=cfg.\n prefetch_factor)\n', (1765, 1979), False, 'from mdlearn.data.utils import train_valid_split\n'), 
((2207, 2521), 'mdlearn.nn.models.aae.point_3d_aae.AAE3d', 'AAE3d', (['cfg.num_points', 'cfg.num_features', 'cfg.latent_dim', 'cfg.encoder_bias', 'cfg.encoder_relu_slope', 'cfg.encoder_filters', 'cfg.encoder_kernels', 'cfg.decoder_bias', 'cfg.decoder_relu_slope', 'cfg.decoder_affine_widths', 'cfg.discriminator_bias', 'cfg.discriminator_relu_slope', 'cfg.discriminator_affine_widths'], {}), '(cfg.num_points, cfg.num_features, cfg.latent_dim, cfg.encoder_bias,\n cfg.encoder_relu_slope, cfg.encoder_filters, cfg.encoder_kernels, cfg.\n decoder_bias, cfg.decoder_relu_slope, cfg.decoder_affine_widths, cfg.\n discriminator_bias, cfg.discriminator_relu_slope, cfg.\n discriminator_affine_widths)\n', (2212, 2521), False, 'from mdlearn.nn.models.aae.point_3d_aae import AAE3d\n'), ((2755, 2809), 'torchsummary.summary', 'summary', (['model', '(3 + cfg.num_features, cfg.num_points)'], {}), '(model, (3 + cfg.num_features, cfg.num_points))\n', (2762, 2809), False, 'from torchsummary import summary\n'), ((6118, 6136), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (6122, 6136), False, 'from tqdm import tqdm\n'), ((7621, 7638), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7632, 7638), False, 'from collections import defaultdict\n'), ((8132, 8162), 'numpy.concatenate', 'np.concatenate', (['latent_vectors'], {}), '(latent_vectors)\n', (8146, 8162), True, 'import numpy as np\n'), ((8333, 8345), 'mdlearn.utils.parse_args', 'parse_args', ([], {}), '()\n', (8343, 8345), False, 'from mdlearn.utils import parse_args, log_checkpoint, log_latent_visualization, resume_checkpoint, get_torch_optimizer\n'), ((8356, 8395), 'config.Point3dAAEConfig.from_yaml', 'Point3dAAEConfig.from_yaml', (['args.config'], {}), '(args.config)\n', (8382, 8395), False, 'from config import Point3dAAEConfig\n'), ((3280, 3328), 'torch.load', 'torch.load', (['cfg.init_weights'], {'map_location': '"""cpu"""'}), "(cfg.init_weights, map_location='cpu')\n", (3290, 3328), False, 
'import torch\n'), ((3562, 3679), 'mdlearn.utils.resume_checkpoint', 'resume_checkpoint', (['cfg.resume_checkpoint', 'model', "{'disc_optimizer': disc_optimizer, 'ae_optimizer': ae_optimizer}"], {}), "(cfg.resume_checkpoint, model, {'disc_optimizer':\n disc_optimizer, 'ae_optimizer': ae_optimizer})\n", (3579, 3679), False, 'from mdlearn.utils import parse_args, log_checkpoint, log_latent_visualization, resume_checkpoint, get_torch_optimizer\n'), ((3936, 3947), 'time.time', 'time.time', ([], {}), '()\n', (3945, 3947), False, 'import time\n'), ((4380, 4391), 'time.time', 'time.time', ([], {}), '()\n', (4389, 4391), False, 'import time\n'), ((8184, 8206), 'numpy.concatenate', 'np.concatenate', (['scalar'], {}), '(scalar)\n', (8198, 8206), True, 'import numpy as np\n'), ((4447, 4462), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4460, 4462), False, 'import torch\n'), ((5149, 5258), 'mdlearn.utils.log_latent_visualization', 'log_latent_visualization', (['latent_vectors', 'scalars', 'plot_path', 'epoch', 'cfg.plot_n_samples', 'cfg.plot_method'], {}), '(latent_vectors, scalars, plot_path, epoch, cfg.\n plot_n_samples, cfg.plot_method)\n', (5173, 5258), False, 'from mdlearn.utils import parse_args, log_checkpoint, log_latent_visualization, resume_checkpoint, get_torch_optimizer\n'), ((5563, 5581), 'wandb.log', 'wandb.log', (['metrics'], {}), '(metrics)\n', (5572, 5581), False, 'import wandb\n'), ((5645, 5793), 'mdlearn.utils.log_checkpoint', 'log_checkpoint', (["(checkpoint_path / f'checkpoint-epoch-{epoch}.pt')", 'epoch', 'model', "{'disc_optimizer': disc_optimizer, 'ae_optimizer': ae_optimizer}"], {}), "(checkpoint_path / f'checkpoint-epoch-{epoch}.pt', epoch,\n model, {'disc_optimizer': disc_optimizer, 'ae_optimizer': ae_optimizer})\n", (5659, 5793), False, 'from mdlearn.utils import parse_args, log_checkpoint, log_latent_visualization, resume_checkpoint, get_torch_optimizer\n'), ((6040, 6089), 'torch.FloatTensor', 'torch.FloatTensor', (['cfg.batch_size', 
'cfg.latent_dim'], {}), '(cfg.batch_size, cfg.latent_dim)\n', (6057, 6089), False, 'import torch\n'), ((2109, 2134), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2132, 2134), False, 'import torch\n'), ((4307, 4318), 'time.time', 'time.time', ([], {}), '()\n', (4316, 4318), False, 'import time\n'), ((4738, 4749), 'time.time', 'time.time', ([], {}), '()\n', (4747, 4749), False, 'import time\n'), ((4831, 4842), 'time.time', 'time.time', ([], {}), '()\n', (4840, 4842), False, 'import time\n'), ((5490, 5527), 'wandb.Html', 'wandb.Html', (['html_string'], {'inject': '(False)'}), '(html_string, inject=False)\n', (5500, 5527), False, 'import wandb\n')] |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import statistics as stat
from scipy import signal
import math
#from scipy import signal
import librosa
import librosa.display
import params
WINDOW = params.WINDOW
BINS_PER_OCTAVE = params.BINS_PER_OCTAVE
STEP = 512
ALPHA = params.ALPHA
BETA = params.BETA
H = params.H
T = params.T
T_att = params.T_att
plot_onsets = params.plot_onsets
norm_spectre = params.norm_spectre
title = 'Palestrina'
#Palestrina, Cadence4VMaj
y, sr = librosa.load('/Users/manuel/Github/DescripteursHarmoniquesAudio/Exemples/'+title+'.wav')
Notemin = 'D3'
Notemax = 'D9'
def detectionOnsets(y):
fmin = librosa.note_to_hz(Notemin)
fmax = librosa.note_to_hz(Notemax)
#Nmin = int((sr/(fmax*(2**(1/BINS_PER_OCTAVE)-1))))
#Nmax = int((sr/(fmin*(2**(1/BINS_PER_OCTAVE)-1))))
n_bins = int((librosa.note_to_midi(Notemax) - librosa.note_to_midi(Notemin))*BINS_PER_OCTAVE/12)
Chrom = librosa.amplitude_to_db(np.abs(librosa.cqt(y=y, sr=sr, hop_length = STEP, fmin= fmin, bins_per_octave=BINS_PER_OCTAVE, n_bins=n_bins)), ref=np.max)
Nf = len(Chrom)
N = len(Chrom[0])
Diff = np.zeros((Nf,N))
Dev = np.zeros(N)
for j in range(1,N):
for i in range(Nf):
Diff[i,j] = np.abs(Chrom[i,j]-Chrom[i,j-1])
Dev[j] = sum(Diff[:,j])
# FONCTION DE SEUIL
# Ajout de zéros en queue et en tête
l = []
Seuil = []
Onsets = []
for k in range(int(H/2)):
l.append(0)
for val in Dev:
l.append(val)
for k in range(int(H/2)):
l.append(0)
#Calcul de la médiane
for i in range(N):
Seuil.append(ALPHA + BETA*stat.median(l[i:i+H]))
if Dev[i] > Seuil[i]:
Onsets.append(i)
times = librosa.frames_to_time(np.arange(N), sr=sr, hop_length=STEP)
# FONCTION DE TRI SUR LES ONSETS
i=0
while i<(len(Onsets)-1):
while (i<(len(Onsets)-1)) and (times[Onsets[i+1]]< times[Onsets[i]]+T):
if Dev[Onsets[i+1]] < Dev[Onsets[i]]: del Onsets[i+1]
else: del Onsets[i]
i=i+1
onset_frames = librosa.util.fix_frames(Onsets, x_min=0, x_max=Chrom.shape[1]-1)
onset_times = librosa.frames_to_time(onset_frames, sr=sr, hop_length = STEP)
#Synchronisation sur les onsets, en enlevant le début et la fin des longues frames
ChromSync = np.zeros((Nf,len(onset_frames)-1))
n_att = int(librosa.time_to_frames(T_att, sr=sr, hop_length = STEP))
for j in range(len(onset_frames)-1):
for i in range(Nf):
ChromSync[i,j] = np.mean(Chrom[i][(onset_frames[j]+n_att):(onset_frames[j+1]-n_att)])
#Normalisation du spectre
# ChromSync[:,1] = librosa.power_to_db(librosa.db_to_power(ChromSync[:,1]) / np.sum(librosa.db_to_power(ChromSync[:,1])))
if norm_spectre:
for j in range(ChromSync.shape[1]):
ChromSync[:,j] = librosa.power_to_db(librosa.db_to_power(ChromSync[:,j]) / np.sum(librosa.db_to_power(ChromSync[:,j])))
#Affichage
if plot_onsets:
plt.figure(figsize=(13, 7))
ax1 = plt.subplot(3, 1, 1)
librosa.display.specshow(Chrom, bins_per_octave=BINS_PER_OCTAVE, fmin=fmin, y_axis='cqt_note', x_axis='time', x_coords=times)
plt.title('CQT spectrogram')
plt.subplot(3, 1, 2, sharex=ax1)
plt.plot(times, Dev, label='Deviation')
plt.plot(times, Seuil, color='g', label='Seuil')
plt.vlines(times[Onsets], 0, Dev.max(), color='r', alpha=0.9, linestyle='--', label='Onsets')
plt.axis('tight')
plt.legend(frameon=True, framealpha=0.75)
ax1 = plt.subplot(3, 1, 3, sharex=ax1)
librosa.display.specshow(ChromSync, bins_per_octave=BINS_PER_OCTAVE, fmin=fmin, y_axis='cqt_note', x_axis='time',x_coords=onset_times)
plt.show()
return onset_times
onset_times = detectionOnsets(y)
| [
"librosa.db_to_power",
"librosa.util.fix_frames",
"numpy.arange",
"librosa.load",
"numpy.mean",
"matplotlib.pyplot.plot",
"librosa.frames_to_time",
"librosa.time_to_frames",
"librosa.display.specshow",
"matplotlib.pyplot.axis",
"numpy.abs",
"matplotlib.pyplot.title",
"librosa.note_to_hz",
... | [((519, 615), 'librosa.load', 'librosa.load', (["('/Users/manuel/Github/DescripteursHarmoniquesAudio/Exemples/' + title + '.wav'\n )"], {}), "('/Users/manuel/Github/DescripteursHarmoniquesAudio/Exemples/' +\n title + '.wav')\n", (531, 615), False, 'import librosa\n'), ((677, 704), 'librosa.note_to_hz', 'librosa.note_to_hz', (['Notemin'], {}), '(Notemin)\n', (695, 704), False, 'import librosa\n'), ((716, 743), 'librosa.note_to_hz', 'librosa.note_to_hz', (['Notemax'], {}), '(Notemax)\n', (734, 743), False, 'import librosa\n'), ((1170, 1187), 'numpy.zeros', 'np.zeros', (['(Nf, N)'], {}), '((Nf, N))\n', (1178, 1187), True, 'import numpy as np\n'), ((1197, 1208), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1205, 1208), True, 'import numpy as np\n'), ((2135, 2201), 'librosa.util.fix_frames', 'librosa.util.fix_frames', (['Onsets'], {'x_min': '(0)', 'x_max': '(Chrom.shape[1] - 1)'}), '(Onsets, x_min=0, x_max=Chrom.shape[1] - 1)\n', (2158, 2201), False, 'import librosa\n'), ((2218, 2278), 'librosa.frames_to_time', 'librosa.frames_to_time', (['onset_frames'], {'sr': 'sr', 'hop_length': 'STEP'}), '(onset_frames, sr=sr, hop_length=STEP)\n', (2240, 2278), False, 'import librosa\n'), ((1808, 1820), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1817, 1820), True, 'import numpy as np\n'), ((2436, 2489), 'librosa.time_to_frames', 'librosa.time_to_frames', (['T_att'], {'sr': 'sr', 'hop_length': 'STEP'}), '(T_att, sr=sr, hop_length=STEP)\n', (2458, 2489), False, 'import librosa\n'), ((3060, 3087), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13, 7)'}), '(figsize=(13, 7))\n', (3070, 3087), True, 'import matplotlib.pyplot as plt\n'), ((3102, 3122), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (3113, 3122), True, 'import matplotlib.pyplot as plt\n'), ((3131, 3260), 'librosa.display.specshow', 'librosa.display.specshow', (['Chrom'], {'bins_per_octave': 'BINS_PER_OCTAVE', 'fmin': 'fmin', 'y_axis': 
'"""cqt_note"""', 'x_axis': '"""time"""', 'x_coords': 'times'}), "(Chrom, bins_per_octave=BINS_PER_OCTAVE, fmin=fmin,\n y_axis='cqt_note', x_axis='time', x_coords=times)\n", (3155, 3260), False, 'import librosa\n'), ((3265, 3293), 'matplotlib.pyplot.title', 'plt.title', (['"""CQT spectrogram"""'], {}), "('CQT spectrogram')\n", (3274, 3293), True, 'import matplotlib.pyplot as plt\n'), ((3303, 3335), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {'sharex': 'ax1'}), '(3, 1, 2, sharex=ax1)\n', (3314, 3335), True, 'import matplotlib.pyplot as plt\n'), ((3344, 3383), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'Dev'], {'label': '"""Deviation"""'}), "(times, Dev, label='Deviation')\n", (3352, 3383), True, 'import matplotlib.pyplot as plt\n'), ((3392, 3440), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'Seuil'], {'color': '"""g"""', 'label': '"""Seuil"""'}), "(times, Seuil, color='g', label='Seuil')\n", (3400, 3440), True, 'import matplotlib.pyplot as plt\n'), ((3551, 3568), 'matplotlib.pyplot.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (3559, 3568), True, 'import matplotlib.pyplot as plt\n'), ((3577, 3618), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(True)', 'framealpha': '(0.75)'}), '(frameon=True, framealpha=0.75)\n', (3587, 3618), True, 'import matplotlib.pyplot as plt\n'), ((3634, 3666), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {'sharex': 'ax1'}), '(3, 1, 3, sharex=ax1)\n', (3645, 3666), True, 'import matplotlib.pyplot as plt\n'), ((3675, 3815), 'librosa.display.specshow', 'librosa.display.specshow', (['ChromSync'], {'bins_per_octave': 'BINS_PER_OCTAVE', 'fmin': 'fmin', 'y_axis': '"""cqt_note"""', 'x_axis': '"""time"""', 'x_coords': 'onset_times'}), "(ChromSync, bins_per_octave=BINS_PER_OCTAVE, fmin=\n fmin, y_axis='cqt_note', x_axis='time', x_coords=onset_times)\n", (3699, 3815), False, 'import librosa\n'), ((3818, 3828), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(3826, 3828), True, 'import matplotlib.pyplot as plt\n'), ((1000, 1104), 'librosa.cqt', 'librosa.cqt', ([], {'y': 'y', 'sr': 'sr', 'hop_length': 'STEP', 'fmin': 'fmin', 'bins_per_octave': 'BINS_PER_OCTAVE', 'n_bins': 'n_bins'}), '(y=y, sr=sr, hop_length=STEP, fmin=fmin, bins_per_octave=\n BINS_PER_OCTAVE, n_bins=n_bins)\n', (1011, 1104), False, 'import librosa\n'), ((1286, 1323), 'numpy.abs', 'np.abs', (['(Chrom[i, j] - Chrom[i, j - 1])'], {}), '(Chrom[i, j] - Chrom[i, j - 1])\n', (1292, 1323), True, 'import numpy as np\n'), ((2591, 2661), 'numpy.mean', 'np.mean', (['Chrom[i][onset_frames[j] + n_att:onset_frames[j + 1] - n_att]'], {}), '(Chrom[i][onset_frames[j] + n_att:onset_frames[j + 1] - n_att])\n', (2598, 2661), True, 'import numpy as np\n'), ((874, 903), 'librosa.note_to_midi', 'librosa.note_to_midi', (['Notemax'], {}), '(Notemax)\n', (894, 903), False, 'import librosa\n'), ((906, 935), 'librosa.note_to_midi', 'librosa.note_to_midi', (['Notemin'], {}), '(Notemin)\n', (926, 935), False, 'import librosa\n'), ((1689, 1712), 'statistics.median', 'stat.median', (['l[i:i + H]'], {}), '(l[i:i + H])\n', (1700, 1712), True, 'import statistics as stat\n'), ((2930, 2966), 'librosa.db_to_power', 'librosa.db_to_power', (['ChromSync[:, j]'], {}), '(ChromSync[:, j])\n', (2949, 2966), False, 'import librosa\n'), ((2975, 3011), 'librosa.db_to_power', 'librosa.db_to_power', (['ChromSync[:, j]'], {}), '(ChromSync[:, j])\n', (2994, 3011), False, 'import librosa\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 11 11:10:24 2019
@author: nmei
"""
import os
import pandas as pd
import numpy as np
from glob import glob
import seaborn as sns
sns.set_context('poster')
sns.set_style('whitegrid')
from utils import resample_ttest,MCPConverter,stars
from statsmodels.stats.anova import AnovaRM
#from matplotlib import pyplot as plt
figure_dir = '../figures'
saving_dir = '../results/aggregate_experiment_score'
feature_names = [
'correct',
'awareness',
'confidence',]
new_names = [f"{name}_{n_back}" for n_back in np.arange(1,5) for name in feature_names ]
working_dir = '../results/aggregate_experiment_score'
working_data = glob(os.path.join(working_dir,'aggregate*.csv'))
# exp 1 #########################
experiment = 'pos'
pos_file = [item for item in working_data if (f'{experiment}' in item)][0]
df_pos = pd.read_csv(pos_file)
df_pos_save = df_pos.copy()
df_pos_save.loc[df_pos_save['model_name'] == 'LogisticRegression',new_names] = \
df_pos_save.loc[df_pos_save['model_name'] == 'LogisticRegression',new_names].apply(np.exp)
df_pos_save.to_csv(os.path.join(saving_dir,
f'features_normalized_{experiment}.csv'),
index = False)
df_pos_plot = pd.melt(df_pos_save,
id_vars = ['sub_name','model_name'],
value_vars = new_names,
var_name = 'Attributes',)
def get_attr(x):
return x.split('_')[0]
def get_window(x):
return int(x.split('_')[-1])
df_pos_plot['attr'] = df_pos_plot['Attributes'].apply(get_attr)
df_pos_plot['window'] = df_pos_plot['Attributes'].apply(get_window)
df_pos_plot = df_pos_plot.sort_values(['model_name','window','attr'])
g = sns.catplot(x = 'window',
y = 'value',
row = 'model_name',
hue = 'attr',
data = df_pos_plot,
kind = 'bar',
aspect = 3,
sharey = False,
)
g._legend.set_title("Attibutes")
scores = df_pos[df_pos['model_name'] == "LogisticRegression"]['scores_mean'].values
g.axes.flatten()[0].set(ylabel = 'Odd Ratio',
title = f"LogisticRegression scores = {scores.mean():.3f} +/- {scores.std():.3f}")
scores = df_pos[df_pos['model_name'] == "RandomForest"]['scores_mean'].values
g.axes.flatten()[1].set(ylabel = 'Feature Importance (normalized)',
title = f"RandomForest scores = {scores.mean():.3f} +/- {scores.std():.3f}")
g.fig.suptitle("Exp. 1",y = 1.02)
g.savefig(os.path.join(figure_dir,
f'{experiment}_aggregate_features.png'),
dpi = 400,
bbox_inches = 'tight')
for model_name,df_sub in df_pos_plot.groupby(['model_name']):
aovrm = AnovaRM(df_sub,
'value',
'sub_name',
within = ['attr','window'])
res = aovrm.fit()
anova_table = res.anova_table
anova_table.round(5).to_csv(os.path.join(
saving_dir,
f'{experiment}_{model_name}.csv'),index=True)
ttest_results = dict(
model_name = [],
window = [],
attribute = [],
ps_mean = [],
ps_std = [],
value_mean = [],
value_std = [],
baseline = [],
)
for (model_name,window,attribute),df_sub in df_pos_plot.groupby([
'model_name',
'window',
'attr']):
print(model_name,window,attribute,df_sub['value'].values.mean())
if model_name == 'LogisticRegression':
baseline = 1
ps = resample_ttest(df_sub['value'].values,baseline = baseline,
n_ps = 100,n_permutation = int(1e6),one_tail = False)
elif model_name == 'RandomForest':
baseline = 0
ps = resample_ttest(df_sub['value'].values,baseline = baseline,
n_ps = 100,n_permutation = int(1e6),one_tail = False)
ttest_results['model_name'].append(model_name)
ttest_results['window'].append(window)
ttest_results['attribute'].append(attribute)
ttest_results['ps_mean'].append(ps.mean())
ttest_results['ps_std'].append(ps.std())
ttest_results['value_mean'].append(df_sub['value'].values.mean())
ttest_results['value_std'].append(df_sub['value'].values.std())
ttest_results['baseline'].append(baseline)
ttest_results = pd.DataFrame(ttest_results)
temp = []
for model_name, df_sub in ttest_results.groupby(['model_name']):
df_sub = df_sub.sort_values(['ps_mean'])
converter = MCPConverter(pvals = df_sub['ps_mean'].values)
d = converter.adjust_many()
df_sub['ps_corrected'] = d['bonferroni'].values
temp.append(df_sub)
ttest_results = pd.concat(temp)
ttest_results = ttest_results.sort_values(['model_name','window','attribute'])
ttest_results['stars'] = ttest_results['ps_corrected'].apply(stars)
ttest_results.to_csv(os.path.join(
saving_dir,
f't_test_{experiment}.csv'),index=False)
# exp 2 ###################
experiment = 'att'
att_file = [item for item in working_data if (f'{experiment}' in item)][0]
df_att = pd.read_csv(att_file)
df_att_save = df_att.copy()
df_att_save.loc[df_att_save['model_name'] == 'LogisticRegression',new_names] = \
df_att_save.loc[df_att_save['model_name'] == 'LogisticRegression',new_names].apply(np.exp)
df_att_save.to_csv(os.path.join(saving_dir,
f'features_normalized_{experiment}.csv'))
df_att_plot = pd.melt(df_att_save,
id_vars = ['sub_name','model_name'],
value_vars = new_names,
var_name = 'Attributes',)
df_att_plot['attr'] = df_att_plot['Attributes'].apply(get_attr)
df_att_plot['window'] = df_att_plot['Attributes'].apply(get_window)
df_att_plot = df_att_plot.sort_values(['model_name','window','attr'])
g = sns.catplot(x = 'window',
y = 'value',
row = 'model_name',
hue = 'attr',
data = df_att_plot,
kind = 'bar',
aspect = 3,
sharey = False,
)
g._legend.set_title("Attibutes")
scores = df_att[df_att['model_name'] == "LogisticRegression"]['scores_mean'].values
g.axes.flatten()[0].set(ylabel = 'Odd Ratio',
title = f"LogisticRegression scores = {scores.mean():.3f} +/- {scores.std():.3f}")
scores = df_att[df_att['model_name'] == "RandomForest"]['scores_mean'].values
g.axes.flatten()[1].set(ylabel = 'Feature Importance (normalized)',
title = f"RandomForest scores = {scores.mean():.3f} +/- {scores.std():.3f}")
g.fig.suptitle("Exp. 2",y = 1.02)
g.savefig(os.path.join(figure_dir,
f'{experiment}_aggregate_features.png'),
dpi = 400,
bbox_inches = 'tight')
df_att_plot['attr'] = df_att_plot['Attributes'].apply(get_attr)
df_att_plot['window'] = df_att_plot['Attributes'].apply(get_window)
for model_name,df_sub in df_att_plot.groupby(['model_name']):
aovrm = AnovaRM(df_sub,
'value',
'sub_name',
within = ['attr','window'])
res = aovrm.fit()
anova_table = res.anova_table
anova_table.round(5).to_csv(os.path.join(
saving_dir,
f'{experiment}_{model_name}.csv'),index=True)
ttest_results = dict(
model_name = [],
window = [],
attribute = [],
ps_mean = [],
ps_std = [],
value_mean = [],
value_std = [],
baseline = [],
)
for (model_name,window,attribute),df_sub in df_att_plot.groupby([
'model_name',
'window',
'attr']):
print(model_name,window,attribute,df_sub['value'].values.mean())
if model_name == 'LogisticRegression':
baseline = 1
ps = resample_ttest(df_sub['value'].values,baseline = baseline,
n_ps = 100,n_permutation = int(1e6),one_tail = False)
elif model_name == 'RandomForest':
baseline = 0
ps = resample_ttest(df_sub['value'].values,baseline = baseline,
n_ps = 100,n_permutation = int(1e6),one_tail = False)
ttest_results['model_name'].append(model_name)
ttest_results['window'].append(window)
ttest_results['attribute'].append(attribute)
ttest_results['ps_mean'].append(ps.mean())
ttest_results['ps_std'].append(ps.std())
ttest_results['value_mean'].append(df_sub['value'].values.mean())
ttest_results['value_std'].append(df_sub['value'].values.std())
ttest_results['baseline'].append(baseline)
ttest_results = pd.DataFrame(ttest_results)
temp = []
for model_name, df_sub in ttest_results.groupby(['model_name']):
df_sub = df_sub.sort_values(['ps_mean'])
converter = MCPConverter(pvals = df_sub['ps_mean'].values)
d = converter.adjust_many()
df_sub['ps_corrected'] = d['bonferroni'].values
temp.append(df_sub)
ttest_results = pd.concat(temp)
ttest_results = ttest_results.sort_values(['model_name','window','attribute'])
ttest_results['stars'] = ttest_results['ps_corrected'].apply(stars)
ttest_results.to_csv(os.path.join(
saving_dir,
f't_test_{experiment}.csv'),index=False)
| [
"pandas.read_csv",
"seaborn.set_context",
"os.path.join",
"seaborn.catplot",
"seaborn.set_style",
"pandas.concat",
"utils.MCPConverter",
"pandas.DataFrame",
"statsmodels.stats.anova.AnovaRM",
"pandas.melt",
"numpy.arange"
] | [((200, 225), 'seaborn.set_context', 'sns.set_context', (['"""poster"""'], {}), "('poster')\n", (215, 225), True, 'import seaborn as sns\n'), ((226, 252), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (239, 252), True, 'import seaborn as sns\n'), ((921, 942), 'pandas.read_csv', 'pd.read_csv', (['pos_file'], {}), '(pos_file)\n', (932, 942), True, 'import pandas as pd\n'), ((1303, 1409), 'pandas.melt', 'pd.melt', (['df_pos_save'], {'id_vars': "['sub_name', 'model_name']", 'value_vars': 'new_names', 'var_name': '"""Attributes"""'}), "(df_pos_save, id_vars=['sub_name', 'model_name'], value_vars=\n new_names, var_name='Attributes')\n", (1310, 1409), True, 'import pandas as pd\n'), ((1769, 1892), 'seaborn.catplot', 'sns.catplot', ([], {'x': '"""window"""', 'y': '"""value"""', 'row': '"""model_name"""', 'hue': '"""attr"""', 'data': 'df_pos_plot', 'kind': '"""bar"""', 'aspect': '(3)', 'sharey': '(False)'}), "(x='window', y='value', row='model_name', hue='attr', data=\n df_pos_plot, kind='bar', aspect=3, sharey=False)\n", (1780, 1892), True, 'import seaborn as sns\n'), ((4423, 4450), 'pandas.DataFrame', 'pd.DataFrame', (['ttest_results'], {}), '(ttest_results)\n', (4435, 4450), True, 'import pandas as pd\n'), ((4758, 4773), 'pandas.concat', 'pd.concat', (['temp'], {}), '(temp)\n', (4767, 4773), True, 'import pandas as pd\n'), ((5156, 5177), 'pandas.read_csv', 'pd.read_csv', (['att_file'], {}), '(att_file)\n', (5167, 5177), True, 'import pandas as pd\n'), ((5515, 5621), 'pandas.melt', 'pd.melt', (['df_att_save'], {'id_vars': "['sub_name', 'model_name']", 'value_vars': 'new_names', 'var_name': '"""Attributes"""'}), "(df_att_save, id_vars=['sub_name', 'model_name'], value_vars=\n new_names, var_name='Attributes')\n", (5522, 5621), True, 'import pandas as pd\n'), ((5886, 6009), 'seaborn.catplot', 'sns.catplot', ([], {'x': '"""window"""', 'y': '"""value"""', 'row': '"""model_name"""', 'hue': '"""attr"""', 'data': 'df_att_plot', 'kind': 
'"""bar"""', 'aspect': '(3)', 'sharey': '(False)'}), "(x='window', y='value', row='model_name', hue='attr', data=\n df_att_plot, kind='bar', aspect=3, sharey=False)\n", (5897, 6009), True, 'import seaborn as sns\n'), ((8674, 8701), 'pandas.DataFrame', 'pd.DataFrame', (['ttest_results'], {}), '(ttest_results)\n', (8686, 8701), True, 'import pandas as pd\n'), ((9009, 9024), 'pandas.concat', 'pd.concat', (['temp'], {}), '(temp)\n', (9018, 9024), True, 'import pandas as pd\n'), ((739, 782), 'os.path.join', 'os.path.join', (['working_dir', '"""aggregate*.csv"""'], {}), "(working_dir, 'aggregate*.csv')\n", (751, 782), False, 'import os\n'), ((1166, 1231), 'os.path.join', 'os.path.join', (['saving_dir', 'f"""features_normalized_{experiment}.csv"""'], {}), "(saving_dir, f'features_normalized_{experiment}.csv')\n", (1178, 1231), False, 'import os\n'), ((2576, 2640), 'os.path.join', 'os.path.join', (['figure_dir', 'f"""{experiment}_aggregate_features.png"""'], {}), "(figure_dir, f'{experiment}_aggregate_features.png')\n", (2588, 2640), False, 'import os\n'), ((2790, 2853), 'statsmodels.stats.anova.AnovaRM', 'AnovaRM', (['df_sub', '"""value"""', '"""sub_name"""'], {'within': "['attr', 'window']"}), "(df_sub, 'value', 'sub_name', within=['attr', 'window'])\n", (2797, 2853), False, 'from statsmodels.stats.anova import AnovaRM\n'), ((4587, 4631), 'utils.MCPConverter', 'MCPConverter', ([], {'pvals': "df_sub['ps_mean'].values"}), "(pvals=df_sub['ps_mean'].values)\n", (4599, 4631), False, 'from utils import resample_ttest, MCPConverter, stars\n'), ((4942, 4994), 'os.path.join', 'os.path.join', (['saving_dir', 'f"""t_test_{experiment}.csv"""'], {}), "(saving_dir, f't_test_{experiment}.csv')\n", (4954, 4994), False, 'import os\n'), ((5401, 5466), 'os.path.join', 'os.path.join', (['saving_dir', 'f"""features_normalized_{experiment}.csv"""'], {}), "(saving_dir, f'features_normalized_{experiment}.csv')\n", (5413, 5466), False, 'import os\n'), ((6693, 6757), 'os.path.join', 
'os.path.join', (['figure_dir', 'f"""{experiment}_aggregate_features.png"""'], {}), "(figure_dir, f'{experiment}_aggregate_features.png')\n", (6705, 6757), False, 'import os\n'), ((7040, 7103), 'statsmodels.stats.anova.AnovaRM', 'AnovaRM', (['df_sub', '"""value"""', '"""sub_name"""'], {'within': "['attr', 'window']"}), "(df_sub, 'value', 'sub_name', within=['attr', 'window'])\n", (7047, 7103), False, 'from statsmodels.stats.anova import AnovaRM\n'), ((8838, 8882), 'utils.MCPConverter', 'MCPConverter', ([], {'pvals': "df_sub['ps_mean'].values"}), "(pvals=df_sub['ps_mean'].values)\n", (8850, 8882), False, 'from utils import resample_ttest, MCPConverter, stars\n'), ((9193, 9245), 'os.path.join', 'os.path.join', (['saving_dir', 'f"""t_test_{experiment}.csv"""'], {}), "(saving_dir, f't_test_{experiment}.csv')\n", (9205, 9245), False, 'import os\n'), ((621, 636), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (630, 636), True, 'import numpy as np\n'), ((3000, 3058), 'os.path.join', 'os.path.join', (['saving_dir', 'f"""{experiment}_{model_name}.csv"""'], {}), "(saving_dir, f'{experiment}_{model_name}.csv')\n", (3012, 3058), False, 'import os\n'), ((7250, 7308), 'os.path.join', 'os.path.join', (['saving_dir', 'f"""{experiment}_{model_name}.csv"""'], {}), "(saving_dir, f'{experiment}_{model_name}.csv')\n", (7262, 7308), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 26 11:00:51 2016
@author: snoran
Includes various utility functions.
"""
import numpy as np
def slidingWindow(sequence,winSize,step=1):
"""Returns a generator that will iterate through
the defined chunks of input sequence. Input sequence
must be iterable.
Thanks to https://scipher.wordpress.com/2010/12/02/simple-sliding-window-iterator-in-python/"""
# Verify the inputs
try:
it = iter(sequence)
except TypeError:
raise Exception("**ERROR** sequence must be iterable.")
if not ((type(winSize) == type(0)) and (type(step) == type(0))):
raise Exception("**ERROR** type(winSize) and type(step) must be int.")
if step > winSize:
raise Exception("**ERROR** step must not be larger than winSize.")
if winSize > len(sequence):
raise Exception("**ERROR** winSize must not be larger than sequence length.")
# Pre-compute number of chunks to emit
numOfChunks = ((len(sequence)-winSize)/step)+1
# Do the work
for i in range(0,numOfChunks*step,step):
yield i, sequence[i:i+winSize]
GRAVITY = 9.81
READ_LIMIT = 400;
acc_readings = np.zeros((READ_LIMIT, 3))
acc_state = False;
read_counter = 0;
aggX = 0
aggY = 0
aggZ = 0
def reset_vars():
"""
Resets the variables used in reorientation. Since they are global
variables, we need to make sure that they are reset. In the future,
this should really be done using some sort of Python object.
"""
global acc_state
global read_counter
global aggX
global aggY
global aggZ
acc_state = False;
read_counter = 0;
aggX = 0
aggY = 0
aggZ = 0
def reorient(acc_x, acc_y, acc_z):
"""
Reorients the accelerometer data. It comes from some legacy
Java code, so it's very messy. You don't need to worry about
how it works.
"""
x = acc_x
y = acc_z
z = -acc_y
global acc_state
global read_counter
global aggX
global aggY
global aggZ
if read_counter >= READ_LIMIT:
read_counter = 0
accState = True;
aggX += x - acc_readings[read_counter][0];
aggY += y - acc_readings[read_counter][1];
aggZ += z - acc_readings[read_counter][2];
acc_readings[read_counter][0] = x;
acc_readings[read_counter][1] = y;
acc_readings[read_counter][2] = z;
if(accState):
acc_z_o = aggZ/(READ_LIMIT*GRAVITY);
acc_y_o = aggY/(READ_LIMIT*GRAVITY);
acc_x_o = aggX/(READ_LIMIT*GRAVITY);
if acc_z_o > 1.0:
acc_z_o = 1.0
if acc_z_o < -1.0:
acc_z_o = -1.0
x = x/GRAVITY;
y = y/GRAVITY;
z = z/GRAVITY;
theta_tilt = np.arccos(acc_z_o);
phi_pre = np.arctan2(acc_y_o, acc_x_o);
tan_psi = (-acc_x_o*np.sin(phi_pre) + acc_y_o*np.cos(phi_pre))/((acc_x_o*np.cos(phi_pre)+acc_y_o*np.sin(phi_pre))*np.cos(theta_tilt)-acc_z_o*np.sin(theta_tilt));
psi_post = np.arctan(tan_psi);
acc_x_pre = x*np.cos(phi_pre)+ y*np.sin(phi_pre);
acc_y_pre = -x*np.sin(phi_pre)+ y*np.cos(phi_pre);
acc_x_pre_tilt = acc_x_pre*np.cos(theta_tilt)-z*np.sin(theta_tilt);
acc_y_pre_tilt = acc_y_pre;
orient_acc_x = (acc_x_pre_tilt*np.cos(psi_post)+acc_y_pre_tilt*np.sin(psi_post))*GRAVITY;
orient_acc_y =(-acc_x_pre_tilt*np.sin(psi_post)+acc_y_pre_tilt*np.cos(psi_post))*GRAVITY;
orient_acc_z = z*GRAVITY/(np.cos(theta_tilt));
if orient_acc_z > 3 * GRAVITY:
orient_acc_z = 3 * GRAVITY;
if orient_acc_z < -3 * GRAVITY:
orient_acc_z = -3 * GRAVITY;
orient_acc_z = np.sqrt((x*x+y*y+z*z)*GRAVITY*GRAVITY - (orient_acc_x*orient_acc_x + orient_acc_y*orient_acc_y));
result = [orient_acc_x, orient_acc_y, orient_acc_z]
read_counter += 1;
return result;
| [
"numpy.sqrt",
"numpy.arccos",
"numpy.zeros",
"numpy.arctan2",
"numpy.cos",
"numpy.sin",
"numpy.arctan"
] | [((1180, 1205), 'numpy.zeros', 'np.zeros', (['(READ_LIMIT, 3)'], {}), '((READ_LIMIT, 3))\n', (1188, 1205), True, 'import numpy as np\n'), ((2723, 2741), 'numpy.arccos', 'np.arccos', (['acc_z_o'], {}), '(acc_z_o)\n', (2732, 2741), True, 'import numpy as np\n'), ((2761, 2789), 'numpy.arctan2', 'np.arctan2', (['acc_y_o', 'acc_x_o'], {}), '(acc_y_o, acc_x_o)\n', (2771, 2789), True, 'import numpy as np\n'), ((2980, 2998), 'numpy.arctan', 'np.arctan', (['tan_psi'], {}), '(tan_psi)\n', (2989, 2998), True, 'import numpy as np\n'), ((3665, 3783), 'numpy.sqrt', 'np.sqrt', (['((x * x + y * y + z * z) * GRAVITY * GRAVITY - (orient_acc_x * orient_acc_x +\n orient_acc_y * orient_acc_y))'], {}), '((x * x + y * y + z * z) * GRAVITY * GRAVITY - (orient_acc_x *\n orient_acc_x + orient_acc_y * orient_acc_y))\n', (3672, 3783), True, 'import numpy as np\n'), ((3459, 3477), 'numpy.cos', 'np.cos', (['theta_tilt'], {}), '(theta_tilt)\n', (3465, 3477), True, 'import numpy as np\n'), ((3022, 3037), 'numpy.cos', 'np.cos', (['phi_pre'], {}), '(phi_pre)\n', (3028, 3037), True, 'import numpy as np\n'), ((3041, 3056), 'numpy.sin', 'np.sin', (['phi_pre'], {}), '(phi_pre)\n', (3047, 3056), True, 'import numpy as np\n'), ((3081, 3096), 'numpy.sin', 'np.sin', (['phi_pre'], {}), '(phi_pre)\n', (3087, 3096), True, 'import numpy as np\n'), ((3100, 3115), 'numpy.cos', 'np.cos', (['phi_pre'], {}), '(phi_pre)\n', (3106, 3115), True, 'import numpy as np\n'), ((3152, 3170), 'numpy.cos', 'np.cos', (['theta_tilt'], {}), '(theta_tilt)\n', (3158, 3170), True, 'import numpy as np\n'), ((3173, 3191), 'numpy.sin', 'np.sin', (['theta_tilt'], {}), '(theta_tilt)\n', (3179, 3191), True, 'import numpy as np\n'), ((2819, 2834), 'numpy.sin', 'np.sin', (['phi_pre'], {}), '(phi_pre)\n', (2825, 2834), True, 'import numpy as np\n'), ((2845, 2860), 'numpy.cos', 'np.cos', (['phi_pre'], {}), '(phi_pre)\n', (2851, 2860), True, 'import numpy as np\n'), ((2913, 2931), 'numpy.cos', 'np.cos', (['theta_tilt'], {}), 
'(theta_tilt)\n', (2919, 2931), True, 'import numpy as np\n'), ((2940, 2958), 'numpy.sin', 'np.sin', (['theta_tilt'], {}), '(theta_tilt)\n', (2946, 2958), True, 'import numpy as np\n'), ((3268, 3284), 'numpy.cos', 'np.cos', (['psi_post'], {}), '(psi_post)\n', (3274, 3284), True, 'import numpy as np\n'), ((3300, 3316), 'numpy.sin', 'np.sin', (['psi_post'], {}), '(psi_post)\n', (3306, 3316), True, 'import numpy as np\n'), ((3366, 3382), 'numpy.sin', 'np.sin', (['psi_post'], {}), '(psi_post)\n', (3372, 3382), True, 'import numpy as np\n'), ((3398, 3414), 'numpy.cos', 'np.cos', (['psi_post'], {}), '(psi_post)\n', (3404, 3414), True, 'import numpy as np\n'), ((2872, 2887), 'numpy.cos', 'np.cos', (['phi_pre'], {}), '(phi_pre)\n', (2878, 2887), True, 'import numpy as np\n'), ((2896, 2911), 'numpy.sin', 'np.sin', (['phi_pre'], {}), '(phi_pre)\n', (2902, 2911), True, 'import numpy as np\n')] |
from bsuite.environments.memory_chain import MemoryChain
import dm_env
import numpy as np
import random
import pandas as pd
class CustomMemoryChain(MemoryChain):
def __init__(self, memory_length: int, num_bits: int, seed: int = 0):
super().__init__(memory_length, num_bits, seed)
assert (
memory_length >= num_bits
), "Memory length must be greater than number of bits for custom Memory Chain."
assert num_bits % 2 != 0, "Num bits must be an odd number"
random.seed(seed)
self._context_timesteps = random.sample(range(memory_length), num_bits)
self._context_timesteps.sort()
self._context_index = 0
# Ignore query
self._query = 0.0
def _get_observation(self):
"""Observation of form [time, query, num_bits of context]."""
obs = np.zeros(shape=(1, self._num_bits + 2), dtype=np.float32)
# Show the time, on every step.
obs[0, 0] = 1 - self._timestep / self._memory_length
# Show the query, on the last step
if self._timestep == self._memory_length - 1:
obs[0, 1] = self._query
# Show part of the context, on varied steps
if self._timestep in self._context_timesteps:
obs[0, 2 + self._context_index] = 2 * self._context[self._context_index] - 1
self._context_index += 1
return obs
def _step(self, action: int) -> dm_env.TimeStep:
observation = self._get_observation()
self._timestep += 1
if self._timestep - 1 < self._memory_length:
# On all but the last step provide a reward of 0.
return dm_env.transition(reward=0.0, observation=observation)
if self._timestep - 1 > self._memory_length:
raise RuntimeError("Invalid state.") # We shouldn't get here.
# Convert context from [0, 1] to [-1, 1]
context = 2 * self._context - 1
# If sum(context) > 0, action 1
# If sum(context) < 0, action 0
if (sum(context) > 0 and action == 1) or (sum(context) < 0 and action == 0):
reward = 1.0
self._total_perfect += 1
else:
reward = -1.0
self._total_regret += 2.0
return dm_env.termination(reward=reward, observation=observation)
def _reset(self):
self._context_index = 0
self._timestep = 0
self._episode_mistakes = 0
self._context = self._rng.binomial(1, 0.5, self._num_bits)
# Ignore query
self._query = 0.0
observation = self._get_observation()
self._timestep += 1
return dm_env.restart(observation)
def get_context(self):
return self._context[self._query]
def memory_preprocess(df_in: pd.DataFrame) -> pd.DataFrame:
"""Preprocess data for memory environments = regret relative to random."""
df = df_in.copy()
df["perfection_regret"] = df.episode - df.total_perfect
# a random agent always has 50% chance on each episode
# independently from memory length and number of bits.
df["base_rate"] = 0.5
df["regret_ratio"] = df.perfection_regret / df.base_rate
return df
def score(df: pd.DataFrame, group_col: str = "custom_memory") -> float:
"""Output a single score for custom_memory."""
df = memory_preprocess(df_in=df)
max_eps = 10000
ave_perfection = df.loc[df.episode == max_eps, "regret_ratio"].mean() / max_eps
return ave_perfection
# Length: 3
# Bits: 3
# Length: 10
# Bits: 3, 5, 7, 9
# Length: 30
# Bits: 3, 5, 7, 9, 17, 25
# Length: 100
# Bits: 3, 5, 7, 9, 17, 25, 29, 35, 39
custom_memory_sweep = (
"memory_custom/0",
"memory_custom/1",
"memory_custom/2",
"memory_custom/3",
"memory_custom/4",
"memory_custom/5",
"memory_custom/6",
"memory_custom/7",
"memory_custom/8",
"memory_custom/9",
"memory_custom/10",
"memory_custom/11",
"memory_custom/12",
)
def load_custom_memory_env(experiment: str):
    """Instantiate the CustomMemoryChain registered under a sweep id.

    Args:
      experiment: sweep id of the form ``"memory_custom/<i>"``.

    Returns:
      A ``CustomMemoryChain`` configured with the (memory_length, num_bits)
      pair registered for that id.

    Raises:
      ValueError: if ``experiment`` is not a known sweep entry.
    """
    settings = {
        "memory_custom/0": (3, 3),
        "memory_custom/1": (5, 3),
        "memory_custom/2": (5, 5),
        "memory_custom/3": (10, 3),
        "memory_custom/4": (10, 5),
        "memory_custom/5": (10, 7),
        "memory_custom/6": (10, 9),
        "memory_custom/7": (30, 3),
        "memory_custom/8": (30, 5),
        "memory_custom/9": (30, 7),
        "memory_custom/10": (30, 9),
        "memory_custom/11": (30, 17),
        "memory_custom/12": (30, 25),
    }
    if experiment not in settings:
        # Previously ``dict.get`` returned None for unknown ids, which
        # surfaced as a cryptic TypeError on tuple unpacking. Fail loudly
        # with an explicit message instead.
        raise ValueError(f"Unknown custom memory experiment: {experiment!r}")
    memory_length, num_bits = settings[experiment]
    return CustomMemoryChain(memory_length=memory_length, num_bits=num_bits)
| [
"dm_env.restart",
"random.seed",
"numpy.zeros",
"dm_env.transition",
"dm_env.termination"
] | [((513, 530), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (524, 530), False, 'import random\n'), ((851, 908), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, self._num_bits + 2)', 'dtype': 'np.float32'}), '(shape=(1, self._num_bits + 2), dtype=np.float32)\n', (859, 908), True, 'import numpy as np\n'), ((2254, 2312), 'dm_env.termination', 'dm_env.termination', ([], {'reward': 'reward', 'observation': 'observation'}), '(reward=reward, observation=observation)\n', (2272, 2312), False, 'import dm_env\n'), ((2638, 2665), 'dm_env.restart', 'dm_env.restart', (['observation'], {}), '(observation)\n', (2652, 2665), False, 'import dm_env\n'), ((1660, 1714), 'dm_env.transition', 'dm_env.transition', ([], {'reward': '(0.0)', 'observation': 'observation'}), '(reward=0.0, observation=observation)\n', (1677, 1714), False, 'import dm_env\n')] |
import os, time, glob
import logging
from absl import flags, app
import importlib
from datetime import datetime
import pickle
import numpy as np
import pandas as pd
from comet_ml import Experiment, OfflineExperiment, Optimizer
from sklearn.preprocessing import StandardScaler, KBinsDiscretizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.metrics import (
f1_score,
r2_score,
confusion_matrix,
)
from gahaco.features.feature_utils import (
get_data,
balance_dataset,
load_positions,
)
from gahaco.visualization import visualize
from gahaco.utils import summary
from gahaco.models import hod
from gahaco.models.model import Model
from gahaco.utils.optimize import merge_configs
from gahaco.utils import feature_importance
from gahaco.utils.optimize import feature_optimization
from gahaco.utils.config import load_config
from gahaco.utils.tpcf import compute_tpcf
from gahaco.features.correlation import select_uncorrelated_features
# -----------------------------------------------------------------------------
# Flags
# -----------------------------------------------------------------------------
# Command-line flags (absl). Each DEFINE_* call registers (name, default, help).
flags.DEFINE_string('model', 'lightgbm_reg', 'model to run') # name ,default, help
flags.DEFINE_integer('boxsize', 300, 'TNG box to use: either 100 or 300')
flags.DEFINE_integer('np', 4, 'Number of processes to run')
flags.DEFINE_integer('n_splits', 4, 'Number of folds for cross-validation')
flags.DEFINE_boolean('upload', False, 'upload model to comet.ml, otherwise save in temporary folder')
flags.DEFINE_boolean('optimize_model', False, 'use comet.ml to perform hyper-param. optimization.')
flags.DEFINE_boolean('logging', False, 'save log files')
flags.DEFINE_boolean('mass_balance', False, 'balance dataset in different mass bins')
flags.DEFINE_boolean('figures', True, 'if final figures should be created')
# Global handle through which parsed flag values are read (FLAGS.model, ...).
FLAGS = flags.FLAGS
def main(argv):
    """Load configuration, build the model wrapper and launch training.

    ``argv`` is required by ``absl.app.run`` but is unused here. Depending on
    ``FLAGS.optimize_model``, either a comet.ml hyper-parameter sweep is run
    (one ``train`` call per suggested experiment) or a single training run.
    """
    # Config file locations are derived from the chosen model and box size.
    opt_config_file_path = "../../models/%s/config_optimize.json" % (FLAGS.model)
    main_config_file_path = "../../models/%s/config_%s_tng%d.json" % (FLAGS.model, FLAGS.model, FLAGS.boxsize)
    config = load_config(config_file_path=main_config_file_path, purpose="")
    config['model']['parameters']['n_jobs'] = FLAGS.np
    print(f"Using {FLAGS.np} cores to fit models")
    # Initiate Model/Experiment
    model = Model(FLAGS, config, opt_config_file_path)
    # Load dataset
    features, labels = get_data(config["label"], boxsize=FLAGS.boxsize)
    m200c = features.M200_DMO.values
    #keep_list = [
    #        "concentration_prada", "CentralVmax", "Spin", #"env_5",
    #]
    #features = features[keep_list]
    # Resolve the metric callable by dotted path from the config.
    metric_module = importlib.import_module(config["metric"]["module"])
    metric = getattr(metric_module, config["metric"]["method"])
    # Optional resampling strategy, resolved the same way; None disables it.
    if "sampling" in config:
        sampler_module = importlib.import_module(config["sampling"]["module"])
        sampler = getattr(sampler_module, config["sampling"]["method"])
    else:
        sampler=None
    # K-fold validation setting (fixed seed for reproducible splits).
    skf = StratifiedKFold(n_splits=FLAGS.n_splits, shuffle=True, random_state=0)
    if FLAGS.optimize_model:
        # model-/hyper-parameter optimization (run many experiments)
        for experiment in model.opt.get_experiments():
            experiment.add_tag(
                'hyper-parameter optimization %s for %s' % (FLAGS.model, FLAGS.boxsize))
            config = merge_configs(config, model.opt, experiment)
            train(
                model, experiment, features, labels, m200c, metric, sampler, skf, config, FLAGS)
    else:
        # run one experiment
        train(model, model.experiment, features, labels, m200c, metric, sampler, skf, config, FLAGS)
def train(model, experiment, features, labels, m200c, metric, sampler, skf, config, FLAGS):
    """Cross-validated training loop with an HOD baseline comparison.

    For every stratified fold this: evaluates the HOD baseline, optionally
    balances and then standardises the inputs, fits the model, logs the
    metric, and (when not hyper-parameter optimising) accumulates feature
    importances, confusion matrices and two-point correlation functions,
    which are plotted and saved to disk after the loop.
    """
    if ("feature_optimization" in config.keys()) and (FLAGS.optimize_model is False):
        if 'PCA' in config['feature_optimization']:
            # TODO: Needs to be updated to only take features and return dataframe
            # NOTE(review): here ``train`` is this very function and ``test``
            # is undefined, so enabling the PCA option raises a NameError —
            # this branch presumably should operate on per-fold data.
            train_features, test_features = feature_optimization(
                train, test, config["feature_optimization"], experiment=experiment
            )
            feature_names = [f"PCA_{i}" for i in range(train["features"].shape[1])]
        elif config['feature_optimization']['uncorrelated']:
            gini_importances = np.loadtxt(f'../../models/{FLAGS.model}/gini_importances.csv')
            features = select_uncorrelated_features(features,
                                                labels,
                                                #gini_impurities=gini_importances,
                                                experiment=experiment)
    # Per-fold accumulators for importances, confusion matrices and TPCFs.
    dropcol_importance,pm_importance,gini_importance,cms, chisquare_tpcf = ([] for i in range(5))
    hod_cms,hydro_tpcf,pred_tpcf,hod_tpcfs = ([] for i in range(4))
    halo_occs = []
    if config['label']=='stellar_mass':
        # Continuous target: discretise into 20 uniform bins so that
        # StratifiedKFold can stratify a regression label.
        stratify = KBinsDiscretizer(n_bins=20, encode="ordinal",
                strategy="uniform").fit_transform(np.expand_dims(labels.values, -1)).astype(int)
    else:
        stratify = labels
    fold=0
    for train_idx, test_idx in skf.split(features, stratify):
        x_train, x_test = (features.iloc[train_idx], features.iloc[test_idx])
        y_train, y_test = (labels.iloc[train_idx], labels.iloc[test_idx])
        # -----------------------------------------------------------------------------
        # BASELINE HOD MODEL EVALUATION
        # -----------------------------------------------------------------------------
        hydro_pos_test, dmo_pos_test = load_positions(test_idx, boxsize=FLAGS.boxsize)
        if FLAGS.optimize_model is False:
            if (config['label']=='stellar_mass'):
                #stellar_mass_thresholds = np.array([9.2, 9.3, 9.4])
                stellar_mass_thresholds = np.array([9., 9.6, 9.8])
                #if FLAGS.boxsize == 300:
                #stellar_mass_thresholds += np.log10(1.4)
                halo_occ, hod_cm, hod_tpcf, y_pred_hod = summary.hod_stellar_mass_summary(
                    m200c[train_idx], m200c[test_idx],
                    y_train,
                    y_test,
                    stellar_mass_thresholds,
                    dmo_pos_test,
                    FLAGS.boxsize
                )
                r_c, hydro_tpcf_test = summary.hydro_stellar_mass_summary(
                    hydro_pos_test,
                    y_test,
                    stellar_mass_thresholds,
                    FLAGS.boxsize,
                )
            else:
                stellar_mass_thresholds = [9]
                halo_occ, hod_cm, hod_tpcf = summary.hod_summary(
                    m200c[train_idx],
                    m200c[test_idx],
                    y_train,
                    y_test,
                    dmo_pos_test,
                    FLAGS.boxsize
                )
                r_c, hydro_tpcf_test = summary.hydro_summary(
                    hydro_pos_test, y_test, FLAGS.boxsize
                )
            hydro_tpcf.append(hydro_tpcf_test)
            halo_occs.append(halo_occ)
            hod_cms.append(hod_cm)
            hod_tpcfs.append(hod_tpcf)
        # -----------------------------------------------------------------------------
        # PREPROCESS DATASET FOR TRAINING (balancing + normalisation)
        # -----------------------------------------------------------------------------
        if sampler is not None:
            if FLAGS.mass_balance:
                x_train, y_train = balance_dataset(x_train, y_train,
                                                sampler)
            else:
                x_train, y_train = balance_dataset(x_train, y_train,
                                                sampler, split=None)
        ## Standardize features (scaler is fitted on the possibly-rebalanced
        ## training fold and applied to both folds).
        scaler = StandardScaler()
        scaler.fit(x_train)
        x_test_save = x_test.copy()
        x_train_scaled = scaler.transform(x_train)
        x_train = pd.DataFrame(x_train_scaled, index=x_train.index, columns=x_train.columns)
        x_test_scaled = scaler.transform(x_test)
        x_test = pd.DataFrame(x_test_scaled, index=x_test.index, columns=x_test.columns)
        # -----------------------------------------------------------------------------
        # FIT MODEL
        # -----------------------------------------------------------------------------
        trained_model = model.fit(x_train, y_train, config["model"])
        y_pred = model.predict(trained_model, x_test, config["model"])
        x_test_save['prediction'] = y_pred
        x_test_save['label'] = y_test
        # NOTE(review): y_pred_hod is only bound in the stellar_mass branch
        # above, and only when optimize_model is False — any other
        # configuration raises a NameError on the next line.
        x_test_save['hod'] = y_pred_hod
        # Persist unscaled test features + predictions for this fold.
        x_test_save.to_hdf(f'../../models/{FLAGS.model}/test_results_fold{fold}_env',
                key='hf')
        metric_value = metric(y_test, y_pred, **config["metric"]["params"])
        experiment.log_metric("Metric value", metric_value)
        # -----------------------------------------------------------------------------
        # SAVE FEATURE IMPORTANCE AND EVALUATION METRIC
        # -----------------------------------------------------------------------------
        if (config['label']=='stellar_mass') or (config['label']=='nr_of_satellites'):
            # R2 over strictly-positive true/predicted values only.
            # NOTE(review): stellar_mass_thresholds is unbound here when
            # FLAGS.optimize_model is True — confirm this path is never hit
            # during hyper-parameter optimisation.
            threshold = (y_test > 0.) & (y_pred > 0.)
            r2 = r2_score(y_test[threshold], y_pred[threshold])
            visualize.regression(
                y_test[threshold], y_pred[threshold], r2, metric_value, stellar_mass_thresholds,
                fold=fold, experiment=experiment
            )
        if FLAGS.optimize_model is False:
            if config['feature_optimization']['measure_importance']:
                # Drop-column importance also yields a TPCF chi-square.
                imp, xi2 = feature_importance.dropcol(
                    trained_model,
                    x_train,
                    y_train,
                    x_test,
                    y_test,
                    dmo_pos_test,
                    r_c,
                    hydro_tpcf_test,
                    metric_value,
                    metric,
                    config['metric']['params'],
                    stellar_mass_thresholds,
                    boxsize = FLAGS.boxsize
                )
                dropcol_importance.append(imp)
                chisquare_tpcf.append(xi2)
                imp = feature_importance.permutation(
                    trained_model,
                    x_test,
                    y_test,
                    metric_value,
                    metric,
                    config['metric']['params']
                )
                pm_importance.append(imp)
                gini_importance.append(trained_model.feature_importances_)
            if (config['label']=='stellar_mass'):
                cm, model_tpcf = summary.model_stellar_mass_summary(y_test, y_pred,
                                                stellar_mass_thresholds,
                                                dmo_pos_test, FLAGS.boxsize)
            else:
                cm, model_tpcf = summary.model_summary(y_test, y_pred, dmo_pos_test, FLAGS.boxsize)
            cms.append(cm)
            pred_tpcf.append(model_tpcf)
        fold+=1
    # -----------------------------------------------------------------------------
    # SUMMARY FIGURES
    # -----------------------------------------------------------------------------
    if (FLAGS.optimize_model is False) and (FLAGS.figures is True):
        if (config['label'] != 'nr_of_satellites'):
            # ---------------------------------------------------------------------
            # Save output's visualizations
            # ---------------------------------------------------------------------
            visualize.plot_confusion_matrix(
                cms,
                classes = ['Dark', 'Luminous'],
                normalize = False,
                title='LGBM',
                experiment = experiment,
                stellar_mass_thresholds=stellar_mass_thresholds
            )
            visualize.plot_confusion_matrix(
                hod_cms,
                classes = ['Dark', 'Luminous'],
                normalize = False,
                title='HOD',
                experiment = experiment,
                stellar_mass_thresholds=stellar_mass_thresholds
            )
            visualize.plot_tpcfs(
                r_c, hydro_tpcf, pred_tpcf, hod_tpcfs, experiment=experiment,
                stellar_mass_thresholds=stellar_mass_thresholds
            )
            visualize.plot_tpcfs(
                r_c, hydro_tpcf, None, hod_tpcfs, experiment=experiment,
                stellar_mass_thresholds=stellar_mass_thresholds
            )
        else:
            visualize.mean_halo_occupation(halo_occs, experiment=experiment)
        if config['feature_optimization']['measure_importance']:
            visualize.plot_feature_importance(
                dropcol_importance,
                x_train.columns,
                title='Drop column',
                experiment=experiment
            )
            visualize.plot_feature_importance(
                pm_importance,
                x_train.columns,
                title='Permute column',
                experiment=experiment
            )
            visualize.plot_feature_importance(
                gini_importance,
                x_train.columns,
                title='Gini impurity',
                experiment=experiment,
            )
            if config['feature_optimization']['save_importance']:
                # Mean Gini importance across folds, reused by the
                # 'uncorrelated' feature-selection branch on a later run.
                np.savetxt(
                    f'../../models/{FLAGS.model}/gini_importances.csv',
                    np.mean(gini_importance, axis=0)
                )
    experiment.add_tag(f'classifier = {FLAGS.model}')
    # NOTE(review): hard-coded absolute cluster paths below — consider
    # making these configurable.
    np.save('/cosma6/data/dp004/dc-cues1/gahaco_data/dropcol.npy', dropcol_importance)
    np.save('/cosma6/data/dp004/dc-cues1/gahaco_data/chisquare.npy', chisquare_tpcf)
    print(features.columns.values)
    np.save('/cosma6/data/dp004/dc-cues1/gahaco_data/names.npy', features.columns.values)
    print('All good :)')
if __name__ == "__main__":
app.run(main)
| [
"gahaco.models.model.Model",
"gahaco.visualization.visualize.plot_confusion_matrix",
"sklearn.model_selection.StratifiedKFold",
"gahaco.visualization.visualize.plot_feature_importance",
"numpy.array",
"sklearn.metrics.r2_score",
"gahaco.utils.optimize.feature_optimization",
"numpy.save",
"numpy.mean... | [((1210, 1270), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""model"""', '"""lightgbm_reg"""', '"""model to run"""'], {}), "('model', 'lightgbm_reg', 'model to run')\n", (1229, 1270), False, 'from absl import flags, app\n'), ((1293, 1366), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""boxsize"""', '(300)', '"""TNG box to use: either 100 or 300"""'], {}), "('boxsize', 300, 'TNG box to use: either 100 or 300')\n", (1313, 1366), False, 'from absl import flags, app\n'), ((1367, 1426), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""np"""', '(4)', '"""Number of processes to run"""'], {}), "('np', 4, 'Number of processes to run')\n", (1387, 1426), False, 'from absl import flags, app\n'), ((1427, 1502), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""n_splits"""', '(4)', '"""Number of folds for cross-validation"""'], {}), "('n_splits', 4, 'Number of folds for cross-validation')\n", (1447, 1502), False, 'from absl import flags, app\n'), ((1503, 1608), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""upload"""', '(False)', '"""upload model to comet.ml, otherwise save in temporary folder"""'], {}), "('upload', False,\n 'upload model to comet.ml, otherwise save in temporary folder')\n", (1523, 1608), False, 'from absl import flags, app\n'), ((1605, 1708), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""optimize_model"""', '(False)', '"""use comet.ml to perform hyper-param. optimization."""'], {}), "('optimize_model', False,\n 'use comet.ml to perform hyper-param. 
optimization.')\n", (1625, 1708), False, 'from absl import flags, app\n'), ((1705, 1761), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""logging"""', '(False)', '"""save log files"""'], {}), "('logging', False, 'save log files')\n", (1725, 1761), False, 'from absl import flags, app\n'), ((1762, 1851), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""mass_balance"""', '(False)', '"""balance dataset in different mass bins"""'], {}), "('mass_balance', False,\n 'balance dataset in different mass bins')\n", (1782, 1851), False, 'from absl import flags, app\n'), ((1848, 1923), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""figures"""', '(True)', '"""if final figures should be created"""'], {}), "('figures', True, 'if final figures should be created')\n", (1868, 1923), False, 'from absl import flags, app\n'), ((2183, 2246), 'gahaco.utils.config.load_config', 'load_config', ([], {'config_file_path': 'main_config_file_path', 'purpose': '""""""'}), "(config_file_path=main_config_file_path, purpose='')\n", (2194, 2246), False, 'from gahaco.utils.config import load_config\n'), ((2398, 2440), 'gahaco.models.model.Model', 'Model', (['FLAGS', 'config', 'opt_config_file_path'], {}), '(FLAGS, config, opt_config_file_path)\n', (2403, 2440), False, 'from gahaco.models.model import Model\n'), ((2484, 2532), 'gahaco.features.feature_utils.get_data', 'get_data', (["config['label']"], {'boxsize': 'FLAGS.boxsize'}), "(config['label'], boxsize=FLAGS.boxsize)\n", (2492, 2532), False, 'from gahaco.features.feature_utils import get_data, balance_dataset, load_positions\n'), ((2742, 2793), 'importlib.import_module', 'importlib.import_module', (["config['metric']['module']"], {}), "(config['metric']['module'])\n", (2765, 2793), False, 'import importlib\n'), ((3135, 3205), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'FLAGS.n_splits', 'shuffle': '(True)', 'random_state': '(0)'}), '(n_splits=FLAGS.n_splits, shuffle=True, 
random_state=0)\n', (3150, 3205), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((13857, 13943), 'numpy.save', 'np.save', (['"""/cosma6/data/dp004/dc-cues1/gahaco_data/dropcol.npy"""', 'dropcol_importance'], {}), "('/cosma6/data/dp004/dc-cues1/gahaco_data/dropcol.npy',\n dropcol_importance)\n", (13864, 13943), True, 'import numpy as np\n'), ((13944, 14029), 'numpy.save', 'np.save', (['"""/cosma6/data/dp004/dc-cues1/gahaco_data/chisquare.npy"""', 'chisquare_tpcf'], {}), "('/cosma6/data/dp004/dc-cues1/gahaco_data/chisquare.npy', chisquare_tpcf\n )\n", (13951, 14029), True, 'import numpy as np\n'), ((14064, 14154), 'numpy.save', 'np.save', (['"""/cosma6/data/dp004/dc-cues1/gahaco_data/names.npy"""', 'features.columns.values'], {}), "('/cosma6/data/dp004/dc-cues1/gahaco_data/names.npy', features.\n columns.values)\n", (14071, 14154), True, 'import numpy as np\n'), ((14207, 14220), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (14214, 14220), False, 'from absl import flags, app\n'), ((2935, 2988), 'importlib.import_module', 'importlib.import_module', (["config['sampling']['module']"], {}), "(config['sampling']['module'])\n", (2958, 2988), False, 'import importlib\n'), ((5746, 5793), 'gahaco.features.feature_utils.load_positions', 'load_positions', (['test_idx'], {'boxsize': 'FLAGS.boxsize'}), '(test_idx, boxsize=FLAGS.boxsize)\n', (5760, 5793), False, 'from gahaco.features.feature_utils import get_data, balance_dataset, load_positions\n'), ((7947, 7963), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7961, 7963), False, 'from sklearn.preprocessing import StandardScaler, KBinsDiscretizer\n'), ((8097, 8171), 'pandas.DataFrame', 'pd.DataFrame', (['x_train_scaled'], {'index': 'x_train.index', 'columns': 'x_train.columns'}), '(x_train_scaled, index=x_train.index, columns=x_train.columns)\n', (8109, 8171), True, 'import pandas as pd\n'), ((8238, 8309), 'pandas.DataFrame', 'pd.DataFrame', (['x_test_scaled'], 
{'index': 'x_test.index', 'columns': 'x_test.columns'}), '(x_test_scaled, index=x_test.index, columns=x_test.columns)\n', (8250, 8309), True, 'import pandas as pd\n'), ((3506, 3550), 'gahaco.utils.optimize.merge_configs', 'merge_configs', (['config', 'model.opt', 'experiment'], {}), '(config, model.opt, experiment)\n', (3519, 3550), False, 'from gahaco.utils.optimize import merge_configs\n'), ((4185, 4277), 'gahaco.utils.optimize.feature_optimization', 'feature_optimization', (['train', 'test', "config['feature_optimization']"], {'experiment': 'experiment'}), "(train, test, config['feature_optimization'],\n experiment=experiment)\n", (4205, 4277), False, 'from gahaco.utils.optimize import feature_optimization\n'), ((9412, 9458), 'sklearn.metrics.r2_score', 'r2_score', (['y_test[threshold]', 'y_pred[threshold]'], {}), '(y_test[threshold], y_pred[threshold])\n', (9420, 9458), False, 'from sklearn.metrics import f1_score, r2_score, confusion_matrix\n'), ((9471, 9610), 'gahaco.visualization.visualize.regression', 'visualize.regression', (['y_test[threshold]', 'y_pred[threshold]', 'r2', 'metric_value', 'stellar_mass_thresholds'], {'fold': 'fold', 'experiment': 'experiment'}), '(y_test[threshold], y_pred[threshold], r2, metric_value,\n stellar_mass_thresholds, fold=fold, experiment=experiment)\n', (9491, 9610), False, 'from gahaco.visualization import visualize\n'), ((11822, 11999), 'gahaco.visualization.visualize.plot_confusion_matrix', 'visualize.plot_confusion_matrix', (['cms'], {'classes': "['Dark', 'Luminous']", 'normalize': '(False)', 'title': '"""LGBM"""', 'experiment': 'experiment', 'stellar_mass_thresholds': 'stellar_mass_thresholds'}), "(cms, classes=['Dark', 'Luminous'],\n normalize=False, title='LGBM', experiment=experiment,\n stellar_mass_thresholds=stellar_mass_thresholds)\n", (11853, 11999), False, 'from gahaco.visualization import visualize\n'), ((12120, 12300), 'gahaco.visualization.visualize.plot_confusion_matrix', 'visualize.plot_confusion_matrix', 
(['hod_cms'], {'classes': "['Dark', 'Luminous']", 'normalize': '(False)', 'title': '"""HOD"""', 'experiment': 'experiment', 'stellar_mass_thresholds': 'stellar_mass_thresholds'}), "(hod_cms, classes=['Dark', 'Luminous'],\n normalize=False, title='HOD', experiment=experiment,\n stellar_mass_thresholds=stellar_mass_thresholds)\n", (12151, 12300), False, 'from gahaco.visualization import visualize\n'), ((12421, 12557), 'gahaco.visualization.visualize.plot_tpcfs', 'visualize.plot_tpcfs', (['r_c', 'hydro_tpcf', 'pred_tpcf', 'hod_tpcfs'], {'experiment': 'experiment', 'stellar_mass_thresholds': 'stellar_mass_thresholds'}), '(r_c, hydro_tpcf, pred_tpcf, hod_tpcfs, experiment=\n experiment, stellar_mass_thresholds=stellar_mass_thresholds)\n', (12441, 12557), False, 'from gahaco.visualization import visualize\n'), ((12611, 12742), 'gahaco.visualization.visualize.plot_tpcfs', 'visualize.plot_tpcfs', (['r_c', 'hydro_tpcf', 'None', 'hod_tpcfs'], {'experiment': 'experiment', 'stellar_mass_thresholds': 'stellar_mass_thresholds'}), '(r_c, hydro_tpcf, None, hod_tpcfs, experiment=\n experiment, stellar_mass_thresholds=stellar_mass_thresholds)\n', (12631, 12742), False, 'from gahaco.visualization import visualize\n'), ((12811, 12875), 'gahaco.visualization.visualize.mean_halo_occupation', 'visualize.mean_halo_occupation', (['halo_occs'], {'experiment': 'experiment'}), '(halo_occs, experiment=experiment)\n', (12841, 12875), False, 'from gahaco.visualization import visualize\n'), ((12954, 13072), 'gahaco.visualization.visualize.plot_feature_importance', 'visualize.plot_feature_importance', (['dropcol_importance', 'x_train.columns'], {'title': '"""Drop column"""', 'experiment': 'experiment'}), "(dropcol_importance, x_train.columns,\n title='Drop column', experiment=experiment)\n", (12987, 13072), False, 'from gahaco.visualization import visualize\n'), ((13159, 13276), 'gahaco.visualization.visualize.plot_feature_importance', 'visualize.plot_feature_importance', (['pm_importance', 
'x_train.columns'], {'title': '"""Permute column"""', 'experiment': 'experiment'}), "(pm_importance, x_train.columns, title=\n 'Permute column', experiment=experiment)\n", (13192, 13276), False, 'from gahaco.visualization import visualize\n'), ((13362, 13480), 'gahaco.visualization.visualize.plot_feature_importance', 'visualize.plot_feature_importance', (['gini_importance', 'x_train.columns'], {'title': '"""Gini impurity"""', 'experiment': 'experiment'}), "(gini_importance, x_train.columns, title=\n 'Gini impurity', experiment=experiment)\n", (13395, 13480), False, 'from gahaco.visualization import visualize\n'), ((4481, 4543), 'numpy.loadtxt', 'np.loadtxt', (['f"""../../models/{FLAGS.model}/gini_importances.csv"""'], {}), "(f'../../models/{FLAGS.model}/gini_importances.csv')\n", (4491, 4543), True, 'import numpy as np\n'), ((4567, 4636), 'gahaco.features.correlation.select_uncorrelated_features', 'select_uncorrelated_features', (['features', 'labels'], {'experiment': 'experiment'}), '(features, labels, experiment=experiment)\n', (4595, 4636), False, 'from gahaco.features.correlation import select_uncorrelated_features\n'), ((5999, 6024), 'numpy.array', 'np.array', (['[9.0, 9.6, 9.8]'], {}), '([9.0, 9.6, 9.8])\n', (6007, 6024), True, 'import numpy as np\n'), ((6183, 6325), 'gahaco.utils.summary.hod_stellar_mass_summary', 'summary.hod_stellar_mass_summary', (['m200c[train_idx]', 'm200c[test_idx]', 'y_train', 'y_test', 'stellar_mass_thresholds', 'dmo_pos_test', 'FLAGS.boxsize'], {}), '(m200c[train_idx], m200c[test_idx], y_train,\n y_test, stellar_mass_thresholds, dmo_pos_test, FLAGS.boxsize)\n', (6215, 6325), False, 'from gahaco.utils import summary\n'), ((6500, 6602), 'gahaco.utils.summary.hydro_stellar_mass_summary', 'summary.hydro_stellar_mass_summary', (['hydro_pos_test', 'y_test', 'stellar_mass_thresholds', 'FLAGS.boxsize'], {}), '(hydro_pos_test, y_test,\n stellar_mass_thresholds, FLAGS.boxsize)\n', (6534, 6602), False, 'from gahaco.utils import summary\n'), 
((6808, 6912), 'gahaco.utils.summary.hod_summary', 'summary.hod_summary', (['m200c[train_idx]', 'm200c[test_idx]', 'y_train', 'y_test', 'dmo_pos_test', 'FLAGS.boxsize'], {}), '(m200c[train_idx], m200c[test_idx], y_train, y_test,\n dmo_pos_test, FLAGS.boxsize)\n', (6827, 6912), False, 'from gahaco.utils import summary\n'), ((7088, 7148), 'gahaco.utils.summary.hydro_summary', 'summary.hydro_summary', (['hydro_pos_test', 'y_test', 'FLAGS.boxsize'], {}), '(hydro_pos_test, y_test, FLAGS.boxsize)\n', (7109, 7148), False, 'from gahaco.utils import summary\n'), ((7703, 7745), 'gahaco.features.feature_utils.balance_dataset', 'balance_dataset', (['x_train', 'y_train', 'sampler'], {}), '(x_train, y_train, sampler)\n', (7718, 7745), False, 'from gahaco.features.feature_utils import get_data, balance_dataset, load_positions\n'), ((7823, 7877), 'gahaco.features.feature_utils.balance_dataset', 'balance_dataset', (['x_train', 'y_train', 'sampler'], {'split': 'None'}), '(x_train, y_train, sampler, split=None)\n', (7838, 7877), False, 'from gahaco.features.feature_utils import get_data, balance_dataset, load_positions\n'), ((9791, 10009), 'gahaco.utils.feature_importance.dropcol', 'feature_importance.dropcol', (['trained_model', 'x_train', 'y_train', 'x_test', 'y_test', 'dmo_pos_test', 'r_c', 'hydro_tpcf_test', 'metric_value', 'metric', "config['metric']['params']", 'stellar_mass_thresholds'], {'boxsize': 'FLAGS.boxsize'}), "(trained_model, x_train, y_train, x_test, y_test,\n dmo_pos_test, r_c, hydro_tpcf_test, metric_value, metric, config[\n 'metric']['params'], stellar_mass_thresholds, boxsize=FLAGS.boxsize)\n", (9817, 10009), False, 'from gahaco.utils import feature_importance\n'), ((10393, 10508), 'gahaco.utils.feature_importance.permutation', 'feature_importance.permutation', (['trained_model', 'x_test', 'y_test', 'metric_value', 'metric', "config['metric']['params']"], {}), "(trained_model, x_test, y_test, metric_value,\n metric, config['metric']['params'])\n", (10423, 10508), 
False, 'from gahaco.utils import feature_importance\n'), ((10846, 10954), 'gahaco.utils.summary.model_stellar_mass_summary', 'summary.model_stellar_mass_summary', (['y_test', 'y_pred', 'stellar_mass_thresholds', 'dmo_pos_test', 'FLAGS.boxsize'], {}), '(y_test, y_pred, stellar_mass_thresholds,\n dmo_pos_test, FLAGS.boxsize)\n', (10880, 10954), False, 'from gahaco.utils import summary\n'), ((11131, 11197), 'gahaco.utils.summary.model_summary', 'summary.model_summary', (['y_test', 'y_pred', 'dmo_pos_test', 'FLAGS.boxsize'], {}), '(y_test, y_pred, dmo_pos_test, FLAGS.boxsize)\n', (11152, 11197), False, 'from gahaco.utils import summary\n'), ((5180, 5213), 'numpy.expand_dims', 'np.expand_dims', (['labels.values', '(-1)'], {}), '(labels.values, -1)\n', (5194, 5213), True, 'import numpy as np\n'), ((13743, 13775), 'numpy.mean', 'np.mean', (['gini_importance'], {'axis': '(0)'}), '(gini_importance, axis=0)\n', (13750, 13775), True, 'import numpy as np\n'), ((5075, 5140), 'sklearn.preprocessing.KBinsDiscretizer', 'KBinsDiscretizer', ([], {'n_bins': '(20)', 'encode': '"""ordinal"""', 'strategy': '"""uniform"""'}), "(n_bins=20, encode='ordinal', strategy='uniform')\n", (5091, 5140), False, 'from sklearn.preprocessing import StandardScaler, KBinsDiscretizer\n')] |
"""
=============================
Plotting reliability diagrams
=============================
This example illustrates how to visualise the reliability diagram for a binary
probabilistic classifier.
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
print(__doc__)
##############################################################################
# This example shows different ways to visualise the reliability diagram for a
# binary classification problem.
#
# First we will generate two synthetic models and some synthetic scores and
# labels.
import matplotlib.pyplot as plt
import numpy as np
# Fix the RNG seed so the example is reproducible.
np.random.seed(42)
n_c1 = n_c2 = 500
# True per-sample probabilities of the positive class: one Beta-distributed
# cluster per class (Beta(2, 5) for negatives, Beta(4, 3) for positives).
p = np.concatenate((np.random.beta(2, 5, n_c1),
                    np.random.beta(4, 3, n_c2)
                   ))
# Labels: first half negatives (0), second half positives (1).
y = np.concatenate((np.zeros(n_c1), np.ones(n_c2)))
# Two synthetic "models": sigmoid distortions of the true probability with
# different sharpness, so each is miscalibrated in its own way.
s1 = 1/(1 + np.exp(-8*(p - 0.5)))
s2 = 1/(1 + np.exp(-3*(p - 0.5)))
plt.scatter(s1, p, label='Model 1')
plt.scatter(s2, p, label='Model 2')
plt.scatter(p, y)
plt.plot([0, 1], [0, 1], 'r--')
plt.xlabel('Model scores')
plt.ylabel('Sample true probability')
plt.grid()
plt.legend()
# Expand the 1-D vectors into two-column (negative, positive) matrices,
# the score layout used by the plotting helpers below.
p = np.vstack((1 - p, p)).T
s1 = np.vstack((1 - s1, s1)).T
s2 = np.vstack((1 - s2, s2)).T
##############################################################################
# The scores of a perfectly calibrated model would look as follows,
# compared with the generated scores
import scipy.stats as stats
# Class-conditional densities of the true probability, used to derive the
# Bayes-optimal score correction.
# NOTE(review): the samples above were drawn from Beta(4, 3) (positives)
# and Beta(2, 5) (negatives), while the densities here use Beta(3, 2) and
# Beta(2, 7) — confirm whether this mismatch is intentional.
p_g_p = stats.beta.pdf(x=p[:, 1], a=3, b=2)
p_g_n = stats.beta.pdf(x=p[:, 1], a=2, b=7)
# Posterior probability of the positive class via Bayes' rule
# (equal class priors, so the priors cancel).
p_hat = p_g_p/(p_g_n+p_g_p)
p_hat = np.vstack((1 - p_hat, p_hat)).T
plt.scatter(p[:, 1], s1[:, 1], label='Model 1')
plt.scatter(p[:, 1], s2[:, 1], label='Model 2')
plt.scatter(p[:, 1], p_hat[:, 1], color='red', label='Bayes optimal correction')
plt.xlabel('Sample true probability')
plt.ylabel('Model scores')
plt.grid()
plt.legend()
##############################################################################
# There are at least 2 very common ways to show a reliability diagram for a
# probabilistic binary classifier. Drawing a line between all the binned mean
# predictions and the true proportion of positives.
from pycalib.visualisations import plot_reliability_diagram
# Simplest form: a line through the per-bin mean score vs proportion of positives.
fig = plot_reliability_diagram(labels=y, scores=s1, show_histogram=False)
##############################################################################
# And showing bars instead of a lineplot, usually with errorbars showing the
# discrepancy with respect to a perfectly calibrated model (diagonal)
fig = plot_reliability_diagram(labels=y, scores=s1,
                               class_names=['Negative', 'Positive'],
                               show_gaps=True, show_bars=True,
                               show_histogram=False)
##############################################################################
# However, both previous illustrations do not include the number of samples
# that fall into each bin. By default the parameter show_bars is set to True as
# this information is crucial to understand how reliable is each estimation,
# and how this affects some of the calibration metrics.
# We also specify the bin boundaries and change the color of the gaps.
fig = plot_reliability_diagram(labels=y, scores=s1,
                               class_names=['Negative', 'Positive'],
                               show_gaps=True, color_gaps='firebrick',
                               bins=[0, .3, .4, .45, .5, .55, .6, .7, 1])
##############################################################################
# It is also common to plot only the confidence (considering the winning class
# only as positive class for each prediction). Notice that the class names is
# automatically set to *winning* class.
fig = plot_reliability_diagram(labels=y, scores=s1,
                               show_gaps=True,
                               confidence=True,
                               show_bars=True)
##############################################################################
# We can enable some parameters to show several aspects of the reliability
# diagram. For example, we can add a histogram indicating the number of samples
# on each bin (or show the count in each marker), the correction that should be
# applied to the average scores in order to calibrate the model can be also
# shown as red arrows pointing to the direction of the diagonal (perfectly
# calibrated model). And even the true class of each sample at the y
# coordinates [0 and 1] for each scored instance (50% of the data in
# this example, but default is 100%).
fig = plot_reliability_diagram(labels=y, scores=s1,
                               legend=['Model 1'],
                               show_histogram=True,
                               bins=9, class_names=['Negative', 'Positive'],
                               show_counts=True,
                               show_correction=True,
                               sample_proportion=0.5,
                               hist_per_class=True)
##############################################################################
# It can be also useful to have 95% confidence intervals for each bin by
# performing a binomial proportion confidence interval with various statistical
# tests. This function uses https://www.statsmodels.org/stable/generated/statsmodels.stats.proportion.proportion_confint.html
# thus accepts the different tests available in the statsmodels library. In the
# following example we use the Clopper-Pearson interval based on Beta
# distribution and a confidence interval of 95%.
fig = plot_reliability_diagram(labels=y, scores=s2,
                               legend=['Model 2'],
                               show_histogram=True,
                               show_counts=True,
                               bins=13, class_names=['Negative', 'Positive'],
                               sample_proportion=1.0,
                               errorbar_interval=0.95,
                               interval_method='beta',
                               color_list=['orange'])
##############################################################################
# The function also allows the visualisation of multiple models for comparison.
fig = plot_reliability_diagram(labels=y, scores=[s1, s2],
                               legend=['Model 1', 'Model 2'],
                               show_histogram=True,
                               bins=10, class_names=['Negative', 'Positive'],
                               errorbar_interval=0.95,
                               interval_method='beta')
##############################################################################
# It is possible to draw reliability diagram for multiple classes as well. We
# will simulate 3 classes by changing some original labels to a 3rd class, and
# modifying the scores of Model 1 and 2 to create new models 3 and 4.
# Relabel the middle third of the samples as a new class 2.
class_2_idx = range(int(len(y)/3), int(2*len(y)/3))
y[class_2_idx] = 2
# Append a third score column (copied from the positive-class column), boost it
# for the relabelled samples, then renormalise rows to sum to one.
s1 = np.hstack((s1, s1[:, 1].reshape(-1, 1)))
s1[class_2_idx,2] *= 3
s1 /= s1.sum(axis=1)[:, None]
s2 = np.hstack((s2, s2[:, 1].reshape(-1, 1)))
s2[class_2_idx,2] *= 2
s2 /= s2.sum(axis=1)[:, None]
# One reliability curve per class, per model.
fig = plot_reliability_diagram(labels=y, scores=[s1, s2],
                               legend=['Model 3', 'Model 4'],
                               show_histogram=True,
                               color_list=['darkgreen', 'chocolate'])
##############################################################################
# If we are only interested in the confidence, the 3 classes can be visualised
# in a single reliability diagram
fig = plot_reliability_diagram(labels=y, scores=[s1, s2],
                               legend=['Model 3', 'Model 4'],
                               show_histogram=True,
                               color_list=['darkgreen', 'chocolate'],
                               confidence=True)
##############################################################################
# The same can be done with the bars.
fig = plot_reliability_diagram(labels=y, scores=s1,
                               legend=['Model 3'],
                               show_histogram=True,
                               color_list=['darkgreen'],
                               show_bars=True,
                               show_gaps=True,
                               color_gaps='orange')
##############################################################################
# If we have precomputed the average proportion of true positives and
# predictions, or we have access to the ground truth, it is possible to plot
# the same reliability diagram using the following function
from pycalib.visualisations import plot_reliability_diagram_precomputed
# Binary case: one (n_bins, 1) column vector per model.
avg_true = [np.array([.1, .3, .6, .8, .9, 1]).reshape(-1, 1),
            np.array([.2, .4, .5, .7, .8, .9]).reshape(-1, 1)]
avg_pred = [np.array([.01, .25, .4, .6, .7, .8]).reshape(-1, 1),
            np.array([.15, .39, .7, .75, .8, .9]).reshape(-1, 1)]
fig = plot_reliability_diagram_precomputed(avg_true, avg_pred)
##############################################################################
# Similarly, for a multiclass problem we can provide full matrices of size
# (n_bins, n_classes) instead. Notice that the predicted scores do not need to
# be sorted, and the probabilities do not need to sum to one across classes,
# as each bin may have been computed from a different set of instances.
# Average proportion of true positives per bin; one column per class.
avg_true = [np.array([[.1, .3, .6, .8, .9, 1.],
                      [.0, .2, .4, .7, .8, .9],
                      [.1, .2, .3, .5, .6, .8]]).T,
            np.array([[.1, .4, .7, .8, .9, 1.],
                      [.9, .3, .8, .2, .7, .1],
                      [.2, .3, .5, .4, .7, .1]]).T]
# Average predicted score per bin; one column per class.  All entries are
# probabilities in [0, 1] (the final entry of the first row previously read
# `9.`, an out-of-range typo for `.9`).
avg_pred = [np.array([[.0, .3, .6, .7, .8, .9],
                      [.1, .2, .3, .5, .8, .7],
                      [.3, .5, .4, .7, .8, .9]]).T,
            np.array([[.0, .3, .6, .8, .9, 1.],
                      [.8, .1, .6, .2, .9, 0.],
                      [.1, .4, .6, .3, .5, 0.]]).T]
# Render the multiclass precomputed reliability diagram (one curve per class and model).
fig = plot_reliability_diagram_precomputed(avg_true, avg_pred)
| [
"pycalib.visualisations.plot_reliability_diagram_precomputed",
"matplotlib.pyplot.grid",
"numpy.random.beta",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.array",
"pycalib.visualisations.plot_reliability_diagram",
"numpy.zero... | [((599, 617), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (613, 617), True, 'import numpy as np\n'), ((877, 912), 'matplotlib.pyplot.scatter', 'plt.scatter', (['s1', 'p'], {'label': '"""Model 1"""'}), "(s1, p, label='Model 1')\n", (888, 912), True, 'import matplotlib.pyplot as plt\n'), ((913, 948), 'matplotlib.pyplot.scatter', 'plt.scatter', (['s2', 'p'], {'label': '"""Model 2"""'}), "(s2, p, label='Model 2')\n", (924, 948), True, 'import matplotlib.pyplot as plt\n'), ((949, 966), 'matplotlib.pyplot.scatter', 'plt.scatter', (['p', 'y'], {}), '(p, y)\n', (960, 966), True, 'import matplotlib.pyplot as plt\n'), ((967, 998), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""r--"""'], {}), "([0, 1], [0, 1], 'r--')\n", (975, 998), True, 'import matplotlib.pyplot as plt\n'), ((999, 1025), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Model scores"""'], {}), "('Model scores')\n", (1009, 1025), True, 'import matplotlib.pyplot as plt\n'), ((1026, 1063), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sample true probability"""'], {}), "('Sample true probability')\n", (1036, 1063), True, 'import matplotlib.pyplot as plt\n'), ((1064, 1074), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1072, 1074), True, 'import matplotlib.pyplot as plt\n'), ((1075, 1087), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1085, 1087), True, 'import matplotlib.pyplot as plt\n'), ((1380, 1415), 'scipy.stats.beta.pdf', 'stats.beta.pdf', ([], {'x': 'p[:, 1]', 'a': '(3)', 'b': '(2)'}), '(x=p[:, 1], a=3, b=2)\n', (1394, 1415), True, 'import scipy.stats as stats\n'), ((1424, 1459), 'scipy.stats.beta.pdf', 'stats.beta.pdf', ([], {'x': 'p[:, 1]', 'a': '(2)', 'b': '(7)'}), '(x=p[:, 1], a=2, b=7)\n', (1438, 1459), True, 'import scipy.stats as stats\n'), ((1530, 1577), 'matplotlib.pyplot.scatter', 'plt.scatter', (['p[:, 1]', 's1[:, 1]'], {'label': '"""Model 1"""'}), "(p[:, 1], s1[:, 1], label='Model 1')\n", (1541, 1577), True, 
'import matplotlib.pyplot as plt\n'), ((1578, 1625), 'matplotlib.pyplot.scatter', 'plt.scatter', (['p[:, 1]', 's2[:, 1]'], {'label': '"""Model 2"""'}), "(p[:, 1], s2[:, 1], label='Model 2')\n", (1589, 1625), True, 'import matplotlib.pyplot as plt\n'), ((1626, 1711), 'matplotlib.pyplot.scatter', 'plt.scatter', (['p[:, 1]', 'p_hat[:, 1]'], {'color': '"""red"""', 'label': '"""Bayes optimal correction"""'}), "(p[:, 1], p_hat[:, 1], color='red', label='Bayes optimal correction'\n )\n", (1637, 1711), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1744), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sample true probability"""'], {}), "('Sample true probability')\n", (1717, 1744), True, 'import matplotlib.pyplot as plt\n'), ((1745, 1771), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Model scores"""'], {}), "('Model scores')\n", (1755, 1771), True, 'import matplotlib.pyplot as plt\n'), ((1772, 1782), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1780, 1782), True, 'import matplotlib.pyplot as plt\n'), ((1783, 1795), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1793, 1795), True, 'import matplotlib.pyplot as plt\n'), ((2150, 2217), 'pycalib.visualisations.plot_reliability_diagram', 'plot_reliability_diagram', ([], {'labels': 'y', 'scores': 's1', 'show_histogram': '(False)'}), '(labels=y, scores=s1, show_histogram=False)\n', (2174, 2217), False, 'from pycalib.visualisations import plot_reliability_diagram\n'), ((2452, 2593), 'pycalib.visualisations.plot_reliability_diagram', 'plot_reliability_diagram', ([], {'labels': 'y', 'scores': 's1', 'class_names': "['Negative', 'Positive']", 'show_gaps': '(True)', 'show_bars': '(True)', 'show_histogram': '(False)'}), "(labels=y, scores=s1, class_names=['Negative',\n 'Positive'], show_gaps=True, show_bars=True, show_histogram=False)\n", (2476, 2593), False, 'from pycalib.visualisations import plot_reliability_diagram\n'), ((3130, 3311), 'pycalib.visualisations.plot_reliability_diagram', 
'plot_reliability_diagram', ([], {'labels': 'y', 'scores': 's1', 'class_names': "['Negative', 'Positive']", 'show_gaps': '(True)', 'color_gaps': '"""firebrick"""', 'bins': '[0, 0.3, 0.4, 0.45, 0.5, 0.55, 0.6, 0.7, 1]'}), "(labels=y, scores=s1, class_names=['Negative',\n 'Positive'], show_gaps=True, color_gaps='firebrick', bins=[0, 0.3, 0.4,\n 0.45, 0.5, 0.55, 0.6, 0.7, 1])\n", (3154, 3311), False, 'from pycalib.visualisations import plot_reliability_diagram\n'), ((3674, 3773), 'pycalib.visualisations.plot_reliability_diagram', 'plot_reliability_diagram', ([], {'labels': 'y', 'scores': 's1', 'show_gaps': '(True)', 'confidence': '(True)', 'show_bars': '(True)'}), '(labels=y, scores=s1, show_gaps=True, confidence=\n True, show_bars=True)\n', (3698, 3773), False, 'from pycalib.visualisations import plot_reliability_diagram\n'), ((4511, 4739), 'pycalib.visualisations.plot_reliability_diagram', 'plot_reliability_diagram', ([], {'labels': 'y', 'scores': 's1', 'legend': "['Model 1']", 'show_histogram': '(True)', 'bins': '(9)', 'class_names': "['Negative', 'Positive']", 'show_counts': '(True)', 'show_correction': '(True)', 'sample_proportion': '(0.5)', 'hist_per_class': '(True)'}), "(labels=y, scores=s1, legend=['Model 1'],\n show_histogram=True, bins=9, class_names=['Negative', 'Positive'],\n show_counts=True, show_correction=True, sample_proportion=0.5,\n hist_per_class=True)\n", (4535, 4739), False, 'from pycalib.visualisations import plot_reliability_diagram\n'), ((5509, 5766), 'pycalib.visualisations.plot_reliability_diagram', 'plot_reliability_diagram', ([], {'labels': 'y', 'scores': 's2', 'legend': "['Model 2']", 'show_histogram': '(True)', 'show_counts': '(True)', 'bins': '(13)', 'class_names': "['Negative', 'Positive']", 'sample_proportion': '(1.0)', 'errorbar_interval': '(0.95)', 'interval_method': '"""beta"""', 'color_list': "['orange']"}), "(labels=y, scores=s2, legend=['Model 2'],\n show_histogram=True, show_counts=True, bins=13, class_names=['Negative',\n 
'Positive'], sample_proportion=1.0, errorbar_interval=0.95,\n interval_method='beta', color_list=['orange'])\n", (5533, 5766), False, 'from pycalib.visualisations import plot_reliability_diagram\n'), ((6169, 6375), 'pycalib.visualisations.plot_reliability_diagram', 'plot_reliability_diagram', ([], {'labels': 'y', 'scores': '[s1, s2]', 'legend': "['Model 1', 'Model 2']", 'show_histogram': '(True)', 'bins': '(10)', 'class_names': "['Negative', 'Positive']", 'errorbar_interval': '(0.95)', 'interval_method': '"""beta"""'}), "(labels=y, scores=[s1, s2], legend=['Model 1',\n 'Model 2'], show_histogram=True, bins=10, class_names=['Negative',\n 'Positive'], errorbar_interval=0.95, interval_method='beta')\n", (6193, 6375), False, 'from pycalib.visualisations import plot_reliability_diagram\n'), ((7107, 7253), 'pycalib.visualisations.plot_reliability_diagram', 'plot_reliability_diagram', ([], {'labels': 'y', 'scores': '[s1, s2]', 'legend': "['Model 3', 'Model 4']", 'show_histogram': '(True)', 'color_list': "['darkgreen', 'chocolate']"}), "(labels=y, scores=[s1, s2], legend=['Model 3',\n 'Model 4'], show_histogram=True, color_list=['darkgreen', 'chocolate'])\n", (7131, 7253), False, 'from pycalib.visualisations import plot_reliability_diagram\n'), ((7543, 7710), 'pycalib.visualisations.plot_reliability_diagram', 'plot_reliability_diagram', ([], {'labels': 'y', 'scores': '[s1, s2]', 'legend': "['Model 3', 'Model 4']", 'show_histogram': '(True)', 'color_list': "['darkgreen', 'chocolate']", 'confidence': '(True)'}), "(labels=y, scores=[s1, s2], legend=['Model 3',\n 'Model 4'], show_histogram=True, color_list=['darkgreen', 'chocolate'],\n confidence=True)\n", (7567, 7710), False, 'from pycalib.visualisations import plot_reliability_diagram\n'), ((7952, 8125), 'pycalib.visualisations.plot_reliability_diagram', 'plot_reliability_diagram', ([], {'labels': 'y', 'scores': 's1', 'legend': "['Model 3']", 'show_histogram': '(True)', 'color_list': "['darkgreen']", 'show_bars': '(True)', 
'show_gaps': '(True)', 'color_gaps': '"""orange"""'}), "(labels=y, scores=s1, legend=['Model 3'],\n show_histogram=True, color_list=['darkgreen'], show_bars=True,\n show_gaps=True, color_gaps='orange')\n", (7976, 8125), False, 'from pycalib.visualisations import plot_reliability_diagram\n'), ((8927, 8983), 'pycalib.visualisations.plot_reliability_diagram_precomputed', 'plot_reliability_diagram_precomputed', (['avg_true', 'avg_pred'], {}), '(avg_true, avg_pred)\n', (8963, 8983), False, 'from pycalib.visualisations import plot_reliability_diagram_precomputed\n'), ((9980, 10036), 'pycalib.visualisations.plot_reliability_diagram_precomputed', 'plot_reliability_diagram_precomputed', (['avg_true', 'avg_pred'], {}), '(avg_true, avg_pred)\n', (10016, 10036), False, 'from pycalib.visualisations import plot_reliability_diagram_precomputed\n'), ((1093, 1114), 'numpy.vstack', 'np.vstack', (['(1 - p, p)'], {}), '((1 - p, p))\n', (1102, 1114), True, 'import numpy as np\n'), ((1122, 1145), 'numpy.vstack', 'np.vstack', (['(1 - s1, s1)'], {}), '((1 - s1, s1))\n', (1131, 1145), True, 'import numpy as np\n'), ((1153, 1176), 'numpy.vstack', 'np.vstack', (['(1 - s2, s2)'], {}), '((1 - s2, s2))\n', (1162, 1176), True, 'import numpy as np\n'), ((1497, 1526), 'numpy.vstack', 'np.vstack', (['(1 - p_hat, p_hat)'], {}), '((1 - p_hat, p_hat))\n', (1506, 1526), True, 'import numpy as np\n'), ((657, 683), 'numpy.random.beta', 'np.random.beta', (['(2)', '(5)', 'n_c1'], {}), '(2, 5, n_c1)\n', (671, 683), True, 'import numpy as np\n'), ((705, 731), 'numpy.random.beta', 'np.random.beta', (['(4)', '(3)', 'n_c2'], {}), '(4, 3, n_c2)\n', (719, 731), True, 'import numpy as np\n'), ((775, 789), 'numpy.zeros', 'np.zeros', (['n_c1'], {}), '(n_c1)\n', (783, 789), True, 'import numpy as np\n'), ((791, 804), 'numpy.ones', 'np.ones', (['n_c2'], {}), '(n_c2)\n', (798, 804), True, 'import numpy as np\n'), ((820, 842), 'numpy.exp', 'np.exp', (['(-8 * (p - 0.5))'], {}), '(-8 * (p - 0.5))\n', (826, 842), True, 
'import numpy as np\n'), ((854, 876), 'numpy.exp', 'np.exp', (['(-3 * (p - 0.5))'], {}), '(-3 * (p - 0.5))\n', (860, 876), True, 'import numpy as np\n'), ((9393, 9504), 'numpy.array', 'np.array', (['[[0.1, 0.3, 0.6, 0.8, 0.9, 1.0], [0.0, 0.2, 0.4, 0.7, 0.8, 0.9], [0.1, 0.2,\n 0.3, 0.5, 0.6, 0.8]]'], {}), '([[0.1, 0.3, 0.6, 0.8, 0.9, 1.0], [0.0, 0.2, 0.4, 0.7, 0.8, 0.9], [\n 0.1, 0.2, 0.3, 0.5, 0.6, 0.8]])\n', (9401, 9504), True, 'import numpy as np\n'), ((9541, 9652), 'numpy.array', 'np.array', (['[[0.1, 0.4, 0.7, 0.8, 0.9, 1.0], [0.9, 0.3, 0.8, 0.2, 0.7, 0.1], [0.2, 0.3,\n 0.5, 0.4, 0.7, 0.1]]'], {}), '([[0.1, 0.4, 0.7, 0.8, 0.9, 1.0], [0.9, 0.3, 0.8, 0.2, 0.7, 0.1], [\n 0.2, 0.3, 0.5, 0.4, 0.7, 0.1]])\n', (9549, 9652), True, 'import numpy as np\n'), ((9689, 9800), 'numpy.array', 'np.array', (['[[0.0, 0.3, 0.6, 0.7, 0.8, 9.0], [0.1, 0.2, 0.3, 0.5, 0.8, 0.7], [0.3, 0.5,\n 0.4, 0.7, 0.8, 0.9]]'], {}), '([[0.0, 0.3, 0.6, 0.7, 0.8, 9.0], [0.1, 0.2, 0.3, 0.5, 0.8, 0.7], [\n 0.3, 0.5, 0.4, 0.7, 0.8, 0.9]])\n', (9697, 9800), True, 'import numpy as np\n'), ((9837, 9948), 'numpy.array', 'np.array', (['[[0.0, 0.3, 0.6, 0.8, 0.9, 1.0], [0.8, 0.1, 0.6, 0.2, 0.9, 0.0], [0.1, 0.4,\n 0.6, 0.3, 0.5, 0.0]]'], {}), '([[0.0, 0.3, 0.6, 0.8, 0.9, 1.0], [0.8, 0.1, 0.6, 0.2, 0.9, 0.0], [\n 0.1, 0.4, 0.6, 0.3, 0.5, 0.0]])\n', (9845, 9948), True, 'import numpy as np\n'), ((8676, 8714), 'numpy.array', 'np.array', (['[0.1, 0.3, 0.6, 0.8, 0.9, 1]'], {}), '([0.1, 0.3, 0.6, 0.8, 0.9, 1])\n', (8684, 8714), True, 'import numpy as np\n'), ((8738, 8778), 'numpy.array', 'np.array', (['[0.2, 0.4, 0.5, 0.7, 0.8, 0.9]'], {}), '([0.2, 0.4, 0.5, 0.7, 0.8, 0.9])\n', (8746, 8778), True, 'import numpy as np\n'), ((8801, 8843), 'numpy.array', 'np.array', (['[0.01, 0.25, 0.4, 0.6, 0.7, 0.8]'], {}), '([0.01, 0.25, 0.4, 0.6, 0.7, 0.8])\n', (8809, 8843), True, 'import numpy as np\n'), ((8866, 8909), 'numpy.array', 'np.array', (['[0.15, 0.39, 0.7, 0.75, 0.8, 0.9]'], {}), '([0.15, 0.39, 0.7, 0.75, 0.8, 0.9])\n', 
(8874, 8909), True, 'import numpy as np\n')] |
"""hexa_grid.py: resqpy HexaGrid class module."""
version = '24th November 2021'
import logging
log = logging.getLogger(__name__)
import numpy as np
import resqpy.grid as grr
import resqpy.olio.volume as vol
import resqpy.property as rqp
from resqpy.unstructured._unstructured_grid import UnstructuredGrid
class HexaGrid(UnstructuredGrid):
    """Class for unstructured grids where every cell is hexahedral (faces may be degenerate)."""

    def __init__(self,
                 parent_model,
                 uuid = None,
                 find_properties = True,
                 cache_geometry = False,
                 title = None,
                 originator = None,
                 extra_metadata = None):
        """Creates a new resqpy HexaGrid object (RESQML UnstructuredGrid with cell shape hexahedral)

        arguments:
           parent_model (model.Model object): the model which this grid is part of
           uuid (uuid.UUID, optional): if present, the new grid object is populated from the RESQML object
           find_properties (boolean, default True): if True and uuid is present, a
              grid property collection is instantiated as an attribute, holding properties for which
              this grid is the supporting representation
           cache_geometry (boolean, default False): if True and uuid is present, all the geometry arrays
              are loaded into attributes of the new grid object
           title (str, optional): citation title for new grid; ignored if uuid is present
           originator (str, optional): name of person creating the grid; defaults to login id;
              ignored if uuid is present
           extra_metadata (dict, optional): dictionary of extra metadata items to add to the grid;
              ignored if uuid is present; defaults to no extra metadata

        returns:
           a newly created HexaGrid object
        """
        # use None as the default rather than a mutable {} (a shared dict default
        # would be silently reused and potentially mutated across calls)
        if extra_metadata is None:
            extra_metadata = {}

        super().__init__(parent_model = parent_model,
                         uuid = uuid,
                         find_properties = find_properties,
                         geometry_required = True,
                         cache_geometry = cache_geometry,
                         cell_shape = 'hexahedral',
                         title = title,
                         originator = originator,
                         extra_metadata = extra_metadata)

        if self.root is not None:
            assert grr.grid_flavour(self.root) == 'HexaGrid'
            self.check_hexahedral()

        self.grid_representation = 'HexaGrid'  #: flavour of grid; not much used

    @classmethod
    def from_unsplit_grid(cls,
                          parent_model,
                          grid_uuid,
                          inherit_properties = True,
                          title = None,
                          extra_metadata = None,
                          write_active = None):
        """Creates a new (unstructured) HexaGrid from an existing resqpy unsplit (IJK) Grid without K gaps.

        arguments:
           parent_model (model.Model object): the model which this grid is part of
           grid_uuid (uuid.UUID): the uuid of an IjkGridRepresentation from which the hexa grid will be created
           inherit_properties (boolean, default True): if True, properties will be created for the new grid
           title (str, optional): citation title for the new grid
           extra_metadata (dict, optional): dictionary of extra metadata items to add to the grid;
              defaults to no extra metadata
           write_active (boolean, optional): if True (or None and inactive property is established) then an
              active cell property is created (in addition to any inherited properties)

        returns:
           a newly created HexaGrid object

        note:
           this method includes the writing of hdf5 data, creation of xml for the new grid and adding it as a part
        """
        # avoid a mutable {} default argument (shared between calls); normalise here
        if extra_metadata is None:
            extra_metadata = {}

        # establish existing IJK grid (grr is already imported at module level)
        ijk_grid = grr.Grid(parent_model, uuid = grid_uuid, find_properties = inherit_properties)
        assert ijk_grid is not None
        assert not ijk_grid.has_split_coordinate_lines, 'IJK grid has split coordinate lines (faults)'
        assert not ijk_grid.k_gaps, 'IJK grid has K gaps'
        ijk_grid.cache_all_geometry_arrays()
        ijk_points = ijk_grid.points_ref(masked = False)
        if title is None:
            title = ijk_grid.title

        # make empty unstructured hexa grid
        hexa_grid = cls(parent_model, title = title, extra_metadata = extra_metadata)

        # derive hexa grid attributes from ijk grid
        hexa_grid.crs_uuid = ijk_grid.crs_uuid
        hexa_grid.set_cell_count(ijk_grid.cell_count())
        if ijk_grid.inactive is not None:
            hexa_grid.inactive = ijk_grid.inactive.reshape((hexa_grid.cell_count,))
            hexa_grid.all_inactive = np.all(hexa_grid.inactive)
            if hexa_grid.all_inactive:
                log.warning(f'all cells marked as inactive for unstructured hexa grid {hexa_grid.title}')
        else:
            hexa_grid.all_inactive = False

        # inherit points (nodes) in IJK grid order, ie. K cycling fastest, then I, then J
        hexa_grid.points_cached = ijk_points.reshape((-1, 3))

        # setup faces per cell
        # ordering of faces (in nodes per face): all K faces, then all J faces, then all I faces
        # within J faces, ordering is all of J- faces for J = 0 first, then increasing planes in J
        # similarly for I faces
        nk_plus_1 = ijk_grid.nk + 1
        nj_plus_1 = ijk_grid.nj + 1
        ni_plus_1 = ijk_grid.ni + 1
        k_face_count = nk_plus_1 * ijk_grid.nj * ijk_grid.ni
        j_face_count = ijk_grid.nk * nj_plus_1 * ijk_grid.ni
        i_face_count = ijk_grid.nk * ijk_grid.nj * ni_plus_1
        kj_face_count = k_face_count + j_face_count
        hexa_grid.face_count = k_face_count + j_face_count + i_face_count
        hexa_grid.faces_per_cell_cl = 6 * (1 + np.arange(hexa_grid.cell_count, dtype = int))  # 6 faces per cell
        hexa_grid.faces_per_cell = np.empty(6 * hexa_grid.cell_count, dtype = int)
        arange = np.arange(hexa_grid.cell_count, dtype = int)
        hexa_grid.faces_per_cell[0::6] = arange  # K- faces
        hexa_grid.faces_per_cell[1::6] = ijk_grid.nj * ijk_grid.ni + arange  # K+ faces
        nki = ijk_grid.nk * ijk_grid.ni
        nkj = ijk_grid.nk * ijk_grid.nj
        # todo: vectorise following for loop
        for cell in range(hexa_grid.cell_count):
            k, j, i = ijk_grid.denaturalized_cell_index(cell)
            j_minus_face = k_face_count + nki * j + ijk_grid.ni * k + i
            hexa_grid.faces_per_cell[6 * cell + 2] = j_minus_face  # J- face
            hexa_grid.faces_per_cell[6 * cell + 3] = j_minus_face + nki  # J+ face
            i_minus_face = kj_face_count + nkj * i + ijk_grid.nj * k + j
            hexa_grid.faces_per_cell[6 * cell + 4] = i_minus_face  # I- face
            hexa_grid.faces_per_cell[6 * cell + 5] = i_minus_face + nkj  # I+ face

        # setup nodes per face, clockwise when viewed from negative side of face if ijk handedness matches xyz handedness
        # ordering of nodes in points array is as for the IJK grid
        hexa_grid.node_count = hexa_grid.points_cached.shape[0]
        assert hexa_grid.node_count == (ijk_grid.nk + 1) * (ijk_grid.nj + 1) * (ijk_grid.ni + 1)
        hexa_grid.nodes_per_face_cl = 4 * (1 + np.arange(hexa_grid.face_count, dtype = int))  # 4 nodes per face
        hexa_grid.nodes_per_face = np.empty(4 * hexa_grid.face_count, dtype = int)
        # todo: vectorise for loops
        # K faces
        face_base = 0
        for k in range(nk_plus_1):
            for j in range(ijk_grid.nj):
                for i in range(ijk_grid.ni):
                    hexa_grid.nodes_per_face[face_base] = (k * nj_plus_1 + j) * ni_plus_1 + i  # ip 0, jp 0
                    hexa_grid.nodes_per_face[face_base + 1] = (k * nj_plus_1 + j + 1) * ni_plus_1 + i  # ip 0, jp 1
                    hexa_grid.nodes_per_face[face_base + 2] = (k * nj_plus_1 + j + 1) * ni_plus_1 + i + 1  # ip 1, jp 1
                    hexa_grid.nodes_per_face[face_base + 3] = (k * nj_plus_1 + j) * ni_plus_1 + i + 1  # ip 1, jp 0
                    face_base += 4
        # J faces
        assert face_base == 4 * k_face_count
        for j in range(nj_plus_1):
            for k in range(ijk_grid.nk):
                for i in range(ijk_grid.ni):
                    hexa_grid.nodes_per_face[face_base] = (k * nj_plus_1 + j) * ni_plus_1 + i  # ip 0, kp 0
                    hexa_grid.nodes_per_face[face_base + 1] = (k * nj_plus_1 + j) * ni_plus_1 + i + 1  # ip 1, kp 0
                    hexa_grid.nodes_per_face[face_base +
                                             2] = ((k + 1) * nj_plus_1 + j) * ni_plus_1 + i + 1  # ip 1, kp 1
                    hexa_grid.nodes_per_face[face_base + 3] = ((k + 1) * nj_plus_1 + j) * ni_plus_1 + i  # ip 0, kp 1
                    face_base += 4
        # I faces
        assert face_base == 4 * kj_face_count
        for i in range(ni_plus_1):
            for k in range(ijk_grid.nk):
                for j in range(ijk_grid.nj):
                    hexa_grid.nodes_per_face[face_base] = (k * nj_plus_1 + j) * ni_plus_1 + i  # jp 0, kp 0
                    hexa_grid.nodes_per_face[face_base + 1] = ((k + 1) * nj_plus_1 + j) * ni_plus_1 + i  # jp 0, kp 1
                    hexa_grid.nodes_per_face[face_base +
                                             2] = ((k + 1) * nj_plus_1 + j + 1) * ni_plus_1 + i  # jp 1, kp 1
                    hexa_grid.nodes_per_face[face_base + 3] = (k * nj_plus_1 + j + 1) * ni_plus_1 + i  # jp 1, kp 0
                    face_base += 4
        assert face_base == 4 * hexa_grid.face_count

        # set cell face is right handed
        # todo: check Energistics documents for meaning of cell face is right handed
        # here the assumption is clockwise ordering of nodes viewed from within cell means 'right handed'
        hexa_grid.cell_face_is_right_handed = np.zeros(6 * hexa_grid.cell_count,
                                                       dtype = bool)  # initially set to left handed
        # if IJK grid's ijk handedness matches the xyz handedness, then set +ve faces to right handed; else -ve faces
        if ijk_grid.off_handed():
            hexa_grid.cell_face_is_right_handed[0::2] = True  # negative faces are right handed
        else:
            hexa_grid.cell_face_is_right_handed[1::2] = True  # positive faces are right handed

        hexa_grid.write_hdf5(write_active = write_active)
        hexa_grid.create_xml(write_active = write_active)

        if inherit_properties:
            ijk_pc = ijk_grid.extract_property_collection()
            hexa_pc = rqp.PropertyCollection(support = hexa_grid)
            for part in ijk_pc.parts():
                count = ijk_pc.count_for_part(part)
                hexa_part_shape = (hexa_grid.cell_count,) if count == 1 else (hexa_grid.cell_count, count)
                hexa_pc.add_cached_array_to_imported_list(
                    ijk_pc.cached_part_array_ref(part).reshape(hexa_part_shape),
                    'inherited from grid ' + str(ijk_grid.title),
                    ijk_pc.citation_title_for_part(part),
                    discrete = not ijk_pc.continuous_for_part(part),
                    uom = ijk_pc.uom_for_part(part),
                    time_index = ijk_pc.time_index_for_part(part),
                    null_value = ijk_pc.null_value_for_part(part),
                    property_kind = ijk_pc.property_kind_for_part(part),
                    local_property_kind_uuid = ijk_pc.local_property_kind_uuid(part),
                    facet_type = ijk_pc.facet_type_for_part(part),
                    facet = ijk_pc.facet_for_part(part),
                    realization = ijk_pc.realization_for_part(part),
                    indexable_element = ijk_pc.indexable_for_part(part),
                    count = count,
                    const_value = ijk_pc.constant_value_for_part(part))
                # todo: patch min & max values if present in ijk part
                # note: hdf5 write and xml creation happen per part, so each part may
                # carry its own time series / string lookup / extra metadata
                hexa_pc.write_hdf5_for_imported_list()
                hexa_pc.create_xml_for_imported_list_and_add_parts_to_model(
                    support_uuid = hexa_grid.uuid,
                    time_series_uuid = ijk_pc.time_series_uuid_for_part(part),
                    string_lookup_uuid = ijk_pc.string_lookup_uuid_for_part(part),
                    extra_metadata = ijk_pc.extra_metadata_for_part(part))

        return hexa_grid

    def check_hexahedral(self):
        """Checks that each cell has 6 faces and each face has 4 nodes.

        notes:
           currently only performs a cursory check, without checking nodes are shared;
           assumes that degenerate faces still have four nodes identified
        """
        assert self.cell_shape == 'hexahedral'
        self.cache_all_geometry_arrays()
        assert self.faces_per_cell_cl is not None and self.nodes_per_face_cl is not None
        # cumulative length arrays must step by exactly 6 faces / 4 nodes per entry
        assert self.faces_per_cell_cl[0] == 6 and np.all(self.faces_per_cell_cl[1:] - self.faces_per_cell_cl[:-1] == 6)
        assert self.nodes_per_face_cl[0] == 4 and np.all(self.nodes_per_face_cl[1:] - self.nodes_per_face_cl[:-1] == 4)

    def corner_points(self, cell):
        """Returns corner points (nodes) of a single cell.

        arguments:
           cell (int): the index of the cell for which the corner points are required

        returns:
           numpy float array of shape (8, 3) being the xyz points of 8 nodes defining a single hexahedral cell

        note:
           if this hexa grid has been created using the from_unsplit_grid class method, then the result can be
           reshaped to (2, 2, 2, 3) for corner points compatible with those used by the Grid class
        """
        self.cache_all_geometry_arrays()
        return self.points_cached[self.distinct_node_indices_for_cell(cell)]

    def volume(self, cell):
        """Returns the volume of a single cell.

        arguments:
           cell (int): the index of the cell for which the volume is required

        returns:
           float being the volume of the hexahedral cell; units of measure is implied by crs units
        """
        self._set_crs_handedness()
        # sum pyramid volumes from the cell centre point (apex) to each quadrilateral face
        apex = self.cell_centre_point(cell)
        v = 0.0
        faces, handednesses = self.face_indices_and_handedness_for_cell(cell)
        for face_index, handedness in zip(faces, handednesses):
            nodes = self.node_indices_for_face(face_index)
            abcd = self.points_cached[nodes]
            assert abcd.shape == (4, 3)
            v += vol.pyramid_volume(apex,
                                    abcd[0],
                                    abcd[1],
                                    abcd[2],
                                    abcd[3],
                                    crs_is_right_handed = (self.crs_is_right_handed == handedness))
        return v

    # todo: add hexahedral specific method for centre_point()?
    # todo: also add other methods equivalent to those in Grid class
| [
"logging.getLogger",
"resqpy.grid.Grid",
"resqpy.property.PropertyCollection",
"numpy.zeros",
"numpy.empty",
"resqpy.olio.volume.pyramid_volume",
"resqpy.grid.grid_flavour",
"numpy.all",
"numpy.arange"
] | [((105, 132), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (122, 132), False, 'import logging\n'), ((3954, 4028), 'resqpy.grid.Grid', 'grr.Grid', (['parent_model'], {'uuid': 'grid_uuid', 'find_properties': 'inherit_properties'}), '(parent_model, uuid=grid_uuid, find_properties=inherit_properties)\n', (3962, 4028), True, 'import resqpy.grid as grr\n'), ((6050, 6095), 'numpy.empty', 'np.empty', (['(6 * hexa_grid.cell_count)'], {'dtype': 'int'}), '(6 * hexa_grid.cell_count, dtype=int)\n', (6058, 6095), True, 'import numpy as np\n'), ((6115, 6157), 'numpy.arange', 'np.arange', (['hexa_grid.cell_count'], {'dtype': 'int'}), '(hexa_grid.cell_count, dtype=int)\n', (6124, 6157), True, 'import numpy as np\n'), ((7508, 7553), 'numpy.empty', 'np.empty', (['(4 * hexa_grid.face_count)'], {'dtype': 'int'}), '(4 * hexa_grid.face_count, dtype=int)\n', (7516, 7553), True, 'import numpy as np\n'), ((10036, 10082), 'numpy.zeros', 'np.zeros', (['(6 * hexa_grid.cell_count)'], {'dtype': 'bool'}), '(6 * hexa_grid.cell_count, dtype=bool)\n', (10044, 10082), True, 'import numpy as np\n'), ((4843, 4869), 'numpy.all', 'np.all', (['hexa_grid.inactive'], {}), '(hexa_grid.inactive)\n', (4849, 4869), True, 'import numpy as np\n'), ((10761, 10802), 'resqpy.property.PropertyCollection', 'rqp.PropertyCollection', ([], {'support': 'hexa_grid'}), '(support=hexa_grid)\n', (10783, 10802), True, 'import resqpy.property as rqp\n'), ((13094, 13163), 'numpy.all', 'np.all', (['(self.faces_per_cell_cl[1:] - self.faces_per_cell_cl[:-1] == 6)'], {}), '(self.faces_per_cell_cl[1:] - self.faces_per_cell_cl[:-1] == 6)\n', (13100, 13163), True, 'import numpy as np\n'), ((13214, 13283), 'numpy.all', 'np.all', (['(self.nodes_per_face_cl[1:] - self.nodes_per_face_cl[:-1] == 4)'], {}), '(self.nodes_per_face_cl[1:] - self.nodes_per_face_cl[:-1] == 4)\n', (13220, 13283), True, 'import numpy as np\n'), ((14673, 14797), 'resqpy.olio.volume.pyramid_volume', 'vol.pyramid_volume', (['apex', 
'abcd[0]', 'abcd[1]', 'abcd[2]', 'abcd[3]'], {'crs_is_right_handed': '(self.crs_is_right_handed == handedness)'}), '(apex, abcd[0], abcd[1], abcd[2], abcd[3],\n crs_is_right_handed=self.crs_is_right_handed == handedness)\n', (14691, 14797), True, 'import resqpy.olio.volume as vol\n'), ((2393, 2420), 'resqpy.grid.grid_flavour', 'grr.grid_flavour', (['self.root'], {}), '(self.root)\n', (2409, 2420), True, 'import resqpy.grid as grr\n'), ((5949, 5991), 'numpy.arange', 'np.arange', (['hexa_grid.cell_count'], {'dtype': 'int'}), '(hexa_grid.cell_count, dtype=int)\n', (5958, 5991), True, 'import numpy as np\n'), ((7407, 7449), 'numpy.arange', 'np.arange', (['hexa_grid.face_count'], {'dtype': 'int'}), '(hexa_grid.face_count, dtype=int)\n', (7416, 7449), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import abel
import bz2
import matplotlib.pylab as plt
# This example demonstrates the Hansen and Law inverse Abel transform
# of an image obtained using a velocity map imaging (VMI) photoelectron
# spectrometer to record the photoelectron angular distribution resulting
# from photodetachment of O2- at 454 nm.
# Measured at The Australian National University
# J. Chem. Phys. 133, 174311 (2010) DOI: 10.1063/1.3493349
# Load image as a numpy array - numpy handles .gz, .bz2 transparently
imagefile = bz2.BZ2File('data/O2-ANU1024.txt.bz2')
IM = np.loadtxt(imagefile)
# use scipy.misc.imread(filename) to load image formats (.png, .jpg, etc)
rows, cols = IM.shape  # image size
# Image center should be mid-pixel, i.e. odd number of columns
if cols % 2 != 1:
    print ("even pixel width image, make it odd and re-adjust image center")
    IM = abel.tools.center.center_image(IM, center="slice")
    rows, cols = IM.shape  # new image size
r2 = rows//2  # half-height image size
c2 = cols//2  # half-width image size
# Hansen & Law inverse Abel transform
AIM = abel.Transform(IM, method="hansenlaw", direction="inverse",
                     symmetry_axis=None).transform
# PES - photoelectron speed distribution -------------
print('Calculating speed distribution:')
r, speed = abel.tools.vmi.angular_integration(AIM)
# normalize to max intensity peak
speed /= speed[200:].max()  # exclude transform noise near centerline of image
# PAD - photoelectron angular distribution ------------
print('Calculating angular distribution:')
# radial ranges (of spectral features) to follow intensity vs angle
# view the speed distribution to determine radial ranges
r_range = [(93, 111), (145, 162), (255, 280), (330, 350), (350, 370),
           (370, 390), (390, 410), (410, 430)]
# map to intensity vs theta for each radial range
Beta, Amp, rad,intensities, theta = abel.tools.vmi.radial_integration(AIM, radial_ranges=r_range)
print("radial-range anisotropy parameter (beta)")
for beta, rr in zip(Beta, r_range):
    result = " {:3d}-{:3d} {:+.2f}+-{:.2f}"\
             .format(rr[0], rr[1], beta[0], beta[1])
    print(result)
# plots of the analysis
fig = plt.figure(figsize=(15, 4))
ax1 = plt.subplot(131)
ax2 = plt.subplot(132)
ax3 = plt.subplot(133)
# join 1/2 raw data : 1/2 inversion image
vmax = IM[:, :c2-100].max()
AIM *= vmax/AIM[:, c2+100:].max()
JIM = np.concatenate((IM[:, :c2], AIM[:, c2:]), axis=1)
# pick one radial feature to display in detail
rr = r_range[-3]
intensity = intensities[-3]
beta, amp = Beta[-3], Amp[-3]
# Prettify the plot a little bit:
# Plot the raw data
im1 = ax1.imshow(JIM, origin='lower', aspect='auto', vmin=0, vmax=vmax)
fig.colorbar(im1, ax=ax1, fraction=.1, shrink=0.9, pad=0.03)
ax1.set_xlabel('x (pixels)')
ax1.set_ylabel('y (pixels)')
ax1.set_title('VMI, inverse Abel: {:d}x{:d}'\
              .format(rows, cols))
# Plot the 1D speed distribution
ax2.plot(speed)
ax2.plot((rr[0], rr[0], rr[1], rr[1]), (1, 1.1, 1.1, 1), 'r-')  # red highlight
ax2.axis(xmax=450, ymin=-0.05, ymax=1.2)
ax2.set_xlabel('radial pixel')
ax2.set_ylabel('intensity')
ax2.set_title('speed distribution')
# Plot anisotropy variation with angle for the selected radial range
ax3.plot(theta, intensity, 'r',
         label="expt. data r=[{:d}:{:d}]".format(*rr))
def P2(x):
    """Second-order Legendre polynomial, P2(x) = (3x^2 - 1) / 2."""
    return (3 * x * x - 1) / 2
def PAD(theta, beta, amp):
    """Photoelectron angular distribution: amp * (1 + beta * P2(cos(theta)))."""
    return amp * (1 + beta * P2(np.cos(theta)))
ax3.plot(theta, PAD(theta, beta[0], amp[0]), 'b', lw=2, label="fit")
ax3.annotate("$\\beta = ${:+.2f}+-{:.2f}".format(*beta), (-2, -1.1))
ax3.legend(loc=1, labelspacing=0.1, fontsize='small')
ax3.axis(ymin=-2, ymax=12)
ax3.set_xlabel("angle $\\theta$ (radians)")
ax3.set_ylabel("intensity")
ax3.set_title("anisotropy parameter")
# Tighten subplot spacing so labels do not overlap
plt.subplots_adjust(left=0.06, bottom=0.17, right=0.95, top=0.89,
                    wspace=0.35, hspace=0.37)
# Save an image of the plot
plt.savefig("plot_example_O2_PES_PAD.png", dpi=100)
# Show the plots
plt.show()
| [
"abel.tools.vmi.radial_integration",
"matplotlib.pylab.savefig",
"matplotlib.pylab.figure",
"bz2.BZ2File",
"abel.tools.center.center_image",
"abel.tools.vmi.angular_integration",
"numpy.concatenate",
"matplotlib.pylab.show",
"abel.Transform",
"matplotlib.pylab.subplot",
"numpy.loadtxt",
"numpy... | [((645, 683), 'bz2.BZ2File', 'bz2.BZ2File', (['"""data/O2-ANU1024.txt.bz2"""'], {}), "('data/O2-ANU1024.txt.bz2')\n", (656, 683), False, 'import bz2\n'), ((689, 710), 'numpy.loadtxt', 'np.loadtxt', (['imagefile'], {}), '(imagefile)\n', (699, 710), True, 'import numpy as np\n'), ((1435, 1474), 'abel.tools.vmi.angular_integration', 'abel.tools.vmi.angular_integration', (['AIM'], {}), '(AIM)\n', (1469, 1474), False, 'import abel\n'), ((2020, 2081), 'abel.tools.vmi.radial_integration', 'abel.tools.vmi.radial_integration', (['AIM'], {'radial_ranges': 'r_range'}), '(AIM, radial_ranges=r_range)\n', (2053, 2081), False, 'import abel\n'), ((2331, 2358), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(15, 4)'}), '(figsize=(15, 4))\n', (2341, 2358), True, 'import matplotlib.pylab as plt\n'), ((2365, 2381), 'matplotlib.pylab.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (2376, 2381), True, 'import matplotlib.pylab as plt\n'), ((2388, 2404), 'matplotlib.pylab.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (2399, 2404), True, 'import matplotlib.pylab as plt\n'), ((2411, 2427), 'matplotlib.pylab.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (2422, 2427), True, 'import matplotlib.pylab as plt\n'), ((2539, 2588), 'numpy.concatenate', 'np.concatenate', (['(IM[:, :c2], AIM[:, c2:])'], {'axis': '(1)'}), '((IM[:, :c2], AIM[:, c2:]), axis=1)\n', (2553, 2588), True, 'import numpy as np\n'), ((3883, 3979), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.06)', 'bottom': '(0.17)', 'right': '(0.95)', 'top': '(0.89)', 'wspace': '(0.35)', 'hspace': '(0.37)'}), '(left=0.06, bottom=0.17, right=0.95, top=0.89, wspace=\n 0.35, hspace=0.37)\n', (3902, 3979), True, 'import matplotlib.pylab as plt\n'), ((4024, 4075), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""plot_example_O2_PES_PAD.png"""'], {'dpi': '(100)'}), "('plot_example_O2_PES_PAD.png', dpi=100)\n", (4035, 4075), True, 'import matplotlib.pylab as plt\n'), ((4094, 
4104), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (4102, 4104), True, 'import matplotlib.pylab as plt\n'), ((992, 1042), 'abel.tools.center.center_image', 'abel.tools.center.center_image', (['IM'], {'center': '"""slice"""'}), "(IM, center='slice')\n", (1022, 1042), False, 'import abel\n'), ((1213, 1292), 'abel.Transform', 'abel.Transform', (['IM'], {'method': '"""hansenlaw"""', 'direction': '"""inverse"""', 'symmetry_axis': 'None'}), "(IM, method='hansenlaw', direction='inverse', symmetry_axis=None)\n", (1227, 1292), False, 'import abel\n'), ((3500, 3513), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3506, 3513), True, 'import numpy as np\n')] |
import os
import numpy as np
import pandas as pd
def sid_subdir_path(sid):
    """
    Format a subdir path that limits the number of directories in any given
    subdirectory to 100.

    The two-level fan-out is designed to support at least 100000 equities.

    Parameters
    ----------
    sid : int
        Asset identifier.

    Returns
    -------
    out : string
        A path for the bcolz rootdir, with subdirectory prefixes derived from
        the zero-padded sid, e.g. 1 is formatted as 00/00/000001.bcolz
    """
    sid_str = format(sid, '06')
    first_level = sid_str[:2]
    second_level = sid_str[2:4]
    return os.path.join(first_level, second_level, "{}.bcolz".format(sid_str))
def calc_minute_index(market_opens, trading_session):
    """
    Compute all trading minutes from daily market opens and the session layout.

    Parameters
    ----------
    market_opens: datetime64 array
        array of every day's market open; elements must expose ``.asm8``
        (e.g. ``pd.Timestamp``).
    trading_session: object
        exposes ``minute_per_day`` (int) and ``sessions``, an iterable of
        ``(offset_minutes, duration_minutes)`` pairs describing each intraday
        trading session relative to the market open.

    Returns
    -------
    out : pd.DatetimeIndex
        all trading minutes, localized to UTC.
    """
    minutes_per_day = trading_session.minute_per_day
    minutes = np.zeros(len(market_opens) * minutes_per_day, dtype="datetime64[ns]")
    # Precompute the per-session minute grid and start offset once; they are
    # identical for every trading day.
    deltas_lst = []
    session_offsets = []
    for offset, duration in trading_session.sessions:
        deltas_lst.append(np.arange(0, duration, dtype="timedelta64[m]"))
        session_offsets.append(pd.Timedelta(minutes=offset))
    for i, market_open in enumerate(market_opens):
        start = market_open.asm8
        sessions = []
        for deltas, session_offset in zip(deltas_lst, session_offsets):
            sessions.append(deltas + start + session_offset)
        minute_values = np.concatenate(sessions)
        start_ix = minutes_per_day * i
        end_ix = start_ix + minutes_per_day
        minutes[start_ix:end_ix] = minute_values
    # The deprecated ``box=True`` argument was removed from pd.to_datetime in
    # pandas 1.0; a DatetimeIndex (the old box=True behavior) is now the
    # default return, so simply omit it.
    return pd.to_datetime(minutes, utc=True)
# Root directory for fxdayu data; overridable via the FXDAYU_ROOT env var.
FXDAYU_ROOT = os.environ.get("FXDAYU_ROOT", os.path.expanduser("~/.fxdayu"))
# Location of bundled market data underneath the root.
FXDAYU_BUNDLE_PATH = os.path.join(FXDAYU_ROOT, "bundle")
| [
"numpy.arange",
"pandas.Timedelta",
"pandas.to_datetime",
"os.path.join",
"numpy.concatenate",
"os.path.expanduser"
] | [((2197, 2232), 'os.path.join', 'os.path.join', (['FXDAYU_ROOT', '"""bundle"""'], {}), "(FXDAYU_ROOT, 'bundle')\n", (2209, 2232), False, 'import os\n'), ((2053, 2096), 'pandas.to_datetime', 'pd.to_datetime', (['minutes'], {'utc': '(True)', 'box': '(True)'}), '(minutes, utc=True, box=True)\n', (2067, 2096), True, 'import pandas as pd\n'), ((2143, 2174), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.fxdayu"""'], {}), "('~/.fxdayu')\n", (2161, 2174), False, 'import os\n'), ((1885, 1909), 'numpy.concatenate', 'np.concatenate', (['sessions'], {}), '(sessions)\n', (1899, 1909), True, 'import numpy as np\n'), ((1513, 1559), 'numpy.arange', 'np.arange', (['(0)', 'duration'], {'dtype': '"""timedelta64[m]"""'}), "(0, duration, dtype='timedelta64[m]')\n", (1522, 1559), True, 'import numpy as np\n'), ((1592, 1620), 'pandas.Timedelta', 'pd.Timedelta', ([], {'minutes': 'offset'}), '(minutes=offset)\n', (1604, 1620), True, 'import pandas as pd\n')] |
#-*-coding:utf-8-*-
import numpy as np
from math import exp, sqrt, log
import random
from scipy.stats.distributions import norm
'''
Parameters:
s0 = initial stock price
k = strike price
r = risk-less short rate
sig = volatility of stock value
dt = t/T = time to maturity
m = the number of path nodes
n = the number of simulation
'''
def black_scholes_model(s0, k, r, sig, dt):
    """Price a European call and put with the Black-Scholes closed form.

    Parameters follow the module header: s0 = initial stock price, k = strike,
    r = risk-less short rate, sig = volatility, dt = time to maturity.

    Returns a dict with keys 'call_BS' and 'put_BS'.

    Bug fix: the original discounted the spot leg ``s0`` by ``exp(-r*dt)`` as
    well. In the standard Black-Scholes formula only the strike leg is
    discounted: C = s0*N(d1) - k*exp(-r*dt)*N(d2), and the symmetric put. The
    fixed version satisfies put-call parity and matches the Monte-Carlo
    estimate produced by ``monte_carlo_simulation``.
    """
    d1 = (log(s0 / k) + (r + sig ** 2 / 2) * dt) / (sig * sqrt(dt))
    d2 = d1 - sig * sqrt(dt)
    discount = exp(-r * dt)  # present-value factor for the strike leg only
    call_bs = s0 * norm.cdf(d1) - k * discount * norm.cdf(d2)
    put_bs = k * discount * norm.cdf(-d2) - s0 * norm.cdf(-d1)
    return {'call_BS': call_bs, 'put_BS': put_bs}
def monte_carlo_simulation(s0, k, r, sig, dt, m, n):
    """Estimate European call/put prices by simulating GBM paths.

    Simulates ``n`` geometric-Brownian-motion paths of ``m`` steps over time
    to maturity ``dt`` and averages the terminal payoffs (undiscounted, as in
    the closed-form comparison). Returns {'call_MC': ..., 'put_MC': ...}.
    """
    delta_t = dt / m  # length of a single time step
    drift = (r - 0.5 * sig ** 2) * delta_t
    call_payoffs = []
    put_payoffs = []
    for _ in range(0, n):
        price = s0
        for _ in range(0, m):
            # one Euler step of the exact GBM solution
            price = price * exp(drift + sig * sqrt(delta_t) * random.gauss(0, 1))
        put_payoffs.append(max(k - price, 0))
        call_payoffs.append(max(price - k, 0))
    return {'call_MC': np.average(call_payoffs), 'put_MC': np.average(put_payoffs)}
'''
trial:
a = black_scholes_model(5200, 5200, 0.03, 0.25, 0.08)
b = monte_carlo_simulation(5200, 5200, 0.03, 0.25, 0.08, 20, 2000000)
print(a)
print(b)
'''
| [
"numpy.average",
"scipy.stats.distributions.norm.cdf",
"math.sqrt",
"math.log",
"math.exp",
"random.gauss"
] | [((1272, 1290), 'numpy.average', 'np.average', (['list_2'], {}), '(list_2)\n', (1282, 1290), True, 'import numpy as np\n'), ((1300, 1318), 'numpy.average', 'np.average', (['list_1'], {}), '(list_1)\n', (1310, 1318), True, 'import numpy as np\n'), ((412, 423), 'math.log', 'log', (['(s0 / k)'], {}), '(s0 / k)\n', (415, 423), False, 'from math import exp, sqrt, log\n'), ((460, 468), 'math.sqrt', 'sqrt', (['dt'], {}), '(dt)\n', (464, 468), False, 'from math import exp, sqrt, log\n'), ((491, 499), 'math.sqrt', 'sqrt', (['dt'], {}), '(dt)\n', (495, 499), False, 'from math import exp, sqrt, log\n'), ((535, 547), 'scipy.stats.distributions.norm.cdf', 'norm.cdf', (['d1'], {}), '(d1)\n', (543, 547), False, 'from scipy.stats.distributions import norm\n'), ((569, 581), 'scipy.stats.distributions.norm.cdf', 'norm.cdf', (['d2'], {}), '(d2)\n', (577, 581), False, 'from scipy.stats.distributions import norm\n'), ((615, 628), 'scipy.stats.distributions.norm.cdf', 'norm.cdf', (['(-d2)'], {}), '(-d2)\n', (623, 628), False, 'from scipy.stats.distributions import norm\n'), ((651, 664), 'scipy.stats.distributions.norm.cdf', 'norm.cdf', (['(-d1)'], {}), '(-d1)\n', (659, 664), False, 'from scipy.stats.distributions import norm\n'), ((520, 532), 'math.exp', 'exp', (['(-r * dt)'], {}), '(-r * dt)\n', (523, 532), False, 'from math import exp, sqrt, log\n'), ((554, 566), 'math.exp', 'exp', (['(-r * dt)'], {}), '(-r * dt)\n', (557, 566), False, 'from math import exp, sqrt, log\n'), ((600, 612), 'math.exp', 'exp', (['(-r * dt)'], {}), '(-r * dt)\n', (603, 612), False, 'from math import exp, sqrt, log\n'), ((636, 648), 'math.exp', 'exp', (['(-r * dt)'], {}), '(-r * dt)\n', (639, 648), False, 'from math import exp, sqrt, log\n'), ((1085, 1103), 'random.gauss', 'random.gauss', (['(0)', '(1)'], {}), '(0, 1)\n', (1097, 1103), False, 'import random\n'), ((1069, 1082), 'math.sqrt', 'sqrt', (['delta_t'], {}), '(delta_t)\n', (1073, 1082), False, 'from math import exp, sqrt, log\n')] |
import numpy as np
from referenceqvm.tests.test_data import data_containers as dc
from pyquil.quil import Program
def test_generate_arbitrary_states(qvm):
    """Every canned state-prep program must reproduce its reference wavefunction."""
    for key in list(dc.ARBITRARY_STATE_GEN_INSTRUCTIONS.keys()):
        expected = np.asarray(dc.ARBITRARY_STATE_GEN_WF[key])
        norm = np.sqrt(np.sum(np.multiply(np.conj(expected), expected)))
        program = Program(dc.ARBITRARY_STATE_GEN_INSTRUCTIONS[key])
        wf, _ = qvm.wavefunction(program)
        n_amps = len(expected)
        # leading amplitudes must match the (unnormalized) reference state
        assert np.allclose(expected.reshape(-1), wf.amplitudes[:n_amps] * norm)
        # everything past the reference state must be zero
        assert np.allclose(np.zeros((wf.amplitudes.shape[0] - n_amps, 1)),
                           wf.amplitudes[n_amps:] * norm)
| [
"numpy.conj",
"numpy.asarray",
"referenceqvm.tests.test_data.data_containers.ARBITRARY_STATE_GEN_INSTRUCTIONS.keys",
"pyquil.quil.Program"
] | [((175, 217), 'referenceqvm.tests.test_data.data_containers.ARBITRARY_STATE_GEN_INSTRUCTIONS.keys', 'dc.ARBITRARY_STATE_GEN_INSTRUCTIONS.keys', ([], {}), '()\n', (215, 217), True, 'from referenceqvm.tests.test_data import data_containers as dc\n'), ((233, 273), 'numpy.asarray', 'np.asarray', (['dc.ARBITRARY_STATE_GEN_WF[k]'], {}), '(dc.ARBITRARY_STATE_GEN_WF[k])\n', (243, 273), True, 'import numpy as np\n'), ((346, 393), 'pyquil.quil.Program', 'Program', (['dc.ARBITRARY_STATE_GEN_INSTRUCTIONS[k]'], {}), '(dc.ARBITRARY_STATE_GEN_INSTRUCTIONS[k])\n', (353, 393), False, 'from pyquil.quil import Program\n'), ((316, 326), 'numpy.conj', 'np.conj', (['v'], {}), '(v)\n', (323, 326), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
将Omicron中与性能相关比较密切的函数抽取到这个模块。以便将来进行加速。
TODO: 部分函数之前已使用numba加速,但因numba与OS的兼容性问题取消。需要随时保持跟踪。
"""
import logging
import numpy as np
from deprecated import deprecated
logger = logging.getLogger(__name__)
@deprecated(version="1.1")
def index(arr, item):  # pragma: no cover
    """Linear scan: return the ndenumerate position of the first occurrence
    of `item` in `arr`.

    Returns -1 (not None) on a miss to keep numba's type inference happy.
    """
    return next((pos for pos, value in np.ndenumerate(arr) if value == item), -1)
@deprecated(version="1.1")
def index_sorted(arr, item):  # pragma: no cover
    """Binary-search the sorted array `arr` for `item`; return its index, or
    -1 when the element at the insertion point is not `item`."""
    pos = np.searchsorted(arr, item)
    return pos if arr[pos] == item else -1
@deprecated(
    category=PendingDeprecationWarning,
    version="1.1",
    reason="this will be moved into omicron.core.numpy_extensions module",
)
def count_between(arr, start, end):
    """Count how many elements of `arr` lie between `start` and `end`.

    `arr` must be sorted. Boundary points that are elements of the array are
    included in the count.

    Examples:
        >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111]
        >>> count_between(arr, 20050104, 20050111)
        6

        >>> count_between(arr, 20050104, 20050109)
        4
    """
    left = np.searchsorted(arr, start, side="right")
    right = np.searchsorted(arr, end, side="right")
    total = right - left + 1
    # a boundary value lying outside the array range is not a real element
    total -= int(start < arr[0])
    total -= int(end > arr[-1])
    return total
@deprecated(
    category=PendingDeprecationWarning,
    version="1.1",
    reason="this will be moved to omicron.core.numpy_extensions module",
)
def shift(arr, start, offset):
    """Locate `start` (or its nearest position) in the sorted array `arr` and
    step `offset` elements from there.

    A positive `offset` moves toward later elements, a negative one toward
    earlier elements.

    Examples:
        >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111]
        >>> shift(arr, 20050104, 1)
        20050105

        >>> shift(arr, 20050105, -1)
        20050104

        >>> # start is already past the right edge and we shift right: return start
        >>> shift(arr, 20050120, 1)
        20050120

    Args:
        arr : sorted array
        start : any value comparable against elements of `arr`
        offset (int): number of positions to move; sign gives the direction

    Returns:
        the shifted element, or `start` when the move falls off the right end
    """
    pos = np.searchsorted(arr, start, side="right")
    target = pos + offset - 1
    return start if target >= len(arr) else arr[target]
@deprecated(
    category=PendingDeprecationWarning,
    version="1.1",
    reason="this will be moved to omicron.core.timeframe module",
)
def minute_frames_floor(ticks, moment):
    """
    Align a minute-level moment down to the frame ticks, returning the aligned
    frame plus a day carry: -1 when the moment wraps back to the previous
    trading day's last tick, otherwise 0.

    Examples:
        >>> ticks = [600, 630, 660, 690, 810, 840, 870, 900]
        >>> minute_frames_floor(ticks, 545)
        (900, -1)
        >>> minute_frames_floor(ticks, 600)
        (600, 0)
        >>> minute_frames_floor(ticks, 605)
        (600, 0)
        >>> minute_frames_floor(ticks, 899)
        (870, 0)
        >>> minute_frames_floor(ticks, 900)
        (900, 0)
        >>> minute_frames_floor(ticks, 905)
        (900, 0)

    Args:
        ticks (np.array or list): frame ticks, sorted ascending
        moment (int): minutes expressed as an integer, e.g. 900 means 15:00

    Returns:
        tuple, the first is the new moment, the second is carry-on
    """
    if moment < ticks[0]:
        # before today's first tick: wrap around to the previous day's last one
        return ticks[-1], -1
    # side='right' counts the ticks that satisfy ticks <= moment
    pos = np.searchsorted(ticks, moment, side="right")
    return ticks[pos - 1], 0
@deprecated(
    category=PendingDeprecationWarning,
    version="1.1",
    reason="this will be moved to omicron.core.numpy_extensions module",
)
def floor(arr, item):
    """
    Return the largest element of `arr` that is <= `item`. When `item` is
    smaller than every element, `arr[0]` is returned; when it is larger than
    every element, `arr[-1]` is returned.

    Unlike `minute_frames_floor`, no wrap-around or carry is performed.

    Examples:
        >>> a = [3, 6, 9]
        >>> floor(a, -1)
        3
        >>> floor(a, 9)
        9
        >>> floor(a, 10)
        9
        >>> floor(a, 4)
        3
        >>> floor(a,10)
        9

    Args:
        arr: sorted array
        item: value to align down

    Returns:
        the floored element
    """
    if item < arr[0]:
        return arr[0]
    pos = np.searchsorted(arr, item, side="right")
    return arr[pos - 1]
@deprecated(
    category=PendingDeprecationWarning,
    version="1.1",
    reason="this will be moved to omicron.core.numpy_extensions module",
)
def join_by_left(key, r1, r2, mask=True):
    """Left-join `r1` and `r2` by `key`.

    Rows of `r1` with no counterpart in `r2` keep their own fields and get
    masked (missing) values in the fields that come only from `r2`.

    Same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows
    r1 to have duplicate keys.

    [Reference: stackoverflow](https://stackoverflow.com/a/53261882/13395693)

    Examples:
        >>> # to join the following
        >>> # [[ 1, 2],
        >>> # [ 1, 3], x [[1, 5],
        >>> # [ 2, 3]] [4, 7]]
        >>> # only first two rows in left will be joined
        >>> r1 = np.array([(1, 2), (1,3), (2,3)], dtype=[('seq', 'i4'), ('score', 'i4')])
        >>> r2 = np.array([(1, 5), (4,7)], dtype=[('seq', 'i4'), ('age', 'i4')])
        >>> joined = join_by_left('seq', r1, r2)
        >>> print(joined)
        [(1, 2, 5) (1, 3, 5) (2, 3, --)]

        >>> print(joined.dtype)
        (numpy.record, [('seq', '<i4'), ('score', '<i4'), ('age', '<i4')])

        >>> joined[2][2]
        masked

        >>> joined.tolist()[2][2] == None
        True

    Args:
        key : the field name to join on
        r1 : left record array (may contain duplicate keys)
        r2 : right record array
        mask : when True the result is a masked array, so unmatched cells
            read back as masked / None

    Returns:
        a numpy array
    """
    # figure out the dtype of the result array: all of r1's fields followed
    # by the r2 fields that r1 does not already have
    descr1 = r1.dtype.descr
    descr2 = [d for d in r2.dtype.descr if d[0] not in r1.dtype.names]
    descrm = descr1 + descr2
    # figure out the fields we'll need from each array
    f1 = [d[0] for d in descr1]
    f2 = [d[0] for d in descr2]
    # cache the number of columns in f1
    ncol1 = len(f1)
    # get a dict of the rows of r2 grouped by key
    rows2 = {}
    for row2 in r2:
        rows2.setdefault(row2[key], []).append(row2)
    # figure out how many rows will be in the result: each r1 row is
    # duplicated once per matching r2 row, or kept once when unmatched
    nrowm = 0
    for k1 in r1[key]:
        if k1 in rows2:
            nrowm += len(rows2[k1])
        else:
            nrowm += 1
    # allocate the return array
    # ret = np.full((nrowm, ), fill, dtype=descrm)
    _ret = np.recarray(nrowm, dtype=descrm)
    if mask:
        # start fully masked; assignments below unmask the filled cells
        ret = np.ma.array(_ret, mask=True)
    else:
        ret = _ret
    # merge the data into the return array
    i = 0
    for row1 in r1:
        if row1[key] in rows2:
            for row2 in rows2[row1[key]]:
                ret[i] = tuple(row1[f1]) + tuple(row2[f2])
                i += 1
        else:
            # no match: copy only the r1 columns, leaving the rest masked
            for j in range(ncol1):
                ret[i][j] = row1[j]
            i += 1
    return ret
@deprecated(
    category=PendingDeprecationWarning,
    version="1.1",
    reason="this will be moved to omicron.core.numpy_extensions module",
)
def numpy_append_fields(base, names, data, dtypes):
    """Append new fields (columns) to the structured array `base`.

    Re-implements `numpy.lib.recfunctions.rec_append_fields`, which cannot
    handle `data` elements whose dtype is Object.

    Example:
        >>> # append a single field
        >>> import numpy
        >>> old = np.array([i for i in range(3)], dtype=[('col1', '<f4')])
        >>> new_list = [2 * i for i in range(3)]
        >>> res = numpy_append_fields(old, 'new_col', new_list, [('new_col', '<f4')])
        >>> print(res)
        ... # doctest: +NORMALIZE_WHITESPACE
        [(0., 0.) (1., 2.) (2., 4.)]

        >>> # append multiple fields
        >>> data = [res['col1'].tolist(), res['new_col'].tolist()]
        >>> print(numpy_append_fields(old, ('col3', 'col4'), data, [('col3', '<f4'), ('col4', '<f4')]))
        ... # doctest: +NORMALIZE_WHITESPACE
        [(0., 0., 0.) (1., 1., 2.) (2., 2., 4.)]

    Args:
        base ([numpy.array]): the structured array to extend
        names ([type]): new field name(s); a single string or a sequence
        data (list): values for the new field(s), one list per field
        dtypes ([type]): dtype descriptors for the new fields
    """
    # normalize the single-field call into the multi-field shape
    if isinstance(names, str):
        names = [names]
        data = [data]
    result = np.empty(base.shape, dtype=base.dtype.descr + dtypes)
    # copy the existing columns, then fill in the new ones
    for col in base.dtype.names:
        result[col] = base[col]
    for i, name in enumerate(names):
        result[name] = data[i]
    return result
| [
"logging.getLogger",
"numpy.ma.array",
"numpy.searchsorted",
"deprecated.deprecated",
"numpy.ndenumerate",
"numpy.empty",
"numpy.recarray"
] | [((225, 252), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (242, 252), False, 'import logging\n'), ((256, 281), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""1.1"""'}), "(version='1.1')\n", (266, 281), False, 'from deprecated import deprecated\n'), ((544, 569), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""1.1"""'}), "(version='1.1')\n", (554, 569), False, 'from deprecated import deprecated\n'), ((731, 868), 'deprecated.deprecated', 'deprecated', ([], {'category': 'PendingDeprecationWarning', 'version': '"""1.1"""', 'reason': '"""this will be moved into omicron.core.numpy_extensions module"""'}), "(category=PendingDeprecationWarning, version='1.1', reason=\n 'this will be moved into omicron.core.numpy_extensions module')\n", (741, 868), False, 'from deprecated import deprecated\n'), ((1472, 1607), 'deprecated.deprecated', 'deprecated', ([], {'category': 'PendingDeprecationWarning', 'version': '"""1.1"""', 'reason': '"""this will be moved to omicron.core.numpy_extensions module"""'}), "(category=PendingDeprecationWarning, version='1.1', reason=\n 'this will be moved to omicron.core.numpy_extensions module')\n", (1482, 1607), False, 'from deprecated import deprecated\n'), ((2346, 2474), 'deprecated.deprecated', 'deprecated', ([], {'category': 'PendingDeprecationWarning', 'version': '"""1.1"""', 'reason': '"""this will be moved to omicron.core.timeframe module"""'}), "(category=PendingDeprecationWarning, version='1.1', reason=\n 'this will be moved to omicron.core.timeframe module')\n", (2356, 2474), False, 'from deprecated import deprecated\n'), ((3415, 3550), 'deprecated.deprecated', 'deprecated', ([], {'category': 'PendingDeprecationWarning', 'version': '"""1.1"""', 'reason': '"""this will be moved to omicron.core.numpy_extensions module"""'}), "(category=PendingDeprecationWarning, version='1.1', reason=\n 'this will be moved to omicron.core.numpy_extensions module')\n", (3425, 3550), False, 'from 
deprecated import deprecated\n'), ((4123, 4258), 'deprecated.deprecated', 'deprecated', ([], {'category': 'PendingDeprecationWarning', 'version': '"""1.1"""', 'reason': '"""this will be moved to omicron.core.numpy_extensions module"""'}), "(category=PendingDeprecationWarning, version='1.1', reason=\n 'this will be moved to omicron.core.numpy_extensions module')\n", (4133, 4258), False, 'from deprecated import deprecated\n'), ((6695, 6830), 'deprecated.deprecated', 'deprecated', ([], {'category': 'PendingDeprecationWarning', 'version': '"""1.1"""', 'reason': '"""this will be moved to omicron.core.numpy_extensions module"""'}), "(category=PendingDeprecationWarning, version='1.1', reason=\n 'this will be moved to omicron.core.numpy_extensions module')\n", (6705, 6830), False, 'from deprecated import deprecated\n'), ((344, 363), 'numpy.ndenumerate', 'np.ndenumerate', (['arr'], {}), '(arr)\n', (358, 363), True, 'import numpy as np\n'), ((629, 655), 'numpy.searchsorted', 'np.searchsorted', (['arr', 'item'], {}), '(arr, item)\n', (644, 655), True, 'import numpy as np\n'), ((1227, 1268), 'numpy.searchsorted', 'np.searchsorted', (['arr', 'start'], {'side': '"""right"""'}), "(arr, start, side='right')\n", (1242, 1268), True, 'import numpy as np\n'), ((1283, 1322), 'numpy.searchsorted', 'np.searchsorted', (['arr', 'end'], {'side': '"""right"""'}), "(arr, end, side='right')\n", (1298, 1322), True, 'import numpy as np\n'), ((2195, 2236), 'numpy.searchsorted', 'np.searchsorted', (['arr', 'start'], {'side': '"""right"""'}), "(arr, start, side='right')\n", (2210, 2236), True, 'import numpy as np\n'), ((3336, 3380), 'numpy.searchsorted', 'np.searchsorted', (['ticks', 'moment'], {'side': '"""right"""'}), "(ticks, moment, side='right')\n", (3351, 3380), True, 'import numpy as np\n'), ((4053, 4093), 'numpy.searchsorted', 'np.searchsorted', (['arr', 'item'], {'side': '"""right"""'}), "(arr, item, side='right')\n", (4068, 4093), True, 'import numpy as np\n'), ((6225, 6257), 
'numpy.recarray', 'np.recarray', (['nrowm'], {'dtype': 'descrm'}), '(nrowm, dtype=descrm)\n', (6236, 6257), True, 'import numpy as np\n'), ((8007, 8060), 'numpy.empty', 'np.empty', (['base.shape'], {'dtype': '(base.dtype.descr + dtypes)'}), '(base.shape, dtype=base.dtype.descr + dtypes)\n', (8015, 8060), True, 'import numpy as np\n'), ((6285, 6313), 'numpy.ma.array', 'np.ma.array', (['_ret'], {'mask': '(True)'}), '(_ret, mask=True)\n', (6296, 6313), True, 'import numpy as np\n')] |
'''
Definitions and tests managing the ASSIN dataset.
'''
import os
import pickle
import random
import logging
from collections import Counter
import xmltodict
import numpy as np
import torch
import sentencepiece as spm
from matplotlib import pyplot as plt
from torch.utils.data import Dataset, DataLoader
from transformers import T5Tokenizer
def prepare_data(file_name):
    '''
    Performs everything needed to get the data ready.
    Addition of Eos token and encoding is performed in runtime.

    Parses the ASSIN v1/v2 XML corpora into per-version, per-split example
    lists and caches the result as a pickle under assin_data/.
    Returns (processed_data, valid_modes).
    '''
    folder = "assin_data"
    valid_modes = ["train", "validation", "test"]
    file_name = os.path.join(folder, file_name)
    if os.path.isfile(file_name):
        # cached preprocessing exists: just unpickle it
        logging.info(f"Processed data found in {file_name}.")
        with open(file_name, 'rb') as processed_file:
            processed_data = pickle.load(processed_file)
    else:
        logging.info("Preprocessing data...")
        xml_files = {'v1': ['assin-ptbr-train.xml', 'assin-ptbr-dev.xml', 'assin-ptbr-test.xml'],
                     'v2': ['assin2-train-only.xml', 'assin2-dev.xml', 'assin2-test.xml']}
        processed_data = {version: {mode: [] for mode in valid_modes} for version in xml_files}
        for version, fnames in xml_files.items():
            # the three files per version line up with train/validation/test
            for mode, fname in zip(valid_modes, fnames):
                with open(os.path.join(folder, fname), 'r') as xml:
                    xml_dict = xmltodict.parse(xml.read())
                for entry in xml_dict['entailment-corpus']['pair']:
                    processed_data[version][mode].append({"pair": (entry['t'], entry['h']),
                                                          "similarity": float(entry['@similarity']),
                                                          "entailment": entry['@entailment']})
        with open(file_name, 'wb') as processed_file:
            pickle.dump(processed_data, processed_file)
        logging.info("Done.")
    return processed_data, valid_modes
def get_custom_vocab():
    '''Load the custom SentencePiece vocabulary as a HuggingFace T5Tokenizer.'''
    # Path to the SentencePiece model
    sp_model_path = 'custom_vocab/spm_32000_unigram/spm_32000_pt.model'
    # Sanity-load the model with sentencepiece first
    sp_processor = spm.SentencePieceProcessor()
    sp_processor.load(sp_model_path)
    # Then hand the same model file to HuggingFace
    return T5Tokenizer.from_pretrained(sp_model_path)
class ASSIN(Dataset):
    '''
    Loads data from preprocessed file and manages them.

    TOKENIZER and DATA are class-level and shared by every instance; DATA is
    loaded once when the class body executes.
    '''
    # entailment labels for ASSIN v1 (three-way) and v2 (two-way)
    CLASSES = ["None", "Entailment", "Paraphrase"]
    CLASSESv2 = ["None", "Entailment"]
    # shared tokenizer, assigned (class-wide) in __init__
    TOKENIZER = None
    DATA, VALID_MODES = prepare_data("processed_data.pkl")
    def __init__(self, version, mode, seq_len, vocab_name, categoric=False):
        '''
        version: v1 or v2
        mode: One of train, validation or test
        seq_len: limit to returned encoded tokens
        vocab_name: name of the vocabulary; "custom" selects the local
            SentencePiece model, anything else goes to T5Tokenizer.from_pretrained
        categoric: when True targets are entailment labels, otherwise
            similarity scores (both emitted as text-generation targets)
        '''
        if vocab_name == "custom":
            ASSIN.TOKENIZER = get_custom_vocab()
        else:
            ASSIN.TOKENIZER = T5Tokenizer.from_pretrained(vocab_name)
        super().__init__()
        assert mode in ASSIN.VALID_MODES
        self.seq_len = seq_len
        self.data = ASSIN.DATA[version][mode]
        self.categoric = categoric
        self.version = version
        logging.info(f"{mode} ASSINv{version} initialized with categoric: {categoric}, seq_len: {seq_len}")
    def __len__(self):
        # number of sentence pairs in this split
        return len(self.data)
    def __getitem__(self, i: int):
        '''
        Unpacks line from data and applies T5 encoding if necessary.
        returns: input_ids, attention_mask, target (encoded if training)
        '''
        data = self.data[i]
        pair = data["pair"]
        eos_token = ASSIN.TOKENIZER.eos_token
        if self.categoric:  # generate "Entailment", "None"
            target = ASSIN.TOKENIZER.encode(text=f"{data['entailment']}{eos_token}",
                                            max_length=5,
                                            pad_to_max_length=True,
                                            return_tensors='pt').squeeze()
        else:
            target = ASSIN.TOKENIZER.encode(text=f'{data["similarity"]}{eos_token}',
                                            max_length=5,
                                            pad_to_max_length=True,
                                            return_tensors='pt').squeeze()
        # numeric ground truth kept alongside the text target:
        # class index when categoric, raw similarity score otherwise
        if self.categoric:
            original_number = torch.Tensor([ASSIN.CLASSESv2.index(data["entailment"])]).long().squeeze()
        else:
            original_number = torch.Tensor([data["similarity"]]).float().squeeze()
        source = ASSIN.TOKENIZER.encode_plus(text=f"ASSIN sentence1: {pair[0]} {eos_token}",
                                             text_pair=f"sentence2: {pair[1]} {eos_token}",
                                             max_length=self.seq_len,
                                             pad_to_max_length=True,
                                             return_tensors='pt')
        return source["input_ids"].squeeze(), source["attention_mask"].squeeze(), target, original_number
    def get_dataloader(self, batch_size: int, shuffle: bool):
        # thin DataLoader wrapper; num_workers fixed at 4
        return DataLoader(self, batch_size=batch_size, shuffle=shuffle,
                          num_workers=4)
if __name__ == "__main__":
    print("Testing ASSIN dataset.")
    # smoke-test hyperparameters; not meant for real training
    hparams = {"model_name": "t5-small", "vocab_name": "t5-small", "seq_len": 128, "bs": 10, "version": 'v2',
               "categoric": True}
    datasets = {m: ASSIN(version=hparams["version"], mode=m, seq_len=hparams["seq_len"],
                         vocab_name=hparams["vocab_name"], categoric=hparams["categoric"]) for m in ASSIN.VALID_MODES}
    # Testing datasets
    for mode, dataset in datasets.items():
        print(f"\n{mode} dataset length: {len(dataset)}\n")
        print("Random sample")
        input_ids, attention_mask, target, original_number = random.choice(dataset)
        print(str((input_ids, attention_mask, target, original_number)))
    # Testing dataloaders
    shuffle = {"train": True, "validation": False, "test": False}
    debug_dataloaders = {mode: datasets[mode].get_dataloader(batch_size=hparams["bs"],
                                                             shuffle=shuffle[mode])
                         for mode in ASSIN.VALID_MODES}
    for mode, dataloader in debug_dataloaders.items():
        print(f"{mode} number of batches: {len(dataloader)}")
        batch = next(iter(dataloader))
    # Dataset statistics over train + validation (test is skipped below)
    with open("assin_data/processed_data.pkl", 'rb') as processed_data_pkl:
        processed_data = pickle.load(processed_data_pkl)
    for version in ['v1', 'v2']:
        wc = []
        classes = []
        regs = []
        for mode, data in processed_data[version].items():
            if mode == "test":
                continue
            for item in data:
                wc.append(len(item["pair"][0].split()) + len(item["pair"][1].split()))
                classes.append(item["entailment"])
                regs.append(item["similarity"])
        wc = np.array(wc)
        word_count_stats = {"total": wc.sum(),
                            "mean": wc.mean(),
                            "std": wc.std(),
                            "max": wc.max(),
                            "min": wc.min()}
        print(f"--------------- {version} stats --------------")
        print(f"Class balance: {Counter(classes)}")
        print(f"Similarity balance: {Counter(regs)}")
        print(word_count_stats)
        # one figure per metric: word counts and similarity scores per example
        plt.figure()
        plt.xlabel(f"{version} Sentence")
        plt.ylabel(f"{version} Word Count")
        plt.plot(range(len(wc)), wc)
        plt.figure()
        plt.xlabel(f"{version} Sentence")
        plt.ylabel(f"{version} Similarity")
        plt.plot(range(len(regs)), regs)
    plt.show()
| [
"transformers.T5Tokenizer.from_pretrained",
"random.choice",
"pickle.dump",
"matplotlib.pyplot.ylabel",
"sentencepiece.SentencePieceProcessor",
"matplotlib.pyplot.xlabel",
"os.path.join",
"pickle.load",
"torch.Tensor",
"os.path.isfile",
"numpy.array",
"matplotlib.pyplot.figure",
"collections... | [((602, 633), 'os.path.join', 'os.path.join', (['folder', 'file_name'], {}), '(folder, file_name)\n', (614, 633), False, 'import os\n'), ((2195, 2223), 'sentencepiece.SentencePieceProcessor', 'spm.SentencePieceProcessor', ([], {}), '()\n', (2221, 2223), True, 'import sentencepiece as spm\n'), ((2291, 2333), 'transformers.T5Tokenizer.from_pretrained', 'T5Tokenizer.from_pretrained', (['SP_MODEL_PATH'], {}), '(SP_MODEL_PATH)\n', (2318, 2333), False, 'from transformers import T5Tokenizer\n'), ((7897, 7907), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7905, 7907), True, 'from matplotlib import pyplot as plt\n'), ((645, 670), 'os.path.isfile', 'os.path.isfile', (['file_name'], {}), '(file_name)\n', (659, 670), False, 'import os\n'), ((680, 717), 'logging.info', 'logging.info', (['"""Preprocessing data..."""'], {}), "('Preprocessing data...')\n", (692, 717), False, 'import logging\n'), ((1777, 1798), 'logging.info', 'logging.info', (['"""Done."""'], {}), "('Done.')\n", (1789, 1798), False, 'import logging\n'), ((1817, 1870), 'logging.info', 'logging.info', (['f"""Processed data found in {file_name}."""'], {}), "(f'Processed data found in {file_name}.')\n", (1829, 1870), False, 'import logging\n'), ((3337, 3446), 'logging.info', 'logging.info', (['f"""{mode} ASSINv{version} initialized with categoric: {categoric}, seq_len: {seq_len}"""'], {}), "(\n f'{mode} ASSINv{version} initialized with categoric: {categoric}, seq_len: {seq_len}'\n )\n", (3349, 3446), False, 'import logging\n'), ((5249, 5320), 'torch.utils.data.DataLoader', 'DataLoader', (['self'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': '(4)'}), '(self, batch_size=batch_size, shuffle=shuffle, num_workers=4)\n', (5259, 5320), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((5985, 6007), 'random.choice', 'random.choice', (['dataset'], {}), '(dataset)\n', (5998, 6007), False, 'import random\n'), ((6685, 6716), 'pickle.load', 'pickle.load', 
(['processed_data_pkl'], {}), '(processed_data_pkl)\n', (6696, 6716), False, 'import pickle\n'), ((7153, 7165), 'numpy.array', 'np.array', (['wc'], {}), '(wc)\n', (7161, 7165), True, 'import numpy as np\n'), ((7608, 7620), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7618, 7620), True, 'from matplotlib import pyplot as plt\n'), ((7629, 7662), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""{version} Sentence"""'], {}), "(f'{version} Sentence')\n", (7639, 7662), True, 'from matplotlib import pyplot as plt\n'), ((7671, 7706), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""{version} Word Count"""'], {}), "(f'{version} Word Count')\n", (7681, 7706), True, 'from matplotlib import pyplot as plt\n'), ((7753, 7765), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7763, 7765), True, 'from matplotlib import pyplot as plt\n'), ((7774, 7807), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""{version} Sentence"""'], {}), "(f'{version} Sentence')\n", (7784, 7807), True, 'from matplotlib import pyplot as plt\n'), ((7816, 7851), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""{version} Similarity"""'], {}), "(f'{version} Similarity')\n", (7826, 7851), True, 'from matplotlib import pyplot as plt\n'), ((1725, 1768), 'pickle.dump', 'pickle.dump', (['processed_data', 'processed_file'], {}), '(processed_data, processed_file)\n', (1736, 1768), False, 'import pickle\n'), ((1954, 1981), 'pickle.load', 'pickle.load', (['processed_file'], {}), '(processed_file)\n', (1965, 1981), False, 'import pickle\n'), ((3075, 3114), 'transformers.T5Tokenizer.from_pretrained', 'T5Tokenizer.from_pretrained', (['vocab_name'], {}), '(vocab_name)\n', (3102, 3114), False, 'from transformers import T5Tokenizer\n'), ((7492, 7508), 'collections.Counter', 'Counter', (['classes'], {}), '(classes)\n', (7499, 7508), False, 'from collections import Counter\n'), ((7549, 7562), 'collections.Counter', 'Counter', (['regs'], {}), '(regs)\n', (7556, 7562), False, 'from collections import 
Counter\n'), ((1208, 1235), 'os.path.join', 'os.path.join', (['folder', 'fname'], {}), '(folder, fname)\n', (1220, 1235), False, 'import os\n'), ((4620, 4654), 'torch.Tensor', 'torch.Tensor', (["[data['similarity']]"], {}), "([data['similarity']])\n", (4632, 4654), False, 'import torch\n')] |
import os
import numpy as np
from statistics import mean, stdev
from nnga.datasets.base_dataset import BaseDataset
from nnga.utils.data_io import (
load_csv_file,
load_csv_line,
save_scale_parameters,
save_encoder_parameters,
save_decoder_parameters,
)
from nnga.utils.data_manipulation import scale_features
class CSVDataset(BaseDataset):
    """CSVDataset loader
    class implements {BaseDataset} and provide a loader

    The CSV is expected to contain one sample per row, a mandatory
    ``class`` column (the label) and an optional ``id`` column; every
    remaining column is treated as a numeric feature.

    Arguments
        cfg : {yacs.config.CfgNode}
            Experiment config data.
            All the information to configure the dataset loader is stored
            in experiment config.
        logger : logger instance forwarded to {BaseDataset}.
        is_validation : {bool}
            Flag to sinalize when dataset loader is a validation dataset
            it's important select information on experiment
            config to configure dataset loader corretly for validation
    """

    def __init__(self, cfg, logger, is_validation=False):
        self._is_validation = is_validation
        # Pick train vs validation CSV path; expandvars resolves any
        # environment variables embedded in the configured path.
        if self._is_validation:
            self._dataset_csv_path = os.path.expandvars(
                cfg.DATASET.VAL_CSV_PATH
            )
        else:
            self._dataset_csv_path = os.path.expandvars(
                cfg.DATASET.TRAIN_CSV_PATH
            )
        self._data = None
        self._features = None
        self.scale_parameters = {}
        self._sep = ","  # switched to ";" in _load_metadata if the header uses it
        self._scale_method = cfg.DATASET.SCALER
        self.features_selected = None
        super().__init__(cfg, logger, is_validation)

    def _load_metadata(self):
        """Create metadata for classification.

        Metadata is type of index from all files that
        represents dataset. Any huge data is load here
        just create a kind of index.

        Raises:
            RuntimeError: if the CSV header has no ``class`` column.
        """
        # Read only the header line to discover feature names and the
        # separator. Fix: use a context manager so the file handle is
        # closed even if readline() raises (original leaked it on error).
        with open(self._dataset_csv_path) as f:
            self._features = f.readline().replace("\n", "")
        if ";" in self._features:
            self._sep = ";"
        self._features = self._features.split(self._sep)
        col_list = []
        if "class" in self._features:
            col_list.append("class")
            self._features.remove("class")
        else:
            raise RuntimeError(
                "The class column does not found!\n" "Check the dataset!"
            )
        if "id" in self._features:
            col_list.append("id")
            self._features.remove("id")
        # Stream only the label (and id) columns in small chunks so the
        # index can be built without loading the full CSV in memory.
        for df in load_csv_file(
            self._dataset_csv_path, usecols=col_list, chunksize=10
        ):
            for index, row in df.iterrows():
                self._metadata[index] = {
                    "line_path": index,
                    "label": str(row["class"]),
                }

    @property
    def features(self):
        """
        Property is list with all features in dataset
        Returns:
            {list} -- list with all features in dataset
        """
        return self._features

    @property
    def input_shape(self):
        """
        Property is the input shape
        Returns:
            {int} -- input shape
        """
        return len(self._features)

    def _make_scale_parameters(self):
        """Calculate the scale parameters to be used

        Computes min/max/mean/stdev per feature column; the result feeds
        `scale_features` when samples are loaded.
        """
        # NOTE(review): one full-column read per feature — simple but
        # re-reads the CSV len(features) times; confirm this is acceptable
        # for the dataset sizes in use.
        for key in self._features:
            df = load_csv_file(self._dataset_csv_path, usecols=[key])
            df[key] = df[key].astype(float)
            self.scale_parameters[key] = {
                "min": min(df[key].values),
                "max": max(df[key].values),
                "mean": mean(df[key].values),
                "stdev": stdev(df[key].values),
            }

    def save_parameters(self):
        """
        Save parameters from dataset

        Persists scale parameters plus the label encoder/decoder maps to
        the experiment output directory.
        """
        save_scale_parameters(self._output_dir, self.scale_parameters)
        save_encoder_parameters(self._output_dir, self._class_to_id)
        save_decoder_parameters(self._output_dir, self._id_to_class)

    def load_sample_by_idx(self, idx):
        """
        Parameters
        ----------
        idx : str
            sample idx to be load
        Returns
        -------
        Sample data scaled
        """
        header, sample = load_csv_line(
            self._dataset_csv_path, self._sep, self._metadata[idx]["line_path"]
        )
        # Keep only the columns that are actual features (drops class/id).
        indexes_of_features = [
            i for i, value in enumerate(header) if value in self._features
        ]
        # Decimal comma → decimal point before the float conversion.
        sample = [
            float(value.replace(",", "."))
            for i, value in enumerate(sample)
            if i in indexes_of_features
        ]
        header = [
            value for i, value in enumerate(header) if i in indexes_of_features
        ]
        # Normalize the data
        sample = scale_features(
            sample,
            header,
            self.scale_parameters,
            scale_method=self._scale_method,
        )
        return sample

    def _data_generation(self, indexes):
        """
        Method use indexes to generate a batch data
        Use metadata structure to:
            - load images and ground thruth
            - apply data augmentation
            - form batchs
        Parameters
        ----------
        indexes {list} -- list of indexes from metadata to be
            loaded in a bacth with input and ground thruth

        Returns
        -------
        Tuple of (attributes, labels) as numpy arrays; labels are
        one-hot encoded against self._labels.
        """
        # TODO: Data augmentation
        attributes = [self.load_sample_by_idx(idx) for idx in indexes]
        labels = []
        for i, idx in enumerate(indexes):
            # One-hot encode the label for this sample.
            np_label = np.zeros(len(self._labels))
            np_label[self.label_encode(self._metadata[idx]["label"])] = 1
            labels.append(np_label)
            # Optionally restrict the sample to a selected feature subset
            # (e.g. chosen by a genetic algorithm).
            if self.features_selected is not None:
                attributes[i] = [
                    attributes[i][index] for index in self.features_selected
                ]
        self._generator_classes.extend(labels)
        return np.array(attributes), np.array(labels)
| [
"statistics.mean",
"statistics.stdev",
"nnga.utils.data_io.load_csv_line",
"os.path.expandvars",
"nnga.utils.data_io.save_scale_parameters",
"nnga.utils.data_io.save_decoder_parameters",
"nnga.utils.data_manipulation.scale_features",
"numpy.array",
"nnga.utils.data_io.load_csv_file",
"nnga.utils.d... | [((2407, 2476), 'nnga.utils.data_io.load_csv_file', 'load_csv_file', (['self._dataset_csv_path'], {'usecols': 'col_list', 'chunksize': '(10)'}), '(self._dataset_csv_path, usecols=col_list, chunksize=10)\n', (2420, 2476), False, 'from nnga.utils.data_io import load_csv_file, load_csv_line, save_scale_parameters, save_encoder_parameters, save_decoder_parameters\n'), ((3704, 3766), 'nnga.utils.data_io.save_scale_parameters', 'save_scale_parameters', (['self._output_dir', 'self.scale_parameters'], {}), '(self._output_dir, self.scale_parameters)\n', (3725, 3766), False, 'from nnga.utils.data_io import load_csv_file, load_csv_line, save_scale_parameters, save_encoder_parameters, save_decoder_parameters\n'), ((3775, 3835), 'nnga.utils.data_io.save_encoder_parameters', 'save_encoder_parameters', (['self._output_dir', 'self._class_to_id'], {}), '(self._output_dir, self._class_to_id)\n', (3798, 3835), False, 'from nnga.utils.data_io import load_csv_file, load_csv_line, save_scale_parameters, save_encoder_parameters, save_decoder_parameters\n'), ((3844, 3904), 'nnga.utils.data_io.save_decoder_parameters', 'save_decoder_parameters', (['self._output_dir', 'self._id_to_class'], {}), '(self._output_dir, self._id_to_class)\n', (3867, 3904), False, 'from nnga.utils.data_io import load_csv_file, load_csv_line, save_scale_parameters, save_encoder_parameters, save_decoder_parameters\n'), ((4156, 4243), 'nnga.utils.data_io.load_csv_line', 'load_csv_line', (['self._dataset_csv_path', 'self._sep', "self._metadata[idx]['line_path']"], {}), "(self._dataset_csv_path, self._sep, self._metadata[idx][\n 'line_path'])\n", (4169, 4243), False, 'from nnga.utils.data_io import load_csv_file, load_csv_line, save_scale_parameters, save_encoder_parameters, save_decoder_parameters\n'), ((4694, 4785), 'nnga.utils.data_manipulation.scale_features', 'scale_features', (['sample', 'header', 'self.scale_parameters'], {'scale_method': 'self._scale_method'}), '(sample, header, 
self.scale_parameters, scale_method=self.\n _scale_method)\n', (4708, 4785), False, 'from nnga.utils.data_manipulation import scale_features\n'), ((1060, 1104), 'os.path.expandvars', 'os.path.expandvars', (['cfg.DATASET.VAL_CSV_PATH'], {}), '(cfg.DATASET.VAL_CSV_PATH)\n', (1078, 1104), False, 'import os\n'), ((1186, 1232), 'os.path.expandvars', 'os.path.expandvars', (['cfg.DATASET.TRAIN_CSV_PATH'], {}), '(cfg.DATASET.TRAIN_CSV_PATH)\n', (1204, 1232), False, 'import os\n'), ((3266, 3318), 'nnga.utils.data_io.load_csv_file', 'load_csv_file', (['self._dataset_csv_path'], {'usecols': '[key]'}), '(self._dataset_csv_path, usecols=[key])\n', (3279, 3318), False, 'from nnga.utils.data_io import load_csv_file, load_csv_line, save_scale_parameters, save_encoder_parameters, save_decoder_parameters\n'), ((5879, 5899), 'numpy.array', 'np.array', (['attributes'], {}), '(attributes)\n', (5887, 5899), True, 'import numpy as np\n'), ((5901, 5917), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (5909, 5917), True, 'import numpy as np\n'), ((3519, 3539), 'statistics.mean', 'mean', (['df[key].values'], {}), '(df[key].values)\n', (3523, 3539), False, 'from statistics import mean, stdev\n'), ((3566, 3587), 'statistics.stdev', 'stdev', (['df[key].values'], {}), '(df[key].values)\n', (3571, 3587), False, 'from statistics import mean, stdev\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Preprocessing meta module - preprocess.py - `DeepCV`__
.. moduleauthor:: <NAME>
"""
import copy
import logging
import functools
from pathlib import Path
from joblib import Memory
from typing import Optional, Dict, Tuple, List, Iterable, Union, Callable, Any, Sequence
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
import PIL
import numpy as np
import albumentations
from deepcv.utils import NL
import deepcv.utils
from .. import hyperparams
__all__ = ['PreprocessedDataset', 'fn_to_transform', 'split_dataset', 'preprocess', 'tensor_to_np']
__author__ = '<NAME>'
# Joblib memory used by `_process_normalization_stats`
# NOTE: importing this module has a filesystem side effect — it creates the
# joblib cache directory under the *current working directory* if missing.
JOBLIB_CACHE_PATH = Path.cwd() / 'data/03_primary/joblib_cache'
JOBLIB_CACHE_PATH.mkdir(parents=True, exist_ok=True)
memory = Memory(JOBLIB_CACHE_PATH, verbose=0)
class PreprocessedDataset(Dataset):
    """ A Simple PyTorch Dataset which applies given inputs/target transforms to underlying pytorch dataset items """

    def __init__(self, underlying_dataset: Dataset, img_transform: Optional[Callable], target_transform: Callable = None, augmentation_transform: Callable = None):
        self._underlying_dataset = underlying_dataset
        self._img_transform = img_transform
        self._target_transform = target_transform
        self._augmentation_transform = augmentation_transform

    def __getitem__(self, index):
        data = self._underlying_dataset.__getitem__(index)
        if isinstance(data, tuple):
            # We assume first entry is image and any other entries are targets
            x, *ys = data
            if self._img_transform is not None:
                x = self._img_transform(x)
            if self._target_transform is not None:
                ys = (self._target_transform(y) for y in ys)
            if self._augmentation_transform is not None:
                raise NotImplementedError
            return (x, *ys)
        else:
            # Fix: `img_transform` is Optional but the original called it
            # unconditionally here, raising TypeError when it is None.
            return self._img_transform(data) if self._img_transform is not None else data

    def __len__(self):
        return len(self._underlying_dataset)

    def __repr__(self):
        return f'{PreprocessedDataset.__name__}[{repr(vars(self))}]'
def fn_to_transform(fn: Callable, *transform_args: Iterable[str]) -> Callable[[], Callable]:
    """ Turn `fn` into a transform factory.

    Returns `_get_transform`: calling it with no keyword arguments yields `fn` unchanged,
    while calling it with keyword arguments yields `functools.partial(fn, **kwargs)`.
    `transform_args` lists the keyword-argument names `fn` accepts; an unknown keyword
    raises `ValueError`.
    """
    def _get_transform(**transform_kwargs) -> Callable:
        if transform_kwargs:
            # issuperset is used in case there are some arguments in `transform_args` which are optional/defaulted (may raise later if there are missing required arguments).
            # Fix: original condition was inverted — it raised exactly when every provided
            # kwarg was valid and silently accepted unknown kwargs.
            if not set(transform_args).issuperset(transform_kwargs.keys()):
                raise ValueError(f'Error: `{fn}` transform expected following arguments: `{transform_args}` but got: `{transform_kwargs}`')
            return functools.partial(fn, **transform_kwargs)
        else:
            return fn
    return _get_transform
@fn_to_transform
def tensor_to_np(tensor: torch.Tensor) -> np.ndarray:
    """Convert a (CPU) tensor to a numpy array sharing the same memory.

    Fix: the original returned the bound method `tensor.numpy` instead of
    calling it, so the transform produced a callable, not an `np.ndarray`.
    """
    return tensor.numpy()
""" `TRANSFORM_ARGS_PROCESSORS` Maps transforms types with their arguments processing function and an iterable of arguments names which can be processed at runtime by this function.
If a transform needs arguments which can be processed at runtime instead of beeing provided in YAML config (`parameters.yml`),
you can register its argument(s) processing function in this Dict or decorate argument(s) processing function with `register_transform_processor`.
NOTE: Registered functions should be a `Callable[['trainset', 'to_process'], Dict[str, Any]]`) which returns a dict of processed arguments to be
provided to their respective tranform (needed by `deepcv.meta.data.preprocess.preprocess` procedure).
"""
TRANSFORM_ARGS_PROCESSORS = dict()
def register_transform_processor(transform: Union[str, Callable], processable_args_names: Iterable[str]):
    """ Decorator factory registering an argument-processing function for `transform`.

    The decorated function is appended to `deepcv.meta.data.preprocess.TRANSFORM_ARGS_PROCESSORS`
    so that the `deepcv.meta.data.preprocess.preprocess` procedure can compute some of the
    transform's keyword arguments at runtime (from the trainset) instead of requiring them in
    the YAML configuration (`parameters.yml`).

    Args:
        - transform: Transform which needs keyword arguments proceesed by decorated function
        - processable_args_names: Transform arguments names which can be processed by decorated function to be provided to transform constructor
    """
    def _decorator(process_fn: Callable[['trainset', 'to_process'], Dict[str, Any]]):
        # Refuse double registration: each transform gets exactly one processor.
        if transform in TRANSFORM_ARGS_PROCESSORS:
            raise RuntimeError(f'Error: {transform} is already registered in `deepcv.meta.data.preprocess.TRANSFORM_ARGS_PROCESSORS`')
        TRANSFORM_ARGS_PROCESSORS[transform] = (process_fn, processable_args_names)
        return process_fn
    return _decorator
@memory.cache
@register_transform_processor(transform=torchvision.transforms.Normalize, processable_args_names=['mean', 'std'])
def _process_normalization_stats(trainset: Dataset, to_process: Sequence[str]) -> Dict[str, torch.Tensor]:
    """ Compute per-channel 'mean' and/or 'std' statistics over all `trainset` images.

    Registered as the runtime argument processor for `torchvision.transforms.Normalize`
    and cached to disk through joblib's `memory.cache` (keyed on the arguments).
    """
    assert {'mean', 'std'}.issuperset(to_process), f'Error: {deepcv.utils.get_str_repr(_process_normalization_stats)} can only process `mean` or `std`, not: `{to_process}`'
    # Determine channel dimension size (we assume channel dim is the first tensor dim as there isn't batches yet (Dataset))
    dummy_image = trainset[0][0] if isinstance(trainset[0], Tuple) else trainset[0]
    dummy_image = dummy_image if isinstance(dummy_image, torch.Tensor) else torchvision.transforms.ToTensor()(dummy_image)
    channels = dummy_image.shape[0]
    # Process mean and std per channel dimension across all trainset image batches
    stats = {n: torch.zeros((channels,)) for n in to_process}
    for input_data in trainset:
        img = input_data[0] if isinstance(input_data, Tuple) else input_data  # Assumes image is the only or first data entry
        if not isinstance(img, torch.Tensor):
            # Convert images to Tensors if needed
            img = torchvision.transforms.ToTensor()(img)
        # Reduce over every non-channel dimension (H, W, ...).
        dims = tuple(range(1, len(img.shape)))
        if 'mean' in stats:
            stats['mean'] += img.mean(dim=dims) / len(trainset)
        if 'std' in stats:
            # NOTE(review): this accumulates the *average of per-image stds*, not the
            # pooled std over the whole trainset — confirm this approximation is intended.
            stats['std'] += img.std(dim=dims) / len(trainset)
    return stats
def _parse_transforms_specification(transform_identifiers: Sequence, trainset: Dataset, transform_args_processors: Dict = TRANSFORM_ARGS_PROCESSORS) -> torchvision.transforms.Compose:
    """ Parses a transforms specification sequence.
    Finds transforms type if string identifier is provided, find its arguments from `params`/YAML and/or process it (`TRANSFORM_ARGS_PROCESSORS`) if any, instanciate those transforms and returns their composition.
    Args:
        - transform_identifiers: Transforms specification to be parsed (probably comes from YAML configuration file, see preprocessing procedures transforms lists in `./conf/base/parameters.yml`)
        - trainset: Trainset Dataset to be provided to functions registered in `transform_args_processors`. See `deepcv.meta.data.preprocess.register_transform_processor` for more details.
        - transform_args_processors: Dict providing function which can process transforms arguments at runtime from `trainset`. See `deepcv.meta.data.preprocess.register_transform_processor` for more details.

    Returns:
        A `torchvision.transforms.Compose` of the instantiated transforms, in spec order.

    Raises:
        ValueError: on malformed specification entries or unresolvable identifiers.
    """
    # NOTE: the default for `transform_args_processors` is the module-level registry
    # dict, bound at definition time — registrations after import are still visible
    # since the same dict object is shared (it is only read here, never mutated).
    fn_name = deepcv.utils.get_str_repr(_parse_transforms_specification, __file__)
    transforms = []
    for spec in transform_identifiers:
        transform_kwargs = {}
        if isinstance(spec, Dict):
            # A dict spec must be {transform_identifier: {kwargs...}} — exactly one entry.
            if not len(spec.items()) == 1:
                raise ValueError(f'Error: {fn_name}: Invalid transform specification, a transform should be specified by a single transform '
                                 f'type/identifer which can eventually be mapped to a dict of keyword arguments')
            if not isinstance(next(iter(spec.values())), Dict):
                raise ValueError(f'Error: {fn_name}: A value mapped to a transform is expected to be a dict of keyword arguments which will '
                                 f'be provided to transform\'s constructor/function, got: `{spec}`')
            # There are user-provided transform keyword arguments in `params` (from YAML)
            spec, transform_kwargs = next(iter(spec.items()))
        # Check transform specification is a valid string identifier (parsed) or Callable
        elif isinstance(spec, str):
            spec = deepcv.utils.get_by_identifier(spec)  # Try to retreive tranform by its string identifier (raises otherwise)
        elif not isinstance(spec, Callable):
            raise ValueError(f'Error: {fn_name} couldn\'t find `{spec}` tranform, transform specification should either be a string identifier or tranform `Callable` type.')
        # Process any missing transform arguments from trainset (only process arguments which are not already provided in `params`/YAML) (e.g. process mean and variance of trainset images)
        if spec in transform_args_processors:
            process_fn, processable_args_names = transform_args_processors[spec]
            to_process = [arg_name for arg_name in processable_args_names if arg_name not in transform_kwargs]
            if len(to_process) > 0:
                # There are missing transform argument(s) to be processed
                processed_state = process_fn(trainset=trainset, to_process=to_process)
                transform_kwargs.update({n: processed_state[n] for n in to_process})
        # Instanciate/call tranform with arguments from spec (YAML) and/or from its runtime processing function
        transforms.append(spec(**transform_kwargs))
    return torchvision.transforms.Compose(transforms)
def split_dataset(params: Union[Dict[str, Any], deepcv.meta.hyperparams.Hyperparameters], dataset_or_trainset: Dataset, testset: Dataset = None) -> Dict[str, Dataset]:
    """ Split a PyTorch Dataset into 'trainset', 'testset' and eventually 'validset' subsets.

    Args:
        - params: Mapping (probably from YAML config) with optional 'testset_ratio',
          'validset_ratio' (floats in ]0, 1[) and 'cache' (bool) entries.
        - dataset_or_trainset: Whole dataset to split, or the trainset if `testset` is given.
        - testset: Pre-existing testset, if any; when provided, no testset is sampled.

    Returns a dict with 'trainset' and 'testset' keys, plus 'validset' when one was sampled.

    Raises:
        ValueError: when neither `testset` nor 'testset_ratio' is provided.
        RuntimeError: when the requested ratios leave no trainset samples.
    """
    func_name = deepcv.utils.get_str_repr(split_dataset, __file__)
    params, _ = deepcv.meta.hyperparams.to_hyperparameters(params, defaults={'validset_ratio': None, 'testset_ratio': None, 'cache': False})
    logging.info(f'{func_name}: Spliting pytorch dataset into a trainset, testset and eventually a validset: `params="{params}"`')
    testset_ratio, validset_ratio = params['testset_ratio'], params['validset_ratio']

    # `split_lengths` accumulates the sizes of the subsets to sample from `dataset_or_trainset`
    # (testset first when needed, then validset); the trainset keeps the remainder.
    split_lengths = tuple()
    if testset is None:
        if testset_ratio is None:
            raise ValueError(f'Error: {func_name} function either needs an existing `testset` as argument or you must specify a `testset_ratio` in `params` '
                             f'(probably from parameters/preprocessing YAML config){NL}Provided dataset spliting parameters: "{params}"')
        split_lengths += (int(len(dataset_or_trainset) * testset_ratio),)
        # Find validset size to sample from `dataset_or_trainset` if needed
        if validset_ratio is not None:
            split_lengths += (int(len(dataset_or_trainset) * validset_ratio),)
    elif validset_ratio is None:
        # Testset is already existing and no validset needs to be sampled : return dataset as is
        return {'trainset': dataset_or_trainset, 'testset': testset}
    else:
        # Fix: testset already exists but a validset is still requested — the original
        # returned early here, silently ignoring `validset_ratio`.
        split_lengths += (int(len(dataset_or_trainset) * validset_ratio),)

    # Perform sampling/splitting
    trainset_size = int(len(dataset_or_trainset) - np.sum(split_lengths))
    if trainset_size < 1:
        raise RuntimeError(f'Error in {func_name}: testset and eventual validset size(s) are too large, there is no remaining trainset samples{NL}'
                           f'(maybe dataset is too small (`len(dataset_or_trainset)={len(dataset_or_trainset)}`) or there is a mistake in `testset_ratio={testset_ratio}` or `validset_ratio={validset_ratio}` values, whcih must be between 0. and 1.).')
    trainset, *testset_and_validset = torch.utils.data.random_split(dataset_or_trainset, (trainset_size, *split_lengths))
    if testset is None:
        testset = testset_and_validset[0]
    # When a validset was sampled it is always the last split.
    validset = testset_and_validset[-1] if validset_ratio is not None else None
    if params['cache']:
        logging.info(f'{func_name}: Saving resulting dataset to disk (`params["cache"] == True`)...')
        raise NotImplementedError  # TODO: save to (data/03_primary/)
    return {'trainset': trainset, 'validset': validset, 'testset': testset} if validset else {'trainset': trainset, 'testset': testset}
def preprocess(params: Union[Dict[str, Any], deepcv.meta.hyperparams.Hyperparameters], dataset_or_trainset: Dataset, testset: Optional[Dataset]) -> Dict[str, PreprocessedDataset]:
    """ Main preprocessing procedure: can perform datasets spliting (calls `deepcv.meta.data.preprocess.split_dataset`), data preprocessing with tranforms and data augmentation if any augmentation recipes have been specified in `params`.
    If needed, a seed can also be specified in `params`, in order to perform a deterministic preprocessing.
    Preprocess and augment data according to recipes specified in hyperparameters (`params`) from YAML config (see ./conf/base/parameters.yml)
    Args:
        - params: Parameters map (probably from YAML config) which specifies preprocessing and augmentation procedure, see defaults and required parameters in top lines of this function code (a parameter is required if its default value is `...`).
        - dataset_or_trainset: PyTorch dataset (trainset or whole dataset)
        - testset: Testset dataset if it exists (if not `None`, then `dataset_or_trainset` is assumed to be the `trainset`; If `None`, testset will be sampled from `dataset_or_trainset` according to ratios given in `params['split_dataset']`, see `deepcv.meta.data.preprocess.split_dataset` for more details)
    Returns a dict which contains preprocessed and/or augmented 'trainset', 'testset' and 'validset' datasets
    """
    fn_name = deepcv.utils.get_str_repr(preprocess, __file__)
    logging.info(f'Starting pytorch dataset preprocessing procedure... ({fn_name})')
    # `transforms` is required (`...` default); the rest are optional.
    params, _ = deepcv.meta.hyperparams.to_hyperparameters(params, defaults={'transforms': ..., 'target_transforms': [], 'cache': False,
                                                                 'augmentation_reciepe': None, 'split_dataset': {}, 'seed': None})

    # Seed RNGs first so the dataset split below is deterministic too.
    if params['seed'] is not None:
        deepcv.utils.set_seeds(params['seed'])
    datasets = split_dataset(params['split_dataset'], dataset_or_trainset, testset)

    # Define image preprocessing transforms
    # NOTE: transform arguments that need runtime processing (e.g. normalization
    # stats) are always computed on the trainset, never on test/validation data.
    preprocess_transforms = dict(img_transform=_parse_transforms_specification(params['transforms'], trainset=datasets['trainset']))

    # Setup target preprocessing transforms
    if params['target_transforms'] is not None and len(params['target_transforms']) > 0:
        preprocess_transforms['target_transform'] = _parse_transforms_specification(params['target_transforms'], trainset=datasets['trainset'])

    # Apply data augmentation
    if params['augmentation_reciepe'] is not None:
        logging.info(f'Applying dataset augmentation reciepe ')
        # TODO: (WIP) use same transforms parsing procedure for augmentation: _parse_transforms_specification(params['augmentation_reciepe']['tranforms'], trainset=datasets['trainset'])
        preprocess_transforms['augmentation_transform'] = deepcv.meta.data.augmentation.apply_augmentation_reciepe(datasets=datasets, params=params['augmentation_reciepe'])

    # Replace datasets with `PreprocessedDataset` instances in order to apply perprocesssing transforms to datasets entries (transforms applied on dataset `__getitem__` calls)
    datasets = {n: PreprocessedDataset(ds, **preprocess_transforms) for n, ds in datasets.items()}

    # If needed, cache/save preprocessed/augmened dataset(s) to disk
    if params['cache']:
        logging.info('`deepcv.meta.data.preprocess.preprocess` function is saving resulting dataset to disk (`params["cache"] == True`)')
        raise NotImplementedError  # TODO: Save preprocessed dataset to disk (data/04_features/)

    logging.info(f'Pytorch Dataset preprocessing procedure ({deepcv.utils.get_str_repr(preprocess, __file__)}) done, returning preprocessed/augmented Dataset(s).')
    return datasets
if __name__ == '__main__':
    # Run this module's self-tests through DeepCV's test CLI helper.
    module_test_cli = deepcv.utils.import_tests().test_module_cli(__file__)
    module_test_cli()
| [
"torch.utils.data.random_split",
"pathlib.Path.cwd",
"joblib.Memory",
"numpy.sum",
"functools.partial",
"torch.zeros",
"torchvision.transforms.ToTensor",
"logging.info",
"torchvision.transforms.Compose"
] | [((835, 871), 'joblib.Memory', 'Memory', (['JOBLIB_CACHE_PATH'], {'verbose': '(0)'}), '(JOBLIB_CACHE_PATH, verbose=0)\n', (841, 871), False, 'from joblib import Memory\n'), ((729, 739), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (737, 739), False, 'from pathlib import Path\n'), ((9839, 9881), 'torchvision.transforms.Compose', 'torchvision.transforms.Compose', (['transforms'], {}), '(transforms)\n', (9869, 9881), False, 'import torchvision\n'), ((10264, 10400), 'logging.info', 'logging.info', (['f"""{func_name}: Spliting pytorch dataset into a trainset, testset and eventually a validset: `params="{params}"`"""'], {}), '(\n f\'{func_name}: Spliting pytorch dataset into a trainset, testset and eventually a validset: `params="{params}"`\'\n )\n', (10276, 10400), False, 'import logging\n'), ((11955, 12043), 'torch.utils.data.random_split', 'torch.utils.data.random_split', (['dataset_or_trainset', '(trainset_size, *split_lengths)'], {}), '(dataset_or_trainset, (trainset_size, *\n split_lengths))\n', (11984, 12043), False, 'import torch\n'), ((14014, 14099), 'logging.info', 'logging.info', (['f"""Starting pytorch dataset preprocessing procedure... ({fn_name})"""'], {}), "(f'Starting pytorch dataset preprocessing procedure... 
({fn_name})'\n )\n", (14026, 14099), False, 'import logging\n'), ((5839, 5863), 'torch.zeros', 'torch.zeros', (['(channels,)'], {}), '((channels,))\n', (5850, 5863), False, 'import torch\n'), ((12218, 12321), 'logging.info', 'logging.info', (['f"""{func_name}: Saving resulting dataset to disk (`params["cache"] == True`)..."""'], {}), '(\n f\'{func_name}: Saving resulting dataset to disk (`params["cache"] == True`)...\'\n )\n', (12230, 12321), False, 'import logging\n'), ((15089, 15144), 'logging.info', 'logging.info', (['f"""Applying dataset augmentation reciepe """'], {}), "(f'Applying dataset augmentation reciepe ')\n", (15101, 15144), False, 'import logging\n'), ((15882, 16021), 'logging.info', 'logging.info', (['"""`deepcv.meta.data.preprocess.preprocess` function is saving resulting dataset to disk (`params["cache"] == True`)"""'], {}), '(\n \'`deepcv.meta.data.preprocess.preprocess` function is saving resulting dataset to disk (`params["cache"] == True`)\'\n )\n', (15894, 16021), False, 'import logging\n'), ((2775, 2816), 'functools.partial', 'functools.partial', (['fn'], {}), '(fn, **transform_kwargs)\n', (2792, 2816), False, 'import functools\n'), ((5656, 5689), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (5687, 5689), False, 'import torchvision\n'), ((11469, 11490), 'numpy.sum', 'np.sum', (['split_lengths'], {}), '(split_lengths)\n', (11475, 11490), True, 'import numpy as np\n'), ((6158, 6191), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (6189, 6191), False, 'import torchvision\n')] |
# Copyright (c) 2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test MetPy's testing utilities."""
import warnings
import numpy as np
import pytest
from metpy.deprecation import MetpyDeprecationWarning
from metpy.testing import assert_array_almost_equal, check_and_silence_deprecation
# Test #1183: numpy.testing.assert_array* ignores any masked value, so work-around
def test_masked_arrays():
    """Test that we catch masked arrays with different masks."""
    plain = np.array([10, 20])
    masked = np.ma.array([10, np.nan], mask=[False, True])
    with pytest.raises(AssertionError):
        assert_array_almost_equal(plain, masked, 2)
def test_masked_and_no_mask():
    """Test that we can compare a masked array with no masked values and a regular array."""
    regular = np.array([10, 20])
    fully_unmasked = np.ma.array([10, 20], mask=[False, False])
    assert_array_almost_equal(regular, fully_unmasked)
@check_and_silence_deprecation
def test_deprecation_decorator():
    """Make sure the deprecation checker works."""
    # NOTE(review): the decorator presumably asserts this MetpyDeprecationWarning
    # is emitted and silences it — confirm against metpy.testing's implementation.
    warnings.warn('Testing warning.', MetpyDeprecationWarning)
| [
"metpy.testing.assert_array_almost_equal",
"numpy.ma.array",
"numpy.array",
"pytest.raises",
"warnings.warn"
] | [((855, 873), 'numpy.array', 'np.array', (['[10, 20]'], {}), '([10, 20])\n', (863, 873), True, 'import numpy as np\n'), ((882, 924), 'numpy.ma.array', 'np.ma.array', (['[10, 20]'], {'mask': '[False, False]'}), '([10, 20], mask=[False, False])\n', (893, 924), True, 'import numpy as np\n'), ((929, 960), 'metpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['a', 'b'], {}), '(a, b)\n', (954, 960), False, 'from metpy.testing import assert_array_almost_equal, check_and_silence_deprecation\n'), ((1083, 1141), 'warnings.warn', 'warnings.warn', (['"""Testing warning."""', 'MetpyDeprecationWarning'], {}), "('Testing warning.', MetpyDeprecationWarning)\n", (1096, 1141), False, 'import warnings\n'), ((552, 581), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (565, 581), False, 'import pytest\n'), ((617, 635), 'numpy.array', 'np.array', (['[10, 20]'], {}), '([10, 20])\n', (625, 635), True, 'import numpy as np\n'), ((671, 716), 'numpy.ma.array', 'np.ma.array', (['[10, np.nan]'], {'mask': '[False, True]'}), '([10, np.nan], mask=[False, True])\n', (682, 716), True, 'import numpy as np\n')] |
import warnings
import numpy as np
import pint as pt
# pint warns (UnitStrippedWarning) whenever a Quantity is silently turned into
# a bare ndarray, e.g. by np.array(); this module does that deliberately when
# return_without_units is requested, so silence the warning globally.
warnings.filterwarnings("ignore", category=pt.errors.UnitStrippedWarning)
def u_log_law(unit,
              reference_speed,
              reference_height,
              height,
              aerodynamic_roughness,
              return_without_units=True,
              debug=False):
    '''
    Logarithmic-law ABL profile: wind speed at the requested height(s).

    Parameters
    ----------
    unit : pint.registry.UnitRegistry
        Registry used for the dimensional bookkeeping.
    reference_speed : float or pint Quantity
        Speed measured at ``reference_height``; assumed m/s when unitless.
    reference_height : float or pint Quantity
        Height of the reference measurement; assumed metres when unitless.
    height : float, ndarray or pint Quantity
        Height(s) at which to evaluate the profile; assumed metres when
        unitless.
    aerodynamic_roughness : float or pint Quantity
        Aerodynamic roughness length z0; assumed metres when unitless.
    return_without_units : bool, optional
        When True (default) strip units and return a bare numpy array in
        SI units.
    debug : bool, optional
        Print the resolved inputs. Default False.

    Returns
    -------
    u : numpy array or pint Quantity
        Wind speed at the requested height(s).
    '''
    # Canonical quantities used to detect whether inputs already carry units.
    metre = 1 * unit.meter
    metres_per_second = 1 * unit.meter / unit.second
    # Bare numbers are interpreted as default SI units.
    if not isinstance(reference_speed, type(metres_per_second)):
        reference_speed = reference_speed * metres_per_second
    if not isinstance(reference_height, type(metre)):
        reference_height = reference_height * metre
    if not isinstance(aerodynamic_roughness, type(metre)):
        aerodynamic_roughness = aerodynamic_roughness * metre
    if not isinstance(height, type(metre)):
        height = height * metre
    if debug:
        for value in (reference_speed, reference_height,
                      aerodynamic_roughness, height):
            print(value)
    # u(z) = u_ref * ln((z + z0)/z0) / ln((z_ref + z0)/z0)
    log_height = np.log(
        (height + aerodynamic_roughness) / aerodynamic_roughness)
    log_reference = np.log(
        (reference_height + aerodynamic_roughness) / aerodynamic_roughness)
    u = reference_speed * (log_height / log_reference)
    # Optionally strip the units, leaving a plain SI-valued array.
    if return_without_units:
        u = np.array(u)
    return u
def u_eurocode(unit,
               reference_speed,
               reference_height,
               height,
               aerodynamic_roughness,
               return_without_units=True,
               debug=False):
    '''
    Eurocode (EN 1991-1-4) wind profile at the given height(s).

    Mean speed v(z) = c_r(z) / c_r(z_ref) * v_ref with roughness factor
    c_r(z) = k_r ln(z / z0) and k_r = 0.19 (z0 / 0.05)^0.07.  Below the
    terrain-dependent minimum height z_min the profile is clamped to its
    value at z_min.

    Parameters
    ----------
    unit : pint.registry.UnitRegistry
        Registry used for dimensional bookkeeping.
    reference_speed : float or pint Quantity
        Speed measured at ``reference_height``; assumed m/s when unitless.
    reference_height : float or pint Quantity
        Height of the reference measurement; assumed metres when unitless.
    height : float, ndarray or pint Quantity
        Height(s) at which to evaluate the profile; assumed metres when
        unitless.
    aerodynamic_roughness : float or pint Quantity
        Aerodynamic roughness length z0.
    return_without_units : bool, optional
        When True (default) strip units and return a bare numpy array.
    debug : bool, optional
        Print the resolved inputs. Default False.

    Returns
    -------
    u : numpy array or pint Quantity
        Wind speed at the requested height(s).
    '''
    # return expected dimensional unit types
    distance = 1 * unit.meter
    distance_type = type(distance)
    speed = 1 * unit.meter / unit.second
    speed_type = type(speed)
    # Bare numbers are interpreted as default SI units.
    if not isinstance(reference_speed, speed_type):
        reference_speed = reference_speed * speed
    if not isinstance(reference_height, distance_type):
        reference_height = reference_height * distance
    if not isinstance(aerodynamic_roughness, distance_type):
        aerodynamic_roughness = aerodynamic_roughness * distance
    if not isinstance(height, distance_type):
        height = height * distance
    if debug:
        print(reference_speed)
        print(reference_height)
        print(aerodynamic_roughness)
        print(height)
    # Terrain factor k_r; the 0.05 divisor is z0 of terrain category II
    # (assumed metres -- TODO confirm for non-metre registries).
    krz0 = 0.19 * (aerodynamic_roughness / 0.05) ** 0.07
    cz = krz0 * np.log(height / aerodynamic_roughness)
    cr = krz0 * np.log(reference_height / aerodynamic_roughness)
    u = (cz / cr) * reference_speed
    zmin = get_eurocode_minimum_height(unit, aerodynamic_roughness)
    cmin = krz0 * np.log(zmin / aerodynamic_roughness)
    if u.shape != ():
        u[height < zmin] = (cmin / cr) * reference_speed
    else:
        if height < zmin:
            # BUG FIX: the clamped value was previously computed and then
            # discarded, so a scalar height below z_min returned the raw
            # (unclamped) profile value.
            u = (cmin / cr) * reference_speed
    # Optionally strip the units, leaving a plain SI-valued array.
    if return_without_units:
        u = np.array(u)
    return u
def u_power_law(unit,
                reference_speed,
                reference_height,
                height,
                alpha,
                return_without_units=True,
                debug=False):
    '''
    Power-law ABL profile: wind speed at the requested height(s).

    Parameters
    ----------
    unit : pint.registry.UnitRegistry
        Registry used for the dimensional bookkeeping.
    reference_speed : float or pint Quantity
        Speed measured at ``reference_height``; assumed m/s when unitless.
    reference_height : float or pint Quantity
        Height of the reference measurement; assumed metres when unitless.
    height : float, ndarray or pint Quantity
        Height(s) at which to evaluate the profile; assumed metres when
        unitless.
    alpha : float or pint Quantity
        Power-law exponent (dimensionless).
    return_without_units : bool, optional
        When True (default) strip units and return a bare numpy array.
    debug : bool, optional
        Print the resolved inputs. Default False.

    Returns
    -------
    u : numpy array or pint Quantity
        Wind speed at the requested height(s).
    '''
    # Canonical quantities used to detect whether inputs already carry units.
    metre = 1 * unit.meter
    metres_per_second = 1 * unit.meter / unit.second
    unitless = 1 * unit.meter / unit.meter
    # Bare numbers are interpreted as default SI units.
    if not isinstance(reference_speed, type(metres_per_second)):
        reference_speed = reference_speed * metres_per_second
    if not isinstance(reference_height, type(metre)):
        reference_height = reference_height * metre
    if not isinstance(alpha, type(unitless)):
        alpha = alpha * unitless
    if not isinstance(height, type(metre)):
        height = height * metre
    if debug:
        for value in (reference_speed, reference_height, alpha, height):
            print(value)
    # u(z) = u_ref * (z / z_ref) ** alpha
    u = reference_speed * (height / reference_height) ** alpha
    # Optionally strip the units, leaving a plain SI-valued array.
    if return_without_units:
        u = np.array(u)
    return u
def calulate_u_star(unit,
                    reference_speed,
                    reference_height,
                    aerodynamic_roughness,
                    k=0.41,
                    return_without_units=True,
                    debug=False):
    '''
    Friction velocity u* from reference speed, height and roughness.

    u* = k u_ref / ln((z_ref + z0) / z0)

    (NOTE: the function name carries a historical typo -- "calulate" --
    which is kept for backwards compatibility with existing callers.)

    Parameters
    ----------
    unit : pint.registry.UnitRegistry
        Registry used for the dimensional bookkeeping.
    reference_speed : float or pint Quantity
        Speed measured at ``reference_height``; assumed m/s when unitless.
    reference_height : float or pint Quantity
        Height of the reference measurement; assumed metres when unitless.
    aerodynamic_roughness : float or pint Quantity
        Aerodynamic roughness length z0; assumed metres when unitless.
    k : float, optional
        Von Karman constant. Default 0.41.
    return_without_units : bool, optional
        When True (default) strip units and return a bare numpy array.
    debug : bool, optional
        Print the resolved inputs. Default False.

    Returns
    -------
    u_star : float, numpy array or pint Quantity
        The friction velocity.
    '''
    # Canonical quantities used to detect whether inputs already carry units.
    metre = 1 * unit.meter
    metres_per_second = 1 * unit.meter / unit.second
    # Bare numbers are interpreted as default SI units.
    if not isinstance(reference_speed, type(metres_per_second)):
        reference_speed = reference_speed * metres_per_second
    if not isinstance(reference_height, type(metre)):
        reference_height = reference_height * metre
    if not isinstance(aerodynamic_roughness, type(metre)):
        aerodynamic_roughness = aerodynamic_roughness * metre
    if debug:
        for value in (reference_speed, reference_height,
                      aerodynamic_roughness):
            print(value)
    log_term = np.log(
        (reference_height + aerodynamic_roughness) / aerodynamic_roughness)
    u_star = (reference_speed * k) / log_term
    if return_without_units:
        u_star = np.array(u_star)
    return u_star
def i_eurocode(unit,
               height,
               aerodynamic_roughness,
               return_without_units=True,
               debug=False):
    '''
    Eurocode (EN 1991-1-4) turbulence intensity at the given height(s).

    I(z) = 1 / ln(z / z0), clamped to its value at the terrain-dependent
    minimum height z_min for heights below z_min.

    Parameters
    ----------
    unit : pint.registry.UnitRegistry
        Registry used for dimensional bookkeeping.
    height : float, ndarray or pint Quantity
        Height(s) at which to evaluate the intensity; assumed metres when
        unitless.
    aerodynamic_roughness : float or pint Quantity
        Aerodynamic roughness length z0; assumed metres when unitless.
    return_without_units : bool, optional
        When True (default) strip units and return a bare numpy array.
    debug : bool, optional
        Print the resolved inputs. Default False.

    Returns
    -------
    i : numpy array or pint Quantity
        Turbulence intensity at the requested height(s).
    '''
    # return expected dimensional unit types
    distance = 1 * unit.meter
    distance_type = type(distance)
    # Bare numbers are interpreted as default SI units.
    if not isinstance(aerodynamic_roughness, distance_type):
        aerodynamic_roughness = aerodynamic_roughness * distance
    if not isinstance(height, distance_type):
        height = height * distance
    if debug:
        print(aerodynamic_roughness)
        print(height)
    i = 1 / np.log(height / aerodynamic_roughness)
    zmin = get_eurocode_minimum_height(unit, aerodynamic_roughness)
    i_min = 1 / np.log(zmin / aerodynamic_roughness)
    # ROBUSTNESS FIX: the original unconditionally indexed ``i``, which
    # raises for scalar heights; mirror the scalar handling in u_eurocode.
    if np.ndim(i) != 0:
        i[height < zmin] = i_min
    elif height < zmin:
        i = i_min
    # Optionally strip the units, leaving a plain SI-valued array.
    if return_without_units:
        i = np.array(i)
    return i
def tke_derived(unit,
                u,
                intensity,
                return_without_units=True,
                debug=False):
    '''
    Turbulent kinetic energy derived from speed and intensity profiles.

    k = (3/2) * (u * I)^2

    Parameters
    ----------
    unit : pint.registry.UnitRegistry
        Registry used for dimensional bookkeeping.
    u : ndarray or pint Quantity
        Height-dependent streamwise wind speed; assumed m/s when unitless.
    intensity : ndarray or pint Quantity
        Height-dependent turbulence intensity (dimensionless).
    return_without_units : bool, optional
        When True (default) strip units and return a bare numpy array.
    debug : bool, optional
        Print the resolved inputs. Default False.

    Returns
    -------
    tke : numpy array or pint Quantity
        Turbulent kinetic energy as a function of height.

    Raises
    ------
    OrderError
        If either input array is empty (profiles not yet set).
    '''
    # check there is data
    if u.size == 0:
        raise OrderError("set_tke", "Error: define the wind speed profile first using set_streamwise_speed(method)")
    if intensity.size == 0:
        raise OrderError("set_tke", "Error: define the intensity profile first using set_streamwise_speed(method)")
    # return expected dimensional unit types
    speed = 1 * unit.meter / unit.second
    speed_type = type(speed)
    dimensionless = 1 * unit.meter / unit.meter
    dimensionless_type = type(dimensionless)
    # Bare numbers are interpreted as default SI units.
    if not isinstance(u, speed_type):
        u = u * speed
    if not isinstance(intensity, dimensionless_type):
        # BUG FIX: multiply by the dimensionless *quantity*; the original
        # multiplied by the quantity's *type* (a class object), which raised
        # a TypeError for any unitless intensity input.
        intensity = intensity * dimensionless
    if debug:
        print(u)
        print(intensity)
    tke = (3 / 2) * (u * intensity) ** 2
    # Optionally strip the units, leaving a plain SI-valued array.
    if return_without_units:
        tke = np.array(tke)
    return tke
def tke_uniform(unit,
                height,
                tke,
                return_without_units=True,
                debug=False):
    '''
    Uniform turbulent-kinetic-energy profile.

    Broadcasts a single TKE value over all requested heights.

    Parameters
    ----------
    unit : pint.registry.UnitRegistry
        Registry used for dimensional bookkeeping.
    height : ndarray or pint Quantity
        Heights at which the profile is defined; assumed metres when
        unitless. Only its length is used.
    tke : float or pint Quantity
        The uniform TKE value; assumed m^2/s^2 when unitless.
    return_without_units : bool, optional
        When True (default) strip units and return a bare numpy array.
    debug : bool, optional
        Print the resolved height input. Default False.

    Returns
    -------
    tke : numpy array or pint Quantity
        The (constant) turbulent kinetic energy at each height.
    '''
    # return expected dimensional unit types
    distance = 1 * unit.meter
    distance_type = type(distance)
    energy = 1 * unit.meter ** 2 / unit.second ** 2
    tke_type = type(energy)
    if not isinstance(height, distance_type):
        height = height * distance
    if not isinstance(tke, tke_type):
        # BUG FIX: attach units by multiplying with the energy *quantity*;
        # the original multiplied by the quantity's *type* (a class object),
        # which raised a TypeError for any unitless tke input.
        tke = tke * energy
    if debug:
        print(height)
    ones = np.ones(len(height))
    tke = (ones * tke)
    # Optionally strip the units, leaving a plain SI-valued array.
    if return_without_units:
        tke = np.array(tke)
    return tke
def tke_YGCJ(unit,
             reference_speed,
             reference_height,
             height,
             aerodynamic_roughness,
             c1, c2,
             return_without_units=True,
             debug=False):
    '''
    Height-varying TKE profile of the YGCJ inflow conditions.

    Implements the turbulence kinetic energy boundary condition of
    "New inflow boundary conditions for modelling the neutral equilibrium
    atmospheric boundary layer in computational wind engineering",
    J. Wind Eng. Ind. Aerodyn., 97(2), 88-95 (2009),
    DOI:10.1016/j.jweia.2008.12.001:

        k(z) = (u*^2 / sqrt(Cmu)) * sqrt(c1 ln((z + z0)/z0) + c2)

    Parameters
    ----------
    unit : pint.registry.UnitRegistry
        Registry used for the dimensional bookkeeping.
    reference_speed : float or pint Quantity
        Speed measured at ``reference_height``; assumed m/s when unitless.
    reference_height : float or pint Quantity
        Height of the reference measurement; assumed metres when unitless.
    height : float, ndarray or pint Quantity
        Height(s) at which to evaluate the profile; assumed metres when
        unitless.
    aerodynamic_roughness : float or pint Quantity
        Aerodynamic roughness length z0.
    c1 : float
        Fitting coefficient 1.
    c2 : float
        Fitting coefficient 2.
    return_without_units : bool, optional
        When True (default) strip units and return a bare numpy array.
    debug : bool, optional
        Print the resolved inputs. Default False.

    Returns
    -------
    tke : numpy array or pint Quantity
        Turbulent kinetic energy at the requested height(s).
    '''
    # Canonical quantities used to detect whether inputs already carry units.
    metre = 1 * unit.meter
    metres_per_second = 1 * unit.meter / unit.second
    # Bare numbers are interpreted as default SI units.
    if not isinstance(reference_speed, type(metres_per_second)):
        reference_speed = reference_speed * metres_per_second
    if not isinstance(reference_height, type(metre)):
        reference_height = reference_height * metre
    if not isinstance(aerodynamic_roughness, type(metre)):
        aerodynamic_roughness = aerodynamic_roughness * metre
    if not isinstance(height, type(metre)):
        height = height * metre
    if debug:
        for value in (reference_speed, reference_height,
                      aerodynamic_roughness, height):
            print(value)
    u_star = calulate_u_star(unit,
                             reference_speed,
                             reference_height,
                             aerodynamic_roughness,
                             return_without_units=False)
    cmu = 0.09  # turbulence-model constant
    log_term = np.log((height + aerodynamic_roughness) / aerodynamic_roughness)
    tke = (u_star ** 2 / cmu ** 0.5) * ((c1 * log_term) + c2) ** 0.5
    # Optionally strip the units, leaving a plain SI-valued array.
    if return_without_units:
        tke = np.array(tke)
    return tke
def omega_YGCJ(
        unit,
        reference_speed,
        reference_height,
        height,
        aerodynamic_roughness,
        return_without_units=True,
        debug=False
):
    '''
    Height-varying specific dissipation rate of the YGCJ inflow conditions.

    Implements the omega boundary condition of
    "New inflow boundary conditions for modelling the neutral equilibrium
    atmospheric boundary layer in computational wind engineering",
    J. Wind Eng. Ind. Aerodyn., 97(2), 88-95 (2009),
    DOI:10.1016/j.jweia.2008.12.001:

        omega(z) = u* / (k sqrt(Cmu) (z + z0))

    Parameters
    ----------
    unit : pint.registry.UnitRegistry
        Registry used for the dimensional bookkeeping.
    reference_speed : float or pint Quantity
        Speed measured at ``reference_height``; assumed m/s when unitless.
    reference_height : float or pint Quantity
        Height of the reference measurement; assumed metres when unitless.
    height : float, ndarray or pint Quantity
        Height(s) at which to evaluate the profile; assumed metres when
        unitless.
    aerodynamic_roughness : float or pint Quantity
        Aerodynamic roughness length z0.
    return_without_units : bool, optional
        When True (default) strip units and return a bare numpy array.
    debug : bool, optional
        Print the resolved inputs. Default False.

    Returns
    -------
    omega : numpy array or pint Quantity
        Specific turbulence dissipation rate at the requested height(s).
    '''
    # Canonical quantities used to detect whether inputs already carry units.
    metre = 1 * unit.meter
    metres_per_second = 1 * unit.meter / unit.second
    # Bare numbers are interpreted as default SI units.
    if not isinstance(reference_speed, type(metres_per_second)):
        reference_speed = reference_speed * metres_per_second
    if not isinstance(reference_height, type(metre)):
        reference_height = reference_height * metre
    if not isinstance(aerodynamic_roughness, type(metre)):
        aerodynamic_roughness = aerodynamic_roughness * metre
    if not isinstance(height, type(metre)):
        height = height * metre
    if debug:
        for value in (reference_speed, reference_height,
                      aerodynamic_roughness, height):
            print(value)
    u_star = calulate_u_star(unit,
                             reference_speed,
                             reference_height,
                             aerodynamic_roughness,
                             return_without_units=False)
    cmu = 0.09  # model coef
    k = 0.41  # von karman constant
    omega = ((u_star / (k * cmu ** 0.5))
             * (1 / (height + aerodynamic_roughness)))
    # Optionally strip the units, leaving a plain SI-valued array.
    if return_without_units:
        omega = np.array(omega)
    return omega
def omega_AIJ(unit,
              u,
              tke,
              return_without_units=True,
              debug=False):
    '''
    Specific dissipation rate derived from speed and TKE profiles (AIJ).

    epsilon = sqrt(Cmu) * k * du/dz, omega = epsilon / (Cmu * k).

    Parameters
    ----------
    unit : pint.registry.UnitRegistry
        Registry used for the dimensional bookkeeping.
    u : ndarray or pint Quantity
        Height-dependent streamwise wind speed; assumed m/s when unitless.
    tke : ndarray or pint Quantity
        Height-dependent turbulent kinetic energy; assumed m^2/s^2 when
        unitless.
    return_without_units : bool, optional
        When True (default) strip units and return a bare numpy array.
    debug : bool, optional
        Print the resolved inputs. Default False.

    Returns
    -------
    omega : numpy array or pint Quantity
        Specific turbulence dissipation rate as a function of height.
    '''
    # Canonical quantities used to detect whether inputs already carry units.
    metres_per_second = 1 * unit.meter / unit.second
    specific_energy = 1 * unit.meter ** 2 / unit.second ** 2
    # Bare numbers are interpreted as default SI units.
    if not isinstance(u, type(metres_per_second)):
        u = u * metres_per_second
    if not isinstance(tke, type(specific_energy)):
        tke = tke * specific_energy
    if debug:
        print(u)
        print(tke)
    cmu = 0.09  # turbulence model constant
    # NOTE(review): np.gradient(u) uses unit sample spacing, i.e. it assumes
    # evenly spaced height samples -- confirm against the callers.
    velocity_gradient = np.gradient(u)
    epsilon = (cmu ** (1 / 2)) * tke * velocity_gradient
    omega = epsilon / (cmu * tke)
    # Optionally strip the units, leaving a plain SI-valued array.
    if return_without_units:
        omega = np.array(omega)
    return omega
def get_eurocode_minimum_height(unit, z0):
    '''
    Return the EN 1991-1-4 minimum height z_min for a roughness length.

    Parameters
    ----------
    unit : pint.registry.UnitRegistry
        Registry used for the dimensional bookkeeping.
    z0 : pint Quantity ([length])
        Aerodynamic roughness length; must carry a length unit (enforced
        by the ``unit.check`` decorator).

    Returns
    -------
    pint Quantity
        z_min interpolated from the Eurocode terrain-category table,
        expressed in the same unit ``z0`` was supplied in.
    '''
    @unit.check('[length]')
    def unit_check(z0):
        if z0.check('[length]'):
            original_unit = z0.units
            # BUG FIX: Quantity.to() returns a converted *copy* -- the
            # original called it without using the result and then took
            # .magnitude of the unconverted value, so any non-metre input
            # was misread against the metre-based table below.
            z0 = z0.to(unit.meter).magnitude
        return [z0, original_unit]
    check = unit_check(z0)
    # EN 1991-1-4 Table 4.1: z0 [m] for terrain categories 0..IV, and the
    # corresponding minimum heights z_min [m].
    x = [0.003, 0.01, 0.05, 0.3, 1.0]
    # BUG FIX: terrain category II has z_min = 2 m; the original table
    # mistyped it as 12.
    y = [1, 1, 2, 5, 10]
    interpolated_value = np.interp(check[0], x, y) * unit.meter
    return interpolated_value.to(check[1])
def eurocode_meteo_corrector(unit,
                             reference_speed,
                             reference_height,
                             blend_height,
                             aerodynamic_roughness,
                             reference_roughness=0.05,
                             return_without_units=False):
    '''
    Meteorological correction factor for the Eurocode profile.

    The factor is the ratio, at the blend height, of the undisturbed
    (reference-roughness) Eurocode profile to the local-roughness profile,
    with both speeds normalised by the reference speed.

    Parameters
    ----------
    unit : pint.registry.UnitRegistry
        A unit registry to do the dimensional calculations.
    reference_speed : float or pint Quantity
        Speed measured at ``reference_height``; assumed m/s when unitless.
    reference_height : float or pint Quantity
        Height of the reference measurement; assumed metres when unitless.
    blend_height : float or pint Quantity
        Height above which local roughness no longer affects the
        meteorological reading.
    aerodynamic_roughness : float or pint Quantity
        Aerodynamic roughness length of the local ABL profile.
    reference_roughness : float or pint Quantity, optional
        Roughness of the undisturbed boundary layer. Default 0.05.
    return_without_units : bool, optional
        Forwarded to ``u_eurocode``. Default False.

    Returns
    -------
    corrector : float or pint Quantity
        Dimensionless factor correcting the meteorological reading.
    '''
    def normalised_speed(roughness):
        # Blend-height speed for the given roughness, normalised by the
        # reference speed.
        return (u_eurocode(unit,
                           reference_speed,
                           reference_height,
                           blend_height,
                           roughness,
                           return_without_units=return_without_units)
                / reference_speed)
    return (normalised_speed(reference_roughness)
            / normalised_speed(aerodynamic_roughness))
def log_law_meteo_corrector(unit,
                            reference_speed,
                            reference_height,
                            blend_height,
                            aerodynamic_roughness,
                            reference_roughness=0.05,
                            return_without_units=False):
    '''
    Meteorological correction factor for the logarithmic-law profile.

    The factor is the ratio, at the blend height, of the undisturbed
    (reference-roughness) log-law profile to the local-roughness profile,
    with both speeds normalised by the reference speed.

    Parameters
    ----------
    unit : pint.registry.UnitRegistry
        A unit registry to do the dimensional calculations.
    reference_speed : float or pint Quantity
        Speed measured at ``reference_height``; assumed m/s when unitless.
    reference_height : float or pint Quantity
        Height of the reference measurement; assumed metres when unitless.
    blend_height : float or pint Quantity
        Height above which local roughness no longer affects the
        meteorological reading.
    aerodynamic_roughness : float or pint Quantity
        Aerodynamic roughness length of the local ABL profile.
    reference_roughness : float or pint Quantity, optional
        Roughness of the undisturbed boundary layer. Default 0.05.
    return_without_units : bool, optional
        Forwarded to ``u_log_law``. Default False.

    Returns
    -------
    corrector : float or pint Quantity
        Dimensionless factor correcting the meteorological reading.
    '''
    def normalised_speed(roughness):
        # Blend-height speed for the given roughness, normalised by the
        # reference speed.
        return (u_log_law(unit,
                          reference_speed,
                          reference_height,
                          blend_height,
                          roughness,
                          return_without_units=return_without_units)
                / reference_speed)
    return (normalised_speed(reference_roughness)
            / normalised_speed(aerodynamic_roughness))
def power_law_meteo_corrector(unit,
                              reference_speed,
                              reference_height,
                              blend_height,
                              alpha,
                              reference_alpha=0.115,
                              return_without_units=False):
    '''
    Meteorological correction factor for the power-law profile.

    The factor is the ratio, at the blend height, of the undisturbed
    (reference-alpha) power-law profile to the local-alpha profile, with
    both speeds normalised by the reference speed.

    Parameters
    ----------
    unit : pint.registry.UnitRegistry
        A unit registry to do the dimensional calculations.
    reference_speed : float or pint Quantity
        Speed measured at ``reference_height``; assumed m/s when unitless.
    reference_height : float or pint Quantity
        Height of the reference measurement; assumed metres when unitless.
    blend_height : float or pint Quantity
        Height above which local roughness no longer affects the
        meteorological reading.
    alpha : float or pint Quantity
        Power-law exponent of the local ABL profile.
    reference_alpha : float or pint Quantity, optional
        Exponent of the undisturbed boundary layer. Default 0.115.
    return_without_units : bool, optional
        Forwarded to ``u_power_law``. Default False.

    Returns
    -------
    corrector : float or pint Quantity
        Dimensionless factor correcting the meteorological reading.
    '''
    def normalised_speed(exponent):
        # Blend-height speed for the given exponent, normalised by the
        # reference speed.
        return (u_power_law(unit,
                            reference_speed,
                            reference_height,
                            blend_height,
                            exponent,
                            return_without_units=return_without_units)
                / reference_speed)
    return (normalised_speed(reference_alpha)
            / normalised_speed(alpha))
def generic_power_law(reference,
                      reference_z,
                      exponent,
                      z):
    '''
    Evaluate a generic power law anchored at a reference point.

    Parameters
    ----------
    reference : float
        Value of the quantity at ``reference_z``.
    reference_z : float
        Position at which ``reference`` applies.
    exponent : float
        Power-law exponent. NOTE(review): it is applied *negated*
        (``(z / reference_z) ** -exponent``), i.e. positive exponents
        produce a decay law -- confirm callers expect this convention.
    z : float or ndarray
        Position(s) at which to evaluate the law.

    Returns
    -------
    float or ndarray
        ``reference * (z / reference_z) ** -exponent``.
    '''
    scaled = z / reference_z
    return reference * scaled ** -exponent
# Root of this module's exception hierarchy; catch this to handle any
# error deliberately raised by the profile helpers.
class Error(Exception):
    """Base class for exceptions in this module."""
    pass
class OrderError(Error):
    """Exception raised for errors in the input.

    Raised when a profile setter is invoked before its prerequisite data
    has been defined (e.g. TKE requested before the speed profile).

    Attributes:
        expression -- input expression in which the error occurred
        message -- explanation of the error
    """
    # Class-level default so the attribute exists even before __init__ runs.
    message = ""
    def __init__(self, expression, message):
        self.expression = expression
        self.message = message
| [
"numpy.log",
"numpy.array",
"numpy.interp",
"numpy.gradient",
"warnings.filterwarnings"
] | [((55, 128), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'pt.errors.UnitStrippedWarning'}), "('ignore', category=pt.errors.UnitStrippedWarning)\n", (78, 128), False, 'import warnings\n'), ((27736, 27750), 'numpy.gradient', 'np.gradient', (['u'], {}), '(u)\n', (27747, 27750), True, 'import numpy as np\n'), ((28409, 28434), 'numpy.interp', 'np.interp', (['check[0]', 'x', 'y'], {}), '(check[0], x, y)\n', (28418, 28434), True, 'import numpy as np\n'), ((3212, 3223), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (3220, 3223), True, 'import numpy as np\n'), ((6022, 6060), 'numpy.log', 'np.log', (['(height / aerodynamic_roughness)'], {}), '(height / aerodynamic_roughness)\n', (6028, 6060), True, 'import numpy as np\n'), ((6077, 6125), 'numpy.log', 'np.log', (['(reference_height / aerodynamic_roughness)'], {}), '(reference_height / aerodynamic_roughness)\n', (6083, 6125), True, 'import numpy as np\n'), ((6250, 6286), 'numpy.log', 'np.log', (['(zmin / aerodynamic_roughness)'], {}), '(zmin / aerodynamic_roughness)\n', (6256, 6286), True, 'import numpy as np\n'), ((6589, 6600), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (6597, 6600), True, 'import numpy as np\n'), ((9532, 9543), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (9540, 9543), True, 'import numpy as np\n'), ((12010, 12084), 'numpy.log', 'np.log', (['((reference_height + aerodynamic_roughness) / aerodynamic_roughness)'], {}), '((reference_height + aerodynamic_roughness) / aerodynamic_roughness)\n', (12016, 12084), True, 'import numpy as np\n'), ((12133, 12149), 'numpy.array', 'np.array', (['u_star'], {}), '(u_star)\n', (12141, 12149), True, 'import numpy as np\n'), ((13971, 14009), 'numpy.log', 'np.log', (['(height / aerodynamic_roughness)'], {}), '(height / aerodynamic_roughness)\n', (13977, 14009), True, 'import numpy as np\n'), ((14106, 14142), 'numpy.log', 'np.log', (['(zmin / aerodynamic_roughness)'], {}), '(zmin / aerodynamic_roughness)\n', (14112, 
14142), True, 'import numpy as np\n'), ((14288, 14299), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (14296, 14299), True, 'import numpy as np\n'), ((16492, 16505), 'numpy.array', 'np.array', (['tke'], {}), '(tke)\n', (16500, 16505), True, 'import numpy as np\n'), ((18347, 18360), 'numpy.array', 'np.array', (['tke'], {}), '(tke)\n', (18355, 18360), True, 'import numpy as np\n'), ((22221, 22234), 'numpy.array', 'np.array', (['tke'], {}), '(tke)\n', (22229, 22234), True, 'import numpy as np\n'), ((25955, 25970), 'numpy.array', 'np.array', (['omega'], {}), '(omega)\n', (25963, 25970), True, 'import numpy as np\n'), ((27991, 28006), 'numpy.array', 'np.array', (['omega'], {}), '(omega)\n', (27999, 28006), True, 'import numpy as np\n'), ((2870, 2934), 'numpy.log', 'np.log', (['((height + aerodynamic_roughness) / aerodynamic_roughness)'], {}), '((height + aerodynamic_roughness) / aerodynamic_roughness)\n', (2876, 2934), True, 'import numpy as np\n'), ((2959, 3033), 'numpy.log', 'np.log', (['((reference_height + aerodynamic_roughness) / aerodynamic_roughness)'], {}), '((reference_height + aerodynamic_roughness) / aerodynamic_roughness)\n', (2965, 3033), True, 'import numpy as np\n'), ((21995, 22059), 'numpy.log', 'np.log', (['((height + aerodynamic_roughness) / aerodynamic_roughness)'], {}), '((height + aerodynamic_roughness) / aerodynamic_roughness)\n', (22001, 22059), True, 'import numpy as np\n')] |
"""
Tests for the utilities module.
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
from gridded.pyugrid import util
class DummyArrayLike(object):
    """
    Stand-in object that merely *looks* like an array to ``asarraylike``.

    It is not usable as an array — it only carries the attribute names the
    duck-typing check inspects. Update ``must_have`` whenever the checked
    attribute set changes.
    """
    must_have = ['dtype', 'shape', 'ndim', '__len__', '__getitem__', '__getattribute__']

    def __new__(cls):
        # Attach every expected attribute (as None) to a fresh instance.
        instance = object.__new__(cls)
        for name in cls.must_have:
            setattr(instance, name, None)
        return instance
def test_dummy_array_like():
    """The dummy must expose every attribute listed in ``must_have``."""
    instance = DummyArrayLike()
    print(instance)
    print(instance.dtype)
    for name in DummyArrayLike.must_have:
        assert hasattr(instance, name)
def test_asarraylike_list():
    """
    A plain list handed to ``asarraylike`` should come back as an
    ndarray holding the same values.
    """
    values = [1, 2, 3, 4]
    converted = util.asarraylike(values)
    assert isinstance(converted, np.ndarray)
    assert np.array_equal(converted, values)
def test_asarraylike_array():
    """
    An ndarray handed to ``asarraylike`` should be returned untouched.
    """
    original = np.array([1, 2, 3, 4])
    assert util.asarraylike(original) is original
def test_as_test_asarraylike_dummy():
    """An object exposing the expected attributes is passed through as-is."""
    dummy = DummyArrayLike()
    assert util.asarraylike(dummy) is dummy
| [
"numpy.array",
"numpy.array_equal",
"gridded.pyugrid.util.asarraylike"
] | [((1007, 1028), 'gridded.pyugrid.util.asarraylike', 'util.asarraylike', (['lst'], {}), '(lst)\n', (1023, 1028), False, 'from gridded.pyugrid import util\n'), ((1082, 1109), 'numpy.array_equal', 'np.array_equal', (['result', 'lst'], {}), '(result, lst)\n', (1096, 1109), True, 'import numpy as np\n'), ((1219, 1241), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1227, 1241), True, 'import numpy as np\n'), ((1255, 1276), 'gridded.pyugrid.util.asarraylike', 'util.asarraylike', (['arr'], {}), '(arr)\n', (1271, 1276), False, 'from gridded.pyugrid import util\n'), ((1383, 1404), 'gridded.pyugrid.util.asarraylike', 'util.asarraylike', (['dum'], {}), '(dum)\n', (1399, 1404), False, 'from gridded.pyugrid import util\n')] |
# Config file for define some parameters
import argparse
import numpy as np
import tensorflow as tf
import math
def get_arguments():
    """Parse the command-line options for the facade ALK network.

    Returns:
      An argparse.Namespace holding the parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Facade ALK Network")
    # (flag, type, default, help) — one row per CLI option.
    options = [
        ("--batch_size", int, 1,
         "Number of images sent to the network in one step."),
        ("--image_height", int, 544,
         "Image height and width of image."),
        ("--image_width", int, 400,
         "Image height and width of image."),
        ("--learning_rate", float, 2e-4,
         "Learning rate for training."),
        ("--optimizer", str, 'Adam',  # Adam Momentum
         "optimizer for BP."),
    ]
    for flag, kind, default, desc in options:
        parser.add_argument(flag, type=kind, default=default, help=desc)
    return parser.parse_args()
# Parse CLI options once at import; everything below derives from them.
args = get_arguments()
# ---------------Modified Paras---------------------------
dataset = 'RueMonge428_seq3_homo'
use_gpu = '1'
seq_num = 3
NUM_OF_CLASSESS = 8
# The number of gradient accumulation
Gradient_Accumulation = 1
total_iter = 10000
model_save_num = 12
is_epoch_acc = False
is_time_acc = False
# 10s to show acc
acc_interval = 180
# epoch 10000 to show train data acc
start_show_iter = 2000
is_save_epoch = False
save_epoch_inter = 100
start_save_epoch = 500
is_save_step = True
save_step_inter = 2000
start_save_step = 8000
weight_decay = 0.0001
freeze_bn = True
is_save_last10_model = False
# ----------------------------------------------------------
# Per-dataset paths and class lists; train_number is the train-set size.
if dataset == 'ecp_seq3':
    data_dir = 'data/ecp_seq3/'
    save_dir = 'saves/ecp_seq3/deeplabv3_plus/' # pspnet, deeplabv3_plus, danet
    logs_dir = 'tensorboard/ecp_seq3/'
    class_names = ['Outlier','Window', 'Wall', 'Balcony', 'Door', 'Roof', 'Chimney', 'Sky', 'Shop']
    train_number = 520
elif dataset == 'RueMonge428_seq3_homo':
    data_dir = 'data/RueMonge428_seq3_homo/'
    save_dir = 'saves/RueMonge428_seq3_homo_aggregation_att/'
    logs_dir = 'tensorboard/RueMonge428_seq3_homo/'
    class_names = ['Outlier', 'Window', 'Wall', 'Balcony', 'Door', 'Roof', 'Sky', 'Shop']
    train_number = 113
# ------------------------Other high-paras----------------------------------
pre_trained_model = 'data/pre-trained_model/resnet_v1_50.ckpt'
IMAGE_HEIGHT = args.image_height
IMAGE_WIDTH = args.image_width
IMAGE_ORI_HEIGHT = 1067 #
IMAGE_ORI_WIDTH = 800 # only used in image_reader
batch_size = args.batch_size
learning_rate = args.learning_rate
optimizer = args.optimizer
decay_rate = 0.9
summary_interval = 60 # 60s to save a summary
train_data_dir = data_dir + 'train'
train_data_list = data_dir + 'train.txt'
test_data_dir = data_dir + 'val'
test_data_list = data_dir + 'val.txt'
# NOTE: validation points at the *train* split here — presumably intentional
# (evaluating train accuracy); confirm before reusing as a true val set.
val_data_dir = data_dir + 'train'
val_data_list = data_dir + 'train.txt'
random_resize = False
random_color = False
# Augmentation switches depend on the dataset family.
# NOTE(review): minScale/maxScale are only bound in the else-branch, so any
# code reading them under an 'ecp' dataset would hit a NameError — confirm.
if 'ecp' in train_data_list:
    random_scale = False
    random_mirror = False
else:
    random_scale = True
    random_mirror = True # Intrinsics paras not suit flipping
    minScale = 0.4 # 0.75: ecp, 0.4: rue Modified
    maxScale = 0.8 # 1.25: ecp, 0.8: rue
random_crop_pad = True
ignore_label = 0
IMG_MEAN = np.array([103.94, 116.78, 123.68], dtype=np.float32) # B G R
# -----------------Learning Schedule------------------------------
def get_cur_lr(step_ph):
    """Wrap the epoch-based schedule ``_get_cur_lr`` as a TF op."""
    return tf.py_func(_get_cur_lr, [step_ph], tf.float32)
def _get_cur_lr(step_ph):
    """Piecewise-constant LR keyed on the epoch derived from the step.

    1e-4 for epochs [0, 10), 1e-5 for [10, 20), 1e-6 afterwards.
    """
    epoch = int(np.array(step_ph, np.int32) / (train_number / batch_size))
    if epoch < 10:
        rate = 1e-4
    elif epoch < 20:
        rate = 1e-5
    else:
        rate = 1e-6
    return np.asarray(rate, dtype=np.float32)
def get_step_lr(step_ph):
    """Wrap the iteration-based schedule ``_get_step_lr`` as a TF op."""
    return tf.py_func(_get_step_lr, [step_ph], tf.float32)
def _get_step_lr(step_ph):
step = np.array(step_ph, np.int32)
ep = step
if ep < 2000:
step_lr = 2e-4
elif ep < 4000:
step_lr = 1e-4
elif ep < 6000:
step_lr = 5e-5
elif ep < 8000:
step_lr = 1e-5
elif ep < 10000:
step_lr = 1e-6
else:
step_lr = 1e-6
return np.asarray(step_lr, dtype=np.float32)
def get_cosine_lr(step_ph):
    """Wrap the cosine-annealing schedule ``_get_cosine_lr`` as a TF op."""
    return tf.py_func(_get_cosine_lr, [step_ph], tf.float32)
def _get_cosine_lr(step_ph):
    # Cosine-annealed learning rate over the whole training run.
    step = np.array(step_ph, np.int32)
    # NOTE(review): get_arguments() defines no --epoch_num option, so
    # args.epoch_num raises AttributeError if this schedule is ever
    # selected — confirm before use.
    total_step = int((train_number / batch_size) * args.epoch_num)
    cur_lr = ((1 + math.cos((step * 3.1415926535897932384626433) / total_step)) * args.learning_rate) / 2
    return np.asarray(cur_lr, dtype=np.float32)
def noam_scheme(cur_step):
    """Warm-up learning-rate schedule (``_noam_scheme``) wrapped as a TF op."""
    return tf.py_func(_noam_scheme, [cur_step], tf.float32)
def _noam_scheme(cur_step):
    """Warm-up then polynomial-decay learning rate.

    Ramps linearly from learning_rate/3 up to learning_rate over the first
    ``warm_steps`` iterations, then decays polynomially (power 0.9) towards
    zero at ``total_iter``.
    """
    step = np.array(cur_step, np.int32)
    base_lr = learning_rate
    final_step = total_iter
    warm_factor = 1.0 / 3
    power = 0.9
    warm_steps = 500
    if step <= warm_steps:
        # Linear blend between warm_factor*base_lr and base_lr.
        blend = step / warm_steps
        lr = base_lr * (warm_factor * (1 - blend) + blend)
    else:
        # learning_rate = tf.scalar_mul(init_lr, tf.pow((1 - cur_step / global_step), power))
        lr = base_lr * np.power(
            (1 - (step - warm_steps) / (final_step - warm_steps)), power)
    return np.asarray(lr, dtype=np.float32)
def circle_scheme(cur_step):
    """Cyclic learning-rate schedule (``_circle_scheme``) wrapped as a TF op."""
    return tf.py_func(_circle_scheme, [cur_step], tf.float32)
def _circle_scheme(cur_step):
    """Cyclic cosine learning rate: period CYCLE, annealing LR_INIT -> LR_MIN."""
    step = np.array(cur_step, np.int32)
    CYCLE = 1000
    LR_INIT = learning_rate
    LR_MIN = 1e-10
    # Position within the current cycle, in [0, 1).
    phase = np.mod(step - 1, CYCLE) / (CYCLE)
    lr = ((LR_INIT - LR_MIN) / 2) * (np.cos(3.1415926535897932384626433 * phase) + 1) + LR_MIN
    return np.asarray(lr, dtype=np.float32)
| [
"argparse.ArgumentParser",
"numpy.power",
"numpy.asarray",
"math.cos",
"numpy.array",
"tensorflow.py_func",
"numpy.mod"
] | [((3429, 3481), 'numpy.array', 'np.array', (['[103.94, 116.78, 123.68]'], {'dtype': 'np.float32'}), '([103.94, 116.78, 123.68], dtype=np.float32)\n', (3437, 3481), True, 'import numpy as np\n'), ((256, 313), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Facade ALK Network"""'}), "(description='Facade ALK Network')\n", (279, 313), False, 'import argparse\n'), ((3602, 3648), 'tensorflow.py_func', 'tf.py_func', (['_get_cur_lr', '[step_ph]', 'tf.float32'], {}), '(_get_cur_lr, [step_ph], tf.float32)\n', (3612, 3648), True, 'import tensorflow as tf\n'), ((3704, 3731), 'numpy.array', 'np.array', (['step_ph', 'np.int32'], {}), '(step_ph, np.int32)\n', (3712, 3731), True, 'import numpy as np\n'), ((3903, 3939), 'numpy.asarray', 'np.asarray', (['cur_lr'], {'dtype': 'np.float32'}), '(cur_lr, dtype=np.float32)\n', (3913, 3939), True, 'import numpy as np\n'), ((3981, 4028), 'tensorflow.py_func', 'tf.py_func', (['_get_step_lr', '[step_ph]', 'tf.float32'], {}), '(_get_step_lr, [step_ph], tf.float32)\n', (3991, 4028), True, 'import tensorflow as tf\n'), ((4086, 4113), 'numpy.array', 'np.array', (['step_ph', 'np.int32'], {}), '(step_ph, np.int32)\n', (4094, 4113), True, 'import numpy as np\n'), ((4387, 4424), 'numpy.asarray', 'np.asarray', (['step_lr'], {'dtype': 'np.float32'}), '(step_lr, dtype=np.float32)\n', (4397, 4424), True, 'import numpy as np\n'), ((4467, 4516), 'tensorflow.py_func', 'tf.py_func', (['_get_cosine_lr', '[step_ph]', 'tf.float32'], {}), '(_get_cosine_lr, [step_ph], tf.float32)\n', (4477, 4516), True, 'import tensorflow as tf\n'), ((4576, 4603), 'numpy.array', 'np.array', (['step_ph', 'np.int32'], {}), '(step_ph, np.int32)\n', (4584, 4603), True, 'import numpy as np\n'), ((4788, 4824), 'numpy.asarray', 'np.asarray', (['cur_lr'], {'dtype': 'np.float32'}), '(cur_lr, dtype=np.float32)\n', (4798, 4824), True, 'import numpy as np\n'), ((4918, 4966), 'tensorflow.py_func', 'tf.py_func', (['_noam_scheme', '[cur_step]', 'tf.float32'], 
{}), '(_noam_scheme, [cur_step], tf.float32)\n', (4928, 4966), True, 'import tensorflow as tf\n'), ((5113, 5141), 'numpy.array', 'np.array', (['cur_step', 'np.int32'], {}), '(cur_step, np.int32)\n', (5121, 5141), True, 'import numpy as np\n'), ((5653, 5685), 'numpy.asarray', 'np.asarray', (['lr'], {'dtype': 'np.float32'}), '(lr, dtype=np.float32)\n', (5663, 5685), True, 'import numpy as np\n'), ((5779, 5829), 'tensorflow.py_func', 'tf.py_func', (['_circle_scheme', '[cur_step]', 'tf.float32'], {}), '(_circle_scheme, [cur_step], tf.float32)\n', (5789, 5829), True, 'import tensorflow as tf\n'), ((5886, 5914), 'numpy.array', 'np.array', (['cur_step', 'np.int32'], {}), '(cur_step, np.int32)\n', (5894, 5914), True, 'import numpy as np\n'), ((6155, 6187), 'numpy.asarray', 'np.asarray', (['lr'], {'dtype': 'np.float32'}), '(lr, dtype=np.float32)\n', (6165, 6187), True, 'import numpy as np\n'), ((5554, 5625), 'numpy.power', 'np.power', (['(1 - (step - warnup_step) / (global_step - warnup_step))', 'power'], {}), '(1 - (step - warnup_step) / (global_step - warnup_step), power)\n', (5562, 5625), True, 'import numpy as np\n'), ((4690, 4737), 'math.cos', 'math.cos', (['(step * 3.141592653589793 / total_step)'], {}), '(step * 3.141592653589793 / total_step)\n', (4698, 4737), False, 'import math\n'), ((6071, 6091), 'numpy.mod', 'np.mod', (['(x - 1)', 'CYCLE'], {}), '(x - 1, CYCLE)\n', (6077, 6091), True, 'import numpy as np\n')] |
from unittest import TestCase
import numpy as np
from numpy.testing import assert_equal
import scipy.sparse as sp
from .clustering import normalized_cut
class ClusteringTest(TestCase):

    def test_normalized_cut_without_singletons(self):
        """A 4-node weighted graph splits into two balanced clusters."""
        graph = sp.coo_matrix([[0, 2, 1, 0],
                              [2, 0, 0, 1],
                              [1, 0, 0, 2],
                              [0, 1, 2, 0]])
        assert_equal(normalized_cut(graph, np.array([0, 1, 2, 3])), [0, 0, 1, 1])
        assert_equal(normalized_cut(graph, np.array([3, 2, 1, 0])), [1, 1, 0, 0])

    def test_normalized_cut_with_singletons(self):
        """A 5-node graph with a weakly attached node yields a singleton cluster."""
        graph = sp.coo_matrix([[0, 3, 0, 2, 0],
                              [3, 0, 2, 0, 0],
                              [0, 2, 0, 3, 0],
                              [2, 0, 3, 0, 1],
                              [0, 0, 0, 1, 0]])
        assert_equal(normalized_cut(graph, np.array([0, 1, 2, 3, 4])),
                     [0, 0, 1, 1, 2])
        assert_equal(normalized_cut(graph, np.array([4, 3, 2, 1, 0])),
                     [2, 1, 1, 0, 0])
        assert_equal(normalized_cut(graph, np.array([1, 0, 4, 2, 3])),
                     [0, 0, 2, 1, 1])
| [
"numpy.array",
"scipy.sparse.coo_matrix"
] | [((328, 346), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj'], {}), '(adj)\n', (341, 346), True, 'import scipy.sparse as sp\n'), ((361, 383), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (369, 383), True, 'import numpy as np\n'), ((460, 482), 'numpy.array', 'np.array', (['[3, 2, 1, 0]'], {}), '([3, 2, 1, 0])\n', (468, 482), True, 'import numpy as np\n'), ((725, 743), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj'], {}), '(adj)\n', (738, 743), True, 'import scipy.sparse as sp\n'), ((758, 783), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (766, 783), True, 'import numpy as np\n'), ((863, 888), 'numpy.array', 'np.array', (['[4, 3, 2, 1, 0]'], {}), '([4, 3, 2, 1, 0])\n', (871, 888), True, 'import numpy as np\n'), ((968, 993), 'numpy.array', 'np.array', (['[1, 0, 4, 2, 3]'], {}), '([1, 0, 4, 2, 3])\n', (976, 993), True, 'import numpy as np\n')] |
from collections import namedtuple
import os
import re
from astropy import units as u
from astropy.cosmology import FlatLambdaCDM
import h5py
import pandas as pd
import numpy as np
import numpy.ma as ma
from numpy.random import default_rng
from desc.skycatalogs.utils.common_utils import print_dated_msg
__all__ = ['LookupInfo', 'Cmp', 'MagNorm', 'convert_tophat_sed',
'write_sed_file', 'NORMWV_IX', 'get_star_sed_path',
'create_cosmology']
# Index for tophat bin containing 500 nm
NORMWV_IX = 13
def convert_tophat_sed(a_bins, f_nu_input, mag_norm_f, redshift=0,
                       wavelen_step=0.1):
    '''
    Given a tophat SED and redshift, produce an equivalent SED as lists of
    wavelength and f_lambda. Also compute magnorm.

    Parameters
    ----------
    a_bins: list of Tophat [tuples (start, width)] in Angstroms
    f_nu_input: list of values for the tophats
    mag_norm_f: an instance of MagNorm
    redshift: needed for computing distance modulus. Should be
              cosmoDC2 redshiftHubble, aka redshift_hubble in sky catalogs
    wavelen_step: Re-cast tophat seds to use this bin width in nm (keeping
                  same step function in f_nu space).

    Returns
    -------
    arrays lambda, f_lambda where lambda is in nm and f_lambda is in
    erg / (cm**2 * s * nm)
    Also return final magnorm (including redshift adjustment) and f_nu value
    at 500 nm
    '''
    # Angstroms -> nm; lam_nm holds each tophat's *center* wavelength.
    lam_nm = 0.1 * np.array([b.start + 0.5 * b.width for b in a_bins])
    lam_width_nm = 0.1 * np.array([b.width for b in a_bins])
    # 1.0 * forces a float copy so the reversal below cannot alias the input.
    f_nu = 1.0 * np.array(f_nu_input)
    val_500nm = f_nu[NORMWV_IX]
    # Convert from f_nu to f_lambda:
    # In earlier versions tophats were in decreasing lambda order
    if (lam_nm[0] > lam_nm[1]):     # reverse into increasing-wavelength order
        lam_nm[:] = lam_nm[::-1]
        lam_width_nm[:] = lam_width_nm[::-1]
        f_nu[:] = f_nu[::-1]
    lam_min = lam_nm[0]
    lam_max = lam_nm[-1] + lam_width_nm[-1]
    # Keep the same step function but use fine bins instead of the
    # original tophat widths.
    n_bins = int((lam_max - lam_min) / wavelen_step)
    lam_fine = np.empty(n_bins)
    f_nu_fine = np.empty(n_bins)
    # Tophat edges; b_ix tracks which tophat the current fine bin falls in.
    boundaries = list(lam_nm)
    boundaries.append(lam_max)
    b_ix = 0
    for i in range(n_bins):
        lam_fine[i] = lam_min + wavelen_step * i
        if (lam_fine[i] > boundaries[b_ix + 1]) :
            b_ix = b_ix + 1
        f_nu_fine[i] = f_nu[b_ix]
    # Convert fnu to flambda, ignoring constant factors.
    flambda = f_nu_fine/lam_fine**2
    # Normalize so flambda value at 500 nm is 1.0
    # (assumes lam_min <= 500 <= lam_max — TODO confirm for all inputs)
    nm500_ix = int((500 - lam_min) / wavelen_step) + 1
    flambda_norm = flambda / flambda[nm500_ix]
    return lam_fine, flambda_norm, mag_norm_f(f_nu[NORMWV_IX],
                                              redshift), val_500nm
def write_sed_file(path, wv, f_lambda, wv_unit=None, f_lambda_unit=None):
    '''
    Write a two-column text file. First column is wavelength,
    second is luminosity value.
    If units are supplied, write a comment line at the top.

    Parameters
    ----------
    path           Where to write the file and what to call it
    wv             List or array of wavelength values
    f_lambda       List or array of luminosities. Must be the same length as wv
    wv_unit        String describing units for first column
    f_lambda_unit  String describing units for second column
    '''
    header = '# '
    if wv_unit:
        header += wv_unit + ' '
    else:
        header += ' lambda unit unknown '
    if f_lambda_unit:
        header += f_lambda_unit
    else:
        header += ' f_lambda unit unknown'
    header += '\n'
    # The with-statement closes the file on exit; the original's extra
    # f.close() after the block was redundant and has been removed.
    with open(path, mode="w") as f:
        f.write(header)
        # Iterate the two columns in lockstep rather than by index.
        for w, fl in zip(wv, f_lambda):
            f.write('{:8.2f} {:g}\n'.format(w, fl))
# Default mapping from filename prefix pattern to SED subdirectory.
_standard_dict = {'lte' : 'starSED/phoSimMLT',
                  'bergeron' : 'starSED/wDs',
                  'km|kp' : 'starSED/kurucz'}

def get_star_sed_path(filename, name_to_folder=_standard_dict):
    '''
    Return numpy array of full paths relative to SIMS_SED_LIBRARY_DIR,
    given filenames.

    Parameters
    ----------
    filename        list of strings. Usually a full filename, possibly
                    missing the final ".gz"
    name_to_folder  dict mapping a regular expression (matched against the
                    start of the filename) to the relative path of the
                    containing directory

    Returns
    -------
    numpy array of paths, each relative to SIMS_SED_LIBRARY_DIR

    Raises
    ------
    ValueError when a filename matches none of the patterns.
    '''
    patterns = [(re.compile(expr), folder)
                for expr, folder in name_to_folder.items()]
    resolved = []
    for raw in filename:
        name = raw.strip()
        for pattern, folder in patterns:
            if pattern.match(name):
                full = os.path.join(folder, name)
                if not full.endswith('.gz'):
                    full += '.gz'
                resolved.append(full)
                break
        else:
            # No pattern matched this filename.
            raise ValueError(f'get_star_sed_path: Filename {name} does not match any known patterns')
    return np.array(resolved)
def create_cosmology(config):
    """
    Build a FlatLambdaCDM cosmology from a dict of input parameters.

    Only keys accepted by FlatLambdaCDM.__init__ are forwarded; anything
    else in *config* is silently ignored. Based on
    https://github.com/LSSTDESC/gcr-catalogs/blob/master/GCRCatalogs/cosmodc2.py#L128
    """
    accepted = FlatLambdaCDM.__init__.__code__.co_varnames[1:]
    kwargs = {key: val for key, val in config.items() if key in accepted}
    return FlatLambdaCDM(**kwargs)
class MagNorm:
    """Compute magnitude normalizations from tophat SED values."""

    def __init__(self, cosmology):
        """
        Parameters
        ----------
        cosmology : astropy.cosmology
            Cosmology object created from the gcr-catalogs galaxy catalog
            cosmology specification.
        """
        self.cosmology = cosmology

    def dl(self, z):
        """
        Luminosity distance at redshift *z*, in meters.
        """
        # Conversion factor from Mpc to meters (obtained from pyccl).
        MPC_TO_METER = 3.085677581491367e+22
        return self.cosmology.luminosity_distance(z).value * MPC_TO_METER

    def __call__(self, tophat_sed_value, redshift_hubble, one_maggy=4.3442e13):
        """Magnitude normalization for a tophat SED value given in maggies."""
        jansky = 1e-26  # W/Hz/m**2
        luminosity_nu = tophat_sed_value * one_maggy  # maggies -> W/Hz
        flux_nu = luminosity_nu / 4 / np.pi / self.dl(redshift_hubble)**2
        return -2.5 * np.log10(flux_nu / jansky) + 8.90
class LookupInfo(object):
    '''
    Cache the pieces of the SED lookup file for one healpixel that the
    Cmp class needs. Also save tophat scale.
    '''
    def __init__(self, sed_library_dir, hp):
        self.sed_lookup_file = os.path.join(sed_library_dir,
                                            f'sed_fit_{hp}.h5')
        self.cached = False

    def cache_info(self):
        """Read SED names and per-galaxy SED indices from the lookup file (once)."""
        if self.cached:
            return
        with h5py.File(self.sed_lookup_file) as f:
            # Copy the datasets so they remain usable after the file closes.
            self.sed_names = np.array(f['sed_names'])
            self.disk_sed = np.array(f['disk_sed'])
            self.bulge_sed = np.array(f['bulge_sed'])
            self.galaxy_id = np.array(f['galaxy_id'])
        self.cached = True

    def get_orig_sed_file(self, cmp, galaxy_id, min_ix=0):
        """Return the SED filename for *galaxy_id*'s component *cmp*
        ('bulge' or 'disk'), searching the id table starting at *min_ix*."""
        if cmp not in ['bulge', 'disk']:
            raise ValueError(f'Unknown component type "{cmp}" ')
        found = -1
        for ix in range(min_ix, len(self.galaxy_id)):
            if self.galaxy_id[ix] == galaxy_id:
                found = ix
                break
        if found == -1:
            raise ValueError(f'Galaxy {galaxy_id} not found')
        table = self.bulge_sed if cmp == 'bulge' else self.disk_sed
        return (self.sed_names[table[found]]).decode("utf-8")
# This class is no longer used. Consider deleting
class Cmp(object):
    '''
    Handle writing of SED files and bookkeeping for either disk or bulge.
    '''
    def __init__(self, cmp_name, obj_coll, output_dir, hp, n_seds, bins,
                 lookup_info, mag_norm_f):
        '''
        Parameters
        ----------
        cmp_name    string      one of 'bulge', 'disk'
        obj_coll                object collection coming from sky catalog,
                                typically all galaxies belonging to a
                                particular pixel
        output_dir  string      where to write output SED files
        hp          int         in case we decide to embed in output filename
        n_seds      int         how many SED files to write
        bins        list        list of (start, width) tuples describing bins
        lookup_info LookupInfo  information pertaining to a particular hp
        mag_norm_f  MagNorm     used for computing mag norm
        '''
        self.cmp_name = cmp_name
        self.output_dir = output_dir
        self.hp = hp
        self.coll = obj_coll
        self.n_seds = n_seds
        self.n_seds_done = 0
        self.bins = bins
        # Force the lookup tables to be read now rather than lazily later.
        lookup_info.cache_info()
        self.lookup_info = lookup_info
        self.mag_norm_f = mag_norm_f

    def _write_sed(self, outpath, sed_list, bins, redshift,
                   wavelen_step=5.0, summary_only=False):
        '''
        Convert cosmoDC2-style tophat SEDs to a file of the form expected by
        ImSim.

        Parameters
        ----------
        outpath   string   full path of output file
        sed_list  list of floats, values as they appear in the cosmoDC2
                  catalog
        bins      list((start, width)) bin definitions
        redshift  for the object the sed file is associated with
        summary_only  when True, compute values but skip writing the file

        Returns
        -------
        (magnorm, val_500nm)  magnorm is our computed magnorm value,
                              including adjustment for redshift.
                              val_500nm is the sed value at or near 500 nm
        '''
        (lmbda, f_lambda,
         magnorm, val_500nm) = convert_tophat_sed(bins, sed_list,
                                                 self.mag_norm_f,
                                                 redshift=redshift,
                                                 wavelen_step=wavelen_step)
        if not summary_only:
            write_sed_file(outpath, lmbda, f_lambda, wv_unit='nm')
        # NOTE(review): `start` is computed but never used — likely leftover.
        start = (min([b.start for b in bins]))/10.0    # A to nm
        return (magnorm, val_500nm)    # for now

    def _write_summary(self, ix, gal, sed, redshift, orig_magnorm, our_magnorm,
                       val_500nm, orig_sed_file, tp_sed_file):
        # Write three parallel outputs to output_dir: a brief csv (no file
        # paths), a full csv, and a parquet file with the same full content.
        print_dated_msg(f'Entered _write_summary for component {self.cmp_name}')
        basename_csv = f'{self.cmp_name}_sed_hp{self.hp}_summary.csv'
        outpath_csv = os.path.join(self.output_dir, basename_csv)
        basename_csv_brief = f'{self.cmp_name}_sed_hp{self.hp}_brief.csv'
        outpath_csv_brief = os.path.join(self.output_dir, basename_csv_brief)
        basename_pq = f'{self.cmp_name}_sed_hp{self.hp}_summary.parquet'
        outpath_pq = os.path.join(self.output_dir, basename_pq)
        out_dict = {}
        out_dict['chosen_ix'] = ix
        out_dict['gal_id'] = gal
        out_dict['redshift'] = redshift
        out_dict['orig_magnorm'] = orig_magnorm
        out_dict['our_magnorm'] = our_magnorm
        out_dict['val_500nm'] = val_500nm
        df = pd.DataFrame(data=out_dict)
        # For convenience, output text file leaving off paths
        df.to_csv(path_or_buf=outpath_csv_brief)
        out_dict['orig_sed_file'] = orig_sed_file
        out_dict['tp_sed_file'] = tp_sed_file
        out_dict['tp_vals'] = sed
        df = pd.DataFrame(data=out_dict)
        df.to_csv(path_or_buf=outpath_csv)
        df.to_parquet(outpath_pq)

    def create(self, count_start=0, summary_only=False):
        '''
        Create SED files as specified at init time and also a table
        describing which tophat SEDs were used.

        count_start may be > 0 in case some of the required files have
        already been created and we just want to pick up where we left off.
        NOTE(review): count_start is currently ignored — the initial draft
        does not support resuming.
        '''
        # For debugging predictability: fixed per-component, per-healpixel seeds.
        seed_dict = {}
        seed_dict['bulge'] = 271423 + 2 * self.hp
        seed_dict['disk'] = 1780247 + 2 * self.hp
        print_dated_msg(f'Cmp.create called for component {self.cmp_name}')
        ### Really it should have _no_host_extinction suffix
        ### REALLY??
        ### but for now schema is not using it
        ### sed_col = 'sed_val_' + self.cmp_name + '_no_host_extinction'
        sed_col = 'sed_val_' + self.cmp_name
        sed = np.array(self.coll.get_native_attribute(sed_col))
        magnorm_col = self.cmp_name + '_magnorm'
        magnorm = np.array(self.coll.get_native_attribute(magnorm_col))
        gal_id = np.array(self.coll.get_native_attribute('galaxy_id'))
        redshift = np.array(self.coll.get_native_attribute('redshift_hubble'))
        # Discard entries with infinite magnorm (no usable flux).
        mask_inf = np.isinf(magnorm)
        good_sed = ma.array(sed, mask=mask_inf).compressed()
        good_gal_id = ma.array(gal_id, mask=mask_inf).compressed()
        good_magnorm = ma.array(magnorm, mask=mask_inf).compressed()
        good_redshift = ma.array(redshift, mask=mask_inf).compressed()
        # Choose entries at random
        rng = default_rng(seed_dict[self.cmp_name])
        ix_list = rng.integers(low=0, high=len(good_magnorm), size=self.n_seds)
        gal_chosen = [good_gal_id[i] for i in ix_list]
        sed_chosen = [good_sed[i] for i in ix_list]
        orig_magnorm_chosen = [good_magnorm[i] for i in ix_list]
        redshift_chosen = [good_redshift[i] for i in ix_list]
        our_magnorm = []
        val_500nm = []
        orig_sed_file = []
        tp_sed_file = []
        sed_rootdir = os.getenv('SIMS_SED_LIBRARY_DIR')
        for i in range(len(sed_chosen)):
            # Form output path
            filename = f'{self.cmp_name}_random_sed_{self.hp}_{i}.txt'
            outpath = os.path.join(self.output_dir, filename)
            (our_mag, nm500) = self._write_sed(outpath, sed_chosen[i],
                                               self.bins, redshift_chosen[i],
                                               summary_only=summary_only)
            our_magnorm.append(our_mag)
            val_500nm.append(nm500)
            tp_sed_file.append(outpath)
            # Look up the original (non-tophat) SED file for this galaxy;
            # min_ix narrows the search since ids follow the chosen index.
            orig_sed = self.lookup_info.get_orig_sed_file(self.cmp_name,
                                                          gal_chosen[i],
                                                          min_ix=ix_list[i])
            orig_sed_file.append(os.path.join(sed_rootdir, orig_sed))
            if not summary_only:
                print_dated_msg(f'Wrote file {i}')
        # Make summary table and write to a file
        self._write_summary(ix_list, gal_chosen, sed_chosen, redshift_chosen,
                            orig_magnorm_chosen, our_magnorm,
                            val_500nm, orig_sed_file, tp_sed_file)
| [
"numpy.log10",
"numpy.random.default_rng",
"os.getenv",
"re.compile",
"desc.skycatalogs.utils.common_utils.print_dated_msg",
"numpy.ma.array",
"os.path.join",
"astropy.cosmology.FlatLambdaCDM",
"h5py.File",
"numpy.array",
"numpy.empty",
"pandas.DataFrame",
"numpy.isinf"
] | [((2130, 2146), 'numpy.empty', 'np.empty', (['n_bins'], {}), '(n_bins)\n', (2138, 2146), True, 'import numpy as np\n'), ((2163, 2179), 'numpy.empty', 'np.empty', (['n_bins'], {}), '(n_bins)\n', (2171, 2179), True, 'import numpy as np\n'), ((5116, 5135), 'numpy.array', 'np.array', (['path_list'], {}), '(path_list)\n', (5124, 5135), True, 'import numpy as np\n'), ((5583, 5613), 'astropy.cosmology.FlatLambdaCDM', 'FlatLambdaCDM', ([], {}), '(**cosmo_astropy)\n', (5596, 5613), False, 'from astropy.cosmology import FlatLambdaCDM\n'), ((1455, 1508), 'numpy.array', 'np.array', (['[(b.start + 0.5 * b.width) for b in a_bins]'], {}), '([(b.start + 0.5 * b.width) for b in a_bins])\n', (1463, 1508), True, 'import numpy as np\n'), ((1532, 1567), 'numpy.array', 'np.array', (['[b.width for b in a_bins]'], {}), '([b.width for b in a_bins])\n', (1540, 1567), True, 'import numpy as np\n'), ((1585, 1605), 'numpy.array', 'np.array', (['f_nu_input'], {}), '(f_nu_input)\n', (1593, 1605), True, 'import numpy as np\n'), ((4529, 4542), 're.compile', 're.compile', (['k'], {}), '(k)\n', (4539, 4542), False, 'import re\n'), ((6768, 6817), 'os.path.join', 'os.path.join', (['sed_library_dir', 'f"""sed_fit_{hp}.h5"""'], {}), "(sed_library_dir, f'sed_fit_{hp}.h5')\n", (6780, 6817), False, 'import os\n'), ((10761, 10833), 'desc.skycatalogs.utils.common_utils.print_dated_msg', 'print_dated_msg', (['f"""Entered _write_summary for component {self.cmp_name}"""'], {}), "(f'Entered _write_summary for component {self.cmp_name}')\n", (10776, 10833), False, 'from desc.skycatalogs.utils.common_utils import print_dated_msg\n'), ((10926, 10969), 'os.path.join', 'os.path.join', (['self.output_dir', 'basename_csv'], {}), '(self.output_dir, basename_csv)\n', (10938, 10969), False, 'import os\n'), ((11072, 11121), 'os.path.join', 'os.path.join', (['self.output_dir', 'basename_csv_brief'], {}), '(self.output_dir, basename_csv_brief)\n', (11084, 11121), False, 'import os\n'), ((11216, 11258), 'os.path.join', 
'os.path.join', (['self.output_dir', 'basename_pq'], {}), '(self.output_dir, basename_pq)\n', (11228, 11258), False, 'import os\n'), ((11539, 11566), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'out_dict'}), '(data=out_dict)\n', (11551, 11566), True, 'import pandas as pd\n'), ((11824, 11851), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'out_dict'}), '(data=out_dict)\n', (11836, 11851), True, 'import pandas as pd\n'), ((12521, 12589), 'desc.skycatalogs.utils.common_utils.print_dated_msg', 'print_dated_msg', (['f"""Cmp.create called for component {self.cmp_name}"""'], {}), "(f'Cmp.create called for component {self.cmp_name}')\n", (12536, 12589), False, 'from desc.skycatalogs.utils.common_utils import print_dated_msg\n'), ((13217, 13234), 'numpy.isinf', 'np.isinf', (['magnorm'], {}), '(magnorm)\n', (13225, 13234), True, 'import numpy as np\n'), ((13553, 13590), 'numpy.random.default_rng', 'default_rng', (['seed_dict[self.cmp_name]'], {}), '(seed_dict[self.cmp_name])\n', (13564, 13590), False, 'from numpy.random import default_rng\n'), ((14028, 14061), 'os.getenv', 'os.getenv', (['"""SIMS_SED_LIBRARY_DIR"""'], {}), "('SIMS_SED_LIBRARY_DIR')\n", (14037, 14061), False, 'import os\n'), ((6964, 6995), 'h5py.File', 'h5py.File', (['self.sed_lookup_file'], {}), '(self.sed_lookup_file)\n', (6973, 6995), False, 'import h5py\n'), ((7095, 7119), 'numpy.array', 'np.array', (["f['sed_names']"], {}), "(f['sed_names'])\n", (7103, 7119), True, 'import numpy as np\n'), ((7148, 7171), 'numpy.array', 'np.array', (["f['disk_sed']"], {}), "(f['disk_sed'])\n", (7156, 7171), True, 'import numpy as np\n'), ((7201, 7225), 'numpy.array', 'np.array', (["f['bulge_sed']"], {}), "(f['bulge_sed'])\n", (7209, 7225), True, 'import numpy as np\n'), ((7255, 7279), 'numpy.array', 'np.array', (["f['galaxy_id']"], {}), "(f['galaxy_id'])\n", (7263, 7279), True, 'import numpy as np\n'), ((14227, 14266), 'os.path.join', 'os.path.join', (['self.output_dir', 'filename'], {}), '(self.output_dir, 
filename)\n', (14239, 14266), False, 'import os\n'), ((4797, 4815), 'os.path.join', 'os.path.join', (['v', 'f'], {}), '(v, f)\n', (4809, 4815), False, 'import os\n'), ((6491, 6513), 'numpy.log10', 'np.log10', (['(Fnu / one_Jy)'], {}), '(Fnu / one_Jy)\n', (6499, 6513), True, 'import numpy as np\n'), ((13254, 13282), 'numpy.ma.array', 'ma.array', (['sed'], {'mask': 'mask_inf'}), '(sed, mask=mask_inf)\n', (13262, 13282), True, 'import numpy.ma as ma\n'), ((13318, 13349), 'numpy.ma.array', 'ma.array', (['gal_id'], {'mask': 'mask_inf'}), '(gal_id, mask=mask_inf)\n', (13326, 13349), True, 'import numpy.ma as ma\n'), ((13386, 13418), 'numpy.ma.array', 'ma.array', (['magnorm'], {'mask': 'mask_inf'}), '(magnorm, mask=mask_inf)\n', (13394, 13418), True, 'import numpy.ma as ma\n'), ((13456, 13489), 'numpy.ma.array', 'ma.array', (['redshift'], {'mask': 'mask_inf'}), '(redshift, mask=mask_inf)\n', (13464, 13489), True, 'import numpy.ma as ma\n'), ((14864, 14899), 'os.path.join', 'os.path.join', (['sed_rootdir', 'orig_sed'], {}), '(sed_rootdir, orig_sed)\n', (14876, 14899), False, 'import os\n'), ((14951, 14985), 'desc.skycatalogs.utils.common_utils.print_dated_msg', 'print_dated_msg', (['f"""Wrote file {i}"""'], {}), "(f'Wrote file {i}')\n", (14966, 14985), False, 'from desc.skycatalogs.utils.common_utils import print_dated_msg\n')] |
import os, math
import pickle as pk
import numpy as np
from collections.abc import Iterable
def readPLA(fn):
    """Parse a PLA truth-table file.

    Returns (num_inputs, data, labels) where data is an int8 array of
    shape (num_inputs, num_lines) — one column per pattern — and labels
    holds the single output bit of each line. Returns (None, None, None)
    when the file does not exist.
    """
    if not os.path.isfile(fn):
        print('Warning: PLA "{}" not found.'.format(fn))
        return (None,) * 3

    def header_num(line, tag):
        # '.i 3' -> 3, etc.
        return int(line.strip('\n').replace(tag, '').replace(' ', ''))

    def pattern(line):
        return line.strip('\n').replace(' ', '')

    def to_i8(seq):
        return np.array(seq, dtype=np.int8)

    with open(fn) as fp:
        n_in = header_num(fp.readline(), '.i')
        n_out = header_num(fp.readline(), '.o')
        n_lines = header_num(fp.readline(), '.p')
        # Skip forward to the start of the table proper.
        for line in fp:
            if line.startswith('.type fr'):
                break
        assert n_out == 1
        rows, labels = [], []
        for _ in range(n_lines):
            pat = pattern(fp.readline())
            assert len(pat) == n_in + n_out
            rows.append(to_i8(list(pat[:-1])))
            labels.append(pat[-1])
        # Consume the remainder up to the end marker.
        for line in fp:
            if line.startswith('.e'):
                break
    return n_in, to_i8(rows).transpose(), to_i8(labels)
def readNNDump(fn):
    """Load a pickled per-sample list of per-layer NN outputs.

    NOTE(review): the loop below is an unfinished stub — it iterates over
    the layers of the first sample and does nothing, and the function
    returns None. Presumably per-layer processing was meant to be added;
    confirm intent before relying on this.
    """
    x = pk.load(open(fn, 'rb'))
    # sample 1: [[layer1 outputs...], [layer2 outputs...], ..., [layerN output]]
    # sample 2: [[layer1 outputs...], [layer2 outputs...], ..., [layerN output]]
    # sample 3 ... sample N
    nLay = len(x[0])
    for i in range(nLay):
        pass
def randChoice(n, m, p=None):
    """Randomly choose n integers from {0, ..., m-1} with probabilities p.

    When n < m the draw is without replacement (weighted by p). Otherwise
    the result is n // m full copies of range(m) followed by a weighted
    no-replacement draw for the remaining n % m values.
    """
    if n < m:
        return np.random.choice(m, n, False, p)
    full_copies, remainder = divmod(n, m)
    pieces = [np.arange(m) for _ in range(full_copies)]
    pieces.append(np.random.choice(m, remainder, False, p))
    return np.concatenate(pieces)
# calculate mutual information of the given 2 arrays
def getMI(x, y):
    """Empirical mutual information (in nats) between paired samples x and y.

    Elements may be scalars or iterables; iterables are converted to
    tuples so they can be used as dict keys.
    """
    asTuple = lambda v: tuple(v) if isinstance(v, Iterable) else int(v)
    assert len(x) == len(y)
    # Count marginal occurrences of x, of y, and joint (x, y) pairs.
    xCnt, yCnt, xyCnt = dict(), dict(), dict()
    for i, j in zip(x, y):
        i, j = asTuple(i), asTuple(j)
        k = (i, j)
        if i in xCnt: xCnt[i] += 1
        else: xCnt[i] = 1
        if j in yCnt: yCnt[j] += 1
        else: yCnt[j] = 1
        if k in xyCnt: xyCnt[k] += 1
        else: xyCnt[k] = 1
    # MI = sum over observed pairs of p(i,j) * log(p(i,j) / (p(i) * p(j))).
    mi = 0.0
    for i, xc in xCnt.items():
        for j, yc in yCnt.items():
            if (i, j) not in xyCnt: continue
            xProb = xc / len(x)
            yProb = yc / len(x)
            xyProb = xyCnt[(i, j)] / len(x)
            #print(xc, yc, xyCnt[(i, j)])
            #print(xProb, yProb, xyProb)
            mi += xyProb * math.log(xyProb / (xProb * yProb))
    # MI is non-negative in exact arithmetic; this also guards the formula.
    assert mi >= 0
return mi | [
"numpy.random.choice",
"math.log",
"os.path.isfile",
"numpy.array",
"numpy.concatenate",
"numpy.arange"
] | [((1735, 1754), 'numpy.concatenate', 'np.concatenate', (['ret'], {}), '(ret)\n', (1749, 1754), True, 'import numpy as np\n'), ((127, 145), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (141, 145), False, 'import os, math\n'), ((396, 422), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.int8'}), '(x, dtype=np.int8)\n', (404, 422), True, 'import numpy as np\n'), ((1565, 1597), 'numpy.random.choice', 'np.random.choice', (['m', 'n', '(False)', 'p'], {}), '(m, n, False, p)\n', (1581, 1597), True, 'import numpy as np\n'), ((1641, 1653), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (1650, 1653), True, 'import numpy as np\n'), ((1689, 1721), 'numpy.random.choice', 'np.random.choice', (['m', 'r', '(False)', 'p'], {}), '(m, r, False, p)\n', (1705, 1721), True, 'import numpy as np\n'), ((2610, 2644), 'math.log', 'math.log', (['(xyProb / (xProb * yProb))'], {}), '(xyProb / (xProb * yProb))\n', (2618, 2644), False, 'import os, math\n')] |
import os
import numpy as np
import pandas as pd
from tensorflow.keras.utils import to_categorical
from sklearn import model_selection
class ACIC(object):
    """Wrapper around one ACIC benchmark trial.

    Loads covariates and targets, splits them into train/test, and keeps
    the normalization statistics computed on the training split.
    """

    def __init__(
        self,
        path,
        trial,
        center=False,
        exclude_population=False,
    ):
        self.trial = trial
        covariates, targets = load_data(
            path=path,
            trial=trial,
            subset='1'
        )
        train_split, test_split = train_test_split(
            x_df=covariates,
            targets_df=targets,
            trial=trial,
            test_size=0.3
        )
        self.train_data = get_trial(dataset=train_split)
        # Normalization statistics come from the training split only.
        self.x_mean = self.train_data['x_cont'].mean(0, keepdims=True)
        self.x_std = self.train_data['x_cont'].std(0, keepdims=True) + 1e-7
        self.y_mean = self.train_data['y'].mean(dtype='float32')
        self.y_std = self.train_data['y'].std(dtype='float32') + 1e-7
        self.test_data = get_trial(dataset=test_split)
        self.dim_x_cont = self.train_data['x_cont'].shape[-1]
        self.dim_x_bin = self.train_data['x_bin'].shape[-1]
        self.dim_x = self.dim_x_cont + self.dim_x_bin

    def get_training_data(self):
        """Return (x, y, t, examples_per_treatment) for the training split."""
        x, y, t = self.preprocess(self.train_data)
        return x, y, t, t.sum(0)

    def get_test_data(self, test_set=True):
        """Return normalized covariates and the true CATE (mu1 - mu0)."""
        source = self.test_data if test_set else self.train_data
        x, _, _ = self.preprocess(source)
        mu1 = source['mu1'].astype('float32')
        mu0 = source['mu0'].astype('float32')
        return x, mu1 - mu0

    def get_subpop(self, test_set=True):
        source = self.test_data if test_set else self.train_data
        return source['ind_subpop']

    def get_t(self, test_set=True):
        source = self.test_data if test_set else self.train_data
        return source['t']

    def preprocess(self, dataset):
        """Standardize continuous covariates and the outcome; pass binaries through."""
        scaled_cont = (dataset['x_cont'] - self.x_mean) / self.x_std
        features = np.hstack([scaled_cont, dataset['x_bin']])
        outcome = (dataset['y'].astype('float32') - self.y_mean) / self.y_std
        treatment = dataset['t'].astype('float32')
        return features, outcome, treatment
def load_data(
    path,
    trial,
    subset='1'
):
    """Read the covariate table and the first target file of a trial.

    Covariates come from <path>/x.csv; targets from the alphabetically
    first file in <path>/<trial + 1>/.
    NOTE(review): *subset* is currently unused -- confirm whether it was
    meant to select a targets file.
    """
    covariates = pd.read_csv(os.path.join(path, 'x.csv'))
    # Letter-coded categorical columns -> 0-based integer codes ('A' -> 0).
    for col in ('x_2', 'x_21', 'x_24'):
        covariates[col] = [ord(v) - 65 for v in covariates[col]]
    targets_dir = os.path.join(path, str(trial + 1))
    target_files = sorted(os.listdir(targets_dir))
    targets = pd.read_csv(os.path.join(targets_dir, target_files[0]))
    return covariates, targets
def train_test_split(
    x_df,
    targets_df,
    trial,
    test_size=0.3,
):
    """Shuffled covariate/target split seeded by *trial*.

    Returns (train, test), each a dict with keys 'x' and 'targets'.
    """
    splits = model_selection.train_test_split(
        x_df,
        targets_df,
        test_size=test_size,
        random_state=trial,  # deterministic per trial
        shuffle=True
    )
    x_train, x_test, targets_train, targets_test = splits
    return (
        {'x': x_train, 'targets': targets_train},
        {'x': x_test, 'targets': targets_test},
    )
def get_trial(
    dataset
):
    """Assemble float32 numpy arrays for one split.

    *dataset* holds 'x' (covariate DataFrame) and 'targets' (DataFrame
    with z / y0 / y1 / mu0 / mu1 columns).  Categorical columns are
    one-hot encoded and appended to the binary block.
    """
    cat_feats = {'x_2': 6, 'x_21': 16, 'x_24': 5}
    bin_feats = ['x_17', 'x_22', 'x_38', 'x_51', 'x_54']
    # Everything that is neither binary nor categorical is continuous.
    cont_feats = [
        'x_{}'.format(i) for i in range(1, 59)
        if 'x_{}'.format(i) not in bin_feats and 'x_{}'.format(i) not in cat_feats
    ]
    x_df = dataset['x']
    x_bin = x_df[bin_feats].to_numpy('float32')
    # Append a one-hot encoding for each categorical column.
    for name, n_classes in cat_feats.items():
        encoded = to_categorical(
            dataset['x'][name].to_numpy(),
            num_classes=n_classes,
            dtype='float32'
        )
        x_bin = np.hstack([x_bin, encoded])
    x_cont = x_df[cont_feats].to_numpy('float32')
    targets_df = dataset['targets']
    t = targets_df['z'].to_numpy()
    y0 = targets_df['y0'].to_numpy()
    y1 = targets_df['y1'].to_numpy()
    # Observed outcome: y1 for treated units, y0 for controls.
    y = np.zeros_like(t, 'float32')
    treated = t > 0.5
    control = t < 0.5
    y[treated] = y1[treated]
    y[control] = y0[control]
    # Two-column treatment indicator: [1 - t, t].
    t_in = np.zeros((len(t), 2), 'float32')
    t_in[:, 0] = 1 - t
    t_in[:, 1] = t
    return {
        'x_cont': x_cont,
        'x_bin': x_bin,
        'y': y.astype('float32'),
        't': t_in.astype('float32'),
        'mu0': targets_df['mu0'].to_numpy().astype('float32'),
        'mu1': targets_df['mu1'].to_numpy().astype('float32')
    }
| [
"tensorflow.keras.utils.to_categorical",
"os.listdir",
"pandas.read_csv",
"numpy.hstack",
"sklearn.model_selection.train_test_split",
"os.path.join",
"numpy.zeros_like"
] | [((2361, 2388), 'os.path.join', 'os.path.join', (['path', '"""x.csv"""'], {}), "(path, 'x.csv')\n", (2373, 2388), False, 'import os\n'), ((2462, 2485), 'os.listdir', 'os.listdir', (['targets_dir'], {}), '(targets_dir)\n', (2472, 2485), False, 'import os\n'), ((2522, 2541), 'pandas.read_csv', 'pd.read_csv', (['x_path'], {}), '(x_path)\n', (2533, 2541), True, 'import pandas as pd\n'), ((3031, 3140), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['x_df', 'targets_df'], {'test_size': 'test_size', 'random_state': 'trial', 'shuffle': '(True)'}), '(x_df, targets_df, test_size=test_size,\n random_state=trial, shuffle=True)\n', (3063, 3140), False, 'from sklearn import model_selection\n'), ((4221, 4248), 'numpy.zeros_like', 'np.zeros_like', (['t', '"""float32"""'], {}), "(t, 'float32')\n", (4234, 4248), True, 'import numpy as np\n'), ((2115, 2141), 'numpy.hstack', 'np.hstack', (['[x_cont, x_bin]'], {}), '([x_cont, x_bin])\n', (2124, 2141), True, 'import numpy as np\n'), ((2757, 2800), 'os.path.join', 'os.path.join', (['targets_dir', 'targets_paths[0]'], {}), '(targets_dir, targets_paths[0])\n', (2769, 2800), False, 'import os\n'), ((3884, 3933), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['f'], {'num_classes': 'v', 'dtype': '"""float32"""'}), "(f, num_classes=v, dtype='float32')\n", (3898, 3933), False, 'from tensorflow.keras.utils import to_categorical\n'), ((3996, 4017), 'numpy.hstack', 'np.hstack', (['[x_bin, f]'], {}), '([x_bin, f])\n', (4005, 4017), True, 'import numpy as np\n')] |
#read data - done by <NAME>
import io
import socket
import struct
import cv2 as cv
import numpy as np
# Start a server socket and accept a single client connection.
server_socket = socket.socket()
# 0.0.0.0 binds all interfaces; 8000 is the listening port.
server_socket.bind(('0.0.0.0', 8000))
server_socket.listen(0)
# BUG FIX: the original called accept() twice, which blocked waiting for a
# *second* client and paired the file object with the wrong peer address.
# accept() returns (conn, address) for one connection; unpack it once.
client_socket, address = server_socket.accept()
connection = client_socket.makefile('rb')
print('address:', address)
try:
    imindex = 0
    while True:
        # Read the raw encoded image bytes sent by the client.
        image_stream = connection.read()
        if not image_stream:
            break  # peer closed the connection
        # np.frombuffer replaces the deprecated np.fromstring.
        image = cv.imdecode(np.frombuffer(image_stream, np.uint8), cv.IMREAD_COLOR)
        # imdecode returns None on failure; the original indexed image[0],
        # which is ambiguous for arrays and crashes when decoding fails.
        # (It also clobbered the decoded image with image_stream[1].)
        if image is None:
            print('error in reading')
            continue
        print('image' + str(imindex) + ' decoded')
        cv.imshow('image' + str(imindex), image)
        # imshow only renders inside waitKey; show for 600 ms then clean up.
        cv.waitKey(600)
        cv.destroyAllWindows()
        imindex += 1
finally:
    connection.close()
    server_socket.close()
| [
"numpy.fromstring",
"socket.socket"
] | [((162, 177), 'socket.socket', 'socket.socket', ([], {}), '()\n', (175, 177), False, 'import socket\n'), ((611, 648), 'numpy.fromstring', 'np.fromstring', (['image_stream', 'np.uint8'], {}), '(image_stream, np.uint8)\n', (624, 648), True, 'import numpy as np\n')] |
import asyncio
from pathlib import Path
import numpy as np
from nurses_2.app import App
from nurses_2.widgets.image import Image
from nurses_2.widgets.parallax import Parallax
IMAGES_DIR = Path("images")
SIZE = 30, 50
def load_layers(path):
    """Return an Image widget (sized SIZE) for every .png in *path*, ordered by file stem."""
    ordered = sorted(path.iterdir(), key=lambda entry: entry.stem)
    return [
        Image(size=SIZE, path=entry)
        for entry in ordered if entry.suffix == ".png"
    ]
class MyApp(App):
    """Demo app: two parallax widgets, one swept in a circle, one scrolled horizontally."""

    async def on_start(self):
        parallax_00 = Parallax(size=SIZE, layers=load_layers(IMAGES_DIR / "parallax_00"))
        parallax_01 = Parallax(pos=(0, 50), size=SIZE, layers=load_layers(IMAGES_DIR / "parallax_01"))
        self.add_widgets(parallax_00, parallax_01)

        async def circle_movement():
            # Sweep the offset around a circle of radius 50, one step per frame.
            angles = np.linspace(0, 2 * np.pi, 400)
            radius = 50
            while True:
                for theta in angles:
                    parallax_00.offset = radius * np.cos(theta), radius * np.sin(theta)
                    await asyncio.sleep(.016)

        async def horizontal_movement():
            while True:
                parallax_01.horizontal_offset += 1
                await asyncio.sleep(.08)

        # BUG FIX: keep references to the tasks. The event loop holds only
        # weak references, so an un-referenced task may be garbage-collected
        # mid-flight (see asyncio.create_task documentation).
        self._animation_tasks = [
            asyncio.create_task(circle_movement()),
            asyncio.create_task(horizontal_movement()),
        ]
MyApp().run()
| [
"pathlib.Path",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"asyncio.sleep",
"nurses_2.widgets.image.Image"
] | [((192, 206), 'pathlib.Path', 'Path', (['"""images"""'], {}), "('images')\n", (196, 206), False, 'from pathlib import Path\n'), ((335, 362), 'nurses_2.widgets.image.Image', 'Image', ([], {'size': 'SIZE', 'path': 'path'}), '(size=SIZE, path=path)\n', (340, 362), False, 'from nurses_2.widgets.image import Image\n'), ((780, 810), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(400)'], {}), '(0, 2 * np.pi, 400)\n', (791, 810), True, 'import numpy as np\n'), ((1170, 1189), 'asyncio.sleep', 'asyncio.sleep', (['(0.08)'], {}), '(0.08)\n', (1183, 1189), False, 'import asyncio\n'), ((1011, 1031), 'asyncio.sleep', 'asyncio.sleep', (['(0.016)'], {}), '(0.016)\n', (1024, 1031), False, 'import asyncio\n'), ((947, 960), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (953, 960), True, 'import numpy as np\n'), ((971, 984), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (977, 984), True, 'import numpy as np\n')] |
"""
RQ6 script
"""
import argparse
import os, sys
import utils.data_util as data_util
import auto_patch
import time
import numpy as np
import gc
TOP_N = 1  # rank of the misclassification pair to repair (1 = most frequent)

# ---- command-line interface -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("-datadir", action = "store", default = "data", type = str)
parser.add_argument('-which_data', action = "store",
	default = 'cifar10', type = str, help = 'fashion_mnist,cifaf10,lfw')
parser.add_argument("-tensor_name_file", action = "store",
	default = "data/tensor_names/tensor.lastLayer.names ", type = str)
parser.add_argument("-patch_key", action = "store", default = "key")
parser.add_argument("-path_to_keras_model", action = 'store', default = None)
parser.add_argument("-seed", action = "store", default = 1, type = int)
parser.add_argument("-iter_num", action = "store", default = 100, type = int)
parser.add_argument("-target_indices_file", action = "store", default = None)
parser.add_argument("-dest", default = ".", type = str)
parser.add_argument("-patch_aggr", action = 'store', default = None, type = float)
parser.add_argument("-female_lst_file", action = 'store',
	default = 'data/lfw_np/female_names_lfw.txt', type = str)
args = parser.parse_args()

os.makedirs(args.dest, exist_ok = True)

# This experiment is hard-wired to the LFW / VGG setting regardless of
# the -which_data default above.
which = 'lfw_vgg'
which_data = 'lfw'
train_data, test_data = data_util.load_data(which_data, args.datadir,
	path_to_female_names = args.female_lst_file)
train_X,train_y = train_data
num_train = len(train_y)
test_X,test_y = test_data
# Keep every second test sample only (halves the test set).
test_X = np.asarray(test_X[::2])
test_y = np.asarray(test_y[::2])
test_data = [test_X, test_y]
num_test = len(test_y)

iter_num = args.iter_num
num_label = 2  # binary classification task
# miclfds: key = (true label, predicted label), values: indices to the misclassified inputs
misclfds = data_util.get_misclf_indices(args.target_indices_file,
	target_indices = None,
	use_all = False)
num_entire_misclfs = np.sum([len(vs) for vs in misclfds.values()])

# Pick the TOP_N-th most frequent misclassification pair as the patch target.
sorted_keys = data_util.sort_keys_by_cnt(misclfds)
misclf_key = sorted_keys[TOP_N-1]
indices = misclfds[misclf_key]
# Presumably maps indices recorded on the full test set onto the halved
# set built above ([::2]) -- TODO confirm against get_misclf_indices.
indices = [int(i/2) for i in indices]
print ("Processing: {}".format("{}-{}".format(misclf_key[0],misclf_key[1])))
#num_of_sampled_correct = num_test - num_entire_misclfs
#print ("The number of correct samples: {}".format(num_of_sampled_correct))
#num_wrong_inputs_to_patch = len(indices)
#print ('pre_defined', num_wrong_inputs_to_patch)

# Run the DE-based search that patches the model on the selected pair.
t1 = time.time()
patched_model_name, indices_to_target_inputs, indices_to_patched = auto_patch.patch(
	num_label,
	test_data,
	args.tensor_name_file,
	max_search_num = iter_num,
	search_method = 'DE',
	which = which,
	loc_method = "localiser",
	patch_target_key = "misclf-{}-{}".format(args.patch_key,"{}-{}".format(misclf_key[0],misclf_key[1])),
	path_to_keras_model = args.path_to_keras_model,
	predef_indices_to_wrong = indices,
	seed = args.seed,
	patch_aggr = args.patch_aggr)
t2 = time.time()
print ("Time for patching: {}".format(t2 - t1))
print ("patched_model_name", patched_model_name)

# Move the saved patch description (.json) into the requested destination.
os.rename(patched_model_name + ".json", os.path.join(args.dest, os.path.basename(patched_model_name) + ".json"))
gc.collect()
| [
"utils.data_util.get_misclf_indices",
"utils.data_util.load_data",
"os.makedirs",
"argparse.ArgumentParser",
"numpy.asarray",
"utils.data_util.sort_keys_by_cnt",
"os.path.basename",
"gc.collect",
"time.time"
] | [((166, 191), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (189, 191), False, 'import argparse\n'), ((1183, 1220), 'os.makedirs', 'os.makedirs', (['args.dest'], {'exist_ok': '(True)'}), '(args.dest, exist_ok=True)\n', (1194, 1220), False, 'import os, sys\n'), ((1285, 1378), 'utils.data_util.load_data', 'data_util.load_data', (['which_data', 'args.datadir'], {'path_to_female_names': 'args.female_lst_file'}), '(which_data, args.datadir, path_to_female_names=args.\n female_lst_file)\n', (1304, 1378), True, 'import utils.data_util as data_util\n'), ((1469, 1492), 'numpy.asarray', 'np.asarray', (['test_X[::2]'], {}), '(test_X[::2])\n', (1479, 1492), True, 'import numpy as np\n'), ((1502, 1525), 'numpy.asarray', 'np.asarray', (['test_y[::2]'], {}), '(test_y[::2])\n', (1512, 1525), True, 'import numpy as np\n'), ((1724, 1818), 'utils.data_util.get_misclf_indices', 'data_util.get_misclf_indices', (['args.target_indices_file'], {'target_indices': 'None', 'use_all': '(False)'}), '(args.target_indices_file, target_indices=None,\n use_all=False)\n', (1752, 1818), True, 'import utils.data_util as data_util\n'), ((1907, 1943), 'utils.data_util.sort_keys_by_cnt', 'data_util.sort_keys_by_cnt', (['misclfds'], {}), '(misclfds)\n', (1933, 1943), True, 'import utils.data_util as data_util\n'), ((2357, 2368), 'time.time', 'time.time', ([], {}), '()\n', (2366, 2368), False, 'import time\n'), ((2844, 2855), 'time.time', 'time.time', ([], {}), '()\n', (2853, 2855), False, 'import time\n'), ((3069, 3081), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3079, 3081), False, 'import gc\n'), ((3019, 3055), 'os.path.basename', 'os.path.basename', (['patched_model_name'], {}), '(patched_model_name)\n', (3035, 3055), False, 'import os, sys\n')] |
from typing import Dict, Optional, List, Mapping
import numpy as np
import datasets
from raft_baselines.classifiers.in_context_classifier import InContextClassifier
from raft_baselines.utils.gpt3_utils import (
complete,
search,
)
from raft_baselines.utils.tokenizers import TransformersTokenizer
GPT3_MAX_TOKENS = 2048
tokenizer = TransformersTokenizer("gpt2")
class GPT3Classifier(InContextClassifier):
    """In-context classifier backed by the GPT-3 completion and search APIs."""

    def __init__(
        self,
        *args,
        engine: str = "ada",
        search_engine: str = "ada",
        **kwargs,
    ) -> None:
        super().__init__(
            *args,
            tokenizer=tokenizer,
            max_tokens=GPT3_MAX_TOKENS,
            **kwargs,
        )
        self.engine: str = engine
        self.search_engine: str = search_engine

    def semantically_select_training_examples(
        self, target: Mapping[str, str]
    ) -> datasets.Dataset:
        """Pick the prompt examples most similar to *target* via the search API."""
        unlabeled_examples = tuple(
            self.format_dict(
                {col: row[col] for col in self.input_cols if col in row},
            )
            for row in self.training_data
        )
        search_results = search(
            unlabeled_examples,
            self.format_dict(target),
            self.search_engine,
        )
        # Rank by relevance (highest score first) ...
        ranked = sorted(
            search_results,
            key=lambda result: result["score"],  # type: ignore
            reverse=True,
        )
        selected = [result["document"] for result in ranked]  # type: ignore
        top = selected[: self.num_prompt_training_examples]
        # ... then reverse so the most relevant example ends up closest to
        # the target in the prompt.
        return self.training_data.select(list(reversed(top)))

    def does_token_match_class(self, token: str, clas: str) -> bool:
        """True iff *token* is the first token of the label string for *clas*."""
        # Prepend a space to the class label because we always expect a
        # leading space in the first token returned from the OpenAI API,
        # given our prompt format.
        if self.add_prefixes:
            clas_str = f" {self.classes.index(clas) + 1}"
        else:
            clas_str = f" {clas}"
        clas_first_token_id: int = self.tokenizer(clas_str)["input_ids"][0]
        token_id: int = self.tokenizer(token)["input_ids"][0]
        # Compare token ids rather than raw tokens: the API and
        # GPT2TokenizerFast render special characters differently (e.g. a
        # leading space is " " in the API but "Ġ" in the tokenizer).
        return clas_first_token_id == token_id

    def _get_raw_probabilities(
        self,
        prompt: str,
    ) -> List[float]:
        """Sum top-logprob masses of the first completion token per class."""
        response = complete(
            prompt,
            temperature=0.0,
            engine=self.engine,
            max_tokens=1,
        )
        logprobs: Dict[str, float] = response["choices"][0]["logprobs"]["top_logprobs"][
            0
        ]
        return [
            sum(
                (
                    np.exp(logprob)
                    for tok, logprob in logprobs.items()
                    if self.does_token_match_class(tok, clas)
                ),
                0.0,
            )
            for clas in self.classes
        ]
| [
"raft_baselines.utils.gpt3_utils.complete",
"raft_baselines.utils.tokenizers.TransformersTokenizer",
"numpy.exp"
] | [((343, 372), 'raft_baselines.utils.tokenizers.TransformersTokenizer', 'TransformersTokenizer', (['"""gpt2"""'], {}), "('gpt2')\n", (364, 372), False, 'from raft_baselines.utils.tokenizers import TransformersTokenizer\n'), ((2765, 2832), 'raft_baselines.utils.gpt3_utils.complete', 'complete', (['prompt'], {'temperature': '(0.0)', 'engine': 'self.engine', 'max_tokens': '(1)'}), '(prompt, temperature=0.0, engine=self.engine, max_tokens=1)\n', (2773, 2832), False, 'from raft_baselines.utils.gpt3_utils import complete, search\n'), ((3207, 3230), 'numpy.exp', 'np.exp', (['logprobs[token]'], {}), '(logprobs[token])\n', (3213, 3230), True, 'import numpy as np\n')] |
# author: <NAME>
# date: 2020-01-25
"""
This script takes preprocessed data from `data` folder, performs EDA and saves the result tables and figures to `results` folder.
Both the input file path+name and the save folder are required as inputs.
Usage: 03-EDA.py --input_path_file=<file_name> --save_folder=<save_folder>
Options:
--input_path_file=<file_name> path and file name of the input preprocessed data
--save_folder=<save_folder> folder to save the output table and figures
Example: python scripts/03-EDA.py --input_path_file=data/player_data_ready.csv --save_folder=results
"""
# Loading the required packages
# Data proc
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
# Plot
import altair as alt
import matplotlib.pylab as pl
# Other Packages
from docopt import docopt
import sys
import os
from termcolor import colored
# Ignore warnings from packages
import warnings
warnings.simplefilter("ignore")
opt = docopt(__doc__)
def main(input_path_file, save_folder):
    """Run the EDA pipeline.

    Loads the preprocessed CSV, computes each feature's correlation with
    the target 'playMin', and writes summary tables and figures into
    *save_folder*.
    """
    # Load the preprocessed data from csv
    # e.g. 'player_data_ready.csv'
    # Validate the file-path to load file
    path_str = str(input_path_file)
    if os.path.exists(path_str) == False:
        print(colored('ERROR: Path to file is not valid!', 'red'))
    # NOTE(review): bare except below swallows the error type; it does
    # re-raise, so failures still surface with a message first.
    try:
        df = pd.read_csv(path_str)
        print(colored('\nData loaded successfully!', 'green'))
    except:
        print(colored("ERROR: Data can't be loaded!", 'red'))
        raise
    # Validate the save_folder directory exists or make folder
    if os.path.exists(str(save_folder)) == False:
        try:
            os.makedirs(save_folder)
        except:
            print(colored('ERROR: Path to save directory is not valid!', 'red'))
            raise
    #######################################
    ########### EDA starts here ###########
    #######################################
    # Remove Unnecessary Columns (identifier/date columns, not features)
    info_cols = ['playDispNm', 'gmDate', 'teamAbbr']
    df = df.drop(columns=info_cols)
    # Use only train split to perform EDA
    df_train, df_test = train_test_split(df, test_size=0.2)
    print(colored('Train test split finished!', 'green'))
    # Make and save histogram of the target - playMin
    ax = df_train['playMin'].hist(bins=55, grid=False)
    pl.suptitle("Histogram of the target - playMin")
    fig = ax.get_figure()
    fig.savefig(str(save_folder)+'/EDA-hist_y.png')
    print(colored('EDA-hist_y.png successfully saved!', 'green'))
    # Calculate the correlations of the features against the target
    # (non-numeric columns fail inside np.corrcoef and are skipped).
    correlations = {}
    for col in df_train:
        if col == 'playMin':
            continue
        try:
            correlations[col] = round(np.corrcoef(df_train[col], df_train['playMin'])[0][1], 3)
        except:
            continue
    correl_df = pd.DataFrame.from_dict(correlations, orient='index')
    correl_df.columns = ['corr w/ target']
    correl_df = correl_df.sort_values('corr w/ target', ascending=False)
    assert len(correl_df) == df_train.shape[1] - 2, "Correlation table is not correctly calculated!" # `playPos` and target are not included
    # Save the top positively / negatively correlated features
    correl_df_pos_20 = correl_df.iloc[:20, :].copy()
    correl_df_neg_9 = correl_df.iloc[-9:, :].sort_values('corr w/ target').copy()
    correl_df_pos_20.to_csv(str(save_folder)+'/EDA-correl_df_pos_20.csv')
    print(colored('EDA-correl_df_pos_20.csv successfully saved!', 'green'))
    correl_df_neg_9.to_csv(str(save_folder)+'/EDA-correl_df_neg_9.csv')
    print(colored('EDA-correl_df_neg_9.csv successfully saved!', 'green'))
    # make and save the visualization of feature importance
    # (a "lollipop" chart: bar + circle + text layered in Altair)
    correl_df.reset_index(inplace=True)
    correl_df.columns = ['stat', 'correlation']
    sort = list(correl_df.reset_index()['index'])
    # Base bar chart (thin bars act as the lollipop sticks)
    c1 = alt.Chart(correl_df).mark_bar(size=1, color='black').encode(
        alt.X('correlation:Q',
              title='Correlation',
              scale=alt.Scale(zero=False, domain=[-.3, 1])),
        alt.Y('stat:N', title="", sort=sort))
    # Base circle chart (the lollipop heads)
    c2 = alt.Chart(correl_df).mark_circle(color='black', size=420).encode(
        alt.X('correlation:Q', scale=alt.Scale(zero=False, domain=[-.4, 1])),
        alt.Y('stat:N', sort=sort))
    # Base text chart (correlation value printed inside each circle)
    c3 = alt.Chart(correl_df).mark_text(color='white', size=8).encode(
        alt.X('correlation:Q', scale=alt.Scale(zero=False, domain=[-.4, 1])),
        alt.Y('stat:N', sort=sort),
        text=alt.Text('correlation:Q', format='.2f'))
    # Final chart object: layer the three charts and style the title
    correl_loli = (c1 + c2 + c3).properties(
        title='Feature Correlation with Target',
        width=400
    ).configure_title(
        fontSize=18,
        font='Courier',
        anchor='start')
    # Save chart object
    correl_loli.save(str(save_folder)+'/EDA-feat_corr.png', scale_factor=1.0)
    print(colored('EDA-feat_corr.png successfully saved!', 'green'))
    print(colored('\nEDA complete!', 'green'))
if __name__ == "__main__":
    # CLI options were parsed by docopt at import time (module-level `opt`).
    main(opt["--input_path_file"], opt["--save_folder"])
| [
"os.path.exists",
"termcolor.colored",
"pandas.read_csv",
"os.makedirs",
"sklearn.model_selection.train_test_split",
"matplotlib.pylab.suptitle",
"altair.Chart",
"numpy.corrcoef",
"pandas.DataFrame.from_dict",
"altair.Text",
"altair.Y",
"warnings.simplefilter",
"docopt.docopt",
"altair.Sca... | [((942, 973), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (963, 973), False, 'import warnings\n'), ((981, 996), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (987, 996), False, 'from docopt import docopt\n'), ((1982, 2017), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df'], {'test_size': '(0.2)'}), '(df, test_size=0.2)\n', (1998, 2017), False, 'from sklearn.model_selection import train_test_split\n'), ((2178, 2226), 'matplotlib.pylab.suptitle', 'pl.suptitle', (['"""Histogram of the target - playMin"""'], {}), "('Histogram of the target - playMin')\n", (2189, 2226), True, 'import matplotlib.pylab as pl\n'), ((2634, 2686), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['correlations'], {'orient': '"""index"""'}), "(correlations, orient='index')\n", (2656, 2686), True, 'import pandas as pd\n'), ((1187, 1211), 'os.path.exists', 'os.path.exists', (['path_str'], {}), '(path_str)\n', (1201, 1211), False, 'import os\n'), ((1296, 1317), 'pandas.read_csv', 'pd.read_csv', (['path_str'], {}), '(path_str)\n', (1307, 1317), True, 'import pandas as pd\n'), ((2025, 2071), 'termcolor.colored', 'colored', (['"""Train test split finished!"""', '"""green"""'], {}), "('Train test split finished!', 'green')\n", (2032, 2071), False, 'from termcolor import colored\n'), ((2306, 2360), 'termcolor.colored', 'colored', (['"""EDA-hist_y.png successfully saved!"""', '"""green"""'], {}), "('EDA-hist_y.png successfully saved!', 'green')\n", (2313, 2360), False, 'from termcolor import colored\n'), ((3205, 3269), 'termcolor.colored', 'colored', (['"""EDA-correl_df_pos_20.csv successfully saved!"""', '"""green"""'], {}), "('EDA-correl_df_pos_20.csv successfully saved!', 'green')\n", (3212, 3269), False, 'from termcolor import colored\n'), ((3347, 3410), 'termcolor.colored', 'colored', (['"""EDA-correl_df_neg_9.csv successfully saved!"""', '"""green"""'], {}), "('EDA-correl_df_neg_9.csv 
successfully saved!', 'green')\n", (3354, 3410), False, 'from termcolor import colored\n'), ((3807, 3843), 'altair.Y', 'alt.Y', (['"""stat:N"""'], {'title': '""""""', 'sort': 'sort'}), "('stat:N', title='', sort=sort)\n", (3812, 3843), True, 'import altair as alt\n'), ((4018, 4044), 'altair.Y', 'alt.Y', (['"""stat:N"""'], {'sort': 'sort'}), "('stat:N', sort=sort)\n", (4023, 4044), True, 'import altair as alt\n'), ((4213, 4239), 'altair.Y', 'alt.Y', (['"""stat:N"""'], {'sort': 'sort'}), "('stat:N', sort=sort)\n", (4218, 4239), True, 'import altair as alt\n'), ((4626, 4683), 'termcolor.colored', 'colored', (['"""EDA-feat_corr.png successfully saved!"""', '"""green"""'], {}), "('EDA-feat_corr.png successfully saved!', 'green')\n", (4633, 4683), False, 'from termcolor import colored\n'), ((4693, 4728), 'termcolor.colored', 'colored', (['"""\nEDA complete!"""', '"""green"""'], {}), "('\\nEDA complete!', 'green')\n", (4700, 4728), False, 'from termcolor import colored\n'), ((1230, 1281), 'termcolor.colored', 'colored', (['"""ERROR: Path to file is not valid!"""', '"""red"""'], {}), "('ERROR: Path to file is not valid!', 'red')\n", (1237, 1281), False, 'from termcolor import colored\n'), ((1326, 1376), 'termcolor.colored', 'colored', (['"""\nData loaded successfully!"""', '"""green"""'], {}), '("""\nData loaded successfully!""", \'green\')\n', (1333, 1376), False, 'from termcolor import colored\n'), ((1566, 1590), 'os.makedirs', 'os.makedirs', (['save_folder'], {}), '(save_folder)\n', (1577, 1590), False, 'import os\n'), ((4251, 4290), 'altair.Text', 'alt.Text', (['"""correlation:Q"""'], {'format': '""".2f"""'}), "('correlation:Q', format='.2f')\n", (4259, 4290), True, 'import altair as alt\n'), ((1392, 1438), 'termcolor.colored', 'colored', (['"""ERROR: Data can\'t be loaded!"""', '"""red"""'], {}), '("ERROR: Data can\'t be loaded!", \'red\')\n', (1399, 1438), False, 'from termcolor import colored\n'), ((3761, 3800), 'altair.Scale', 'alt.Scale', ([], {'zero': '(False)', 
'domain': '[-0.3, 1]'}), '(zero=False, domain=[-0.3, 1])\n', (3770, 3800), True, 'import altair as alt\n'), ((3972, 4011), 'altair.Scale', 'alt.Scale', ([], {'zero': '(False)', 'domain': '[-0.4, 1]'}), '(zero=False, domain=[-0.4, 1])\n', (3981, 4011), True, 'import altair as alt\n'), ((4167, 4206), 'altair.Scale', 'alt.Scale', ([], {'zero': '(False)', 'domain': '[-0.4, 1]'}), '(zero=False, domain=[-0.4, 1])\n', (4176, 4206), True, 'import altair as alt\n'), ((1610, 1671), 'termcolor.colored', 'colored', (['"""ERROR: Path to save directory is not valid!"""', '"""red"""'], {}), "('ERROR: Path to save directory is not valid!', 'red')\n", (1617, 1671), False, 'from termcolor import colored\n'), ((3623, 3643), 'altair.Chart', 'alt.Chart', (['correl_df'], {}), '(correl_df)\n', (3632, 3643), True, 'import altair as alt\n'), ((3872, 3892), 'altair.Chart', 'alt.Chart', (['correl_df'], {}), '(correl_df)\n', (3881, 3892), True, 'import altair as alt\n'), ((4071, 4091), 'altair.Chart', 'alt.Chart', (['correl_df'], {}), '(correl_df)\n', (4080, 4091), True, 'import altair as alt\n'), ((2540, 2587), 'numpy.corrcoef', 'np.corrcoef', (['df_train[col]', "df_train['playMin']"], {}), "(df_train[col], df_train['playMin'])\n", (2551, 2587), True, 'import numpy as np\n')] |
# 2018/08/31 Initial to use pandas, matplotlib,TAlib
# 2018/09/01 Using pandas to calculate MA
# Calculate golden and dead MA
# Add class PandasDataAnalysis
# Slove matplotlib to show Chinese charter
# 2018/09/02 Match '---' and '--' in close price by using regular express
# Caluclate uprate and downrate by rolling max min of dataframe
# 2018/09/03 add def file1_updownrate_LastMonthYear()
# 2018/09/04 dtype of close price icluding '---' and '--' is object except float64
# Save as test_SeymourTarget.py
# Add def file1_updownrate_LastMonthYear()
# 2018/09/05 Add def get_tradedays_dfinfo () and def file2_updownrate_QuarterYear
# in class PandasDataAnalysis
# Save as test_SeymourTarget.py
# 2018/09/06 Debug output CSV dulplicated rows.
# Add def file2_updownrate_threeYearoneYear in class PandasDataAnalysis
# 2018/09/14 Add def file1_call, file1_put, file2_call, file2_put nad percent2float
# 2018/09/15 Add def file3_call and file3_put
# 2018/10/28 Adapte from test_SeymourTarget.py
# 2019/09/30 Adapte from both test_crawl2sqlite3.py and test_SMTargetSqilte.py
# BB--->Bollinger Band
#########################################################################
from datetime import datetime, timedelta
import time, os, sys, re
import urllib.request
from lxml import html
import httplib2
from apiclient import discovery
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class Flag:
    """Minimal stand-in for an argparse/oauth2client flags object.

    NOTE(review): the attribute names mirror what the Google Drive
    credential helper (google_drive.get_credentials) expects -- confirm
    against that API before changing them.
    """
    auth_host_name = 'localhost'      # host used for the local OAuth redirect
    noauth_local_webserver = False    # False -> use the local-webserver flow
    auth_host_port = [8080, 8090]     # candidate ports for the redirect server
    logging_level = 'ERROR'           # oauth tooling log verbosity
flags = Flag()  # module-level instance passed to the Google Drive helper
strabspath=os.path.abspath(__file__)    # absolute path of this script file
strdirname=os.path.dirname(strabspath)  # directory holding this script
str_split=os.path.split(strdirname)
prevdirname=str_split[0]                 # parent directory of the script dir
dirnamelib=os.path.join(prevdirname,"lib")   # sibling "lib" folder (local modules)
dirnamelog=os.path.join(prevdirname,"logs")  # sibling "logs" folder
# Set logging directory
# NOTE(review): this creates 'logs' relative to the *current working
# directory*, while dirnamelog above points at <parent>/logs -- confirm
# which location is actually intended.
if not os.path.isdir('logs'):
    os.makedirs('logs')
import excelRW as excelrw
import readConfig as readConfig
import googleDrive as google_drive
import dataAnalysis as data_analysis
N = 240
XMAX = 5
WINMA = 10
ALPHA = 2
def get_bollinger(data, winma=10, alpha=2):
    """Compute Bollinger Bands for *data*.

    Args:
        data: 1-D sequence of values (e.g. prices).
        winma: rolling-window length for the moving average / std.
        alpha: band half-width in standard deviations.

    Returns:
        (lower, upper): numpy arrays of len(data), ma -/+ alpha*std, with
        the leading NaNs (first winma-1 positions) back-filled.
    """
    ser = pd.Series(data)
    ma = ser.rolling(winma).mean()
    std = ser.rolling(winma).std()
    # `.bfill()` is the documented replacement for the deprecated
    # `fillna(method='bfill')` (deprecated since pandas 2.1).  The
    # original also re-wrapped the result in pd.Series redundantly:
    # `ma - alpha*std` is already a Series.
    lower = (ma - alpha * std).bfill().values
    upper = (ma + alpha * std).bfill().values
    return lower, upper
def get_alerts(data, lower, upper):
    """Return (low, high): index arrays where *data* breaks below *lower* or above *upper*."""
    below = np.argwhere(data < lower)
    above = np.argwhere(data > upper)
    return below, above
if __name__ == '__main__':
    # ---- Configuration: read config.ini next to this script for the Excel
    # file names of the four call/put stock-index watch lists. ----
    configPath=os.path.join(strdirname,"config.ini")
    localReadConfig = readConfig.ReadConfig(configPath)
    stkidx_call_file01 = localReadConfig.get_SeymourExcel('stkidx_call_file01')
    stkidx_put_file01 = localReadConfig.get_SeymourExcel('stkidx_put_file01')
    stkidx_call_file02 = localReadConfig.get_SeymourExcel('stkidx_call_file02')
    stkidx_put_file02 = localReadConfig.get_SeymourExcel('stkidx_put_file02')
    stkidx_call_file03 = localReadConfig.get_SeymourExcel('stkidx_call_file03')
    stkidx_put_file03 = localReadConfig.get_SeymourExcel('stkidx_put_file03')
    stkidx_call_file04 = localReadConfig.get_SeymourExcel('stkidx_call_file04')
    stkidx_put_file04 = localReadConfig.get_SeymourExcel('stkidx_put_file04')
    str_color_ma = localReadConfig.get_SeymourExcel('color_ma05_ma20_ma30')
    list_color_ma = str_color_ma.split(',')
    str_candlestick_weekly_subfolder = localReadConfig.get_SeymourExcel("candlestick_weekly_subfolder")
    url_moneyhunter =localReadConfig.get_SeymourExcel('url_moneyhunterblog')#'http://twmoneyhunter.blogspot.com/'
    # 2019/1/10 (Thu): when this script does NOT run every weekday, keep the
    # two config reads below unremarked (dates come from config.ini).
    str_last_year_month_day = localReadConfig.get_SeymourExcel("last_year_month_day")
    str_first_year_month_day = localReadConfig.get_SeymourExcel("first_year_month_day")
    # 2019/1/10 (Thu): when this script DOES run every weekday, unremark the
    # two lines below to take today's date from the system instead.
    #str_last_year_month_day = datetime.date.today().strftime('%Y,%m,%d')# ex:2018,10,16 get today date form system
    #str_first_year_month_day = datetime.date.today().strftime('%Y,%m,%d')# ex:2018,10,16 get today date form system
    # 2019/09/01 MoneyHunter blog webpage layout revision; the xpath values changed.
    # Link-list positions on the blog: stable growth, 3; index ETF, 4;
    # bond ETF, 5; swing speculation, 6; cyclical investing, 7; business cycle, 8
    xpath_url_file01 = '//*[@id="LinkList1"]/div/ul/li[7]/a/@href'# cyclical investing
    xpath_url_file02 = '//*[@id="LinkList1"]/div/ul/li[6]/a/@href'# swing speculation
    xpath_url_file03 = '//*[@id="LinkList1"]/div/ul/li[8]/a/@href'# business cycle
    xpath_url_file04 = '//*[@id="LinkList1"]/div/ul/li[3]/a/@href'# stable growth
    #Python urllib urlopen not working
    #https://stackoverflow.com/questions/25863101/python-urllib-urlopen-not-working
    ###########################################
    # Fetch the blog front page and parse it so the watch-list links can be extracted.
    with urllib.request.urlopen(url_moneyhunter) as response:
        raw = response.read()
    html_doc = html.fromstring(raw)
    # ---- Google Drive: authorize and resolve the spreadsheet file IDs linked from the blog. ----
    credential_dir = os.getcwd()
    localgoogle_drive = google_drive.GoogleDrivebyFileID(dirnamelog,flags)
    credentials = localgoogle_drive.get_credentials(credential_dir)
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('drive', 'v3', http=http)
    fileid_file01 = localgoogle_drive.ggdrive_fileid(html_doc,xpath_url_file01)
    fileid_file02 = localgoogle_drive.ggdrive_fileid(html_doc,xpath_url_file02)
    fileid_file03 = localgoogle_drive.ggdrive_fileid(html_doc,xpath_url_file03)
    fileid_file04 = localgoogle_drive.ggdrive_fileid(html_doc,xpath_url_file04)
    # Download each watch-list workbook into the logs folder if needed.
    list_xlsfile, excel_file01 = localgoogle_drive.check_xlsfile_MHunterblog_logfolder(service,fileid_file01,dirnamelog)# "cyclical investing watchlist"
    list_xlsfile, excel_file02 = localgoogle_drive.check_xlsfile_MHunterblog_logfolder(service,fileid_file02,dirnamelog)# "swing speculation watchlist"
    list_xlsfile, excel_file03 = localgoogle_drive.check_xlsfile_MHunterblog_logfolder(service,fileid_file03,dirnamelog)# "business cycle watchlist"
    list_xlsfile, excel_file04 = localgoogle_drive.check_xlsfile_MHunterblog_logfolder(service,fileid_file04,dirnamelog)# "utilities watchlist"
    excel_file05 = localReadConfig.get_SeymourExcel("excelfile05") # "watchlist with additions/substitutions" 2018/11/10
    # cyclical investing / swing speculation / business cycle / utilities / additions-substitutions watch lists
    #list_excel_file = [excel_file01,excel_file02,excel_file03,excel_file04]
    list_excel_file = [excel_file01,excel_file02,excel_file03,excel_file04,excel_file05]
    # Test class by excelRW.py
    # read each Excel file content to get stock idx and name
    localexcelrw = excelrw.ExcelRW()
    # get all stock's idx and name from list_excel_file
    '''
    .
    .
    ['9937.0', '全國']
    ['9940.0', '信義']
    ['9941.0', '裕融']
    ['9942.0', '茂順']
    ['9943.0', '好樂迪']
    ['4126.0', '太醫']
    356
    '''
    # list_all_stockidxname=localexcelrw.get_all_stockidxname_SeymourExcel(dirnamelog,list_excel_file)
    #for stockidxname in list_all_stockidxname:
    #    print(stockidxname)
    #print(len(list_all_stockidxname))
    # 2018/10/31: remarked because jpg files are purged in def plotCandlestickandMA()
    # Delete previous candle stick jpg files.
    ###############################
    str_candlestick_filepath=os.path.join(dirnamelog,str_candlestick_weekly_subfolder)
    localgoogle_drive = google_drive.GoogleCloudDrive(str_candlestick_filepath)
    re_exp = r'\.jpg$'
    #localgoogle_drive.purgelocalfiles(re_exp)
    # Path to the SQLite database holding daily TWSE/OTC trade data.
    path_db = os.path.join(dirnamelog,'TWTSEOTCDaily.db')
    ###############################
    # execute file1: "cyclical speculation watchlist"
    ###############################
    list_excel_Seymour = [excel_file01]
    list_stkidx_call_file01 = stkidx_call_file01.split(',')
    list_stkidx_put_file01 = stkidx_put_file01.split(',')
    debug_verbose ='OFF'
    # get all stock's idx and name from file1 ("cyclical speculation watchlist")
    #list_all_stockidxname=localexcelrw.get_all_stockidxname_SeymourExcel(dirnamelog,list_excel_Seymour)
    '''
    9921 巨大
    9927 泰銘
    9939 宏全
    9945 潤泰新
    '''
    #for list_stockidxname in list_all_stockidxname:
    # 20190721 cause StkIdx:1210.0, value ratio:38.16
    # str-->float-->int-->str; '1210.0'-->1210.0-->1210-->'1210'
    # str(int(float(list_row_value[0])))
    #    stock_idx= str(int(float(list_stockidxname[0])))
    #    stock_name= list_stockidxname[1]
    #print(stock_idx,stock_name)
    # get daily trade info from sqlite DB
    #    local_pdSqlA = data_analysis.PandasSqliteAnalysis(stock_idx,dirnamelog,path_db,str_first_year_month_day,debug_verbose)
    # Single hard-coded ticker for this run; the loop above is remarked out.
    stock_idx= '1788'
    local_pdSqlA = data_analysis.PandasSqliteAnalysis(stock_idx,dirnamelog,path_db,str_first_year_month_day,debug_verbose)
    '''
    date open high low close stkidx CmpName
    241 2018-10-01 100.50 100.50 99.90 100.00 1788 杏昌
    242 2018-10-02 100.00 100.00 100.00 100.00 1788 杏昌
    243 2018-10-03 100.50 100.50 99.90 99.90 1788 杏昌
    .. ... ... ... ... ... ... ...
    479 2019-10-01 104.00 104.00 102.50 103.00 1788 杏昌
    480 2019-10-02 104.00 104.00 103.00 103.50 1788 杏昌
    [240 rows x 7 columns]
    '''
    # How to get the last N rows of a pandas DataFrame?
    # https://stackoverflow.com/questions/14663004/how-to-get-the-last-n-rows-of-a-pandas-dataframe
    #print(local_pdSqlA.df.iloc[-240:])
    '''
    date close
    241 2018-10-01 100.00
    242 2018-10-02 100.00
    .. ... ...
    479 2019-10-01 103.00
    480 2019-10-02 103.50
    [240 rows x 2 columns]
    '''
    #print(local_pdSqlA.df[['date','close']].iloc[-240:])
    item = local_pdSqlA.df[['date','close']].copy()
    # Calculate 30 Day Moving Average, Std Deviation, Upper Band and Lower Band
    # NOTE(review): columns are named "30Days_*" but the rolling window is 20 --
    # confirm which is intended.
    item['30Days_MA'] = item['close'].rolling(window=20).mean()
    # set .std(ddof=0) for population std instead of sample
    item['30Days_STD'] = item['close'].rolling(window=20).std()
    item['Upper_Band'] = item['30Days_MA'] + (item['30Days_STD'] * 2)
    item['Lower_Band'] = item['30Days_MA'] - (item['30Days_STD'] * 2)
    '''
    date close 30Days_MA 30Days-STD Upper_Band Lower_Band
    241 2018-10-01 100.00 99.700 0.284697 100.269395 99.130605
    242 2018-10-02 100.00 99.720 0.291277 100.302553 99.137447
    .. ... ... ... ... ... ...
    479 2019-10-01 103.00 102.850 0.587143 104.024286 101.675714
    480 2019-10-02 103.50 102.875 0.604261 104.083522 101.666478
    [240 rows x 6 columns]
    '''
    #print(item.iloc[-240:])
    # Keep only the most recent 100 rows for plotting.
    item = item.iloc[-100:].copy()
    # Simple 30 Day Bollinger Band
    # set style, empty figure and axes
    plt.style.use('fivethirtyeight')
    #fig = plt.figure(figsize=(12,6))
    f1, ax = plt.subplots(figsize = (12,6))
    # Get index values for the X axis for facebook DataFrame
    x_axis = item.index.get_level_values(0)
    print(x_axis)
    # Plot Adjust Closing Price and Moving Averages
    ax.plot(x_axis, item['close'], color='blue', label = 'Close')
    ax.plot(x_axis, item['30Days_MA'], color='black', lw=2)
    # Plot shaded 21 Day Bollinger Band for Facebook
    #ax.fill_between(item['date'], item['Upper_Band'], item['Lower_Band'], color='grey')
    #plt.grid(True)
    plt.title(stock_idx)
    ax.yaxis.grid(True)
    plt.legend(loc='best')
    ax.xaxis_date()
    ax.autoscale_view()
    ax.grid()
    plt.show()
    '''
    f1, ax = plt.subplots(figsize = (12,6))
    # Plotting Close
    ax.plot(local_pdSqlA.df.iloc[-240:]['date'], local_pdSqlA.df.iloc[-240:]['close'], color = list_color_ma[0], label = 'Close')
    ax.set_xlabel('date')
    ax.set_ylabel('close price')
    #plt.grid(True)
    plt.title(stock_idx)
    ax.yaxis.grid(True)
    plt.legend(loc='best')
    ax.xaxis_date()
    ax.autoscale_view()
    ax.grid()
    plt.show()
    '''
| [
"sys.path.append",
"googleDrive.GoogleDrivebyFileID",
"matplotlib.pyplot.style.use",
"os.path.split",
"os.path.isdir",
"excelRW.ExcelRW",
"os.path.dirname",
"dataAnalysis.PandasSqliteAnalysis",
"readConfig.ReadConfig",
"matplotlib.pyplot.title",
"httplib2.Http",
"matplotlib.pyplot.legend",
"... | [((1682, 1707), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1697, 1707), False, 'import time, os, sys, re\n'), ((1719, 1746), 'os.path.dirname', 'os.path.dirname', (['strabspath'], {}), '(strabspath)\n', (1734, 1746), False, 'import time, os, sys, re\n'), ((1757, 1782), 'os.path.split', 'os.path.split', (['strdirname'], {}), '(strdirname)\n', (1770, 1782), False, 'import time, os, sys, re\n'), ((1819, 1851), 'os.path.join', 'os.path.join', (['prevdirname', '"""lib"""'], {}), "(prevdirname, 'lib')\n", (1831, 1851), False, 'import time, os, sys, re\n'), ((1862, 1895), 'os.path.join', 'os.path.join', (['prevdirname', '"""logs"""'], {}), "(prevdirname, 'logs')\n", (1874, 1895), False, 'import time, os, sys, re\n'), ((1975, 2002), 'sys.path.append', 'sys.path.append', (['dirnamelib'], {}), '(dirnamelib)\n', (1990, 2002), False, 'import time, os, sys, re\n'), ((1927, 1948), 'os.path.isdir', 'os.path.isdir', (['"""logs"""'], {}), "('logs')\n", (1940, 1948), False, 'import time, os, sys, re\n'), ((1954, 1973), 'os.makedirs', 'os.makedirs', (['"""logs"""'], {}), "('logs')\n", (1965, 1973), False, 'import time, os, sys, re\n'), ((2228, 2243), 'pandas.Series', 'pd.Series', (['data'], {}), '(data)\n', (2237, 2243), True, 'import pandas as pd\n'), ((2521, 2546), 'numpy.argwhere', 'np.argwhere', (['(data < lower)'], {}), '(data < lower)\n', (2532, 2546), True, 'import numpy as np\n'), ((2558, 2583), 'numpy.argwhere', 'np.argwhere', (['(data > upper)'], {}), '(data > upper)\n', (2569, 2583), True, 'import numpy as np\n'), ((2653, 2691), 'os.path.join', 'os.path.join', (['strdirname', '"""config.ini"""'], {}), "(strdirname, 'config.ini')\n", (2665, 2691), False, 'import time, os, sys, re\n'), ((2713, 2746), 'readConfig.ReadConfig', 'readConfig.ReadConfig', (['configPath'], {}), '(configPath)\n', (2734, 2746), True, 'import readConfig as readConfig\n'), ((4992, 5012), 'lxml.html.fromstring', 'html.fromstring', (['raw'], {}), '(raw)\n', (5007, 
5012), False, 'from lxml import html\n'), ((5035, 5046), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5044, 5046), False, 'import time, os, sys, re\n'), ((5072, 5123), 'googleDrive.GoogleDrivebyFileID', 'google_drive.GoogleDrivebyFileID', (['dirnamelog', 'flags'], {}), '(dirnamelog, flags)\n', (5104, 5123), True, 'import googleDrive as google_drive\n'), ((5255, 5296), 'apiclient.discovery.build', 'discovery.build', (['"""drive"""', '"""v3"""'], {'http': 'http'}), "('drive', 'v3', http=http)\n", (5270, 5296), False, 'from apiclient import discovery\n'), ((6575, 6592), 'excelRW.ExcelRW', 'excelrw.ExcelRW', ([], {}), '()\n', (6590, 6592), True, 'import excelRW as excelrw\n'), ((7219, 7277), 'os.path.join', 'os.path.join', (['dirnamelog', 'str_candlestick_weekly_subfolder'], {}), '(dirnamelog, str_candlestick_weekly_subfolder)\n', (7231, 7277), False, 'import time, os, sys, re\n'), ((7301, 7356), 'googleDrive.GoogleCloudDrive', 'google_drive.GoogleCloudDrive', (['str_candlestick_filepath'], {}), '(str_candlestick_filepath)\n', (7330, 7356), True, 'import googleDrive as google_drive\n'), ((7485, 7529), 'os.path.join', 'os.path.join', (['dirnamelog', '"""TWTSEOTCDaily.db"""'], {}), "(dirnamelog, 'TWTSEOTCDaily.db')\n", (7497, 7529), False, 'import time, os, sys, re\n'), ((8647, 8758), 'dataAnalysis.PandasSqliteAnalysis', 'data_analysis.PandasSqliteAnalysis', (['stock_idx', 'dirnamelog', 'path_db', 'str_first_year_month_day', 'debug_verbose'], {}), '(stock_idx, dirnamelog, path_db,\n str_first_year_month_day, debug_verbose)\n', (8681, 8758), True, 'import dataAnalysis as data_analysis\n'), ((10858, 10890), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (10871, 10890), True, 'import matplotlib.pyplot as plt\n'), ((10942, 10971), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (10954, 10971), True, 'import matplotlib.pyplot as plt\n'), ((11452, 11472), 
'matplotlib.pyplot.title', 'plt.title', (['stock_idx'], {}), '(stock_idx)\n', (11461, 11472), True, 'import matplotlib.pyplot as plt\n'), ((11501, 11523), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (11511, 11523), True, 'import matplotlib.pyplot as plt\n'), ((11587, 11597), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11595, 11597), True, 'import matplotlib.pyplot as plt\n'), ((5224, 5239), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (5237, 5239), False, 'import httplib2\n'), ((2326, 2353), 'pandas.Series', 'pd.Series', (['(ma - alpha * std)'], {}), '(ma - alpha * std)\n', (2335, 2353), True, 'import pandas as pd\n'), ((2394, 2421), 'pandas.Series', 'pd.Series', (['(ma + alpha * std)'], {}), '(ma + alpha * std)\n', (2403, 2421), True, 'import pandas as pd\n')] |
from aiida.engine import CalcJob
from aiida.common.exceptions import ValidationError
from aiida.common import CalcInfo, CodeInfo
from aiida.orm import StructureData, Dict
from aiida.plugins import DataFactory
from aiida_lammps.common.utils import convert_date_string
from aiida_lammps.common.generate_structure import generate_lammps_structure
from aiida_lammps.data.potential import EmpiricalPotential
import six
import numpy as np
def get_supercell(structure, supercell_shape):
    """Build a supercell ``StructureData`` by tiling *structure* along each
    lattice vector according to *supercell_shape* (a node whose
    ``.dict.shape`` holds the three repetition counts)."""
    import itertools
    kind_names = np.array([site.kind_name for site in structure.sites])
    coords = np.array([site.position for site in structure.sites])
    lattice = np.array(structure.cell)
    shape = np.array(supercell_shape.dict.shape)
    # Scale each lattice vector by its repetition count.
    supercell = StructureData(cell=np.dot(lattice, np.diag(shape)))
    for idx in range(coords.shape[0]):
        # Enumerate every integer offset combination inside the supercell.
        for offsets in itertools.product(*[range(m) for m in shape[::-1]]):
            shift = np.dot(np.array(offsets[::-1]), lattice)
            supercell.append_atom(position=coords[idx, :] + shift,
                                  symbols=kind_names[idx])
    return supercell
def get_FORCE_CONSTANTS_txt(force_constants):
    """Render the ``force_constants`` array of an array-data node in the
    phonopy FORCE_CONSTANTS text format (header, per-pair index line, then
    one 3-component row per line)."""
    fc = force_constants.get_array('force_constants')
    n_i, n_j = fc.shape[0], fc.shape[1]
    lines = ["%4d\n" % n_i]
    for i in range(n_i):
        for j in range(n_j):
            # 1-based atom-pair indices, as phonopy expects.
            lines.append("%4d%4d\n" % (i + 1, j + 1))
            lines.extend(("%22.15f" * 3 + "\n") % tuple(row) for row in fc[i][j])
    return ''.join(lines)
def structure_to_poscar(structure):
    """Serialise a structure node into VASP POSCAR text (Cartesian positions).

    Kind names are listed in sorted-unique order; per-kind counts are derived
    from the first-occurrence indices, which assumes sites are grouped by kind.
    """
    kind_names = [site.kind_name for site in structure.sites]
    first_indices = np.unique(kind_names, return_index=True)[1]
    counts = np.diff(np.append(first_indices, [len(structure.sites)]))
    unique_kinds = ' '.join(np.unique(kind_names))
    row_fmt = '{0: 22.16f} {1: 22.16f} {2: 22.16f}\n'
    parts = [unique_kinds, '\n1.0\n']
    for vec in structure.cell:
        parts.append(row_fmt.format(*vec))
    parts.append(unique_kinds + '\n')
    parts.append(' '.join(np.array(counts, dtype=str)) + '\n')
    parts.append('Cartesian\n')
    for site in structure.sites:
        parts.append(row_fmt.format(*site.position))
    return ''.join(parts)
def parameters_to_input_file(parameters_object):
    """Render the dynaphopy-style input file text from a parameters node
    carrying 3x3 'primitive' and 'supercell' matrices."""
    parameters = parameters_object.get_dict()

    def matrix_block(key):
        # One "a b c \n" line per matrix row.
        rows = np.array(parameters[key])
        return ''.join(('{} {} {} \n').format(*rows[r]) for r in range(3))

    input_file = ('STRUCTURE FILE POSCAR\nPOSCAR\n\n')
    input_file += ('FORCE CONSTANTS\nFORCE_CONSTANTS\n\n')
    input_file += ('PRIMITIVE MATRIX\n')
    input_file += matrix_block('primitive')
    input_file += ('\n')
    input_file += ('SUPERCELL MATRIX PHONOPY\n')
    input_file += matrix_block('supercell')
    input_file += ('\n')
    return input_file
def generate_LAMMPS_potential(pair_style):
    """Render a LAMMPS potential file from the key/value pairs stored on the
    pair-style node.

    :param pair_style: node whose ``.dict.data`` attribute maps
        potential-file keys to values
    :return: the potential file contents as a string, one "key value" line
        per entry after a citation-reminder header
    """
    potential_file = '# Potential file generated by aiida plugin (please check citation in the orignal file)\n'
    # BUG FIX: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3 (this module already uses `six` and Python-3 style elsewhere);
    # .items() works on both.
    for key, value in pair_style.dict.data.items():
        potential_file += '{} {}\n'.format(key, value)
    return potential_file
class BaseLammpsCalculation(CalcJob):
    """
    A basic plugin for calculating force constants using Lammps.
    Requirement: the node should be able to import phonopy
    """
    # File names used inside the calculation's working directory.
    _INPUT_FILE_NAME = 'input.in'
    _INPUT_STRUCTURE = 'input.data'
    _DEFAULT_OUTPUT_FILE_NAME = 'log.lammps'
    _DEFAULT_TRAJECTORY_FILE_NAME = 'trajectory.lammpstrj'
    _DEFAULT_OUTPUT_INFO_FILE_NAME = "system_info.dump"
    _DEFAULT_OUTPUT_RESTART_FILE_NAME = 'lammps.restart'
    # Extra retrieval entries; subclasses may extend these lists.
    _retrieve_list = []
    _retrieve_temporary_list = []
    # LAMMPS is driven with "-in <input file>" on the command line.
    _cmdline_params = ['-in', _INPUT_FILE_NAME]
    _stdout_name = None

    @classmethod
    def define(cls, spec):
        """Declare inputs, outputs and exit codes on top of the base CalcJob spec."""
        super(BaseLammpsCalculation, cls).define(spec)
        spec.input('structure', valid_type=StructureData, help='the structure')
        spec.input('potential', valid_type=EmpiricalPotential,
                   help='lammps potential')
        spec.input('parameters', valid_type=Dict,
                   help='the parameters', required=False)
        spec.input('metadata.options.cell_transform_filename',
                   valid_type=six.string_types, default="cell_transform.npy")
        spec.input('metadata.options.output_filename',
                   valid_type=six.string_types, default=cls._DEFAULT_OUTPUT_FILE_NAME)
        spec.input('metadata.options.trajectory_name',
                   valid_type=six.string_types, default=cls._DEFAULT_TRAJECTORY_FILE_NAME)
        spec.input('metadata.options.info_filename',
                   valid_type=six.string_types, default=cls._DEFAULT_OUTPUT_INFO_FILE_NAME)
        spec.input('metadata.options.restart_filename',
                   valid_type=six.string_types, default=cls._DEFAULT_OUTPUT_RESTART_FILE_NAME)
        spec.output('output_parameters',
                    valid_type=Dict,
                    required=True,
                    help='the data extracted from the main output file')
        spec.default_output_node = 'output_parameters'
        # TODO review aiidateam/aiida_core#2997, when closed, for exit code formalization
        # Unrecoverable errors: resources like the retrieved folder or its expected contents are missing
        spec.exit_code(
            200, 'ERROR_NO_RETRIEVED_FOLDER',
            message='The retrieved folder data node could not be accessed.')
        spec.exit_code(
            201, 'ERROR_NO_RETRIEVED_TEMP_FOLDER',
            message='The retrieved temporary folder data node could not be accessed.')
        spec.exit_code(
            202, 'ERROR_LOG_FILE_MISSING',
            message='the main log output file was not found')
        spec.exit_code(
            203, 'ERROR_TRAJ_FILE_MISSING',
            message='the trajectory output file was not found')
        spec.exit_code(
            204, 'ERROR_STDOUT_FILE_MISSING',
            message='the stdout output file was not found')
        spec.exit_code(
            205, 'ERROR_STDERR_FILE_MISSING',
            message='the stderr output file was not found')
        # Unrecoverable errors: required retrieved files could not be read, parsed or are otherwise incomplete
        spec.exit_code(
            300, 'ERROR_LOG_PARSING',
            message=('An error was flagged trying to parse the '
                     'main lammps output log file'))
        spec.exit_code(
            310, 'ERROR_TRAJ_PARSING',
            message=('An error was flagged trying to parse the '
                     'trajectory output file'))
        spec.exit_code(
            320, 'ERROR_INFO_PARSING',
            message=('An error was flagged trying to parse the '
                     'system info output file'))
        # Significant errors but calculation can be used to restart
        spec.exit_code(
            400, 'ERROR_LAMMPS_RUN',
            message='The main lammps output file flagged an error')

    def validate_parameters(self, param_data, potential_object):
        # Hook for subclasses: validate the parameters Dict against the potential.
        return True

    def prepare_extra_files(self, tempfolder, potential_object):
        # Hook for subclasses: write any additional input files into tempfolder.
        return True

    def prepare_for_submission(self, tempfolder):
        """Create the input files from the input nodes passed to this instance of the `CalcJob`.

        :param tempfolder: an `aiida.common.folders.Folder` to temporarily write files on disk
        :return: `aiida.common.CalcInfo` instance
        """
        # assert that the potential and structure have the same kind elements
        if [k.symbol for k in self.inputs.structure.kinds] != self.inputs.potential.kind_elements:
            raise ValidationError("the structure and potential are not compatible (different kind elements)")
        # Setup potential
        potential_txt = self.inputs.potential.get_potential_file()
        # Setup structure
        structure_txt, struct_transform = generate_lammps_structure(
            self.inputs.structure, self.inputs.potential.atom_style)
        # Persist the cell transformation so the parser can map results back
        # to the original input cell.
        with open(tempfolder.get_abs_path(self.options.cell_transform_filename), 'w+b') as handle:
            np.save(handle, struct_transform)
        if "parameters" in self.inputs:
            parameters = self.inputs.parameters
        else:
            # 'parameters' is optional; fall back to an empty Dict.
            parameters = Dict()
        pdict = parameters.get_dict()
        # Check lammps version date in parameters
        lammps_date = convert_date_string(
            pdict.get("lammps_version", '11 Aug 2017'))
        # Setup input parameters.
        # NOTE(review): _generate_input_function is expected to be provided by
        # subclasses -- it is not defined on this base class.
        input_txt = self._generate_input_function(
            parameters=parameters,
            potential_obj=self.inputs.potential,
            structure_filename=self._INPUT_STRUCTURE,
            trajectory_filename=self.options.trajectory_name,
            info_filename=self.options.info_filename,
            restart_filename=self.options.restart_filename,
            add_thermo_keywords=pdict.get("thermo_keywords", []),
            version_date=lammps_date)
        input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
        with open(input_filename, 'w') as infile:
            infile.write(input_txt)
        self.validate_parameters(parameters, self.inputs.potential)
        # prepare extra files if needed
        self.prepare_extra_files(tempfolder, self.inputs.potential)
        # =========================== dump to file =============================
        structure_filename = tempfolder.get_abs_path(self._INPUT_STRUCTURE)
        with open(structure_filename, 'w') as infile:
            infile.write(structure_txt)
        if potential_txt is not None:
            potential_filename = tempfolder.get_abs_path(
                self.inputs.potential.potential_filename)
            with open(potential_filename, 'w') as infile:
                infile.write(potential_txt)
        # ============================ calcinfo ================================
        codeinfo = CodeInfo()
        codeinfo.cmdline_params = self._cmdline_params
        codeinfo.code_uuid = self.inputs.code.uuid
        codeinfo.withmpi = False  # Set lammps openmpi environment properly
        codeinfo.stdout_name = self._stdout_name
        calcinfo = CalcInfo()
        calcinfo.uuid = self.uuid
        # Always retrieve the log and the saved cell transformation.
        calcinfo.retrieve_list = self._retrieve_list + [
            self.options.output_filename,
            self.options.cell_transform_filename]
        calcinfo.retrieve_temporary_list = self._retrieve_temporary_list
        calcinfo.codes_info = [codeinfo]
        return calcinfo
| [
"aiida.orm.Dict",
"aiida.common.exceptions.ValidationError",
"aiida_lammps.common.generate_structure.generate_lammps_structure",
"numpy.unique",
"aiida.common.CalcInfo",
"numpy.diag",
"numpy.array",
"aiida.orm.StructureData",
"aiida.common.CodeInfo",
"numpy.save"
] | [((519, 573), 'numpy.array', 'np.array', (['[site.kind_name for site in structure.sites]'], {}), '([site.kind_name for site in structure.sites])\n', (527, 573), True, 'import numpy as np\n'), ((590, 643), 'numpy.array', 'np.array', (['[site.position for site in structure.sites]'], {}), '([site.position for site in structure.sites])\n', (598, 643), True, 'import numpy as np\n'), ((655, 679), 'numpy.array', 'np.array', (['structure.cell'], {}), '(structure.cell)\n', (663, 679), True, 'import numpy as np\n'), ((702, 738), 'numpy.array', 'np.array', (['supercell_shape.dict.shape'], {}), '(supercell_shape.dict.shape)\n', (710, 738), True, 'import numpy as np\n'), ((818, 853), 'aiida.orm.StructureData', 'StructureData', ([], {'cell': 'supercell_array'}), '(cell=supercell_array)\n', (831, 853), False, 'from aiida.orm import StructureData, Dict\n'), ((775, 799), 'numpy.diag', 'np.diag', (['supercell_shape'], {}), '(supercell_shape)\n', (782, 799), True, 'import numpy as np\n'), ((1670, 1744), 'numpy.unique', 'np.unique', (['[site.kind_name for site in structure.sites]'], {'return_index': '(True)'}), '([site.kind_name for site in structure.sites], return_index=True)\n', (1679, 1744), True, 'import numpy as np\n'), ((1854, 1909), 'numpy.unique', 'np.unique', (['[site.kind_name for site in structure.sites]'], {}), '([site.kind_name for site in structure.sites])\n', (1863, 1909), True, 'import numpy as np\n'), ((8288, 8375), 'aiida_lammps.common.generate_structure.generate_lammps_structure', 'generate_lammps_structure', (['self.inputs.structure', 'self.inputs.potential.atom_style'], {}), '(self.inputs.structure, self.inputs.potential.\n atom_style)\n', (8313, 8375), False, 'from aiida_lammps.common.generate_structure import generate_lammps_structure\n'), ((10306, 10316), 'aiida.common.CodeInfo', 'CodeInfo', ([], {}), '()\n', (10314, 10316), False, 'from aiida.common import CalcInfo, CodeInfo\n'), ((10568, 10578), 'aiida.common.CalcInfo', 'CalcInfo', ([], {}), '()\n', 
(10576, 10578), False, 'from aiida.common import CalcInfo, CodeInfo\n'), ((2076, 2131), 'numpy.unique', 'np.unique', (['[site.kind_name for site in structure.sites]'], {}), '([site.kind_name for site in structure.sites])\n', (2085, 2131), True, 'import numpy as np\n'), ((2163, 2190), 'numpy.array', 'np.array', (['labels'], {'dtype': 'str'}), '(labels, dtype=str)\n', (2171, 2190), True, 'import numpy as np\n'), ((8033, 8129), 'aiida.common.exceptions.ValidationError', 'ValidationError', (['"""the structure and potential are not compatible (different kind elements)"""'], {}), "(\n 'the structure and potential are not compatible (different kind elements)')\n", (8048, 8129), False, 'from aiida.common.exceptions import ValidationError\n'), ((8496, 8529), 'numpy.save', 'np.save', (['handle', 'struct_transform'], {}), '(handle, struct_transform)\n', (8503, 8529), True, 'import numpy as np\n'), ((8658, 8664), 'aiida.orm.Dict', 'Dict', ([], {}), '()\n', (8662, 8664), False, 'from aiida.orm import StructureData, Dict\n'), ((2668, 2701), 'numpy.array', 'np.array', (["parameters['primitive']"], {}), "(parameters['primitive'])\n", (2676, 2701), True, 'import numpy as np\n'), ((2748, 2781), 'numpy.array', 'np.array', (["parameters['primitive']"], {}), "(parameters['primitive'])\n", (2756, 2781), True, 'import numpy as np\n'), ((2828, 2861), 'numpy.array', 'np.array', (["parameters['primitive']"], {}), "(parameters['primitive'])\n", (2836, 2861), True, 'import numpy as np\n'), ((2982, 3015), 'numpy.array', 'np.array', (["parameters['supercell']"], {}), "(parameters['supercell'])\n", (2990, 3015), True, 'import numpy as np\n'), ((3062, 3095), 'numpy.array', 'np.array', (["parameters['supercell']"], {}), "(parameters['supercell'])\n", (3070, 3095), True, 'import numpy as np\n'), ((3142, 3175), 'numpy.array', 'np.array', (["parameters['supercell']"], {}), "(parameters['supercell'])\n", (3150, 3175), True, 'import numpy as np\n'), ((1022, 1039), 'numpy.array', 'np.array', 
(['r[::-1]'], {}), '(r[::-1])\n', (1030, 1039), True, 'import numpy as np\n')] |
# Title: util.py
# Description: Various utilities useful for online PSP tests
# Author: <NAME> (<EMAIL>) and <NAME> (<EMAIL>)
##############################
# Imports
import numpy as np
from scipy.io import loadmat
from sklearn.decomposition import PCA
##############################
def compute_errors(error_options, Uhat, t, errs):
    """
    Evaluate every registered error function on the current subspace estimate
    and store the results at index *t* (every ``n_skip`` iterations only).

    Parameters:
    ====================
    error_options -- A struct of error options (keys 'n_skip' and
                     'error_func_list', a list of (name, callable) pairs)
    Uhat          -- The approximation Uhat of an orthonormal basis for the
                     PCA subspace of size D by K
    t             -- The current iteration index
    errs          -- An output dict in which to put the computed errs
    """
    # Only record every n_skip-th iteration.
    if t % error_options['n_skip']:
        return
    # The previous enumerate() index was unused; iterate the pairs directly.
    for fname, f in error_options['error_func_list']:
        errs[fname][t] = f(Uhat)
def initialize_errors(error_options, n_its):
    """Allocate one zeroed error trace of length *n_its* per registered
    error function, keyed by function name."""
    names = (fun_name for fun_name, _ in error_options['error_func_list'])
    return {name: np.zeros(n_its) for name in names}
def subspace_error(Uhat, U, relative_error_flag=True):
    """
    Frobenius-norm distance between the projector onto span(Uhat) and the
    projector onto span(U).

    Parameters:
    ====================
    Uhat -- The approximation Uhat of an orthonormal basis for the PCA
            subspace of size D by K
    U    -- An orthonormal basis for the PCA subspace of size D by K
    relative_error_flag -- Scale the error by sqrt(K) to make it relative

    Output:
    ====================
    err -- the (relative) Frobenius norm error
    """
    K = U.shape[1]
    cross = Uhat.T.dot(U)
    gram = Uhat.T.dot(Uhat)
    # ||P_Uhat - P_U||_F^2 = K + tr(G G) - 2 tr(C C^T) with U orthonormal.
    squared = K + np.trace(gram.dot(gram)) - 2 * np.trace(cross.dot(cross.T))
    err = np.sqrt(squared)
    if relative_error_flag:
        err = err / np.sqrt(K)
    return err
def load_dataset(dataset_name, return_U=True, K=None):
    '''
    Load a .mat dataset (expects a samples-by-features matrix under key 'fea'),
    mean-centre it, and optionally compute its PCA basis.

    Parameters
    ----------
    dataset_name: str
        path of the .mat file to load
    return_U: bool
        whether to also compute the eigenvector matrix (via sklearn PCA)
    K: int or None
        number of principal components; defaults to the feature count

    Returns
    -------
    X: ndarray
        mean-centred samples, transposed to features-by-samples
    U: ndarray
        ground truth eigenvectors (0 when return_U is False)
    lam: ndarray
        ground truth eigenvalues (0 when return_U is False)
    '''
    ld = loadmat(dataset_name)
    fea = ld['fea']
    # BUG FIX: np.float was a deprecated alias for the builtin float and was
    # removed in NumPy 1.24; use float directly.
    X = fea.astype(float)
    X -= X.mean(0)[None, :]
    if return_U:
        if K is None:
            K = X.shape[-1]
        pca = PCA(n_components=K, svd_solver='arpack')
        pca.fit(X)
        U = pca.components_.T
        lam = pca.explained_variance_
        X = X.T
    else:
        U = 0
        lam = 0
        X = X.T
    return X, U, lam
def get_scale_data_factor(X):
    '''Scaling for convergence reasons.

    Parameters
    ----------
    X : ndarray
        Data matrix with one sample per column.

    Returns
    -------
    The factor 1 / (mean column norm); multiplying X by it makes the
    average sample norm equal to one.
    '''
    sample_norms = np.sqrt((X ** 2).sum(axis=0))
    return 1 / sample_norms.mean()
def generate_samples(K=None, N=None, D=None, method='spiked_covariance', options=None, scale_data=True,
                     sample_with_replacement=False, shuffle=False, return_scaling=False):
    '''
    Parameters
    ----------
    D: int or None
        number of features
    K: int
        number of components
    N: int or 'auto'
        number of samples, if 'auto' it will return all the samples from real data datasets
    method: str
        so far 'spiked_covariance' or 'real_data'
    options: dict
        specific of each method (see code)
    scale_data: bool
        scaling data so that average sample norm is one
    sample_with_replacement: bool
        whether to allow wrap-around sampling when N exceeds the dataset size
    shuffle: bool
        whether to shuffle the data or not
    return_scaling: bool
        true if we want to get two additional output arguments, the centering and scaling
    Returns
    -------
    X: ndarray
        generated samples
    U: ndarray
        ground truth eigenvectors
    sigma2: ndarray
        ground truth eigenvalues
    avg: ndarray
        mean of X (only sometimes returned)
    scale_factor: float
        the amount by which the data was scaled (only sometimes returned)
    '''
    # Generate synthetic data samples from a specified model or load real datasets
    # here making sure that we use the right n when including n_test frames
    if method == 'spiked_covariance':
        if N == 'auto':
            raise ValueError('N cannot be "auto" for spiked_covariance model')
        if options is None:
            # Default spiked-covariance parameters.
            options = {
                'lambda_K': 5e-1,
                'normalize': True,
                'rho': 1e-2 / 5,
                'return_U': True
            }
        return_U = options['return_U']
        if N is None or D is None:
            raise Exception('Spiked covariance requires parameters N and D')
        rho = options['rho']
        normalize = options['normalize']
        if normalize:
            # Eigenvalue profile decays linearly from 1 down to lambda_K.
            lambda_K = options['lambda_K']
            sigma = np.sqrt(np.linspace(1, lambda_K, K))
        else:
            # Spectrum parameterized by 'slope' and 'gap' instead.
            slope = options['slope']
            gap = options['gap']
            sigma = np.sqrt(gap + slope * np.arange(K - 1, -1, -1))
        # Random orthonormal basis (QR of a Gaussian matrix) spans the planted subspace.
        U, _ = np.linalg.qr(np.random.normal(0, 1, (D, K)))
        w = np.random.normal(0, 1, (K, N))
        # Isotropic noise floor of variance rho, plus the planted subspace signal.
        X = np.sqrt(rho) * np.random.normal(0, 1, (D, N))
        X += U.dot((w.T * sigma).T)
        sigma2 = (sigma ** 2)[:, np.newaxis]
    elif method == 'real_data':
        if options is None:
            options = {
                'filename': './datasets/MNIST.mat',
                'return_U': True
            }
        return_U = options['return_U']
        filename = options['filename']
        X, U, sigma2 = load_dataset(filename, return_U=return_U, K=K)
        if N != 'auto':
            if N > X.shape[-1]:
                if sample_with_replacement:
                    print('** Warning: You are sampling real data with replacement')
                else:
                    # NOTE(review): this message reads backwards -- it fires when
                    # replacement would be required but the flag is False.
                    raise Exception("You are sampling real data with replacement "
                                    "but sample_with_replacement flag is set to False")
            # Take the first N samples, wrapping around (replacement) if N exceeds the dataset.
            X = X[:, np.arange(N) % X.shape[-1]]
    else:
        assert 0, 'Specified method for data generation is not yet implemented!'
    # center data
    avg = X.mean(1)[:, None]
    X -= avg
    if scale_data:
        scale_factor = get_scale_data_factor(X)
        X, U, sigma2 = X * scale_factor, U, sigma2 * (scale_factor ** 2)
    else:
        scale_factor = 1
    if shuffle:
        print('Shuffling data!')
        X = X[:,np.random.permutation(X.shape[-1])]
    if return_scaling:
        if return_U:
            return X, U, sigma2, avg, scale_factor
        else:
            return X, avg, scale_factor
    else:
        if return_U:
            return X, U, sigma2
        else:
            return X
| [
"numpy.random.normal",
"numpy.sqrt",
"sklearn.decomposition.PCA",
"scipy.io.loadmat",
"numpy.sum",
"numpy.zeros",
"numpy.linspace",
"numpy.arange",
"numpy.random.permutation"
] | [((2156, 2177), 'scipy.io.loadmat', 'loadmat', (['dataset_name'], {}), '(dataset_name)\n', (2163, 2177), False, 'from scipy.io import loadmat\n'), ((988, 1003), 'numpy.zeros', 'np.zeros', (['n_its'], {}), '(n_its)\n', (996, 1003), True, 'import numpy as np\n'), ((2338, 2378), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'K', 'svd_solver': '"""arpack"""'}), "(n_components=K, svd_solver='arpack')\n", (2341, 2378), False, 'from sklearn.decomposition import PCA\n'), ((5135, 5165), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(K, N)'], {}), '(0, 1, (K, N))\n', (5151, 5165), True, 'import numpy as np\n'), ((1704, 1714), 'numpy.sqrt', 'np.sqrt', (['K'], {}), '(K)\n', (1711, 1714), True, 'import numpy as np\n'), ((2760, 2777), 'numpy.sum', 'np.sum', (['(X ** 2)', '(0)'], {}), '(X ** 2, 0)\n', (2766, 2777), True, 'import numpy as np\n'), ((5090, 5120), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(D, K)'], {}), '(0, 1, (D, K))\n', (5106, 5120), True, 'import numpy as np\n'), ((5178, 5190), 'numpy.sqrt', 'np.sqrt', (['rho'], {}), '(rho)\n', (5185, 5190), True, 'import numpy as np\n'), ((5193, 5223), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(D, N)'], {}), '(0, 1, (D, N))\n', (5209, 5223), True, 'import numpy as np\n'), ((4880, 4907), 'numpy.linspace', 'np.linspace', (['(1)', 'lambda_K', 'K'], {}), '(1, lambda_K, K)\n', (4891, 4907), True, 'import numpy as np\n'), ((6462, 6496), 'numpy.random.permutation', 'np.random.permutation', (['X.shape[-1]'], {}), '(X.shape[-1])\n', (6483, 6496), True, 'import numpy as np\n'), ((5035, 5059), 'numpy.arange', 'np.arange', (['(K - 1)', '(-1)', '(-1)'], {}), '(K - 1, -1, -1)\n', (5044, 5059), True, 'import numpy as np\n'), ((6040, 6052), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (6049, 6052), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Python Vamp Host
# Copyright (c) 2008-2015 <NAME>, University of London
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the names of the Centre for
# Digital Music and Queen Mary, University of London shall not be
# used in advertising or otherwise to promote the sale, use or other
# dealings in this Software without prior written authorization.
'''A high-level interface to the vampyhost extension module, for quickly and easily running Vamp audio analysis plugins on audio files and buffers.'''
import numpy
def frames_from_array(arr, step_size, frame_size):
"""Generate a list of frames of size frame_size, extracted from the input array arr at step_size intervals"""
# presumably such a function exists in many places, but I need practice
assert(step_size > 0)
if arr.ndim == 1: # turn 1d into 2d array with 1 channel
arr = numpy.reshape(arr, (1, arr.shape[0]))
assert(arr.ndim == 2)
n = arr.shape[1]
i = 0
while (i < n):
frame = arr[:, i : i + frame_size]
w = frame.shape[1]
if (w < frame_size):
pad = numpy.zeros((frame.shape[0], frame_size - w))
frame = numpy.concatenate((frame, pad), 1)
yield frame
i = i + step_size
| [
"numpy.zeros",
"numpy.reshape",
"numpy.concatenate"
] | [((1955, 1992), 'numpy.reshape', 'numpy.reshape', (['arr', '(1, arr.shape[0])'], {}), '(arr, (1, arr.shape[0]))\n', (1968, 1992), False, 'import numpy\n'), ((2186, 2231), 'numpy.zeros', 'numpy.zeros', (['(frame.shape[0], frame_size - w)'], {}), '((frame.shape[0], frame_size - w))\n', (2197, 2231), False, 'import numpy\n'), ((2252, 2286), 'numpy.concatenate', 'numpy.concatenate', (['(frame, pad)', '(1)'], {}), '((frame, pad), 1)\n', (2269, 2286), False, 'import numpy\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Purpose: This script is to add a specific number to image names in order to increment their number.
Functionality: In order to merge Test set and Training set together, Test set image names should be added with the highest number in the Training set.
Input: Train_DIR=Training set path, Test_DIR=Test set path,
Output: Together_DIR=Merged sets together path,
Usage: Python addNumtoImageNames.py --Train_DIR --Test_DIR --Together_DIR
Author: <NAME>
Date: 11th September 2017
"""
import sys
import os
import argparse
import progressbar
from glob import glob
from skimage import io
import numpy as np
from termcolor import colored
#import subprocess
sys.path.append("/home/azim_se")
np.random.seed(5) # for reproducibility
progress = progressbar.ProgressBar(widgets=[progressbar.Bar('*', '[', ']'), progressbar.Percentage(), ' '])
"""
try:
import cv2
except ImportError:
raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
'configure environemnt variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
"""
def parse_args():
"""Parse input arguments"""
parser = argparse.ArgumentParser(description='addNumtoImageNames')
parser.add_argument('--Train_DIR', dest='_trainDir', help='Path to Train set Directory',
default='./train', type=str)
parser.add_argument('--Test_DIR', dest='_testDir', help='Path to Test set Directory',
default='./test', type=str)
parser.add_argument('--Together_DIR', dest='_mergedDir', help='Path to Together set Directory',
default='./together', type=str)
args = parser.parse_args()
return args
class Incrimentation(object):
'''
Read each image and its name. Add a specific number to each file name and save it.
INPUT list 'inputpath': filepaths to all images of specific set
INPUT list 'outputpath': filepaths to output images of specific set
'''
def __init__(self, inputpath, outputpath, number=55680):
self.inputpath=inputpath
self.outputpath=outputpath
self.number=number
print(colored(("\nInput Path is: {}".format(self.inputpath)), 'yellow'))
self._ImagesNames = glob(self.inputpath+'/**') #/**/*more* '/**/**'
print(colored(self._ImagesNames, 'blue'))
self.read(self._ImagesNames)
def read(self, _ImagesNames):
progress.currval = 0
for image_idx in progress(range(len(self._ImagesNames))):
#Incriment *imagePtr=image
image = self.readImage(self._ImagesNames[image_idx])
_IncImageName = self.incrementName(self._ImagesNames[image_idx],self.number)
self.saveIncImage(image, _IncImageName, self.outputpath)
def readImage(self, imagepath):
'''
Reading each image
input: imagepath= path to image
output: img= image file
'''
try:
print(colored(("\nimage path being read is : {}".format(imagepath)), 'green'))
img = io.imread(imagepath)#plugin='simpleitk').astype(float)
except Exception as e:
raise("Can not read image")
return img
def incrementName(self, ImageName, number):
'''
Increment file name by an number
>>> f = 'C:\\X\\Data\\foo.txt'
>>> import os
>>> os.path.basename(f)
'foo.txt'
>>> os.path.dirname(f)
'C:\\X\\Data'
>>> os.path.splitext(f)
('C:\\X\\Data\\foo', '.txt')
>>> os.path.splitext(os.path.basename(f))
('foo', '.txt')
or
>>> filename = "example.jpeg"
>>> filename.split(".")[-1]
'jpeg'
No error when file doesn't have an extension:
>>> "filename".split(".")[-1]
'filename'
But you must be careful:
>>> "png".split(".")[-1]
'png' # But file doesn't have an extension
head, tail = os.path.split("a/b/c/00001.dat")
print(head,tail
'''
# split file base name from head of path file
head, basename = os.path.split(ImageName)
print("Head and Basename are: ", head, basename)
# find out RGB category or grayscale
category = os.path.split(head)[-1]
#split file name from its format
_fileName,_fileformat = os.path.splitext(basename)
print("_fileName and _fileformat are: ", _fileName, _fileformat)
#increment file name
_incfileName = str(int(_fileName)+self.number)+_fileformat
print("Incremented base Name is: ", _incfileName)
#join paths all together
if category=='RGB' or category=='grayscale':
_incfileName=os.path.join(c_incfileName)
print("incremented full name is: ", _incfileName)
return _incfileName
def saveIncImage(self, image, _incfileName, _outpath):
# append output directory path to incremented file path
_fileName = os.path.join(_outpath,_incfileName)
print(colored("\nSaving path is: {}".format(_fileName), 'red'))
io.imsave(_fileName,image)
#cv2.imwrite(outputpath+'/'+'{}'.format(_incfileName))
if __name__ == '__main__':
args = parse_args()
#_trainImages = glob(args._trainDir+'/**.jpg')#/**/*more*
#_testImages = glob(args._testDir+'/**.jpg')#/**/*more*
data = Incrimentation(args._testDir,args._mergedDir)
| [
"progressbar.Bar",
"termcolor.colored",
"argparse.ArgumentParser",
"os.path.splitext",
"os.path.join",
"os.path.split",
"skimage.io.imread",
"progressbar.Percentage",
"numpy.random.seed",
"skimage.io.imsave",
"sys.path.append",
"glob.glob"
] | [((704, 736), 'sys.path.append', 'sys.path.append', (['"""/home/azim_se"""'], {}), "('/home/azim_se')\n", (719, 736), False, 'import sys\n'), ((737, 754), 'numpy.random.seed', 'np.random.seed', (['(5)'], {}), '(5)\n', (751, 754), True, 'import numpy as np\n'), ((1252, 1309), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""addNumtoImageNames"""'}), "(description='addNumtoImageNames')\n", (1275, 1309), False, 'import argparse\n'), ((2225, 2253), 'glob.glob', 'glob', (["(self.inputpath + '/**')"], {}), "(self.inputpath + '/**')\n", (2229, 2253), False, 'from glob import glob\n'), ((3806, 3830), 'os.path.split', 'os.path.split', (['ImageName'], {}), '(ImageName)\n', (3819, 3830), False, 'import os\n'), ((4023, 4049), 'os.path.splitext', 'os.path.splitext', (['basename'], {}), '(basename)\n', (4039, 4049), False, 'import os\n'), ((4583, 4619), 'os.path.join', 'os.path.join', (['_outpath', '_incfileName'], {}), '(_outpath, _incfileName)\n', (4595, 4619), False, 'import os\n'), ((4687, 4714), 'skimage.io.imsave', 'io.imsave', (['_fileName', 'image'], {}), '(_fileName, image)\n', (4696, 4714), False, 'from skimage import io\n'), ((821, 851), 'progressbar.Bar', 'progressbar.Bar', (['"""*"""', '"""["""', '"""]"""'], {}), "('*', '[', ']')\n", (836, 851), False, 'import progressbar\n'), ((853, 877), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (875, 877), False, 'import progressbar\n'), ((2282, 2316), 'termcolor.colored', 'colored', (['self._ImagesNames', '"""blue"""'], {}), "(self._ImagesNames, 'blue')\n", (2289, 2316), False, 'from termcolor import colored\n'), ((2922, 2942), 'skimage.io.imread', 'io.imread', (['imagepath'], {}), '(imagepath)\n', (2931, 2942), False, 'from skimage import io\n'), ((3937, 3956), 'os.path.split', 'os.path.split', (['head'], {}), '(head)\n', (3950, 3956), False, 'import os\n'), ((4347, 4374), 'os.path.join', 'os.path.join', (['c_incfileName'], {}), '(c_incfileName)\n', (4359, 4374), 
False, 'import os\n')] |
import argparse
import os
import subprocess
import numpy as np
from jinja2 import Template
import datasets as ds
import utils
from vgg16 import vgg16_multilabel_hdf5
from utils import flip_labels as flip_label_matrix
EXP_DIR = os.path.join('data', 'experiments', 'espgame')
DS_DIR = os.path.join('data', 'ESP-Game')
AUX_DIR = os.path.join(EXP_DIR, 'aux')
PROTOTXT_NET = os.path.join(AUX_DIR, 'vgg16_multilabel_00.jinja2')
PROTOTXT_SOLVER = os.path.join(AUX_DIR, 'vgg16_solver_00.jinja2')
SNAPSHOT_FILE = os.path.join(EXP_DIR, '..', 'models',
'VGG_ILSVRC_16_layers.caffemodel')
TRAIN_LIST = os.path.join(DS_DIR, 'espgame_train_list.txt')
TEST_LIST = os.path.join(DS_DIR, 'espgame_test_list.txt')
TRAIN_ANNOT = os.path.join(DS_DIR, 'espgame_train_annot.hvecs')
TEST_ANNOT = os.path.join(DS_DIR, 'espgame_test_annot.hvecs')
DATASET_INFO = dict(train_list=TRAIN_LIST, test_list=TEST_LIST,
train_annot=TRAIN_ANNOT, test_annot=TEST_ANNOT)
def check_image_labels(dirname, prm=DATASET_INFO):
"""Create/Check that ESP-Game labels are in HDF5 format
Note
----
Be careful launching multiple-process dumping source files
"""
filename = os.path.join(dirname, 'label_train.h5')
if not os.path.isfile(filename):
ds.espgame_dump_labels(prm['train_annot'], filename)
filename = os.path.join(dirname, 'label_test.h5')
if not os.path.isfile(filename):
ds.espgame_dump_labels(prm['test_annot'], filename)
def check_txt_sources(dirname, prm=DATASET_INFO):
"""Create/Check that if train and test list exist
Note
----
Be careful launching multiple-process dumping source files
"""
filename_train = os.path.join(dirname, 'img_train.txt')
if not os.path.isfile(filename_train):
ds.espgame_dump_list(prm['train_list'], filename_train)
filename_test = os.path.join(dirname, 'img_test.txt')
if not os.path.isfile(filename_test):
ds.espgame_dump_list(prm['test_list'], filename_test)
return filename_train, filename_test
def create_prefix(name, dirname):
"""Create prefix to identify and experiment"""
exp_id = os.path.join(dirname, name)
if not os.path.isdir(dirname):
os.makedirs(dirname)
return exp_id
def create_prototxt_net(filename, version=0, **kwargs):
if version == 0:
prm = dict(batch_size=64, label_src="{{ h5_src_train }}",
input='image_data_no_label', img_src="{{ img_src_train }}",
img_root='data/ESP-Game/ESP-ImageSet/', loss='l2-norm',
img_transf=dict(crop_size=224, mean_value=[104, 117, 123],
mirror=True), new_width=256, new_height=256, n_output=268)
vgg16_multilabel_hdf5(filename, **prm)
def dump_annotation_batches(name, Y, prefix=AUX_DIR, clobber=True, txt=True):
"""Save HDF5 files used for caffe-stochastic solver"""
exp_id = create_prefix(name, prefix)
src_file, h5_file = exp_id + '.txt', exp_id + '.h5'
src_exist = os.path.isfile(src_file)
if clobber or not src_exist:
utils.h5py_save(h5_file, h5mode='w', label=np.float32(Y.T))
if txt and not src_exist:
with open(src_file, 'w') as fid:
fid.write(h5_file)
return src_file
def launch_caffe(name, solver, net, snapshot=None, finetune=False, gpu_id=-1,
prefix=''):
"""Laucn caffe binary (finetunning)"""
log = os.path.join(prefix, name + '.log')
cmd = ['sh', 'train.sh', str(gpu_id), solver, net, log]
if snapshot is not None:
cmd += [snapshot]
if finetune:
cmd += ['1']
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def load_labels(dirname):
"""Load train/test label matrix"""
filename = os.path.join(dirname, 'label_train.h5')
train = utils.h5py_load(filename, 'labels')
filename = os.path.join(dirname, 'label_test.h5')
test = utils.h5py_load(filename, 'labels')
return train, test
def update_net_prototxt(txt_template, name, prefix, h5_train, h5_test, img_train,
img_test):
"""Update network prototxt template"""
with open(txt_template, 'r') as fid:
prototxt = fid.read()
template = Template(prototxt)
netfile = os.path.join(prefix, name + '_net.prototxt')
with open(netfile, 'w') as fid:
print >>fid, template.render(img_src_train=img_train,
img_src_test=img_test, h5_src_train=h5_train, h5_src_test=h5_test)
return netfile
def update_solver_prototxt(txt_template, name, prefix, netfile):
"""Update solver prototxt template"""
with open(txt_template, 'r') as fid:
prototxt = fid.read()
template = Template(prototxt)
solverfile = os.path.join(prefix, name + '_solver.prototxt')
snapshot = os.path.join(prefix, name + '_')
with open(solverfile, 'w') as fid:
print >>fid, template.render(snapshot=snapshot, net_src=netfile)
return solverfile
# Program
def input_parser():
help_id = 'ID used to identify experiment and its results'
help_gpu = 'Device ID of the GPU used for the experiment'
help_fp = 'Probability of flipping labels'
help_ft = 'Flipping strategy (True: just positive, False: All)'
help_ff = 'Finetune model otherwise Resume training'
help_pn = 'Fullpath of prototxt network jinja2 template'
help_ps = 'Fullpath of prototxt solver jinja2 template'
help_sm = 'Fullpath of snapshot caffe-model'
help_ad = 'Fullpath of auxiliar folder of ESP-Game experiments'
p = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('exp_id', help=help_id, type=str)
p.add_argument('-gpu', '--gpu_id', help=help_gpu, type=int, default=0)
p.add_argument('-fp', '--flip_prob', help=help_fp, type=float, default=0.0)
p.add_argument('-ft', '--flip_type', help=help_ft, action='store_false')
p.add_argument('-ff', '--finetune_flag', help=help_ff, action='store_false')
p.add_argument('-pn', '--prototxt_net', help=help_pn,
default=PROTOTXT_NET)
p.add_argument('-ps', '--prototxt_solver', help=help_ps,
default=PROTOTXT_SOLVER)
p.add_argument('-sm', '--snapshot_file', help=help_sm,
default=SNAPSHOT_FILE)
p.add_argument('-ad', '--aux_dir', help=help_ad, default=AUX_DIR)
return p
def main(exp_id, gpu_id, flip_prob, flip_type, finetune_flag,
prototxt_net, prototxt_solver, aux_dir, snapshot_file):
train_id, test_id = exp_id + '_trn', exp_id + '_tst'
exp_dir = os.path.join(aux_dir, '..', exp_id)
# Check if source and annotation files exist
img_src_train, img_src_test = check_txt_sources(aux_dir)
check_image_labels(aux_dir)
# Load and Flip label matrix
Y_train, Y_test = load_labels(aux_dir)
Yf_train = flip_label_matrix(Y_train, flip_prob, flip_type)
# Dump annotations in caffe-hdf5 format
h5_src_train = dump_annotation_batches(train_id, Yf_train,
prefix=exp_dir)
h5_src_test = dump_annotation_batches(test_id, Y_test,
prefix=exp_dir)
# Update network prototxt
netfile = update_net_prototxt(prototxt_net, exp_id, exp_dir,
h5_train=h5_src_train, h5_test=h5_src_test, img_train=img_src_train,
img_test=img_src_test)
# Update solver prototxt
solverfile = update_solver_prototxt(prototxt_solver, exp_id, exp_dir,
netfile)
# Launch process
launch_caffe(exp_id, solverfile, netfile, finetune=finetune_flag,
gpu_id=gpu_id, prefix=exp_dir, snapshot=snapshot_file)
if __name__ == '__main__':
p = input_parser()
args = p.parse_args()
main(**vars(args))
| [
"subprocess.check_output",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.join",
"utils.h5py_load",
"jinja2.Template",
"os.path.isfile",
"datasets.espgame_dump_list",
"os.path.isdir",
"datasets.espgame_dump_labels",
"utils.flip_labels",
"vgg16.vgg16_multilabel_hdf5",
"numpy.float32"
] | [((230, 276), 'os.path.join', 'os.path.join', (['"""data"""', '"""experiments"""', '"""espgame"""'], {}), "('data', 'experiments', 'espgame')\n", (242, 276), False, 'import os\n'), ((286, 318), 'os.path.join', 'os.path.join', (['"""data"""', '"""ESP-Game"""'], {}), "('data', 'ESP-Game')\n", (298, 318), False, 'import os\n'), ((329, 357), 'os.path.join', 'os.path.join', (['EXP_DIR', '"""aux"""'], {}), "(EXP_DIR, 'aux')\n", (341, 357), False, 'import os\n'), ((373, 424), 'os.path.join', 'os.path.join', (['AUX_DIR', '"""vgg16_multilabel_00.jinja2"""'], {}), "(AUX_DIR, 'vgg16_multilabel_00.jinja2')\n", (385, 424), False, 'import os\n'), ((443, 490), 'os.path.join', 'os.path.join', (['AUX_DIR', '"""vgg16_solver_00.jinja2"""'], {}), "(AUX_DIR, 'vgg16_solver_00.jinja2')\n", (455, 490), False, 'import os\n'), ((507, 579), 'os.path.join', 'os.path.join', (['EXP_DIR', '""".."""', '"""models"""', '"""VGG_ILSVRC_16_layers.caffemodel"""'], {}), "(EXP_DIR, '..', 'models', 'VGG_ILSVRC_16_layers.caffemodel')\n", (519, 579), False, 'import os\n'), ((598, 644), 'os.path.join', 'os.path.join', (['DS_DIR', '"""espgame_train_list.txt"""'], {}), "(DS_DIR, 'espgame_train_list.txt')\n", (610, 644), False, 'import os\n'), ((657, 702), 'os.path.join', 'os.path.join', (['DS_DIR', '"""espgame_test_list.txt"""'], {}), "(DS_DIR, 'espgame_test_list.txt')\n", (669, 702), False, 'import os\n'), ((717, 766), 'os.path.join', 'os.path.join', (['DS_DIR', '"""espgame_train_annot.hvecs"""'], {}), "(DS_DIR, 'espgame_train_annot.hvecs')\n", (729, 766), False, 'import os\n'), ((780, 828), 'os.path.join', 'os.path.join', (['DS_DIR', '"""espgame_test_annot.hvecs"""'], {}), "(DS_DIR, 'espgame_test_annot.hvecs')\n", (792, 828), False, 'import os\n'), ((1163, 1202), 'os.path.join', 'os.path.join', (['dirname', '"""label_train.h5"""'], {}), "(dirname, 'label_train.h5')\n", (1175, 1202), False, 'import os\n'), ((1316, 1354), 'os.path.join', 'os.path.join', (['dirname', '"""label_test.h5"""'], {}), "(dirname, 
'label_test.h5')\n", (1328, 1354), False, 'import os\n'), ((1669, 1707), 'os.path.join', 'os.path.join', (['dirname', '"""img_train.txt"""'], {}), "(dirname, 'img_train.txt')\n", (1681, 1707), False, 'import os\n'), ((1835, 1872), 'os.path.join', 'os.path.join', (['dirname', '"""img_test.txt"""'], {}), "(dirname, 'img_test.txt')\n", (1847, 1872), False, 'import os\n'), ((2117, 2144), 'os.path.join', 'os.path.join', (['dirname', 'name'], {}), '(dirname, name)\n', (2129, 2144), False, 'import os\n'), ((2951, 2975), 'os.path.isfile', 'os.path.isfile', (['src_file'], {}), '(src_file)\n', (2965, 2975), False, 'import os\n'), ((3351, 3386), 'os.path.join', 'os.path.join', (['prefix', "(name + '.log')"], {}), "(prefix, name + '.log')\n", (3363, 3386), False, 'import os\n'), ((3558, 3612), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'stderr': 'subprocess.STDOUT'}), '(cmd, stderr=subprocess.STDOUT)\n', (3581, 3612), False, 'import subprocess\n'), ((3694, 3733), 'os.path.join', 'os.path.join', (['dirname', '"""label_train.h5"""'], {}), "(dirname, 'label_train.h5')\n", (3706, 3733), False, 'import os\n'), ((3746, 3781), 'utils.h5py_load', 'utils.h5py_load', (['filename', '"""labels"""'], {}), "(filename, 'labels')\n", (3761, 3781), False, 'import utils\n'), ((3797, 3835), 'os.path.join', 'os.path.join', (['dirname', '"""label_test.h5"""'], {}), "(dirname, 'label_test.h5')\n", (3809, 3835), False, 'import os\n'), ((3847, 3882), 'utils.h5py_load', 'utils.h5py_load', (['filename', '"""labels"""'], {}), "(filename, 'labels')\n", (3862, 3882), False, 'import utils\n'), ((4137, 4155), 'jinja2.Template', 'Template', (['prototxt'], {}), '(prototxt)\n', (4145, 4155), False, 'from jinja2 import Template\n'), ((4170, 4214), 'os.path.join', 'os.path.join', (['prefix', "(name + '_net.prototxt')"], {}), "(prefix, name + '_net.prototxt')\n", (4182, 4214), False, 'import os\n'), ((4605, 4623), 'jinja2.Template', 'Template', (['prototxt'], {}), '(prototxt)\n', (4613, 
4623), False, 'from jinja2 import Template\n'), ((4641, 4688), 'os.path.join', 'os.path.join', (['prefix', "(name + '_solver.prototxt')"], {}), "(prefix, name + '_solver.prototxt')\n", (4653, 4688), False, 'import os\n'), ((4704, 4736), 'os.path.join', 'os.path.join', (['prefix', "(name + '_')"], {}), "(prefix, name + '_')\n", (4716, 4736), False, 'import os\n'), ((5447, 5526), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (5470, 5526), False, 'import argparse\n'), ((6454, 6489), 'os.path.join', 'os.path.join', (['aux_dir', '""".."""', 'exp_id'], {}), "(aux_dir, '..', exp_id)\n", (6466, 6489), False, 'import os\n'), ((6723, 6771), 'utils.flip_labels', 'flip_label_matrix', (['Y_train', 'flip_prob', 'flip_type'], {}), '(Y_train, flip_prob, flip_type)\n', (6740, 6771), True, 'from utils import flip_labels as flip_label_matrix\n'), ((1214, 1238), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (1228, 1238), False, 'import os\n'), ((1248, 1300), 'datasets.espgame_dump_labels', 'ds.espgame_dump_labels', (["prm['train_annot']", 'filename'], {}), "(prm['train_annot'], filename)\n", (1270, 1300), True, 'import datasets as ds\n'), ((1366, 1390), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (1380, 1390), False, 'import os\n'), ((1400, 1451), 'datasets.espgame_dump_labels', 'ds.espgame_dump_labels', (["prm['test_annot']", 'filename'], {}), "(prm['test_annot'], filename)\n", (1422, 1451), True, 'import datasets as ds\n'), ((1719, 1749), 'os.path.isfile', 'os.path.isfile', (['filename_train'], {}), '(filename_train)\n', (1733, 1749), False, 'import os\n'), ((1759, 1814), 'datasets.espgame_dump_list', 'ds.espgame_dump_list', (["prm['train_list']", 'filename_train'], {}), "(prm['train_list'], filename_train)\n", (1779, 1814), True, 'import datasets as ds\n'), ((1884, 1913), 'os.path.isfile', 
'os.path.isfile', (['filename_test'], {}), '(filename_test)\n', (1898, 1913), False, 'import os\n'), ((1923, 1976), 'datasets.espgame_dump_list', 'ds.espgame_dump_list', (["prm['test_list']", 'filename_test'], {}), "(prm['test_list'], filename_test)\n", (1943, 1976), True, 'import datasets as ds\n'), ((2156, 2178), 'os.path.isdir', 'os.path.isdir', (['dirname'], {}), '(dirname)\n', (2169, 2178), False, 'import os\n'), ((2188, 2208), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (2199, 2208), False, 'import os\n'), ((2661, 2699), 'vgg16.vgg16_multilabel_hdf5', 'vgg16_multilabel_hdf5', (['filename'], {}), '(filename, **prm)\n', (2682, 2699), False, 'from vgg16 import vgg16_multilabel_hdf5\n'), ((3060, 3075), 'numpy.float32', 'np.float32', (['Y.T'], {}), '(Y.T)\n', (3070, 3075), True, 'import numpy as np\n')] |
import numpy as np
import shared
def test_auc():
"""
Test combined calculation of PR-AUC and ROC-AUC
"""
predicted = [
[0.2, 0.3, 0.5],
[0.2, 0.41, 0.39],
[0.2, 0.1, 0.7],
[0.8, 0.1, 0.1],
[0.4, 0.3, 0.3],
]
groundtruth = [
[1, 0, 0],
[0, 1, 1],
[0, 0, 1],
[1, 0, 0],
[1, 1, 0],
]
roc_auc, pr_auc = shared.compute_auc(groundtruth, predicted)
# These values were computed using default scikit-learn parameters
np.testing.assert_allclose(roc_auc, 0.8611111)
np.testing.assert_allclose(pr_auc, 0.8444444)
def test_type_of_groundtruth():
assert shared.type_of_groundtruth([1, -1, -1, 1]) == "binary"
assert (
shared.type_of_groundtruth(np.array([[0, 1], [1, 1]])) == "multilabel-indicator"
)
assert (
shared.type_of_groundtruth(np.array([[0, 1], [1, 0]])) == "multiclass-indicator"
)
def test_compute_accuracy_multiclass():
y_pred = [
[0.2, 0.3, 0.5], # fp 0, 0, 1
[0.8, 0.1, 0.1], # tp 1, 0, 0
[0.4, 0.3, 0.3], # fp 1, 0, 0
[0.2, 0.41, 0.39], # tp 0, 1, 0
[0.25, 0.4, 0.35], # fp 0, 1, 0
[0.2, 0.1, 0.7], # tp 0, 0, 1
]
y_true = [
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
]
acc = shared.compute_accuracy(y_true, y_pred)
np.testing.assert_allclose(acc, 0.5)
def test_compute_accuracy_multilabel():
y_pred_ml = [
[0.21, 0.31, 0.5], # fp 0, 0, 1
[0.81, 0.71, 0.1], # tp 1, 1, 0
[0.41, 0.31, 0.3], # fp 1, 0, 0
[0.2, 0.51, 0.69], # tp 0, 1, 1
[0.25, 0.4, 0.35], # fp 0, 0, 0
[0.51, 0.15, 0.7], # tp 0, 0, 1
]
y_true_ml = [
[1, 1, 0],
[1, 1, 0],
[0, 1, 1],
[0, 1, 1],
[1, 0, 1],
[1, 0, 1],
]
acc_multilabel = shared.compute_accuracy(y_true_ml, y_pred_ml)
np.testing.assert_allclose(acc_multilabel, 0.5)
def test_compute_pearson_correlation():
# defines predictions
y_pred_pc = [
[0.89, 0.11],
[0.32, 0.59],
[0.78, 0.39],
[0.12, 0.85],
[0.59, 0.73],
]
# define groundtruth
y_true_ml = [
[0.92, 0.11],
[0.39, 0.65],
[0.78, 0.48],
[0.08, 0.86],
[0.63, 0.74],
]
# compute p corr
p_corr = shared.compute_pearson_correlation(y_pred_pc, y_true_ml)
np.testing.assert_allclose(p_corr, [0.99259384, 0.99102183])
def test_compute_ccc():
# defines predictions
y_pred_ccc = [
[0.81, 0.17],
[0.32, 0.53],
[0.79, 0.35],
[0.18, 0.81],
[0.52, 0.76],
]
# define groundtruth
y_true_ccc = [
[0.96, 0.13],
[0.39, 0.65],
[0.73, 0.42],
[0.09, 0.84],
[0.61, 0.71],
]
# compute ccc
ccc = shared.compute_ccc(y_pred_ccc, y_true_ccc)
np.testing.assert_allclose(ccc, [0.85452581, 1.06378999])
def test_compute_r2_score():
# defines predictions
y_pred_r2 = [
[0.81, 0.17],
[0.32, 0.53],
[0.79, 0.35],
[0.18, 0.81],
[0.52, 0.76],
]
# define groundtruth
y_true_r2 = [
[0.96, 0.13],
[0.39, 0.65],
[0.73, 0.42],
[0.09, 0.84],
[0.61, 0.71],
]
# compute r2 score
r2_score = shared.compute_r2_score(y_pred_r2, y_true_r2)
np.testing.assert_allclose(r2_score, [0.84896967, 0.9170988])
def test_compute_adjusted_r2_score():
# defines predictions
y_pred_r2 = [
[0.81, 0.17],
[0.32, 0.53],
[0.79, 0.35],
[0.18, 0.81],
[0.52, 0.76],
]
# define groundtruth
y_true_r2 = [
[0.96, 0.13],
[0.39, 0.65],
[0.73, 0.42],
[0.09, 0.84],
[0.61, 0.71],
]
# compute r2 score
adjusted_r2_score = shared.compute_adjusted_r2_score(y_pred_r2, y_true_r2, p=np.shape(y_true_r2)[1])
np.testing.assert_allclose(adjusted_r2_score, [0.69793933, 0.8341976])
def test_compute_root_mean_squared_error():
# defines predictions
y_pred_rmse = [
[0.81, 0.17],
[0.32, 0.53],
[0.79, 0.35],
[0.18, 0.81],
[0.52, 0.76],
]
# define groundtruth
y_true_rmse = [
[0.96, 0.13],
[0.39, 0.65],
[0.73, 0.42],
[0.09, 0.84],
[0.61, 0.71],
]
# compute rmse
rmse = shared.compute_root_mean_squared_error(y_pred_rmse, y_true_rmse)
np.testing.assert_allclose(rmse, [0.00944, 0.00486])
def test_compute_mean_squared_error():
# define predictions
y_pred_rmse = [
[0.81, 0.17],
[0.32, 0.53],
[0.79, 0.35],
[0.18, 0.81],
[0.52, 0.76],
]
# define groundtruth
y_true_rmse = [
[0.96, 0.13],
[0.39, 0.65],
[0.73, 0.42],
[0.09, 0.84],
[0.61, 0.71],
]
# compute mean squared error
mse = shared.compute_mean_squared_error(y_pred_rmse, y_true_rmse)
np.testing.assert_allclose(mse, [0.09715966, 0.0697137])
| [
"shared.compute_mean_squared_error",
"shared.compute_auc",
"numpy.testing.assert_allclose",
"shared.compute_root_mean_squared_error",
"shared.compute_accuracy",
"numpy.array",
"shared.compute_r2_score",
"shared.type_of_groundtruth",
"numpy.shape",
"shared.compute_ccc",
"shared.compute_pearson_co... | [((414, 456), 'shared.compute_auc', 'shared.compute_auc', (['groundtruth', 'predicted'], {}), '(groundtruth, predicted)\n', (432, 456), False, 'import shared\n'), ((532, 578), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['roc_auc', '(0.8611111)'], {}), '(roc_auc, 0.8611111)\n', (558, 578), True, 'import numpy as np\n'), ((583, 628), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['pr_auc', '(0.8444444)'], {}), '(pr_auc, 0.8444444)\n', (609, 628), True, 'import numpy as np\n'), ((1392, 1431), 'shared.compute_accuracy', 'shared.compute_accuracy', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1415, 1431), False, 'import shared\n'), ((1436, 1472), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['acc', '(0.5)'], {}), '(acc, 0.5)\n', (1462, 1472), True, 'import numpy as np\n'), ((1945, 1990), 'shared.compute_accuracy', 'shared.compute_accuracy', (['y_true_ml', 'y_pred_ml'], {}), '(y_true_ml, y_pred_ml)\n', (1968, 1990), False, 'import shared\n'), ((1995, 2042), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['acc_multilabel', '(0.5)'], {}), '(acc_multilabel, 0.5)\n', (2021, 2042), True, 'import numpy as np\n'), ((2441, 2497), 'shared.compute_pearson_correlation', 'shared.compute_pearson_correlation', (['y_pred_pc', 'y_true_ml'], {}), '(y_pred_pc, y_true_ml)\n', (2475, 2497), False, 'import shared\n'), ((2502, 2562), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['p_corr', '[0.99259384, 0.99102183]'], {}), '(p_corr, [0.99259384, 0.99102183])\n', (2528, 2562), True, 'import numpy as np\n'), ((2941, 2983), 'shared.compute_ccc', 'shared.compute_ccc', (['y_pred_ccc', 'y_true_ccc'], {}), '(y_pred_ccc, y_true_ccc)\n', (2959, 2983), False, 'import shared\n'), ((2988, 3045), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ccc', '[0.85452581, 1.06378999]'], {}), '(ccc, [0.85452581, 1.06378999])\n', (3014, 3045), True, 'import numpy as 
np\n'), ((3436, 3481), 'shared.compute_r2_score', 'shared.compute_r2_score', (['y_pred_r2', 'y_true_r2'], {}), '(y_pred_r2, y_true_r2)\n', (3459, 3481), False, 'import shared\n'), ((3486, 3547), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['r2_score', '[0.84896967, 0.9170988]'], {}), '(r2_score, [0.84896967, 0.9170988])\n', (3512, 3547), True, 'import numpy as np\n'), ((4041, 4111), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['adjusted_r2_score', '[0.69793933, 0.8341976]'], {}), '(adjusted_r2_score, [0.69793933, 0.8341976])\n', (4067, 4111), True, 'import numpy as np\n'), ((4513, 4577), 'shared.compute_root_mean_squared_error', 'shared.compute_root_mean_squared_error', (['y_pred_rmse', 'y_true_rmse'], {}), '(y_pred_rmse, y_true_rmse)\n', (4551, 4577), False, 'import shared\n'), ((4582, 4634), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rmse', '[0.00944, 0.00486]'], {}), '(rmse, [0.00944, 0.00486])\n', (4608, 4634), True, 'import numpy as np\n'), ((5043, 5102), 'shared.compute_mean_squared_error', 'shared.compute_mean_squared_error', (['y_pred_rmse', 'y_true_rmse'], {}), '(y_pred_rmse, y_true_rmse)\n', (5076, 5102), False, 'import shared\n'), ((5107, 5163), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mse', '[0.09715966, 0.0697137]'], {}), '(mse, [0.09715966, 0.0697137])\n', (5133, 5163), True, 'import numpy as np\n'), ((674, 716), 'shared.type_of_groundtruth', 'shared.type_of_groundtruth', (['[1, -1, -1, 1]'], {}), '([1, -1, -1, 1])\n', (700, 716), False, 'import shared\n'), ((777, 803), 'numpy.array', 'np.array', (['[[0, 1], [1, 1]]'], {}), '([[0, 1], [1, 1]])\n', (785, 803), True, 'import numpy as np\n'), ((885, 911), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (893, 911), True, 'import numpy as np\n'), ((4013, 4032), 'numpy.shape', 'np.shape', (['y_true_r2'], {}), '(y_true_r2)\n', (4021, 4032), True, 'import numpy as np\n')] |
#!/usr/bin/python
#-*- coding:utf-8 -*-
import sys
import struct
import numpy as np
from torch import tensor
from torch.nn import functional as fn
def convolution3d_f32():
para = []
# init the input data and parameters
batch = int(np.random.randint(1, high=4, size=1))
in_channel = int(np.random.randint(2, high=8, size=1))
in_depth = int(np.random.randint(16, high=128, size=1))
in_height = int(np.random.randint(16, high=128, size=1))
in_width = int(np.random.randint(16, high=128, size=1))
out_channel = int(np.random.randint(8, high=16, size=1))
stride_d = int(np.random.randint(1, high=5, size=1))
stride_h = int(np.random.randint(1, high=5, size=1))
stride_w = int(np.random.randint(1, high=5, size=1))
kernel_d = int(np.random.randint(stride_d, high=8, size=1))
kernel_h = int(np.random.randint(stride_h, high=8, size=1))
kernel_w = int(np.random.randint(stride_w, high=8, size=1))
dilation_d = int(np.random.randint(1, high=4, size=1))
dilation_h = int(np.random.randint(1, high=4, size=1))
dilation_w = int(np.random.randint(1, high=4, size=1))
kernel_d_t = kernel_d + (kernel_d - 1) * (dilation_d - 1)
kernel_h_t = kernel_h + (kernel_h - 1) * (dilation_h - 1)
kernel_w_t = kernel_w + (kernel_w - 1) * (dilation_w - 1)
pad_left = pad_right = 0
pad_top = pad_down = 0
pad_front = pad_back = 0
pad_w = (in_width - kernel_w_t) - int((in_width - kernel_w_t) / stride_w) * stride_w
if(pad_w !=0):
pad_w = int((in_width - kernel_w_t) / stride_w) * stride_w + stride_w - (in_width - kernel_w_t)
pad_left = int(np.random.randint(0, high=pad_w, size=1))
pad_right = pad_w - pad_left
pad_h = (in_height - kernel_h_t) - int((in_height - kernel_h_t) / stride_h) * stride_h
if(pad_h != 0):
pad_h = int((in_height - kernel_h_t) / stride_h) * stride_h + stride_h - (in_height - kernel_h_t)
pad_top = int(np.random.randint(0, high=pad_h, size=1))
pad_down = pad_h - pad_top
pad_d = (in_depth - kernel_d_t) - int((in_depth - kernel_d_t) / stride_d) * stride_d
if(pad_d != 0):
pad_d = int((in_depth - kernel_d_t) / stride_d) * stride_d + stride_d - (in_depth - kernel_d_t)
pad_front = int(np.random.randint(0, high=pad_d, size=1))
pad_back = pad_d - pad_front
zero_point1 = int(np.random.randint(-6, high=6, size=1))
std1 = int(np.random.randint(1, high=2, size=1))
zero_point2 = int(np.random.randint(-6, high=6, size=1))
std2 = int(np.random.randint(1, high=2, size=1))
zero_point3 = int(np.random.randint(-6, high=6, size=1))
std3 = int(np.random.randint(1, high=2, size=1))
src_in = np.random.normal(zero_point1, std1, (batch, in_channel, in_depth, in_height, in_width))
weight = np.random.normal(zero_point2, std2, (out_channel, in_channel, kernel_d, kernel_h, kernel_w))
bias = np.random.normal(zero_point3, std3, out_channel)
# src_in = np.random.randint(-16, 16, (batch, in_channel, in_depth, in_height, in_width))
# weight = np.random.randint(-5, 5, (out_channel, in_channel, kernel_d, kernel_h, kernel_w))
# bias = np.random.randint(-10, 10, out_channel)
src_in = src_in.astype(np.float32)
weight = weight.astype(np.float32)
bias = bias.astype(np.float32)
#print(src_in)
#print(weight)
t_src_in = tensor(src_in)
t_weight = tensor(weight)
t_bias = tensor(bias)
t_src_in1 = fn.pad(t_src_in, (pad_left, pad_right, pad_top, pad_down, pad_front, pad_back), 'constant', 0)
t_src_out = fn.conv3d(t_src_in1, t_weight, bias=t_bias, stride=(stride_d, stride_h, stride_w), dilation=(dilation_d, dilation_h, dilation_w)).numpy()
out_depth = np.shape(t_src_out)[2]
out_height = np.shape(t_src_out)[3]
out_width = np.shape(t_src_out)[4]
#print(np.shape(t_src_in1))
#print(np.shape(t_src_out))
#print((kernel_y, kernel_x, stride_y, stride_x, dilation_y, dilation_x))
src_in_1 = t_src_in.flatten()
weight_1 = t_weight.flatten()
src_out_1 = t_src_out.flatten()
total_size = (len(src_in_1) + len(src_out_1)) + len(weight_1) + len(bias) + 24
para.append(total_size)
para.append(batch)
para.append(in_channel)
para.append(in_depth)
para.append(in_height)
para.append(in_width)
para.append(out_channel)
para.append(kernel_d)
para.append(kernel_h)
para.append(kernel_w)
para.append(out_depth)
para.append(out_height)
para.append(out_width)
para.append(stride_d)
para.append(stride_h)
para.append(stride_w)
para.append(pad_left)
para.append(pad_right)
para.append(pad_top)
para.append(pad_down)
para.append(pad_front)
para.append(pad_back)
para.append(dilation_d)
para.append(dilation_h)
para.append(dilation_w)
with open("convolution3d_data_f32.bin", "wb") as fp:
data = struct.pack(('%di' % len(para)), *para)
fp.write(data)
data = struct.pack(('%df' % len(src_in_1)), *src_in_1)
fp.write(data)
data = struct.pack(('%df' % len(weight_1)), *weight_1)
fp.write(data)
data = struct.pack(('%df' % len(bias)), *bias)
fp.write(data)
data = struct.pack(('%df' % len(src_out_1)), *src_out_1)
fp.write(data)
fp.close()
return 0
if __name__ == '__main__':
    # Script entry point: generate the conv3d binary test-data file
    # (convolution3d_data_f32.bin), then signal completion.
    convolution3d_f32()
    print("end")
| [
"numpy.random.normal",
"torch.nn.functional.conv3d",
"torch.tensor",
"numpy.random.randint",
"torch.nn.functional.pad",
"numpy.shape"
] | [((2817, 2908), 'numpy.random.normal', 'np.random.normal', (['zero_point1', 'std1', '(batch, in_channel, in_depth, in_height, in_width)'], {}), '(zero_point1, std1, (batch, in_channel, in_depth, in_height,\n in_width))\n', (2833, 2908), True, 'import numpy as np\n'), ((2918, 3014), 'numpy.random.normal', 'np.random.normal', (['zero_point2', 'std2', '(out_channel, in_channel, kernel_d, kernel_h, kernel_w)'], {}), '(zero_point2, std2, (out_channel, in_channel, kernel_d,\n kernel_h, kernel_w))\n', (2934, 3014), True, 'import numpy as np\n'), ((3024, 3072), 'numpy.random.normal', 'np.random.normal', (['zero_point3', 'std3', 'out_channel'], {}), '(zero_point3, std3, out_channel)\n', (3040, 3072), True, 'import numpy as np\n'), ((3491, 3505), 'torch.tensor', 'tensor', (['src_in'], {}), '(src_in)\n', (3497, 3505), False, 'from torch import tensor\n'), ((3522, 3536), 'torch.tensor', 'tensor', (['weight'], {}), '(weight)\n', (3528, 3536), False, 'from torch import tensor\n'), ((3553, 3565), 'torch.tensor', 'tensor', (['bias'], {}), '(bias)\n', (3559, 3565), False, 'from torch import tensor\n'), ((3584, 3682), 'torch.nn.functional.pad', 'fn.pad', (['t_src_in', '(pad_left, pad_right, pad_top, pad_down, pad_front, pad_back)', '"""constant"""', '(0)'], {}), "(t_src_in, (pad_left, pad_right, pad_top, pad_down, pad_front,\n pad_back), 'constant', 0)\n", (3590, 3682), True, 'from torch.nn import functional as fn\n'), ((251, 287), 'numpy.random.randint', 'np.random.randint', (['(1)'], {'high': '(4)', 'size': '(1)'}), '(1, high=4, size=1)\n', (268, 287), True, 'import numpy as np\n'), ((311, 347), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'high': '(8)', 'size': '(1)'}), '(2, high=8, size=1)\n', (328, 347), True, 'import numpy as np\n'), ((371, 410), 'numpy.random.randint', 'np.random.randint', (['(16)'], {'high': '(128)', 'size': '(1)'}), '(16, high=128, size=1)\n', (388, 410), True, 'import numpy as np\n'), ((434, 473), 'numpy.random.randint', 'np.random.randint', 
(['(16)'], {'high': '(128)', 'size': '(1)'}), '(16, high=128, size=1)\n', (451, 473), True, 'import numpy as np\n'), ((497, 536), 'numpy.random.randint', 'np.random.randint', (['(16)'], {'high': '(128)', 'size': '(1)'}), '(16, high=128, size=1)\n', (514, 536), True, 'import numpy as np\n'), ((561, 598), 'numpy.random.randint', 'np.random.randint', (['(8)'], {'high': '(16)', 'size': '(1)'}), '(8, high=16, size=1)\n', (578, 598), True, 'import numpy as np\n'), ((623, 659), 'numpy.random.randint', 'np.random.randint', (['(1)'], {'high': '(5)', 'size': '(1)'}), '(1, high=5, size=1)\n', (640, 659), True, 'import numpy as np\n'), ((683, 719), 'numpy.random.randint', 'np.random.randint', (['(1)'], {'high': '(5)', 'size': '(1)'}), '(1, high=5, size=1)\n', (700, 719), True, 'import numpy as np\n'), ((743, 779), 'numpy.random.randint', 'np.random.randint', (['(1)'], {'high': '(5)', 'size': '(1)'}), '(1, high=5, size=1)\n', (760, 779), True, 'import numpy as np\n'), ((804, 847), 'numpy.random.randint', 'np.random.randint', (['stride_d'], {'high': '(8)', 'size': '(1)'}), '(stride_d, high=8, size=1)\n', (821, 847), True, 'import numpy as np\n'), ((871, 914), 'numpy.random.randint', 'np.random.randint', (['stride_h'], {'high': '(8)', 'size': '(1)'}), '(stride_h, high=8, size=1)\n', (888, 914), True, 'import numpy as np\n'), ((938, 981), 'numpy.random.randint', 'np.random.randint', (['stride_w'], {'high': '(8)', 'size': '(1)'}), '(stride_w, high=8, size=1)\n', (955, 981), True, 'import numpy as np\n'), ((1006, 1042), 'numpy.random.randint', 'np.random.randint', (['(1)'], {'high': '(4)', 'size': '(1)'}), '(1, high=4, size=1)\n', (1023, 1042), True, 'import numpy as np\n'), ((1066, 1102), 'numpy.random.randint', 'np.random.randint', (['(1)'], {'high': '(4)', 'size': '(1)'}), '(1, high=4, size=1)\n', (1083, 1102), True, 'import numpy as np\n'), ((1126, 1162), 'numpy.random.randint', 'np.random.randint', (['(1)'], {'high': '(4)', 'size': '(1)'}), '(1, high=4, size=1)\n', (1143, 
1162), True, 'import numpy as np\n'), ((2462, 2499), 'numpy.random.randint', 'np.random.randint', (['(-6)'], {'high': '(6)', 'size': '(1)'}), '(-6, high=6, size=1)\n', (2479, 2499), True, 'import numpy as np\n'), ((2523, 2559), 'numpy.random.randint', 'np.random.randint', (['(1)'], {'high': '(2)', 'size': '(1)'}), '(1, high=2, size=1)\n', (2540, 2559), True, 'import numpy as np\n'), ((2583, 2620), 'numpy.random.randint', 'np.random.randint', (['(-6)'], {'high': '(6)', 'size': '(1)'}), '(-6, high=6, size=1)\n', (2600, 2620), True, 'import numpy as np\n'), ((2644, 2680), 'numpy.random.randint', 'np.random.randint', (['(1)'], {'high': '(2)', 'size': '(1)'}), '(1, high=2, size=1)\n', (2661, 2680), True, 'import numpy as np\n'), ((2704, 2741), 'numpy.random.randint', 'np.random.randint', (['(-6)'], {'high': '(6)', 'size': '(1)'}), '(-6, high=6, size=1)\n', (2721, 2741), True, 'import numpy as np\n'), ((2765, 2801), 'numpy.random.randint', 'np.random.randint', (['(1)'], {'high': '(2)', 'size': '(1)'}), '(1, high=2, size=1)\n', (2782, 2801), True, 'import numpy as np\n'), ((3851, 3870), 'numpy.shape', 'np.shape', (['t_src_out'], {}), '(t_src_out)\n', (3859, 3870), True, 'import numpy as np\n'), ((3891, 3910), 'numpy.shape', 'np.shape', (['t_src_out'], {}), '(t_src_out)\n', (3899, 3910), True, 'import numpy as np\n'), ((3931, 3950), 'numpy.shape', 'np.shape', (['t_src_out'], {}), '(t_src_out)\n', (3939, 3950), True, 'import numpy as np\n'), ((1695, 1735), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': 'pad_w', 'size': '(1)'}), '(0, high=pad_w, size=1)\n', (1712, 1735), True, 'import numpy as np\n'), ((2029, 2069), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': 'pad_h', 'size': '(1)'}), '(0, high=pad_h, size=1)\n', (2046, 2069), True, 'import numpy as np\n'), ((2358, 2398), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': 'pad_d', 'size': '(1)'}), '(0, high=pad_d, size=1)\n', (2375, 2398), True, 'import numpy as np\n'), 
((3695, 3828), 'torch.nn.functional.conv3d', 'fn.conv3d', (['t_src_in1', 't_weight'], {'bias': 't_bias', 'stride': '(stride_d, stride_h, stride_w)', 'dilation': '(dilation_d, dilation_h, dilation_w)'}), '(t_src_in1, t_weight, bias=t_bias, stride=(stride_d, stride_h,\n stride_w), dilation=(dilation_d, dilation_h, dilation_w))\n', (3704, 3828), True, 'from torch.nn import functional as fn\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 15 23:23:10 2021
@author: Rares
"""
from .preprocess import extractFeatures
import librosa
from librosa import util
import xgboost as xgb
import pandas as pd
import numpy as np
from object_cache import object_cache
@object_cache
def loadAndGetMfcc(folder, sampleRate=44100, nCoeffs=32):
    """Load every audio file in *folder* and compute its MFCC feature vector.

    Parameters
    ----------
    folder : str
        Path of the directory that holds the audio samples.
    sampleRate : int, optional
        Sampling rate (Hz) used when decoding the files. Default 44100.
    nCoeffs : int, optional
        Number of MFCC coefficients extracted per file. Default 32.

    Returns
    -------
    numpy.ndarray of shape (n_files, nCoeffs)
        One row of MFCC coefficients per audio file.
    """
    paths = util.find_files(folder, recurse=False)
    coeffs = np.empty((len(paths), nCoeffs))
    for row, path in enumerate(paths):
        audio, _fs = librosa.load(path, sampleRate, res_type="kaiser_fast")
        audio = util.normalize(audio)
        coeffs[row] = extractFeatures(audio, sampleRate, nCoeffs)
    return coeffs
def combineFeatures(kickFeatures, snareFeatures):
    """Stack kick and snare feature matrices into one training set.

    Kick rows are labelled 0, snare rows are labelled 1 (kicks come first).

    Parameters
    ----------
    kickFeatures : numpy.ndarray of shape (n_kicks, n_coeffs)
        MFCC feature array for kicks.
    snareFeatures : numpy.ndarray of shape (n_snares, n_coeffs)
        MFCC feature array for snares.

    Returns
    -------
    (features, labels)
        features: all rows stacked vertically; labels: (n, 1) column of 0/1.
    """
    kickCount = kickFeatures.shape[0]
    snareCount = snareFeatures.shape[0]
    features = np.concatenate((kickFeatures, snareFeatures))
    labels = np.empty((kickCount + snareCount, 1))
    labels[:kickCount] = 0
    labels[kickCount:] = 1
    return (features, labels)
| [
"librosa.util.find_files",
"numpy.empty",
"numpy.concatenate",
"librosa.util.normalize",
"librosa.load"
] | [((837, 875), 'librosa.util.find_files', 'util.find_files', (['folder'], {'recurse': '(False)'}), '(folder, recurse=False)\n', (852, 875), False, 'from librosa import util\n'), ((919, 946), 'numpy.empty', 'np.empty', (['(nFiles, nCoeffs)'], {}), '((nFiles, nCoeffs))\n', (927, 946), True, 'import numpy as np\n'), ((1942, 1987), 'numpy.concatenate', 'np.concatenate', (['(kickFeatures, snareFeatures)'], {}), '((kickFeatures, snareFeatures))\n', (1956, 1987), True, 'import numpy as np\n'), ((2002, 2033), 'numpy.empty', 'np.empty', (['(nKicks + nSnares, 1)'], {}), '((nKicks + nSnares, 1))\n', (2010, 2033), True, 'import numpy as np\n'), ((1017, 1075), 'librosa.load', 'librosa.load', (['filename', 'sampleRate'], {'res_type': '"""kaiser_fast"""'}), "(filename, sampleRate, res_type='kaiser_fast')\n", (1029, 1075), False, 'import librosa\n'), ((1093, 1114), 'librosa.util.normalize', 'util.normalize', (['sound'], {}), '(sound)\n', (1107, 1114), False, 'from librosa import util\n')] |
import matplotlib.pyplot as plt
import pandas as pd
import sys
import csv
import math
import neat
from sklearn.metrics import accuracy_score
from dateutil.parser import parse
from datetime import datetime
# from SPOT.spot import bidSPOT,dSPOT,SPOT
from functools import reduce
import numpy as np
import random
def col_based_combine_matrix(df, n_star):
    """Column-based reshape: for each of the n_star row chunks of *df*, drop
    that (duplicate) chunk, lay the remaining chunks out side by side, and
    stack the per-chunk results vertically.

    Improvements over the original: ``df.to_numpy()`` is hoisted out of the
    loop (the frame never changes), the confusing chained assignment
    ``lower_matrix = np_array = ...`` is removed, and the unused
    ``reshape_chunk_col_count`` local is dropped. Behaviour is unchanged.

    Parameters
    ----------
    df : pandas.DataFrame
        Input matrix; its row count must be divisible by n_star.
    n_star : int
        Number of row chunks.

    Returns
    -------
    numpy.ndarray, or -1 when df cannot be split evenly
    (original error convention kept).
    """
    if df.shape[0] % n_star != 0:
        print('need to trim df!')
        return -1
    chunk_rows = df.shape[0] // n_star
    # Hoisted: one conversion is enough, the frame is never mutated here.
    base = df.to_numpy()
    combined_matrix = np.array([])
    for i in range(n_star):
        # Remove the i-th row chunk (it would duplicate itself).
        drop_start = i * chunk_rows
        drop_end = drop_start + chunk_rows
        if i == 0:
            remainder = base[drop_end:, ]
        elif i == n_star - 1:
            remainder = base[:drop_start, ]
        else:
            remainder = np.concatenate((base[:drop_start, ], base[drop_end:, ]))
        # Lay the remaining n_star-1 chunks out horizontally.
        reshape_matrix = np.array([])
        for m in range(n_star - 1):
            if m == 0:
                reshape_matrix = remainder[:chunk_rows, ]
            else:
                start = m * chunk_rows
                reshape_matrix = np.concatenate(
                    [reshape_matrix, remainder[start:start + chunk_rows, ]], axis=1)
        if i == 0:
            combined_matrix = reshape_matrix
        else:
            combined_matrix = np.concatenate((combined_matrix, reshape_matrix))
    return combined_matrix
def logsinh(x, tmp_min, logsinh_a, logsinh_b, epsilon):
    """Log-sinh transform of *x*.

    tmp_min is the minimum value of array x; logsinh_a / logsinh_b are the
    transform parameters and epsilon keeps the sinh argument away from 0.
    """
    shifted = (x - tmp_min) * logsinh_b + logsinh_a + epsilon
    return math.log10(math.sinh(shifted)) / logsinh_b
def convert_row_index_row_based_reshape(original_extreme_row_index_list, row_based_reshape_factor, total_rows):
    """Map original row indices onto the row-based-reshaped matrix.

    row_based_reshape_factor rows of the original n1 x n2 matrix are merged
    into a single row of the (n1/factor) x (n2*factor) matrix, so an original
    row index r maps to reshaped row int(r / factor).

    Returns
    -------
    (normal_rows, extreme_rows): both sorted lists of reshaped row indices.
    """
    # Deduplicated extreme rows in the reshaped matrix.
    extreme_rows = sorted({int(r / row_based_reshape_factor)
                           for r in original_extreme_row_index_list})
    # ceil: a trailing partial group still occupies one reshaped row.
    n_reshaped_rows = math.ceil(total_rows / row_based_reshape_factor)
    normal_rows = sorted(np.setdiff1d(range(n_reshaped_rows), extreme_rows))
    return normal_rows, extreme_rows
def convert_row_index_col_based_reshape(original_extreme_row_index_list, col_based_reshape_factor, total_rows):
    """Map original row indices onto the column-based-reshaped matrix.

    col_based_reshape_factor column chunks are combined horizontally, turning
    the original n1 x n2 matrix into factor x (n1*n2/factor); an original row
    index r therefore maps to reshaped row r % factor.

    Returns
    -------
    (normal_rows, extreme_rows): both sorted lists of reshaped row indices.
    """
    # Deduplicated extreme rows in the reshaped matrix.
    extreme_rows = sorted({int(r % col_based_reshape_factor)
                           for r in original_extreme_row_index_list})
    # NOTE(review): ceil of the (integer) factor is the factor itself; kept
    # for symmetry with the row-based variant. total_rows is unused here.
    normal_rows = sorted(np.setdiff1d(range(math.ceil(col_based_reshape_factor)), extreme_rows))
    return normal_rows, extreme_rows
def combine_normal_extreme(total_data_size, results_alarms, results_normal, array_filled_extreme, array_filled_normal, shape_x, shape_y):
    """Interleave separately-stored extreme and normal events back into one matrix.

    results_alarms / results_normal hold the original (flattened) positions of
    extreme resp. normal events; array_filled_extreme / array_filled_normal
    hold the matching values in the same order. shape_x, shape_y give the
    final 2-D shape.
    """
    merged = np.array([])
    next_extreme = 0
    next_normal = 0
    for position in range(total_data_size):
        if position in results_alarms:
            merged = np.append(merged, array_filled_extreme[next_extreme])
            next_extreme += 1
        elif position in results_normal:
            merged = np.append(merged, array_filled_normal[next_normal])
            next_normal += 1
        # positions in neither list are silently skipped, as in the original design
    return merged.reshape(shape_x, shape_y)
def make_holes_matrix(df, percentage, exclude_col, seed=1):
    """Punch NaN holes into *df* in place, never touching *exclude_col*.

    *percentage* of the eligible cells (all rows, every column except
    exclude_col) is set to NaN; positions are drawn from a seeded RNG so the
    result is reproducible for a given seed.
    """
    eligible_cells = df.shape[0] * (df.shape[1] - 1)  # one column is protected
    target_holes = int(eligible_cells * percentage)
    protected = df.columns.get_loc(exclude_col)
    rng = random.Random(seed)
    candidate_cols = [c for c in range(df.shape[1]) if c != protected]
    candidate_rows = list(range(df.shape[0]))
    chosen = set()
    # Keep drawing until the requested number of distinct cells is holed.
    while len(chosen) != target_holes:
        row = rng.choice(candidate_rows)
        col = rng.choice(candidate_cols)
        if (row, col) not in chosen:
            chosen.add((row, col))
            df.iloc[row, col] = np.nan
def eval_genomes(genomes, config):
    """NEAT fitness callback: score every genome on the training set.

    Fitness starts at -1 and is reduced by the squared error of the recurrent
    network's first output across all input patterns, so a perfect genome
    stays near -1. Relies on module-level X_train / y_train.
    """
    for genome_id, genome in genomes:
        genome.fitness = -1
        net = neat.nn.RecurrentNetwork.create(genome, config)
        for sample, target in zip(X_train, y_train):
            prediction = net.activate(sample)
            genome.fitness -= (prediction[0] - target) ** 2
def r0(filename, threshold, extreme_events_x, extreme_events_y, variable_name='runoff_obs'):
    """Flag extreme events where the jump from the current ramp start meets *threshold*.

    Extreme indices are appended to extreme_events_x and matching values to
    extreme_events_y, in place. The ramp-start index itself is appended once
    (without a value) when the very first pair already exceeds the threshold.
    """
    series = pd.read_csv(filename)[variable_name].tolist()
    n = len(series)
    start = 0
    cur = 1
    # Special-case the first pair: mark the ramp start itself.
    if abs(series[start] - series[cur]) >= threshold:
        extreme_events_x.append(start)
    while cur < n - 1:
        for _ in range(n - cur):
            if abs(series[start] - series[cur]) < threshold:
                break  # ramp over: cur is a normal point
            extreme_events_x.append(cur)
            extreme_events_y.append(series[cur])
            cur += 1
        start = cur
        cur += 1
def r1(filename, threshold, extreme_events_x, extreme_events_y, variable_name='runoff_obs'):
    """Flag extreme events where the spread (max - min) of the running ramp
    window reaches *threshold*.

    Extreme indices/values are appended to extreme_events_x / extreme_events_y
    in place; the first pair is pre-checked with a plain difference test and
    marks the ramp-start index only.
    """
    series = pd.read_csv(filename)[variable_name].tolist()
    n = len(series)
    start = 0
    cur = 1
    if abs(series[start] - series[cur]) >= threshold:
        extreme_events_x.append(start)
    while cur < n - 1:
        for _ in range(n - cur):
            window = series[start:cur + 1]
            if max(window) - min(window) < threshold:
                break  # ramp over: cur is a normal point
            extreme_events_x.append(cur)
            extreme_events_y.append(series[cur])
            cur += 1
        start = cur
        cur += 1
def r2(filename, threshold, extreme_events_x, extreme_events_y, variable_name='runoff_obs'):
    """Flag extreme events whose ramp rate |delta value| / delta index (measured
    from the current ramp start) reaches *threshold*.

    Normalising the data beforehand is advised. Extreme indices/values are
    appended to extreme_events_x / extreme_events_y in place.
    """
    series = pd.read_csv(filename)[variable_name].tolist()
    n = len(series)
    start = 0
    cur = 1
    # First pair: plain difference check, mirroring r0.
    if abs(series[start] - series[cur]) >= threshold:
        extreme_events_x.append(start)
    while cur < n - 1:
        for _ in range(n - cur):
            rate = abs(series[start] - series[cur]) / float(cur - start)
            if rate < threshold:
                break  # ramp over: cur is a normal point
            extreme_events_x.append(cur)
            extreme_events_y.append(series[cur])
            cur += 1
        start = cur
        cur += 1
def obtain_rc(data, i, j, beta):
    """Ramp-continuity indicator for the window data[i..j].

    Returns 0 as soon as some point p_m falls to or below beta times the
    running maximum of data[i..m], otherwise 1. beta must lie in [0, 1);
    an out-of-range beta aborts the process (original behaviour kept).
    """
    if not (0 <= beta < 1):
        print("rc beta should between 0 and 1")
        sys.exit()
    for m in range(i, j + 1):
        running_max = max(data[i:m + 1])
        if data[m] <= beta * running_max:
            return 0
    return 1
def ramp_detect(filename, threshold0, threshold2, extreme_events_x, extreme_events_y, beta, variable_name='runoff_obs'):
    """Combined ramp detector: a point counts as extreme only when the r0 jump
    test, the r2 ramp-rate test and the rc continuity test all pass.

    r1 is deliberately excluded (it misbehaved on the "prms_input_since 2003"
    data file). Extreme indices/values are appended in place.
    """
    series = pd.read_csv(filename)[variable_name].tolist()
    n = len(series)
    start = 0
    cur = 1
    first_jump = abs(series[start] - series[cur])
    if first_jump >= threshold0 and first_jump >= threshold2:
        extreme_events_x.append(start)
    while cur < n - 1:
        for _ in range(n - cur):
            jump = abs(series[start] - series[cur])
            passes_r0 = jump >= threshold0
            passes_r2 = (jump / float(cur - start)) >= threshold2
            passes_rc = obtain_rc(series, start, cur, beta) == 1
            if not (passes_r0 and passes_r2 and passes_rc):
                break  # ramp over: cur is a normal point
            extreme_events_x.append(cur)
            extreme_events_y.append(series[cur])
            cur += 1
        start = cur
        cur += 1
def calculate_ramp_score(filename, threshold0, threshold2, beta, variable_name='runoff_obs'):
    """Calculate a ramp score based on W(i, j).

    NOTE(review): unimplemented stub — always returns 0 and ignores all
    parameters. Fill in the W(i, j) based scoring before relying on it.
    """
    return 0
def backward_check_if_extreme_event(filename, threshold0, threshold2, extreme_events_x, extreme_events_y,
        normal_event_x, normal_event_y, beta, variable_name='runoff_obs'):
    """Classify every point (except the first) walking backwards through the series.

    Unlike ramp_detect, each point j is judged only against its immediate
    predecessor i = j - 1 with the r0 / r2 / rc tests; extreme points go to
    extreme_events_x/_y, all others to normal_event_x/_y (appended in place).
    """
    series = pd.read_csv(filename)[variable_name].tolist()
    n = len(series)
    # TODO(review, kept from original): a single-step lookback ignores the size
    # of the ramp ending at j; a windowed backward check would be more faithful.
    # The training data is still useful for tuning parameters such as beta.
    for j in range(n - 1, 0, -1):
        i = j - 1
        jump = abs(series[i] - series[j])
        passes_r0 = jump >= threshold0
        passes_r2 = (jump / float(j - i)) >= threshold2
        passes_rc = obtain_rc(series, i, j, beta) == 1
        if passes_r0 and passes_r2 and passes_rc:
            extreme_events_x.append(j)
            extreme_events_y.append(series[j])
        else:
            normal_event_x.append(j)
            normal_event_y.append(series[j])
def vis_two_list(list1_x, list1_y, list2_x, list2_y, fig_title=''):
    """Scatter-plot two series on one axis: (list1_x, list1_y) labelled
    'normal' and (list2_x, list2_y) labelled 'extreme', with a legend
    and an optional figure title."""
    fig, axis = plt.subplots()
    axis.scatter(list1_x, list1_y, label='normal')
    axis.scatter(list2_x, list2_y, label='extreme')
    axis.legend(loc='upper right', shadow=True)
    plt.title(fig_title)
    plt.show()
def vis_one_list(list1_x, list1_y, fig_title=''):
    """Line-plot a single series labelled 'normal', with a legend and an
    optional figure title."""
    fig, axis = plt.subplots()
    axis.plot(list1_x, list1_y, label='normal')
    axis.legend(loc='upper right', shadow=True)
    plt.title(fig_title)
    plt.show()
def vis_all(list1_x, list1_y, list2_x, list2_y, list3_x, list3_y):
    """Scatter-plot the normal, extreme and full series together
    (labels 'normal', 'extreme', 'all') with an enlarged legend."""
    fig, axis = plt.subplots()
    axis.scatter(list1_x, list1_y, label='normal')
    axis.scatter(list2_x, list2_y, label='extreme')
    axis.scatter(list3_x, list3_y, label='all')
    axis.legend(loc='upper right', shadow=True, prop={'size': 20})
    plt.show()
def collect_normal(filename, extreme_events_x, normal_event_x, normal_event_y, variable_name='runoff_obs'):
    """Append every index/value of the series that is NOT listed in
    extreme_events_x to normal_event_x / normal_event_y (the in-place
    complement of the extreme-event set)."""
    series = pd.read_csv(filename)[variable_name].tolist()
    for idx, value in enumerate(series):
        if idx not in extreme_events_x:
            normal_event_x.append(idx)
            normal_event_y.append(value)
def output_list_to_file(filename, list_x):
    """Write each element of the 1-D sequence *list_x* on its own line of a
    text file (one column of a CSV-like file).

    Bug fix: the file was opened in binary mode ('wb') while str data was
    written, which raises TypeError on Python 3; the file is now opened in
    text mode.
    """
    with open(filename, 'w') as result_file:
        for item in list_x:
            result_file.write(str(item) + '\n')
def generate_extreme_event_label(extreme_x_label, total_len):
    """Build a 0/1 label list of length *total_len*: 1 where the index appears
    in *extreme_x_label* (extreme event), 0 otherwise.

    Improvement: membership is tested against a set, turning the original
    O(total_len * len(extreme_x_label)) scan into O(total_len); the redundant
    "set to 0" branch (the list was already zero-initialised) is gone.
    """
    extreme_indices = set(extreme_x_label)
    return [1 if i in extreme_indices else 0 for i in range(total_len)]
def split_file_based_on_threshold(input_file, output_extreme_event_file, output_normal_event_file,
        variable_name='runoff_obs', threshold_col_name='threshold_method', label_method='label_method'):
    """Split *input_file* into two CSVs by comparing *variable_name* against
    the per-row threshold column: rows strictly below it go to the normal
    file, rows at or above it to the extreme file. The threshold and label
    helper columns are dropped from both outputs."""
    frame = pd.read_csv(input_file)
    below = frame[variable_name] < frame[threshold_col_name]
    at_or_above = frame[variable_name] >= frame[threshold_col_name]
    helper_cols = [threshold_col_name, label_method]
    frame[below].drop(columns=helper_cols).to_csv(output_normal_event_file, index=False)
    frame[at_or_above].drop(columns=helper_cols).to_csv(output_extreme_event_file, index=False)
def split_file_based_on_label(input_file, output_extreme_event_file, output_normal_event_file,
        variable_name='runoff_obs', threshold_col_name='threshold_method', label_method='label_method'):
    """Split *input_file* into two CSVs by the label column: label 0 rows go
    to the normal file, label 1 rows to the extreme file. The threshold and
    label helper columns are dropped from both outputs."""
    frame = pd.read_csv(input_file)
    helper_cols = [threshold_col_name, label_method]
    frame[frame[label_method] == 0].drop(columns=helper_cols).to_csv(output_normal_event_file, index=False)
    frame[frame[label_method] == 1].drop(columns=helper_cols).to_csv(output_extreme_event_file, index=False)
def accuracy_rate_cal(df, predict_extreme_index, extreme_event_ground_truth_col_name="Student_Flag",
        extreme_events_flag=1, time_col_name='Date', extreme_event_col_name='NO3N'):
    """Compare predicted extreme-event rows against an expert-flagged column.

    Builds binary ground-truth and prediction vectors over every row of *df*
    (two classes only: 1 extreme, 0 normal), prints their sums and the
    accuracy score, and returns the (timestamp, value) pairs of both the
    ground-truth and the predicted extreme events.

    extreme_event_ground_truth_col_name: column holding the expert flag.
    extreme_events_flag: value that marks an extreme event in that column.
    """
    flagged = df[df[extreme_event_ground_truth_col_name] == extreme_events_flag]
    truth_index = flagged.index.to_numpy()
    ground_truth = []
    truth_times = []
    truth_values = []
    predict_label = []
    predicted_times = []
    predicted_values = []
    # For more than two classes this row-wise encoding must be extended.
    for row in range(df.shape[0]):
        if row in truth_index:
            # row is an expert-flagged anomaly
            ground_truth.append(1)
            truth_times.append(df.iloc[row][time_col_name])
            truth_values.append(df.iloc[row][extreme_event_col_name])
        else:
            ground_truth.append(0)
        if row in predict_extreme_index:
            predict_label.append(1)
            predicted_times.append(df.iloc[row][time_col_name])
            predicted_values.append(df.iloc[row][extreme_event_col_name])
        else:
            predict_label.append(0)
    print("sum ground_truth: ", sum(ground_truth))
    print("sum predict_label: ", sum(predict_label))
    # accuracy_score = (TP + TN) / (TP + TN + FP + FN)
    print("Accuracy Score is: ", accuracy_score(ground_truth, predict_label))
    return truth_times, truth_values, predicted_times, predicted_values
def vis_normal_events_and_extreme_events(df, ground_truth_extreme_event_timestamp,
        ground_truth_extreme_event_values, predict_extreme_event_timestamp,
        predict_extreme_event_vales, time_col_name='Date', extreme_event_col_name='NO3N'):
    """Plot the full series plus ground-truth and predicted extreme events.

    *df* must carry a time column; its strings are parsed to datetimes with
    dateutil's parse (handles forms such as '8/2/2016 12:29'). Ground-truth
    extremes are drawn as hollow red circles, predictions as triangles.
    """
    timestamps = [parse(raw) for raw in df[time_col_name]]
    series_values = df[extreme_event_col_name]
    plt.xticks(rotation=45)  # tilt the datetime labels so they do not overlap
    plt.scatter(timestamps, series_values, label=extreme_event_col_name)
    plt.scatter(ground_truth_extreme_event_timestamp, ground_truth_extreme_event_values,
                facecolors='none', edgecolors='r', label="Ground Truth Extreme Event")
    plt.scatter(predict_extreme_event_timestamp, predict_extreme_event_vales,
                marker='^', label="Predicted Extreme Events")
    plt.legend()
    plt.show()
def count_nan_for_each_col(df):
    """Print per-column NaN counts, per-column NaN fractions and the total
    row count of *df*."""
    nan_counts = df.isna().sum()
    row_count = len(df.index)
    print("number of nan:")
    print(nan_counts)
    print("Percentage of nan:")
    print(nan_counts / row_count)
    print("total len is: ", row_count)
def get_time_stamp_array(df, time_col='timestamp'):
    """Convert the *time_col* column of *df* from strings to pandas datetimes."""
    raw_column = df[time_col]
    return pd.to_datetime(raw_column)
def gen_holes(df, df2, total_num=100):
    """Punch *total_num* NaN holes into *df* at unique random positions.

    *df2* must be an untouched copy of *df*: because *df* is mutated in
    place, the pre-hole values are read from *df2* when recording the
    ground truth.

    Bug fixes:
    - the original called the bare name ``randint`` which is never imported
      (only ``import random`` is), so every call raised NameError;
      ``random.randint`` is used instead.
    - the original checked ``[x, y] not in result_list`` against a list of
      *dicts*, so the duplicate test never matched and positions could be
      reused; coordinates are now tracked separately.

    Returns
    -------
    (df, holes) where holes is a list of
    {'coordinate': [row, col], 'value': original value} dicts.
    """
    # -1 because randint bounds are inclusive and iloc is 0-based
    max_row = df.shape[0] - 1
    max_col = df.shape[1] - 1
    holes = []
    seen = set()
    while len(holes) < total_num:
        x = random.randint(0, max_row)
        y = random.randint(0, max_col)
        if (x, y) not in seen:
            seen.add((x, y))
            holes.append({'coordinate': [x, y], 'value': df2.iloc[x, y]})
            df.iloc[x, y] = np.nan
    return df, holes
def accuracy_checking(result_np_array, ground_truth_list):
    """Compare imputed values against the recorded ground truth.

    result_np_array must have every NaN hole filled with a prediction;
    ground_truth_list is a list of {'coordinate': [x, y], 'value': v} dicts
    (as produced by gen_holes).

    Bug fix: the original called ``r2_score`` which is never imported (only
    accuracy_score is pulled from sklearn.metrics), so the function always
    raised NameError. The coefficient of determination is now computed
    inline: R^2 = 1 - SS_res / SS_tot.

    Returns (prediction, ground_truth) as parallel lists.
    """
    prediction = []
    ground_truth = []
    for element in ground_truth_list:
        x, y = element['coordinate']
        prediction.append(result_np_array[x, y])
        ground_truth.append(element['value'])
    # NOTE(review): a constant ground truth makes SS_tot zero and raises
    # ZeroDivisionError, matching sklearn's ill-defined R^2 in that case.
    mean_truth = sum(ground_truth) / len(ground_truth)
    ss_res = sum((t - p) ** 2 for t, p in zip(ground_truth, prediction))
    ss_tot = sum((t - mean_truth) ** 2 for t in ground_truth)
    print("R^2 is: ", 1 - ss_res / ss_tot)
    return prediction, ground_truth
def get_nse(list1, list2):
    """Nash-Sutcliffe efficiency of simulated values *list1* against
    observations *list2* (1 is a perfect match).

    Raises
    ------
    Exception
        If the two lists differ in length.
    """
    if len(list1) != len(list2):
        raise Exception('two lists have different lengths')
    observed_mean = sum(list2) / len(list2)
    residual_ss = 0
    variance_ss = 0
    for simulated, observed in zip(list1, list2):
        residual_ss += (simulated - observed) ** 2
        variance_ss += (observed - observed_mean) ** 2
    return 1 - residual_ss / variance_ss
def get_avg_gap_size(df, col_name):
    """Average length of the NaN runs ('gaps') in column *col_name* of *df*.

    Gap lengths come from the index distances between consecutive non-NaN
    entries, plus a possible trailing gap running to the bottom of the frame.
    Returns 0 (silently) when the column has no gaps; otherwise prints and
    returns the average gap length.
    """
    valid_index = df[col_name].dropna().index.tolist()
    # Distance between neighbours minus one: 0 means contiguous, k means a k-row gap.
    gaps = [b - a - 1 for a, b in zip(valid_index, valid_index[1:])]
    # Trailing gap from the last valid entry to the end of the frame.
    gaps.append(df.shape[0] - 1 - valid_index[-1])
    gaps = [g for g in gaps if g != 0]
    if not gaps:
        # no gap at all
        return 0
    avg_gap_size = sum(gaps) / len(gaps)
    print("Avg gap for {} is {}".format(col_name, avg_gap_size))
    return avg_gap_size
# CCRM starts from here
# this function is originally from https://code.sololearn.com/cqMD5wu2rhUJ/#py
def factors(n):
    '''Return every divisor of n from 1 up to n itself, in ascending order.'''
    return [1] + [d for d in range(2, n + 1) if n % d == 0]
def check_if_df_col_all_0(df):
    '''
    Return True if at least one column of df is entirely zeros.
    inspired by: https://stackoverflow.com/questions/21164910/how-do-i-delete-a-column-that-contains-only-zeros-in-pandas
    '''
    # per-column flag: True when the column contains any non-zero entry
    has_nonzero = (df != 0).any(axis=0)
    # some column is all zeros <=> not every column has a non-zero entry
    return not has_nonzero.all()
def reshape(df, curr_factor):
    '''
    Reshape df by cutting each column into chunks of curr_factor rows
    and laying the chunks out side by side (column-major order).
    Result has curr_factor rows and n_rows/curr_factor * n_cols columns.
    '''
    n_rows = df.shape[0]
    n_cols = df.shape[1]
    chunks_per_col = int(n_rows / curr_factor)
    # the very first chunk (first curr_factor rows of column 0) seeds the
    # result; as in the original loop it keeps its own index
    pieces = [df.iloc[:curr_factor, 0].copy()]
    for col_pos in range(n_cols):
        for chunk_pos in range(chunks_per_col):
            if col_pos == 0 and chunk_pos == 0:
                continue  # already used as the seed piece
            piece = df.iloc[chunk_pos * curr_factor:(chunk_pos + 1) * curr_factor, col_pos]
            pieces.append(piece.copy().reset_index(drop=True))
    if len(pieces) == 1:
        # nothing to concatenate: result is the single seed Series
        return pieces[0]
    return pd.concat(pieces, ignore_index=True, axis=1)
def ccrm(df, input_factor=-1):
    '''
    based on the paper An alternating direction method of
    multipliers based approach for pmu data recovery
    this function is used to reshape a matrix (n1 by n2)
    if input_factor == -1, the first (largest) usable factor is used;
    otherwise the given factor is used.
    Returns (reshaped_df, factor) on success, -1 when no factor qualifies
    or input_factor is not a qualified factor.
    '''
    n1 = df.shape[0]
    n2 = df.shape[1]
    n1_factors = factors(n1)
    n1_factor_decent = sorted(n1_factors, reverse=True)
    # do not check 1 and n1 itself
    check_factors = n1_factor_decent[1:-1]
    # the paper's shape rule (n1/factor >= n2 and ceil(n1/(factor+1)) <= n2)
    # is currently disabled, so every candidate factor qualifies
    qualified_factor = list(check_factors)
    print("Qualified factors include: "+str(qualified_factor))
    # if no qualified factor
    if qualified_factor == []:
        print("no qualified factor in n1")
        return -1
    if input_factor == -1:
        # NOTE: both branches below return, so only the first (largest)
        # qualified factor is ever tried
        for i in qualified_factor:
            curr_reshape = reshape(df, i)
            if check_if_df_col_all_0(curr_reshape) == False:
                # curr_reshape is one of the possible qualified reshapes
                print("After reshape, the current shape is:"+str(curr_reshape.shape))
                return curr_reshape, i
            else:
                print("Warning: Factor "+str(i)+
                      " causes at least one col with all 0s.")
                return curr_reshape, i
    elif input_factor in qualified_factor:
        # reshape with the caller-supplied factor
        curr_reshape = reshape(df, input_factor)
        if check_if_df_col_all_0(curr_reshape) == False:
            # curr_reshape is one of the possible qualified reshapes
            print("After reshape, the current shape is:"+str(curr_reshape.shape))
            return curr_reshape, input_factor
        else:
            print("Warning: Factor "+str(input_factor)+
                  " causes at least one col with all 0s.")
            return curr_reshape, input_factor
    else:
        print("Error: input_factor is not one of the qualified ones.")
        return -1
    # (unreachable trailing "no possible reshape" fallback removed:
    #  every path above returns)
def ccrm_reverse(reshape_df, original_n1, original_n2):
    '''
    Reverse the ccrm reshape back to the original matrix.
    original_n1 / original_n2 are the original row / column counts;
    the factor used by ccrm is reshape_df.shape[0].
    '''
    n_rows = reshape_df.shape[0]
    chunks_per_col = int(original_n1 / n_rows)
    restored_cols = []
    for col_id in range(original_n2):
        base = col_id * chunks_per_col
        # first chunk of this original column
        col = reshape_df.iloc[:, base]
        # stack the remaining chunks underneath it
        for offset in range(1, chunks_per_col):
            chunk = reshape_df.iloc[:, base + offset]
            col = pd.concat([col, chunk], ignore_index=True, axis=0)
        restored_cols.append(col)
    # glue the restored columns back side by side
    result_df = restored_cols[0]
    for col in restored_cols[1:]:
        result_df = pd.concat([result_df, col], ignore_index=True, axis=1)
    return result_df
# CCRM ends here
# row based reshape
def row_based_reshape(df, input_factor=-1):
    '''
    this function is different from CCRM
    CCRM reshapes a matrix col by col
    this function reshapes a matrix row by row
    this row based reshape can be thought of as the reverse of
    ccrm with transpose(df)
    try to draw a 6*4 matrix and reshape it to 2*12, it will be clearer
    Returns (reshaped_df, factor) on success, -1 otherwise.
    '''
    n1 = df.shape[0]
    n2 = df.shape[1]
    n1_factors = factors(n1)
    n1_factor_decent = sorted(n1_factors, reverse=True)
    # do not check 1 and n1 itself
    check_factors = n1_factor_decent[1:-1]
    # the paper's shape rule is currently disabled,
    # so every candidate factor qualifies
    qualified_factor = list(check_factors)
    print("Qualified factors (lags) include: "+str(qualified_factor))
    # if no qualified factor
    if qualified_factor == []:
        print("no qualified factor in n1")
        return -1
    df_transpose = df.transpose()
    # reorder every element from minimum to maximum
    qualified_factor.sort()
    if input_factor == -1:
        # NOTE: both branches below return, so only the first (smallest)
        # qualified factor is ever tried
        for i in qualified_factor:
            # i is the factor (lag)
            curr_reshape = ccrm_reverse(df_transpose, int(n2 * i), int(n1 / i))
            if check_if_df_col_all_0(curr_reshape) == False:
                # curr_reshape is one of the possible qualified reshapes
                print("After reshape, the current shape is:"+str(curr_reshape.transpose().shape))
                return curr_reshape.transpose(), i
            else:
                print("Warning: Factor "+str(i)+
                      " causes at least one col with all 0s.")
                return curr_reshape.transpose(), i
    elif input_factor in qualified_factor:
        # reshape with the caller-supplied factor
        curr_reshape = ccrm_reverse(df_transpose, int(n2 * input_factor), int(n1 / input_factor))
        if check_if_df_col_all_0(curr_reshape) == False:
            # curr_reshape is one of the possible qualified reshapes
            print("Orignal matrix shape is:"+str(n1)+", "+str(n2))
            print("After reshape, the current shape is:"+str(curr_reshape.transpose().shape))
            return curr_reshape.transpose(), input_factor
        else:
            print("Warning: Factor "+str(input_factor)+
                  " causes at least one col with all 0s.")
            return curr_reshape.transpose(), input_factor
    else:
        print("Error: input_factor is not one of the qualified ones.")
        return -1
    # (unreachable trailing "no possible reshape" fallback removed:
    #  every path above returns)
def row_based_reshape_reverse(reshape_df, original_n1, original_n2):
    '''
    this function reverses the row based reshape results to the original
    matrix, original_n1 is the original rows count
    original_n2 is the original cols count
    '''
    factor = original_n2
    reshape_df_tran = reshape_df.transpose()
    try:
        curr_reshape = reshape(reshape_df_tran, factor)
    # FIX: was a bare `except:` (PEP 8 E722); narrowed to Exception.
    # The error is still re-raised, so callers see the original failure.
    except Exception:
        print("Reshape does not work.")
        raise
    return curr_reshape.transpose()
# row based reshape ends here
def draw_autocorr(input_np_arr, input_maxlags=1000):
    '''
    Visualize the autocorrelation of a 1-D array against lag.

    Parameters:
        input_np_arr: 1-D numeric array,
            e.g. input can be df['q_cms'].values
        input_maxlags: largest lag (number of rows) to plot, default 1000

    Side effect: opens a matplotlib figure; plt.show() blocks until
    the window is closed.
    '''
    # NOTE(review): `plt` is assumed to be matplotlib.pyplot imported at
    # module level — confirm against the file's import block
    plt.acorr(input_np_arr, maxlags=input_maxlags)
    plt.title('Autocorrelation VS lag')
    plt.xlabel('Lag (number of rows)')
    plt.ylabel('Autocorrelation')
    # Display the autocorrelation plot
    plt.show()
def randomly_create_continous_gap(df, gap_size, col_name, extreme_events_arr):
    '''
    Randomly blank out a continuous run of `gap_size` entries of
    df[col_name]. The rows affected are the positional indices listed in
    extreme_events_arr[start_index:end_index].
    Returns (df, start_index, end_index) where the indices refer to
    positions within extreme_events_arr.
    '''
    total_len = len(extreme_events_arr)
    # pick a random window [start, start+gap_size) from the front
    start_index = random.randint(0, total_len - gap_size - 1)
    end_index = start_index + gap_size
    # FIX: was `df[col_name].iloc[index] = np.nan` — chained assignment,
    # which triggers SettingWithCopyWarning and becomes a silent no-op
    # under pandas copy-on-write; write positionally instead.
    col_pos = df.columns.get_loc(col_name)
    for row_pos in extreme_events_arr[start_index:end_index]:
        df.iloc[row_pos, col_pos] = np.nan
    return df, start_index, end_index
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.array",
"sys.exit",
"math.sinh",
"pandas.to_datetime",
"random.Random",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter",
"numpy.concatenate",
"random.randint",
"dateutil.parser.parse",
"matplotlib.pyplot.xticks",
"matplotlib.pyp... | [((698, 710), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (706, 710), True, 'import numpy as np\n'), ((4780, 4792), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4788, 4792), True, 'import numpy as np\n'), ((5776, 5795), 'random.Random', 'random.Random', (['seed'], {}), '(seed)\n', (5789, 5795), False, 'import random\n'), ((6960, 6981), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (6971, 6981), True, 'import pandas as pd\n'), ((7656, 7677), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (7667, 7677), True, 'import pandas as pd\n'), ((8454, 8475), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (8465, 8475), True, 'import pandas as pd\n'), ((9593, 9614), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (9604, 9614), True, 'import pandas as pd\n'), ((10839, 10860), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (10850, 10860), True, 'import pandas as pd\n'), ((12024, 12038), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12036, 12038), True, 'import matplotlib.pyplot as plt\n'), ((12258, 12278), 'matplotlib.pyplot.title', 'plt.title', (['fig_title'], {}), '(fig_title)\n', (12267, 12278), True, 'import matplotlib.pyplot as plt\n'), ((12280, 12290), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12288, 12290), True, 'import matplotlib.pyplot as plt\n'), ((12391, 12405), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12403, 12405), True, 'import matplotlib.pyplot as plt\n'), ((12576, 12596), 'matplotlib.pyplot.title', 'plt.title', (['fig_title'], {}), '(fig_title)\n', (12585, 12596), True, 'import matplotlib.pyplot as plt\n'), ((12598, 12608), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12606, 12608), True, 'import matplotlib.pyplot as plt\n'), ((12728, 12742), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12740, 12742), True, 'import 
matplotlib.pyplot as plt\n'), ((12950, 12960), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12958, 12960), True, 'import matplotlib.pyplot as plt\n'), ((13089, 13110), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (13100, 13110), True, 'import pandas as pd\n'), ((14119, 14142), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {}), '(input_file)\n', (14130, 14142), True, 'import pandas as pd\n'), ((14930, 14953), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {}), '(input_file)\n', (14941, 14953), True, 'import pandas as pd\n'), ((17777, 17800), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (17787, 17800), True, 'import matplotlib.pyplot as plt\n'), ((17835, 17892), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_time', 'values'], {'label': 'extreme_event_col_name'}), '(x_time, values, label=extreme_event_col_name)\n', (17846, 17892), True, 'import matplotlib.pyplot as plt\n'), ((17894, 18057), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ground_truth_extreme_event_timestamp', 'ground_truth_extreme_event_values'], {'facecolors': '"""none"""', 'edgecolors': '"""r"""', 'label': '"""Ground Truth Extreme Event"""'}), "(ground_truth_extreme_event_timestamp,\n ground_truth_extreme_event_values, facecolors='none', edgecolors='r',\n label='Ground Truth Extreme Event')\n", (17905, 18057), True, 'import matplotlib.pyplot as plt\n'), ((18051, 18174), 'matplotlib.pyplot.scatter', 'plt.scatter', (['predict_extreme_event_timestamp', 'predict_extreme_event_vales'], {'marker': '"""^"""', 'label': '"""Predicted Extreme Events"""'}), "(predict_extreme_event_timestamp, predict_extreme_event_vales,\n marker='^', label='Predicted Extreme Events')\n", (18062, 18174), True, 'import matplotlib.pyplot as plt\n'), ((18172, 18184), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18182, 18184), True, 'import matplotlib.pyplot as plt\n'), ((18186, 18196), 'matplotlib.pyplot.show', 'plt.show', 
([], {}), '()\n', (18194, 18196), True, 'import matplotlib.pyplot as plt\n'), ((18521, 18549), 'pandas.to_datetime', 'pd.to_datetime', (['df[time_col]'], {}), '(df[time_col])\n', (18535, 18549), True, 'import pandas as pd\n'), ((29397, 29443), 'matplotlib.pyplot.acorr', 'plt.acorr', (['input_np_arr'], {'maxlags': 'input_maxlags'}), '(input_np_arr, maxlags=input_maxlags)\n', (29406, 29443), True, 'import matplotlib.pyplot as plt\n'), ((29445, 29480), 'matplotlib.pyplot.title', 'plt.title', (['"""Autocorrelation VS lag"""'], {}), "('Autocorrelation VS lag')\n", (29454, 29480), True, 'import matplotlib.pyplot as plt\n'), ((29482, 29516), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Lag (number of rows)"""'], {}), "('Lag (number of rows)')\n", (29492, 29516), True, 'import matplotlib.pyplot as plt\n'), ((29518, 29547), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Autocorrelation"""'], {}), "('Autocorrelation')\n", (29528, 29547), True, 'import matplotlib.pyplot as plt\n'), ((29585, 29595), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29593, 29595), True, 'import matplotlib.pyplot as plt\n'), ((29877, 29920), 'random.randint', 'random.randint', (['(0)', '(total_len - gap_size - 1)'], {}), '(0, total_len - gap_size - 1)\n', (29891, 29920), False, 'import random\n'), ((1359, 1371), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1367, 1371), True, 'import numpy as np\n'), ((6499, 6546), 'neat.nn.RecurrentNetwork.create', 'neat.nn.RecurrentNetwork.create', (['genome', 'config'], {}), '(genome, config)\n', (6530, 6546), False, 'import neat\n'), ((9137, 9147), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9145, 9147), False, 'import sys\n'), ((17034, 17077), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['ground_truth', 'predict_label'], {}), '(ground_truth, predict_label)\n', (17048, 17077), False, 'from sklearn.metrics import accuracy_score\n'), ((17677, 17685), 'dateutil.parser.parse', 'parse', (['x'], {}), '(x)\n', (17682, 17685), False, 'from 
dateutil.parser import parse\n'), ((1870, 1919), 'numpy.concatenate', 'np.concatenate', (['(combined_matrix, reshape_matrix)'], {}), '((combined_matrix, reshape_matrix))\n', (1884, 1919), True, 'import numpy as np\n'), ((2072, 2130), 'math.sinh', 'math.sinh', (['((x - tmp_min) * logsinh_b + logsinh_a + epsilon)'], {}), '((x - tmp_min) * logsinh_b + logsinh_a + epsilon)\n', (2081, 2130), False, 'import math\n'), ((2941, 2989), 'math.ceil', 'math.ceil', (['(total_rows / row_based_reshape_factor)'], {}), '(total_rows / row_based_reshape_factor)\n', (2950, 2989), False, 'import math\n'), ((4062, 4097), 'math.ceil', 'math.ceil', (['col_based_reshape_factor'], {}), '(col_based_reshape_factor)\n', (4071, 4097), False, 'import math\n'), ((4961, 5025), 'numpy.append', 'np.append', (['array_filled', 'array_filled_extreme[tmp_extreme_index]'], {}), '(array_filled, array_filled_extreme[tmp_extreme_index])\n', (4970, 5025), True, 'import numpy as np\n'), ((26027, 26089), 'pandas.concat', 'pd.concat', (['[result_col, curr_chunk]'], {'ignore_index': '(True)', 'axis': '(0)'}), '([result_col, curr_chunk], ignore_index=True, axis=0)\n', (26036, 26089), True, 'import pandas as pd\n'), ((26175, 26236), 'pandas.concat', 'pd.concat', (['[result_df, result_col]'], {'ignore_index': '(True)', 'axis': '(1)'}), '([result_df, result_col], ignore_index=True, axis=1)\n', (26184, 26236), True, 'import pandas as pd\n'), ((1247, 1291), 'numpy.concatenate', 'np.concatenate', (['(upper_matrix, lower_matrix)'], {}), '((upper_matrix, lower_matrix))\n', (1261, 1291), True, 'import numpy as np\n'), ((1730, 1792), 'numpy.concatenate', 'np.concatenate', (['[reshape_matrix, np_array[start:end,]]'], {'axis': '(1)'}), '([reshape_matrix, np_array[start:end,]], axis=1)\n', (1744, 1792), True, 'import numpy as np\n'), ((5124, 5186), 'numpy.append', 'np.append', (['array_filled', 'array_filled_normal[tmp_normal_index]'], {}), '(array_filled, array_filled_normal[tmp_normal_index])\n', (5133, 5186), True, 'import 
numpy as np\n'), ((23158, 23216), 'pandas.concat', 'pd.concat', (['[result_df, curr_df]'], {'ignore_index': '(True)', 'axis': '(1)'}), '([result_df, curr_df], ignore_index=True, axis=1)\n', (23167, 23216), True, 'import pandas as pd\n')] |
import os
import random
import argparse
import multiprocessing
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data as data
from torchvision import models, transforms
import torchvision.datasets as datasets
from utils import Bar, config, mkdir_p, AverageMeter, accuracy
from datetime import datetime
from tensorboardX import SummaryWriter
def train(model, criterion, opt, softmax, bar, epoch, loader, losses, top1, top5, writer):
    """Run one training epoch: forward, loss, backward, optimizer step,
    progress-bar and TensorBoard logging.

    `losses`, `top1`, `top5` are AverageMeter accumulators updated in place;
    per-batch and per-epoch averages are written to `writer`.
    """
    model.train()
    for batch_idx, (inputs, labels) in enumerate(loader):
        logits = model(inputs.cuda())
        targets = labels.cuda()
        # BUG FIX: nn.CrossEntropyLoss applies log-softmax internally, so it
        # must receive raw logits. The original applied `softmax` first
        # (double softmax), which flattens the loss surface and gradients.
        loss = criterion(logits, targets)
        # softmax is monotonic per row, so top-k accuracy is unchanged
        outputs = softmax(logits)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.data.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))
        opt.zero_grad()
        loss.backward()
        opt.step()
        # plot progress
        bar.suffix = 'Epoch {epoch} - Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f} | ({batch}/{size})'.format(
            epoch=epoch,
            batch=batch_idx + 1,
            size=len(loader),
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=loss.item(),
            top1=top1.avg,
            top5=top5.avg,
        )
        n_iter = epoch * len(loader) + batch_idx + 1
        writer.add_scalar('Train/loss', loss.data.item(), n_iter)
        writer.add_scalar('Train/top1', prec1.data.item(), n_iter)
        writer.add_scalar('Train/top5', prec5.data.item(), n_iter)
        bar.next()
    writer.add_scalar('Avg.loss', losses.avg, epoch)
    writer.add_scalar('Avg.top1', top1.avg, epoch)
    writer.add_scalar('Avg.top5', top5.avg, epoch)
    bar.finish()
def test(model, criterion, softmax, bar, epoch, loader, losses, top1, top5, writer):
    """Evaluate the model for one epoch: loss/accuracy accumulation plus
    progress-bar and TensorBoard logging. No parameters are updated.
    """
    model.eval()
    # FIX: evaluation does not need gradients; no_grad saves memory/time
    # and prevents accidental autograd-graph growth.
    with torch.no_grad():
        for batch_idx, (inputs, labels) in enumerate(loader):
            logits = model(inputs.cuda())
            targets = labels.cuda()
            # BUG FIX: CrossEntropyLoss expects raw logits (it applies
            # log-softmax internally); the original fed it softmax output.
            loss = criterion(logits, targets)
            # softmax is monotonic per row, so top-k accuracy is unchanged
            outputs = softmax(logits)
            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            losses.update(loss.data.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            # plot progress
            bar.suffix = 'Epoch {epoch} - Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f} | ({batch}/{size})'.format(
                epoch=epoch,
                batch=batch_idx + 1,
                size=len(loader),
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=loss.item(),
                top1=top1.avg,
                top5=top5.avg,
            )
            n_iter = epoch * len(loader) + batch_idx + 1
            writer.add_scalar('Test/loss', loss.data.item(), n_iter)
            writer.add_scalar('Test/top1', prec1.data.item(), n_iter)
            writer.add_scalar('Test/top5', prec5.data.item(), n_iter)
            bar.next()
    writer.add_scalar('Avg.loss', losses.avg, epoch)
    writer.add_scalar('Avg.top1', top1.avg, epoch)
    writer.add_scalar('Avg.top5', top5.avg, epoch)
    bar.finish()
def main():
    """Parse CLI arguments, build a (optionally BYOL-pretrained) ResNet,
    fine-tune it on an ImageFolder dataset, and save the final weights.
    """
    # FIX: `args` was listed twice in the original global statement
    global parser, args
    # arguments
    parser = argparse.ArgumentParser(description='byol-lightning-test')
    # Architecture & hyper-parameter
    parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet',
                        help='model architecture: | [resnet, ...] (default: resnet18)')
    parser.add_argument('--depth', type=int, default=18, help='Model depth.')
    parser.add_argument('-c', '--checkpoint', default='../checkpoints', type=str, metavar='PATH',
                        help='path to save checkpoint (default: checkpoint)')
    parser.add_argument('--epoch', type=int, default=100, help='Epoch')
    parser.add_argument('--batch-size', type=int, default=32, help='Epoch')
    parser.add_argument('--lr', '--learning-rate', default=1, type=float,
                        metavar='LR', help='initial learning rate')
    parser.add_argument('--num-classes', type=int, default=100, help='Epoch')
    parser.add_argument('--from-scratch', action='store_true', default=False,
                        help='use pre-trained model')
    parser.add_argument('--tune-all', action='store_true', default=False,
                        help='use pre-trained model')
    # Device options
    parser.add_argument('--manualSeed', type=int, help='manual seed')
    parser.add_argument('--gpu-id', default='0', type=str,
                        help='id(s) for CUDA_VISIBLE_DEVICES')
    parser.add_argument('--model-path', '--mp', type=str,
                        help='byol trained model path')
    # Paths
    parser.add_argument('-d', '--dataset', default='neu', type=str)
    parser.add_argument('--image_folder', type=str, required=True,
                        help='path to your folder of images for self-supervised learning')
    parser.add_argument('--board-path', '--bp', default='../board', type=str,
                        help='tensorboardx path')
    parser.add_argument('--board-tag', '--tg', default='fine-tuned', type=str,
                        help='tensorboardx writer tag')
    args = parser.parse_args()
    # Use CUDA
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    use_cuda = torch.cuda.is_available()
    # Seed every RNG (python, numpy, CUDA) so runs are reproducible
    if args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    random.seed(args.manualSeed)
    np.random.seed(args.manualSeed)
    if use_cuda:
        torch.cuda.manual_seed_all(args.manualSeed)
    # constants
    args.image_size = 256
    args.workers = multiprocessing.cpu_count()
    args.task_time = datetime.now().isoformat()
    output_name = "{}{:d}-bs{:d}-lr{:.5f}-{}".format(args.arch,
                                                     args.depth,
                                                     args.batch_size,
                                                     args.lr,
                                                     args.board_tag)
    args.checkpoint = os.path.join(args.checkpoint, args.dataset, output_name, args.task_time)
    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)
    config.save_config(args, os.path.join(args.checkpoint, "config.txt"))
    writer_train = SummaryWriter(
        log_dir=os.path.join(args.board_path, args.dataset, output_name, args.task_time, "train"))
    writer_test = SummaryWriter(
        log_dir=os.path.join(args.board_path, args.dataset, output_name, args.task_time, "test"))
    # BUG FIX: was `args.arch is "resnet"` — identity comparison against a
    # string literal is implementation-dependent (SyntaxWarning); use ==.
    if args.arch == "resnet":
        if args.depth == 18:
            model = models.resnet18(pretrained=False).cuda()
        elif args.depth == 34:
            model = models.resnet34(pretrained=False).cuda()
        elif args.depth == 50:
            model = models.resnet50(pretrained=False).cuda()
        elif args.depth == 101:
            model = models.resnet101(pretrained=False).cuda()
        else:
            # BUG FIX: was `assert ("Not supported Depth")`, a no-op since a
            # non-empty string is always truthy; fail loudly instead.
            raise ValueError("Not supported Depth")
        if not args.from_scratch:
            checkpoint = torch.load(args.model_path)
            model.load_state_dict(checkpoint)
    print("\t==> Fine tune full layers? : {}".format(str(args.tune_all)))
    # Simple manual fine tuning logic:
    # if tune_all == False, only the last layer will be fine tuned
    if not args.tune_all:
        for param in model.parameters():
            param.requires_grad = False
    model.num_classes = args.num_classes
    num_ftrs = model.fc.in_features
    model.fc = nn.Linear(num_ftrs, args.num_classes)
    model = torch.nn.DataParallel(model).cuda()
    opt = torch.optim.Adam(model.parameters(), lr=args.lr)
    criterion = nn.CrossEntropyLoss().cuda()
    softmax = nn.Softmax(1).cuda()
    # Data loading code
    traindir = os.path.join(args.image_folder, 'train')
    testdir = os.path.join(args.image_folder, 'test')
    # currently unused: normalization is commented out in the transforms
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    trainloader = torch.utils.data.DataLoader(
        datasets.ImageFolder(traindir, transforms.Compose([
            transforms.Resize(args.image_size),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ColorJitter(0.4, 0.4, 0.4),
            transforms.ToTensor(),
            # normalize,
        ])),
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)
    testloader = torch.utils.data.DataLoader(
        datasets.ImageFolder(testdir, transforms.Compose([
            transforms.Resize(args.image_size),
            transforms.ToTensor(),
            # normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    losses_train = AverageMeter()
    top1_train = AverageMeter()
    top5_train = AverageMeter()
    losses_test = AverageMeter()
    top1_test = AverageMeter()
    top5_test = AverageMeter()
    for epoch in range(args.epoch):
        bar_train = Bar('Processing', max=len(trainloader))
        bar_test = Bar('Processing', max=len(testloader))
        train(model, criterion, opt, softmax, bar_train, epoch, trainloader, losses_train, top1_train, top5_train,
              writer_train)
        test(model, criterion, softmax, bar_test, epoch, testloader, losses_test, top1_test, top5_test,
             writer_test)
    # save your improved network
    torch.save(model.state_dict(), os.path.join(args.checkpoint, 'byol-finetune.pt'))
# Script entry point guard: run main() only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
| [
"torch.nn.CrossEntropyLoss",
"utils.mkdir_p",
"torchvision.models.resnet18",
"multiprocessing.cpu_count",
"torchvision.transforms.ColorJitter",
"torch.cuda.is_available",
"argparse.ArgumentParser",
"os.path.isdir",
"numpy.random.seed",
"torchvision.transforms.ToTensor",
"random.randint",
"torc... | [((3391, 3449), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""byol-lightning-test"""'}), "(description='byol-lightning-test')\n", (3414, 3449), False, 'import argparse\n'), ((5475, 5500), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5498, 5500), False, 'import torch\n'), ((5645, 5673), 'random.seed', 'random.seed', (['args.manualSeed'], {}), '(args.manualSeed)\n', (5656, 5673), False, 'import random\n'), ((5695, 5726), 'numpy.random.seed', 'np.random.seed', (['args.manualSeed'], {}), '(args.manualSeed)\n', (5709, 5726), True, 'import numpy as np\n'), ((5858, 5885), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (5883, 5885), False, 'import multiprocessing\n'), ((6287, 6359), 'os.path.join', 'os.path.join', (['args.checkpoint', 'args.dataset', 'output_name', 'args.task_time'], {}), '(args.checkpoint, args.dataset, output_name, args.task_time)\n', (6299, 6359), False, 'import os\n'), ((7750, 7787), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', 'args.num_classes'], {}), '(num_ftrs, args.num_classes)\n', (7759, 7787), True, 'import torch.nn as nn\n'), ((8016, 8056), 'os.path.join', 'os.path.join', (['args.image_folder', '"""train"""'], {}), "(args.image_folder, 'train')\n", (8028, 8056), False, 'import os\n'), ((8071, 8110), 'os.path.join', 'os.path.join', (['args.image_folder', '"""test"""'], {}), "(args.image_folder, 'test')\n", (8083, 8110), False, 'import os\n'), ((8127, 8202), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (8147, 8202), False, 'from torchvision import models, transforms\n'), ((9060, 9074), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (9072, 9074), False, 'from utils import Bar, config, mkdir_p, AverageMeter, accuracy\n'), ((9092, 9106), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', 
(9104, 9106), False, 'from utils import Bar, config, mkdir_p, AverageMeter, accuracy\n'), ((9124, 9138), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (9136, 9138), False, 'from utils import Bar, config, mkdir_p, AverageMeter, accuracy\n'), ((9157, 9171), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (9169, 9171), False, 'from utils import Bar, config, mkdir_p, AverageMeter, accuracy\n'), ((9188, 9202), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (9200, 9202), False, 'from utils import Bar, config, mkdir_p, AverageMeter, accuracy\n'), ((9219, 9233), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (9231, 9233), False, 'from utils import Bar, config, mkdir_p, AverageMeter, accuracy\n'), ((5594, 5618), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (5608, 5618), False, 'import random\n'), ((5752, 5795), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.manualSeed'], {}), '(args.manualSeed)\n', (5778, 5795), False, 'import torch\n'), ((6371, 6401), 'os.path.isdir', 'os.path.isdir', (['args.checkpoint'], {}), '(args.checkpoint)\n', (6384, 6401), False, 'import os\n'), ((6411, 6435), 'utils.mkdir_p', 'mkdir_p', (['args.checkpoint'], {}), '(args.checkpoint)\n', (6418, 6435), False, 'from utils import Bar, config, mkdir_p, AverageMeter, accuracy\n'), ((6465, 6508), 'os.path.join', 'os.path.join', (['args.checkpoint', '"""config.txt"""'], {}), "(args.checkpoint, 'config.txt')\n", (6477, 6508), False, 'import os\n'), ((7283, 7310), 'torch.load', 'torch.load', (['args.model_path'], {}), '(args.model_path)\n', (7293, 7310), False, 'import torch\n'), ((9730, 9779), 'os.path.join', 'os.path.join', (['args.checkpoint', '"""byol-finetune.pt"""'], {}), "(args.checkpoint, 'byol-finetune.pt')\n", (9742, 9779), False, 'import os\n'), ((5908, 5922), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5920, 5922), False, 'from datetime import datetime\n'), ((6561, 6646), 'os.path.join', 
'os.path.join', (['args.board_path', 'args.dataset', 'output_name', 'args.task_time', '"""train"""'], {}), "(args.board_path, args.dataset, output_name, args.task_time,\n 'train')\n", (6573, 6646), False, 'import os\n'), ((6693, 6778), 'os.path.join', 'os.path.join', (['args.board_path', 'args.dataset', 'output_name', 'args.task_time', '"""test"""'], {}), "(args.board_path, args.dataset, output_name, args.task_time, 'test'\n )\n", (6705, 6778), False, 'import os\n'), ((7801, 7829), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (7822, 7829), False, 'import torch\n'), ((7912, 7933), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (7931, 7933), True, 'import torch.nn as nn\n'), ((7955, 7968), 'torch.nn.Softmax', 'nn.Softmax', (['(1)'], {}), '(1)\n', (7965, 7968), True, 'import torch.nn as nn\n'), ((6855, 6888), 'torchvision.models.resnet18', 'models.resnet18', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (6870, 6888), False, 'from torchvision import models, transforms\n'), ((8359, 8393), 'torchvision.transforms.Resize', 'transforms.Resize', (['args.image_size'], {}), '(args.image_size)\n', (8376, 8393), False, 'from torchvision import models, transforms\n'), ((8407, 8440), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (8438, 8440), False, 'from torchvision import models, transforms\n'), ((8454, 8485), 'torchvision.transforms.RandomVerticalFlip', 'transforms.RandomVerticalFlip', ([], {}), '()\n', (8483, 8485), False, 'from torchvision import models, transforms\n'), ((8499, 8536), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['(0.4)', '(0.4)', '(0.4)'], {}), '(0.4, 0.4, 0.4)\n', (8521, 8536), False, 'from torchvision import models, transforms\n'), ((8550, 8571), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (8569, 8571), False, 'from torchvision import models, transforms\n'), ((8829, 8863), 
'torchvision.transforms.Resize', 'transforms.Resize', (['args.image_size'], {}), '(args.image_size)\n', (8846, 8863), False, 'from torchvision import models, transforms\n'), ((8877, 8898), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (8896, 8898), False, 'from torchvision import models, transforms\n'), ((6947, 6980), 'torchvision.models.resnet34', 'models.resnet34', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (6962, 6980), False, 'from torchvision import models, transforms\n'), ((7039, 7072), 'torchvision.models.resnet50', 'models.resnet50', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (7054, 7072), False, 'from torchvision import models, transforms\n'), ((7132, 7166), 'torchvision.models.resnet101', 'models.resnet101', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (7148, 7166), False, 'from torchvision import models, transforms\n')] |
# Code derived from tensorflow/tensorflow/models/image/imagenet/classify_image.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import sys
import tarfile
import numpy as np
from six.moves import urllib
import pathlib
from imageio import imread
import tensorflow as tf
import glob
import scipy.misc
import math
import sys
MODEL_DIR = '/tmp/imagenet'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
softmax = None
# Call this function with list of images. Each of elements should be a
# numpy array with values ranging from 0 to 255.
def get_inception_score(images, splits=10):
assert(len(images[0].shape) == 3)
assert(np.max(images[0]) > 10)
assert(np.min(images[0]) >= 0.0)
bs = 1
with tf.Session() as sess:
preds = []
n_batches = int(math.ceil(float(len(images)) / float(bs)))
for i in range(n_batches):
sys.stdout.write(".")
sys.stdout.flush()
inp = images[(i * bs):min((i + 1) * bs, len(images))]
inp = np.expand_dims(np.concatenate(inp, 0), 0)
pred = sess.run(softmax, {'ExpandDims:0': inp})
preds.append(pred)
preds = np.concatenate(preds, 0)
scores = []
for i in range(splits):
part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
return np.mean(scores), np.std(scores)
# This function is called automatically.
def _init_inception():
global softmax
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(MODEL_DIR, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
with tf.gfile.FastGFile(os.path.join(
MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
# Works with an arbitrary minibatch size.
with tf.Session() as sess:
pool3 = sess.graph.get_tensor_by_name('pool_3:0')
ops = pool3.graph.get_operations()
for op_idx, op in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
shape = [s.value for s in shape]
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
o.set_shape(tf.TensorShape(new_shape))
w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
logits = tf.matmul(tf.squeeze(pool3, [1, 2]), w)
softmax = tf.nn.softmax(logits)
def _handle_path(path):
if path.endswith('.npz'):
f = np.load(path)
m, s = f['mu'][:], f['sigma'][:]
f.close()
else:
path = pathlib.Path(path)
files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
x = [imread(str(fn)).astype(np.float32) for fn in files]
return x
def calculate_is_given_paths(path):
if softmax is None:
_init_inception()
if not os.path.exists(path):
raise RuntimeError("Invalid path: %s" % path)
images = _handle_path(path)
IS, _ = get_inception_score(images)
return IS
if __name__ == "__main__":
IS = calculate_is_given_paths(sys.argv[1])
print("IS: ", IS)
| [
"tarfile.open",
"numpy.log",
"tensorflow.nn.softmax",
"numpy.mean",
"pathlib.Path",
"tensorflow.Session",
"tensorflow.GraphDef",
"numpy.max",
"numpy.exp",
"numpy.concatenate",
"numpy.min",
"sys.stdout.flush",
"tensorflow.import_graph_def",
"numpy.std",
"numpy.sum",
"six.moves.urllib.re... | [((747, 764), 'numpy.max', 'np.max', (['images[0]'], {}), '(images[0])\n', (753, 764), True, 'import numpy as np\n'), ((780, 797), 'numpy.min', 'np.min', (['images[0]'], {}), '(images[0])\n', (786, 797), True, 'import numpy as np\n'), ((822, 834), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (832, 834), True, 'import tensorflow as tf\n'), ((1223, 1247), 'numpy.concatenate', 'np.concatenate', (['preds', '(0)'], {}), '(preds, 0)\n', (1237, 1247), True, 'import numpy as np\n'), ((2060, 2117), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['DATA_URL', 'filepath', '_progress'], {}), '(DATA_URL, filepath, _progress)\n', (2086, 2117), False, 'from six.moves import urllib\n'), ((2409, 2422), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2420, 2422), True, 'import tensorflow as tf\n'), ((2471, 2510), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (2490, 2510), True, 'import tensorflow as tf\n'), ((2562, 2574), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2572, 2574), True, 'import tensorflow as tf\n'), ((3229, 3250), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (3242, 3250), True, 'import tensorflow as tf\n'), ((3319, 3332), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (3326, 3332), True, 'import numpy as np\n'), ((3417, 3435), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (3429, 3435), False, 'import pathlib\n'), ((961, 982), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (977, 982), False, 'import sys\n'), ((991, 1009), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1007, 1009), False, 'import sys\n'), ((1539, 1554), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (1546, 1554), True, 'import numpy as np\n'), ((1556, 1570), 'numpy.std', 'np.std', (['scores'], {}), '(scores)\n', (1562, 1570), True, 'import numpy as np\n'), ((2023, 
2041), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2039, 2041), False, 'import sys\n'), ((2239, 2269), 'tarfile.open', 'tarfile.open', (['filepath', '"""r:gz"""'], {}), "(filepath, 'r:gz')\n", (2251, 2269), False, 'import tarfile\n'), ((3185, 3210), 'tensorflow.squeeze', 'tf.squeeze', (['pool3', '[1, 2]'], {}), '(pool3, [1, 2])\n', (3195, 3210), True, 'import tensorflow as tf\n'), ((1101, 1123), 'numpy.concatenate', 'np.concatenate', (['inp', '(0)'], {}), '(inp, 0)\n', (1115, 1123), True, 'import numpy as np\n'), ((1481, 1494), 'numpy.sum', 'np.sum', (['kl', '(1)'], {}), '(kl, 1)\n', (1487, 1494), True, 'import numpy as np\n'), ((1516, 1526), 'numpy.exp', 'np.exp', (['kl'], {}), '(kl)\n', (1522, 1526), True, 'import numpy as np\n'), ((1402, 1414), 'numpy.log', 'np.log', (['part'], {}), '(part)\n', (1408, 1414), True, 'import numpy as np\n'), ((3059, 3084), 'tensorflow.TensorShape', 'tf.TensorShape', (['new_shape'], {}), '(new_shape)\n', (3073, 3084), True, 'import tensorflow as tf\n'), ((1439, 1455), 'numpy.mean', 'np.mean', (['part', '(0)'], {}), '(part, 0)\n', (1446, 1455), True, 'import numpy as np\n')] |
import numpy as np
class Individual(object):
def __init__(self, individual_vector, max_inputs, search_space, index=0):
self.iv = individual_vector
self.mi = max_inputs
self.ss = search_space
# Generate config when generating individual
self.index = index
self.config_list = [oc.parse_config(iv) for iv, oc in zip(self.iv, self.ss.get_opeartion_config(self.index))]
self.code = np.concatenate(self.iv, axis=0)
def get_length(self):
return len(self.code)
def get_n_op(self):
return len(self.iv)
def copy(self):
return Individual(self.iv, self.mi, self.ss, index=self.index)
def generate_node_config(self):
return self.config_list
def update_individual(self, individual_vector):
return Individual(individual_vector, self.mi, self.ss, index=self.index)
def __eq__(self, other):
return np.array_equal(self.code, other.code)
def __str__(self):
return "code:" + str(self.code)
def __hash__(self):
return hash(str(self))
class MultipleBlockIndividual(object):
def __init__(self, individual_list):
self.individual_list = individual_list
self.code = np.concatenate([i.code for i in self.individual_list])
def get_individual(self, index):
return self.individual_list[index]
def generate_node_config(self, index):
return self.individual_list[index].generate_node_config()
def update_individual(self, individual_vector):
raise NotImplemented
def __eq__(self, other):
return np.array_equal(self.code, other.code)
def __str__(self):
return "code:" + str(self.code)
def __hash__(self):
return hash(str(self))
| [
"numpy.array_equal",
"numpy.concatenate"
] | [((439, 470), 'numpy.concatenate', 'np.concatenate', (['self.iv'], {'axis': '(0)'}), '(self.iv, axis=0)\n', (453, 470), True, 'import numpy as np\n'), ((921, 958), 'numpy.array_equal', 'np.array_equal', (['self.code', 'other.code'], {}), '(self.code, other.code)\n', (935, 958), True, 'import numpy as np\n'), ((1228, 1282), 'numpy.concatenate', 'np.concatenate', (['[i.code for i in self.individual_list]'], {}), '([i.code for i in self.individual_list])\n', (1242, 1282), True, 'import numpy as np\n'), ((1601, 1638), 'numpy.array_equal', 'np.array_equal', (['self.code', 'other.code'], {}), '(self.code, other.code)\n', (1615, 1638), True, 'import numpy as np\n')] |
import json
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
vocab_size = 1000
embedding_dim = 16
max_length = 32
trunc_type = 'post'
padding_type = 'post'
oov_tok = "<OOV>"
training_size = 350
with open("sarcasm.json", "r") as f:
data = json.load(f)
sentences = []
labels = []
urls = []
for item in data:
sentences.append(item['headline'])
labels.append(item['is_sarcastic'])
urls.append(item['article_link'])
training_sentences = sentences[:training_size]
testing_sentences = sentences[training_size:]
training_labels = labels[:training_size]
testing_labels = labels[training_size:]
training_labels = np.array(training_labels)
testing_labels = np.array(testing_labels)
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
print(f"word_index: {word_index}")
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences, maxlen=max_length,
padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length,
padding=padding_type, truncating=trunc_type)
print(f"sentences: {sentences}\ntraining_sequence: {training_sequences}\ntraining_padded:\n{training_padded}"
f"training_labels: \n{training_labels}")
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(24, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
num_epochs = 20
history = model.fit(training_padded, training_labels, epochs=num_epochs,verbose=2)
# validation_data=(testing_padded, testing_labels), verbose=1)
import matplotlib.pyplot as plt
def plot_graphs(history, string):
plt.plot(history.history[string])
# plt.plot(history.history['val_'+string])
plt.xlabel('Epochs')
plt.ylabel(string)
# plt.legend([string, 'val_'+string])
plt.show()
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
return ' '.join([reverse_word_index.get(i, '?') for i in text])
print(decode_review(training_padded[1]))
print(training_sentences[1]) | [
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"tensorflow.keras.layers.Embedding",
"numpy.array",
"tensorflow.keras.preprocessing.text.Tokenizer",
"tensorflow.keras.layers.GlobalAveragePooling1D",
"tensorflo... | [((745, 770), 'numpy.array', 'np.array', (['training_labels'], {}), '(training_labels)\n', (753, 770), True, 'import numpy as np\n'), ((788, 812), 'numpy.array', 'np.array', (['testing_labels'], {}), '(testing_labels)\n', (796, 812), True, 'import numpy as np\n'), ((826, 876), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'vocab_size', 'oov_token': 'oov_tok'}), '(num_words=vocab_size, oov_token=oov_tok)\n', (835, 876), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((1079, 1180), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['training_sequences'], {'maxlen': 'max_length', 'padding': 'padding_type', 'truncating': 'trunc_type'}), '(training_sequences, maxlen=max_length, padding=padding_type,\n truncating=trunc_type)\n', (1092, 1180), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((1295, 1395), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['testing_sequences'], {'maxlen': 'max_length', 'padding': 'padding_type', 'truncating': 'trunc_type'}), '(testing_sequences, maxlen=max_length, padding=padding_type,\n truncating=trunc_type)\n', (1308, 1395), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((363, 375), 'json.load', 'json.load', (['f'], {}), '(f)\n', (372, 375), False, 'import json\n'), ((2200, 2233), 'matplotlib.pyplot.plot', 'plt.plot', (['history.history[string]'], {}), '(history.history[string])\n', (2208, 2233), True, 'import matplotlib.pyplot as plt\n'), ((2285, 2305), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (2295, 2305), True, 'import matplotlib.pyplot as plt\n'), ((2310, 2328), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['string'], {}), '(string)\n', (2320, 2328), True, 'import matplotlib.pyplot as plt\n'), ((2375, 2385), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2383, 2385), True, 'import matplotlib.pyplot 
as plt\n'), ((1617, 1694), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['vocab_size', 'embedding_dim'], {'input_length': 'max_length'}), '(vocab_size, embedding_dim, input_length=max_length)\n', (1642, 1694), True, 'import tensorflow as tf\n'), ((1700, 1740), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'tf.keras.layers.GlobalAveragePooling1D', ([], {}), '()\n', (1738, 1740), True, 'import tensorflow as tf\n'), ((1746, 1790), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(24)'], {'activation': '"""relu"""'}), "(24, activation='relu')\n", (1767, 1790), True, 'import tensorflow as tf\n'), ((1796, 1842), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1817, 1842), True, 'import tensorflow as tf\n')] |
import numpy as np
import astropy.units as u
from ..spectra.spectrum1d import Spectrum1D
from ..fitting.continuum import fit_generic_continuum
def single_peak_continuum():
np.random.seed(0)
x = np.linspace(0., 10., 200)
y_single = 3 * np.exp(-0.5 * (x - 6.3)**2 / 0.1**2)
y_single += np.random.normal(0., 0.2, x.shape)
y_continuum = 3.2 * np.exp(-0.5 * (x - 5.6)**2 / 4.8**2)
y_single += y_continuum
return x, y_single
def test_continuum_fit():
"""
This test fits the the first simulated spectrum from the fixture. The
initial guesses are manually set here with bounds that essentially make
sense as the functionality of the test is to make sure the fit works and
we get a reasonable answer out **given** good initial guesses.
"""
x_single_continuum, y_single_continuum = single_peak_continuum()
s_single_continuum = Spectrum1D(flux=y_single_continuum*u.Jy, spectral_axis=x_single_continuum*u.um)
g1_fit = fit_generic_continuum(s_single_continuum)
y_continuum_fitted = g1_fit(s_single_continuum.spectral_axis)
y_continuum_fitted_expected = np.array([1.71364056, 1.87755574, 2.05310622, 2.23545755, 2.41977527,
2.60122493, 2.77497207, 2.93618225, 3.080021, 3.20165388,
3.29624643, 3.3589642, 3.38497273, 3.36943758, 3.30752428,
3.19439839, 3.02522545, 2.79517101, 2.49940062, 2.13307982])
assert np.allclose(y_continuum_fitted.value[::10], y_continuum_fitted_expected, atol=1e-5)
def test_continuum_calculation():
"""
This test fits the the first simulated spectrum from the fixture. The
initial guesses are manually set here with bounds that essentially make
sense as the functionality of the test is to make sure the fit works and
we get a reasonable answer out **given** good initial guesses.
"""
x_single_continuum, y_single_continuum = single_peak_continuum()
spectrum = Spectrum1D(flux=y_single_continuum*u.Jy, spectral_axis=x_single_continuum*u.um)
g1_fit = fit_generic_continuum(spectrum)
spectrum_normalized = spectrum / g1_fit(spectrum.spectral_axis)
y_continuum_fitted_expected = np.array([1.15139925, 0.98509363, 0.73700614, 1.00911864, 0.913129,
0.93145533, 0.94904202, 1.04162879, 0.90851397, 0.9494352,
1.07812394, 1.06376489, 0.98705237, 0.94569623, 0.83502377,
0.91909416, 0.89662208, 1.01458511, 0.96124191, 0.94847744])
assert np.allclose(spectrum_normalized.flux.value[::10], y_continuum_fitted_expected, atol=1e-5)
| [
"numpy.random.normal",
"numpy.allclose",
"numpy.exp",
"numpy.array",
"numpy.linspace",
"numpy.random.seed"
] | [((180, 197), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (194, 197), True, 'import numpy as np\n'), ((206, 233), 'numpy.linspace', 'np.linspace', (['(0.0)', '(10.0)', '(200)'], {}), '(0.0, 10.0, 200)\n', (217, 233), True, 'import numpy as np\n'), ((304, 339), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(0.2)', 'x.shape'], {}), '(0.0, 0.2, x.shape)\n', (320, 339), True, 'import numpy as np\n'), ((1123, 1385), 'numpy.array', 'np.array', (['[1.71364056, 1.87755574, 2.05310622, 2.23545755, 2.41977527, 2.60122493, \n 2.77497207, 2.93618225, 3.080021, 3.20165388, 3.29624643, 3.3589642, \n 3.38497273, 3.36943758, 3.30752428, 3.19439839, 3.02522545, 2.79517101,\n 2.49940062, 2.13307982]'], {}), '([1.71364056, 1.87755574, 2.05310622, 2.23545755, 2.41977527, \n 2.60122493, 2.77497207, 2.93618225, 3.080021, 3.20165388, 3.29624643, \n 3.3589642, 3.38497273, 3.36943758, 3.30752428, 3.19439839, 3.02522545, \n 2.79517101, 2.49940062, 2.13307982])\n', (1131, 1385), True, 'import numpy as np\n'), ((1515, 1603), 'numpy.allclose', 'np.allclose', (['y_continuum_fitted.value[::10]', 'y_continuum_fitted_expected'], {'atol': '(1e-05)'}), '(y_continuum_fitted.value[::10], y_continuum_fitted_expected,\n atol=1e-05)\n', (1526, 1603), True, 'import numpy as np\n'), ((2260, 2521), 'numpy.array', 'np.array', (['[1.15139925, 0.98509363, 0.73700614, 1.00911864, 0.913129, 0.93145533, \n 0.94904202, 1.04162879, 0.90851397, 0.9494352, 1.07812394, 1.06376489, \n 0.98705237, 0.94569623, 0.83502377, 0.91909416, 0.89662208, 1.01458511,\n 0.96124191, 0.94847744]'], {}), '([1.15139925, 0.98509363, 0.73700614, 1.00911864, 0.913129, \n 0.93145533, 0.94904202, 1.04162879, 0.90851397, 0.9494352, 1.07812394, \n 1.06376489, 0.98705237, 0.94569623, 0.83502377, 0.91909416, 0.89662208,\n 1.01458511, 0.96124191, 0.94847744])\n', (2268, 2521), True, 'import numpy as np\n'), ((2652, 2746), 'numpy.allclose', 'np.allclose', (['spectrum_normalized.flux.value[::10]', 
'y_continuum_fitted_expected'], {'atol': '(1e-05)'}), '(spectrum_normalized.flux.value[::10],\n y_continuum_fitted_expected, atol=1e-05)\n', (2663, 2746), True, 'import numpy as np\n'), ((251, 291), 'numpy.exp', 'np.exp', (['(-0.5 * (x - 6.3) ** 2 / 0.1 ** 2)'], {}), '(-0.5 * (x - 6.3) ** 2 / 0.1 ** 2)\n', (257, 291), True, 'import numpy as np\n'), ((364, 404), 'numpy.exp', 'np.exp', (['(-0.5 * (x - 5.6) ** 2 / 4.8 ** 2)'], {}), '(-0.5 * (x - 5.6) ** 2 / 4.8 ** 2)\n', (370, 404), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.style.use('ggplot')
mpl.rcParams['text.color'] = 'black'
mpl.rcParams['savefig.facecolor'] = 'white'
mpl.rcParams['savefig.bbox'] = 'tight'
mpl.rcParams['axes.labelcolor'] = '#000000'
mpl.rcParams['xtick.color'] = '#000000'
mpl.rcParams['ytick.color'] = '#000000'
mpl.rcParams['legend.facecolor'] = '#ffffff'
mpl.rcParams['lines.linewidth'] = 0.8
def single_result(output_file: str, populations: np.ndarray) -> None:
assert populations.ndim == 2
plt.plot(populations[:, 0])
plt.plot(populations[:, 1])
plt.plot(populations[:, 2])
plt.ylim(0.0, np.max(populations) * 1.01)
plt.savefig(output_file, dpi=300)
plt.clf()
def alpha_merged(output_file: str, populations: np.ndarray) -> None:
for population in populations:
plt.plot(population, alpha=0.5)
plt.xlabel('t')
plt.ylabel('population')
plt.ylim(0.0, np.max(populations) * 1.01)
plt.savefig(output_file, dpi=300)
plt.clf()
def all(output_dir: str, all_populations: np.ndarray) -> None:
assert all_populations.shape[2] == 3
# data_num = all_populations.shape[0]
# years = all_populations.shape[1]
output_file = os.path.join(output_dir, 'single_result.png')
single_result(output_file, all_populations[0])
# population results for each species
species_populations = [
all_populations[:, :, 0],
all_populations[:, :, 1],
all_populations[:, :, 2],
]
for i, species_population in enumerate(species_populations):
output_file = os.path.join(output_dir, 'alpha_merged_{}.png'.format(i))
alpha_merged(output_file, species_population)
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.style.use",
"os.path.join",
"numpy.max"
] | [((112, 135), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (125, 135), True, 'import matplotlib.pyplot as plt\n'), ((573, 600), 'matplotlib.pyplot.plot', 'plt.plot', (['populations[:, 0]'], {}), '(populations[:, 0])\n', (581, 600), True, 'import matplotlib.pyplot as plt\n'), ((605, 632), 'matplotlib.pyplot.plot', 'plt.plot', (['populations[:, 1]'], {}), '(populations[:, 1])\n', (613, 632), True, 'import matplotlib.pyplot as plt\n'), ((637, 664), 'matplotlib.pyplot.plot', 'plt.plot', (['populations[:, 2]'], {}), '(populations[:, 2])\n', (645, 664), True, 'import matplotlib.pyplot as plt\n'), ((717, 750), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_file'], {'dpi': '(300)'}), '(output_file, dpi=300)\n', (728, 750), True, 'import matplotlib.pyplot as plt\n'), ((755, 764), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (762, 764), True, 'import matplotlib.pyplot as plt\n'), ((916, 931), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (926, 931), True, 'import matplotlib.pyplot as plt\n'), ((936, 960), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""population"""'], {}), "('population')\n", (946, 960), True, 'import matplotlib.pyplot as plt\n'), ((1011, 1044), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_file'], {'dpi': '(300)'}), '(output_file, dpi=300)\n', (1022, 1044), True, 'import matplotlib.pyplot as plt\n'), ((1049, 1058), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1056, 1058), True, 'import matplotlib.pyplot as plt\n'), ((1266, 1311), 'os.path.join', 'os.path.join', (['output_dir', '"""single_result.png"""'], {}), "(output_dir, 'single_result.png')\n", (1278, 1311), False, 'import os\n'), ((879, 910), 'matplotlib.pyplot.plot', 'plt.plot', (['population'], {'alpha': '(0.5)'}), '(population, alpha=0.5)\n', (887, 910), True, 'import matplotlib.pyplot as plt\n'), ((684, 703), 'numpy.max', 'np.max', (['populations'], {}), '(populations)\n', (690, 703), True, 
'import numpy as np\n'), ((979, 998), 'numpy.max', 'np.max', (['populations'], {}), '(populations)\n', (985, 998), True, 'import numpy as np\n')] |
import numpy as np
import os
from collections import defaultdict
import auditing_args
res_dir = os.path.join(auditing_args.args['save_dir'], 'results')
print(res_dir)
all_nps = [f for f in os.listdir(res_dir) if f.endswith('.npy') and f.startswith('batch')]
def parse_name(fname):
splt = fname.split('-')
splt[7] = splt[7][:-4]
return tuple([splt[v] for v in [1, 4, 5, 6, 7]])
print(all_nps[:5])
print([parse_name(n) for n in all_nps[:5]])
combined = defaultdict(list)
for arr_f in all_nps:
arr = np.load(os.path.join(res_dir, arr_f), allow_pickle=True)
print(arr_f, parse_name(arr_f))
combined[parse_name(arr_f)].append(arr)
for name in combined:
print(combined[name])
for name in combined:
print(name, np.concatenate(combined[name]).ravel().shape)
np.save(os.path.join(res_dir, '-'.join(['bkd'] + list(name))), np.concatenate(combined[name]).ravel())
| [
"numpy.concatenate",
"os.listdir",
"os.path.join",
"collections.defaultdict"
] | [((97, 152), 'os.path.join', 'os.path.join', (["auditing_args.args['save_dir']", '"""results"""'], {}), "(auditing_args.args['save_dir'], 'results')\n", (109, 152), False, 'import os\n'), ((465, 482), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (476, 482), False, 'from collections import defaultdict\n'), ((190, 209), 'os.listdir', 'os.listdir', (['res_dir'], {}), '(res_dir)\n', (200, 209), False, 'import os\n'), ((524, 552), 'os.path.join', 'os.path.join', (['res_dir', 'arr_f'], {}), '(res_dir, arr_f)\n', (536, 552), False, 'import os\n'), ((854, 884), 'numpy.concatenate', 'np.concatenate', (['combined[name]'], {}), '(combined[name])\n', (868, 884), True, 'import numpy as np\n'), ((741, 771), 'numpy.concatenate', 'np.concatenate', (['combined[name]'], {}), '(combined[name])\n', (755, 771), True, 'import numpy as np\n')] |
import numpy as np
import h5py
from sklearn.preprocessing import StandardScaler
from scipy import signal as sig
from pathlib import Path
from net import RNN
# LOADING
def check_load(p="project_datasets/"):
return Path(p+"pre_data.mat").exists()
def save_pre_processed(X, y, Xval, yval, Xtest, ytest, p="project_datasets/"):
dataset = h5py.File(p+'pre_data.mat', 'w')
dataset.create_dataset('train_x', data=X)
dataset.create_dataset('train_y', data=y)
dataset.create_dataset('val_x', data=Xval)
dataset.create_dataset('val_y', data=yval)
dataset.create_dataset('test_x', data=Xtest)
dataset.create_dataset('test_y', data=ytest)
dataset.close()
def load_pre_processed(folder_path="project_datasets/"):
dataset = h5py.File(folder_path+'pre_data.mat', 'r')
X = np.copy(dataset.get('train_x'))
y = np.copy(dataset.get('train_y'))
Xv = np.copy(dataset.get('val_x'))
yv = np.copy(dataset.get('val_y'))
Xt = np.copy(dataset.get('test_x'))
yt = np.copy(dataset.get('test_y'))
return X, y, Xv, yv, Xt, yt
def load(number, folder_path="project_datasets/"):
folder_path += 'A0%dT_slice.mat' % number
A01T = h5py.File(folder_path, 'r')
X = np.copy(A01T['image'])
y = np.copy(A01T['type'])
y = y[0, 0:X.shape[0]:1]
y = np.asarray(y, dtype=np.int32)
return X, y
def recode_y(y):
return np.subtract(y, np.min(y))
# PREPROCESS
def add_noise(X, std=0.02):
G = np.random.normal(0, std, X.shape)
return np.add(G, X)
def normalize_data(X, mean=None, var=None, axis=0, kdims=True):
if mean is None or var is None:
mean = np.mean(X, axis=axis, keepdims=kdims)
var = np.var(X, axis=axis, keepdims=kdims)
return np.divide(np.subtract(X, mean), np.sqrt(var) + 1e-7), mean, var
def standardize_data(X, mean=None, var=None, axis=None):
if mean is None or var is None:
mean = np.mean(X)
var = np.var(X)
return np.divide(np.subtract(X, mean), np.sqrt(var) + 1e-7), mean, var
def percentile(X, p=5):
q1 = np.percentile(X, p)
q3 = np.percentile(X, 100-p)
X[X < q1] = q1
X[X > q3] = q3
return X
def butter_filter(X, hz=4, filter='highpass', order=3):
f = sig.butter(order, hz / 125, filter, False, 'ba')
return sig.lfilter(f[0], f[1], X, axis=2)
def butter_band(X, hzl, hzh, filter='bandpass', order=3):
f = sig.butter(order, (hzl/125, hzh/125), filter, False, 'ba')
return sig.lfilter(f[0], f[1], X, axis=2)
def drop_nan(X, y):
idx = np.unique(np.argwhere(np.isnan(X))[:,0])
X = np.delete(X, idx, axis=0)
y = np.delete(y, idx, axis=0)
return X, y
def expand_dims(X, Xv, Xt):
return np.expand_dims(X, 3), np.expand_dims(Xv, 3), np.expand_dims(Xt, 3)
def skip_in(X, n=125):
return X[:, :, n:]
def swap_axis(X):
return np.moveaxis(X, 1, 2)
# AUGMENT
def augment_noise(X, y, p=0.25):
N = X.shape[0]
added = int(N * p)
idx = np.arange(0, N)
np.random.shuffle(idx)
Xtra = X[idx[:added]]
noise = np.random.normal(1, 0.05, (added, X.shape[1], X.shape[2]))
Xtra = np.multiply(Xtra, noise)
Xtra = np.concatenate((X, Xtra), axis=0)
ytra = np.concatenate((y, y[idx[:added]]), axis=0)
return Xtra, ytra
def windowing(X, y, n_start=0, window=200, stride=50):
N, C, T = X.shape
s0 = int((T-window - n_start) / stride) + 1
Xf = np.zeros((s0*N, C, window))
t = 0
while(n_start + t*stride + window <= T):
Xf[t*N:(t+1)*N] = X[:, :, n_start+t*stride:n_start+t*stride+window]
t += 1
return Xf, np.vstack([y]*t)
def augment_frequency(X, freqz, ceiling=True):
N = len(freqz)
a, b, c = X.shape
if ceiling:
d = b * N
else:
d = b * (N-1)
Xf = np.zeros((a, d, c))
for i in range(0, N):
if i == N-1:
if ceiling:
Xf[:, i * b:(i + 1) * b, :] = butter_band(X, freqz[i], 124)
else:
Xf[:, i * b:(i + 1) * b, :] = butter_band(X, freqz[i], freqz[i + 1])
return Xf
def diff(X, o = 1,axis=2):
a, b, c = X.shape
Xf = np.zeros((a, (1+o)*b, c))
Xf[:, :b, :] = X
Xf[:, b:2*b, :] = np.gradient(X,axis=axis)
if o ==2:
Xf[:, 2*b:, :] = np.gradient(Xf[:, b:2*b, :], axis=axis)
return Xf
def power(X, w=40, axis=2, l=22):
a, b, c = X.shape
Xf = np.zeros((a, b+l, c))
p = np.array([np.sum(np.square(X[:, :l, i-(w-1):i+1]), axis=axis) if i>(w-1) else np.sum(np.square(X[:, :l, :i+1]), axis=axis) for i in range(c)])
Xf[:, b:, :] = np.swapaxes(np.swapaxes(p/w**2, 0, 2), 0, 1)
return Xf
def shuffle(X, y):
idx = np.arange(X.shape[0])
np.random.shuffle(idx)
return X[idx], y[idx] | [
"numpy.sqrt",
"numpy.moveaxis",
"numpy.gradient",
"numpy.arange",
"numpy.mean",
"numpy.multiply",
"pathlib.Path",
"numpy.delete",
"numpy.asarray",
"numpy.subtract",
"numpy.vstack",
"numpy.concatenate",
"numpy.min",
"numpy.random.normal",
"numpy.add",
"h5py.File",
"numpy.square",
"n... | [((346, 380), 'h5py.File', 'h5py.File', (["(p + 'pre_data.mat')", '"""w"""'], {}), "(p + 'pre_data.mat', 'w')\n", (355, 380), False, 'import h5py\n'), ((760, 804), 'h5py.File', 'h5py.File', (["(folder_path + 'pre_data.mat')", '"""r"""'], {}), "(folder_path + 'pre_data.mat', 'r')\n", (769, 804), False, 'import h5py\n'), ((1189, 1216), 'h5py.File', 'h5py.File', (['folder_path', '"""r"""'], {}), "(folder_path, 'r')\n", (1198, 1216), False, 'import h5py\n'), ((1225, 1247), 'numpy.copy', 'np.copy', (["A01T['image']"], {}), "(A01T['image'])\n", (1232, 1247), True, 'import numpy as np\n'), ((1256, 1277), 'numpy.copy', 'np.copy', (["A01T['type']"], {}), "(A01T['type'])\n", (1263, 1277), True, 'import numpy as np\n'), ((1315, 1344), 'numpy.asarray', 'np.asarray', (['y'], {'dtype': 'np.int32'}), '(y, dtype=np.int32)\n', (1325, 1344), True, 'import numpy as np\n'), ((1469, 1502), 'numpy.random.normal', 'np.random.normal', (['(0)', 'std', 'X.shape'], {}), '(0, std, X.shape)\n', (1485, 1502), True, 'import numpy as np\n'), ((1514, 1526), 'numpy.add', 'np.add', (['G', 'X'], {}), '(G, X)\n', (1520, 1526), True, 'import numpy as np\n'), ((2065, 2084), 'numpy.percentile', 'np.percentile', (['X', 'p'], {}), '(X, p)\n', (2078, 2084), True, 'import numpy as np\n'), ((2094, 2119), 'numpy.percentile', 'np.percentile', (['X', '(100 - p)'], {}), '(X, 100 - p)\n', (2107, 2119), True, 'import numpy as np\n'), ((2237, 2285), 'scipy.signal.butter', 'sig.butter', (['order', '(hz / 125)', 'filter', '(False)', '"""ba"""'], {}), "(order, hz / 125, filter, False, 'ba')\n", (2247, 2285), True, 'from scipy import signal as sig\n'), ((2298, 2332), 'scipy.signal.lfilter', 'sig.lfilter', (['f[0]', 'f[1]', 'X'], {'axis': '(2)'}), '(f[0], f[1], X, axis=2)\n', (2309, 2332), True, 'from scipy import signal as sig\n'), ((2401, 2463), 'scipy.signal.butter', 'sig.butter', (['order', '(hzl / 125, hzh / 125)', 'filter', '(False)', '"""ba"""'], {}), "(order, (hzl / 125, hzh / 125), filter, False, 
'ba')\n", (2411, 2463), True, 'from scipy import signal as sig\n'), ((2471, 2505), 'scipy.signal.lfilter', 'sig.lfilter', (['f[0]', 'f[1]', 'X'], {'axis': '(2)'}), '(f[0], f[1], X, axis=2)\n', (2482, 2505), True, 'from scipy import signal as sig\n'), ((2588, 2613), 'numpy.delete', 'np.delete', (['X', 'idx'], {'axis': '(0)'}), '(X, idx, axis=0)\n', (2597, 2613), True, 'import numpy as np\n'), ((2622, 2647), 'numpy.delete', 'np.delete', (['y', 'idx'], {'axis': '(0)'}), '(y, idx, axis=0)\n', (2631, 2647), True, 'import numpy as np\n'), ((2853, 2873), 'numpy.moveaxis', 'np.moveaxis', (['X', '(1)', '(2)'], {}), '(X, 1, 2)\n', (2864, 2873), True, 'import numpy as np\n'), ((2971, 2986), 'numpy.arange', 'np.arange', (['(0)', 'N'], {}), '(0, N)\n', (2980, 2986), True, 'import numpy as np\n'), ((2991, 3013), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (3008, 3013), True, 'import numpy as np\n'), ((3053, 3111), 'numpy.random.normal', 'np.random.normal', (['(1)', '(0.05)', '(added, X.shape[1], X.shape[2])'], {}), '(1, 0.05, (added, X.shape[1], X.shape[2]))\n', (3069, 3111), True, 'import numpy as np\n'), ((3123, 3147), 'numpy.multiply', 'np.multiply', (['Xtra', 'noise'], {}), '(Xtra, noise)\n', (3134, 3147), True, 'import numpy as np\n'), ((3160, 3193), 'numpy.concatenate', 'np.concatenate', (['(X, Xtra)'], {'axis': '(0)'}), '((X, Xtra), axis=0)\n', (3174, 3193), True, 'import numpy as np\n'), ((3205, 3248), 'numpy.concatenate', 'np.concatenate', (['(y, y[idx[:added]])'], {'axis': '(0)'}), '((y, y[idx[:added]]), axis=0)\n', (3219, 3248), True, 'import numpy as np\n'), ((3410, 3439), 'numpy.zeros', 'np.zeros', (['(s0 * N, C, window)'], {}), '((s0 * N, C, window))\n', (3418, 3439), True, 'import numpy as np\n'), ((3784, 3803), 'numpy.zeros', 'np.zeros', (['(a, d, c)'], {}), '((a, d, c))\n', (3792, 3803), True, 'import numpy as np\n'), ((4121, 4150), 'numpy.zeros', 'np.zeros', (['(a, (1 + o) * b, c)'], {}), '((a, (1 + o) * b, c))\n', (4129, 4150), True, 
'import numpy as np\n'), ((4190, 4215), 'numpy.gradient', 'np.gradient', (['X'], {'axis': 'axis'}), '(X, axis=axis)\n', (4201, 4215), True, 'import numpy as np\n'), ((4376, 4399), 'numpy.zeros', 'np.zeros', (['(a, b + l, c)'], {}), '((a, b + l, c))\n', (4384, 4399), True, 'import numpy as np\n'), ((4660, 4681), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (4669, 4681), True, 'import numpy as np\n'), ((4686, 4708), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (4703, 4708), True, 'import numpy as np\n'), ((1406, 1415), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (1412, 1415), True, 'import numpy as np\n'), ((1644, 1681), 'numpy.mean', 'np.mean', (['X'], {'axis': 'axis', 'keepdims': 'kdims'}), '(X, axis=axis, keepdims=kdims)\n', (1651, 1681), True, 'import numpy as np\n'), ((1696, 1732), 'numpy.var', 'np.var', (['X'], {'axis': 'axis', 'keepdims': 'kdims'}), '(X, axis=axis, keepdims=kdims)\n', (1702, 1732), True, 'import numpy as np\n'), ((1920, 1930), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (1927, 1930), True, 'import numpy as np\n'), ((1945, 1954), 'numpy.var', 'np.var', (['X'], {}), '(X)\n', (1951, 1954), True, 'import numpy as np\n'), ((2707, 2727), 'numpy.expand_dims', 'np.expand_dims', (['X', '(3)'], {}), '(X, 3)\n', (2721, 2727), True, 'import numpy as np\n'), ((2729, 2750), 'numpy.expand_dims', 'np.expand_dims', (['Xv', '(3)'], {}), '(Xv, 3)\n', (2743, 2750), True, 'import numpy as np\n'), ((2752, 2773), 'numpy.expand_dims', 'np.expand_dims', (['Xt', '(3)'], {}), '(Xt, 3)\n', (2766, 2773), True, 'import numpy as np\n'), ((3600, 3618), 'numpy.vstack', 'np.vstack', (['([y] * t)'], {}), '([y] * t)\n', (3609, 3618), True, 'import numpy as np\n'), ((4254, 4295), 'numpy.gradient', 'np.gradient', (['Xf[:, b:2 * b, :]'], {'axis': 'axis'}), '(Xf[:, b:2 * b, :], axis=axis)\n', (4265, 4295), True, 'import numpy as np\n'), ((4581, 4610), 'numpy.swapaxes', 'np.swapaxes', (['(p / w ** 2)', '(0)', '(2)'], {}), '(p / w 
** 2, 0, 2)\n', (4592, 4610), True, 'import numpy as np\n'), ((220, 244), 'pathlib.Path', 'Path', (["(p + 'pre_data.mat')"], {}), "(p + 'pre_data.mat')\n", (224, 244), False, 'from pathlib import Path\n'), ((1756, 1776), 'numpy.subtract', 'np.subtract', (['X', 'mean'], {}), '(X, mean)\n', (1767, 1776), True, 'import numpy as np\n'), ((1977, 1997), 'numpy.subtract', 'np.subtract', (['X', 'mean'], {}), '(X, mean)\n', (1988, 1997), True, 'import numpy as np\n'), ((1778, 1790), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (1785, 1790), True, 'import numpy as np\n'), ((1999, 2011), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (2006, 2011), True, 'import numpy as np\n'), ((2560, 2571), 'numpy.isnan', 'np.isnan', (['X'], {}), '(X)\n', (2568, 2571), True, 'import numpy as np\n'), ((4423, 4461), 'numpy.square', 'np.square', (['X[:, :l, i - (w - 1):i + 1]'], {}), '(X[:, :l, i - (w - 1):i + 1])\n', (4432, 4461), True, 'import numpy as np\n'), ((4491, 4518), 'numpy.square', 'np.square', (['X[:, :l, :i + 1]'], {}), '(X[:, :l, :i + 1])\n', (4500, 4518), True, 'import numpy as np\n')] |
""" This file provides several classes for generate random numbers/vectors
for error estimation/simulation.
"""
import abc
import numpy as np
class RandomGenerator(abc.ABC):
    """Abstract base class for random number/vector generators.

    Subclasses must implement :meth:`generate`, which returns a single
    random sample (a scalar or an (n, 1) column vector, depending on
    the subclass).

    Note: the original code used ``__metaclass__ = abc.ABCMeta``, which
    is Python-2 syntax and has no effect under Python 3, so the
    abstract-method contract was silently unenforced.  Inheriting from
    :class:`abc.ABC` restores the intended behaviour: instantiating this
    base class directly now raises :class:`TypeError`.
    """

    @abc.abstractmethod
    def generate(self):
        r"""Generate and return a single random sample."""
        raise NotImplementedError()


class UniformGenerator(RandomGenerator):
    """Generate (n, 1) vectors uniformly distributed in a box.

    Component ``i`` of each sample is uniform on ``[low[i], high[i]]``.
    """

    def __init__(self, low, high, n):
        # Store the box corners as (n, 1) column vectors so that
        # np.random.uniform broadcasts per component.
        self.low = np.array(low).reshape((n, 1))
        self.high = np.array(high).reshape((n, 1))
        self.dim = n

    def generate(self):
        """Return one (n, 1) uniform random sample."""
        return np.random.uniform(self.low, self.high)


class NormalGenerator(RandomGenerator):
    """Generate (n, 1) vectors from a multivariate normal distribution."""

    def __init__(self, mean, cov, n):
        # Mean as an (n, 1) column vector, covariance as an (n, n) matrix.
        self.mean = np.array(mean).reshape((n, 1))
        self.cov = np.array(cov).reshape((n, n))
        self.n = n

    def generate(self):
        """Return one (n, 1) multivariate-normal random sample."""
        # NOTE(review): the transpose of the covariance is passed on;
        # for a valid (symmetric) covariance matrix this is a no-op.
        return np.random.multivariate_normal(
            self.mean.reshape((self.n,)), self.cov.T
        ).reshape((self.n, 1))


class PoissonGenerator(RandomGenerator):
    """Generate Poisson-distributed random integers with rate ``lam``."""

    def __init__(self, lam):
        self.lam = lam

    def generate(self):
        """Return one Poisson-distributed random integer."""
        return np.random.poisson(lam=self.lam)
| [
"numpy.array",
"numpy.random.poisson",
"numpy.random.uniform"
] | [((747, 785), 'numpy.random.uniform', 'np.random.uniform', (['self.low', 'self.high'], {}), '(self.low, self.high)\n', (764, 785), True, 'import numpy as np\n'), ((1422, 1453), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': 'self.lam'}), '(lam=self.lam)\n', (1439, 1453), True, 'import numpy as np\n'), ((605, 618), 'numpy.array', 'np.array', (['low'], {}), '(low)\n', (613, 618), True, 'import numpy as np\n'), ((655, 669), 'numpy.array', 'np.array', (['high'], {}), '(high)\n', (663, 669), True, 'import numpy as np\n'), ((986, 1000), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (994, 1000), True, 'import numpy as np\n'), ((1036, 1049), 'numpy.array', 'np.array', (['cov'], {}), '(cov)\n', (1044, 1049), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# coding: utf-8
"""Benchmarking and format conversion libraries
Functions that implement various Hi-C differential analysis tools in our
current format and framework for benchmarking purposes.
"""
import numpy as np
from scipy import sparse
from scipy.ndimage import gaussian_filter
import functools
try:
import rpy2
except (ImportError, RuntimeError):
pass
DEFAULT_BINNING = 1000


def tsv2csv(tsv, binning=DEFAULT_BINNING, output=None):
    """Convert a tab-separated Hi-C contact list into a dense matrix.

    Parameters
    ----------
    tsv : str or file-like
        Input readable by ``np.genfromtxt``; the third column and the
        fifth-from-last column are interpreted as genomic positions.
    binning : int
        Bin size used to coarse-grain the positions.
    output : str or file-like or None
        If given, the dense matrix is also written there via ``np.savetxt``.

    Returns
    -------
    numpy.ndarray
        Dense (n, n) int32 matrix counting one contact per input row.
    """
    records = np.genfromtxt(tsv, dtype=None, comments=None, delimiter="\t")
    columns = list(zip(*records))
    # Coarse-grain the two position columns into bins.
    bins1 = np.array(columns[2], dtype=np.int32) // binning
    bins2 = np.array(columns[-5], dtype=np.int32) // binning
    # Shift both axes so that the smallest bin index becomes zero.
    origin = min(np.amin(bins1), np.amin(bins2))
    bins1 = bins1 - origin
    bins2 = bins2 - origin
    size = 1 + int(max(np.amax(bins1), np.amax(bins2)))
    assert len(bins1) == len(
        bins2
    ), "Mismatch between lengths {} and {}".format(
        len(bins1), len(bins2)
    )
    # One contact per row: accumulate ones at each (bin1, bin2) pair.
    counts = sparse.coo_matrix(
        (np.ones(len(bins1)), (bins1, bins2)), shape=(size, size)
    )
    dense = np.array(counts.todense(), dtype=np.int32)
    if output is not None:
        np.savetxt(output, dense, fmt="%i")
    return dense
def misha2csv(misha=None, binning=DEFAULT_BINNING, output=None):
    """Extract a Hi-C contact map from a misha track database via rpy2.

    Parameters
    ----------
    misha : str or None
        Path to a misha track database.  When ``None``, the test track
        database bundled with the "shaman" R package is used.
    binning : int
        Bin size used to coarse-grain genomic positions.
    output : str or file-like or None
        If given, the dense matrix is also written there via ``np.savetxt``.

    Returns
    -------
    numpy.ndarray
        Dense (n, n) int32 contact matrix.

    Notes
    -----
    Requires a working rpy2 session with the "shaman" and "misha" R
    packages installed; the ``import rpy2`` at the top of this module is
    wrapped in try/except, so this function fails at call time if rpy2
    is unavailable.
    """
    r_library_expression = """
    library("shaman");
    library("misha")
    """
    if misha is None:
        # No database supplied: fall back to shaman's bundled test track
        # database and extract a fixed region of chromosome 2.
        r_import_expression = """
        gsetroot(shaman_get_test_track_db());
        contact_map <- gextract("hic_obs", gintervals.2d(2, 175e06,
            178e06, 2, 175e06, 178e06), colnames="score")
        """
    else:
        # Expose the user-supplied path to the R session.
        # NOTE(review): the R expression below does not appear to use the
        # "path" variable or call gsetroot on it -- confirm against the
        # intended misha workflow.
        rpy2.robjects.r.assign("path", misha)
        r_import_expression = """
        contact_map <- gextract("hic_obs", gintervals.2d(2, 0,
            178e06, 2, 175e06, 178e06), colnames="score")
        """
    rpy2.robjects.r(r_library_expression)
    rpy2.robjects.r(r_import_expression)
    # rpy2.robjects.r("write.table(contact_map, 'exported_map.csv')")
    # matrix = np.genfromtxt("exported_map.csv", dtype=None, skip_header=True)
    matrix = rpy2.robjects.r["contact_map"]
    # Unpack the extracted columns; start/end pairs delimit the two
    # genomic intervals of each contact, "contacts" holds the scores.
    (_, _, start1, end1, _, start2, end2, contacts, _) = zip(*matrix)
    # Use interval midpoints as the representative positions.
    pos1 = (np.array(start1) + np.array(end1)) // 2
    pos2 = (np.array(start2) + np.array(end2)) // 2
    positions1 = np.array(pos1) // binning
    positions2 = np.array(pos2) // binning
    # Shift both axes so the smallest bin index becomes zero.
    minimum = min(np.amin(positions1), np.amin(positions2))
    positions1 -= minimum
    positions2 -= minimum
    n = int(max(np.amax(positions1), np.amax(positions2))) + 1
    assert len(positions1) == len(
        positions2
    ), "Mismatch between lengths {} and {}".format(
        len(positions1), len(positions2)
    )
    # Accumulate the scores into a sparse (n, n) matrix, then densify.
    sparse_matrix = sparse.coo_matrix(
        (contacts, (positions1, positions2)), shape=(n, n)
    )
    dense_matrix = np.array(sparse_matrix.todense(), dtype=np.int32)
    if output is not None:
        np.savetxt(output, dense_matrix, fmt="%i")
    return dense_matrix
# Ready-made blurring operator: scipy's gaussian_filter with sigma fixed to 1.
gaussian_blurring = functools.partial(gaussian_filter, sigma=1)
def hiccompare2csv(datasets=None, binning=DEFAULT_BINNING, output=None):
    """Yield dense symmetric contact matrices for HiCcompare data sets.

    For every data set name, the corresponding HiCcompare R object is
    loaded through rpy2, binned, densified and yielded as a symmetric
    int32 matrix.  Requires a working rpy2 session with the
    "HiCcompare" R package installed.

    Parameters
    ----------
    datasets : iterable of str or None
        Names of HiCcompare data sets; defaults to the two example
        chromosome-22 tracks.
    binning : int
        Bin size used to coarse-grain the positions.
    output : str or file-like or None
        If given, each (non-symmetrized) dense matrix is also written
        there via ``np.savetxt``.
    """
    if datasets is None:
        # Default to the two example tracks shipped with HiCcompare.
        datasets = ("HMEC.chr22", "NHEK.chr22")
    for dataset in datasets:
        r_expression = """
        library("HiCcompare");
        data("{}")
        """.format(
            dataset
        )
        rpy2.robjects.r(r_expression)
        # The R object unpacks into three parallel vectors: the two
        # position columns and the contact counts.
        bins1, bins2, counts = np.array(rpy2.robjects.r[dataset])
        # Coarse-grain positions into bins and shift to a zero origin.
        bins1 //= binning
        bins2 //= binning
        origin = min(np.amin(bins1), np.amin(bins2))
        bins1 -= origin
        bins2 -= origin
        size = 1 + int(max(np.amax(bins1), np.amax(bins2)))
        assert len(bins1) == len(
            bins2
        ), "Mismatch between lengths {} and {}".format(len(bins1), len(bins2))
        accumulator = sparse.coo_matrix(
            (counts, (bins1, bins2)), shape=(size, size)
        )
        dense = np.array(accumulator.todense(), dtype=np.int32)
        if output is not None:
            np.savetxt(output, dense, fmt="%i")
        # Mirror across the diagonal without double-counting it.
        yield dense + dense.T - np.diag(np.diag(dense))
| [
"numpy.amax",
"numpy.amin",
"rpy2.robjects.r.assign",
"numpy.diag",
"numpy.array",
"functools.partial",
"numpy.savetxt",
"scipy.sparse.coo_matrix",
"numpy.genfromtxt",
"rpy2.robjects.r"
] | [((3126, 3169), 'functools.partial', 'functools.partial', (['gaussian_filter'], {'sigma': '(1)'}), '(gaussian_filter, sigma=1)\n', (3143, 3169), False, 'import functools\n'), ((490, 551), 'numpy.genfromtxt', 'np.genfromtxt', (['tsv'], {'dtype': 'None', 'comments': 'None', 'delimiter': '"""\t"""'}), "(tsv, dtype=None, comments=None, delimiter='\\t')\n", (503, 551), True, 'import numpy as np\n'), ((1962, 1999), 'rpy2.robjects.r', 'rpy2.robjects.r', (['r_library_expression'], {}), '(r_library_expression)\n', (1977, 1999), False, 'import rpy2\n'), ((2004, 2040), 'rpy2.robjects.r', 'rpy2.robjects.r', (['r_import_expression'], {}), '(r_import_expression)\n', (2019, 2040), False, 'import rpy2\n'), ((2847, 2916), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(contacts, (positions1, positions2))'], {'shape': '(n, n)'}), '((contacts, (positions1, positions2)), shape=(n, n))\n', (2864, 2916), False, 'from scipy import sparse\n'), ((624, 654), 'numpy.array', 'np.array', (['pos1'], {'dtype': 'np.int32'}), '(pos1, dtype=np.int32)\n', (632, 654), True, 'import numpy as np\n'), ((683, 713), 'numpy.array', 'np.array', (['pos2'], {'dtype': 'np.int32'}), '(pos2, dtype=np.int32)\n', (691, 713), True, 'import numpy as np\n'), ((744, 763), 'numpy.amin', 'np.amin', (['positions1'], {}), '(positions1)\n', (751, 763), True, 'import numpy as np\n'), ((765, 784), 'numpy.amin', 'np.amin', (['positions2'], {}), '(positions2)\n', (772, 784), True, 'import numpy as np\n'), ((1280, 1322), 'numpy.savetxt', 'np.savetxt', (['output', 'dense_matrix'], {'fmt': '"""%i"""'}), "(output, dense_matrix, fmt='%i')\n", (1290, 1322), True, 'import numpy as np\n'), ((1755, 1792), 'rpy2.robjects.r.assign', 'rpy2.robjects.r.assign', (['"""path"""', 'misha'], {}), "('path', misha)\n", (1777, 1792), False, 'import rpy2\n'), ((2428, 2442), 'numpy.array', 'np.array', (['pos1'], {}), '(pos1)\n', (2436, 2442), True, 'import numpy as np\n'), ((2471, 2485), 'numpy.array', 'np.array', (['pos2'], {}), '(pos2)\n', 
(2479, 2485), True, 'import numpy as np\n'), ((2516, 2535), 'numpy.amin', 'np.amin', (['positions1'], {}), '(positions1)\n', (2523, 2535), True, 'import numpy as np\n'), ((2537, 2556), 'numpy.amin', 'np.amin', (['positions2'], {}), '(positions2)\n', (2544, 2556), True, 'import numpy as np\n'), ((3036, 3078), 'numpy.savetxt', 'np.savetxt', (['output', 'dense_matrix'], {'fmt': '"""%i"""'}), "(output, dense_matrix, fmt='%i')\n", (3046, 3078), True, 'import numpy as np\n'), ((3484, 3513), 'rpy2.robjects.r', 'rpy2.robjects.r', (['r_expression'], {}), '(r_expression)\n', (3499, 3513), False, 'import rpy2\n'), ((3545, 3579), 'numpy.array', 'np.array', (['rpy2.robjects.r[dataset]'], {}), '(rpy2.robjects.r[dataset])\n', (3553, 3579), True, 'import numpy as np\n'), ((3940, 3997), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(contacts, (pos1, pos2))'], {'shape': '(n, n)'}), '((contacts, (pos1, pos2)), shape=(n, n))\n', (3957, 3997), False, 'from scipy import sparse\n'), ((2318, 2334), 'numpy.array', 'np.array', (['start1'], {}), '(start1)\n', (2326, 2334), True, 'import numpy as np\n'), ((2337, 2351), 'numpy.array', 'np.array', (['end1'], {}), '(end1)\n', (2345, 2351), True, 'import numpy as np\n'), ((2370, 2386), 'numpy.array', 'np.array', (['start2'], {}), '(start2)\n', (2378, 2386), True, 'import numpy as np\n'), ((2389, 2403), 'numpy.array', 'np.array', (['end2'], {}), '(end2)\n', (2397, 2403), True, 'import numpy as np\n'), ((3654, 3667), 'numpy.amin', 'np.amin', (['pos1'], {}), '(pos1)\n', (3661, 3667), True, 'import numpy as np\n'), ((3669, 3682), 'numpy.amin', 'np.amin', (['pos2'], {}), '(pos2)\n', (3676, 3682), True, 'import numpy as np\n'), ((4137, 4179), 'numpy.savetxt', 'np.savetxt', (['output', 'dense_matrix'], {'fmt': '"""%i"""'}), "(output, dense_matrix, fmt='%i')\n", (4147, 4179), True, 'import numpy as np\n'), ((855, 874), 'numpy.amax', 'np.amax', (['positions1'], {}), '(positions1)\n', (862, 874), True, 'import numpy as np\n'), ((876, 895), 
'numpy.amax', 'np.amax', (['positions2'], {}), '(positions2)\n', (883, 895), True, 'import numpy as np\n'), ((2627, 2646), 'numpy.amax', 'np.amax', (['positions1'], {}), '(positions1)\n', (2634, 2646), True, 'import numpy as np\n'), ((2648, 2667), 'numpy.amax', 'np.amax', (['positions2'], {}), '(positions2)\n', (2655, 2667), True, 'import numpy as np\n'), ((3754, 3767), 'numpy.amax', 'np.amax', (['pos1'], {}), '(pos1)\n', (3761, 3767), True, 'import numpy as np\n'), ((3769, 3782), 'numpy.amax', 'np.amax', (['pos2'], {}), '(pos2)\n', (3776, 3782), True, 'import numpy as np\n'), ((4235, 4256), 'numpy.diag', 'np.diag', (['dense_matrix'], {}), '(dense_matrix)\n', (4242, 4256), True, 'import numpy as np\n')] |
# @title Zurich Instruments HDAWG instrument driver
# @author <NAME>
# @contrib <NAME>, <NAME>, <NAME>
# @date 2020-09-14
# @version v0.835.1
# @other The author of this driver takes no responsibility for
# any and all bugs and frustration caused by Labber and/or
# affiliated Zurich Instruments hardware and software.
# Correspondence should be with the author.
#
#######################################################
""" Labber driver for the Zurich Instruments HDAWG. """
#######################################################
# Python rudimentaries
from __future__ import print_function
from BaseDriver import LabberDriver, Error, IdError
from datetime import datetime
import glob
import inspect
import numpy as np
import os
import psutil
import re
import shutil
import textwrap
import time
# Zurich Instruments functionality
import zhinst.ziPython as ziPython
import zhinst.utils as ziUtils
# Main Labber driver class
class Driver(LabberDriver):
'''This class implements a Labber driver.
In order to establish a connection to the HDAWG instrument, please select
the Zurich Instruments HDAWG driver in the 'Add instruments' dialogue.
Select USB or TCPIP in the interface list, followed by providing the
device serial. The serial is provided on the device on the form 'DEV$$$$',
Should such a serial not be provided, the driver allows for auto-connecting
to an instrument should the phrase <autodetect> or <autoconnect> be
provided. This is not recommended in cases where there are multiple
Zurich Instruments devices connected to the Instrument server PC.
'''
def performOpen(self, options={}):
'''Perform the action of opening the instrument.
'''
# Instantiate the instrument connection, the ZI API, AWG module,
# and more.
self.instantiateInstrumentConnection()
# Create an initial configuration of stored waveforms.
# These will constitute a local set used to track what waveforms are
# used / changed etc.
self.defaultWaveformConfiguration()
# If configured to, signal LEDs after completing startup
if self.getValue('Signal LEDs on startup'):
self.daq.setInt('/' + self.dev + '/system/identify', 1)
def performClose(self, bError=False, options={}):
'''Perform the close instrument connection operation.
'''
# It has been chosen not to include a true power-off at this stage
# (/system/shutdown, 1) as enabling and disabling the instrument in
# such a recurring fashion would cause a lot of delay.
# A try-exception is done since the API session might not have
# been instantiated.
try:
self.daq.setInt('/'+str(self.dev)+'/awgs/0/enable', 0)
# If configured to, turn off all outputs when closing the device
if self.getValue('Disable outputs on close'):
for i in range(0, self.n_ch):
self.daq.setInt('/'+str(self.dev)+'/sigouts/'+str(i)+'/direct',0)
self.setValue('Channel '+str(i+1)+' - Bypass DAC to port', False)
self.daq.setInt('/'+str(self.dev)+'/sigouts/'+str(i)+'/on',0)
self.setValue('Channel '+str(i+1)+' - Output', False)
self.daq.setInt('/'+str(self.dev)+'/sigouts/'+str(i)+'/filter', 0)
self.setValue('Channel '+str(i+1)+' - Filter', False)
# If configured to, signal LEDs when disconnecting
if self.getValue('Signal LEDs on close'):
self.daq.setInt('/' + self.dev + '/system/identify', 1)
except:
# TODO So ZIAPINotFoundException is generated. How will we define a suitable exception to be thrown at this instance?
# TODO Likely using some ziUtils.ZIAPINotFoundException or similar. Ask ZI.
self.log( \
"Could not close the device; " + \
"there is likely no connection to the ZI API.",level=30)
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
'''Perform the Set Value instrument operation.
Variables are subject to change between one experiment and another.
To my knowledge, there is no way of registering whether a
variable changed in the measurement editor. Thus, all waveforms must
be seen as subject to change. The solution is to keep a record
of all waveforms locally, fetch each and every waveform at the
start of a new measurement, and then compare them for differences.
This in turn is somewhat wasteful, but there is no other algorithmic
way of guaranteeing that every waveform will be according to the
user specification in the Measurement editor.
This function should return the actual value set by the instrument.
'''
# isFirstCall is true each and every time the 'program pointer' of
# the measurement setup is pointing at the top.
if self.isFirstCall(options):
pass
# Is performSetValue attempting to execute a standard ZI API call?
# (or a command based on the string / other datatype?)
if '/%s/' in quant.set_cmd:
if 'double /' in quant.set_cmd:
self.daq.setDouble( \
quant.set_cmd.replace('double ','') % self.dev, \
value if not (quant.datatype > 1) \
else float(quant.getCmdStringFromValue(value)) \
)
elif 'int /' in quant.set_cmd:
self.daq.setInt( \
quant.set_cmd.replace('int ','') % self.dev, \
value if not (quant.datatype > 1) \
else int(quant.getCmdStringFromValue(value)) \
)
elif 'boolean /' in quant.set_cmd:
if quant.datatype == 1:
self.daq.setInt( \
quant.set_cmd.replace('boolean ','') % self.dev, \
(1 if value else 0) \
)
elif quant.datatype == 2:
# Throw suboptimal warning
self.log( \
"Note: setting booleans using combinational " + \
"lists is very suboptimal due to ambiguity in " + \
"the APIs.\nConsider changing the instruction " + \
"set_cmd type to integer, using the cmd_defs " + \
"1 and 0 for \'True\' and \'False\' " + \
"respectively ("+quant.name+")." , level=30)
fetch_bool = quant.getCmdStringFromValue(value).lower()
if (fetch_bool == 'false') or (fetch_bool == '0'):
# Do False-case
self.daq.setInt( \
quant.set_cmd.replace(\
'boolean ','') % self.dev \
, 0 \
)
elif (fetch_bool == 'true') or (fetch_bool == '1'):
# Do True-case
self.daq.setInt( \
quant.set_cmd.replace(\
'boolean ','') % self.dev \
, 1 \
)
else:
raise ValueError( \
"Unrecognised boolean value for quantity " + \
"name \'"+quant.name+"\' (received \'" + \
str(value)+"\').")
else:
raise ValueError( \
"Bad datatype for quantity \'" + quant.name + \
"\,' expected boolean or combo (of booleans).")
elif 'other /' in quant.set_cmd:
# Due to the nature of the 'other' datatype, this driver
# constructs a 'Python switch-case' where every entry spells
# out a prepared version of the quant.name string.
def Minimise_inter_device_asynchronous_jitter(self, value):
''' TODO missing text
'''
# If this command is run (and value is True),
# we must update the sequencer.
self.sequencer_demands_updating = True
# Prep. the sequencer generation stage 3:
# 'SYNCHRONISE_TO_BEATING_FREQUENCY'
self.update_local_awg_program[3] = True
# The sequencer program generation in turn checks the
# 'Minimise inter-device asynchronous jitter' flag which
# at this time may be False, since value is returned
# *after* isFinalCall has run. Thus, we must force-set
# the flag from here.
self.setValue( \
'Minimise inter-device asynchronous jitter', \
value \
)
# Modifications may be done to the internal trigger period
self.perform_repetition_check = True
# Setting this value to true, since it involves the
# usage of oscillators, may change the channel grouping
# type.
if value or \
self.getValue('Use oscillator-based repetition delay'):
# A channel grouping of 4x2 is required.
self.daq.setInt( \
'/'+self.dev+'/system/awg/channelgrouping', 0)
else:
# A channel grouping of 1x8 is sufficient.
if self.daq.getInt( \
'/'+self.dev+'/system/awg/channelgrouping') != 2:
# The grouping should be changed.
self.daq.setInt( \
'/'+self.dev+'/system/awg/channelgrouping', 2)
def Beat_frequency(self, value):
''' TODO missing text
'''
# Set oscillator 1 to the beat frequency of the sequencers.
beat_frequency = abs(value)
previous_osc_freq = \
self.daq.getDouble('/'+str(self.dev)+'/oscs/0/freq')
iterfreq = 2
while(iterfreq <= 32):
setval = beat_frequency / iterfreq
if setval < 299000000:
self.daq.setDouble( \
'/'+str(self.dev)+'/oscs/0/freq', \
setval)
self.daq.sync()
if self.daq.getDouble( \
'/'+str(self.dev)+'/oscs/0/freq') == setval:
# All is fine. Update value and break.
self.setValue('Beat frequency', setval)
break
iterfreq *= 2
# Check whether the set was successfull
if iterfreq > 32:
# Not fine, reset and raise error.
self.daq.setDouble( \
'/'+str(self.dev)+'/oscs/0/freq',\
previous_osc_freq )
raise ArithmeticError( \
"Cannot set oscillator 1 to an even dividend " + \
"of "+str(beat_frequency)+" Sa/s)" )
# TODO This may be solvable by moving commands around in the 'other' datatype category right here.
self.log('WARNING: Changing the beat frequency was fine and all but we must now also change the internal repetition rate if that was set to match the beat frequency.',level=30)
def Internal_trigger_period(self, value):
'''TODO missing text
'''
# Is the user changing the internal trigger period,
# while the system is set to use an oscillator as the
# internal repetition delay source?
# Is the system set to use oscillator 2 as the internal
# repetition source trigger?
if self.getValue( \
'Use oscillator-based repetition delay'):
# TODO The first thing which should happen, is to
# check whether the new requested period is reasonable.
# This check includes for instance checking whether
# it is too small or large to represent by the
# oscillator. If yes, modify the period.
# Parry for infinite frequencies, check limits.
# This part has been somewhat optimised, see if-cases.
# For instance the >= 0 or not check is flag-checkable.
if value >= 0:
# Value is positive
if value < 8.333333333333333e-10:
# Value is an issue, the oscillator cannot
# go any faster. Limit the requested period.
value = 8.333333333333333e-10
else:
# Value is negative
if value > -8.333333333333333e-10:
# Value is an issue, the oscillator cannot
# go any faster. Limit the requested period.
value = -8.333333333333333e-10
''' # See triple-apostrophe comment at while loop below
# Fetch the current set value for the repetition
# oscillator.
previous_osc_freq = self.daq.getDouble( \
'/'+str(self.dev)+'/oscs/1/freq')'''
# Must we synchronise to the jitter-free clause?
if self.getValue( \
'Minimise inter-device asynchronous jitter'):
# Fetch the beat frequency (oscillator) and its
# period for repeated usage later.
beat_oscill = self.daq.getDouble( \
'/'+str(self.dev)+'/oscs/0/freq')
beat_peri = abs(1 / beat_oscill)
''' # The below while-segment has been commented,
# fetching a perfectly representable value for
# the next 'guess' of the while loop is pretty
# complicated.
attempts = 0
while (attempts <= 30):'''
# Are the devices *not* in sync?
if beat_oscill != 0:
# A beat frequency exists. The oscillator
# must be set to a whole multiple of the
# beat-frequency oscillator.
# Is the requested value *not* a legal
# and valid multiple of the beat period?
if not (value % beat_peri == 0):
# The requested repetition frequency is
# not a multiple of the beat frequency,
# and has to be modified.
# Is value even smaller than the beat
# period?
# TODO is this check still valid after adding 'sense checks?'
if value > beat_peri:
# value mod. beat_peri != 0
nearest_int = \
round(value / beat_peri)
value = nearest_int * beat_peri
else:
# Value is smaller than (or equal
# to) the beat period. Set the
# value to the lowest feasible.
value = beat_peri
# Get the corresponding frequency
rep_frequency = abs(1 / value)
# Try to set rep_frequency.
# if success - report and break
# else: attempt += 1, value = TODO
self.daq.setDouble( \
'/'+str(self.dev)+'/oscs/1/freq', \
rep_frequency)
self.daq.sync()
value = 1 / self.daq.getDouble( \
'/'+str(self.dev)+'/oscs/1/freq')
'''# See triple-apostrophe comment at the while-
# loop above.
received_rpf = self.daq.getDouble( \
'/'+str(self.dev)+'/oscs/1/freq')
if received_rpf == rep_frequency:
# All is fine, the value was set and legal.
break
else:
# Failure. Increment attempts.
attempts += 1
# Update value.
# TODO Getting this next guess is pretty
# much a PhD in itself to get right.
# Hence the commented code.
value = value * attempts / 30'''
else:
# The user has not requested to synchronise the
# delay to the beat frequency oscillator.
# No jitter-free clause needed.
rep_frequency = abs(1 / value)
self.daq.setDouble( \
'/'+str(self.dev)+'/oscs/1/freq', \
rep_frequency)
self.daq.sync()
value = 1 / self.daq.getDouble( \
'/'+str(self.dev)+'/oscs/1/freq')
''' # See triple-apostrophe comment above.
# Check whether the set was successfull
if attempts > 30:
# Not fine, reset and raise error.
self.daq.setDouble( \
'/'+str(self.dev)+'/oscs/1/freq',\
previous_osc_freq)
raise ArithmeticError( \
"Could not modify the requested repetition "+ \
"rate to an exact oscillator value given " + \
"the currently set beat frequency." )'''
else:
# The user has requested not to use an oscillator
# for the internal trigger period. This implies
# a change of the sequencer program.
self.sequencer_demands_updating = True
# The internal repetition rate value has to be
# set already at this stage, as the value
# returns after the isFinalCall check which
# might depend on this value.
self.setValue('Internal trigger period', value)
# Sanity check for validness of internal repetition rate
self.perform_repetition_check = True
def Use_oscillator_based_repetition_delay(self, value):
'''TODO
'''
# If this command is run (and value is True),
# we must update the sequencer.
self.sequencer_demands_updating = True
# Prep. the sequencer generation stage 2:
# WAIT_FOR_INITIAL_TRIGGER, DELAY_BEFORE_LOOP_END,
# WAIT_FOR_TRIGGER_TO_REPEAT
self.update_local_awg_program[2] = True
# The sequencer program generation in turn checks the
# 'Use oscillator-based repetition delay' flag which
# at this time may be False, since value is returned
# *after* isFinalCall has run. Thus, we must force-set
# the flag from here.
self.setValue( \
'Use oscillator-based repetition delay', \
value \
)
# Setting this value to true, since it involves the
# usage of oscillators, may change the channel grouping
# type.
if value or self.getValue( \
'Minimise inter-device asynchronous jitter'):
# A channel grouping of 4x2 is required.
self.daq.setInt( \
'/'+self.dev+'/system/awg/channelgrouping', 0)
else:
# A channel grouping of 1x8 is sufficient.
if self.daq.getInt( \
'/'+self.dev+'/system/awg/channelgrouping') != 2:
# The grouping should be changed.
self.daq.setInt( \
'/'+self.dev+'/system/awg/channelgrouping', 2)
def Reference_clock(self, value):
'''TODO write text
'''
# Is the user changing the reference clock?
# As the clock will revert to the 'Internal' mode in case
# of failure, more complex behaviour is required than a
# simple ZI API call.
# To save on the waiting time: if the system is running
# in external mode, and we're trying to set it to external
# at bootup, simply ignore the set.
req_value = int(quant.getCmdStringFromValue(value))
rfclk = str( \
'/'+self.dev+'/system/clocks/referenceclock/source' )
if not (self.daq.getInt( rfclk ) == req_value):
# Set the new value
self.daq.setInt( rfclk, req_value )
# Wait for the 'Reference clock' value to eventually
# rebounce. Half of a second is a typical good value.
time.sleep(0.5)
# Fetch the new value and compare differences.
value = self.daq.getInt( rfclk )
# Did we fail to change the value?
# TODO This if-case together with the if
# requested_value below can likely be algorithmically
# optimised.
if value != req_value:
if req_value == 1:
# Has the user requested to halt the system in
# case this happens?
if self.getValue( \
'Halt on external clock failure'):
raise RuntimeError( \
"Halted: Could not lock the " + \
"reference clock to an external " + \
"signal.")
else:
# Send a lock failure warning.
self.log( \
"Warning: Could not lock the " + \
"reference clock to an external " + \
"signal.")
else:
# Send an unlock failure warning.
self.log( \
"Warning: Could not unlock the " + \
"reference clock from the external " + \
"signal.")
def Output_sample_rate(self, value):
'''TODO
'''
# Is the user changing a ZIAPI double which may invalidate
# the current internal repetition delay value?
# Modify the sample rate clock
self.daq.setDouble( \
quant.set_cmd.replace('other ','') % self.dev, \
value \
)
# This operation is delicate, thus we monitor a status
# string for its current status.
upload_timeout_ms = 2950
clock_status = 2 # 2 = 'Busy' acc. to ZI HDAWG doc.
# Give it a tiny wait
time.sleep(0.050) # TODO This value should be minimised
# elf/status provides information whether the upload is
# succeeding.
while (clock_status != 0) and (upload_timeout_ms >= 0):
# Fetch progress
clock_status = \
self.daq.getInt( \
'/%s/system/clocks/sampleclock/status' \
% self.dev)
# Shortcut ending
if clock_status == 0:
break
# Waiting sequence
time.sleep(0.050)
upload_timeout_ms -= 50
# Check for sample clock change timeout
if upload_timeout_ms <= 0:
raise RuntimeError( \
"Failed to set \'Output sample rate\' due " + \
"to command timeout.")
# Sample clock change reported failure
elif clock_status == 1:
raise RuntimeError( \
"Failed to set \'Output sample rate\' due " + \
"to some unknown device error.")
# This command may change the validness of the internal
# repetition delay.
self.perform_repetition_check = True
def Output_sample_rate_divisor(self, value):
'''TODO
'''
# Is the user changing the sampling rate combo?
self.daq.setInt( \
quant.set_cmd.replace('other ','') % self.dev, \
int(quant.getCmdStringFromValue(value)) \
)
# This command may change the validness of the internal
# repetition delay.
self.perform_repetition_check = True
def Sequencer_triggers(self, value):
'''TODO
'''
# This command may change the validness of the internal
# repetition delay.
self.perform_repetition_check = True
# Prep. the sequencer generation stage 4:
# START_TRIGGER_PULSE, END_TRIGGER_PULSE
self.update_local_awg_program[4] = True
# Prep. the sequencer generation stage 5:
# DELAY_BEFORE_END_TRIGGER
self.update_local_awg_program[5] = True
def Delay_before_end_trigger_changes(self, value):
'''TODO
'''
# Is the user changing a value which should trigger the
# internal repetition check?
# This command may change the validness of the internal
# repetition delay.
self.perform_repetition_check = True
# Prep. the sequencer generation stage 5:
# DELAY_BEFORE_END_TRIGGER
self.update_local_awg_program[5] = True
def Run_mode(self, value):
'''TODO
'''
# Is the user changing the Run mode?
# This command will require a change in the
# sequencer program.
self.sequencer_demands_updating = True
# If changing back to 'Internal trigger' -> we may need
# to double-check that the internal repetition rate is
# valid. All previous calls during other Run modes
# have been ignored.
self.perform_repetition_check = True
# We now make a note to the generateSequencerProgram
# that the run mode has changed.
# Prep. the sequencer generation stage 2:
# WAIT_FOR_INITIAL_TRIGGER, DELAY_BEFORE_LOOP_END,
# WAIT_FOR_TRIGGER_TO_REPEAT
self.update_local_awg_program[2] = True
# The Labber-stored value for 'Run mode' must be updated
# at this location as the generateSequencerProgram function
# will run before the setValue default after isFinalCall.
self.setValue('Run mode', value)
def Range(self, value):
'''TODO
'''
# Get the channel in question
channel = int( \
( \
quant.name.replace(' - Range','') \
).replace('Channel ',''))
# Alter the output range, make sure to update the
# self-object list of ranges. This list is used when
# resetting the output range after disabling the direct
# output.
val = float(quant.getCmdStringFromValue(value))
# Execute command
self.daq.setDouble( \
'/%s/sigouts/%s/range' % (self.dev, channel-1), val \
)
# Update list
self.previous_ranges[channel-1] = val
def Bypass_DAC_to_port(self, value):
''' TODO
For your information, the DAC bypass to port function
is known as 'Direct output' in ZI LabOne.
'''
# Get the channel in question
channel = int( \
( \
quant.name.replace(' - Bypass DAC to port','') \
).replace('Channel ',''))
# Disable and restore if false, merely enable if true
if not value:
# Execute disablement
self.daq.setInt( \
'/%s/sigouts/%s/direct' % (self.dev,channel-1), 0 \
)
# Note to reader: this clause usually changes the
# measurement range in such a way that a relay
# will toggle the instrument into said range.
# Meaning that a weird double-klicking is expected.
self.daq.setDouble( \
'/%s/sigouts/%s/range' % (self.dev,channel-1), \
float(self.previous_ranges[channel-1]) \
)
else:
# Merely execute the enablement
self.daq.setInt( \
'/%s/sigouts/%s/direct' % (self.dev,channel-1), 1 \
)
def Output_Marker_config(self, value):
'''TODO
'''
# The config can change three major topics:
# 1. Start time for marker 1/2
# 2. Duration of marker 1/2
# 3. Whether there are any markers left to be played.
# Which channel was it and what channel are we talking?
split = (quant.name).split(' Marker ', 1)
channel = int(split[0].replace('Output ' ,'')) -1
marker = int((split[1].replace(' start time','')).replace(' duration','')) -1
# Get the current sample rate (per divisor)
sample_rate = \
self.getValue('Output sample rate') / \
2**self.getValueIndex('Output sample rate divisor')
# Change marker start or duration?
if('st' in quant.name):
# Start it is. Convert value to samples.
start = int(round(value * sample_rate))
# Fetch the current duration.
duration = int(self.marker_configuration[channel,marker,1])
# Update the marker configuration.
self.configureMarker(channel,marker,start,duration)
else:
# So it's duration then.
duration = int(round(value * sample_rate))
# Fetch the current start.
start = int(self.marker_configuration[channel,marker,0])
# Update the marker configuration.
self.configureMarker(channel,marker,start,duration)
# Setup and fetch a 'switch/case' clause
quant_name_swicas = \
((quant.name).replace(' ','_')).replace('-','_')
switch_case = {
'Trigger_out_delay':\
Delay_before_end_trigger_changes,
'Dynamic_repetition_rate':\
Delay_before_end_trigger_changes,
'Calibrate_trigger_out_delay':\
Delay_before_end_trigger_changes,
'Halt_on_illegal_repetition_rate':\
Delay_before_end_trigger_changes,
'Calibrate_internal_trigger_period':\
Delay_before_end_trigger_changes,
'Channel_1___Bypass_DAC_to_port':\
Bypass_DAC_to_port,
'Channel_2___Bypass_DAC_to_port':\
Bypass_DAC_to_port,
'Channel_3___Bypass_DAC_to_port':\
Bypass_DAC_to_port,
'Channel_4___Bypass_DAC_to_port':\
Bypass_DAC_to_port,
'Channel_5___Bypass_DAC_to_port':\
Bypass_DAC_to_port,
'Channel_6___Bypass_DAC_to_port':\
Bypass_DAC_to_port,
'Channel_7___Bypass_DAC_to_port':\
Bypass_DAC_to_port,
'Channel_8___Bypass_DAC_to_port':\
Bypass_DAC_to_port,
'Channel_1___Range':\
Range,
'Channel_2___Range':\
Range,
'Channel_3___Range':\
Range,
'Channel_4___Range':\
Range,
'Channel_5___Range':\
Range,
'Channel_6___Range':\
Range,
'Channel_7___Range':\
Range,
'Channel_8___Range':\
Range,
'Output_1_Marker_1_start_time':\
Output_Marker_config,
'Output_1_Marker_2_start_time':\
Output_Marker_config,
'Output_2_Marker_1_start_time':\
Output_Marker_config,
'Output_2_Marker_2_start_time':\
Output_Marker_config,
'Output_3_Marker_1_start_time':\
Output_Marker_config,
'Output_3_Marker_2_start_time':\
Output_Marker_config,
'Output_4_Marker_1_start_time':\
Output_Marker_config,
'Output_4_Marker_2_start_time':\
Output_Marker_config,
'Output_5_Marker_1_start_time':\
Output_Marker_config,
'Output_5_Marker_2_start_time':\
Output_Marker_config,
'Output_6_Marker_1_start_time':\
Output_Marker_config,
'Output_6_Marker_2_start_time':\
Output_Marker_config,
'Output_7_Marker_1_start_time':\
Output_Marker_config,
'Output_7_Marker_2_start_time':\
Output_Marker_config,
'Output_8_Marker_1_start_time':\
Output_Marker_config,
'Output_8_Marker_2_start_time':\
Output_Marker_config,
'Output_1_Marker_1_duration':\
Output_Marker_config,
'Output_1_Marker_2_duration':\
Output_Marker_config,
'Output_2_Marker_1_duration':\
Output_Marker_config,
'Output_2_Marker_2_duration':\
Output_Marker_config,
'Output_3_Marker_1_duration':\
Output_Marker_config,
'Output_3_Marker_2_duration':\
Output_Marker_config,
'Output_4_Marker_1_duration':\
Output_Marker_config,
'Output_4_Marker_2_duration':\
Output_Marker_config,
'Output_5_Marker_1_duration':\
Output_Marker_config,
'Output_5_Marker_2_duration':\
Output_Marker_config,
'Output_6_Marker_1_duration':\
Output_Marker_config,
'Output_6_Marker_2_duration':\
Output_Marker_config,
'Output_7_Marker_1_duration':\
Output_Marker_config,
'Output_7_Marker_2_duration':\
Output_Marker_config,
'Output_8_Marker_1_duration':\
Output_Marker_config,
'Output_8_Marker_2_duration':\
Output_Marker_config,
'Run_mode':\
Run_mode,
'Beat_frequency':\
Beat_frequency,
'Reference_clock':\
Reference_clock,
'Output_sample_rate':\
Output_sample_rate,
'Sequencer_triggers':\
Sequencer_triggers,
'Internal_trigger_period':\
Internal_trigger_period,
'Output_sample_rate_divisor':\
Output_sample_rate_divisor,
'Use_oscillator_based_repetition_delay':\
Use_oscillator_based_repetition_delay,
'Minimise_inter_device_asynchronous_jitter':\
Minimise_inter_device_asynchronous_jitter
}
# Execute
switch_case.get(quant_name_swicas)(self, value)
# TODO Is it even necessary to pass on the value parameter?
# Because, quant seems to work just fine.
# TODO This switch-case setup can likely be moved elsewhere.
# For instance to defaultWaveformConfiguration or similar.
elif 'string /' in quant.set_cmd:
# The quant name if-case is commented as there is currently
# only one command in the instruction file which uses the
# string datatype.
#if quant.name == 'Command line box':
# This portion only runs if the user is giving an explicit
# command line command.
if not ', ' in value:
self.log("Parser error: \', \' missing.", level=30)
else:
# Grab the substring after ', ' - store the process value.
parsed = value[value.index(', ')+2 : ]
proc = (value.replace(', ' + parsed,'')).lower()
if 'int /%s/' in proc:
try:
self.daq.setInt( \
(proc.replace('int ','')) % self.dev, \
int(parsed) \
)
self.log("ZIAPI command accepted: \'" + value + \
"\'",level=30)
except: # TODO define this exception
# These lines are mainly for debug.
#self.log("ZIAPI command line parser exception: "+ \
# "cannot interpret \'" + value + "\' as"+ \
# " a valid \'int\' command.",level=30)
pass
elif 'double /%s/' in proc:
try:
self.daq.setDouble( \
(proc.replace( \
'double ','') \
) % self.dev, float(parsed) \
)
self.log("ZIAPI command accepted: \'" + value + \
"\'",level=30)
except: # TODO define this exception
# These lines are mainly for debug.
#self.log("ZIAPI command line parser exception: "+ \
# "cannot interpret \'" + value + "\' as"+ \
# " a valid \'double\' command.",level=30)
pass
elif 'boolean /%s/' in proc:
try:
if (parsed.lower() == 'true') \
or (parsed.lower() == '1') \
or (parsed.lower() == 'ya boi'):
self.daq.setInt( \
(proc.replace('boolean ','')) \
% self.dev, 1 \
)
self.log("ZIAPI command accepted: \'" +value+ \
"\'",level=30)
elif (parsed.lower() == 'false') \
or (parsed.lower() == '0') \
or (parsed.lower() == 'na'):
self.daq.setInt( \
(proc.replace('boolean ','')) \
% self.dev, 0 \
)
self.log("ZIAPI command accepted: \'" +value+ \
"\'",level=30)
except:
# These lines are mainly for debug.
#self.log("ZIAPI command line parser exception: "+ \
# "cannot interpret \'" + value + "\' as"+ \
# " a valid \'boolean\' command.",level=30)
pass
elif 'awgmodule' in proc: # No capitalisation on M
# Fix the lower()-conversion
proc = proc.replace('awgmodule','awgModule')
# The AWG module is datatype agnostic as to integers
# versus doubles. Any attempt to hard-define such
# a datatype results is in vain, ergo remove all
# (feasible) datatype specifiers from the beginning
# of the command line parsed.
if 'int ' in proc:
proc = proc.replace('int ','')
elif 'double ' in proc:
proc = proc.replace('double ','')
elif 'boolean ' in proc:
if (parsed.lower() == 'true') \
or (parsed.lower() == '1') \
or (parsed.lower() == 'ya boi'):
proc.replace('boolean ','')
parsed = 1
elif (parsed.lower() == 'false') \
or (parsed.lower() == '0') \
or (parsed.lower() == 'na'):
proc.replace('boolean ','')
parsed = 0
try:
self.awgModule.set(proc, float(parsed))
self.log("ZIAPI command accepted: \'" + value + \
"\'",level=30)
except: # TODO define this exception
# # These lines are mainly for debug.
# #self.log("ZIAPI command line parser "+ \
# # "exception: cannot interpret \'" + \
# # value + "\' as"+ \
# # " a valid awgModule command.",level=30)
pass
#else:
# # These lines are mainly for debug
# #self.log("Warning: the command line parser did not"+\
# #" understand at all what you put in the command " +\
# #"line box: \'"+str(value)+"\'",level=30)
#pass
else:
raise NotImplementedError( \
"Unrecognised ZI API command: " + quant.set_cmd \
)
# Is the setValue attempting to set an awgModule value?
elif 'awgModule' in quant.set_cmd:
self.awgModule.set( \
quant.set_cmd, \
value if not (quant.datatype > 1) \
else float(quant.getCmdStringFromValue(value)) \
)
# In the final call, we may have had changes that require uploading
# new waveforms or even requires recompiling the sequencer code.
# 'isFinalCall' is true each and every time the 'program pointer' of
# the measurement setup is pointing at the bottom.
if self.isFinalCall(options):
# Prepare for adjusting the buffer length.
''' Two variables keep track of said length:
- self.buffer_length:
The actual length value sent onto the sequence generator.
Will correspond to the longest waveform length of all
loaded waveforms in the previous isFinalCall-run.
- current_buffer_length:
Buffer length following the very latest update, will update
the self.buffer_length after the wave for-loop if the
current buffer length does not correspond to the previous
one (= the required maximum buffers do not match).
'''
current_buffer_length = 0
# Keep track of the highest waveform in use.
# This is used when declaring the sequencer program as well
# as determining whether to upload a waveform interleaved or
# in single mode in writeWaveformToMemory. 0 corresponds to
# zero waveforms being used. 1 corresponds to one waveform in
# use total, playing on output 1.
self.highest_waveform_in_use = 0
# Figure out whether we require waveform uploading
for wave in range(0, self.n_ch):
# Fetch the current waveform. It may either have been
# declared directly, or by using waveform primitives.
current_waveform = self.fetchAndAssembleWaveform(wave)
# Counteract Labber randomly returning [None].
if np.array_equal(current_waveform,[None]):
current_waveform = []
# TODO:
# Remove this portion when Labber starts returning
# values as expected.
self.log( \
"Labber encountered an internal (non-critical) " + \
"error." , \
level=30)
# TODO:
# Insert code for dropping this measurement point entirely.
#self.log( \
# "Labber encountered an internal (non-critical) " + \
# "error. This waveform update is discarded." , \
# level=30)
# In case Labber actually returned [None], then the length
# of the current waveform can impossibly be longer
# than the current buffer length. Hence the elif below.
# Calculate a new buffer length
elif len(current_waveform) > current_buffer_length:
current_buffer_length = len(current_waveform)
# Algorithmic piggyback, we know that if this is true
# then this is automatically the highest waveform in use.
self.highest_waveform_in_use = wave +1
elif not len(current_waveform) == 0:
self.highest_waveform_in_use = wave +1
# Acquire the previous waveform for future comparisons
previous_waveform = self.loaded_waveforms[wave]
# Has something happened?
# TODO This comparison should be hashed in the future.
if not np.array_equal(current_waveform, previous_waveform):
# Is the loaded waveform None?
if len(current_waveform) == 0:
# The user has requested to unload a waveform.
self.loaded_waveforms[wave] = []
# The sequencer should thus be updated. The waveform
# being unloaded should not be marked as 'changed' as
# there will be no memory allocated for it.
self.sequencer_demands_updating = True
# Prep. the sequencer generation stage 0:
# MARKER_DECLARATION, WAVEFORM_DECLARATION, PLAYWAVE, WAITWAVE
self.update_local_awg_program[0] = True
else: # len(current_waveform) > 0:
# The user is changing the waveform, update and tag
# the waveform for update ('something changed').
self.loaded_waveforms[wave] = current_waveform
self.waveform_changed[wave] = True
# Is this an entirely new waveform?
if len(previous_waveform) == 0:
# A new waveform was added, update the sequencer.
self.sequencer_demands_updating = True
# Prep. the sequencer generation stage 0:
# MARKER_DECLARATION, WAVEFORM_DECLARATION,
# PLAYWAVE, WAITWAVE
self.update_local_awg_program[0] = True
# Does the longest waveform in the new waveform package differ
# from the previously used buffer length? Ergo, update sequencer?
if current_buffer_length != self.buffer_length:
self.buffer_length = current_buffer_length
self.sequencer_demands_updating = True
# Prep. the sequencer generation stage 0:
# MARKER_DECLARATION, WAVEFORM_DECLARATION, PLAYWAVE, WAITWAVE
self.update_local_awg_program[0] = True
# Has any runtime values tampered with the internal repetition
# rate? Ie. must we check whether the repetition rate is valid?
if self.perform_repetition_check:
# Reset call flag
self.perform_repetition_check = False
if self.getValue('Run mode') == 'Internal trigger':
# Run check, at this stage we even know what waveforms to
# change and what the buffer length is.
self.checkInternalRepetitionRateValid()
# The next task on the agenda is to carry out a potential
# sequencer update and / or upload new waveforms.
if self.sequencer_demands_updating:
# Halt the sequencer. # TODO look at the compile-code.
self.awgModule.set('awgModule/awg/enable', 0)
# Recompile the sequencer, this requires re-uploading all
# waveforms anew. This is mainly due to the most common
# triggering condition for sequencer re-compilation, being
# buffer length discrepancy versus the old sequencer code.
self.updateSequencer()
self.sequencer_demands_updating = False
# The writeWaveform function will inject and reset the
# changed-status of the waveform(s) to False.
self.writeWaveformToMemory()
# Enable playback again. # TODO look at the compile-code.
self.awgModule.set('awgModule/awg/enable', 1)
elif np.any(self.waveform_changed):
# The sequencer can remain the same. However, there were
# changes done to the loaded waveforms.
# The writeWaveform function will reset the changed-status
# of the waveform(s) to False.
self.writeWaveformToMemory()
return value
    def performGetValue(self, quant, options={}):
        '''Perform the Get Value instrument operation.

        Dispatches on the prefix of quant.get_cmd:
        - '/%s/' node commands are read from the ZI data server
          (self.daq) with the accessor named by the prefix
          ('double ', 'int ', 'boolean ', 'string ' or 'other ').
        - 'awgModule' commands are read from the AWG module.
        Anything else raises NotImplementedError.

        Combo quantities (quant.datatype == 2) are converted back to
        their display value via quant.getValueFromCmdString.

        NOTE(review): 'options' is unused in this body and its mutable
        default is never modified here.
        '''
        # Is performGetValue attempting to execute a standard ZI API call?
        if '/%s/' in quant.get_cmd:
            if 'double /' in quant.get_cmd:
                # datatype 0 = plain double, 2 = combo (of doubles).
                if quant.datatype == 0:
                    return self.daq.getDouble(\
                        quant.get_cmd.replace('double ','') % self.dev \
                    )
                elif quant.datatype == 2:
                    return quant.getValueFromCmdString( \
                        self.daq.getDouble( \
                            quant.get_cmd.replace('double ','') % self.dev\
                        ) \
                    )
                else:
                    raise ValueError( \
                        "Bad datatype for quantity \'" + quant.name + \
                        "\,' expected double or combo (of doubles).")
            elif 'int /' in quant.get_cmd:
                if quant.datatype == 2:
                    return quant.getValueFromCmdString( \
                        self.daq.getInt( \
                            quant.get_cmd.replace('int ','') % self.dev \
                        ) \
                    )
                # As of 20190913, Labber does not support integer types.
                # Thus a get_cmd of datatype int would correspond exclusively
                # to a combinational list.
                # elif quant.datatype < 2:
                #    return self.daq.getInt(\
                #        quant.get_cmd.replace('int ','') % self.dev \
                #    )
                else:
                    raise ValueError( \
                        "Bad datatype for quantity \'" + quant.name + \
                        "\,' expected combo (of integers).")
            elif 'boolean /' in quant.get_cmd:
                # Booleans are read as ints; strictly positive == True.
                if quant.datatype == 1: \
                    return self.daq.getInt( \
                        quant.get_cmd.replace('boolean ','') % self.dev \
                    ) > 0
                elif quant.datatype == 2:
                    # Throw suboptimal warning
                    self.log( \
                        "Note: getting booleans using combinational lists " +\
                        "is very suboptimal due to ambiguity in the APIs. " +\
                        "\nConsider changing the instruction get_cmd type to"+\
                        " integer, using the cmd_defs 1 and 0 for \'True\' " +\
                        "and \'False\' respectively ("+quant.name+")." ,\
                        level=30)
                    # Fetch True or False, and try to return it.
                    # Due to string ambiguity, several try-exceptions are made:
                    # 'True'/'False', then '1'/'0', then 'true'/'false'.
                    fetched_bool = self.daq.getInt( \
                        quant.get_cmd.replace('boolean ','') % self.dev \
                    ) > 0
                    try:
                        return quant.getValueFromCmdString(str(fetched_bool))
                    except: # TODO: define this exception
                        try:
                            return quant.getValueFromCmdString( \
                                '1' if fetched_bool else '0' \
                            )
                        except: # TODO define this exception
                            # If all else fails, return the lower case version
                            # of the string-parsed boolean. If this throws an
                            # error, the user is not using a reasonable name
                            # for a boolean.
                            return quant.getValueFromCmdString( \
                                str(fetched_bool).lower() \
                            )
                else:
                    raise ValueError( \
                        "Bad datatype for quantity \'" + quant.name + \
                        "\,' expected boolean or combo (of booleans).")
            elif 'string /' in quant.get_cmd:
                # Check whether this is the command line parser
                if quant.name == 'Command line box':
                    # Return the default value
                    return 'double /%s/example/0/command/13, 313.0'
                # NOTE(review): any other 'string /' quantity falls
                # through to the final 'return quant.getValue()' below.
            elif 'other /' in quant.get_cmd:
                # TODO This performGet 'other /'-category should be made more
                #      effective. Even a switch-case perhaps?
                # TODO If there is no need to include any other get command,
                #      other than - Range, then the 'return status quo' should
                #      be put as a 'if not '- Range' in quant.name:' to speed
                #      up the process further.
                # Acquire more complex datatype values. Fortunately, the
                # get routine for these are all quite simple.
                if ' - Range' in quant.name:
                    # Unfortunately, '/range' does not return a number which
                    # may be passed through straight.
                    # Round the return.
                    return quant.getValueFromCmdString( \
                        round(
                            self.daq.getDouble( \
                                quant.get_cmd.replace('other ','') % self.dev \
                            ) \
                        , 1) \
                    )
                    # Parked (verified) code kept below as a string literal;
                    # it is unreachable after the return above.
                    '''
        TODO Executing a performGet to fetch the current marker
             settings might be unnecessary. This would likely
             only return zeroes on bootup since the marker
             settings at this time would have been defaulted.
             Although the code below is verified and may be
             uncommented at any time should you wish to use it.
            elif ('Output ' in quant.name) and ('Marker ' in quant.name):
                # The user is requesting the currently set marker
                # duration or start time for some given channel and marker.
                # Which channel was it and what channel are we talking?
                split = (quant.name).split(' Marker ', 1)
                channel = int(split[0].replace('Output ' ,'')) -1
                marker = int((split[1].replace(' start time','')).replace(' duration','')) -1
                # Get the current sample rate (per divisor)
                sample_rate = \
                    self.getValue('Output sample rate') / \
                    2**self.getValueIndex('Output sample rate divisor')
                # Get marker start or duration?
                if('st' in quant.name):
                    # Start it is. Get value (converted to time).
                    return self.marker_configuration[channel,marker,0] / sample_rate
                else:
                    # Duration then. Get value (converted to time).
                    return self.marker_configuration[channel,marker,1] / sample_rate
                    '''
                else:
                    # Return status quo
                    return quant.getValue()
            else:
                raise NotImplementedError( \
                    "Unrecognised ZI API or other command: " + quant.get_cmd \
                )
        # Is the getValue attempting to get an awgModule value?
        elif 'awgModule' in quant.get_cmd:
            if quant.datatype != 2:
                # TODO:
                # For some reason, acquiring 'enable' from the AWG module
                # works completely opposite to other parametres. This causes
                # the following atrocity, and should be reported as a bug /
                # missing feature to Zurich Instruments.
                # Frankly the entirety of awgModule/awgs/enable is still very
                # broken as of 20191016.
                if not 'awgModule/awg/enable' in quant.get_cmd:
                    return self.awgModule.get(quant.get_cmd)
                else:
                    # Sync first, then unwrap the nested dict the module
                    # returns for 'awg/enable'.
                    self.daq.sync()
                    return ((( \
                        self.awgModule.get('awgModule/awg/enable') \
                    ).get('awg')).get('enable')[0] > 0)
            else:
                return quant.getValueFromCmdString( \
                    self.awgModule.get(quant.get_cmd) \
                )
        # Fallback: return the value currently stored in Labber.
        return quant.getValue()
################################
""" Marker configuration """
################################
def configureMarker(self, channel=0, marker=0, start=0, duration=0):
''' TODO
'''
# Safekeeping hard type conversion
start = int(start)
duration = int(duration)
# Update the currently held configuration.
# Check whether there is a change to be made.
old_start = self.marker_configuration[channel,marker,0]
old_duration = self.marker_configuration[channel,marker,1]
# Get new values.
self.marker_configuration[channel,marker,0] = start
self.marker_configuration[channel,marker,1] = duration
# Difference?
if (old_start != start) or (old_duration != duration):
# Then this waveform should be tagged for updating!
self.waveform_changed[channel] = True
# Does the channel even have marker data?
# And is this even a sequencer change?
# If we changed state between "any True" and "none True", then we
# should also update the sequencer.
if duration <= 0:
# Check if any waveform has markers:
if any(self.waveform_has_markers):
# Update current status of the removed markers.
self.waveform_has_markers[channel] = False
# Was this the last removal?
if not any(self.waveform_has_markers):
# If yes, then this is a sequencer change!
self.sequencer_demands_updating = True
# Prep. the sequencer generation stage 0:
# MARKER_DECLARATION, WAVEFORM_DECLARATION,
# PLAYWAVE, WAITWAVE
self.update_local_awg_program[0] = True
# Note: should this be false, then all waveforms are already
# without markers, and we can skip updating whether there
# are markers or not present.
else:
# Check if no waveforms have markers:
if not any(self.waveform_has_markers):
# Update current status of the added markers.
self.waveform_has_markers[channel] = True
# Was this the first addition?
if any(self.waveform_has_markers):
# If yes, then this is a sequencer change!
self.sequencer_demands_updating = True
# Prep. the sequencer generation stage 0:
# MARKER_DECLARATION, WAVEFORM_DECLARATION,
# PLAYWAVE, WAITWAVE
self.update_local_awg_program[0] = True
else:
# Ok, so this is not a sequencer change.
# Just tag the waveform.
self.waveform_has_markers[channel] = True
################################
""" Instrument instantiation """
################################
    def instantiateInstrumentConnection(self):
        ''' Set up the instrument connection.

        Verifies that LabOne is running, connects to the HDAWG (by
        autodetection when the Labber address is '<autodetect>' or
        '<autoconnect>', otherwise via the supplied serial), derives
        the channel count self.n_ch from the model name, registers the
        installed device options in the instrument server, and finally
        fetches the AWG module.

        Raises:
            AssertionError: LabOne not running, invalid serial, or an
                unreasonable channel count.
            AutoconnectFailure: the autodetected device is not an HDAWG.
            AttributeError: unexpected model name string.
        '''
        # Check whether LabOne is running on the Instrument Server PC.
        # A returned True implies successful operation.
        assert self.isLabOneRunning(), \
            "The operating system did not return a valid process " + \
            "entry for LabOne. The program is likely not running. " + \
            "Please start LabOne, or try rebooting the Instrument " + \
            "Server PC."
        # Acquire device serial / Check whether the user wishes to autoconnect
        user_address_input = self.comCfg.address
        # Is this an autoconnect attempt?
        if user_address_input == '<autodetect>' or \
            user_address_input == '<autoconnect>':
            # Attempt autoconnect attempt using API level 6
            self.daq = ziUtils.autoConnect(api_level = 6)
            self.dev = ziUtils.autoDetect(self.daq)
            # Set the amount of channels available
            device_model = self.daq.getByte(
                str('/'+self.dev+'/features/devtype')
            )
            # Set the amount of channels depending on model
            # (e.g. 'HDAWG8' -> 8 channels).
            if 'HDAWG' in device_model:
                self.n_ch = int(re.sub('HDAWG','',device_model))
                assert (self.n_ch <= 16) and (self.n_ch > 0), \
                    "The device reported an unreasonable amount of " + \
                    "channels. The driver is thus not compatible with " + \
                    "this device."
            else:
                raise AutoconnectFailure( \
                    "The autoconnected device did not identify as an " + \
                    "HDAWG. Please specify device serial manually in the " + \
                    "address field.")
            # Acquire device options
            device_options = self.daq.getByte(
                str('/'+self.dev+'/features/options')
            )
        else:
            # Will attempt to connect to the specified device
            self.dev_uppercase = user_address_input.upper()
            if 'HDAWG-' in self.dev_uppercase:
                self.dev_uppercase = self.dev_uppercase.replace('HDAWG-','')
            self.dev = self.dev_uppercase.lower()
            # Assert that the assigned serial string is valid
            assert 'DEV' in self.dev_uppercase, \
                "Error: Illegal name of instrument (missing \'DEV\')."
            # Scan for connected devices, acquire device props
            discov = ziPython.ziDiscovery()
            props = discov.get(discov.find(self.dev_uppercase))
            # Props provides a usable API level and additional information
            ZI_API = props['apilevel']
            self.log("The server address for the specified address is: \'" + \
                props['serveraddress']+"\', at port "+ \
                str(props['serverport'])+".",level=30)
            # Generate API session
            self.daq, self.device, device_info = ziUtils.create_api_session(
                user_address_input,
                ZI_API,
                required_devtype = "HDAWG",
                required_err_msg = \
                    "The device does not respond like an HDAWG should. " +\
                    "You may have attempted to connect to the HDAWG at " +\
                    "an unexpected moment in time, or provided a serial (" +\
                    str(self.dev_uppercase)+") which does not belong to " +\
                    "an HDAWG.")
            # Acquire device model and its installed options
            device_model = device_info['devicetype']
            device_options = device_info['options']
            # Set the amount of channels depending on model
            try:
                self.n_ch = int(re.sub('HDAWG','',device_model))
            except: # TODO This exception does not have a clear exemption, even SystemClose is a valid exception.
                raise AttributeError( \
                    "The device returned an unexpected model name: \'" + \
                    str(device_model) + "\'")
            assert (self.n_ch <= 16) and (self.n_ch > 0), \
                "The device reported an unreasonable amount of channels. " + \
                "The driver is thus not compatible with this device."
            """ Force connection interface """
            # Connect identified device to session
            # self.daq.connectDevice(
            #     self.dev_uppercase,
            #     props['interfaces'][0]
            # )
        # Update the device options in the instrument server:
        # clear all option flags, then set those the device reports.
        self.setValue('CNT installed', False)
        self.setValue( 'MF installed', False)
        self.setValue( 'ME installed', False)
        self.setValue('SKW installed', False)
        self.setValue( 'PC installed', False)
        # NOTE(review): in the autoconnect path 'device_options' comes
        # from getByte and may be a plain string rather than a list, in
        # which case this loop consumes it character by character —
        # verify against the ZI API return type.
        while len(device_options) > 0:
            option_installed_str = str(next(iter(device_options)))
            try:
                self.setValue(option_installed_str+' installed', True)
            except: # TODO find a suitable exception to use here.
                self.log( \
                    "WARNING: The device reported an unrecognised option (" +\
                    option_installed_str+") - features stemming from having "+\
                    "this option installed may not be usable in this driver."+\
                    " Should this warning remain after updating Labber, " +\
                    "please send a bug report to <EMAIL>." ,\
                    level=30)
            device_options.remove(option_installed_str)
        # TODO This section is invalid until Labber adds support for multiple
        #      state_quant properties for the same instruction file value.
        # Update the channel amount and the related options
        # for the driver instruction file
        # for channel_check in range(1, 9): # TODO 9 (= 8) should be increased
        #     if channel_check <= self.n_ch:
        #         self.setValue('Output channel '+str(channel_check)+' detected', True)
        #     else:
        #         self.setValue('Output channel '+str(channel_check)+' detected', False)
        # Check if the API release version differs from the connected
        # data server's release version.
        ziUtils.api_server_version_check(self.daq)
        # Report successful connection
        self.log('Connected to device '+str(self.dev.upper())+'.', level=30)
        # Acquire AWG module control
        self.fetchAwgModule()
##################
""" AWG module """
##################
def fetchAwgModule(self):
'''This function fetches the AWG module from the API session.
'''
# Fetching the AWG module may throw a general error.
awgModuleAttempts = 3
while awgModuleAttempts > 0:
try:
# Do the actual module instantiation in order to acquire
# AWG control.
self.awgModule = self.daq.awgModule() # Acq. module
self.awgModule.set('awgModule/device', self.dev) # Set dev. ID
# Instantiate the thread
self.awgModule.execute()
# Acquire the AWG data directory and its waveform directory.
self.awg_data_dir = \
self.awgModule.getString('awgModule/directory')
self.awg_waveform_dir = \
os.path.join(self.awg_data_dir, "awg", "waves")
# Identify whether the waveform directory exists.
if not os.path.isdir(self.awg_waveform_dir):
raise DirectoryNotInPath( \
"Did not recognise AWG module waveform directory " + \
"\'{}\'. Did you modify it?".format(self.awg_waveform_dir))
# Clear the loop
awgModuleAttempts = -1
except Exception as awg_fetch_exception: # TODO define exception
self.log( \
"WARNING: \'awgModule fetch\' timeout. " + \
str(awgModuleAttempts-1) + \
" awgModule fetch attempt(s) remaining.",level=30)
awgModuleAttempts -= 1
if awgModuleAttempts == 0:
raise RuntimeError( \
"Failed to acquire AWG module. The returned error was:" + \
"\n##############################\n"+str(awg_fetch_exception))
time.sleep(5) # TODO is this waiting clause a valid tactic?
############################
""" Resetting the device """
############################
def defaultWaveformConfiguration(self):
'''This function generates a default waveform information set.
This can very well be used to reset said configuration if needed.
'''
# Declare the set of loaded waveforms.
self.loaded_waveforms = [[]] * self.n_ch # All waveform data.
self.waveform_changed = [False] * self.n_ch # Update this channel?
# Declare the marker configuration and whether to play markers.
# Syntax: channel, marker (1 or 2), [start value, duration]
self.marker_configuration = np.zeros((self.n_ch, 2, 2))
self.waveform_has_markers = [False] * self.n_ch
self.declare_marker = [False] * self.n_ch
# Declare a flag for detecting when the sequencer requires an update.
self.sequencer_demands_updating = False
# Declare the initial buffer length
self.buffer_length = 0
# Declare an initial highest waveform in use. 0 corresponds to no
# waveforms declared at all. 1 corresponds to one waveform declared
# in total, playing on output 1.
self.highest_waveform_in_use = 0
# Initialise a default flag value, monitoring whether to perform
# an internal repetition delay check. The default is 'Do check.'
self.perform_repetition_check = True
# Declare a default sequencer program.
self.local_awg_program = { \
'WAVEFORM_DECLARATION' : "&" , \
'WHILE_LOOP_START' : "while(true){" , \
'WAIT_FOR_INITIAL_TRIGGER' : "&" , \
'SYNCHRONISE_TO_BEATING_FREQUENCY' : "&" , \
'START_TRIGGER_PULSE' : "&" , \
'PLAYWAVE' : "&" , \
'WAITWAVE' : "&" , \
'DELAY_BEFORE_END_TRIGGER' : "&" , \
'END_TRIGGER_PULSE' : "&" , \
'DELAY_BEFORE_LOOP_END' : "&" , \
'WAIT_FOR_TRIGGER_TO_REPEAT' : "&" , \
'WHILE_LOOP_END' : "}\n\n" , \
'TIMESTAMP' : "&" , \
}
# Generate an initial update-parameter list for the sequencer program
# generator.
self.update_local_awg_program = [True] * 6
# Generate a list for keeping track of the output ranges, its content
# of which disabling the 'Bypass DAC to port' option will return to.
# Upon startup, we don't really have any idea what would be the
# last set value. Thus, poll the instrument for said values.
self.previous_ranges = [1.0] * self.n_ch
for i in range(0,self.n_ch):
self.previous_ranges[i] = float(round(self.daq.getDouble( \
'/%s/sigouts/%s/range' % (self.dev, str(i))),1))
##############################################################
""" Writing waveforms to memory and updating the sequencer """
##############################################################
def updateSequencer(self):
'''Description goes here.
'''
self.generateSequencerProgram()
self.compileAndUploadSourceString()
# After blasting the sequencer memory,
# we must restore the now lost waveforms.
for wave in range(0, self.n_ch):
# TODO Checking for lengths is hardly optimised, right?
# Thus, this section may be made more efficient. For instance using
# some waveform_used list.
if len(self.loaded_waveforms[wave]) > 0:
self.waveform_changed[wave] = True
def compileAndUploadSourceString( self, \
compile_timeout_ms = 10000, \
upload_timeout_ms = 10000):
'''Description goes here.
'''
# Transfer the source string to the compiler.
self.awgModule.set( \
'awgModule/compiler/sourcestring',
self.plain_local_awg_program)
self.log( "Note: if the instrument halts unexpectedly at " + \
"this time, check the local AWG program and/or " + \
"restart the device entirely." , \
level=30)
# Run the compilation process
while (self.awgModule.getInt('awgModule/compiler/status') == -1) \
and (compile_timeout_ms >= 0):
# Timeout monitoring, and setting the polling time
compile_timeout_ms -= 50
time.sleep(0.050)
# Monitor whether the user halts the measurement.
if self.isStopped():
raise CompileAndUploadFailure( "The measurement was " + \
"halted unexpectedly.")
# Fetch compilation status
compiler_status = self.awgModule.getInt('awgModule/compiler/status')
# Check for compilation timeout
if compile_timeout_ms <= 0:
raise CompileAndUploadFailure("The compilation process timed out.")
# Compiler reports success.
elif self.awgModule.getInt('awgModule/compiler/status') == 0:
# Included in the elif-tree to catch and abort the other checks.
pass
# self.log( "Compilation fully successful, will " + \
# "upload the program to the instrument.", level=30)
# Compiler reports failure.
elif compiler_status == 1:
raise CompileAndUploadFailure( \
self.awgModule.getString('awgModule/compiler/statusstring'))
# Compiler reports successful with warnings.
elif compiler_status == 2:
self.log(
"Compilation successful with warnings, will upload " + \
"the program to the instrument.\nCompiler warning: " + \
self.awgModule.getString('awgModule/compiler/statusstring'), \
level=30)
# TODO The -1 compiler status is currently unknown, although it does
# exist. What should be done about -1? This is likely something that ZI
# has to answer.
# TODO They have been contacted.
elif compiler_status == -1:
raise CompileAndUploadFailure( "Compilation failure: compiler "+ \
"returned status \'-1\' which " + \
"seems to indicate \'compiler " + \
"at idle state.\' The compiler "+ \
"message was: \n" + \
self.awgModule.getString(\
'awgModule/compiler/statusstring'))
# Unknown error
elif compiler_status != 0:
raise CompileAndUploadFailure( "Unknown compiler status " + \
"reported by instrument. " + \
"Please report this error: " + \
"status integer = \'" + \
str(compiler_status)+"\'")
# Initiate upload process.
report_line = 1
time.sleep(0.2) # TODO: This delay should be minimised.
# elf/status provides information whether the upload is succeeding.
while (self.awgModule.getDouble('awgModule/progress') < 1.0) \
and (self.awgModule.getInt('awgModule/elf/status') != 1) \
and (upload_timeout_ms >= 0):
# Fetch progress
progress = self.awgModule.getDouble('awgModule/progress') * 100.0
if progress >= 100.0:
break # Take a shortcut in case of tiny sequencer snippets
# Print status
self.log("< {} > awgModule/progress: {:.1f}%".format( \
report_line, \
progress \
), \
level=30 \
)
# Increments the current amount of printed objects
report_line += 1
# The delay should be minimised to the smallest number possible
# not affecting the overall performance. For instance, a de-sync
# would be considered performance-breaking.
if progress >= 98.0:
time.sleep(0.025)
compile_timeout_ms -= 25
elif progress >= 70.0:
time.sleep(0.300)
compile_timeout_ms -= 300
else:
time.sleep(0.600)
compile_timeout_ms -= 600
# Fetch upload status
elf_status = self.awgModule.getInt('awgModule/elf/status')
# Check for upload timeout
if upload_timeout_ms <= 0:
raise CompileAndUploadFailure("The upload process timed out.")
# Upload reported success
elif elf_status == 0:
self.log("< {} > awgModule/progress: 100% - Success".format( \
report_line
), \
level=30 \
)
# Upload reported failure (by not reporting success)
elif elf_status == 1:
raise CompileAndUploadFailure( \
"Upload to the instrument failed at {:.2f}".format( \
self.awgModule.getDouble('awgModule/progress') * 100.0 \
) \
)
# Unknown error
else:
raise CompileAndUploadFailure( \
"Unknown upload status reported " + \
"by instrument at {:.2f}".format( \
self.awgModule.getDouble('awgModule/progress') * 100.0 \
) \
)
# # TODO Delete or leave in?
# # If the device was playing before, enable playback again.
# # Ensure that the value is indeed set.
# if AWG_playback_status == 1:
# timeout = 0.0
# while (((self.awgModule.get('awgModule/awg/enable')).get('awg'))\
# .get('enable')[0] != 1) and (timeout <= 2.0):
# # TODO these values should be minimised
# time.sleep(0.05)
# timeout += 0.05
# self.awgModule.set('awgModule/awg/enable', 1)
# Perform a final check whether the sequencer has run out of memory
cache_utilisation = self.daq.getDouble( \
'/'+str(self.dev)+'/awgs/0/waveform/memoryusage')
if cache_utilisation > 0.9999:
if self.getValue('Halt on cache overflow'):
raise MemoryError( "The sequencer ran out of cache space " + \
"("+str(cache_utilisation * 100.0)+"%)" + \
". Disable \'Halt on cache overflow\' " + \
"or reduce waveform lengths to continue.")
else:
self.log( "Warning: out of sequencer cache memory. " + \
"Expect lower performance.", level=30)
def writeWaveformToMemory(self):
''' Upload waveform vector data to device memory.
TODO insert more description.
'''
# Resetting the core and wave indices
core_index = 0
# Acquiring package length
n = self.buffer_length
# First, we disable the playback. # TODO necessary?
'''self.daq.setInt('/'+str(self.dev)+'/awgs/0/enable', 0)'''
# In order to not crash the device, all waveforms must be uploaded
# interleaved except the last one if it's odd. Not used channels
# must also be declared as phantom waveforms up to the highest channel
# in use, causing a large waste of resources.
# Upload waveform vector data, poll all channels pairwise.
# Remember that highest_waveform_in_use = 0 corresponds to no
# waveforms declared for playback, and 1 corresponds to update
# waveform 0 for channel 1. This way, this loop will not trigger
# when there are no waveforms to play back.
for channel in range(0, self.highest_waveform_in_use, 2):
# Upload waveforms?
if self.waveform_changed[channel] or \
self.waveform_changed[channel+1]:
# Because the user may have changed the measurement range
# between the two measurement points in question,
# we must fetch and re-check both x1 and x2.
# Reset flags:
self.waveform_changed[channel ] = False
self.waveform_changed[channel+1] = False
# Load waveforms channel and channel+1 for treatment
x1 = np.asarray( self.loaded_waveforms[channel ] , dtype = float )
x2 = np.asarray( self.loaded_waveforms[channel+1] , dtype = float )
# Get their lengths, used several times.
len_x1 = len(x1)
len_x2 = len(x2)
# Get the output range of the channels. When running waves with
# 'Direct output' enabled, the output range is fixed to 800 mV.
# The direct output is known as 'Bypass DAC to port' in the
# instruction file due to repeated confusion in usage cases.
'''TODO This is unnecessary right? Since the instrument
and/or command automatically changes the output range when
the 'Direct mode' (Bypass) is in effect? Meaning that
first checking whether it is bypassed is unnecessary.'''
if not self.getValue( \
'Channel %d - Bypass DAC to port' % (channel + 1)):
output_range_x1 = float(self.getCmdStringFromValue( \
'Channel %d - Range' % (channel + 1)))
else:
output_range_x1 = 0.8
if not self.getValue( \
'Channel %d - Bypass DAC to port' % (channel + 2)):
output_range_x2 = float(self.getCmdStringFromValue( \
'Channel %d - Range' % (channel + 2)))
else:
output_range_x2 = 0.8
# Prepare mnemonic package
data = np.zeros((n, 3))
data[:len_x1, 0] = x1 / output_range_x1
data[:len_x2, 1] = x2 / output_range_x2
assert np.max(abs(data[:,0])) <= 1, \
"Halted. The HDAWG was tasked to play a value on " + \
"channel "+str(channel+1)+" larger than the " + \
"channel's range. The absolute value of the maximum " + \
"was "+str(np.max(abs(x1)))+" V."
assert np.max(abs(data[:,1])) <= 1, \
"Halted. The HDAWG was tasked to play a value on " + \
"channel "+str(channel+2)+" larger than the " + \
"channel's range. The absolute value of the maximum " + \
"was "+str(np.max(abs(x2)))+" V."
# Convert the array data to an injectable data format.
# The appropriate core index is hard-coded to 0 as we either
# replace the first waveform, or both the first and second
# waveform of the core in question. In both cases, the index
# should be 0.
# Does the waveform contain markers? This check is done
# in order to speed up uploading, since most waveforms will
# not contain markers.
if self.waveform_has_markers[channel] or \
self.waveform_has_markers[channel+1]:
# The waveform has associated marker data.
# The promise up to this point is that this marker data
# *must* be of the same length as the waveforms themselves.
# Load associated marker data for treatment. Remember that
# markers are stored per channel output, and contains both
# available markers on that channel.
# TODO THIS DOES NOT LOAD MARKER 2
# TODO There is a mismatch in the marker data.
# There is a single spurious single datapoint that
# is left turned on.
marker = 0
x_marks = np.zeros(n)
x_marks[int(self.marker_configuration[channel,marker,0]): int(self.marker_configuration[channel,marker,0])+int(self.marker_configuration[channel,marker,1])] = 1
data[:, 2] = x_marks
# TODO If ZI adds support for different-length upload packets,
# then the marker data cannot be locked to be strictly the
# length of the buffer.
# Will there be an interleaved upload?
# Note the optimisation:
# if channel+1 <= self.highest_waveform_in_use-1:
if channel <= self.highest_waveform_in_use-2:
inject = \
ziUtils.convert_awg_waveform( wave1=data[:,0], \
wave2=data[:,1], \
markers=data[:,2])
else:
inject = \
ziUtils.convert_awg_waveform( wave1=data[:,0], \
markers=data[:,2])
try:
# Set command basis
base = '/%s/awgs/%d/' % (self.dev, core_index)
# Inject the injectable data. Note that all uploads
# whatsoever will be sent to wave index 0, even
# interleaved ones.
self.daq.setVector(base + 'waveform/waves/0', inject)
except Exception as setVector_exception:
# Get time of error
error_timestamp = \
(datetime.now()).strftime("%d-%b-%Y (%H:%M:%S)")
self.log( "WARNING: There was an exception when " +\
"attempting to upload waveforms (with " +\
"markers) at time: "+\
error_timestamp, level=30)
# Get exception
self.log( \
"The exception was: " + str(setVector_exception),\
level=30)
else:
# The waveform does not have associated markers.
# Perform normal upload.
# Will there be an interleaved upload?
# Note the optimisation:
# if channel+1 <= self.highest_waveform_in_use-1:
if channel <= self.highest_waveform_in_use-2:
inject = \
ziUtils.convert_awg_waveform( wave1=data[:,0], \
wave2=data[:,1])
else:
inject = \
ziUtils.convert_awg_waveform( wave1=data[:,0] )
# Set command basis
base = '/%s/awgs/%d/' % (self.dev, core_index)
try:
# Inject the injectable data. Note that all uploads
# whatsoever will be sent to wave index 0, even
# interleaved ones.
self.daq.setVector(base + 'waveform/waves/0', inject)
except Exception as setVector_exception:
# Get time of error
error_timestamp = \
(datetime.now()).strftime("%d-%b-%Y (%H:%M:%S)")
self.log( "WARNING: There was an exception when " + \
"attempting to upload waveforms " + \
"(without markers) at time: " + \
error_timestamp, level=30)
# Get exception
self.log( \
"The exception was: " + str(setVector_exception), \
level=30)
# Increase the core index for the next run of the for-loop
core_index += 1
# Attempt to enable instrument (even after injection failure).
remaining_enable_attempts = 3
while remaining_enable_attempts >= 0:
try:
# Re-enable the playback
self.daq.setInt('/'+str(self.dev)+'/awgs/0/enable', 1)
# Success
remaining_enable_attempts = -1
except Exception: # TODO define exception
self.log( \
"WARNING: setVector timeout. " + \
str(remaining_enable_attempts-1) + \
" upload attempt(s) remaining.", level=30)
remaining_enable_attempts -= 1
time.sleep(5) # TODO is this waiting clause a valid tactic?
if remaining_enable_attempts == 0:
# Shall we consider waiting for device to auto-restore?
if self.getValue( \
'Attempt API reconnection when the HDAWG crashes'):
# Perform long wait.
halt_time = self.getValue('Time to wait')
self.log( + \
"The measurement was halted by the instrument " +\
"driver for device \'"+self.dev_uppercase +\
"\' because the device crashed. The measurement" +\
" will now wait for "+str(halt_time)+" seconds " +\
"and attempt to reconnect to the ZI API.",level=30)
time.sleep(halt_time)
# Attempt to re-fetch the API.
self.instantiateInstrumentConnection()
else:
raise RuntimeError( \
"HDAWG \'"+self.dev_uppercase+"\' has crashed; the " + \
"device does not respond to any calls from the PC. " + \
"Consider restarting the device using the front button.")
def fetchAndAssembleWaveform(self, wave):
'''TODO
Following experiments, CK (author) concluded that uploading segments
of playWave and filling them retroactively with setWave uploads is not
a scalable solution, mainly as the amount of uploads increase for each
and every possible combination of waveform primitives.
Hence, the solution was to assemble primitives before simply uploading
the finished segment as a flat waveform.
'''
# We have received a request to assemble waveform 'wave.'
# Is the blueprint empty?
blueprint = \
self.getValueArray('Waveform '+str(wave+1)+' sequence blueprint')
# Prepare waveform for assembly
assembled_waveform = []
if len(blueprint) > 0: # TODO is it possible that Labber returns a [None] at this stage?
# There is a blueprint, assemble the waveform.
# The syntax of the blueprint is:
# [S1, P1, S2, P2, S3, P3] where for Blueprint X (1...n_ch) -
# insert S1 zeroes, insert primitive 1, insert S2 zeroes, etc.
# The unit is volts. Vectors start at 0. The number of elements
# in a blueprint is always even.
# Example
# Primitive 4: [0.313 0.13 0.313 0.13]
# Primitive 16: [3.13 31.3 0 0 0.3]
# Blueprint 7: [3, 4, 2, 16]
# Result 7: [0 0 0 0.313 0.13 0.313 0.13 0 0 3.13 31.3 0 0 0.3]
# For blueprint X, [insert Y zeroes, insert primitive Z ...]
for i in range(0,len(blueprint),2):
assembled_waveform.extend([0] * blueprint[i])
assembled_waveform.extend( \
self.getValueArray( \
'Waveform primitive '+str(blueprint[i+1]+1) \
) \
)
else:
# No blueprint is given, default to fetching the
# Channel - Waveform vector
assembled_waveform = \
self.getValueArray('Channel '+str(wave+1)+' - Waveform')
return assembled_waveform
#####################################################
""" Check validness of requested repetition delay """
#####################################################
    def checkInternalRepetitionRateValid(self):
        '''Verify that the requested internal trigger period is achievable.

        RETURNS BY ASSIGNING A SELF VALUE: The internal delay period following
        calculation, ie. how much time (in seconds) will be pure delay.
        Should this value be negative, then there is insufficient time left
        for internally delaying the repetition to the user-requested value.
        Required ini values to have been set for this calculation to be valid
        are listed below. Do note that these values are NOT order-sensitive
        in the instruction file. Because, all of these ini commands simply
        trigger an Internal repetition period check upon the next isFinalCall
        via setting the boolean self.perform_repetition_check.
            - Internal trigger period
            - Sequencer triggers
            - Output sample rate
            - Output sample rate divisor
            - Trigger out delay
            - Calibrate trigger out delay
            - AWGX - Waveform , where X = self.n_ch amount of waveforms defined
            - Calibrate internal trigger period
            - Halt on illegal repetition rate
            - Dynamic repetition rate
        When calling to check whether the internal trigger period is valid,
        the value stored in Labber at 'Internal trigger period' will be
        seen as the new deadline.
        '''
        # Fetch the value to be checked
        requested_trigger_period = self.getValue('Internal trigger period')
        # Count the amount of trigger cycles required for setting
        # the triggers. Each setTrigger call costs one sequencer cycle,
        # hence 2 cycles per pulse (set high + set low).
        # TODO Remember to remove this part when changing from
        # setTrigger to markers.
        sequencer_trigger = self.getValue('Sequencer triggers')
        if sequencer_trigger == \
            'Send at AWG program start':
            trigger_cycles = 2
        elif sequencer_trigger == \
            'Send at AWG program finish':
            trigger_cycles = 2
        elif sequencer_trigger == \
            'Hold high during playback':
            trigger_cycles = 2
        elif sequencer_trigger == \
            'Send at AWG program start + finish':
            trigger_cycles = 4
        else:
            trigger_cycles = 0
        # Fetch the sequencer clock and the required amount of
        # wait cycles before the final trigger (if any).
        sample_rate = \
            self.getValue('Output sample rate') / \
            2**self.getValueIndex('Output sample rate divisor')
        # The sequencer operational count (OPS) is 1/8 of the
        # sample clock.
        sequencer_clk = sample_rate / 8.0
        # Now once we have fetched the sequencer clock rate, we may
        # acquire the amount of wait cycles requested before the
        # trigger.
        wait_cycles_before_trigger = 0
        # Shall there be a trigger out delay?
        if self.getValue('Trigger out delay') > 0:
            if self.getValue('Sequencer triggers') in [ \
                'Send at AWG program finish', \
                'Send at AWG program start + finish' ]:
                # At this instance, we (rightly so) expect the
                # trigger out delay to be confirmed as valid.
                # Fetch the requested trigger out delay.
                trigger_out_delay = \
                    self.getValue('Trigger out delay') - \
                    self.getValue('Calibrate trigger out delay')
                # Calculate how many cycles of the sequencer clock is required
                # to equal the user-requested delay. Send it to UserReg(0).
                wait_cycles_before_trigger = \
                    int(round(trigger_out_delay * sequencer_clk))
        # Total time consumed by trigger handling, in seconds.
        time_for_running_auxiliary_code = \
            (wait_cycles_before_trigger + trigger_cycles) \
            / sequencer_clk
        # How much time is spent playing waveforms? We require the buffer
        # length and the amount of currently playing waves. The latter is
        # solved by observing self.highest_waveform_in_use. Remember that
        # if no waveform is declared, highest_waveform_in_use = 0.
        # The 2* factor stems from 2 sequencer cycles being added for every
        # declared waveform in the sequencer program. This in turn stems
        # from playWave, where two arguments (one cycle each) are required
        # to get a waveform onto the output.
        time_for_playing_waveforms = \
            (self.buffer_length / sample_rate) + \
            2 * (self.highest_waveform_in_use / sequencer_clk)
        # Perform the final check: what remains of the requested period
        # after all mandatory program activity is pure delay.
        internal_delay_period = \
            requested_trigger_period \
            - time_for_running_auxiliary_code \
            - time_for_playing_waveforms \
            - self.getValue('Calibrate internal trigger period')
        # Internal trigger period valid or invalid?
        if internal_delay_period < 0:
            # Negative = invalid
            # Do the following checks:
            if self.getValue('Halt on illegal repetition rate'):
                # The repetition rate is illegal since the internal delay
                # period is negative, hence 'the requested' minus
                # 'the internal' below.
                raise AssertionError(
                    "Instrument halted: the sequencer program requires " + \
                    "more time to play than the requested internal " + \
                    "repetition rate. With the current settings, the " + \
                    "requested trigger period must be increased to " + \
                    str(requested_trigger_period - internal_delay_period) + \
                    " s minimum. Should the settings change, this minimum "+ \
                    "value may increase. It is thus good practice to add " + \
                    "some additional time to the updated value.")
            elif self.getValue('Dynamic repetition rate'):
                # Expand the calculated delay.
                # Note: subtracting a negative value.
                internal_delay_period = \
                    requested_trigger_period - internal_delay_period
                # Update the Labber setting.
                self.setValue( \
                    'Internal trigger period', \
                    internal_delay_period
                )
                # # Insert the calculated value into the sequencer program.
                # internal_trigger_waiting = \
                #     int(round(internal_delay_period * sequencer_clk))
                # self.local_awg_program = self.local_awg_program.replace(\
                #     '&DELAY_BEFORE_LOOP_END', \
                #     'wait(' +str(internal_trigger_waiting)+ ');')
            else:
                # The repetition rate is illegal but the user wishes to
                # ignore said fact.
                self.log( "Warning: illegal repetition rate detected " + \
                          "and ignored.", level=30)
        # "Return" an internal delay period. This is used in the sequencer
        # program generator.
        self.verified_internal_delay_period = internal_delay_period
#######################################
""" Generate AWG sequencer program. """
#######################################
    def generateSequencerProgram(self):
        '''This function generates a local AWG program, that in turn will be
        uploaded into the sequencer.

        The general layout of the sequencer program generation is to assemble
        a skeleton dictionary bearing &-tags. Depending on a vast array of
        options, these tags will be modified by the generation functions
        accordingly. {'waveform_declaration','&'} may for instance be replaced
        with the waveform declarations, enabling the instrument to play the
        Labber-defined waveforms.

        Only the dictionary entries whose corresponding flag in
        self.update_local_awg_program (entries 0..5) is True are
        regenerated; the TIMESTAMP entry is always refreshed. The final
        plain-text program is assembled into
        self.plain_local_awg_program and sanity-checked for leftover
        '&' tags.

        Default skeleton:
        self.local_awg_program = { \
            'WAVEFORM_DECLARATION'              : "&" , \
            'WHILE_LOOP_START'                  : "&" , \
            'WAIT_FOR_INITIAL_TRIGGER'          : "&" , \
            'SYNCHRONISE_TO_BEATING_FREQUENCY'  : "&" , \
            'START_TRIGGER_PULSE'               : "&" , \
            'PLAYWAVE'                          : "&" , \
            'WAITWAVE'                          : "&" , \
            'DELAY_BEFORE_END_TRIGGER'          : "&" , \
            'END_TRIGGER_PULSE'                 : "&" , \
            'DELAY_BEFORE_LOOP_END'             : "&" , \
            'WAIT_FOR_TRIGGER_TO_REPEAT'        : "&" , \
            'WHILE_LOOP_END'                    : "&" , \
            'TIMESTAMP'                         : "&" , \
        }
        '''
        # Calculate basic clock and samling rates, used for several functions
        # in the sequencer program generation.
        sample_rate = \
            self.getValue('Output sample rate') / \
            2**self.getValueIndex('Output sample rate divisor')
        # The sequencer operational count (OPS) is 1/8 of the sample clock.
        sequencer_clk = sample_rate / 8.0
        # The channel grouping has been modified at the performSet for
        # every command that involves the usage of the on-board oscillators.
        # # # # Generate program # # # #
        # TODO DEBUG - development logging, remove before release.
        self.log('Should we update local program [0]? : '+str(self.update_local_awg_program[0])+'\nDID any waveform have markers? = '+str(any(self.waveform_has_markers)),level=30)
        # Are there any changes to entry 0:
        # MARKER_DECLARATION, WAVEFORM_DECLARATION, PLAYWAVE, WAITWAVE?
        if self.update_local_awg_program[0]:
            # Waveform declaration and playwave compiler prototypes.
            waveform_declaration_setup = ''
            playwave_setup = ''
            # Should we place commas between waveforms?
            first_waveform_declared = False
            # Should there be a marker declaration in the beginning?
            if any(self.waveform_has_markers):
                # Add marker declaration.
                waveform_declaration_setup += \
                    'wave w_m = marker({0}, 1);\n'.format(self.buffer_length)
                # What waveforms should be declared with a marker?
                # Markers are declared pairwise: if either channel of a
                # pair has markers, both get the marker declaration.
                self.declare_marker = [False] * self.n_ch
                for n in range(0, self.highest_waveform_in_use, 2):
                    # For all channels
                    if n < self.highest_waveform_in_use-1:
                        if self.waveform_has_markers[n] or self.waveform_has_markers[n+1]:
                            self.declare_marker[n]   = True
                            self.declare_marker[n+1] = True
                    elif n == self.highest_waveform_in_use-1:
                        # But, if this waveform is the highest waveform in use,
                        # and the following (non-existant) waveform has marker
                        # data, then do not declare markers on the higher part
                        # of the waveform pair.
                        if self.waveform_has_markers[n]:
                            self.declare_marker[n] = True
            # How many waveforms should be declared?
            # Remember that self.highest_waveform_in_use = 0 corresponds to no
            # waveforms declared.
            for n in range(0, self.highest_waveform_in_use):
                # Is this waveform wasted? If len > 0, then no.
                if len(self.loaded_waveforms[n]) > 0:
                    # TODO This here below is a variant waveform
                    # declaration using randomUniform. I've been told that
                    # using zeros might cause unwanted optimisation in the
                    # SeqC compiler, so that for instance the setVector
                    # command would not be able to correctly upload
                    # waveforms.
                    # 'wave w{0} = randomUniform({1},1e-4) + m1;\n'\
                    # .format(n+1, self.buffer_length)
                    if(self.declare_marker[n]):
                        waveform_declaration_setup += \
                            'wave w{0} = zeros({1}) + w_m;\n'\
                            .format(n+1, self.buffer_length)
                    else:
                        waveform_declaration_setup += \
                            'wave w{0} = zeros({1});\n'\
                            .format(n+1, self.buffer_length)
                else:
                    # Waveform is wasted. Add markers or not?
                    if(self.declare_marker[n]):
                        waveform_declaration_setup += \
                            'wave w{0} = zeros({1}) + w_m; // Unused.\n'\
                            .format(n+1, self.buffer_length)
                    else:
                        waveform_declaration_setup += \
                            'wave w{0} = zeros({1}); // Unused.\n'\
                            .format(n+1, self.buffer_length)
                # Waveform initial declaration / generation
                if first_waveform_declared:
                    playwave_setup += ', {0}, w{0}'.format(n+1)
                else:
                    # Declare the first waveform for playback
                    playwave_setup += '{0}, w{0}'.format(n+1)
                    first_waveform_declared = True
            # The condition for checking the waveform declaration is covered
            # by the playwave setup condition, thus the actions have been
            # combined.
            if playwave_setup != '':
                self.local_awg_program.update({ \
                    'WAVEFORM_DECLARATION':waveform_declaration_setup + '\n', \
                    'PLAYWAVE':'\tplayWave('+playwave_setup+');\n', \
                    'WAITWAVE':'\twaitWave();\n'})
            else:
                # There are no waves to play, remove all instances related
                # to playing a wave. The HDAWG has a tendancy to crash if this
                # step is done improperly.
                self.local_awg_program.update({ \
                    'WAVEFORM_DECLARATION':'', \
                    'PLAYWAVE':'', \
                    'WAITWAVE':''})
        # Are there any changes to entry 1:
        # WHILE_LOOP_START, WHILE_LOOP_END?
        # (Aka: 'Is the measurement of some single-shot type?)'
        if self.update_local_awg_program[1]:
            # TODO: perform a check whether this is a single shot measurement.
            # if( Single shot measurement )
            ''' TODO There is currently no setting which modifies this part of the generateSequencerProgram function. '''
            self.local_awg_program.update({ \
                'WHILE_LOOP_START':'while(true){\n', \
                'WHILE_LOOP_END':'}\n\n'})
            # else:
                # self.local_awg_program.update({ \
                    # 'WHILE_LOOP_START':'', \
                    # 'WHILE_LOOP_END':''})
        # Are there any changes to entry 2:
        # WAIT_FOR_INITIAL_TRIGGER, DELAY_BEFORE_LOOP_END,
        # WAIT_FOR_TRIGGER_TO_REPEAT?
        if self.update_local_awg_program[2]:
            # How and when should the HDAWG play the sequencer?
            trigger_mode = self.getValue('Run mode')
            if trigger_mode == 'Play once, then external trigger':
                # The 'Play once, then external trigger' option is very similar
                # to the external trigger apart from playing the AWG once
                # to initiate the measurement cycle.
                self.local_awg_program.update({ \
                    'WAIT_FOR_INITIAL_TRIGGER':'', \
                    'WAIT_FOR_TRIGGER_TO_REPEAT':'\twaitDigTrigger(1);\n', \
                    'DELAY_BEFORE_LOOP_END':''})
            elif trigger_mode == 'Internal trigger':
                # On internal trigger, set up a delay at the end of
                # the sequencer program.
                # Trash the 'wait_for_trigger' tags.
                self.local_awg_program.update({ \
                    'WAIT_FOR_INITIAL_TRIGGER':'', \
                    'WAIT_FOR_TRIGGER_TO_REPEAT':''})
                # At this point in time, the isFinalCall subfunction
                # already checked and verified the internal repetition delay
                # if any. If the "returned" verified_internal_delay_period is
                # negative, and the checkInternalRepetitionRateValid function
                # did not halt the program - then perform the following action:
                if self.verified_internal_delay_period < 0:
                    # The checked internal delay period is negative ergo
                    # impossible to represent.
                    self.local_awg_program.update({ \
                        'DELAY_BEFORE_LOOP_END': \
                        '\t// Invalid internal repetition delay.\n'})
                elif self.getValue('Use oscillator-based repetition delay'):
                    # Insert oscillator waiting code
                    self.local_awg_program.update({ \
                        'DELAY_BEFORE_LOOP_END':'\twaitSineOscPhase(2);\n'})
                else:
                    # Insert the calculated wait delay before the final loop
                    # as done by the checkInternalRepetitionRateValid function.
                    internal_delay_period = self.verified_internal_delay_period
                    internal_trigger_waiting = \
                        int(round(internal_delay_period * sequencer_clk))
                    self.local_awg_program.update({ \
                        'DELAY_BEFORE_LOOP_END': \
                        '\twait(' + str(internal_trigger_waiting) + ');\n'})
            elif trigger_mode == 'External trigger':
                # On external trigger, the AWG will halt its execution in the
                # beginning of the sequencer program. It proceeds to await an
                # external triggering signal.
                self.local_awg_program.update({ \
                    'WAIT_FOR_INITIAL_TRIGGER':'\twaitDigTrigger(1);\n', \
                    'WAIT_FOR_TRIGGER_TO_REPEAT':'', \
                    'DELAY_BEFORE_LOOP_END':''})
            else:
                raise ValueError( \
                    "Unknown run mode acquired, there is likely " + \
                    "an error in the driver .ini-file.")
        # Are there any changes to entry 3:
        # SYNCHRONISE_TO_BEATING_FREQUENCY?
        if self.update_local_awg_program[3]:
            # Synchronise to beating frequency to minimise inter-device jitter?
            if self.getValue('Minimise inter-device asynchronous jitter'):
                self.local_awg_program.update({ \
                    'SYNCHRONISE_TO_BEATING_FREQUENCY':'\twaitSineOscPhase(1);\n'})
            else:
                self.local_awg_program.update({ \
                    'SYNCHRONISE_TO_BEATING_FREQUENCY':''})
        # Are there any changes to entry 4:
        # START_TRIGGER_PULSE, END_TRIGGER_PULSE?
        if self.update_local_awg_program[4]:
            # Sequencer triggers
            sequencer_trigger = self.getValue('Sequencer triggers')
            if sequencer_trigger == 'Send at AWG program start':
                # On 'Send at AWG program start,' send an initial digital
                # marker pulse and remove all other markers.
                self.local_awg_program.update({ \
                    'START_TRIGGER_PULSE': \
                        '\tsetTrigger(0b1111); setTrigger(0b0000);\n', \
                    'END_TRIGGER_PULSE': \
                        '' \
                }) # TODO This paragraph will be changed at version 0.84
            elif sequencer_trigger == 'Send at AWG program finish':
                # On 'Send at AWG program finish,' send a final digital marker
                # pulse and remove all other markers.
                self.local_awg_program.update({ \
                    'START_TRIGGER_PULSE': \
                        '', \
                    'END_TRIGGER_PULSE': \
                        '\tsetTrigger(0b1111); setTrigger(0b0000);\n' \
                })
            elif sequencer_trigger == 'Hold high during playback':
                # On 'Hold high during playback,' send an initial marker start,
                # and as a final marker gesture pull it low.
                self.local_awg_program.update({ \
                    'START_TRIGGER_PULSE': \
                        '\tsetTrigger(0b1111);\n', \
                    'END_TRIGGER_PULSE': \
                        '\tsetTrigger(0b0000);\n' \
                })
            elif sequencer_trigger == 'Send at AWG program start + finish':
                # On 'Send at AWG program start + finish,' send a marker both
                # at the sequencer program start and finish.
                self.local_awg_program.update({ \
                    'START_TRIGGER_PULSE': \
                        '\tsetTrigger(0b1111); setTrigger(0b0000);\n', \
                    'END_TRIGGER_PULSE': \
                        '\tsetTrigger(0b1111); setTrigger(0b0000);\n' \
                })
            elif sequencer_trigger == 'Do not send sequencer triggers':
                # On 'Do not send any triggers,' remove all program
                # tags related to generating sequencer triggers.
                self.local_awg_program.update({ \
                    'START_TRIGGER_PULSE': \
                        '', \
                    'END_TRIGGER_PULSE': \
                        '' \
                })
            else:
                raise ValueError( \
                    "Unknown option selected for sequencer triggers. " + \
                    "There is likely an error in the driver .ini-file.")
        # Are there any changes to entry 5:
        # DELAY_BEFORE_END_TRIGGER?
        if self.update_local_awg_program[5]:
            # Shall there be a trigger out delay?
            if self.getValue('Trigger out delay') > 0 \
            and self.getValue('Sequencer triggers') in [ \
                'Send at AWG program finish', \
                'Send at AWG program start + finish' ]:
                # Is the requested trigger out delay representable?
                # Calculate the lowest representable delay:
                # Because of the layout of the combinational list, it is
                # sufficient to acquire the value index.
                # Three softcore cycles are required for acquiring the userreg
                # content. When trying to set the lowest possible delay without
                # removing the getUserReg clause altogether, adding these three
                # strikes is crucial.
                lowest_representable_delay = 3 / sequencer_clk
                # TODO:
                # Check whether there is in fact some calculation error that
                # causes the delay to overshoot (thus removing the purpose of
                # calibrating the Trigger out delay)
                # Fetch the requested trigger out delay.
                trigger_out_delay = self.getValue('Trigger out delay') \
                                  - self.getValue('Calibrate trigger out delay')
                if trigger_out_delay >= lowest_representable_delay:
                    # The requested Trigger out delay is representable.
                    # Calculate how many cycles of the sequencer clock is
                    # required to equal the user-requested delay.
                    # Send it to UserReg(0).
                    self.daq.setDouble( \
                        '/' + self.dev + '/awgs/0/userregs/0', \
                        int(round(trigger_out_delay * sequencer_clk))
                    )
                    # Insert the command itself.
                    self.local_awg_program.update({ \
                        'DELAY_BEFORE_END_TRIGGER':'\twait(getUserReg(0));'})
                else:
                    # Not representable.
                    if trigger_out_delay != 0:
                        self.log( \
                            "Warning: the \'Trigger out delay\' requested " +\
                            "is lower than the minimum representable delay " +\
                            "at the selected sample clock rate.", level=30)
                        # Return 0 to the user.
                        self.setValue('Trigger out delay', 0)
                    # Remove the tag.
                    self.local_awg_program.update({ \
                        'DELAY_BEFORE_END_TRIGGER':''})
            else:
                # No trigger out delay was requested.
                self.local_awg_program.update({ \
                    'DELAY_BEFORE_END_TRIGGER':''})
        # Are there any changes to TIMESTAMP?
        # Yes. Most scientists agree that time moves forward.
        # Insert final message and timestamp into the sequencer code.
        timestamp = (datetime.now()).strftime("%d-%b-%Y (%H:%M:%S)")
        self.local_awg_program.update({'TIMESTAMP': \
            "// This sequencer code was automatically generated at " + \
            timestamp})
        # Reset the entire list. This is likely the quickest operation, right?
        self.update_local_awg_program = [False] * 6
        # Generate a plain text local AWG program from the dictionary.
        self.plain_local_awg_program = ''
        for key in self.local_awg_program:
            self.plain_local_awg_program += self.local_awg_program[key]
        # Sanity check
        if '&' in self.plain_local_awg_program:
            raise SystemError(\
                "The local AWG sequencer program has not been generated " + \
                "properly. This bug should not appear, please report it." + \
                "\n\nThe generated AWG program was:\n" + \
                self.plain_local_awg_program )
###################################################
""" Functions not related to Labber explicitly. """
###################################################
def isLabOneRunning(self):
'''This function asserts that LabOne is running.
'''
# For all running process ID's:
for process_id in psutil.pids():
# Acquire current process information to sift through
process_information = psutil.Process(process_id)
# Is this the ziService process?
if 'ziService' in process_information.name():
return True
# Failure fallback:
return False
def printAllListItemsToFile(self, list, name_of_file = 'ListOutput.txt', halt_after_write = False):
''' TODO DEBUG
This is a debug function, it will be removed before the final release.
'''
with open('C:\\Users\\qtlab\\Desktop\\'+str(name_of_file), 'w') as f:
for item in list:
f.write("%s\n" % item)
assert halt_after_write == False, "Wrote list to file!"
####################################
""" Miscellaneous functionality. """
####################################
if __name__ == '__main__':
    # This module is a Labber instrument driver; it is import-only and has
    # no standalone command-line entry point.
    raise NotImplementedError(
        "This driver is currently not executable from the command line.")
class AutoconnectFailure(Exception):
    """Signals that automatic connection to the instrument failed."""
class DirectoryNotInPath(Exception):
    """Signals that a required directory is missing from the search path."""
class CompileAndUploadFailure(Exception):
    """Signals that compiling or uploading the sequencer program failed."""
class CloseDeviceException(Exception):
pass | [
"psutil.pids",
"zhinst.utils.autoConnect",
"psutil.Process",
"os.path.join",
"numpy.asarray",
"time.sleep",
"zhinst.utils.autoDetect",
"numpy.any",
"zhinst.ziPython.ziDiscovery",
"numpy.zeros",
"datetime.datetime.now",
"zhinst.utils.api_server_version_check",
"numpy.array_equal",
"os.path.... | [((82924, 82966), 'zhinst.utils.api_server_version_check', 'ziUtils.api_server_version_check', (['self.daq'], {}), '(self.daq)\n', (82956, 82966), True, 'import zhinst.utils as ziUtils\n'), ((86185, 86212), 'numpy.zeros', 'np.zeros', (['(self.n_ch, 2, 2)'], {}), '((self.n_ch, 2, 2))\n', (86193, 86212), True, 'import numpy as np\n'), ((93319, 93334), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (93329, 93334), False, 'import time\n'), ((141203, 141216), 'psutil.pids', 'psutil.pids', ([], {}), '()\n', (141214, 141216), False, 'import psutil\n'), ((77228, 77260), 'zhinst.utils.autoConnect', 'ziUtils.autoConnect', ([], {'api_level': '(6)'}), '(api_level=6)\n', (77247, 77260), True, 'import zhinst.utils as ziUtils\n'), ((77287, 77315), 'zhinst.utils.autoDetect', 'ziUtils.autoDetect', (['self.daq'], {}), '(self.daq)\n', (77305, 77315), True, 'import zhinst.utils as ziUtils\n'), ((78998, 79020), 'zhinst.ziPython.ziDiscovery', 'ziPython.ziDiscovery', ([], {}), '()\n', (79018, 79020), True, 'import zhinst.ziPython as ziPython\n'), ((90450, 90466), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (90460, 90466), False, 'import time\n'), ((141330, 141356), 'psutil.Process', 'psutil.Process', (['process_id'], {}), '(process_id)\n', (141344, 141356), False, 'import psutil\n'), ((56718, 56758), 'numpy.array_equal', 'np.array_equal', (['current_waveform', '[None]'], {}), '(current_waveform, [None])\n', (56732, 56758), True, 'import numpy as np\n'), ((62976, 63005), 'numpy.any', 'np.any', (['self.waveform_changed'], {}), '(self.waveform_changed)\n', (62982, 63005), True, 'import numpy as np\n'), ((84179, 84226), 'os.path.join', 'os.path.join', (['self.awg_data_dir', '"""awg"""', '"""waves"""'], {}), "(self.awg_data_dir, 'awg', 'waves')\n", (84191, 84226), False, 'import os\n'), ((94520, 94537), 'time.sleep', 'time.sleep', (['(0.025)'], {}), '(0.025)\n', (94530, 94537), False, 'import time\n'), ((99172, 99227), 'numpy.asarray', 'np.asarray', 
(['self.loaded_waveforms[channel]'], {'dtype': 'float'}), '(self.loaded_waveforms[channel], dtype=float)\n', (99182, 99227), True, 'import numpy as np\n'), ((99257, 99316), 'numpy.asarray', 'np.asarray', (['self.loaded_waveforms[channel + 1]'], {'dtype': 'float'}), '(self.loaded_waveforms[channel + 1], dtype=float)\n', (99267, 99316), True, 'import numpy as np\n'), ((100843, 100859), 'numpy.zeros', 'np.zeros', (['(n, 3)'], {}), '((n, 3))\n', (100851, 100859), True, 'import numpy as np\n'), ((139804, 139818), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (139816, 139818), False, 'from datetime import datetime\n'), ((58694, 58745), 'numpy.array_equal', 'np.array_equal', (['current_waveform', 'previous_waveform'], {}), '(current_waveform, previous_waveform)\n', (58708, 58745), True, 'import numpy as np\n'), ((77647, 77680), 're.sub', 're.sub', (['"""HDAWG"""', '""""""', 'device_model'], {}), "('HDAWG', '', device_model)\n", (77653, 77680), False, 'import re\n'), ((80316, 80349), 're.sub', 're.sub', (['"""HDAWG"""', '""""""', 'device_model'], {}), "('HDAWG', '', device_model)\n", (80322, 80349), False, 'import re\n'), ((84329, 84365), 'os.path.isdir', 'os.path.isdir', (['self.awg_waveform_dir'], {}), '(self.awg_waveform_dir)\n', (84342, 84365), False, 'import os\n'), ((85389, 85402), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (85399, 85402), False, 'import time\n'), ((94651, 94666), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (94661, 94666), False, 'import time\n'), ((94766, 94781), 'time.sleep', 'time.sleep', (['(0.6)'], {}), '(0.6)\n', (94776, 94781), False, 'import time\n'), ((103218, 103229), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (103226, 103229), True, 'import numpy as np\n'), ((104038, 104127), 'zhinst.utils.convert_awg_waveform', 'ziUtils.convert_awg_waveform', ([], {'wave1': 'data[:, 0]', 'wave2': 'data[:, 1]', 'markers': 'data[:, 2]'}), '(wave1=data[:, 0], wave2=data[:, 1], markers=\n data[:, 2])\n', (104066, 
104127), True, 'import zhinst.utils as ziUtils\n'), ((104341, 104407), 'zhinst.utils.convert_awg_waveform', 'ziUtils.convert_awg_waveform', ([], {'wave1': 'data[:, 0]', 'markers': 'data[:, 2]'}), '(wave1=data[:, 0], markers=data[:, 2])\n', (104369, 104407), True, 'import zhinst.utils as ziUtils\n'), ((106226, 106290), 'zhinst.utils.convert_awg_waveform', 'ziUtils.convert_awg_waveform', ([], {'wave1': 'data[:, 0]', 'wave2': 'data[:, 1]'}), '(wave1=data[:, 0], wave2=data[:, 1])\n', (106254, 106290), True, 'import zhinst.utils as ziUtils\n'), ((106447, 106493), 'zhinst.utils.convert_awg_waveform', 'ziUtils.convert_awg_waveform', ([], {'wave1': 'data[:, 0]'}), '(wave1=data[:, 0])\n', (106475, 106493), True, 'import zhinst.utils as ziUtils\n'), ((108702, 108715), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (108712, 108715), False, 'import time\n'), ((109709, 109730), 'time.sleep', 'time.sleep', (['halt_time'], {}), '(halt_time)\n', (109719, 109730), False, 'import time\n'), ((29657, 29673), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (29667, 29673), False, 'import time\n'), ((26771, 26786), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (26781, 26786), False, 'import time\n'), ((30423, 30439), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (30433, 30439), False, 'import time\n'), ((105189, 105203), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (105201, 105203), False, 'from datetime import datetime\n'), ((107174, 107188), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (107186, 107188), False, 'from datetime import datetime\n')] |
import io
import os
import cv2
import numpy as np
from PIL import Image
from keras import backend
from base64 import b64encode
from keras.models import Model
import matplotlib.pyplot as plt
from keras_retinanet import models
from keras_retinanet.utils.colors import label_color
from keras_retinanet.utils.visualization import draw_box, draw_caption
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
class Core:
    """Wrapper around a keras-retinanet drone-detection model: loading,
    image pre-processing, prediction and visualisation helpers."""

    def __init__(self, model_filename: str = "/Trained-Model/drone-detection-v5.h5") -> None:
        """Remember the model path; the model itself is loaded lazily via
        get_model()/set_model()."""
        self.current_path = os.getcwd()
        self.model_path = self.current_path + model_filename
        # Class index -> human-readable label ('dummy' pads the label set).
        self.labels_to_names = {0: 'drone', 1: 'dummy'}
        self.model = None

    def get_model(self) -> Model:
        """Load and return the retinanet model stored at self.model_path."""
        print(self.model_path)
        return models.load_model(self.model_path, backbone_name='resnet50')

    def set_model(self, model: Model) -> 'Core':
        """Store *model* on the instance; returns self for chaining."""
        self.model = model
        return self

    @staticmethod
    def load_image_by_path(filename: str) -> np.ndarray:
        """Read an image file from disk in BGR channel order."""
        return read_image_bgr(filename)

    @staticmethod
    def load_image_by_memory(file: bytes) -> np.ndarray:
        """Decode raw image bytes and return a BGR numpy array."""
        image = np.asarray(Image.open(io.BytesIO(file)).convert('RGB'))
        # PIL yields RGB; reverse the channel axis to obtain BGR.
        return image[:, :, ::-1].copy()

    @staticmethod
    def convert_rgb_to_bgr(image: np.ndarray) -> np.ndarray:
        """Swap the channel axis (RGB <-> BGR) and return a copy."""
        return image[:, :, ::-1].copy()

    @staticmethod
    def pre_process_image(image: np.ndarray) -> tuple:
        """Normalise and resize *image* for the network.

        :return: (resized_image, scale) where *scale* maps resized
                 coordinates back to the original image.
        """
        pre_processed_image = preprocess_image(image)
        resized_image, scale = resize_image(pre_processed_image)
        return resized_image, scale

    @staticmethod
    def predict(model: Model, image: np.ndarray, scale: float) -> tuple:
        """Run detection on a single image.

        Boxes are divided by *scale* so they refer to the original
        (un-resized) image. Returns (boxes, scores, labels).
        """
        boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))
        boxes /= scale
        return boxes, scores, labels

    def predict_with_graph_loaded_model(self, image: np.ndarray, scale: float) -> tuple:
        """Predict with the model stored via set_model(), explicitly inside
        the default Keras session/graph (needed e.g. when called from a
        different thread than the one that loaded the model)."""
        with backend.get_session().as_default():
            with backend.get_session().graph.as_default():
                return self.predict(self.model, image, scale)

    def predict_with_graph(self, model: Model, image: np.ndarray, scale: float) -> tuple:
        """Predict with an explicit *model*, inside the Keras default graph."""
        with backend.get_session().graph.as_default() as g:
            return self.predict(model, image, scale)

    @staticmethod
    def clear_graph_session() -> None:
        """Reset the global Keras session/graph state."""
        backend.clear_session()
        return None

    @staticmethod
    def get_drawing_image(image: np.ndarray) -> np.ndarray:
        """Return an RGB copy of the BGR *image* suitable for drawing."""
        # copy to draw on
        drawing_image = image.copy()
        drawing_image = cv2.cvtColor(drawing_image, cv2.COLOR_BGR2RGB)
        return drawing_image

    def draw_boxes_in_image(self, drawing_image: np.ndarray, boxes: np.ndarray, scores: np.ndarray,
                            threshold: float = 0.3) -> list:
        """Draw each detection above *threshold* onto *drawing_image* and
        return them as [{'box': [x1, y1, x2, y2], 'score': percent}, ...].

        NOTE(review): after the first accepted detection the threshold is
        raised to 0.5 — presumably to keep only confident extra boxes;
        confirm this is intended behaviour.
        """
        detections = []
        for box, score in zip(boxes[0], scores[0]):
            # scores are sorted so we can break
            if len(detections) > 0:
                threshold = 0.5
            if score < threshold:
                break
            detections.append({"box": [int(coord) for coord in box], "score": int(score * 100)})
            color = label_color(0)
            b = box.astype(int)
            draw_box(drawing_image, b, color=color)
            caption = "{} {:.3f}".format(self.labels_to_names[0], score)
            draw_caption(drawing_image, b, caption)
        return detections

    @staticmethod
    def visualize(drawing_image: np.ndarray) -> None:
        """Display *drawing_image* with matplotlib (blocking)."""
        plt.figure(figsize=(15, 15))
        plt.axis('off')
        plt.imshow(drawing_image)
        plt.show()

    @staticmethod
    def convert_numpy_array_to_base64(image: np.ndarray, extension: str = ".png") -> bytes:
        """Encode *image* with OpenCV and return the base64-encoded bytes.

        Fix: ndarray.tostring() was deprecated in NumPy 1.19 and removed in
        NumPy 2.0; tobytes() is the byte-identical replacement.
        """
        data = cv2.imencode(extension, image)[1].tobytes()
        return b64encode(data)
| [
"base64.b64encode",
"io.BytesIO",
"matplotlib.pyplot.imshow",
"keras_retinanet.utils.colors.label_color",
"keras.backend.clear_session",
"keras_retinanet.utils.visualization.draw_box",
"matplotlib.pyplot.axis",
"keras_retinanet.models.load_model",
"keras_retinanet.utils.image.resize_image",
"keras... | [((572, 583), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (581, 583), False, 'import os\n'), ((808, 868), 'keras_retinanet.models.load_model', 'models.load_model', (['self.model_path'], {'backbone_name': '"""resnet50"""'}), "(self.model_path, backbone_name='resnet50')\n", (825, 868), False, 'from keras_retinanet import models\n'), ((1057, 1081), 'keras_retinanet.utils.image.read_image_bgr', 'read_image_bgr', (['filename'], {}), '(filename)\n', (1071, 1081), False, 'from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\n'), ((1494, 1517), 'keras_retinanet.utils.image.preprocess_image', 'preprocess_image', (['image'], {}), '(image)\n', (1510, 1517), False, 'from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\n'), ((1549, 1582), 'keras_retinanet.utils.image.resize_image', 'resize_image', (['pre_processed_image'], {}), '(pre_processed_image)\n', (1561, 1582), False, 'from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\n'), ((2387, 2410), 'keras.backend.clear_session', 'backend.clear_session', ([], {}), '()\n', (2408, 2410), False, 'from keras import backend\n'), ((2597, 2643), 'cv2.cvtColor', 'cv2.cvtColor', (['drawing_image', 'cv2.COLOR_BGR2RGB'], {}), '(drawing_image, cv2.COLOR_BGR2RGB)\n', (2609, 2643), False, 'import cv2\n'), ((3533, 3561), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (3543, 3561), True, 'import matplotlib.pyplot as plt\n'), ((3570, 3585), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3578, 3585), True, 'import matplotlib.pyplot as plt\n'), ((3594, 3619), 'matplotlib.pyplot.imshow', 'plt.imshow', (['drawing_image'], {}), '(drawing_image)\n', (3604, 3619), True, 'import matplotlib.pyplot as plt\n'), ((3628, 3638), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3636, 3638), True, 'import matplotlib.pyplot as plt\n'), ((3825, 3840), 
'base64.b64encode', 'b64encode', (['data'], {}), '(data)\n', (3834, 3840), False, 'from base64 import b64encode\n'), ((1766, 1795), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (1780, 1795), True, 'import numpy as np\n'), ((3201, 3215), 'keras_retinanet.utils.colors.label_color', 'label_color', (['(0)'], {}), '(0)\n', (3212, 3215), False, 'from keras_retinanet.utils.colors import label_color\n'), ((3260, 3299), 'keras_retinanet.utils.visualization.draw_box', 'draw_box', (['drawing_image', 'b'], {'color': 'color'}), '(drawing_image, b, color=color)\n', (3268, 3299), False, 'from keras_retinanet.utils.visualization import draw_box, draw_caption\n'), ((3386, 3425), 'keras_retinanet.utils.visualization.draw_caption', 'draw_caption', (['drawing_image', 'b', 'caption'], {}), '(drawing_image, b, caption)\n', (3398, 3425), False, 'from keras_retinanet.utils.visualization import draw_box, draw_caption\n'), ((1960, 1981), 'keras.backend.get_session', 'backend.get_session', ([], {}), '()\n', (1979, 1981), False, 'from keras import backend\n'), ((3765, 3795), 'cv2.imencode', 'cv2.imencode', (['extension', 'image'], {}), '(extension, image)\n', (3777, 3795), False, 'import cv2\n'), ((1196, 1212), 'io.BytesIO', 'io.BytesIO', (['file'], {}), '(file)\n', (1206, 1212), False, 'import io\n'), ((2221, 2242), 'keras.backend.get_session', 'backend.get_session', ([], {}), '()\n', (2240, 2242), False, 'from keras import backend\n'), ((2013, 2034), 'keras.backend.get_session', 'backend.get_session', ([], {}), '()\n', (2032, 2034), False, 'from keras import backend\n')] |
import numpy as np
def haldane_honeycomb(kx, ky, m=0.5, phi=np.pi/2):
k = np.array([kx / np.sqrt(3.), ky * 2. / 3.])
t1 = t2 = 1.
a1 = np.array([np.sqrt(3) * 0.5, 0.5])
a2 = np.array([0, -1])
a3 = np.array([-np.sqrt(3) * 0.5, 0.5])
b1 = a2 - a3
b2 = a3 - a1
b3 = a1 - a2
pauli0 = np.eye(2)
pauli1 = np.array([[0, 1], [1, 0]])
pauli2 = np.array([[0, -1j], [1j, 0]])
pauli3 = np.array([[1, 0], [0, -1]])
hk = 2 * t2 * np.cos(phi) * (
np.cos(k @ b1) + np.cos(k @ b2) + np.cos(k @ b3)
) * pauli0 + t1 * (
(np.cos(k @ a1) + np.cos(k @ a2) + np.cos(k @ a3)) * pauli1 +
(np.sin(k @ a1) + np.sin(k @ a2) + np.sin(k @ a3)) * pauli2
) + (m - 2 * t2 * np.sin(phi) * (
np.sin(k @ b1) + np.sin(k @ b2) + np.sin(k @ b3)
)) * pauli3
return hk
if __name__ == "__main__":
    # CLI entry point: compute the Chern number of the Haldane model for a
    # given mass m and flux phi. Imports are kept local so importing this
    # module does not require the chern package.
    from chern import Hamiltonian, Chern
    from functools import partial
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--m', type=float, help='haldane mass', default=0.5)
    parser.add_argument('--phi', type=float, help='phi flux', default=np.pi/2)
    args = parser.parse_args()
    # Freeze (m, phi) so the Hamiltonian is a function of (kx, ky) only.
    hk = Hamiltonian(partial(haldane_honeycomb, m=args.m, phi=args.phi), "haldane")
    cn = Chern(hk)
    print(f"\nThe Chern number for haldane(m={args.m:5.2f},phi={args.phi:5.2f}) is: {cn.chern}\n")
| [
"numpy.eye",
"numpy.sqrt",
"argparse.ArgumentParser",
"numpy.array",
"functools.partial",
"numpy.cos",
"numpy.sin",
"chern.Chern"
] | [((195, 212), 'numpy.array', 'np.array', (['[0, -1]'], {}), '([0, -1])\n', (203, 212), True, 'import numpy as np\n'), ((323, 332), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (329, 332), True, 'import numpy as np\n'), ((346, 372), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (354, 372), True, 'import numpy as np\n'), ((386, 419), 'numpy.array', 'np.array', (['[[0, -1.0j], [1.0j, 0]]'], {}), '([[0, -1.0j], [1.0j, 0]])\n', (394, 419), True, 'import numpy as np\n'), ((429, 456), 'numpy.array', 'np.array', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (437, 456), True, 'import numpy as np\n'), ((990, 1015), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1013, 1015), False, 'import argparse\n'), ((1296, 1305), 'chern.Chern', 'Chern', (['hk'], {}), '(hk)\n', (1301, 1305), False, 'from chern import Hamiltonian, Chern\n'), ((1224, 1274), 'functools.partial', 'partial', (['haldane_honeycomb'], {'m': 'args.m', 'phi': 'args.phi'}), '(haldane_honeycomb, m=args.m, phi=args.phi)\n', (1231, 1274), False, 'from functools import partial\n'), ((96, 108), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (103, 108), True, 'import numpy as np\n'), ((162, 172), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (169, 172), True, 'import numpy as np\n'), ((233, 243), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (240, 243), True, 'import numpy as np\n'), ((476, 487), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (482, 487), True, 'import numpy as np\n'), ((538, 552), 'numpy.cos', 'np.cos', (['(k @ b3)'], {}), '(k @ b3)\n', (544, 552), True, 'import numpy as np\n'), ((745, 756), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (751, 756), True, 'import numpy as np\n'), ((807, 821), 'numpy.sin', 'np.sin', (['(k @ b3)'], {}), '(k @ b3)\n', (813, 821), True, 'import numpy as np\n'), ((504, 518), 'numpy.cos', 'np.cos', (['(k @ b1)'], {}), '(k @ b1)\n', (510, 518), True, 'import numpy as np\n'), ((521, 535), 
'numpy.cos', 'np.cos', (['(k @ b2)'], {}), '(k @ b2)\n', (527, 535), True, 'import numpy as np\n'), ((624, 638), 'numpy.cos', 'np.cos', (['(k @ a3)'], {}), '(k @ a3)\n', (630, 638), True, 'import numpy as np\n'), ((698, 712), 'numpy.sin', 'np.sin', (['(k @ a3)'], {}), '(k @ a3)\n', (704, 712), True, 'import numpy as np\n'), ((773, 787), 'numpy.sin', 'np.sin', (['(k @ b1)'], {}), '(k @ b1)\n', (779, 787), True, 'import numpy as np\n'), ((790, 804), 'numpy.sin', 'np.sin', (['(k @ b2)'], {}), '(k @ b2)\n', (796, 804), True, 'import numpy as np\n'), ((590, 604), 'numpy.cos', 'np.cos', (['(k @ a1)'], {}), '(k @ a1)\n', (596, 604), True, 'import numpy as np\n'), ((607, 621), 'numpy.cos', 'np.cos', (['(k @ a2)'], {}), '(k @ a2)\n', (613, 621), True, 'import numpy as np\n'), ((664, 678), 'numpy.sin', 'np.sin', (['(k @ a1)'], {}), '(k @ a1)\n', (670, 678), True, 'import numpy as np\n'), ((681, 695), 'numpy.sin', 'np.sin', (['(k @ a2)'], {}), '(k @ a2)\n', (687, 695), True, 'import numpy as np\n')] |
from datetime import datetime
import numpy as np
from pup.common.constants import NUM_SECONDS_IN_DAY
from pup.common.datatypes import Traj, Checkin
from pup.common.enums import PricingType, DegradationType, ReconstructionMethod, TrajectoryIntervalType, \
PreviousPurchaseType
from pup.config import Config
from pup.reconstruction import reconstruction_common
# NOTE(review): threshold presumably gates saving per-timestamp predictions
# for large trajectories -- usage is outside this chunk, confirm.
MIN_TRAJECTORY_SIZE_TO_SAVE_PREDICTIONS = 2000
# Subsampling ratios for the previous-purchase data variants.
PREVIOUS_PURCHASES_SUBSAMPLING_RATIO_001 = 0.01
PREVIOUS_PURCHASES_SUBSAMPLING_RATIO_005 = 0.05
PREVIOUS_PURCHASES_SUBSAMPLING_RATIO_02 = 0.2
# Measurement-noise std of previously purchased trajectories (meters,
# judging from the 300m/400m example in combine_noisy_queried_data).
PREVIOUS_PURCHASES_NOISE_300 = 300
PREVIOUS_PURCHASES_NOISE_400 = 400
def get_degradation_from_config():
    """Read the degradation settings from the global Config.

    :return: (degradation_type, degradation_value) where the value is the
             noise magnitude for ADD_NOISE, the subsampling ratio for the
             subsampling variants, and 0.0 otherwise.
    :raises ValueError: when no degradation type is configured.
    """
    degradation_type = Config.query_degradation_type
    if degradation_type is None:
        raise ValueError('Invalid degradation type: {}'.format(degradation_type))
    if degradation_type == DegradationType.ADD_NOISE:
        degradation_value = Config.query_add_noise_magnitude
    elif degradation_type in (DegradationType.SUBSAMPLING,
                              DegradationType.SUBSTART,
                              DegradationType.SUB_TIME):
        degradation_value = Config.query_subsampling_ratio
    else:
        degradation_value = 0.0
    return degradation_type, degradation_value
def prepare_reconstruction_evaluation_timestamps(trajectory: Traj, pricing_type: PricingType) -> list:
    """
    Prepare timestamps for reconstruction evaluation.

    :param trajectory: the original trajectory
    :param pricing_type: pricing type
    :return: list of timestamps, one per second of the evaluated span
    :raises ValueError: for unsupported pricing types
    """
    if pricing_type == PricingType.IG_TRAJ_DURATION:
        # Evaluate over the trajectory's own duration (inclusive of the last second).
        start_timestamp = trajectory[0].timestamp
        end_timestamp = trajectory[-1].timestamp + 1
    elif pricing_type == PricingType.IG_TRAJ_DAY:
        # Evaluate over the full calendar day containing the trajectory start,
        # in the trajectory's own timezone.
        first = datetime.fromtimestamp(trajectory[0].timestamp, trajectory[0].datetime.tzinfo)
        midnight = datetime(first.year, first.month, first.day, 0, 0, 0, tzinfo=first.tzinfo)
        start_timestamp = midnight.timestamp()
        end_timestamp = start_timestamp + NUM_SECONDS_IN_DAY
    else:
        raise ValueError("Not supported pricing type: {}".format(pricing_type.name))
    return list(range(int(start_timestamp), int(end_timestamp)))
def get_single_component_output_file_name(
        prefix, suffix,
        trajectory_interval, query_pricing_type,
        degradation_type, degradation_value,
        transformation_type, start_prior, previous_purchases,
        grid_cell_len=1000, default_location_measurement_std=3,
        reconstruction_method=ReconstructionMethod.GAUSSIAN_PROCESS) -> str:
    """
    Get the file name for result output of Single Component pricing.

    The name encodes every experiment setting so result files are unique:
    grid size, default std, interval, pricing, degradation (plus its ratio
    or noise magnitude), transformation, and — depending on the pricing
    type — the reconstruction method, prior and previous-purchase variant.

    :param prefix: file name prefix
    :param suffix: file name suffix (appended last, omitted when None)
    :param trajectory_interval: TrajectoryIntervalType setting
    :param query_pricing_type: PricingType setting
    :param degradation_type: DegradationType setting
    :param degradation_value: subsampling ratio or noise magnitude
    :param transformation_type: transformation setting
    :param start_prior: prior used by information-gain pricing
    :param previous_purchases: PreviousPurchaseType setting
    :param grid_cell_len: grid cell length (encoded as an int)
    :param default_location_measurement_std: default measurement std (int)
    :param reconstruction_method: ReconstructionMethod setting
    :return: file name
    """
    # Base name: settings common to every experiment.
    output = '{}_grid_{}_defstd_{}_{}_pricing_{}_degrade_{}_trans_{}'.format(
        prefix,
        int(grid_cell_len),
        int(default_location_measurement_std),
        trajectory_interval.name,
        query_pricing_type.name,
        degradation_type.name,
        transformation_type.name
    )
    # Degradation magnitude: ratio for subsampling variants, integer for noise.
    if degradation_type == DegradationType.SUBSAMPLING or \
            degradation_type == DegradationType.SUBSTART or \
            degradation_type == DegradationType.SUB_TIME:
        output = '{}_sub_ratio_{:.3f}'.format(output, degradation_value)
    elif degradation_type == DegradationType.ADD_NOISE:
        output = '{}_noise_{}'.format(output, int(degradation_value))
    elif degradation_type == DegradationType.NONE:
        output = '{}_no_degrade'.format(output)
    # Pricing-specific suffixes.
    if query_pricing_type == PricingType.RECONSTRUCTION:
        output = '{}_reconstruct_{}'.format(
            output,
            reconstruction_method.name,
        )
    elif query_pricing_type == PricingType.IG_TRAJ_DAY or \
            query_pricing_type == PricingType.IG_TRAJ_DURATION:
        # Information-gain pricing also encodes prior and previous purchases.
        output = '{}_reconstruct_{}'.format(
            output,
            reconstruction_method.name,
        )
        output = '{}_prior_{}_prev_purchases_{}'.format(
            output,
            start_prior.name,
            previous_purchases.name
        )
    elif query_pricing_type == PricingType.HISTOGRAM_ENTROPY:
        output = '{}_hist_entropy'.format(
            output
        )
    if suffix is not None:
        output += '_{}'.format(suffix)
    return output
def combine_noisy_queried_data(noisy_traj: Traj, prev_noisy_traj: Traj, previous_purchases: PreviousPurchaseType) -> Traj:
    """Fuse a newly purchased noisy trajectory with a previously purchased one.

    Check-ins are paired by index; each pair is merged with inverse-variance
    weighting of the x/y positions, and the stds are combined accordingly.

    :param noisy_traj: the newly purchased noisy trajectory
    :param prev_noisy_traj: the previously purchased noisy trajectory
    :param previous_purchases: which previous-purchase noise level applies
    :return: the combined trajectory
    """
    combined_traj: Traj = list()
    for i in range(len(noisy_traj)):
        new_c = noisy_traj[i]
        prev_c = prev_noisy_traj[i]
        # Combine mean
        # Combine previous purchase std with new std.
        # For example, if prev purchase is 300m, new purchase is 400m, combined noise = 1 /(1/(300^2) + 1/(400^2)) = 240
        if previous_purchases == PreviousPurchaseType.SAME_TRAJ_NOISE_300_COMBINED:
            prev_std = PREVIOUS_PURCHASES_NOISE_300
        else:
            prev_std = PREVIOUS_PURCHASES_NOISE_400
        new_std = reconstruction_common.prepare_measurement_std(noisy_traj)
        combined_std = combine_two_measurement_stds(prev_std, new_std)
        c = Checkin(c_id=new_c.c_id,
                    user_id=new_c.user_id,
                    timestamp=new_c.timestamp,
                    datetime=new_c.datetime,
                    lat=new_c.lat,
                    # Bug fix: was lon=new_c.lat (latitude copied into longitude).
                    lon=new_c.lon,
                    measurement_std=new_c.measurement_std,
                    location_id=new_c.location_id,
                    trajectory_idx=new_c.trajectory_idx)
        # Inverse-variance weighted position; std replaced by the combined std.
        c.x = combine_two_measurement_mean(new_c.x, prev_c.x, new_std, prev_std)
        c.y = combine_two_measurement_mean(new_c.y, prev_c.y, new_std, prev_std)
        c.measurement_std = combined_std
        combined_traj.append(c)
    return combined_traj
def combine_two_measurement_mean(new_mean: float, prev_mean: float, new_std: float, prev_std: float):
    """Combine two measurement means using inverse-variance weighting.

    Source: https://en.wikipedia.org/wiki/Inverse-variance_weighting

    :return: the weighted mean, where each mean is weighted by 1/std^2
    """
    weight_new = 1 / (new_std * new_std)
    weight_prev = 1 / (prev_std * prev_std)
    return (weight_new * new_mean + weight_prev * prev_mean) / (weight_new + weight_prev)
def combine_two_measurement_stds(prev_std: float, new_std: float) -> float:
    """Combine two measurement stds using inverse-variance weighting.

    Source: https://en.wikipedia.org/wiki/Inverse-variance_weighting

    :param prev_std: std of the previously purchased measurement
    :param new_std: std of the new measurement
    :return: std of the fused measurement (always <= min(prev_std, new_std))
    """
    total_inverse_variance = 1.0 / (prev_std * prev_std) + 1.0 / (new_std * new_std)
    return np.sqrt(1.0 / total_inverse_variance)
def cal_min_sigma_preds(sigmas_preds):
    """
    Calculate the element-wise minimum sigma over all models' predictions.

    :param sigmas_preds: list of standard deviations for all predictions of all models
    :return: min sigmas, a fresh array with the same shape as the first input
    """
    # Start from a copy so the caller's first array is never aliased.
    result = sigmas_preds[0].copy()
    for model_sigmas in sigmas_preds[1:]:
        result = np.minimum(result, model_sigmas)
    return result
def prepare_x_pred(trajectory: Traj, pricing_type: PricingType) -> np.ndarray:
"""
Prepare x_pred
:param trajectory: the original trajectory
:param pricing_type: pricing type
:return: x_pred as unscaled feature matrix of size n x 1
"""
eval_timestamps = prepare_reconstruction_evaluation_timestamps(trajectory, pricing_type)
x_values = np.asarray(eval_timestamps)
x_values = x_values.reshape((-1, 1))
return x_values | [
"datetime.datetime",
"datetime.datetime.fromtimestamp",
"numpy.sqrt",
"numpy.minimum",
"numpy.asarray",
"pup.reconstruction.reconstruction_common.prepare_measurement_std",
"pup.common.datatypes.Checkin"
] | [((6545, 6617), 'numpy.sqrt', 'np.sqrt', (['(1.0 / (1.0 / (prev_std * prev_std) + 1.0 / (new_std * new_std)))'], {}), '(1.0 / (1.0 / (prev_std * prev_std) + 1.0 / (new_std * new_std)))\n', (6552, 6617), True, 'import numpy as np\n'), ((7466, 7493), 'numpy.asarray', 'np.asarray', (['eval_timestamps'], {}), '(eval_timestamps)\n', (7476, 7493), True, 'import numpy as np\n'), ((5005, 5062), 'pup.reconstruction.reconstruction_common.prepare_measurement_std', 'reconstruction_common.prepare_measurement_std', (['noisy_traj'], {}), '(noisy_traj)\n', (5050, 5062), False, 'from pup.reconstruction import reconstruction_common\n'), ((5147, 5397), 'pup.common.datatypes.Checkin', 'Checkin', ([], {'c_id': 'new_c.c_id', 'user_id': 'new_c.user_id', 'timestamp': 'new_c.timestamp', 'datetime': 'new_c.datetime', 'lat': 'new_c.lat', 'lon': 'new_c.lat', 'measurement_std': 'new_c.measurement_std', 'location_id': 'new_c.location_id', 'trajectory_idx': 'new_c.trajectory_idx'}), '(c_id=new_c.c_id, user_id=new_c.user_id, timestamp=new_c.timestamp,\n datetime=new_c.datetime, lat=new_c.lat, lon=new_c.lat, measurement_std=\n new_c.measurement_std, location_id=new_c.location_id, trajectory_idx=\n new_c.trajectory_idx)\n', (5154, 5397), False, 'from pup.common.datatypes import Traj, Checkin\n'), ((7020, 7065), 'numpy.minimum', 'np.minimum', (['min_sigmas_preds', 'sigmas_preds[i]'], {}), '(min_sigmas_preds, sigmas_preds[i])\n', (7030, 7065), True, 'import numpy as np\n'), ((1776, 1854), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['trajectory[0].timestamp', 'trajectory[0].datetime.tzinfo'], {}), '(trajectory[0].timestamp, trajectory[0].datetime.tzinfo)\n', (1798, 1854), False, 'from datetime import datetime\n'), ((1875, 1933), 'datetime.datetime', 'datetime', (['t.year', 't.month', 't.day', '(0)', '(0)', '(0)'], {'tzinfo': 't.tzinfo'}), '(t.year, t.month, t.day, 0, 0, 0, tzinfo=t.tzinfo)\n', (1883, 1933), False, 'from datetime import datetime\n')] |
import pdb
import torch
import sys # NOQA
sys.path.insert(0, '..') # NOQA: E402
import numpy as np
import argparse
import torch.multiprocessing as mp
import os
import glob
import copy
import math
import pathlib
from logger.logger import Logger
import matplotlib
import matplotlib.pyplot as plt
import datetime, time
#from debugtools import compile_results
from utils import step_wrapper, reset_wrapper
import copy
import pygame
from alternateController.potential_field_controller import PotentialFieldController as PFController
from alternateController.social_forces_controller import SocialForcesController
from rlmethods.b_actor_critic import ActorCritic
from rlmethods.b_actor_critic import Policy
from tqdm import tqdm
from envs.drone_data_utils import classify_pedestrians
from envs.drone_data_utils import get_pedestrians_in_viscinity
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
parser = argparse.ArgumentParser()
#general arguments
parser.add_argument('--render', action='store_true', help="show the env.")
parser.add_argument('--num-trajs', type=int, default=50)
parser.add_argument('--max-ep-length', type=int, default=600, help='Max length of a single episode.')
parser.add_argument('--feat-extractor', type=str, default=None, help='The name of the \
feature extractor to be used in the experiment.')
parser.add_argument('--run-exact', action='store_true')
parser.add_argument('--subject', type=int, default=None)
parser.add_argument('--seed', type=int, default=789)
parser.add_argument('--on-server', action='store_true')
#**************************************************************************#
#arguments related to the environment
parser.add_argument('--annotation-file', type=str,
default='../envs/expert_datasets/\
university_students/annotation/processed/frame_skip_1/\
students003_processed_corrected.txt', help='The location of the annotation file to \
be used to run the environment.')
parser.add_argument('--reward-path' , type=str, nargs='?', default= None)
parser.add_argument('--reward-net-hidden-dims', nargs="*", type=int, default=[128])
#**************************************************************************#
#agent related arguments
parser.add_argument('--agent-type', type=str, default='Potential_field', help='The type of agent to be used to \
in the environment. It can be either a RL/IRL agent, or an alternative controller agent. \
Different agents will then have different arguments.')
#arguments for a network based agent
parser.add_argument('--policy-path', type=str, nargs='?', default=None)
parser.add_argument('--policy-net-hidden-dims', nargs="*", type=int, default=[128])
#arguments for a potential field agent
'''
/home/abhisek/Study/Robotics/deepirl/experiments/results/Beluga/IRL Runs/
Drone_environment_univ_students003_DroneFeatureRisk_updated_risk_v2_general_3kiter2019-09-27 10:24:41-reg-0-seed-8788-lr-0.001/
saved-models/17.pt
'''
#argument for some other agent
#*************************************************************************#
#parameters for informatio collector
parser.add_argument('--save-plots', action='store_true', default=False)
parser.add_argument('--store-results', action='store_true', default=False)
parser.add_argument('--save-folder', type=str, default=None, help= 'The name of the folder to \
store experiment related information.')
#************************************************************************#
parser.add_argument('--reward-analysis', action='store_true', default=False)
parser.add_argument('--crash-analysis', action='store_true', default=False)
parser.add_argument('--plain-run', action='store_true', default=True)
def check_parameters(args):
    """Validate the parsed CLI arguments; print a message and exit the
    process when a required combination is missing."""
    if args.agent_type == 'Policy_network' and \
            (args.policy_path is None or args.policy_net_hidden_dims is None):
        print("Please provide correct information to load a policy network.")
        exit()
    if args.feat_extractor is None:
        print("Please provide a feature extractor to continue.")
        exit()
    if args.reward_analysis and \
            (args.reward_path is None or args.reward_net_hidden_dims is None):
        print("Please provide reward network details to perform reward analysis.")
        exit()
#**************************************************
# Shared experiment constants, passed to the environment and the feature
# extractors constructed below. Units appear to be pixels / simulation
# steps -- TODO confirm against the GridWorldDrone implementation.
thresh1 = 10
thresh2 = 15
step_size = 2
agent_width = 10
obs_width = 10
grid_size = 3
#**************************************************
ts=time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
args = parser.parse_args()
#checks if all the parameters are in order
check_parameters(args)
if args.on_server:
matplotlib.use('Agg')
os.environ['SDL_VIDEODRIVER'] = 'dummy'
#*************************************************
#initialize information collector
from envs.drone_env_utils import InformationCollector
info_collector = InformationCollector(run_info=args.agent_type,
                                      thresh=thresh2*step_size,
                                      plot_info=args.save_plots,
                                      store_info=args.store_results,
                                      )
#*************************************************
# Initialize the pedestrian-annotation driven grid-world environment.
# replace_subject=args.run_exact swaps the agent in for a recorded
# pedestrian; show_comparison overlays the recorded "ghost" trajectory.
from envs.gridworld_drone import GridWorldDrone
consider_heading = True
np.random.seed(args.seed)
env = GridWorldDrone(display=args.render, is_onehot=False,
                     seed=args.seed, obstacles=None,
                     show_trail=True,
                     is_random=False,
                     subject=args.subject,
                     annotation_file=args.annotation_file,
                     tick_speed=60,
                     obs_width=10,
                     step_size=step_size,
                     agent_width=agent_width,
                     external_control=True,
                     replace_subject=args.run_exact,
                     show_comparison=True,
                     consider_heading=consider_heading,
                     show_orientation=True,
                     rows=576, cols=720, width=grid_size)
print('Environment initalized successfully.')
#*************************************************
# Instantiate the feature extractor named on the command line.
# NOTE(review): the branches are independent `if`s on the same value, so
# at most one fires; `elif` would make the mutual exclusivity explicit.
from featureExtractor.drone_feature_extractor import DroneFeatureSAM1, DroneFeatureMinimal
from featureExtractor.drone_feature_extractor import DroneFeatureOccup, DroneFeatureRisk
from featureExtractor.drone_feature_extractor import DroneFeatureRisk_v2, DroneFeatureRisk_speed, DroneFeatureRisk_speedv2
if args.feat_extractor == 'DroneFeatureSAM1':
    feat_ext = DroneFeatureSAM1(agent_width=agent_width,
                                obs_width=obs_width,
                                step_size=step_size,
                                grid_size=grid_size,
                                thresh1=thresh1, thresh2=thresh2)
if args.feat_extractor == 'DroneFeatureOccup':
    # NOTE(review): window_size is not defined anywhere in this chunk --
    # selecting DroneFeatureOccup would raise NameError unless it is set
    # earlier in the file; confirm.
    feat_ext = DroneFeatureOccup(agent_width=agent_width,
                                 obs_width=obs_width,
                                 step_size=step_size,
                                 grid_size=grid_size,
                                 window_size=window_size)
if args.feat_extractor == 'DroneFeatureRisk':
    feat_ext = DroneFeatureRisk(agent_width=agent_width,
                                obs_width=obs_width,
                                step_size=step_size,
                                grid_size=grid_size,
                                show_agent_persp=True,
                                thresh1=thresh1, thresh2=thresh2)
if args.feat_extractor == 'DroneFeatureRisk_v2':
    feat_ext = DroneFeatureRisk_v2(agent_width=agent_width,
                                   obs_width=obs_width,
                                   step_size=step_size,
                                   grid_size=grid_size,
                                   show_agent_persp=False,
                                   thresh1=thresh1, thresh2=thresh2)
if args.feat_extractor == 'DroneFeatureRisk_speed':
    feat_ext = DroneFeatureRisk_speed(agent_width=agent_width,
                                      obs_width=obs_width,
                                      step_size=step_size,
                                      grid_size=grid_size,
                                      show_agent_persp=True,
                                      thresh1=thresh1, thresh2=thresh2)
if args.feat_extractor == 'DroneFeatureRisk_speedv2':
    # This variant hard-codes its own thresholds (18/30) instead of the
    # shared thresh1/thresh2 constants above.
    feat_ext = DroneFeatureRisk_speedv2(agent_width=agent_width,
                                        obs_width=obs_width,
                                        step_size=step_size,
                                        grid_size=grid_size,
                                        thresh1=18, thresh2=30)
#*************************************************
# Instantiate the acting agent selected by --agent-type.  Policy, the
# controller classes, pdb, etc. are presumably imported earlier in the
# file (not visible in this chunk).
if args.agent_type == 'Policy_network':
    #initialize the network
    print (args.policy_net_hidden_dims)
    print (feat_ext.state_rep_size)
    print (env.action_space)
    # NOTE(review): unconditional debugger breakpoint in the main flow --
    # looks like a debugging leftover; confirm before removing.
    pdb.set_trace()
    agent = Policy(feat_ext.state_rep_size, env.action_space.n, hidden_dims=args.policy_net_hidden_dims)
    if args.policy_path:
        agent.load(args.policy_path)
    else:
        print('Provide a policy path')
if args.agent_type == 'Potential_field':
    #initialize the PF agent
    max_speed = env.max_speed
    orient_quant = env.orient_quantization
    orient_div = len(env.orientation_array)
    speed_quant = env.speed_quantization
    speed_div = len(env.speed_array)
    # attr_mag/rep_mag are set here but not passed to the controller.
    attr_mag = 3
    rep_mag = 2
    agent = PFController(speed_div, orient_div, orient_quant)
if args.agent_type == 'Social_forces':
    orient_quant = env.orient_quantization
    orient_div = len(env.orientation_array)
    speed_quant = env.speed_quantization
    speed_div = len(env.speed_array)
    agent = SocialForcesController(speed_div, orient_div, orient_quant)
if args.agent_type == 'Default':
    # No external control: the environment replays the recorded pedestrian.
    env.external_control = False
    agent = None
    #the person from the video
    pass
#*************************************************
# Load the learned reward network when a checkpoint path is supplied; the
# state size is inferred from one extracted feature vector.
if args.reward_path is not None:
    from irlmethods.deep_maxent import RewardNet
    state_size = feat_ext.extract_features(env.reset()).shape[0]
    reward_net = RewardNet(state_size, args.reward_net_hidden_dims)
    reward_net.load(args.reward_path)
#*************************************************
#play
def reward_analysis():
    '''
    A function to analyse the rewards against actions for a given policy.
    A helpful visualization/ debugging tool.

    Uses module-level globals: args, env, feat_ext, agent, reward_net,
    info_collector, plt.  For each of args.num_trajs episodes it steps the
    environment; when --reward-analysis is set it additionally probes all
    9 actions per step (via env.rollback) and plots the normalized true
    vs. network reward per action.
    '''
    for i in range(args.num_trajs):
        #reset the world
        state=env.reset()
        if args.feat_extractor is not None:
            feat_ext.reset()
            state_feat = feat_ext.extract_features(state)
            #pass
        #reset the information collector
        info_collector.reset_info(state)
        done=False
        t = 0
        while t < args.max_ep_length and not done:
            reward_arr = np.zeros(9)
            reward_arr_true = np.zeros(9)
            if args.feat_extractor is not None:
                #************reward analysis block*************
                if args.reward_analysis:
                    # NOTE(review): this inner loop reuses `i`, shadowing
                    # the outer trajectory index.
                    for i in range(9): #as there are 9 actions
                        action = i
                        state, reward_true, _ , _ = env.step(action)
                        print('Taking a step', action)
                        if args.feat_extractor is not None:
                            state_feat_temp = feat_ext.extract_features(state)
                            reward_arr[i] = reward_net(state_feat_temp)
                        reward_arr_true[i] = reward_true
                        # Undo the probe step so the episode state is unchanged.
                        state = env.rollback(1)
                        state_feat = feat_ext.rollback(2, state)
                #print(reward_arr)
                #**********************************************
                #making sure the graphics are consistent
                #if t>0: #skip if this is the first frame
                #    state_feat = feat_ext.extract_features(state)
                #**********************************************
                #selecting the action
                #action selection for network
                if args.agent_type=='Policy_network':
                    #pdb.set_trace()
                    action = agent.eval_action(state_feat)
                else:
                    #action selection for alternate controller namely potential field
                    action = agent.eval_action(state)
                #pdb.set_trace()
                #print('The action finally taken :', action)
                #action = int(np.argmax(reward_arr_true))
                #**********************************************
                if args.reward_analysis:
                    # Normalize both reward vectors so their shapes are
                    # comparable in the plot (eps avoids divide-by-zero).
                    true_reward_norm = (reward_arr_true - reward_arr_true.mean())/(reward_arr_true.std()+np.finfo(float).eps)
                    network_reward_norm = (reward_arr - reward_arr.mean())/(reward_arr.std()+np.finfo(float).eps)
                    #print('The true reward normalized:\n', true_reward_norm)
                    #print('The network reward normalized: \n', network_reward_norm)
                    plt.plot(true_reward_norm, c='r')
                    plt.plot(network_reward_norm, c='b')
                    # NOTE(review): `probs` is not defined in this function
                    # or anywhere visible in this chunk -- this line raises
                    # NameError unless it is a global set elsewhere; confirm.
                    plt.plot(probs.cpu().detach().numpy(), c='g')
                    #action = np.argmax(true_reward_norm)
                    #print('Action taken from here:', action)
                    #comparing the policy network
                if args.render:
                    feat_ext.overlay_bins(state)
            else:
                action = agent.eval_action(state)
            #pdb.set_trace()
            state, reward, done, _ = env.step(action)
            if args.feat_extractor is not None:
                state_feat = feat_ext.extract_features(state)
                if args.reward_path is not None:
                    reward = reward_net(state_feat)
            #if args.reward_analysis:
            print('Reward : {} for action {}:'.format(reward, action))
            #pdb.set_trace()
            plt.show()
            info_collector.collect_information_per_frame(state)
            t+=1
        info_collector.collab_end_traj_results()
    info_collector.collab_end_results()
    info_collector.plot_information()
def crash_analysis():
    '''
    A visualizing/ debugging tool to analyse with ease the states and conditions
    right before an agent crashes.

    Uses module-level globals: args, env, feat_ext, agent, reward_net, pdb.
    On a crash (done with strongly negative reward) and --crash-analysis,
    the environment is rolled back ~10 frames and replayed with a debugger
    breakpoint armed on every subsequent step.
    '''
    for i in range(args.num_trajs):
        #reset the world
        # NOTE(review): this local flag shadows the function's own name;
        # calling crash_analysis() recursively from here would fail.
        crash_analysis = False
        state = env.reset()
        print('Current subject :', env.cur_ped)
        if args.feat_extractor is not None:
            feat_ext.reset()
            state_feat = feat_ext.extract_features(state)
            #pass
        #reset the information collector
        done = False
        t = 0
        while t < args.max_ep_length and not done:
            if args.feat_extractor is not None:
                if args.agent_type == 'Policy_network':
                    action = agent.eval_action(state_feat)
                else:
                    #action selection for alternate controller namely potential field
                    action = agent.eval_action(state)
                if args.render:
                    feat_ext.overlay_bins(state)
            else:
                action = agent.eval_action(state)
            #pdb.set_trace()
            state, reward_true, done, _ = env.step(action)
            if args.feat_extractor is not None:
                state_feat = feat_ext.extract_features(state)
                if crash_analysis:
                    # Break on every step of the replayed pre-crash window.
                    pdb.set_trace()
            if args.reward_path is not None:
                reward = reward_net(state_feat)
            else:
                reward = reward_true
            #if args.reward_analysis:
            print('Reward : {} for action {}:'.format(reward, action))
            #pdb.set_trace()
            if done:
                print('Crash frame : ', env.current_frame)
                print('Agent position history :')
                # NOTE(review): reuses `i`, shadowing the trajectory index.
                for i in range(len(feat_ext.agent_state_history)):
                    print(feat_ext.agent_state_history[i]['position'], env.heading_dir_history[i])
                if args.crash_analysis:
                    if reward_true < -0.5:
                        if not crash_analysis:
                            # Roll back up to 10 frames (fewer early in the
                            # episode) and replay with analysis armed.
                            if t > 10:
                                state = env.rollback(10)
                                state_feat = feat_ext.rollback(11, state)
                            else:
                                state = env.rollback(t-1)
                                state_feat = feat_ext.rollback(t, state)
                            print('Current frame after rollback :', env.current_frame)
                            for i in range(len(feat_ext.agent_state_history)):
                                print(feat_ext.agent_state_history[i]['position'], env.heading_dir_history[i])
                            done = False
                            crash_analysis = True
                        else:
                            break
                    else:
                        break
            t += 1
def agent_drift_analysis(agent=agent,
                         agent_type=args.agent_type,
                         ped_list=None,
                         pos_reset=20,
                         ):
    '''
    if order='by_ped' the drift information is collected per pedestrian
    if order='by_density' the drift information is collected per density of nearby peds
    step interval after which to reset the position
    input : agent, agent_type and pos_reset.
    Plays the agent on the provided environment with the assigned reset value
    for the assigned number of trajectories. Can be played with or without render
    returns :
        The an array that contains the drift analysis for each of the
        pedestrians in the list for the given pos_reset.
    '''
    # Accumulated drift over all pedestrians / number of completed segments
    # (accumulated but not returned; only the per-pedestrian averages are).
    drift_value = 0
    segment_counter = 0
    env.cur_ped = None
    print('Starting drift analysis of agent :{}. Reset\
           interval :{}'.format(agent_type, pos_reset))
    if ped_list is not None:
        num_trajs = len(ped_list)
    else:
        num_trajs = args.num_trajs
    #an array containing the drift value for each pedestrian
    drift_info_detailed = np.zeros(num_trajs)
    for i in tqdm(range(num_trajs)):
        #reset the world
        crash_analysis = False
        if ped_list is None:
            state = env.reset()
        else:
            # Replace a specific pedestrian and aim the goal pos_reset
            # frames ahead on its recorded trajectory.
            state = env.reset_and_replace(ped=ped_list[i])
            env.goal_state = copy.deepcopy(env.return_position(env.cur_ped, env.current_frame + pos_reset)['position'])
            env.state['goal_state'] = copy.deepcopy(env.goal_state)
            state = copy.deepcopy(env.state)
        #print('Current subject :', env.cur_ped)
        final_frame = env.final_frame
        if args.feat_extractor is not None:
            feat_ext.reset()
            state_feat = feat_ext.extract_features(state)
            state_feat = torch.from_numpy(state_feat).type(torch.FloatTensor).to(DEVICE)
            #pass
        #reset the information collector
        info_collector.reset_info(state)
        done = False
        t = 0
        drift_per_ped = 0
        segment_counter_per_ped = 0
        abs_counter = env.current_frame
        while abs_counter < final_frame:
            # NOTE(review): stop_points is re-created each step and never
            # read (its uses are inside the commented-out string below).
            stop_points = []
            if args.feat_extractor is not None:
                if agent_type == 'Policy_network':
                    action = agent.eval_action(state_feat)
                else:
                    #action selection for alternate controller namely potential field
                    action = agent.eval_action(state)
                '''
                if args.render:
                    feat_ext.overlay_bins(state)
                '''
            else:
                action = agent.eval_action(state)
            state, reward_true, done, _ = env.step(action)
            # Drift = distance between the recorded pedestrian ("ghost")
            # and the agent after this step.
            drift_value += np.linalg.norm(env.ghost_state['position'] - env.agent_state['position'], 2)
            drift_per_ped += np.linalg.norm(env.ghost_state['position'] - env.agent_state['position'], 2)
            if args.feat_extractor is not None:
                state_feat = feat_ext.extract_features(state)
                state_feat = torch.from_numpy(state_feat).type(torch.FloatTensor).to(DEVICE)
                if crash_analysis:
                    pdb.set_trace()
            # NOTE(review): `reward` is computed but never used here.
            if args.reward_path is not None:
                reward = reward_net(state_feat)
            else:
                reward = reward_true
            #info_collector.collect_information_per_frame(state)
            t += 1
            abs_counter += 1
            if t%pos_reset == 0:
                #reset the position of the agent
                #print('t :', t)
                #print('resetting')
                segment_counter += 1
                segment_counter_per_ped += 1
                #print('Drift value : {} for segment {}'.format(drift_value, segment_counter))
                # Snap the agent back onto the recorded trajectory and aim
                # the goal pos_reset frames ahead again.
                env.agent_state = env.return_position(env.cur_ped, env.current_frame)
                env.state['agent_state'] = copy.deepcopy(env.agent_state)
                '''
                pos = env.agent_state['position']
                stop_points.append(pos)
                for pos in stop_points:
                    pygame.draw.circle(pygame.display.get_surface(), (0,0,0), (int(pos[1]), int(pos[0])), 20)
                    pygame.display.update()
                '''
                env.goal_state = env.return_position(env.cur_ped, env.current_frame + pos_reset)['position']
                env.state['goal_state'] = copy.deepcopy(env.goal_state)
                state = copy.deepcopy(env.state)
                env.release_control = False
                t = 0
                done = False
        # Guard against division by zero for very short trajectories.
        if segment_counter_per_ped == 0:
            segment_counter_per_ped = 1
        drift_info_detailed[i] = drift_per_ped/segment_counter_per_ped
    return drift_info_detailed
'''
def agent_drift_analysis_by_density(agent=agent,
agent_type=args.agent_type,
ped_list=None,
viscinity=30,
pos_reset=20):
# step interval after which to reset the position
# input : agent, agent_type,
# ped_list (optional) - list of pedestrians
# viscinity (optional) - radius around the agent to get pedestrian density
# pos_reset - frames after which to reset the agent
# Plays the agent on the provided environment with the assigned reset value
# for the assigned number of trajectories. Can be played with or without render
# returns :
# The an array that contains the drift analysis for each of the
# pedestrians in the list for the given pos_reset.
drift_value = 0
segment_counter = 0
env.cur_ped = None
print('Starting drift analysis of agent :{}. Reset\
interval :{}'.format(agent_type, pos_reset))
if ped_list is not None:
num_trajs = len(ped_list)
else:
num_trajs = args.num_trajs
#an array containing the drift value for each pedestrian
drift_info_detailed = {}
for i in tqdm(range(num_trajs)):
#reset the world
crash_analysis = False
if ped_list is None:
state = env.reset()
else:
state = env.reset_and_replace(ped=ped_list[i])
env.goal_state = copy.deepcopy(env.return_position(env.cur_ped, env.current_frame + pos_reset)['position'])
env.state['goal_state'] = copy.deepcopy(env.goal_state)
state = copy.deepcopy(env.state)
#print('Current subject :', env.cur_ped)
final_frame = env.final_frame
if args.feat_extractor is not None:
feat_ext.reset()
state_feat = feat_ext.extract_features(state)
#pass
#reset the information collector
info_collector.reset_info(state)
done = False
t = 0
abs_counter = env.current_frame
while abs_counter < final_frame:
stop_points = []
if args.feat_extractor is not None:
if agent_type == 'Policy_network':
action = agent.eval_action(state_feat)
else:
#action selection for alternate controller namely potential field
action = agent.eval_action(state)
# if args.render:
# feat_ext.overlay_bins(state)
else:
action = agent.eval_action(state)
state, reward_true, done, _ = env.step(action)
#use the state to get the nearby density
#use the density to classify the frame and store the dirft information
#accordingly
peds_nearby = get_pedestrians_in_viscinity(state, viscinity)
drift_value = np.linalg.norm(env.ghost_state['position'] - env.agent_state['position'], 2)
if str(peds_nearby) in drift_info_detailed.keys():
drift_info_detailed[str(peds_nearby)] += drift_value
else:
drift_info_detailed[str(peds_nearby)] = drift_value
if args.feat_extractor is not None:
state_feat = feat_ext.extract_features(state)
if crash_analysis:
pdb.set_trace()
if args.reward_path is not None:
reward = reward_net(state_feat)
else:
reward = reward_true
#info_collector.collect_information_per_frame(state)
t += 1
abs_counter += 1
if t%pos_reset == 0:
#reset the position of the agent
#print('t :', t)
#print('resetting')
segment_counter += 1
segment_counter_per_ped += 1
#print('Drift value : {} for segment {}'.format(drift_value, segment_counter))
env.agent_state = env.return_position(env.cur_ped, env.current_frame)
env.state['agent_state'] = copy.deepcopy(env.agent_state)
# pos = env.agent_state['position']
# stop_points.append(pos)
# for pos in stop_points:
# pygame.draw.circle(pygame.display.get_surface(), (0,0,0), (int(pos[1]), int(pos[0])), 20)
# pygame.display.update()
env.goal_state = env.return_position(env.cur_ped, env.current_frame + pos_reset)['position']
env.state['goal_state'] = copy.deepcopy(env.goal_state)
state = copy.deepcopy(env.state)
env.release_control = False
t = 0
done = False
drift_info_detailed[i] = drift_per_ped/segment_counter_per_ped
return drift_info_detailed
'''
def play_environment(ped_list, path=None, max_traj_length=1000):
    '''
    Replay each pedestrian in ped_list with the globally configured agent
    (args.agent_type) for at most max_traj_length steps.  When `path` is
    given and the episode terminates, the visited state list is saved to
    <path>/traj<ped>.states via np.save.  Uses module-level globals:
    args, env, feat_ext, agent, torch DEVICE.
    '''
    agent_type = args.agent_type
    for i in tqdm(range(len(ped_list))):
        #reset the world
        state = env.reset_and_replace(ped=ped_list[i])
        #print("Starting pedestrian :", ped_list[i])
        final_frame = env.final_frame
        if args.feat_extractor is not None:
            feat_ext.reset()
            state_feat = feat_ext.extract_features(state)
            state_feat = torch.from_numpy(state_feat).type(torch.FloatTensor).to(DEVICE)
        state_list = [state]
        done = False
        t = 0
        abs_counter = env.current_frame
        #while abs_counter < final_frame:
        while t < max_traj_length:
            if args.feat_extractor is not None:
                if agent_type == 'Policy_network':
                    action = agent.eval_action(state_feat)
                elif agent_type == 'Potential_field':
                    #action selection for alternate controller namely potential field
                    action = agent.eval_action(state)
                else:
                    # Unknown agent type: fall back to a fixed action 0.
                    action = 0
                '''
                if args.render:
                    feat_ext.overlay_bins(state)
                '''
            else:
                action = agent.eval_action(state)
            state, reward_true, done, _ = env.step(action)
            state_list.append(state)
            if args.feat_extractor is not None:
                state_feat = feat_ext.extract_features(state)
                state_feat = torch.from_numpy(state_feat).type(torch.FloatTensor).to(DEVICE)
            if done:
                if path is not None:
                    pathlib.Path(path).mkdir(parents=True, exist_ok=True)
                    #print('Storing for ', ped_list[i])
                    np.save(os.path.join(path, 'traj%s.states' % str(ped_list[i])), state_list)
                break
            #info_collector.collect_information_per_frame(state)
            abs_counter += 1
            t += 1
def drift_analysis(agent_list, agent_type_list,
                   ped_list=None,
                   start_interval=10,
                   reset_interval=10,
                   max_interval=100):
    '''
    Run agent_drift_analysis for every agent over a sweep of reset
    intervals.

    input : a list of agents, their matching type strings, and the sweep
            bounds (start_interval .. max_interval, stepped by
            reset_interval).
    returns :
        one list per agent, each containing the per-pedestrian drift
        arrays produced for every reset interval in the sweep.
    '''
    drift_lists = []
    for current_agent, current_type in zip(agent_list, agent_type_list):
        per_agent_results = []
        interval = start_interval
        while interval <= max_interval:
            per_agent_results.append(
                agent_drift_analysis(current_agent, current_type,
                                     ped_list=ped_list,
                                     pos_reset=interval))
            interval += reset_interval
        drift_lists.append(per_agent_results)
    #plot drift_lists
    return drift_lists
if __name__ == '__main__':
    '''
    agent_drift_analysis(80)
    '''
    #**************** performing reward analysis
    '''
    reward_analysis()
    '''
    #************ performing drift analysis **************
    #initialize the agents
    #for potential field agent
    attr_mag = 3
    rep_mag = 2
    #agent = PFController()
    agent_list = []
    #easy, med, hard = classify_pedestrians(args.annotation_file, 30)
    #agent_type_list = ['Potential_field']
    agent_type_list = []
    #agent initialized from the commandline
    #agent_file_list = ['/home/abhisek/Study/Robotics/deepirl/experiments/results/Beluga/IRL Runs/Variable-speed-hit-full-run-suppressed-local-updated-features2019-12-14_16:38:00-policy_net-256--reward_net-256--reg-0.001-seed-9-lr-0.0005/saved-models/28.pt']
    #agent_file_list.append('/home/abhisek/Study/Robotics/deepirl/experiments/results/Quadra/RL Runs/Possible_strawman2019-12-16 12:22:05DroneFeatureRisk_speedv2-seed-789-policy_net-256--reward_net-128--total-ep-8000-max-ep-len-500/policy-models/0.pt')
    '''
    for agent_file in agent_file_list:
        agent_temp = Policy(feat_ext.state_rep_size, env.action_space.n, hidden_dims=args.policy_net_hidden_dims)
        agent_temp.load(agent_file)
        agent_list.append(agent_temp)
        agent_type_list.append('Policy_network')
    '''
    # Sweep parameters for the drift analysis below (50..170 step 30).
    start_interval = 50
    reset_int = 30
    reset_lim = 170
    #dirft list is list where [[agent1_drift info][agent2_drift_info]]
    #where agent1_dirft_info = [[array containing drift info of peds for a given reset pos]]
    data = np.genfromtxt('./Pedestrian_info/all150.csv', delimiter=' ')
    ped_list = data[:, 1]
    ped_list = ped_list.astype(int)
    ped_list = np.sort(ped_list)
    play_environment(ped_list, path='./PFController',
                     max_traj_length=args.max_ep_length)
    # NOTE(review): everything after this sys.exit() is dead code -- the
    # drift analysis and plotting below never run in the current script.
    sys.exit()
    #ped_list = np.concatenate((easy, med, hard), axis=0)
    ped_list_name = 'all'
    drift_lists = drift_analysis(agent_list, agent_type_list, ped_list=ped_list, start_interval=start_interval, reset_interval=reset_int, max_interval=reset_lim)
    drift_info_numpy = np.asarray(drift_lists)
    np.save('master_drift_array-50-170-30', drift_info_numpy)
    pdb.set_trace()
    ###################
    '''
    for i in range(drift_info_numpy.shape[1]):
        drift_info_particular_segment = drift_info_numpy[:, i, :]
        drift_diff = drift_info_particular_segment[0, :] - drift_info_particular_segment[1, :]
        drift_diff_frac = np.divide(drift_diff, drift_info_particular_segment[0, :])
        drift_diff_frac_with_ped = np.concatenate((np.expand_dims(drift_diff_frac, 1),
                                                   np.expand_dims(ped_list, 1)),
                                                   axis=1)
        sort_index = np.argsort(drift_diff_frac)
        sort_diff_and_ped = drift_diff_frac_with_ped[sort_index, :]
        x_axis_2 = np.arange(drift_info_numpy.shape[-1])
        plt.bar(x_axis_2, sort_diff_and_ped[:,0])
        plt.xticks(x_axis_2, sort_diff_and_ped[:, 1])
        file_name = ped_list_name + str(start_interval) +'.csv'
        #np.savetxt(file_name, sort_diff_and_ped)
        plt.show()
        pdb.set_trace()
    '''
    #####################
    x_axis = np.arange(int((reset_lim-start_interval)/reset_int)+1)
    #get the mean and std deviation of pedestrians from drift_lists
    fig, ax = plt.subplots()
    for i in range(len(drift_lists)):
        mean_drift = [np.mean(drift_info_interval) for drift_info_interval in drift_lists[i]]
        std_div_drift = [np.std(drift_info_interval) for drift_info_interval in drift_lists[i]]
        ax.errorbar(x_axis, mean_drift, yerr=std_div_drift, label=agent_type_list[i]+str(i),
                    capsize=5, capthick=3, alpha=0.5)
    ax.set_xticks(x_axis)
    ax.set_xticklabels(start_interval+x_axis*reset_int)
    ax.set_xlabel('Reset interval (in frames)')
    ax.set_ylabel('Divergence from ground truth')
    ax.legend()
    plt.show()
    #*******************************************
'''
data = np.genfromtxt('./Pedestrian_info/all150.csv', delimiter=' ')
pdb.set_trace()
ped_list = data[:,1]
ped_list = ped_list.astype(int)
play_environment(ped_list.tolist())
''' | [
"sys.path.insert",
"rlmethods.b_actor_critic.Policy",
"torch.from_numpy",
"torch.cuda.is_available",
"sys.exit",
"copy.deepcopy",
"numpy.linalg.norm",
"numpy.genfromtxt",
"numpy.save",
"numpy.mean",
"irlmethods.deep_maxent.RewardNet",
"argparse.ArgumentParser",
"pathlib.Path",
"numpy.sort"... | [((43, 67), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (58, 67), False, 'import sys\n'), ((925, 950), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (948, 950), False, 'import argparse\n'), ((4579, 4590), 'time.time', 'time.time', ([], {}), '()\n', (4588, 4590), False, 'import datetime, time\n'), ((5006, 5142), 'envs.drone_env_utils.InformationCollector', 'InformationCollector', ([], {'run_info': 'args.agent_type', 'thresh': '(thresh2 * step_size)', 'plot_info': 'args.save_plots', 'store_info': 'args.store_results'}), '(run_info=args.agent_type, thresh=thresh2 * step_size,\n plot_info=args.save_plots, store_info=args.store_results)\n', (5026, 5142), False, 'from envs.drone_env_utils import InformationCollector\n'), ((5439, 5464), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (5453, 5464), True, 'import numpy as np\n'), ((5471, 5922), 'envs.gridworld_drone.GridWorldDrone', 'GridWorldDrone', ([], {'display': 'args.render', 'is_onehot': '(False)', 'seed': 'args.seed', 'obstacles': 'None', 'show_trail': '(True)', 'is_random': '(False)', 'subject': 'args.subject', 'annotation_file': 'args.annotation_file', 'tick_speed': '(60)', 'obs_width': '(10)', 'step_size': 'step_size', 'agent_width': 'agent_width', 'external_control': '(True)', 'replace_subject': 'args.run_exact', 'show_comparison': '(True)', 'consider_heading': 'consider_heading', 'show_orientation': '(True)', 'rows': '(576)', 'cols': '(720)', 'width': 'grid_size'}), '(display=args.render, is_onehot=False, seed=args.seed,\n obstacles=None, show_trail=True, is_random=False, subject=args.subject,\n annotation_file=args.annotation_file, tick_speed=60, obs_width=10,\n step_size=step_size, agent_width=agent_width, external_control=True,\n replace_subject=args.run_exact, show_comparison=True, consider_heading=\n consider_heading, show_orientation=True, rows=576, cols=720, width=\n grid_size)\n', (5485, 5922), 
False, 'from envs.gridworld_drone import GridWorldDrone\n'), ((4782, 4803), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (4796, 4803), False, 'import matplotlib\n'), ((6696, 6839), 'featureExtractor.drone_feature_extractor.DroneFeatureSAM1', 'DroneFeatureSAM1', ([], {'agent_width': 'agent_width', 'obs_width': 'obs_width', 'step_size': 'step_size', 'grid_size': 'grid_size', 'thresh1': 'thresh1', 'thresh2': 'thresh2'}), '(agent_width=agent_width, obs_width=obs_width, step_size=\n step_size, grid_size=grid_size, thresh1=thresh1, thresh2=thresh2)\n', (6712, 6839), False, 'from featureExtractor.drone_feature_extractor import DroneFeatureSAM1, DroneFeatureMinimal\n'), ((7027, 7162), 'featureExtractor.drone_feature_extractor.DroneFeatureOccup', 'DroneFeatureOccup', ([], {'agent_width': 'agent_width', 'obs_width': 'obs_width', 'step_size': 'step_size', 'grid_size': 'grid_size', 'window_size': 'window_size'}), '(agent_width=agent_width, obs_width=obs_width, step_size=\n step_size, grid_size=grid_size, window_size=window_size)\n', (7044, 7162), False, 'from featureExtractor.drone_feature_extractor import DroneFeatureOccup, DroneFeatureRisk\n'), ((7354, 7524), 'featureExtractor.drone_feature_extractor.DroneFeatureRisk', 'DroneFeatureRisk', ([], {'agent_width': 'agent_width', 'obs_width': 'obs_width', 'step_size': 'step_size', 'grid_size': 'grid_size', 'show_agent_persp': '(True)', 'thresh1': 'thresh1', 'thresh2': 'thresh2'}), '(agent_width=agent_width, obs_width=obs_width, step_size=\n step_size, grid_size=grid_size, show_agent_persp=True, thresh1=thresh1,\n thresh2=thresh2)\n', (7370, 7524), False, 'from featureExtractor.drone_feature_extractor import DroneFeatureOccup, DroneFeatureRisk\n'), ((7743, 7918), 'featureExtractor.drone_feature_extractor.DroneFeatureRisk_v2', 'DroneFeatureRisk_v2', ([], {'agent_width': 'agent_width', 'obs_width': 'obs_width', 'step_size': 'step_size', 'grid_size': 'grid_size', 'show_agent_persp': '(False)', 'thresh1': 
'thresh1', 'thresh2': 'thresh2'}), '(agent_width=agent_width, obs_width=obs_width, step_size\n =step_size, grid_size=grid_size, show_agent_persp=False, thresh1=\n thresh1, thresh2=thresh2)\n', (7762, 7918), False, 'from featureExtractor.drone_feature_extractor import DroneFeatureRisk_v2, DroneFeatureRisk_speed, DroneFeatureRisk_speedv2\n'), ((8153, 8328), 'featureExtractor.drone_feature_extractor.DroneFeatureRisk_speed', 'DroneFeatureRisk_speed', ([], {'agent_width': 'agent_width', 'obs_width': 'obs_width', 'step_size': 'step_size', 'grid_size': 'grid_size', 'show_agent_persp': '(True)', 'thresh1': 'thresh1', 'thresh2': 'thresh2'}), '(agent_width=agent_width, obs_width=obs_width,\n step_size=step_size, grid_size=grid_size, show_agent_persp=True,\n thresh1=thresh1, thresh2=thresh2)\n', (8175, 8328), False, 'from featureExtractor.drone_feature_extractor import DroneFeatureRisk_v2, DroneFeatureRisk_speed, DroneFeatureRisk_speedv2\n'), ((8570, 8710), 'featureExtractor.drone_feature_extractor.DroneFeatureRisk_speedv2', 'DroneFeatureRisk_speedv2', ([], {'agent_width': 'agent_width', 'obs_width': 'obs_width', 'step_size': 'step_size', 'grid_size': 'grid_size', 'thresh1': '(18)', 'thresh2': '(30)'}), '(agent_width=agent_width, obs_width=obs_width,\n step_size=step_size, grid_size=grid_size, thresh1=18, thresh2=30)\n', (8594, 8710), False, 'from featureExtractor.drone_feature_extractor import DroneFeatureRisk_v2, DroneFeatureRisk_speed, DroneFeatureRisk_speedv2\n'), ((9055, 9070), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (9068, 9070), False, 'import pdb\n'), ((9084, 9181), 'rlmethods.b_actor_critic.Policy', 'Policy', (['feat_ext.state_rep_size', 'env.action_space.n'], {'hidden_dims': 'args.policy_net_hidden_dims'}), '(feat_ext.state_rep_size, env.action_space.n, hidden_dims=args.\n policy_net_hidden_dims)\n', (9090, 9181), False, 'from rlmethods.b_actor_critic import Policy\n'), ((9605, 9654), 
'alternateController.potential_field_controller.PotentialFieldController', 'PFController', (['speed_div', 'orient_div', 'orient_quant'], {}), '(speed_div, orient_div, orient_quant)\n', (9617, 9654), True, 'from alternateController.potential_field_controller import PotentialFieldController as PFController\n'), ((9874, 9933), 'alternateController.social_forces_controller.SocialForcesController', 'SocialForcesController', (['speed_div', 'orient_div', 'orient_quant'], {}), '(speed_div, orient_div, orient_quant)\n', (9896, 9933), False, 'from alternateController.social_forces_controller import SocialForcesController\n'), ((10311, 10361), 'irlmethods.deep_maxent.RewardNet', 'RewardNet', (['state_size', 'args.reward_net_hidden_dims'], {}), '(state_size, args.reward_net_hidden_dims)\n', (10320, 10361), False, 'from irlmethods.deep_maxent import RewardNet\n'), ((18644, 18663), 'numpy.zeros', 'np.zeros', (['num_trajs'], {}), '(num_trajs)\n', (18652, 18663), True, 'import numpy as np\n'), ((32069, 32129), 'numpy.genfromtxt', 'np.genfromtxt', (['"""./Pedestrian_info/all150.csv"""'], {'delimiter': '""" """'}), "('./Pedestrian_info/all150.csv', delimiter=' ')\n", (32082, 32129), True, 'import numpy as np\n'), ((32208, 32225), 'numpy.sort', 'np.sort', (['ped_list'], {}), '(ped_list)\n', (32215, 32225), True, 'import numpy as np\n'), ((32347, 32357), 'sys.exit', 'sys.exit', ([], {}), '()\n', (32355, 32357), False, 'import sys\n'), ((32632, 32655), 'numpy.asarray', 'np.asarray', (['drift_lists'], {}), '(drift_lists)\n', (32642, 32655), True, 'import numpy as np\n'), ((32660, 32717), 'numpy.save', 'np.save', (['"""master_drift_array-50-170-30"""', 'drift_info_numpy'], {}), "('master_drift_array-50-170-30', drift_info_numpy)\n", (32667, 32717), True, 'import numpy as np\n'), ((32722, 32737), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (32735, 32737), False, 'import pdb\n'), ((33911, 33925), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (33923, 33925), True, 
'import matplotlib.pyplot as plt\n'), ((34510, 34520), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (34518, 34520), True, 'import matplotlib.pyplot as plt\n'), ((878, 903), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (901, 903), False, 'import torch\n'), ((4596, 4631), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (4627, 4631), False, 'import datetime, time\n'), ((19043, 19072), 'copy.deepcopy', 'copy.deepcopy', (['env.goal_state'], {}), '(env.goal_state)\n', (19056, 19072), False, 'import copy\n'), ((19089, 19113), 'copy.deepcopy', 'copy.deepcopy', (['env.state'], {}), '(env.state)\n', (19102, 19113), False, 'import copy\n'), ((11046, 11057), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (11054, 11057), True, 'import numpy as np\n'), ((11088, 11099), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (11096, 11099), True, 'import numpy as np\n'), ((14310, 14320), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14318, 14320), True, 'import matplotlib.pyplot as plt\n'), ((20322, 20398), 'numpy.linalg.norm', 'np.linalg.norm', (["(env.ghost_state['position'] - env.agent_state['position'])", '(2)'], {}), "(env.ghost_state['position'] - env.agent_state['position'], 2)\n", (20336, 20398), True, 'import numpy as np\n'), ((20428, 20504), 'numpy.linalg.norm', 'np.linalg.norm', (["(env.ghost_state['position'] - env.agent_state['position'])", '(2)'], {}), "(env.ghost_state['position'] - env.agent_state['position'], 2)\n", (20442, 20504), True, 'import numpy as np\n'), ((33986, 34014), 'numpy.mean', 'np.mean', (['drift_info_interval'], {}), '(drift_info_interval)\n', (33993, 34014), True, 'import numpy as np\n'), ((34083, 34110), 'numpy.std', 'np.std', (['drift_info_interval'], {}), '(drift_info_interval)\n', (34089, 34110), True, 'import numpy as np\n'), ((21500, 21530), 'copy.deepcopy', 'copy.deepcopy', (['env.agent_state'], {}), '(env.agent_state)\n', (21513, 21530), False, 
'import copy\n'), ((22003, 22032), 'copy.deepcopy', 'copy.deepcopy', (['env.goal_state'], {}), '(env.goal_state)\n', (22016, 22032), False, 'import copy\n'), ((22057, 22081), 'copy.deepcopy', 'copy.deepcopy', (['env.state'], {}), '(env.state)\n', (22070, 22081), False, 'import copy\n'), ((13376, 13409), 'matplotlib.pyplot.plot', 'plt.plot', (['true_reward_norm'], {'c': '"""r"""'}), "(true_reward_norm, c='r')\n", (13384, 13409), True, 'import matplotlib.pyplot as plt\n'), ((13430, 13466), 'matplotlib.pyplot.plot', 'plt.plot', (['network_reward_norm'], {'c': '"""b"""'}), "(network_reward_norm, c='b')\n", (13438, 13466), True, 'import matplotlib.pyplot as plt\n'), ((15899, 15914), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (15912, 15914), False, 'import pdb\n'), ((20764, 20779), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (20777, 20779), False, 'import pdb\n'), ((19357, 19385), 'torch.from_numpy', 'torch.from_numpy', (['state_feat'], {}), '(state_feat)\n', (19373, 19385), False, 'import torch\n'), ((27792, 27820), 'torch.from_numpy', 'torch.from_numpy', (['state_feat'], {}), '(state_feat)\n', (27808, 27820), False, 'import torch\n'), ((29006, 29024), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (29018, 29024), False, 'import pathlib\n'), ((13058, 13073), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (13066, 13073), True, 'import numpy as np\n'), ((13172, 13187), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (13180, 13187), True, 'import numpy as np\n'), ((20644, 20672), 'torch.from_numpy', 'torch.from_numpy', (['state_feat'], {}), '(state_feat)\n', (20660, 20672), False, 'import torch\n'), ((28864, 28892), 'torch.from_numpy', 'torch.from_numpy', (['state_feat'], {}), '(state_feat)\n', (28880, 28892), False, 'import torch\n')] |
import numpy as np
import glob
import random
import torch
import torch.utils.data
from analyzer.data.utils.data_raw import *
from analyzer.data.utils.data_misc import *
from analyzer.data.augmentation import Augmentor
class PairDataset():
'''
This Dataloader will prepare sample that are pairs for feeding the contrastive
learning algorithm.
'''
def __init__(self, cfg, iter_num: int = -1):
self.cfg = cfg
self.chunks_path = self.cfg.SSL.USE_PREP_DATASET
self.sample_volume_size = (64, 64, 64)
self.sample_stride = (1, 1, 1)
self.cl_mode = self.cfg.MODE.PROCESS.replace('cl', '')
self.augmentor = Augmentor(self.sample_volume_size)
# Data information if you want to produce input on the fly.
if not self.cfg.SSL.USE_PREP_DATASET:
self.volume, self.label = self.get_input()
self.volume_size = [np.array(self.volume.shape)]
self.sample_volume_size = np.array(self.sample_volume_size).astype(int)
self.sample_stride = np.array(self.sample_stride).astype(int)
self.sample_size = [count_volume(self.volume_size[x], self.sample_volume_size, self.sample_stride)
for x in range(len(self.volume_size))]
self.sample_num = np.array([np.prod(x) for x in self.sample_size])
self.sample_num_a = np.sum(self.sample_num)
self.sample_num_c = np.cumsum([0] + list(self.sample_num))
self.iter_num = max(iter_num, self.sample_num_a)
print('Dataset chunks that will be iterated over: {}'.format(self.iter_num))
def __len__(self):
if not self.cfg.SSL.USE_PREP_DATASET:
return self.iter_num
else:
with h5py.File(self.chunks_path, 'r') as f:
return len(f['id'])
def __getitem__(self, idx):
return self.create_sample_pair(idx)
def create_sample_pair(self, idx):
'''Create a sample pair that will be used for contrastive learning.
'''
if not self.cfg.SSL.USE_PREP_DATASET:
sample = self.reject_sample()
else:
with h5py.File(self.chunks_path, 'r') as f:
sample = f['chunk'][idx]
unique_label = int(f['id'][idx])
if 'gt' in list(f.keys()):
gt_label = int(f['gt'][idx])
else:
gt_label = None
if sample.ndim > 3:
sample = np.squeeze(sample)
if self.cl_mode == 'train':
sample_pair = self.augmentor(sample)
return (sample_pair, unique_label, gt_label)
else:
return (np.expand_dims(sample, axis=0).copy(), unique_label, gt_label)
def create_chunk_volume(self):
'''
Function creates small chunk from input volume that is processed
into the training model.
'''
pos = self.get_pos(self.sample_volume_size)
pos, out_vol, out_label = self.crop_with_pos(pos, self.sample_volume_size)
return pos, self.create_masked_input(out_vol, out_label)
def create_masked_input(self, vol: np.ndarray, label: np.ndarray) -> np.ndarray:
'''
Create masked input volume, that is pure EM where the mask is not 0. Otherwise all
values set to 0. Returns the prepared mask.
:params vol (numpy.ndarray): volume that is EM input.
:params label (numpy.ndarray): associated label volume.
'''
vol[np.where(label == 0)] = 0
return np.array(vol)
def get_input(self):
'''Get input volume and labels.'''
emfns = sorted(glob.glob(self.cfg.DATASET.EM_PATH + '*.' + self.cfg.DATASET.FILE_FORMAT))
labelfns = sorted(glob.glob(self.cfg.DATASET.LABEL_PATH + '*.' + self.cfg.DATASET.FILE_FORMAT))
if len(emfns) == 1:
vol = readvol(emfns[0])
label = readvol(labelfns[0])
else:
vol = folder2Vol(chunk_size=self.cfg.DATASET.CHUNK_SIZE, fns=emfns, file_format=self.cfg.DATASET.FILE_FORMAT)
label = folder2Vol(chunk_size=self.cfg.DATASET.CHUNK_SIZE, fns=labelfns, file_format=self.cfg.DATASET.FILE_FORMAT)
return vol, label
def crop_with_pos(self, pos, vol_size):
out_volume = (crop_volume(
self.volume, vol_size, pos[1:])/255.0).astype(np.float32)
out_label = crop_volume(
self.label, vol_size, pos[1:])
return pos, out_volume, out_label
def get_pos(self, vol_size):
pos = [0, 0, 0, 0]
# pick a dataset
did = self.index_to_dataset(random.randint(0, self.sample_num - 1))
pos[0] = did
# pick a position
tmp_size = count_volume(
self.volume_size[did], vol_size, self.sample_stride)
tmp_pos = [random.randint(0, tmp_size[x]-1) * self.sample_stride[x]
for x in range(len(tmp_size))]
pos[1:] = tmp_pos
return pos
def index_to_dataset(self, index):
return np.argmax(index < self.sample_num_c) - 1
def reject_sample(self):
'''function makes sure that sample contains actual objects that are
sufficiently large enough.'''
while True:
_, sample = self.create_chunk_volume()
if np.count_nonzero(sample) > 0:
return sample
| [
"analyzer.data.augmentation.Augmentor",
"numpy.prod",
"numpy.where",
"numpy.argmax",
"numpy.squeeze",
"numpy.count_nonzero",
"numpy.array",
"numpy.sum",
"numpy.expand_dims",
"random.randint",
"glob.glob"
] | [((666, 700), 'analyzer.data.augmentation.Augmentor', 'Augmentor', (['self.sample_volume_size'], {}), '(self.sample_volume_size)\n', (675, 700), False, 'from analyzer.data.augmentation import Augmentor\n'), ((3562, 3575), 'numpy.array', 'np.array', (['vol'], {}), '(vol)\n', (3570, 3575), True, 'import numpy as np\n'), ((1383, 1406), 'numpy.sum', 'np.sum', (['self.sample_num'], {}), '(self.sample_num)\n', (1389, 1406), True, 'import numpy as np\n'), ((3521, 3541), 'numpy.where', 'np.where', (['(label == 0)'], {}), '(label == 0)\n', (3529, 3541), True, 'import numpy as np\n'), ((3668, 3741), 'glob.glob', 'glob.glob', (["(self.cfg.DATASET.EM_PATH + '*.' + self.cfg.DATASET.FILE_FORMAT)"], {}), "(self.cfg.DATASET.EM_PATH + '*.' + self.cfg.DATASET.FILE_FORMAT)\n", (3677, 3741), False, 'import glob\n'), ((3769, 3845), 'glob.glob', 'glob.glob', (["(self.cfg.DATASET.LABEL_PATH + '*.' + self.cfg.DATASET.FILE_FORMAT)"], {}), "(self.cfg.DATASET.LABEL_PATH + '*.' + self.cfg.DATASET.FILE_FORMAT)\n", (3778, 3845), False, 'import glob\n'), ((4632, 4670), 'random.randint', 'random.randint', (['(0)', '(self.sample_num - 1)'], {}), '(0, self.sample_num - 1)\n', (4646, 4670), False, 'import random\n'), ((5044, 5080), 'numpy.argmax', 'np.argmax', (['(index < self.sample_num_c)'], {}), '(index < self.sample_num_c)\n', (5053, 5080), True, 'import numpy as np\n'), ((903, 930), 'numpy.array', 'np.array', (['self.volume.shape'], {}), '(self.volume.shape)\n', (911, 930), True, 'import numpy as np\n'), ((4836, 4870), 'random.randint', 'random.randint', (['(0)', '(tmp_size[x] - 1)'], {}), '(0, tmp_size[x] - 1)\n', (4850, 4870), False, 'import random\n'), ((5315, 5339), 'numpy.count_nonzero', 'np.count_nonzero', (['sample'], {}), '(sample)\n', (5331, 5339), True, 'import numpy as np\n'), ((970, 1003), 'numpy.array', 'np.array', (['self.sample_volume_size'], {}), '(self.sample_volume_size)\n', (978, 1003), True, 'import numpy as np\n'), ((1049, 1077), 'numpy.array', 'np.array', 
(['self.sample_stride'], {}), '(self.sample_stride)\n', (1057, 1077), True, 'import numpy as np\n'), ((1312, 1322), 'numpy.prod', 'np.prod', (['x'], {}), '(x)\n', (1319, 1322), True, 'import numpy as np\n'), ((2506, 2524), 'numpy.squeeze', 'np.squeeze', (['sample'], {}), '(sample)\n', (2516, 2524), True, 'import numpy as np\n'), ((2701, 2731), 'numpy.expand_dims', 'np.expand_dims', (['sample'], {'axis': '(0)'}), '(sample, axis=0)\n', (2715, 2731), True, 'import numpy as np\n')] |
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import numpy as np
import matplotlib.pyplot as plt
char_images = [
['###', #line
' ',
' '],
[' ', #line
'###',
' '],
[' ', #line
' ',
'###'],
['# ', #line
'# ',
'# '],
[' # ', #line
' # ',
' # '],
[' #', #line
' #',
' #'],
[' ', #no line
' ',
' '],
['# ', #no line
' ',
' #'],
[' ', #no line
' # ',
' '],
[' ', #no line
' ',
' #'],
[' #', #no line
' ##',
'# '],
['# #', #no line
' # ',
' # '],
[' # ', #no line
'# ',
'# #'],
['## ', #no line
' ',
' #'],
['# ', #no line
' ##',
' '],
['###', #no line
'###',
'###'],
]
lines = [
[ 1 ],
[ 1 ],
[ 1 ],
[ 1 ],
[ 1 ],
[ 1 ],
[ 0 ],
[ 0 ],
[ 0 ],
[ 0 ],
[ 0 ],
[ 0 ],
[ 0 ],
[ 0 ],
[ 0 ],
[ 0 ],
]
bin_images = np.array([
[
[
[ 1.0 ] if px == '#' else [ 0.0 ] #Make the image pixels consist of single element vectors.
for px in row
] for row in img
] for img in char_images
], np.float32)
#Like the indexed bigrams, but instead this is a bunch of 2x2 image regions.
bin_regions = np.unique(np.concatenate([ bin_images[:, i:i+2, j:j+2,:] for i in range(2) for j in range(2) ]), axis=0)
#Represent regions as binary numbers to make it easier to show them on charts.
char_regions = [
''.join('1' if b == [1.0] else '0' for row in region for b in row)
for region in bin_regions.tolist()
]
###################################
class Model(object):
def __init__(self):
learning_rate = 1.0
momentum = 0.9
init_stddev = 1e-2
embed_size = 2
kernel_width = 2
kernel_height = 2
kernel_size = 2
self.graph = tf.Graph()
with self.graph.as_default():
self.images = tf.placeholder(tf.float32, [None, None, None, 1], 'images')
self.targets = tf.placeholder(tf.float32, [None, 1], 'targets')
self.params = []
batch_size = tf.shape(self.images)[0]
with tf.variable_scope('hidden'):
W = tf.get_variable('W', [kernel_height, kernel_width, 1, kernel_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev)) #Note that the image consists of a grid of single element (1) vectors.
b = tf.get_variable('b', [kernel_size], tf.float32, tf.zeros_initializer())
self.params.extend([ W, b ])
self.conv_hs = tf.sigmoid(tf.nn.conv2d(self.images, W, [1,1,1,1], 'VALID') + b)
#Perform max pooling but first turn the resultant grid of vectors into a sequence in order to become a single vector after pooling.
num_conv_rows = tf.shape(self.conv_hs)[1]
num_conv_cols = tf.shape(self.conv_hs)[2]
flat_hs = tf.reshape(self.conv_hs, [ batch_size, num_conv_rows*num_conv_cols, kernel_size ])
self.pool_hs = tf.reduce_max(flat_hs, axis=1) #Max pooling
with tf.variable_scope('output'):
W = tf.get_variable('W', [kernel_size, 1], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
b = tf.get_variable('b', [1], tf.float32, tf.zeros_initializer())
self.params.extend([ W, b ])
logits = tf.matmul(self.pool_hs, W) + b
self.probs = tf.sigmoid(logits)
self.error = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.targets, logits=logits))
self.optimiser_step = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(self.error)
self.init = tf.global_variables_initializer()
self.graph.finalize()
self.sess = tf.Session()
def initialise(self):
return self.sess.run([ self.init ], { })
def close(self):
self.sess.close()
def optimisation_step(self, images, targets):
return self.sess.run([ self.optimiser_step ], { self.images: images, self.targets: targets })
def get_params(self):
return self.sess.run(self.params, { })
def get_error(self, images, targets):
return self.sess.run([ self.error ], { self.images: images, self.targets: targets })[0]
def predict(self, images):
return self.sess.run([ self.probs ], { self.images: images })[0]
def get_conv(self, images):
return self.sess.run([ self.conv_hs ], { self.images: images })[0]
def get_pool(self, images):
return self.sess.run([ self.pool_hs ], { self.images: images })[0]
###################################
max_epochs = 2000
(fig, axs) = plt.subplots(1, 2)
region_plots = list()
region_texts = list()
for char_region in char_regions:
[ region_plot ] = axs[0].plot([ 0 ], [ 0 ], linestyle='', marker='o', markersize=10)
region_plots.append(region_plot)
region_text = axs[0].text(0, 0, char_region, fontdict={ 'fontsize': 8 })
region_texts.append(region_text)
axs[0].set_xlim(0.0, 1.0)
axs[0].set_xlabel('d0')
axs[0].set_ylim(0.0, 1.0)
axs[0].set_ylabel('d1')
axs[0].grid(True)
axs[0].set_title('Regions')
[ train_error_plot ] = axs[1].plot([], [], color='red', linestyle='-', linewidth=1, label='train')
axs[1].set_xlim(0, max_epochs)
axs[1].set_xlabel('epoch')
axs[1].set_ylim(0.0, 2.0)
axs[1].set_ylabel('XE')
axs[1].grid(True)
axs[1].set_title('Error progress')
axs[1].legend()
fig.tight_layout()
fig.show()
###################################
model = Model()
model.initialise()
train_errors = list()
print('epoch', 'train error', sep='\t')
for epoch in range(1, max_epochs+1):
train_error = model.get_error(bin_images, lines)
train_errors.append(train_error)
if epoch%100 == 0:
print(epoch, train_error, sep='\t')
convs = model.get_conv(bin_regions)
for (region_plot, region_text, conv) in zip(region_plots, region_texts, convs.tolist()):
region_plot.set_data([ conv[0][0][0] ], [ conv[0][0][1] ])
region_text.set_position( (conv[0][0][0], conv[0][0][1]) )
train_error_plot.set_data(np.arange(len(train_errors)), train_errors)
plt.draw()
fig.canvas.flush_events()
model.optimisation_step(bin_images, lines)
print()
print('region', 'vector', sep='\t')
convs = model.get_conv(bin_regions)
for (char_region, conv) in zip(char_regions, convs.tolist()):
print(char_region, np.round(conv[0][0], 3), sep='\t')
print()
probs = model.predict(bin_images)
print('image/line')
for (char_image, prob) in zip(char_images, probs.tolist()):
print('---')
print('\n'.join(char_image))
print('---')
print(np.round(prob[0], 3), sep='\t')
print()
model.close() | [
"tensorflow.shape",
"numpy.array",
"tensorflow.zeros_initializer",
"tensorflow.Graph",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.random_normal_initializer",
"tensorflow.matmul",
"numpy.round",
"tensorflow.nn.conv2d",
"tensorflow.v... | [((16, 49), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (39, 49), False, 'import warnings\n'), ((74, 136), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (108, 136), True, 'import tensorflow as tf\n'), ((1550, 1665), 'numpy.array', 'np.array', (["[[[([1.0] if px == '#' else [0.0]) for px in row] for row in img] for img in\n char_images]", 'np.float32'], {}), "([[[([1.0] if px == '#' else [0.0]) for px in row] for row in img] for\n img in char_images], np.float32)\n", (1558, 1665), True, 'import numpy as np\n'), ((5494, 5512), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (5506, 5512), True, 'import matplotlib.pyplot as plt\n'), ((2512, 2522), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2520, 2522), True, 'import tensorflow as tf\n'), ((7007, 7017), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (7015, 7017), True, 'import matplotlib.pyplot as plt\n'), ((7270, 7293), 'numpy.round', 'np.round', (['conv[0][0]', '(3)'], {}), '(conv[0][0], 3)\n', (7278, 7293), True, 'import numpy as np\n'), ((7505, 7525), 'numpy.round', 'np.round', (['prob[0]', '(3)'], {}), '(prob[0], 3)\n', (7513, 7525), True, 'import numpy as np\n'), ((2588, 2647), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, None, 1]', '"""images"""'], {}), "(tf.float32, [None, None, None, 1], 'images')\n", (2602, 2647), True, 'import tensorflow as tf\n'), ((2675, 2723), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]', '"""targets"""'], {}), "(tf.float32, [None, 1], 'targets')\n", (2689, 2723), True, 'import tensorflow as tf\n'), ((4466, 4499), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4497, 4499), True, 'import tensorflow as tf\n'), ((4572, 4584), 'tensorflow.Session', 'tf.Session', 
([], {}), '()\n', (4582, 4584), True, 'import tensorflow as tf\n'), ((2780, 2801), 'tensorflow.shape', 'tf.shape', (['self.images'], {}), '(self.images)\n', (2788, 2801), True, 'import tensorflow as tf\n'), ((2835, 2862), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""hidden"""'], {}), "('hidden')\n", (2852, 2862), True, 'import tensorflow as tf\n'), ((3609, 3695), 'tensorflow.reshape', 'tf.reshape', (['self.conv_hs', '[batch_size, num_conv_rows * num_conv_cols, kernel_size]'], {}), '(self.conv_hs, [batch_size, num_conv_rows * num_conv_cols,\n kernel_size])\n', (3619, 3695), True, 'import tensorflow as tf\n'), ((3723, 3753), 'tensorflow.reduce_max', 'tf.reduce_max', (['flat_hs'], {'axis': '(1)'}), '(flat_hs, axis=1)\n', (3736, 3753), True, 'import tensorflow as tf\n'), ((3785, 3812), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""output"""'], {}), "('output')\n", (3802, 3812), True, 'import tensorflow as tf\n'), ((4164, 4182), 'tensorflow.sigmoid', 'tf.sigmoid', (['logits'], {}), '(logits)\n', (4174, 4182), True, 'import tensorflow as tf\n'), ((4236, 4311), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'self.targets', 'logits': 'logits'}), '(labels=self.targets, logits=logits)\n', (4275, 4311), True, 'import tensorflow as tf\n'), ((2964, 3012), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (2992, 3012), True, 'import tensorflow as tf\n'), ((3153, 3175), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (3173, 3175), True, 'import tensorflow as tf\n'), ((3499, 3521), 'tensorflow.shape', 'tf.shape', (['self.conv_hs'], {}), '(self.conv_hs)\n', (3507, 3521), True, 'import tensorflow as tf\n'), ((3557, 3579), 'tensorflow.shape', 'tf.shape', (['self.conv_hs'], {}), '(self.conv_hs)\n', (3565, 3579), True, 'import tensorflow as tf\n'), ((3885, 3933), 
'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (3913, 3933), True, 'import tensorflow as tf\n'), ((3993, 4015), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (4013, 4015), True, 'import tensorflow as tf\n'), ((4104, 4130), 'tensorflow.matmul', 'tf.matmul', (['self.pool_hs', 'W'], {}), '(self.pool_hs, W)\n', (4113, 4130), True, 'import tensorflow as tf\n'), ((4360, 4411), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['learning_rate', 'momentum'], {}), '(learning_rate, momentum)\n', (4386, 4411), True, 'import tensorflow as tf\n'), ((3264, 3315), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['self.images', 'W', '[1, 1, 1, 1]', '"""VALID"""'], {}), "(self.images, W, [1, 1, 1, 1], 'VALID')\n", (3276, 3315), True, 'import tensorflow as tf\n')] |
from __future__ import print_function
import os
import pytest
import numpy as np
from numpy.testing import assert_allclose
from keras.utils import test_utils
from keras import optimizers, Input
from keras.models import Sequential, Model, load_model
from keras.layers.core import Dense, Activation, Lambda
from keras.utils.np_utils import to_categorical
from keras import backend as K
from optimizer import ClippedOptimizer
num_classes = 2
def get_test_data():
np.random.seed(1337)
(x_train, y_train), _ = test_utils.get_test_data(num_train=1000,
num_test=200,
input_shape=(10,),
classification=True,
num_classes=num_classes)
y_train = to_categorical(y_train)
return x_train, y_train
def _test_optimizer(optimizer, target=0.75):
x_train, y_train = get_test_data()
# if the input optimizer is not a ClippedOptimizer, wrap the optimizer
# with a default ClippedOptimizer
if optimizer.__class__.__name__ != ClippedOptimizer.__name__:
optimizer = ClippedOptimizer(optimizer, normalization='l2')
model = Sequential()
model.add(Dense(10, input_shape=(x_train.shape[1],)))
model.add(Activation('relu'))
model.add(Dense(y_train.shape[1]))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
assert history.history['acc'][-1] >= target
# Test optimizer serialization and deserialization.
config = optimizers.serialize(optimizer)
optim = optimizers.deserialize(config)
new_config = optimizers.serialize(optim)
assert config == new_config
# Test weights saving and loading.
original_weights = optimizer.weights
model.save('temp.h5')
temp_model = load_model('temp.h5')
loaded_weights = temp_model.optimizer.weights
assert len(original_weights) == len(loaded_weights)
os.remove('temp.h5')
# Test constraints.
model = Sequential()
dense = Dense(10,
input_shape=(x_train.shape[1],),
kernel_constraint=lambda x: 0. * x + 1.,
bias_constraint=lambda x: 0. * x + 2., )
model.add(dense)
model.add(Activation('relu'))
model.add(Dense(y_train.shape[1]))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
model.train_on_batch(x_train[:10], y_train[:10])
kernel, bias = dense.get_weights()
assert_allclose(kernel, 1.)
assert_allclose(bias, 2.)
def _test_no_grad(optimizer):
inp = Input([3])
x = Dense(10)(inp)
x = Lambda(lambda l: 1.0 * K.reshape(K.cast(K.argmax(l), 'float32'), [-1, 1]))(x)
mod = Model(inp, x)
mod.compile(optimizer, 'mse')
with pytest.raises(ValueError):
mod.fit(np.zeros([10, 3]), np.zeros([10, 1], np.float32), batch_size=10, epochs=10)
def test_sgd_clipped_from_string():
sgd = ClippedOptimizer('sgd', normalization='l2')
_test_optimizer(sgd)
_test_no_grad(sgd)
def test_sgd_clipped_max():
sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
sgd = ClippedOptimizer(sgd, normalization='max')
_test_optimizer(sgd)
_test_no_grad(sgd)
def test_sgd_clipped_min_max():
sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
sgd = ClippedOptimizer(sgd, normalization='min_max')
_test_optimizer(sgd)
_test_no_grad(sgd)
def test_sgd_clipped_l1():
sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
sgd = ClippedOptimizer(sgd, normalization='l1')
_test_optimizer(sgd)
_test_no_grad(sgd)
def test_sgd_clipped_l2():
sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
sgd = ClippedOptimizer(sgd, normalization='l2')
_test_optimizer(sgd)
_test_no_grad(sgd)
def test_sgd_clipped_l1_l2():
sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
sgd = ClippedOptimizer(sgd, normalization='l1_l2')
_test_optimizer(sgd)
_test_no_grad(sgd)
def test_sgd_clipped_std():
sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
sgd = ClippedOptimizer(sgd, normalization='std')
_test_optimizer(sgd)
_test_no_grad(sgd)
def test_sgd_clipped_average_l1():
sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
sgd = ClippedOptimizer(sgd, normalization='avg_l1')
_test_optimizer(sgd)
_test_no_grad(sgd)
def test_sgd_clipped_average_l2():
sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
sgd = ClippedOptimizer(sgd, normalization='avg_l2')
_test_optimizer(sgd)
_test_no_grad(sgd)
def test_sgd_clipped_average_l1_l2():
sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
sgd = ClippedOptimizer(sgd, normalization='avg_l1_l2')
_test_optimizer(sgd)
_test_no_grad(sgd)
def test_rmsprop_clipped():
_test_optimizer(optimizers.RMSprop())
_test_optimizer(optimizers.RMSprop(decay=1e-3))
def test_adagrad_clipped():
_test_optimizer(optimizers.Adagrad())
_test_optimizer(optimizers.Adagrad(decay=1e-3))
def test_adadelta_clipped():
_test_optimizer(optimizers.Adadelta())
_test_optimizer(optimizers.Adadelta(decay=1e-3))
def test_adam_clipped():
_test_optimizer(optimizers.Adam())
_test_optimizer(optimizers.Adam(decay=1e-3))
def test_adamax_clipped():
_test_optimizer(optimizers.Adamax())
_test_optimizer(optimizers.Adamax(decay=1e-3))
def test_nadam_clipped():
_test_optimizer(optimizers.Nadam())
def test_adam_amsgrad_clipped():
_test_optimizer(optimizers.Adam(amsgrad=True))
_test_optimizer(optimizers.Adam(amsgrad=True, decay=1e-3))
def test_clipnorm_clipped():
sgd = optimizers.SGD(lr=0.01, momentum=0.9)
sgd = ClippedOptimizer(sgd, normalization='l2', clipnorm=0.5)
_test_optimizer(sgd)
def test_clipvalue_clipped():
sgd = optimizers.SGD(lr=0.01, momentum=0.9, clipvalue=0.5)
sgd = ClippedOptimizer(sgd, normalization='l2')
_test_optimizer(sgd)
def test_wrong_normalization():
with pytest.raises(ValueError):
ClippedOptimizer('sgd', normalization=None)
@pytest.mark.skipif(K.backend() != 'tensorflow', reason='TFOptimizer requires TF backend')
def test_tf_optimizer():
with pytest.raises(NotImplementedError):
import tensorflow as tf
tf_opt = optimizers.TFOptimizer(tf.train.GradientDescentOptimizer(0.1))
ClippedOptimizer(tf_opt, normalization='l2')
def test_add_normalizer():
def dummy_normalization(grad):
norm = K.mean(K.abs(grad)) + K.epsilon()
return norm
func_name = 'dummy'
# add the function to the name list
ClippedOptimizer.set_normalization_function(func_name, dummy_normalization)
# check if it exists in the name list now
name_list = ClippedOptimizer.get_normalization_functions()
assert func_name in name_list
# train a model on this new normalizer
sgd = ClippedOptimizer('sgd', normalization=func_name)
_test_optimizer(sgd)
_test_no_grad(sgd)
if __name__ == '__main__':
pytest.main([__file__])
| [
"keras.optimizers.SGD",
"keras.utils.test_utils.get_test_data",
"keras.optimizers.Adadelta",
"os.remove",
"optimizer.ClippedOptimizer",
"numpy.testing.assert_allclose",
"pytest.main",
"keras.optimizers.Nadam",
"numpy.random.seed",
"keras.models.Model",
"keras.backend.epsilon",
"keras.backend.a... | [((470, 490), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (484, 490), True, 'import numpy as np\n'), ((519, 642), 'keras.utils.test_utils.get_test_data', 'test_utils.get_test_data', ([], {'num_train': '(1000)', 'num_test': '(200)', 'input_shape': '(10,)', 'classification': '(True)', 'num_classes': 'num_classes'}), '(num_train=1000, num_test=200, input_shape=(10,),\n classification=True, num_classes=num_classes)\n', (543, 642), False, 'from keras.utils import test_utils\n'), ((865, 888), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['y_train'], {}), '(y_train)\n', (879, 888), False, 'from keras.utils.np_utils import to_categorical\n'), ((1264, 1276), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1274, 1276), False, 'from keras.models import Sequential, Model, load_model\n'), ((1772, 1803), 'keras.optimizers.serialize', 'optimizers.serialize', (['optimizer'], {}), '(optimizer)\n', (1792, 1803), False, 'from keras import optimizers, Input\n'), ((1816, 1846), 'keras.optimizers.deserialize', 'optimizers.deserialize', (['config'], {}), '(config)\n', (1838, 1846), False, 'from keras import optimizers, Input\n'), ((1864, 1891), 'keras.optimizers.serialize', 'optimizers.serialize', (['optim'], {}), '(optim)\n', (1884, 1891), False, 'from keras import optimizers, Input\n'), ((2049, 2070), 'keras.models.load_model', 'load_model', (['"""temp.h5"""'], {}), "('temp.h5')\n", (2059, 2070), False, 'from keras.models import Sequential, Model, load_model\n'), ((2181, 2201), 'os.remove', 'os.remove', (['"""temp.h5"""'], {}), "('temp.h5')\n", (2190, 2201), False, 'import os\n'), ((2239, 2251), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2249, 2251), False, 'from keras.models import Sequential, Model, load_model\n'), ((2264, 2394), 'keras.layers.core.Dense', 'Dense', (['(10)'], {'input_shape': '(x_train.shape[1],)', 'kernel_constraint': '(lambda x: 0.0 * x + 1.0)', 'bias_constraint': '(lambda x: 
0.0 * x + 2.0)'}), '(10, input_shape=(x_train.shape[1],), kernel_constraint=lambda x: 0.0 *\n x + 1.0, bias_constraint=lambda x: 0.0 * x + 2.0)\n', (2269, 2394), False, 'from keras.layers.core import Dense, Activation, Lambda\n'), ((2800, 2828), 'numpy.testing.assert_allclose', 'assert_allclose', (['kernel', '(1.0)'], {}), '(kernel, 1.0)\n', (2815, 2828), False, 'from numpy.testing import assert_allclose\n'), ((2832, 2858), 'numpy.testing.assert_allclose', 'assert_allclose', (['bias', '(2.0)'], {}), '(bias, 2.0)\n', (2847, 2858), False, 'from numpy.testing import assert_allclose\n'), ((2900, 2910), 'keras.Input', 'Input', (['[3]'], {}), '([3])\n', (2905, 2910), False, 'from keras import optimizers, Input\n'), ((3030, 3043), 'keras.models.Model', 'Model', (['inp', 'x'], {}), '(inp, x)\n', (3035, 3043), False, 'from keras.models import Sequential, Model, load_model\n'), ((3254, 3297), 'optimizer.ClippedOptimizer', 'ClippedOptimizer', (['"""sgd"""'], {'normalization': '"""l2"""'}), "('sgd', normalization='l2')\n", (3270, 3297), False, 'from optimizer import ClippedOptimizer\n'), ((3386, 3438), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.01)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, momentum=0.9, nesterov=True)\n', (3400, 3438), False, 'from keras import optimizers, Input\n'), ((3449, 3491), 'optimizer.ClippedOptimizer', 'ClippedOptimizer', (['sgd'], {'normalization': '"""max"""'}), "(sgd, normalization='max')\n", (3465, 3491), False, 'from optimizer import ClippedOptimizer\n'), ((3584, 3636), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.01)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, momentum=0.9, nesterov=True)\n', (3598, 3636), False, 'from keras import optimizers, Input\n'), ((3647, 3693), 'optimizer.ClippedOptimizer', 'ClippedOptimizer', (['sgd'], {'normalization': '"""min_max"""'}), "(sgd, normalization='min_max')\n", (3663, 3693), False, 'from optimizer import ClippedOptimizer\n'), ((3781, 3833), 
'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.01)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, momentum=0.9, nesterov=True)\n', (3795, 3833), False, 'from keras import optimizers, Input\n'), ((3844, 3885), 'optimizer.ClippedOptimizer', 'ClippedOptimizer', (['sgd'], {'normalization': '"""l1"""'}), "(sgd, normalization='l1')\n", (3860, 3885), False, 'from optimizer import ClippedOptimizer\n'), ((3973, 4025), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.01)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, momentum=0.9, nesterov=True)\n', (3987, 4025), False, 'from keras import optimizers, Input\n'), ((4036, 4077), 'optimizer.ClippedOptimizer', 'ClippedOptimizer', (['sgd'], {'normalization': '"""l2"""'}), "(sgd, normalization='l2')\n", (4052, 4077), False, 'from optimizer import ClippedOptimizer\n'), ((4168, 4220), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.01)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, momentum=0.9, nesterov=True)\n', (4182, 4220), False, 'from keras import optimizers, Input\n'), ((4231, 4275), 'optimizer.ClippedOptimizer', 'ClippedOptimizer', (['sgd'], {'normalization': '"""l1_l2"""'}), "(sgd, normalization='l1_l2')\n", (4247, 4275), False, 'from optimizer import ClippedOptimizer\n'), ((4364, 4416), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.01)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, momentum=0.9, nesterov=True)\n', (4378, 4416), False, 'from keras import optimizers, Input\n'), ((4427, 4469), 'optimizer.ClippedOptimizer', 'ClippedOptimizer', (['sgd'], {'normalization': '"""std"""'}), "(sgd, normalization='std')\n", (4443, 4469), False, 'from optimizer import ClippedOptimizer\n'), ((4565, 4617), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.01)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, momentum=0.9, nesterov=True)\n', (4579, 4617), False, 'from keras import optimizers, Input\n'), ((4628, 4673), 
'optimizer.ClippedOptimizer', 'ClippedOptimizer', (['sgd'], {'normalization': '"""avg_l1"""'}), "(sgd, normalization='avg_l1')\n", (4644, 4673), False, 'from optimizer import ClippedOptimizer\n'), ((4769, 4821), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.01)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, momentum=0.9, nesterov=True)\n', (4783, 4821), False, 'from keras import optimizers, Input\n'), ((4832, 4877), 'optimizer.ClippedOptimizer', 'ClippedOptimizer', (['sgd'], {'normalization': '"""avg_l2"""'}), "(sgd, normalization='avg_l2')\n", (4848, 4877), False, 'from optimizer import ClippedOptimizer\n'), ((4976, 5028), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.01)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, momentum=0.9, nesterov=True)\n', (4990, 5028), False, 'from keras import optimizers, Input\n'), ((5039, 5087), 'optimizer.ClippedOptimizer', 'ClippedOptimizer', (['sgd'], {'normalization': '"""avg_l1_l2"""'}), "(sgd, normalization='avg_l1_l2')\n", (5055, 5087), False, 'from optimizer import ClippedOptimizer\n'), ((6005, 6042), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.01)', 'momentum': '(0.9)'}), '(lr=0.01, momentum=0.9)\n', (6019, 6042), False, 'from keras import optimizers, Input\n'), ((6053, 6108), 'optimizer.ClippedOptimizer', 'ClippedOptimizer', (['sgd'], {'normalization': '"""l2"""', 'clipnorm': '(0.5)'}), "(sgd, normalization='l2', clipnorm=0.5)\n", (6069, 6108), False, 'from optimizer import ClippedOptimizer\n'), ((6176, 6228), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.01)', 'momentum': '(0.9)', 'clipvalue': '(0.5)'}), '(lr=0.01, momentum=0.9, clipvalue=0.5)\n', (6190, 6228), False, 'from keras import optimizers, Input\n'), ((6239, 6280), 'optimizer.ClippedOptimizer', 'ClippedOptimizer', (['sgd'], {'normalization': '"""l2"""'}), "(sgd, normalization='l2')\n", (6255, 6280), False, 'from optimizer import ClippedOptimizer\n'), ((6959, 7034), 
'optimizer.ClippedOptimizer.set_normalization_function', 'ClippedOptimizer.set_normalization_function', (['func_name', 'dummy_normalization'], {}), '(func_name, dummy_normalization)\n', (7002, 7034), False, 'from optimizer import ClippedOptimizer\n'), ((7098, 7144), 'optimizer.ClippedOptimizer.get_normalization_functions', 'ClippedOptimizer.get_normalization_functions', ([], {}), '()\n', (7142, 7144), False, 'from optimizer import ClippedOptimizer\n'), ((7233, 7281), 'optimizer.ClippedOptimizer', 'ClippedOptimizer', (['"""sgd"""'], {'normalization': 'func_name'}), "('sgd', normalization=func_name)\n", (7249, 7281), False, 'from optimizer import ClippedOptimizer\n'), ((7363, 7386), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (7374, 7386), False, 'import pytest\n'), ((1203, 1250), 'optimizer.ClippedOptimizer', 'ClippedOptimizer', (['optimizer'], {'normalization': '"""l2"""'}), "(optimizer, normalization='l2')\n", (1219, 1250), False, 'from optimizer import ClippedOptimizer\n'), ((1291, 1333), 'keras.layers.core.Dense', 'Dense', (['(10)'], {'input_shape': '(x_train.shape[1],)'}), '(10, input_shape=(x_train.shape[1],))\n', (1296, 1333), False, 'from keras.layers.core import Dense, Activation, Lambda\n'), ((1349, 1367), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1359, 1367), False, 'from keras.layers.core import Dense, Activation, Lambda\n'), ((1383, 1406), 'keras.layers.core.Dense', 'Dense', (['y_train.shape[1]'], {}), '(y_train.shape[1])\n', (1388, 1406), False, 'from keras.layers.core import Dense, Activation, Lambda\n'), ((1422, 1443), 'keras.layers.core.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (1432, 1443), False, 'from keras.layers.core import Dense, Activation, Lambda\n'), ((2478, 2496), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2488, 2496), False, 'from keras.layers.core import Dense, Activation, Lambda\n'), ((2512, 2535), 
'keras.layers.core.Dense', 'Dense', (['y_train.shape[1]'], {}), '(y_train.shape[1])\n', (2517, 2535), False, 'from keras.layers.core import Dense, Activation, Lambda\n'), ((2551, 2572), 'keras.layers.core.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (2561, 2572), False, 'from keras.layers.core import Dense, Activation, Lambda\n'), ((2919, 2928), 'keras.layers.core.Dense', 'Dense', (['(10)'], {}), '(10)\n', (2924, 2928), False, 'from keras.layers.core import Dense, Activation, Lambda\n'), ((3087, 3112), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3100, 3112), False, 'import pytest\n'), ((5186, 5206), 'keras.optimizers.RMSprop', 'optimizers.RMSprop', ([], {}), '()\n', (5204, 5206), False, 'from keras import optimizers, Input\n'), ((5228, 5259), 'keras.optimizers.RMSprop', 'optimizers.RMSprop', ([], {'decay': '(0.001)'}), '(decay=0.001)\n', (5246, 5259), False, 'from keras import optimizers, Input\n'), ((5310, 5330), 'keras.optimizers.Adagrad', 'optimizers.Adagrad', ([], {}), '()\n', (5328, 5330), False, 'from keras import optimizers, Input\n'), ((5352, 5383), 'keras.optimizers.Adagrad', 'optimizers.Adagrad', ([], {'decay': '(0.001)'}), '(decay=0.001)\n', (5370, 5383), False, 'from keras import optimizers, Input\n'), ((5435, 5456), 'keras.optimizers.Adadelta', 'optimizers.Adadelta', ([], {}), '()\n', (5454, 5456), False, 'from keras import optimizers, Input\n'), ((5478, 5510), 'keras.optimizers.Adadelta', 'optimizers.Adadelta', ([], {'decay': '(0.001)'}), '(decay=0.001)\n', (5497, 5510), False, 'from keras import optimizers, Input\n'), ((5558, 5575), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {}), '()\n', (5573, 5575), False, 'from keras import optimizers, Input\n'), ((5597, 5625), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {'decay': '(0.001)'}), '(decay=0.001)\n', (5612, 5625), False, 'from keras import optimizers, Input\n'), ((5675, 5694), 'keras.optimizers.Adamax', 'optimizers.Adamax', ([], {}), 
'()\n', (5692, 5694), False, 'from keras import optimizers, Input\n'), ((5716, 5746), 'keras.optimizers.Adamax', 'optimizers.Adamax', ([], {'decay': '(0.001)'}), '(decay=0.001)\n', (5733, 5746), False, 'from keras import optimizers, Input\n'), ((5795, 5813), 'keras.optimizers.Nadam', 'optimizers.Nadam', ([], {}), '()\n', (5811, 5813), False, 'from keras import optimizers, Input\n'), ((5870, 5899), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {'amsgrad': '(True)'}), '(amsgrad=True)\n', (5885, 5899), False, 'from keras import optimizers, Input\n'), ((5921, 5963), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {'amsgrad': '(True)', 'decay': '(0.001)'}), '(amsgrad=True, decay=0.001)\n', (5936, 5963), False, 'from keras import optimizers, Input\n'), ((6349, 6374), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6362, 6374), False, 'import pytest\n'), ((6384, 6427), 'optimizer.ClippedOptimizer', 'ClippedOptimizer', (['"""sgd"""'], {'normalization': 'None'}), "('sgd', normalization=None)\n", (6400, 6427), False, 'from optimizer import ClippedOptimizer\n'), ((6555, 6589), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (6568, 6589), False, 'import pytest\n'), ((6711, 6755), 'optimizer.ClippedOptimizer', 'ClippedOptimizer', (['tf_opt'], {'normalization': '"""l2"""'}), "(tf_opt, normalization='l2')\n", (6727, 6755), False, 'from optimizer import ClippedOptimizer\n'), ((6450, 6461), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (6459, 6461), True, 'from keras import backend as K\n'), ((3130, 3147), 'numpy.zeros', 'np.zeros', (['[10, 3]'], {}), '([10, 3])\n', (3138, 3147), True, 'import numpy as np\n'), ((3149, 3178), 'numpy.zeros', 'np.zeros', (['[10, 1]', 'np.float32'], {}), '([10, 1], np.float32)\n', (3157, 3178), True, 'import numpy as np\n'), ((6663, 6701), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.1)'], {}), '(0.1)\n', (6696, 6701), True, 
'import tensorflow as tf\n'), ((6857, 6868), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (6866, 6868), True, 'from keras import backend as K\n'), ((6842, 6853), 'keras.backend.abs', 'K.abs', (['grad'], {}), '(grad)\n', (6847, 6853), True, 'from keras import backend as K\n'), ((2982, 2993), 'keras.backend.argmax', 'K.argmax', (['l'], {}), '(l)\n', (2990, 2993), True, 'from keras import backend as K\n')] |
# convex optimization for SimRank
import math
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from sklearn import preprocessing
from simrank import adj_mat
# Toy graph: a single directed 2-cycle (1 -> 2 -> 1), given as an edge list.
G = [
    (1, 2),
    (2, 1)
]
A = adj_mat(G)
A = A.toarray()  # adj_mat returns a sparse matrix; densify for numpy ops below
# Column-normalize so every column sums to 1 (column-stochastic transition matrix).
P = preprocessing.normalize(A, 'l1', axis=0)
def f(R, P, c=0.6):
    """SimRank objective for the 2-d case.

    R: n x 1 column vector of the rank-1 factorization S ~ R R^T
    P: the column-normalized adjacency (transition) matrix
    c: SimRank decay factor
    Returns the squared Frobenius norm of the SimRank residual.
    """
    n = P.shape[0]
    # Hoist the repeated triple product: M = P^T (R R^T) P was computed twice.
    M = np.dot(P.T, np.dot(R, np.dot(R.T, P)))
    # NOTE(review): np.diag(M) is the 1-D diagonal *vector*, which numpy
    # broadcasts across the rows of the matrix sum; if a diagonal matrix was
    # intended here it would need np.diag(np.diag(M)) -- confirm.
    y = np.dot(R, R.T) - c * M + c * np.diag(M) - np.eye(n)
    return math.pow(np.linalg.norm(y, "fro"), 2)
@np.vectorize
def compute_z(x, y):
    """Evaluate the objective at the 2-d point (x, y).

    Points outside the first quadrant get the sentinel value -2 so the
    surface plot stays bounded there.
    """
    if min(x, y) < 0:
        return -2
    point = np.matrix([[x], [y]])
    return f(point, P)
def draw_3D_dplot(P):
    """Surface-plot the objective compute_z over a grid of 2-d points.

    P: the normalized adjacency matrix (read indirectly through compute_z,
    which closes over the module-level P -- the parameter itself is unused
    beyond documenting intent).
    """
    fig = plt.figure()
    # fig.gca(projection='3d') was removed in matplotlib >= 3.6;
    # add_subplot is the supported way to obtain a 3-D axes.
    ax = fig.add_subplot(projection='3d')
    # Make data.
    X = np.arange(0, 2, 0.1)
    Y = np.arange(0, 2, 0.1)
    X, Y = np.meshgrid(X, Y)
    Z = compute_z(X, Y)
    # Plot the surface.
    surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
                           linewidth=0, antialiased=False)
    # Customize the z axis.
    ax.set_zlim(-1.01, 1.01)
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    # Add a color bar which maps values to colors.
    fig.colorbar(surf, shrink=0.5, aspect=5)
    plt.show()
def test():
    """Build the toy 2-cycle transition matrix and draw the objective surface."""
    adjacency = adj_mat(G).toarray()
    transition = preprocessing.normalize(adjacency, 'l1', axis=0)
    draw_3D_dplot(transition)
# Script entry point: render the objective surface for the toy graph.
if __name__ == "__main__":
    test()
| [
"numpy.eye",
"numpy.arange",
"matplotlib.ticker.LinearLocator",
"matplotlib.pyplot.figure",
"numpy.dot",
"numpy.linalg.norm",
"numpy.meshgrid",
"sklearn.preprocessing.normalize",
"numpy.matrix",
"matplotlib.ticker.FormatStrFormatter",
"simrank.adj_mat",
"matplotlib.pyplot.show"
] | [((288, 298), 'simrank.adj_mat', 'adj_mat', (['G'], {}), '(G)\n', (295, 298), False, 'from simrank import adj_mat\n'), ((319, 359), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['A', '"""l1"""'], {'axis': '(0)'}), "(A, 'l1', axis=0)\n", (342, 359), False, 'from sklearn import preprocessing\n'), ((848, 860), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (858, 860), True, 'import matplotlib.pyplot as plt\n'), ((921, 941), 'numpy.arange', 'np.arange', (['(0)', '(2)', '(0.1)'], {}), '(0, 2, 0.1)\n', (930, 941), True, 'import numpy as np\n'), ((950, 970), 'numpy.arange', 'np.arange', (['(0)', '(2)', '(0.1)'], {}), '(0, 2, 0.1)\n', (959, 970), True, 'import numpy as np\n'), ((982, 999), 'numpy.meshgrid', 'np.meshgrid', (['X', 'Y'], {}), '(X, Y)\n', (993, 999), True, 'import numpy as np\n'), ((1485, 1495), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1493, 1495), True, 'import matplotlib.pyplot as plt\n'), ((1518, 1528), 'simrank.adj_mat', 'adj_mat', (['G'], {}), '(G)\n', (1525, 1528), False, 'from simrank import adj_mat\n'), ((1557, 1597), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['A', '"""l1"""'], {'axis': '(0)'}), "(A, 'l1', axis=0)\n", (1580, 1597), False, 'from sklearn import preprocessing\n'), ((630, 639), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (636, 639), True, 'import numpy as np\n'), ((660, 684), 'numpy.linalg.norm', 'np.linalg.norm', (['y', '"""fro"""'], {}), "(y, 'fro')\n", (674, 684), True, 'import numpy as np\n'), ((775, 792), 'numpy.matrix', 'np.matrix', (['[x, y]'], {}), '([x, y])\n', (784, 792), True, 'import numpy as np\n'), ((1302, 1319), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(10)'], {}), '(10)\n', (1315, 1319), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((1354, 1381), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (1372, 1381), False, 'from matplotlib.ticker import 
LinearLocator, FormatStrFormatter\n'), ((504, 518), 'numpy.dot', 'np.dot', (['R', 'R.T'], {}), '(R, R.T)\n', (510, 518), True, 'import numpy as np\n'), ((547, 561), 'numpy.dot', 'np.dot', (['R.T', 'P'], {}), '(R.T, P)\n', (553, 561), True, 'import numpy as np\n'), ((610, 624), 'numpy.dot', 'np.dot', (['R.T', 'P'], {}), '(R.T, P)\n', (616, 624), True, 'import numpy as np\n')] |
"""Load one aligned face image, run it through a pretrained Keras model
(nn4.small2.v1) and print the resulting embedding."""
import sys

from keras.models import load_model
import cv2
import numpy as np
from numpy import genfromtxt
import pandas as pd
import tensorflow as tf
import utils as K

# Print full arrays instead of truncating.  np.nan is rejected as a
# `threshold` by modern numpy; sys.maxsize is the documented replacement.
np.set_printoptions(threshold=sys.maxsize)

# Candidate input images (only path2 is used below).
path1 = '/Users/victor_sy_wang/Developer/ML/openface/images/examples-aligned/lennon-1.png'
path2 = '/Users/victor_sy_wang/Developer/ML/openface/data/lfw/dlib-affine-sz/Aaron_Eckhart/Aaron_Eckhart_0001.png'
path3 = '/Users/victor_sy_wang/Developer/ML/keras-facenet/data/dlib-affine-sz/Abel_Pacheco/Abel_Pacheco_0001.png'

img = cv2.imread(path2, 1)  # flag 1: load as 3-channel color (BGR)
img = img[..., ::-1]       # reverse channel axis: BGR -> RGB
# HWC -> CHW layout, scaled to [0, 1]
img = np.around(np.transpose(img, (2, 0, 1)) / 255.0, decimals=12)

model = load_model('./model/nn4.small2.v1.h5')
x_train = np.array([img])  # add batch dimension
y = model.predict_on_batch(x_train)
print(y)
| [
"keras.models.load_model",
"numpy.array",
"numpy.transpose",
"cv2.imread",
"numpy.set_printoptions"
] | [((158, 195), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.nan'}), '(threshold=np.nan)\n', (177, 195), True, 'import numpy as np\n'), ((518, 538), 'cv2.imread', 'cv2.imread', (['path2', '(1)'], {}), '(path2, 1)\n', (528, 538), False, 'import cv2\n'), ((631, 669), 'keras.models.load_model', 'load_model', (['"""./model/nn4.small2.v1.h5"""'], {}), "('./model/nn4.small2.v1.h5')\n", (641, 669), False, 'from keras.models import load_model\n'), ((681, 696), 'numpy.array', 'np.array', (['[img]'], {}), '([img])\n', (689, 696), True, 'import numpy as np\n'), ((575, 603), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (587, 603), True, 'import numpy as np\n')] |
import numpy as np
import torch
import itertools
from torchvision import datasets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from skimage.measure import compare_psnr, compare_ssim
from skimage.restoration import denoise_nl_means, estimate_sigma
import skimage.io as sio
from glow.glow import Glow
from dcgan.dcgan import Generator
import json
import os
import warnings
warnings.filterwarnings("ignore")  # suppress all warnings globally
def solveInpainting(args):
    """Dispatch to the inpainting solver selected by args.prior.

    args.prior must be one of 'glow', 'dcgan' or 'glowred'.
    Raises ValueError for any other value.
    """
    if args.prior == 'glow':
        GlowInpaint(args)
    elif args.prior == 'dcgan':
        GANInpaint(args)
    elif args.prior == 'glowred':
        GlowREDInpaint(args)
    else:
        # Raising a bare string is invalid in Python 3 (it raises TypeError);
        # raise a proper exception instead.
        raise ValueError("prior not defined correctly")
def np_to_torch(img_np):
    """Convert an image from numpy.array to a float CUDA torch.Tensor.

    From C x W x H [0..1] to C x W x H [0..1].
    """
    as_tensor = torch.from_numpy(img_np)
    return as_tensor.float().cuda()
def torch_to_np(img_torch):
    """Convert an image from torch.Tensor to np.array.

    From 1 x C x W x H [0..1] to C x W x H [0..1].
    """
    host_tensor = img_torch.detach().cpu()
    return host_tensor.numpy()  # original left a reminder: add [0] later
def Denoiser(d_name, sigma_f, x_f):
    """Run a classical denoiser over a torch image batch; return a torch tensor.

    d_name : denoiser identifier; only 'nlm' (non-local means) is implemented.
    sigma_f: noise level (currently unused; sigma is estimated per image).
    x_f    : torch tensor holding the images to denoise.

    Raises NotImplementedError for unknown d_name values.
    """
    x = torch_to_np(x_f)
    if d_name == 'nlm':
        patch_kw = dict(patch_size=5,       # 5x5 patches
                        patch_distance=6,   # 13x13 search area
                        multichannel=True)
        # Estimate the noise level of each image independently, then denoise.
        # NOTE(review): only x[0] and x[1] are processed, so this assumes the
        # batch holds exactly two images -- confirm against callers.
        s0 = np.mean(estimate_sigma(x[0], multichannel=True))
        s1 = np.mean(estimate_sigma(x[1], multichannel=True))
        x0 = denoise_nl_means(x[0], h=s0, sigma=s0, fast_mode=False, **patch_kw)
        x1 = denoise_nl_means(x[1], h=s1, sigma=s1, fast_mode=False, **patch_kw)
        x = np.stack([x0, x1])
    else:
        # Raising a bare string is a TypeError in Python 3; raise a real exception.
        raise NotImplementedError("other denoisers not implemented")
    x_f = np_to_torch(x)
    return x_f
import itertools
from pprint import pprint
# NOTE(review): leftover scratch/demo code -- `itertools` is imported a second
# time here, `pprint` is never used, and `result` is not referenced anywhere
# else in the file; consider removing this block.
inputdata = [
    ['a', 'b', 'c'],
    ['d'],
    ['e', 'f'],
]
result = list(itertools.product(*inputdata))
def GlowREDInpaint(args):
    """Inpaint masked test images with a Glow prior plus RED regularization.

    Sweeps the cartesian product of args.gamma x args.alpha x args.beta.
    For each hyperparameter triple, the latent z of a pretrained Glow model
    is optimized so the generated image matches the unmasked pixels, with an
    ADMM/RED-style penalty coupling the generated image to a denoised
    auxiliary variable x_f.  PSNR is reported per sweep; images and metrics
    are optionally written under ./results/<dataset>/<experiment>.
    """
    hyperparams = [args.gamma, args.alpha, args.beta]
    loopOver = list(itertools.product(*hyperparams))
    for gamma, alpha, beta in loopOver:
        skip_to_next = False  # flag to skip to next loop if recovery fails due to instability
        n = args.size * args.size * 3
        modeldir = "./trained_models/%s/glow" % args.model
        test_folder = "./test_images/%s" % args.dataset
        save_path = "./results/%s/%s" % (args.dataset, args.experiment)
        # loading dataset
        trans = transforms.Compose([transforms.Resize((args.size, args.size)), transforms.ToTensor()])
        test_dataset = datasets.ImageFolder(test_folder, transform=trans)
        test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batchsize, drop_last=False,
                                                      shuffle=False)
        # loading glow configurations
        config_path = modeldir + "/configs.json"
        with open(config_path, 'r') as f:
            configs = json.load(f)
        # regularization weight on ||z||
        gamma = torch.tensor(gamma, requires_grad=True, dtype=torch.float, device=args.device)
        # getting test images
        Original = []
        Recovered = []
        Masked = []
        Mask = []
        Residual_Curve = []
        for i, data in enumerate(test_dataloader):
            # getting batch of data
            x_test = data[0]
            x_test = x_test.clone().to(device=args.device)
            n_test = x_test.size()[0]
            assert n_test == args.batchsize, "please make sure that no. of images are evenly divided by batchsize"
            # generate mask (one copy per image in the batch)
            mask = gen_mask(args.inpaint_method, args.size, args.mask_size)
            mask = np.array([mask for i in range(n_test)])
            mask = mask.reshape([n_test, 1, args.size, args.size])
            mask = torch.tensor(mask, dtype=torch.float, requires_grad=False, device=args.device)
            # loading glow model
            glow = Glow((3, args.size, args.size),
                        K=configs["K"], L=configs["L"],
                        coupling=configs["coupling"],
                        n_bits_x=configs["n_bits_x"],
                        nn_init_last_zeros=configs["last_zeros"],
                        device=args.device)
            glow.load_state_dict(torch.load(modeldir + "/glowmodel.pt"))
            glow.eval()
            # making a forward to record shapes of z's for reverse pass
            _ = glow(glow.preprocess(torch.zeros_like(x_test)))
            # initializing z from Gaussian
            if args.init_strategy == "random":
                z_sampled = np.random.normal(0, args.init_std, [n_test, n])
                z_sampled = torch.tensor(z_sampled, requires_grad=True, dtype=torch.float, device=args.device)
            # initializing z from image with noise filled only in masked region
            elif args.init_strategy == "noisy_filled":
                x_noisy_filled = x_test.clone().detach()
                noise = np.random.normal(0, 0.2, x_noisy_filled.size())
                noise = torch.tensor(noise, dtype=torch.float, device=args.device)
                noise = noise * (1 - mask)
                x_noisy_filled = x_noisy_filled + noise
                x_noisy_filled = torch.clamp(x_noisy_filled, 0, 1)
                z, _, _ = glow(x_noisy_filled - 0.5)
                z = glow.flatten_z(z).clone().detach()
                z_sampled = z.clone().detach().requires_grad_(True)
            # initializing z from image with masked region inverted
            elif args.init_strategy == "inverted_filled":
                x_inverted_filled = x_test.clone().detach()
                missing_x = x_inverted_filled.clone()
                missing_x = missing_x.data.cpu().numpy()
                missing_x = missing_x[:, :, ::-1, ::-1]
                missing_x = torch.tensor(missing_x.copy(), dtype=torch.float, device=args.device)
                missing_x = (1 - mask) * missing_x
                x_inverted_filled = x_inverted_filled * mask
                x_inverted_filled = x_inverted_filled + missing_x
                z, _, _ = glow(x_inverted_filled - 0.5)
                z = glow.flatten_z(z).clone().detach()
                z_sampled = z.clone().detach().requires_grad_(True)
            # initializing z from masked image ( masked region as zeros )
            elif args.init_strategy == "black_filled":
                x_black_filled = x_test.clone().detach()
                x_black_filled = mask * x_black_filled
                x_black_filled = x_black_filled * mask
                z, _, _ = glow(x_black_filled - 0.5)
                z = glow.flatten_z(z).clone().detach()
                z_sampled = z.clone().detach().requires_grad_(True)
            # initializing z from noisy complete image
            elif args.init_strategy == "noisy":
                x_noisy = x_test.clone().detach()
                noise = np.random.normal(0, 0.05, x_noisy.size())
                noise = torch.tensor(noise, dtype=torch.float, device=args.device)
                x_noisy = x_noisy + noise
                x_noisy = torch.clamp(x_noisy, 0, 1)
                z, _, _ = glow(x_noisy - 0.5)
                z = glow.flatten_z(z).clone().detach()
                z_sampled = z.clone().detach().requires_grad_(True)
            # initializing z from image with only noise in masked region
            elif args.init_strategy == "only_noise_filled":
                x_noisy_filled = x_test.clone().detach()
                noise = np.random.normal(0, 0.2, x_noisy_filled.size())
                noise = torch.tensor(noise, dtype=torch.float, device=args.device)
                noise = noise * (1 - mask)
                x_noisy_filled = mask * x_noisy_filled + noise
                x_noisy_filled = torch.clamp(x_noisy_filled, 0, 1)
                z, _, _ = glow(x_noisy_filled - 0.5)
                z = glow.flatten_z(z).clone().detach()
                z_sampled = z.clone().detach().requires_grad_(True)
            else:
                # was: raise "..." -- raising a string is a TypeError in Python 3
                raise ValueError("Initialization strategy not defined")
            # selecting optimizer
            if args.optim == "adam":
                optimizer = torch.optim.Adam([z_sampled], lr=args.lr, )
            elif args.optim == "lbfgs":
                optimizer = torch.optim.LBFGS([z_sampled], lr=args.lr, )
            # metrics to record over training
            psnr_t = torch.nn.MSELoss().to(device=args.device)
            residual = []
            x_f = (x_test * mask).clone()   # RED auxiliary variable
            u = torch.zeros_like(x_test)   # scaled dual variable
            # running optimizer steps
            for t in range(args.steps):
                def closure():
                    optimizer.zero_grad()
                    z_unflat = glow.unflatten_z(z_sampled, clone=False)
                    x_gen = glow(z_unflat, reverse=True, reverse_clone=False)
                    x_gen = glow.postprocess(x_gen, floor_clamp=False)
                    x_masked_test = x_test * mask
                    x_masked_gen = x_gen * mask
                    # residual_t is read after optimizer.step(), hence global
                    global residual_t
                    residual_t = ((x_masked_gen - x_masked_test) ** 2).view(len(x_masked_test), -1).sum(dim=1).mean()
                    z_reg_loss_t = gamma * z_sampled.norm(dim=1).mean()
                    residual_x = beta * ((x_gen - (x_f - u)) ** 2).view(len(x_gen), -1).sum(dim=1).mean()
                    loss_t = residual_t + z_reg_loss_t + residual_x
                    psnr = psnr_t(x_test, x_gen)
                    psnr = 10 * np.log10(1 / psnr.item())
                    print("\rAt step=%0.3d|loss=%0.4f|residual_t=%0.4f|residual_x=%0.4f|z_reg=%0.5f|psnr=%0.3f" % (
                        t, loss_t.item(), residual_t.item(), residual_x.item(), z_reg_loss_t.item(), psnr), end="\r")
                    loss_t.backward()
                    return loss_t

                def denoiser_step(x_f, u):
                    # RED/ADMM update of the auxiliary variable and the dual
                    z_unflat = glow.unflatten_z(z_sampled, clone=False)
                    x_gen = glow(z_unflat, reverse=True, reverse_clone=False).detach()
                    x_gen = glow.postprocess(x_gen, floor_clamp=False)
                    x_f = 1 / (beta + alpha) * (beta * Denoiser(args.denoiser, args.sigma_f, x_f) + alpha * (x_gen + u))
                    u = u + x_gen - x_f
                    return x_f, u

                optimizer.step(closure)
                residual.append(residual_t.item())
                if t % args.update_iter == args.update_iter - 1:
                    x_f, u = denoiser_step(x_f, u)
            if skip_to_next:
                break
            # getting recovered and true images
            x_test_np = x_test.data.cpu().numpy().transpose(0, 2, 3, 1)
            z_unflat = glow.unflatten_z(z_sampled, clone=False)
            x_gen = glow(z_unflat, reverse=True, reverse_clone=False)
            x_gen = glow.postprocess(x_gen, floor_clamp=False)
            x_gen_np = x_gen.data.cpu().numpy().transpose(0, 2, 3, 1)
            x_gen_np = np.clip(x_gen_np, 0, 1)
            mask_np = mask.data.cpu().numpy()
            x_masked_test = x_test * mask
            x_masked_test_np = x_masked_test.data.cpu().numpy().transpose(0, 2, 3, 1)
            x_masked_test_np = np.clip(x_masked_test_np, 0, 1)
            Original.append(x_test_np)
            Recovered.append(x_gen_np)
            Masked.append(x_masked_test_np)
            Residual_Curve.append(residual)
            Mask.append(mask_np)
            # freeing up memory for second loop
            glow.zero_grad()
            optimizer.zero_grad()
            del x_test, x_gen, optimizer, psnr_t, z_sampled, glow, mask,
            torch.cuda.empty_cache()
            print("\nbatch completed")
        if skip_to_next:
            print("\nskipping current loop due to instability or user triggered quit")
            continue
        # metric evaluations
        Original = np.vstack(Original)
        Recovered = np.vstack(Recovered)
        Masked = np.vstack(Masked)
        Mask = np.vstack(Mask)
        psnr = [compare_psnr(x, y) for x, y in zip(Original, Recovered)]
        # print performance analysis
        printout = "+-" * 10 + "%s" % args.dataset + "-+" * 10 + "\n"
        printout = printout + "\t n_test = %d\n" % len(Recovered)
        printout = printout + "\t inpaint_method = %s\n" % args.inpaint_method
        printout = printout + "\t mask_size = %0.3f\n" % args.mask_size
        printout = printout + "\t update_iter = %0.4f\n" % args.update_iter
        printout = printout + "\t gamma = %0.6f\n" % gamma
        printout = printout + "\t alpha = %0.6f\n" % alpha
        printout = printout + "\t beta = %0.6f\n" % beta
        printout = printout + "\t PSNR = %0.3f\n" % np.mean(psnr)
        print(printout)
        if args.save_metrics_text:
            with open("%s_inpaint_glow_results.txt" % args.dataset, "a") as f:
                f.write('\n' + printout)
        # saving images
        if args.save_results:
            gamma = gamma.item()
            file_names = [name[0].split("/")[-1].split(".")[0] for name in test_dataset.samples]
            if args.init_strategy == 'random':
                save_path = save_path + "/inpaint_%s_masksize_%0.4f_updateiter_%0.4f_gamma_%0.6f_alpha_%0.6f_beta_%0.6f_steps_%d_lr_%0.3f_init_std_%0.2f_optim_%s"
                save_path = save_path % (
                    args.inpaint_method, args.mask_size, args.update_iter, gamma, alpha, beta, args.steps, args.lr, args.init_std, args.optim)
            else:
                # BUGFIX: this branch formats args.init_strategy (a str); the
                # original used %0.2f here, which raised TypeError at runtime.
                save_path = save_path + "/inpaint_%s_masksize_%0.4f_updateiter_%0.4f_gamma_%0.6f_alpha_%0.6f_beta_%0.6f_steps_%d_lr_%0.3f_init_%s_optim_%s"
                save_path = save_path % (
                    args.inpaint_method, args.mask_size, args.update_iter, gamma, alpha, beta, args.steps, args.lr, args.init_strategy, args.optim)
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path_1 = save_path + "_1"
                if not os.path.exists(save_path_1):
                    os.makedirs(save_path_1)
                    save_path = save_path_1
                else:
                    save_path_2 = save_path + "_2"
                    if not os.path.exists(save_path_2):
                        os.makedirs(save_path_2)
                    save_path = save_path_2
            _ = [sio.imsave(save_path + "/" + name + "_recov.jpg", x) for x, name in zip(Recovered, file_names)]
            _ = [sio.imsave(save_path + "/" + name + "_masked.jpg", x) for x, name in zip(Masked, file_names)]
            Residual_Curve = np.array(Residual_Curve).mean(axis=0)
            np.save(save_path + "/" + "residual_curve.npy", Residual_Curve)
            np.save(save_path + "/original.npy", Original)
            np.save(save_path + "/recovered.npy", Recovered)
            np.save(save_path + "/mask.npy", Mask)
            np.save(save_path + "/masked.npy", Masked)
def GlowInpaint(args):
    """Inpaint masked test images by optimizing the latent z of a Glow prior.

    For each gamma in args.gamma, recovers every batch of test images by
    minimizing masked-pixel residual + gamma * ||z||, reports PSNR, and
    optionally saves images/metrics under ./results/<dataset>/<experiment>.
    """
    # NOTE(review): zip(args.gamma) yields 1-tuples, so `gamma` below is a
    # tuple; torch.tensor() and %-formatting accept it, but unpacking it
    # explicitly would be clearer -- confirm before changing.
    loopOver = zip(args.gamma)
    for gamma in loopOver:
        skip_to_next = False  # flag to skip to next loop if recovery fails due to instability
        n = args.size*args.size*3
        modeldir = "./trained_models/%s/glow"%args.model
        test_folder = "./test_images/%s"%args.dataset
        save_path = "./results/%s/%s"%(args.dataset,args.experiment)
        # loading dataset
        trans = transforms.Compose([transforms.Resize((args.size,args.size)),transforms.ToTensor()])
        test_dataset = datasets.ImageFolder(test_folder, transform=trans)
        test_dataloader = torch.utils.data.DataLoader(test_dataset,batch_size=args.batchsize,drop_last=False,shuffle=False)
        # loading glow configurations
        config_path = modeldir+"/configs.json"
        with open(config_path, 'r') as f:
            configs = json.load(f)
        # regularization weight on ||z||
        gamma = torch.tensor(gamma, requires_grad=True, dtype=torch.float, device=args.device)
        # getting test images
        Original = []
        Recovered = []
        Masked = []
        Mask = []
        Residual_Curve = []
        for i, data in enumerate(test_dataloader):
            # getting batch of data
            x_test = data[0]
            x_test = x_test.clone().to(device=args.device)
            n_test = x_test.size()[0]
            assert n_test == args.batchsize, "please make sure that no. of images are evenly divided by batchsize"
            # generate mask (one copy per image in the batch)
            mask = gen_mask(args.inpaint_method,args.size,args.mask_size)
            mask = np.array([mask for i in range(n_test)])
            mask = mask.reshape([n_test,1,args.size,args.size])
            mask = torch.tensor(mask, dtype=torch.float, requires_grad=False, device=args.device)
            # loading glow model
            glow = Glow((3,args.size,args.size),
                        K=configs["K"],L=configs["L"],
                        coupling=configs["coupling"],
                        n_bits_x=configs["n_bits_x"],
                        nn_init_last_zeros=configs["last_zeros"],
                        device=args.device)
            glow.load_state_dict(torch.load(modeldir+"/glowmodel.pt"))
            glow.eval()
            # making a forward to record shapes of z's for reverse pass
            _ = glow(glow.preprocess(torch.zeros_like(x_test)))
            # initializing z from Gaussian
            if args.init_strategy == "random":
                z_sampled = np.random.normal(0,args.init_std,[n_test,n])
                z_sampled = torch.tensor(z_sampled,requires_grad=True,dtype=torch.float,device=args.device)
            # initializing z from image with noise filled only in masked region
            elif args.init_strategy == "noisy_filled":
                x_noisy_filled = x_test.clone().detach()
                noise = np.random.normal(0,0.2, x_noisy_filled.size())
                noise = torch.tensor(noise,dtype=torch.float,device=args.device)
                noise = noise * (1-mask)
                x_noisy_filled = x_noisy_filled + noise
                x_noisy_filled = torch.clamp(x_noisy_filled, 0, 1)
                z, _, _ = glow(x_noisy_filled - 0.5)
                z = glow.flatten_z(z).clone().detach()
                z_sampled = z.clone().detach().requires_grad_(True)
            # initializing z from image with masked region inverted
            elif args.init_strategy == "inverted_filled":
                x_inverted_filled = x_test.clone().detach()
                missing_x = x_inverted_filled.clone()
                missing_x = missing_x.data.cpu().numpy()
                missing_x = missing_x[:,:,::-1,::-1]
                missing_x = torch.tensor(missing_x.copy(),dtype=torch.float,device=args.device)
                missing_x = (1-mask)*missing_x
                x_inverted_filled = x_inverted_filled * mask
                x_inverted_filled = x_inverted_filled + missing_x
                z, _, _ = glow(x_inverted_filled - 0.5)
                z = glow.flatten_z(z).clone().detach()
                z_sampled = z.clone().detach().requires_grad_(True)
            # initializing z from masked image ( masked region as zeros )
            elif args.init_strategy == "black_filled":
                x_black_filled = x_test.clone().detach()
                x_black_filled = mask * x_black_filled
                x_black_filled = x_black_filled * mask
                z, _, _ = glow(x_black_filled - 0.5)
                z = glow.flatten_z(z).clone().detach()
                z_sampled = z.clone().detach().requires_grad_(True)
            # initializing z from noisy complete image
            elif args.init_strategy == "noisy":
                x_noisy = x_test.clone().detach()
                noise = np.random.normal(0,0.05, x_noisy.size())
                noise = torch.tensor(noise,dtype=torch.float,device=args.device)
                x_noisy = x_noisy + noise
                x_noisy = torch.clamp(x_noisy, 0, 1)
                z, _, _ = glow(x_noisy - 0.5)
                z = glow.flatten_z(z).clone().detach()
                z_sampled = z.clone().detach().requires_grad_(True)
            # initializing z from image with only noise in masked region
            elif args.init_strategy == "only_noise_filled":
                x_noisy_filled = x_test.clone().detach()
                noise = np.random.normal(0,0.2, x_noisy_filled.size())
                noise = torch.tensor(noise,dtype=torch.float,device=args.device)
                noise = noise * (1-mask)
                x_noisy_filled = mask * x_noisy_filled + noise
                x_noisy_filled = torch.clamp(x_noisy_filled, 0, 1)
                z, _, _ = glow(x_noisy_filled - 0.5)
                z = glow.flatten_z(z).clone().detach()
                z_sampled = z.clone().detach().requires_grad_(True)
            else:
                # was: raise "..." -- raising a string is a TypeError in Python 3
                raise ValueError("Initialization strategy not defined")
            # selecting optimizer
            if args.optim == "adam":
                optimizer = torch.optim.Adam([z_sampled], lr=args.lr,)
            elif args.optim == "lbfgs":
                optimizer = torch.optim.LBFGS([z_sampled], lr=args.lr,)
            # metrics to record over training
            psnr_t = torch.nn.MSELoss().to(device=args.device)
            residual = []
            # running optimizer steps
            for t in range(args.steps):
                def closure():
                    optimizer.zero_grad()
                    z_unflat = glow.unflatten_z(z_sampled, clone=False)
                    x_gen = glow(z_unflat, reverse=True, reverse_clone=False)
                    x_gen = glow.postprocess(x_gen,floor_clamp=False)
                    x_masked_test = x_test * mask
                    x_masked_gen = x_gen * mask
                    # residual_t is read after optimizer.step(), hence global
                    global residual_t
                    residual_t = ((x_masked_gen - x_masked_test)**2).view(len(x_masked_test),-1).sum(dim=1).mean()
                    z_reg_loss_t= gamma*z_sampled.norm(dim=1).mean()
                    loss_t = residual_t + z_reg_loss_t
                    psnr = psnr_t(x_test, x_gen)
                    psnr = 10 * np.log10(1 / psnr.item())
                    print("\rAt step=%0.3d|loss=%0.4f|residual=%0.4f|z_reg=%0.5f|psnr=%0.3f"%(t,loss_t.item(),residual_t.item(),z_reg_loss_t.item(), psnr),end="\r")
                    loss_t.backward()
                    return loss_t
                optimizer.step(closure)
                residual.append(residual_t.item())
            if skip_to_next:
                break
            # getting recovered and true images
            x_test_np = x_test.data.cpu().numpy().transpose(0,2,3,1)
            z_unflat = glow.unflatten_z(z_sampled, clone=False)
            x_gen = glow(z_unflat, reverse=True, reverse_clone=False)
            x_gen = glow.postprocess(x_gen,floor_clamp=False)
            x_gen_np = x_gen.data.cpu().numpy().transpose(0,2,3,1)
            x_gen_np = np.clip(x_gen_np,0,1)
            mask_np = mask.data.cpu().numpy()
            x_masked_test = x_test * mask
            x_masked_test_np = x_masked_test.data.cpu().numpy().transpose(0,2,3,1)
            x_masked_test_np = np.clip(x_masked_test_np,0,1)
            Original.append(x_test_np)
            Recovered.append(x_gen_np)
            Masked.append(x_masked_test_np)
            Residual_Curve.append(residual)
            Mask.append(mask_np)
            # freeing up memory for second loop
            glow.zero_grad()
            optimizer.zero_grad()
            del x_test, x_gen, optimizer, psnr_t, z_sampled, glow, mask,
            torch.cuda.empty_cache()
            print("\nbatch completed")
        if skip_to_next:
            print("\nskipping current loop due to instability or user triggered quit")
            continue
        # metric evaluations
        Original = np.vstack(Original)
        Recovered = np.vstack(Recovered)
        Masked = np.vstack(Masked)
        Mask = np.vstack(Mask)
        psnr = [compare_psnr(x, y) for x,y in zip(Original, Recovered)]
        # print performance analysis
        printout = "+-"*10 + "%s"%args.dataset + "-+"*10 + "\n"
        printout = printout + "\t n_test = %d\n"%len(Recovered)
        printout = printout + "\t inpaint_method = %s\n"%args.inpaint_method
        printout = printout + "\t mask_size = %0.3f\n"%args.mask_size
        printout = printout + "\t gamma = %0.6f\n"%gamma
        printout = printout + "\t PSNR = %0.3f\n"%np.mean(psnr)
        print(printout)
        if args.save_metrics_text:
            with open("%s_inpaint_glow_results.txt"%args.dataset,"a") as f:
                f.write('\n' + printout)
        # saving images
        if args.save_results:
            gamma = gamma.item()
            file_names = [name[0].split("/")[-1].split(".")[0] for name in test_dataset.samples]
            if args.init_strategy == 'random':
                save_path = save_path + "/inpaint_%s_masksize_%0.4f_gamma_%0.6f_steps_%d_lr_%0.3f_init_std_%0.2f_optim_%s"
                save_path = save_path%(args.inpaint_method,args.mask_size,gamma,args.steps,args.lr,args.init_std,args.optim)
            else:
                save_path = save_path + "/inpaint_%s_masksize_%0.4f_gamma_%0.6f_steps_%d_lr_%0.3f_init_%s_optim_%s"
                save_path = save_path%(args.inpaint_method,args.mask_size,gamma,args.steps,args.lr,args.init_strategy,args.optim)
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path_1 = save_path + "_1"
                if not os.path.exists(save_path_1):
                    os.makedirs(save_path_1)
                    save_path = save_path_1
                else:
                    save_path_2 = save_path + "_2"
                    if not os.path.exists(save_path_2):
                        os.makedirs(save_path_2)
                    save_path = save_path_2
            _ = [sio.imsave(save_path+"/"+name+"_recov.jpg", x) for x,name in zip(Recovered,file_names)]
            _ = [sio.imsave(save_path+"/"+name+"_masked.jpg", x) for x,name in zip(Masked,file_names)]
            Residual_Curve = np.array(Residual_Curve).mean(axis=0)
            np.save(save_path+"/"+"residual_curve.npy", Residual_Curve)
            np.save(save_path+"/original.npy", Original)
            np.save(save_path+"/recovered.npy", Recovered)
            np.save(save_path+"/mask.npy", Mask)
            np.save(save_path+"/masked.npy", Masked)
def GANInpaint(args):
    """Inpaint masked test images with a pretrained DCGAN generator.

    For each regularization weight in ``args.gamma``: load the test image
    folder, mask each batch with ``gen_mask(args.inpaint_method, ...)``,
    and recover the images by optimizing a latent code ``z`` so that the
    generator output matches the visible (unmasked) pixels, with an L2
    penalty of weight gamma on ``z``. Prints PSNR-based metrics and, when
    ``args.save_results`` is set, writes recovered/masked images and
    residual curves under ``./results/<dataset>/<experiment>``.

    Only ``args.init_strategy == "random"`` is supported; anything else
    raises NotImplementedError.
    """
    loopOver = zip(args.gamma)
    for gamma in loopOver:
        n = 100  # dimensionality of the DCGAN latent code
        modeldir = "./trained_models/%s/dcgan"%args.model
        test_folder = "./test_images/%s"%args.dataset
        save_path = "./results/%s/%s"%(args.dataset,args.experiment)
        # loading dataset
        trans = transforms.Compose([transforms.Resize((args.size,args.size)),transforms.ToTensor()])
        test_dataset = datasets.ImageFolder(test_folder, transform=trans)
        test_dataloader = torch.utils.data.DataLoader(test_dataset,batch_size=args.batchsize,drop_last=False,shuffle=False)
        # regularizor weight (as a tensor; gamma here is a 1-tuple from zip)
        gamma = torch.tensor(gamma, requires_grad=True, dtype=torch.float, device=args.device)
        # accumulators over all batches
        Original = []
        Recovered = []
        Masked = []
        Mask = []
        Residual_Curve = []
        for i, data in enumerate(test_dataloader):
            # getting batch of data
            x_test = data[0]
            x_test = x_test.clone().to(device=args.device)
            n_test = x_test.size()[0]
            assert n_test == args.batchsize, "please make sure that no. of images are evenly divided by batchsize"
            # generate one mask and replicate it across the batch
            mask = gen_mask(args.inpaint_method,args.size,args.mask_size)
            mask = np.array([mask for i in range(n_test)])
            mask = mask.reshape([n_test,1,args.size,args.size])
            mask = torch.tensor(mask,dtype=torch.float,requires_grad=False, device=args.device)
            # loading dcgan model
            generator = Generator(ngpu=1).to(device=args.device)
            generator.load_state_dict(torch.load(modeldir+'/dcgan_G.pt'))
            generator.eval()
            # initializing latent code z from Gaussian
            if args.init_strategy == "random":
                z_sampled = np.random.normal(0,args.init_std,[n_test,n,1,1])
                z_sampled = torch.tensor(z_sampled,requires_grad=True,dtype=torch.float,device=args.device)
            else:
                # NOTE: raising a plain string is itself a TypeError in
                # Python 3 -- raise a proper exception with the message.
                raise NotImplementedError("only random initialization strategy is supported for inpainting in dcgan")
            # selecting optimizer
            if args.optim == "adam":
                optimizer = torch.optim.Adam([z_sampled], lr=args.lr,)
            elif args.optim == "lbfgs":
                optimizer = torch.optim.LBFGS([z_sampled], lr=args.lr,)
            # metrics to record over training
            psnr_t = torch.nn.MSELoss().to(device=args.device)
            residual = []
            # running optimizer steps
            for t in range(args.steps):
                # closure is re-evaluated by LBFGS; residual_t is made
                # global so the outer loop can record it after the step
                def closure():
                    optimizer.zero_grad()
                    x_gen = generator(z_sampled)
                    x_gen = (x_gen + 1)/2  # generator output is in [-1,1]; map to [0,1]
                    x_masked_test = x_test * mask
                    x_masked_gen = x_gen * mask
                    global residual_t
                    residual_t = ((x_masked_gen - x_masked_test)**2).view(len(x_masked_test),-1).sum(dim=1).mean()
                    z_reg_loss_t= gamma*z_sampled.norm(dim=1).mean()
                    loss_t = residual_t + z_reg_loss_t
                    psnr = psnr_t(x_test, x_gen)
                    psnr = 10 * np.log10(1 / psnr.item())
                    print("\rAt step=%0.3d|loss=%0.4f|residual=%0.4f|z_reg=%0.5f|psnr=%0.3f"%(t,loss_t.item(),residual_t.item(),z_reg_loss_t.item(), psnr),end="\r")
                    loss_t.backward()
                    return loss_t
                optimizer.step(closure)
                residual.append(residual_t.item())
            # getting recovered and true images (NCHW -> NHWC for saving)
            x_test_np = x_test.data.cpu().numpy().transpose(0,2,3,1)
            x_gen = generator(z_sampled)
            x_gen = (x_gen + 1)/2
            x_gen_np = x_gen.data.cpu().numpy().transpose(0,2,3,1)
            x_gen_np = np.clip(x_gen_np,0,1)
            mask_np = mask.data.cpu().numpy()
            x_masked_test = x_test * mask
            x_masked_test_np = x_masked_test.data.cpu().numpy().transpose(0,2,3,1)
            x_masked_test_np = np.clip(x_masked_test_np,0,1)
            Original.append(x_test_np)
            Recovered.append(x_gen_np)
            Masked.append(x_masked_test_np)
            Residual_Curve.append(residual)
            Mask.append(mask_np)
            # freeing up memory for second loop
            generator.zero_grad()
            optimizer.zero_grad()
            del x_test, x_gen, optimizer, psnr_t, z_sampled, generator, mask
            torch.cuda.empty_cache()
            print("\nbatch completed")
        # metric evaluations
        Original = np.vstack(Original)
        Recovered = np.vstack(Recovered)
        Masked = np.vstack(Masked)
        Mask = np.vstack(Mask)
        psnr = [compare_psnr(x, y) for x,y in zip(Original, Recovered)]
        # print performance analysis
        printout = "+-"*10 + "%s"%args.dataset + "-+"*10 + "\n"
        printout = printout + "\t n_test = %d\n"%len(Recovered)
        printout = printout + "\t inpaint_method = %s\n"%args.inpaint_method
        printout = printout + "\t mask_size = %0.3f\n"%args.mask_size
        printout = printout + "\t gamma = %0.6f\n"%gamma
        printout = printout + "\t PSNR = %0.3f\n"%np.mean(psnr)
        print(printout)
        if args.save_metrics_text:
            with open("%s_inpaint_dcgan_results.txt"%args.dataset,"a") as f:
                f.write('\n' + printout)
        # saving images
        if args.save_results:
            gamma = gamma.item()
            file_names = [name[0].split("/")[-1].split(".")[0] for name in test_dataset.samples]
            save_path = save_path + "/inpaint_%s_masksize_%0.4f_gamma_%0.6f_steps_%d_lr_%0.3f_init_std_%0.2f_optim_%s"
            save_path = save_path%(args.inpaint_method,args.mask_size,gamma,args.steps,args.lr,args.init_std,args.optim)
            # avoid clobbering earlier runs: fall back to _1, then _2 suffixes
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path_1 = save_path + "_1"
                if not os.path.exists(save_path_1):
                    os.makedirs(save_path_1)
                    save_path = save_path_1
                else:
                    save_path_2 = save_path + "_2"
                    if not os.path.exists(save_path_2):
                        os.makedirs(save_path_2)
                    save_path = save_path_2
            _ = [sio.imsave(save_path+"/"+name+"_recov.jpg", x) for x,name in zip(Recovered,file_names)]
            _ = [sio.imsave(save_path+"/"+name+"_masked.jpg", x) for x,name in zip(Masked,file_names)]
            Residual_Curve = np.array(Residual_Curve).mean(axis=0)
            np.save(save_path+"/"+"residual_curve.npy", Residual_Curve)
            np.save(save_path+"/original.npy", Original)
            np.save(save_path+"/recovered.npy", Recovered)
            np.save(save_path+"/mask.npy", Mask)
# a function to generate masks
def gen_mask(maskType, imgSize, masksize=0.25):
    """Generate a square binary inpainting mask.

    Kept pixels are 1.0 and masked (to-be-inpainted) pixels are 0.0.
    The larger ``masksize``, the bigger the masked region.

    Args:
        maskType: one of 'random', 'center', 'left', 'bottom'.
        imgSize: side length of the square mask.
        masksize: fraction in [0, 1] controlling the masked area.

    Returns:
        np.ndarray of shape (imgSize, imgSize) with values in {0.0, 1.0}.

    Raises:
        ValueError: if ``maskType`` is unknown, or ``masksize`` is out of
            range for the 'center' mask.
    """
    image_shape = [imgSize, imgSize]
    if maskType == 'random':
        # drop each pixel independently with probability masksize
        mask = np.ones(image_shape)
        mask[np.random.random(image_shape[:2]) < masksize] = 0.0
    elif maskType == 'center':
        # mask a centered square whose side is a masksize fraction of imgSize
        center_scale = -(masksize - 1) / 2
        if center_scale > 0.5:
            # was assert(...); asserts are stripped under `python -O`
            raise ValueError("masksize must be non-negative for 'center' masks")
        mask = np.ones(image_shape)
        l = int(imgSize * center_scale)
        u = int(imgSize * (1.0 - center_scale))
        mask[l:u, l:u] = 0.0
    elif maskType == 'left':
        # mask the rightmost masksize fraction of columns
        mask = np.ones(image_shape)
        c = int(imgSize * (1 - masksize))
        mask[:, c:] = 0.0
    elif maskType == 'bottom':
        # mask the bottom masksize fraction of rows
        mask = np.ones(image_shape)
        c = int(imgSize * (1 - masksize))
        mask[c:, :] = 0.0
    else:
        # was assert(False); raise a descriptive, -O-proof error instead
        raise ValueError("unknown maskType: %s" % maskType)
    return mask
"numpy.clip",
"glow.glow.Glow",
"torch.from_numpy",
"torch.nn.MSELoss",
"numpy.array",
"numpy.save",
"numpy.mean",
"os.path.exists",
"numpy.random.random",
"itertools.product",
"skimage.restoration.estimate_sigma",
"numpy.stack",
"torchvision.datasets.ImageFolder",
"dcgan.dcgan.Generator",... | [((402, 435), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (425, 435), False, 'import warnings\n'), ((1937, 1966), 'itertools.product', 'itertools.product', (['*inputdata'], {}), '(*inputdata)\n', (1954, 1966), False, 'import itertools\n'), ((1536, 1603), 'skimage.restoration.denoise_nl_means', 'denoise_nl_means', (['x[0]'], {'h': 's0', 'sigma': 's0', 'fast_mode': '(False)'}), '(x[0], h=s0, sigma=s0, fast_mode=False, **patch_kw)\n', (1552, 1603), False, 'from skimage.restoration import denoise_nl_means, estimate_sigma\n'), ((1617, 1684), 'skimage.restoration.denoise_nl_means', 'denoise_nl_means', (['x[1]'], {'h': 's1', 'sigma': 's1', 'fast_mode': '(False)'}), '(x[1], h=s1, sigma=s1, fast_mode=False, **patch_kw)\n', (1633, 1684), False, 'from skimage.restoration import denoise_nl_means, estimate_sigma\n'), ((1697, 1715), 'numpy.stack', 'np.stack', (['[x0, x1]'], {}), '([x0, x1])\n', (1705, 1715), True, 'import numpy as np\n'), ((2102, 2133), 'itertools.product', 'itertools.product', (['*hyperparams'], {}), '(*hyperparams)\n', (2119, 2133), False, 'import itertools\n'), ((2651, 2701), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['test_folder'], {'transform': 'trans'}), '(test_folder, transform=trans)\n', (2671, 2701), False, 'from torchvision import datasets\n'), ((2728, 2832), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'args.batchsize', 'drop_last': '(False)', 'shuffle': '(False)'}), '(test_dataset, batch_size=args.batchsize,\n drop_last=False, shuffle=False)\n', (2755, 2832), False, 'import torch\n'), ((3087, 3165), 'torch.tensor', 'torch.tensor', (['gamma'], {'requires_grad': '(True)', 'dtype': 'torch.float', 'device': 'args.device'}), '(gamma, requires_grad=True, dtype=torch.float, device=args.device)\n', (3099, 3165), False, 'import torch\n'), ((12352, 12371), 'numpy.vstack', 'np.vstack', (['Original'], {}), 
'(Original)\n', (12361, 12371), True, 'import numpy as np\n'), ((12392, 12412), 'numpy.vstack', 'np.vstack', (['Recovered'], {}), '(Recovered)\n', (12401, 12412), True, 'import numpy as np\n'), ((12430, 12447), 'numpy.vstack', 'np.vstack', (['Masked'], {}), '(Masked)\n', (12439, 12447), True, 'import numpy as np\n'), ((12463, 12478), 'numpy.vstack', 'np.vstack', (['Mask'], {}), '(Mask)\n', (12472, 12478), True, 'import numpy as np\n'), ((16063, 16113), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['test_folder'], {'transform': 'trans'}), '(test_folder, transform=trans)\n', (16083, 16113), False, 'from torchvision import datasets\n'), ((16140, 16244), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'args.batchsize', 'drop_last': '(False)', 'shuffle': '(False)'}), '(test_dataset, batch_size=args.batchsize,\n drop_last=False, shuffle=False)\n', (16167, 16244), False, 'import torch\n'), ((16444, 16522), 'torch.tensor', 'torch.tensor', (['gamma'], {'requires_grad': '(True)', 'dtype': 'torch.float', 'device': 'args.device'}), '(gamma, requires_grad=True, dtype=torch.float, device=args.device)\n', (16456, 16522), False, 'import torch\n'), ((24936, 24955), 'numpy.vstack', 'np.vstack', (['Original'], {}), '(Original)\n', (24945, 24955), True, 'import numpy as np\n'), ((24976, 24996), 'numpy.vstack', 'np.vstack', (['Recovered'], {}), '(Recovered)\n', (24985, 24996), True, 'import numpy as np\n'), ((25017, 25034), 'numpy.vstack', 'np.vstack', (['Masked'], {}), '(Masked)\n', (25026, 25034), True, 'import numpy as np\n'), ((25055, 25070), 'numpy.vstack', 'np.vstack', (['Mask'], {}), '(Mask)\n', (25064, 25070), True, 'import numpy as np\n'), ((28097, 28147), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['test_folder'], {'transform': 'trans'}), '(test_folder, transform=trans)\n', (28117, 28147), False, 'from torchvision import datasets\n'), ((28174, 28278), 'torch.utils.data.DataLoader', 
'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'args.batchsize', 'drop_last': '(False)', 'shuffle': '(False)'}), '(test_dataset, batch_size=args.batchsize,\n drop_last=False, shuffle=False)\n', (28201, 28278), False, 'import torch\n'), ((28315, 28393), 'torch.tensor', 'torch.tensor', (['gamma'], {'requires_grad': '(True)', 'dtype': 'torch.float', 'device': 'args.device'}), '(gamma, requires_grad=True, dtype=torch.float, device=args.device)\n', (28327, 28393), False, 'import torch\n'), ((32366, 32385), 'numpy.vstack', 'np.vstack', (['Original'], {}), '(Original)\n', (32375, 32385), True, 'import numpy as np\n'), ((32406, 32426), 'numpy.vstack', 'np.vstack', (['Recovered'], {}), '(Recovered)\n', (32415, 32426), True, 'import numpy as np\n'), ((32447, 32464), 'numpy.vstack', 'np.vstack', (['Masked'], {}), '(Masked)\n', (32456, 32464), True, 'import numpy as np\n'), ((32485, 32500), 'numpy.vstack', 'np.vstack', (['Mask'], {}), '(Mask)\n', (32494, 32500), True, 'import numpy as np\n'), ((34917, 34937), 'numpy.ones', 'np.ones', (['image_shape'], {}), '(image_shape)\n', (34924, 34937), True, 'import numpy as np\n'), ((1420, 1459), 'skimage.restoration.estimate_sigma', 'estimate_sigma', (['x[0]'], {'multichannel': '(True)'}), '(x[0], multichannel=True)\n', (1434, 1459), False, 'from skimage.restoration import denoise_nl_means, estimate_sigma\n'), ((1482, 1521), 'skimage.restoration.estimate_sigma', 'estimate_sigma', (['x[1]'], {'multichannel': '(True)'}), '(x[1], multichannel=True)\n', (1496, 1521), False, 'from skimage.restoration import denoise_nl_means, estimate_sigma\n'), ((3035, 3047), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3044, 3047), False, 'import json\n'), ((3942, 4020), 'torch.tensor', 'torch.tensor', (['mask'], {'dtype': 'torch.float', 'requires_grad': '(False)', 'device': 'args.device'}), '(mask, dtype=torch.float, requires_grad=False, device=args.device)\n', (3954, 4020), False, 'import torch\n'), ((4074, 4269), 'glow.glow.Glow', 
'Glow', (['(3, args.size, args.size)'], {'K': "configs['K']", 'L': "configs['L']", 'coupling': "configs['coupling']", 'n_bits_x': "configs['n_bits_x']", 'nn_init_last_zeros': "configs['last_zeros']", 'device': 'args.device'}), "((3, args.size, args.size), K=configs['K'], L=configs['L'], coupling=\n configs['coupling'], n_bits_x=configs['n_bits_x'], nn_init_last_zeros=\n configs['last_zeros'], device=args.device)\n", (4078, 4269), False, 'from glow.glow import Glow\n'), ((8650, 8674), 'torch.zeros_like', 'torch.zeros_like', (['x_test'], {}), '(x_test)\n', (8666, 8674), False, 'import torch\n'), ((11447, 11470), 'numpy.clip', 'np.clip', (['x_gen_np', '(0)', '(1)'], {}), '(x_gen_np, 0, 1)\n', (11454, 11470), True, 'import numpy as np\n'), ((11676, 11707), 'numpy.clip', 'np.clip', (['x_masked_test_np', '(0)', '(1)'], {}), '(x_masked_test_np, 0, 1)\n', (11683, 11707), True, 'import numpy as np\n'), ((12105, 12129), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (12127, 12129), False, 'import torch\n'), ((12495, 12513), 'skimage.measure.compare_psnr', 'compare_psnr', (['x', 'y'], {}), '(x, y)\n', (12507, 12513), False, 'from skimage.measure import compare_psnr, compare_ssim\n'), ((15192, 15255), 'numpy.save', 'np.save', (["(save_path + '/' + 'residual_curve.npy')", 'Residual_Curve'], {}), "(save_path + '/' + 'residual_curve.npy', Residual_Curve)\n", (15199, 15255), True, 'import numpy as np\n'), ((15268, 15314), 'numpy.save', 'np.save', (["(save_path + '/original.npy')", 'Original'], {}), "(save_path + '/original.npy', Original)\n", (15275, 15314), True, 'import numpy as np\n'), ((15327, 15375), 'numpy.save', 'np.save', (["(save_path + '/recovered.npy')", 'Recovered'], {}), "(save_path + '/recovered.npy', Recovered)\n", (15334, 15375), True, 'import numpy as np\n'), ((15388, 15426), 'numpy.save', 'np.save', (["(save_path + '/mask.npy')", 'Mask'], {}), "(save_path + '/mask.npy', Mask)\n", (15395, 15426), True, 'import numpy as np\n'), ((15439, 
15481), 'numpy.save', 'np.save', (["(save_path + '/masked.npy')", 'Masked'], {}), "(save_path + '/masked.npy', Masked)\n", (15446, 15481), True, 'import numpy as np\n'), ((16388, 16400), 'json.load', 'json.load', (['f'], {}), '(f)\n', (16397, 16400), False, 'import json\n'), ((17251, 17329), 'torch.tensor', 'torch.tensor', (['mask'], {'dtype': 'torch.float', 'requires_grad': '(False)', 'device': 'args.device'}), '(mask, dtype=torch.float, requires_grad=False, device=args.device)\n', (17263, 17329), False, 'import torch\n'), ((17383, 17578), 'glow.glow.Glow', 'Glow', (['(3, args.size, args.size)'], {'K': "configs['K']", 'L': "configs['L']", 'coupling': "configs['coupling']", 'n_bits_x': "configs['n_bits_x']", 'nn_init_last_zeros': "configs['last_zeros']", 'device': 'args.device'}), "((3, args.size, args.size), K=configs['K'], L=configs['L'], coupling=\n configs['coupling'], n_bits_x=configs['n_bits_x'], nn_init_last_zeros=\n configs['last_zeros'], device=args.device)\n", (17387, 17578), False, 'from glow.glow import Glow\n'), ((24032, 24055), 'numpy.clip', 'np.clip', (['x_gen_np', '(0)', '(1)'], {}), '(x_gen_np, 0, 1)\n', (24039, 24055), True, 'import numpy as np\n'), ((24260, 24291), 'numpy.clip', 'np.clip', (['x_masked_test_np', '(0)', '(1)'], {}), '(x_masked_test_np, 0, 1)\n', (24267, 24291), True, 'import numpy as np\n'), ((24687, 24711), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (24709, 24711), False, 'import torch\n'), ((25092, 25110), 'skimage.measure.compare_psnr', 'compare_psnr', (['x', 'y'], {}), '(x, y)\n', (25104, 25110), False, 'from skimage.measure import compare_psnr, compare_ssim\n'), ((27341, 27404), 'numpy.save', 'np.save', (["(save_path + '/' + 'residual_curve.npy')", 'Residual_Curve'], {}), "(save_path + '/' + 'residual_curve.npy', Residual_Curve)\n", (27348, 27404), True, 'import numpy as np\n'), ((27413, 27459), 'numpy.save', 'np.save', (["(save_path + '/original.npy')", 'Original'], {}), "(save_path + 
'/original.npy', Original)\n", (27420, 27459), True, 'import numpy as np\n'), ((27470, 27518), 'numpy.save', 'np.save', (["(save_path + '/recovered.npy')", 'Recovered'], {}), "(save_path + '/recovered.npy', Recovered)\n", (27477, 27518), True, 'import numpy as np\n'), ((27529, 27567), 'numpy.save', 'np.save', (["(save_path + '/mask.npy')", 'Mask'], {}), "(save_path + '/mask.npy', Mask)\n", (27536, 27567), True, 'import numpy as np\n'), ((27578, 27620), 'numpy.save', 'np.save', (["(save_path + '/masked.npy')", 'Masked'], {}), "(save_path + '/masked.npy', Masked)\n", (27585, 27620), True, 'import numpy as np\n'), ((29122, 29200), 'torch.tensor', 'torch.tensor', (['mask'], {'dtype': 'torch.float', 'requires_grad': '(False)', 'device': 'args.device'}), '(mask, dtype=torch.float, requires_grad=False, device=args.device)\n', (29134, 29200), False, 'import torch\n'), ((31587, 31610), 'numpy.clip', 'np.clip', (['x_gen_np', '(0)', '(1)'], {}), '(x_gen_np, 0, 1)\n', (31594, 31610), True, 'import numpy as np\n'), ((31815, 31846), 'numpy.clip', 'np.clip', (['x_masked_test_np', '(0)', '(1)'], {}), '(x_masked_test_np, 0, 1)\n', (31822, 31846), True, 'import numpy as np\n'), ((32251, 32275), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (32273, 32275), False, 'import torch\n'), ((32522, 32540), 'skimage.measure.compare_psnr', 'compare_psnr', (['x', 'y'], {}), '(x, y)\n', (32534, 32540), False, 'from skimage.measure import compare_psnr, compare_ssim\n'), ((34454, 34517), 'numpy.save', 'np.save', (["(save_path + '/' + 'residual_curve.npy')", 'Residual_Curve'], {}), "(save_path + '/' + 'residual_curve.npy', Residual_Curve)\n", (34461, 34517), True, 'import numpy as np\n'), ((34526, 34572), 'numpy.save', 'np.save', (["(save_path + '/original.npy')", 'Original'], {}), "(save_path + '/original.npy', Original)\n", (34533, 34572), True, 'import numpy as np\n'), ((34583, 34631), 'numpy.save', 'np.save', (["(save_path + '/recovered.npy')", 'Recovered'], {}), 
"(save_path + '/recovered.npy', Recovered)\n", (34590, 34631), True, 'import numpy as np\n'), ((34642, 34680), 'numpy.save', 'np.save', (["(save_path + '/mask.npy')", 'Mask'], {}), "(save_path + '/mask.npy', Mask)\n", (34649, 34680), True, 'import numpy as np\n'), ((35127, 35147), 'numpy.ones', 'np.ones', (['image_shape'], {}), '(image_shape)\n', (35134, 35147), True, 'import numpy as np\n'), ((2561, 2602), 'torchvision.transforms.Resize', 'transforms.Resize', (['(args.size, args.size)'], {}), '((args.size, args.size))\n', (2578, 2602), True, 'import torchvision.transforms as transforms\n'), ((2604, 2625), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2623, 2625), True, 'import torchvision.transforms as transforms\n'), ((4413, 4451), 'torch.load', 'torch.load', (["(modeldir + '/glowmodel.pt')"], {}), "(modeldir + '/glowmodel.pt')\n", (4423, 4451), False, 'import torch\n'), ((4733, 4780), 'numpy.random.normal', 'np.random.normal', (['(0)', 'args.init_std', '[n_test, n]'], {}), '(0, args.init_std, [n_test, n])\n', (4749, 4780), True, 'import numpy as np\n'), ((4809, 4896), 'torch.tensor', 'torch.tensor', (['z_sampled'], {'requires_grad': '(True)', 'dtype': 'torch.float', 'device': 'args.device'}), '(z_sampled, requires_grad=True, dtype=torch.float, device=args.\n device)\n', (4821, 4896), False, 'import torch\n'), ((8299, 8340), 'torch.optim.Adam', 'torch.optim.Adam', (['[z_sampled]'], {'lr': 'args.lr'}), '([z_sampled], lr=args.lr)\n', (8315, 8340), False, 'import torch\n'), ((13234, 13247), 'numpy.mean', 'np.mean', (['psnr'], {}), '(psnr)\n', (13241, 13247), True, 'import numpy as np\n'), ((14390, 14415), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (14404, 14415), False, 'import os\n'), ((14433, 14455), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (14444, 14455), False, 'import os\n'), ((14906, 14958), 'skimage.io.imsave', 'sio.imsave', (["(save_path + '/' + name + '_recov.jpg')", 
'x'], {}), "(save_path + '/' + name + '_recov.jpg', x)\n", (14916, 14958), True, 'import skimage.io as sio\n'), ((15019, 15072), 'skimage.io.imsave', 'sio.imsave', (["(save_path + '/' + name + '_masked.jpg')", 'x'], {}), "(save_path + '/' + name + '_masked.jpg', x)\n", (15029, 15072), True, 'import skimage.io as sio\n'), ((15972, 16013), 'torchvision.transforms.Resize', 'transforms.Resize', (['(args.size, args.size)'], {}), '((args.size, args.size))\n', (15989, 16013), True, 'import torchvision.transforms as transforms\n'), ((16013, 16034), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (16032, 16034), True, 'import torchvision.transforms as transforms\n'), ((17719, 17757), 'torch.load', 'torch.load', (["(modeldir + '/glowmodel.pt')"], {}), "(modeldir + '/glowmodel.pt')\n", (17729, 17757), False, 'import torch\n'), ((18037, 18084), 'numpy.random.normal', 'np.random.normal', (['(0)', 'args.init_std', '[n_test, n]'], {}), '(0, args.init_std, [n_test, n])\n', (18053, 18084), True, 'import numpy as np\n'), ((18110, 18197), 'torch.tensor', 'torch.tensor', (['z_sampled'], {'requires_grad': '(True)', 'dtype': 'torch.float', 'device': 'args.device'}), '(z_sampled, requires_grad=True, dtype=torch.float, device=args.\n device)\n', (18122, 18197), False, 'import torch\n'), ((21812, 21853), 'torch.optim.Adam', 'torch.optim.Adam', (['[z_sampled]'], {'lr': 'args.lr'}), '([z_sampled], lr=args.lr)\n', (21828, 21853), False, 'import torch\n'), ((25600, 25613), 'numpy.mean', 'np.mean', (['psnr'], {}), '(psnr)\n', (25607, 25613), True, 'import numpy as np\n'), ((26555, 26580), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (26569, 26580), False, 'import os\n'), ((26598, 26620), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (26609, 26620), False, 'import os\n'), ((27071, 27123), 'skimage.io.imsave', 'sio.imsave', (["(save_path + '/' + name + '_recov.jpg')", 'x'], {}), "(save_path + '/' + name + '_recov.jpg', 
x)\n", (27081, 27123), True, 'import skimage.io as sio\n'), ((27176, 27229), 'skimage.io.imsave', 'sio.imsave', (["(save_path + '/' + name + '_masked.jpg')", 'x'], {}), "(save_path + '/' + name + '_masked.jpg', x)\n", (27186, 27229), True, 'import skimage.io as sio\n'), ((28006, 28047), 'torchvision.transforms.Resize', 'transforms.Resize', (['(args.size, args.size)'], {}), '((args.size, args.size))\n', (28023, 28047), True, 'import torchvision.transforms as transforms\n'), ((28047, 28068), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (28066, 28068), True, 'import torchvision.transforms as transforms\n'), ((29337, 29373), 'torch.load', 'torch.load', (["(modeldir + '/dcgan_G.pt')"], {}), "(modeldir + '/dcgan_G.pt')\n", (29347, 29373), False, 'import torch\n'), ((29533, 29586), 'numpy.random.normal', 'np.random.normal', (['(0)', 'args.init_std', '[n_test, n, 1, 1]'], {}), '(0, args.init_std, [n_test, n, 1, 1])\n', (29549, 29586), True, 'import numpy as np\n'), ((29610, 29697), 'torch.tensor', 'torch.tensor', (['z_sampled'], {'requires_grad': '(True)', 'dtype': 'torch.float', 'device': 'args.device'}), '(z_sampled, requires_grad=True, dtype=torch.float, device=args.\n device)\n', (29622, 29697), False, 'import torch\n'), ((29905, 29946), 'torch.optim.Adam', 'torch.optim.Adam', (['[z_sampled]'], {'lr': 'args.lr'}), '([z_sampled], lr=args.lr)\n', (29921, 29946), False, 'import torch\n'), ((33030, 33043), 'numpy.mean', 'np.mean', (['psnr'], {}), '(psnr)\n', (33037, 33043), True, 'import numpy as np\n'), ((33667, 33692), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (33681, 33692), False, 'import os\n'), ((33710, 33732), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (33721, 33732), False, 'import os\n'), ((34184, 34236), 'skimage.io.imsave', 'sio.imsave', (["(save_path + '/' + name + '_recov.jpg')", 'x'], {}), "(save_path + '/' + name + '_recov.jpg', x)\n", (34194, 34236), True, 'import 
skimage.io as sio\n'), ((34289, 34342), 'skimage.io.imsave', 'sio.imsave', (["(save_path + '/' + name + '_masked.jpg')", 'x'], {}), "(save_path + '/' + name + '_masked.jpg', x)\n", (34299, 34342), True, 'import skimage.io as sio\n'), ((34951, 34984), 'numpy.random.random', 'np.random.random', (['image_shape[:2]'], {}), '(image_shape[:2])\n', (34967, 34984), True, 'import numpy as np\n'), ((35303, 35323), 'numpy.ones', 'np.ones', (['image_shape'], {}), '(image_shape)\n', (35310, 35323), True, 'import numpy as np\n'), ((904, 928), 'torch.from_numpy', 'torch.from_numpy', (['img_np'], {}), '(img_np)\n', (920, 928), False, 'import torch\n'), ((4587, 4611), 'torch.zeros_like', 'torch.zeros_like', (['x_test'], {}), '(x_test)\n', (4603, 4611), False, 'import torch\n'), ((5180, 5238), 'torch.tensor', 'torch.tensor', (['noise'], {'dtype': 'torch.float', 'device': 'args.device'}), '(noise, dtype=torch.float, device=args.device)\n', (5192, 5238), False, 'import torch\n'), ((5371, 5404), 'torch.clamp', 'torch.clamp', (['x_noisy_filled', '(0)', '(1)'], {}), '(x_noisy_filled, 0, 1)\n', (5382, 5404), False, 'import torch\n'), ((8411, 8453), 'torch.optim.LBFGS', 'torch.optim.LBFGS', (['[z_sampled]'], {'lr': 'args.lr'}), '([z_sampled], lr=args.lr)\n', (8428, 8453), False, 'import torch\n'), ((8524, 8542), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (8540, 8542), False, 'import torch\n'), ((14544, 14571), 'os.path.exists', 'os.path.exists', (['save_path_1'], {}), '(save_path_1)\n', (14558, 14571), False, 'import os\n'), ((14593, 14617), 'os.makedirs', 'os.makedirs', (['save_path_1'], {}), '(save_path_1)\n', (14604, 14617), False, 'import os\n'), ((15142, 15166), 'numpy.array', 'np.array', (['Residual_Curve'], {}), '(Residual_Curve)\n', (15150, 15166), True, 'import numpy as np\n'), ((17891, 17915), 'torch.zeros_like', 'torch.zeros_like', (['x_test'], {}), '(x_test)\n', (17907, 17915), False, 'import torch\n'), ((18495, 18553), 'torch.tensor', 'torch.tensor', (['noise'], 
{'dtype': 'torch.float', 'device': 'args.device'}), '(noise, dtype=torch.float, device=args.device)\n', (18507, 18553), False, 'import torch\n'), ((18691, 18724), 'torch.clamp', 'torch.clamp', (['x_noisy_filled', '(0)', '(1)'], {}), '(x_noisy_filled, 0, 1)\n', (18702, 18724), False, 'import torch\n'), ((21923, 21965), 'torch.optim.LBFGS', 'torch.optim.LBFGS', (['[z_sampled]'], {'lr': 'args.lr'}), '([z_sampled], lr=args.lr)\n', (21940, 21965), False, 'import torch\n'), ((22038, 22056), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (22054, 22056), False, 'import torch\n'), ((26709, 26736), 'os.path.exists', 'os.path.exists', (['save_path_1'], {}), '(save_path_1)\n', (26723, 26736), False, 'import os\n'), ((26758, 26782), 'os.makedirs', 'os.makedirs', (['save_path_1'], {}), '(save_path_1)\n', (26769, 26782), False, 'import os\n'), ((27291, 27315), 'numpy.array', 'np.array', (['Residual_Curve'], {}), '(Residual_Curve)\n', (27299, 27315), True, 'import numpy as np\n'), ((29258, 29275), 'dcgan.dcgan.Generator', 'Generator', ([], {'ngpu': '(1)'}), '(ngpu=1)\n', (29267, 29275), False, 'from dcgan.dcgan import Generator\n'), ((30016, 30058), 'torch.optim.LBFGS', 'torch.optim.LBFGS', (['[z_sampled]'], {'lr': 'args.lr'}), '([z_sampled], lr=args.lr)\n', (30033, 30058), False, 'import torch\n'), ((30131, 30149), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (30147, 30149), False, 'import torch\n'), ((33821, 33848), 'os.path.exists', 'os.path.exists', (['save_path_1'], {}), '(save_path_1)\n', (33835, 33848), False, 'import os\n'), ((33870, 33894), 'os.makedirs', 'os.makedirs', (['save_path_1'], {}), '(save_path_1)\n', (33881, 33894), False, 'import os\n'), ((34404, 34428), 'numpy.array', 'np.array', (['Residual_Curve'], {}), '(Residual_Curve)\n', (34412, 34428), True, 'import numpy as np\n'), ((35484, 35504), 'numpy.ones', 'np.ones', (['image_shape'], {}), '(image_shape)\n', (35491, 35504), True, 'import numpy as np\n'), ((14762, 14789), 'os.path.exists', 
'os.path.exists', (['save_path_2'], {}), '(save_path_2)\n', (14776, 14789), False, 'import os\n'), ((14815, 14839), 'os.makedirs', 'os.makedirs', (['save_path_2'], {}), '(save_path_2)\n', (14826, 14839), False, 'import os\n'), ((26927, 26954), 'os.path.exists', 'os.path.exists', (['save_path_2'], {}), '(save_path_2)\n', (26941, 26954), False, 'import os\n'), ((26980, 27004), 'os.makedirs', 'os.makedirs', (['save_path_2'], {}), '(save_path_2)\n', (26991, 27004), False, 'import os\n'), ((34039, 34066), 'os.path.exists', 'os.path.exists', (['save_path_2'], {}), '(save_path_2)\n', (34053, 34066), False, 'import os\n'), ((34092, 34116), 'os.makedirs', 'os.makedirs', (['save_path_2'], {}), '(save_path_2)\n', (34103, 34116), False, 'import os\n'), ((7104, 7162), 'torch.tensor', 'torch.tensor', (['noise'], {'dtype': 'torch.float', 'device': 'args.device'}), '(noise, dtype=torch.float, device=args.device)\n', (7116, 7162), False, 'import torch\n'), ((7231, 7257), 'torch.clamp', 'torch.clamp', (['x_noisy', '(0)', '(1)'], {}), '(x_noisy, 0, 1)\n', (7242, 7257), False, 'import torch\n'), ((20545, 20603), 'torch.tensor', 'torch.tensor', (['noise'], {'dtype': 'torch.float', 'device': 'args.device'}), '(noise, dtype=torch.float, device=args.device)\n', (20557, 20603), False, 'import torch\n'), ((20672, 20698), 'torch.clamp', 'torch.clamp', (['x_noisy', '(0)', '(1)'], {}), '(x_noisy, 0, 1)\n', (20683, 20698), False, 'import torch\n'), ((7713, 7771), 'torch.tensor', 'torch.tensor', (['noise'], {'dtype': 'torch.float', 'device': 'args.device'}), '(noise, dtype=torch.float, device=args.device)\n', (7725, 7771), False, 'import torch\n'), ((7911, 7944), 'torch.clamp', 'torch.clamp', (['x_noisy_filled', '(0)', '(1)'], {}), '(x_noisy_filled, 0, 1)\n', (7922, 7944), False, 'import torch\n'), ((21196, 21254), 'torch.tensor', 'torch.tensor', (['noise'], {'dtype': 'torch.float', 'device': 'args.device'}), '(noise, dtype=torch.float, device=args.device)\n', (21208, 21254), False, 'import 
torch\n'), ((21399, 21432), 'torch.clamp', 'torch.clamp', (['x_noisy_filled', '(0)', '(1)'], {}), '(x_noisy_filled, 0, 1)\n', (21410, 21432), False, 'import torch\n')] |
import numpy as np
import sys
import copy
import logging
from kinbot import bond_combinations
from kinbot import find_motif
from reactions.reac_Cyclic_Ether_Formation import CyclicEtherFormation
from reactions.reac_Diels_alder_addition import DielsAlder
from reactions.reac_Intra_Diels_alder_R import IntraDielsAlder
from reactions.reac_12_shift_S_F import S12ShiftF
from reactions.reac_12_shift_S_R import S12ShiftR
from reactions.reac_cpd_H_migration import CpdHMigration
from reactions.reac_intra_H_migration import IntraHMigration
from reactions.reac_intra_H_migration_suprafacial import IntraHMigrationSuprafacial
from reactions.reac_intra_OH_migration import IntraOHMigration
from reactions.reac_intra_OH_migration_Exocyclic_F import IntraOHMigrationExocyclicF
from reactions.reac_Intra_R_Add_Endocyclic_F import IntraRAddEndocyclicF
from reactions.reac_Intra_R_Add_Exocyclic_F import IntraRAddExocyclicF
from reactions.reac_Intra_R_Add_ExoTetCyclic_F import IntraRAddExoTetCyclicF
from reactions.reac_intra_R_migration import IntraRMigration
from reactions.reac_Retro_Ene import RetroEne
from reactions.reac_r22_cycloaddition import R22Cycloaddition
from reactions.reac_r12_insertion_R import R12Insertion
from reactions.reac_r13_insertion_RSR import R13InsertionRSR
from reactions.reac_r13_insertion_ROR import R13InsertionROR
from reactions.reac_r13_insertion_CO2 import R13InsertionCO2
from reactions.reac_r12_cycloaddition import R12Cycloaddition
from reactions.reac_R_Addition_MultipleBond import RAdditionMultipleBond
from reactions.reac_R_Addition_CSm_R import RAdditionCS
from reactions.reac_R_Addition_COm3_R import RAdditionCO
from reactions.reac_Korcek_step2_odd import KorcekStep2Odd
from reactions.reac_Korcek_step2_even import KorcekStep2Even
from reactions.reac_Korcek_step2 import KorcekStep2
from reactions.reac_ketoenol import KetoEnol
from reactions.reac_Intra_RH_Add_Exocyclic_R import IntraRHAddExoR
from reactions.reac_Intra_RH_Add_Exocyclic_F import IntraRHAddExoF
from reactions.reac_Intra_RH_Add_Endocyclic_R import IntraRHAddEndoR
from reactions.reac_Intra_RH_Add_Endocyclic_F import IntraRHAddEndoF
from reactions.reac_HO2_Elimination_from_PeroxyRadical import HO2Elimination
from reactions.reac_beta_delta import BetaDelta
from reactions.reac_birad_recombination_F import BiradRecombinationF
from reactions.reac_birad_recombination_R import BiradRecombinationR
from reactions.reac_Intra_disproportionation_R import IntraDisproportionationR
from reactions.reac_Intra_disproportionation_F import IntraDisproportionationF
from reactions.reac_r14_birad_scission import R14BiradScission
from reactions.reac_r14_cyclic_birad_scission_R import R14CyclicBiradScission
from reactions.reac_barrierless_saddle import BarrierlessSaddle
from reactions.reac_h2_elim import H2Elim
from reactions.reac_homolytic_scission import HS
from reactions.reac_combinatorial import Combinatorial
class ReactionFinder:
"""
Class to find all the potential reactions starting from a well
"""
def __init__(self, species, par, qc):
self.species = species
self.qc = qc
self.par = par
self.families = par['families']
self.skip_families = par['skip_families']
self.specific_reaction = par['specific_reaction']
self.break_bond = par['break_bonds']
self.form_bond = par['form_bonds']
self.ringrange = range(self.par['ringrange'][0], self.par['ringrange'][1])
self.one_reaction_comb = par['one_reaction_comb']
self.one_reaction_fam = par['one_reaction_fam']
# make a set of frozen sets from the breaking and forming bond lists
self.reac_bonds = set()
for i, bond in enumerate(par['break_bonds']):
self.reac_bonds.add(frozenset(par['break_bonds'][i]))
self.prod_bonds = set()
for i, bond in enumerate(par['form_bonds']):
self.prod_bonds.add(frozenset(par['form_bonds'][i]))
try:
self.barrierless_saddle = par['barrierless_saddle'][str(self.species.chemid)]
except KeyError:
self.barrierless_saddle = None
#keys: names of the families
#values: list of instances
#this dict is used to keep track of the unique reactions found,
#and to verify whether a new reaction is indeed unique
self.reactions = {}
    def find_reactions(self):
        """
        List all reaction types available, and find the key atoms for them.

        For each resonance isomer (bond matrix + radical vector) of the
        species, dispatch every enabled family search; the searches fill
        self.reactions as a side effect. Afterwards, reaction_matrix
        (defined elsewhere in this class) post-processes the hits and
        populates self.species.reac_name; duplicate names abort the run.

        Returns:
            int: 0 on success (the process exits on duplicate names).
        """
        # family name -> bound search method; every method has the same
        # (natom, atom, bond, rad) signature
        reaction_names = {'intra_H_migration': self.search_intra_H_migration,
                          'intra_H_migration_suprafacial': self.search_intra_H_migration_suprafacial,
                          'intra_R_migration': self.search_intra_R_migration,
                          'intra_OH_migration': self.search_intra_OH_migration,
                          'intra_OH_migration_Exocyclic_F': self.search_intra_OH_migration_Exocyclic_F,
                          'cpd_H_migration': self.search_cpd_H_migration,
                          'Intra_RH_Add_Endocyclic_F': self.search_Intra_RH_Add_Endocyclic_F,
                          'Intra_RH_Add_Endocyclic_R': self.search_Intra_RH_Add_Endocyclic_R,
                          'Cyclic_Ether_Formation': self.search_Cyclic_Ether_Formation,
                          'Intra_RH_Add_Exocyclic_F': self.search_Intra_RH_Add_Exocyclic_F,
                          'Intra_RH_Add_Exocyclic_R': self.search_Intra_RH_Add_Exocyclic_R,
                          'Retro_Ene': self.search_Retro_Ene,
                          'Intra_R_Add_Endocyclic_F': self.search_Intra_R_Add_Endocyclic_F,
                          'Intra_R_Add_ExoTetCyclic_F': self.search_Intra_R_Add_ExoTetCyclic_F,
                          'Intra_R_Add_Exocyclic_F': self.search_Intra_R_Add_Exocyclic_F,
                          'Korcek_step2_odd': self.search_Korcek_step2_odd,
                          'Korcek_step2_even': self.search_Korcek_step2_even,
                          'Korcek_step2': self.search_Korcek_step2,
                          'r22_cycloaddition': self.search_r22_cycloaddition,
                          'r12_cycloaddition': self.search_r12_cycloaddition,
                          'r12_insertion_R': self.search_r12_insertion_R,
                          'r13_insertion_CO2': self.search_r13_insertion_CO2,
                          'r13_insertion_ROR': self.search_r13_insertion_ROR,
                          'Diels_alder_addition': self.search_Diels_alder_addition,
                          'Intra_Diels_alder_R': self.search_Intra_Diels_alder_R,
                          'ketoenol': self.search_ketoenol,
                          'HO2_Elimination_from_PeroxyRadical': self.search_HO2_Elimination_from_PeroxyRadical,
                          'R_Addition_COm3_R': self.search_R_Addition_COm3_R,
                          'R_Addition_MultipleBond': self.search_R_Addition_MultipleBond,
                          '12_shift_S_F': self.search_12_shift_S_F,
                          '12_shift_S_R': self.search_12_shift_S_R,
                          'R_Addition_CSm_R': self.search_R_Addition_CSm_R,
                          'r13_insertion_RSR': self.search_r13_insertion_RSR,
                          'beta_delta': self.search_beta_delta,
                          'h2_elim': self.search_h2_elim,
                          'hom_sci': self.search_hom_sci,
                          'barrierless_saddle': self.search_barrierless_saddle,
                          }
        # 'combinatorial' is opt-in only: it is never triggered by 'all'
        if 'combinatorial' in self.families:
            reaction_names['combinatorial'] = self.search_combinatorial
        atom = self.species.atom
        natom = self.species.natom
        # one search pass per resonance structure
        for i, bond in enumerate(self.species.bonds):
            rad = self.species.rads[i]
            if self.one_reaction_comb:
                # search for just one reaction, given by the list of bonds to be
                # broken or formed
                # based on the combinatorial reaction family, because they are also
                # defined by the list of bonds to be broken or formed
                name = 'combinatorial'
                self.reactions[name] = []
                self.reac_bonds = self.par['break_bonds']
                self.prod_bonds = self.par['form_bonds']
                ts = bond_combinations.generate_ts(self.reac_bonds, self.prod_bonds, self.species.bond)
                self.reactions[name].append([self.reac_bonds, self.prod_bonds, ts, 1])
            else:
                # run every family that is enabled and not explicitly skipped
                for rn in reaction_names:
                    if rn in self.families or 'all' in self.families:
                        if not rn in self.skip_families:
                            reaction_names[rn](natom, atom, bond, rad)
        # reaction_matrix is defined outside this chunk; it converts the raw
        # atom-index instances into reaction objects and fills reac_name
        for name in self.reactions:
            self.reaction_matrix(self.reactions[name], name)
        # duplicate reaction names are fatal
        for index in range(len(self.species.reac_name)-1):
            if self.species.reac_name[index] in self.species.reac_name[index + 1:]:
                logging.error('Found reaction name "{}" more than once'
                              .format(self.species.reac_name[index]))
                logging.error('Exiting')
                sys.exit()
        logging.info('\tFound the following reactions:')
        for rxn in self.species.reac_name:
            logging.info('\t\t{}'.format(rxn))
        return 0
def search_combinatorial(self, natom, atom, bond, rad):
"""
This is a method to create all possible combinations of maximum 3 bond breakings
and maximum 3 bond formations.
TODO: allow bond breaking without the atoms forming new bond (only for radicals)
"""
name = 'combinatorial'
if not name in self.reactions:
self.reactions[name] = []
instances = bond_combinations.generate_all_product_bond_matrices(self.species, self.par)
for inst in instances:
self.reactions[name].append(inst)
#~ self.reactions[name] = []
#~ reac = [[0, 5], [1, 2], [3, 4]]
#~ prod = [[0, 1], [2, 3], [4, 5]]
#~ ts = self.species.bond
#~ self.reactions[name].append([reac, prod, ts])
return 0
def search_intra_H_migration(self, natom, atom, bond, rad):
"""
This is an RMG class.
H-R~~~~~~~R* <==> R*~~~~~~~R-H
Works in both directions.
H is moved to
* radical site
* multiple bond
* lone pair
"""
name = 'intra_H_migration'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
if np.sum(rad) == 0:
#find H-migrations over double bonds and to lone pairs
for ringsize in self.ringrange:
# double bonds
motif = ['X' for i in range(ringsize)]
motif[-1] = 'H'
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for instance in instances:
if any([bi > 1 for bi in bond[instance[0]]]):
rxns += [instance]
# lone pairs
motif = ['X' for i in range(ringsize)]
motif[-1] = 'H'
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for instance in instances:
if (self.species.atom[instance[0]] == 'O' or
self.species.atom[instance[0]] == 'S' or
self.species.atom[instance[0]] == 'N'):
rxns += [instance]
else:
instances = []
for ringsize in self.ringrange:
motif = ['X' for i in range(ringsize)]
motif[-1] = 'H'
for rad_site in np.nonzero(rad)[0]:
instances += find_motif.start_motif(motif, natom, bond, atom, rad_site, self.species.atom_eqv)
for instance in instances:
rxns.append(instance)
rxns = self.clean_rigid(name, rxns, 0, -1)
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[-1], inst[-2]})} or self.prod_bonds != {frozenset({inst[0], inst[-1]})}:
# new = 0
return 0
def search_intra_H_migration_suprafacial(self, natom, atom, bond, rad):
"""
This is a special case of H migration reactions over a double bond
(keto-enol type) that proceeds through a suprafacial instead of the
common antrafacial TS.
"""
name = 'intra_H_migration_suprafacial'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
# search for keto-enol type reactions
motif = ['X', 'X', 'X', 'H']
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
# filter for the double bond
for instance in instances:
if bond[instance[0]][instance[1]] == 2:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[-1], inst[-2]})} or self.prod_bonds != {frozenset({inst[0], inst[-1]})}:
# new = 0
return 0
def search_intra_R_migration(self, natom, atom, bond, rad):
"""
This is an class that covers several RMG classes.
R cannot be an H, this is already taken care of in the intra_H_migration
TODO: merge this with intra H migration families?
yes, because it is the same rule
no, because then it's hard to search for just one of the types
TODO: this should also include migration to lone pair electrons?
currently it moves atoms to radical sites only
"""
if np.sum(rad) != 1: return
name = 'intra_R_migration'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
instances = []
for ringsize in self.ringrange:
motif = ['X' for i in range(ringsize)]
for rad_site in np.nonzero(rad)[0]:
instances += find_motif.start_motif(motif, natom, bond, atom, rad_site, self.species.atom_eqv)
for instance in instances:
if not atom[instance[-1]] == 'H':
rxns.append(instance)
rxns = self.clean_rigid(name, rxns, 0, -1)
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[-1], inst[-2]})} or self.prod_bonds != {frozenset({inst[0], inst[-1]})}:
# new = 0
return 0
    def search_cpd_H_migration(self, natom, atom, bond, rad):
        """
        This is an RMG class.
        H-C1-C=C-C=C-1 <==> C1=C-C=C-C(-H)-1

        Searches 5-membered rings for a ring atom that has single bonds to
        both ring neighbors (the H carrier) and registers the H shift in
        both ring directions.
        """
        # only species with at least one 5-membered ring qualify
        if not any([len(ci) == 5 for ci in self.species.cycle_chain]) : return
        name = 'cpd_H_migration'
        if not name in self.reactions:
            self.reactions[name] = []
        rxns = [] #reactions found with the current resonance isomer
        # NOTE(review): bondsum is initialized once, outside the cycle loop —
        # with more than one 5-membered ring the counts accumulate across
        # rings and the `bondsum != 7` test below will misfire; confirm
        # whether multi-ring species can reach this code.
        bondsum = 0
        for cycle in self.species.cycle_chain:
            if len(cycle) == 5:
                for index, atomi in enumerate(cycle):
                    # atomj/atomk are the two ring neighbors of atomi
                    # (indices wrap around the 5-membered ring)
                    if index < 4:
                        atomj = cycle[index + 1]
                    else:
                        atomj = cycle[0]
                    if index == 0:
                        atomk = cycle[-1]
                    else:
                        atomk = cycle[index - 1]
                    bondsum += bond[atomi][atomj]
                    # the H carrier has single bonds to both neighbors;
                    # the last such atom found wins
                    if bond[atomi][atomj] == 1 and bond[atomi][atomk] == 1:
                        start = atomi
                        startindex = index
                if bondsum != 7: return # exactly two double bonds
                # rotate the ring so it starts at the H carrier
                ring_forw = np.ndarray.tolist(np.roll(cycle, 5 - startindex))
                ring_rev = ring_forw[::-1] # look at the ring in the reverse direction for an H-shift to the other side
                ring_rev = np.ndarray.tolist(np.roll(ring_rev, 1))
                rings = [ring_forw,ring_rev]
                # locate the H bonded to the carrier atom (last one found wins)
                Hatomi = -1
                for atomi in range(natom):
                    if atom[atomi] == 'H':
                        if bond[atomi][start] == 1:
                            Hatomi = atomi
                if Hatomi > -1:
                    # instance = ring atoms (starting at the carrier) + the H
                    for ring in rings:
                        instance = ring[:]
                        instance.append(Hatomi)
                        rxns += [instance]
        self.new_reaction(rxns, name, a=0, b=-1)
        # # filter for specific reaction after this
        # if self.one_reaction_fam and new:
        #     # TODO need to check if this is correct
        #     if self.reac_bonds != {frozenset({inst[0], inst[1]})} or self.prod_bonds != {frozenset({inst[0], inst[-1]})}:
        #         new = 0
        return 0
    def search_intra_OH_migration(self, natom, atom, bond, rad):
        """
        This is an RMG class extended.
        R*~~~~~~~O-OH <==> HOR~~~~~~~O*
        The H atom is not counted in the cycle size but has to be there.
        OH transfer to:
            radical sites
            double bonds on closed shell (just forward)
        """
        name = 'intra_OH_migration'
        if not name in self.reactions:
            self.reactions[name] = []
        rxns = [] #reactions found with the current resonance isomer
        if np.sum(rad) == 0:
            #find OH-migrations over double bonds and to lone pairs
            for ringsize in self.ringrange:
                # double bonds
                # chain ...-O-O-H; the receiving end must sit on a multiple bond
                motif = ['X' for i in range(ringsize)]
                motif[-1] = 'H'
                motif[-2] = 'O'
                motif[-3] = 'O'
                instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
                for instance in instances:
                    if any([bi > 1 for bi in bond[instance[0]]]):
                        rxns += [instance]
        else:
            # NOTE(review): `instances` is re-initialized inside the ringsize
            # loop, while the `for ins in instances` append loop below sits
            # outside it — so only hits from the LAST ringsize survive. The
            # sibling families reset the list before the loop; this looks
            # like an indentation bug — confirm before changing.
            for ringsize in self.ringrange:
                instances = []
                # forward direction
                motif = ['X' for i in range(ringsize+1)]
                motif[-1] = 'H'
                motif[-2] = 'O'
                motif[-3] = 'O'
                for rad_site in np.nonzero(rad)[0]:
                    instances += find_motif.start_motif(motif, natom, bond, atom,
                                                        rad_site, self.species.atom_eqv)
                # reverse direction
                motif = ['X' for i in range(ringsize+1)]
                motif[-1] = 'H'
                motif[-2] = 'O'
                motif[0] = 'O'
                for rad_site in np.nonzero(rad)[0]:
                    instances += find_motif.start_motif(motif, natom, bond, atom,
                                                        rad_site, self.species.atom_eqv)
            for ins in instances:
                rxns.append(ins)
        for case in range(len(rxns)):
            rxns[case] = rxns[case][:-1] #cut off H
        rxns = self.clean_rigid(name, rxns, 0, -1)
        self.new_reaction(rxns, name, a=0, b=-1)
        # # filter for specific reaction after this
        # if self.one_reaction_fam and new:
        #     if self.reac_bonds != {frozenset({inst[-3], inst[-2]})} or self.prod_bonds != {frozenset({inst[0], inst[-2]})}:
        #         new = 0
        return 0
def search_intra_OH_migration_Exocyclic_F(self, natom, atom, bond, rad):
"""
This is the same as search_intra_OH_migration but for double bonds only
0 .....-2-1
R=R~~~~~~~O-OH <==> R=R~~?? + ??~~=O
|
OH
The H atom is not counted in the cycle size but has to be there.
OH transfer to double bonds on closed shell
This is just the forward step as the expectation is that
the product will fall apart while at least one of the framents
also rearrange, yielding two closed shell products.
A special feature is to test both cis and trans transfer, therefore,
in addition to testing for the extra H atom (which is deleted from the motif)
the double bond is also registered and kept in the motif.
"""
name = 'intra_OH_migration_Exocyclic_F'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
if np.sum(rad) == 0:
for ringsize in self.ringrange:
# double bonds
motif = ['X' for i in range(ringsize)]
motif[-1] = 'H'
motif[-2] = 'O'
motif[-3] = 'O'
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for instance in instances:
if bond[instance[0]][instance[1]] == 2:
rxns += [instance[:-1]] # cut off H and add a -1 for nominal cis
rxns[-1].append(-1)
rxns += [instance[:-1]] # cut off H and add a -2 for nominal trans
rxns[-1].append(-2)
self.new_reaction(rxns, name, a=0, b=-1, c=-2)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[-3], inst[-2]})} or self.prod_bonds != {frozenset({inst[0], inst[-2]})}:
# new = 0
return 0
def search_Intra_RH_Add_Endocyclic_F(self, natom, atom, bond, rad):
"""
This is an RMG class.
H
|
H-R~~~~~~~R=R ==> R~~~~~~~R-R
| |
---------
This is for the forward direction.
"""
if np.sum(rad) != 0: return
if len(self.species.cycle_chain) > 0: return
name = 'Intra_RH_Add_Endocyclic_F'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
for ringsize in range(5, 9):
motif = ['X' for i in range(ringsize + 1)]
motif[-1] = 'H'
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
bondpattern = ['X' for i in range(ringsize)]
bondpattern[0] = 2
for instance in instances:
if find_motif.bondfilter(instance, bond, bondpattern) == 0:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=-2, length=True)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[-1], inst[-2]})} or self.prod_bonds != {frozenset({inst[0], inst[-2]}), frozenset({inst[-1], inst[1]})}:
# new = 0
return 0
    # NOTE(review): this method is an exact duplicate of
    # search_Intra_RH_Add_Endocyclic_F defined immediately above. Python keeps
    # this later definition, which silently overrides the earlier one —
    # consider deleting one of the two copies.
    def search_Intra_RH_Add_Endocyclic_F(self, natom, atom, bond, rad):
        """
        This is an RMG class.
                            H
                            |
        H-R~~~~~~~R=R ==>   R~~~~~~~R-R
                            |       |
                            ---------
        This is for the forward direction.
        """
        # closed-shell, acyclic species only
        if np.sum(rad) != 0: return
        if len(self.species.cycle_chain) > 0: return
        name = 'Intra_RH_Add_Endocyclic_F'
        if not name in self.reactions:
            self.reactions[name] = []
        rxns = [] #reactions found with the current resonance isomer
        for ringsize in range(5, 9):
            # chain of ring atoms plus the migrating H at the tail
            motif = ['X' for i in range(ringsize + 1)]
            motif[-1] = 'H'
            instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
            # require a double bond at the head of the chain
            bondpattern = ['X' for i in range(ringsize)]
            bondpattern[0] = 2
            for instance in instances:
                if find_motif.bondfilter(instance, bond, bondpattern) == 0:
                    rxns += [instance]
        self.new_reaction(rxns, name, a=0, b=-2, length=True)
        # # filter for specific reaction after this
        # if self.one_reaction_fam and new:
        #     if self.reac_bonds != {frozenset({inst[-1], inst[-2]})} or self.prod_bonds != {frozenset({inst[0], inst[-2]}), frozenset({inst[-1], inst[1]})}:
        #         new = 0
        return 0
def search_Intra_RH_Add_Endocyclic_R(self, natom, atom, bond, rad):
"""
This is an RMG class.
H
|
R~~~~~~~R-R ==> H-R~~~~~~~R=R
| |
---------
This is for the reverse direction.
"""
if len(self.species.cycle_chain) == 0: return
if np.sum(rad) != 0: return
name = 'Intra_RH_Add_Endocyclic_R'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
for ci in self.species.cycle_chain:
motif = ['X' for i in range(len(ci) + 1)]
motif[-1] = 'H'
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
# check if there is a bond between the first and second to last atom
for instance in instances:
if bond[instance[0]][instance[-2]] > 0:
rxns += [instance[-4:]]
self.new_reaction(rxns, name, a=0, b=1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[-3], inst[-4]}), frozenset({inst[-1], inst[-2]})} or self.prod_bonds != {frozenset({inst[-1], inst[-4]})}:
# new = 0
self.reactions[name]
return 0
def search_Cyclic_Ether_Formation(self, natom, atom, bond, rad):
"""
This is an RMG class.
R*~~~~~~~O-OR ==> R~~~~~~~O + OR
|_______|
The OR groups are not counted in the cycle size but have to be there.
Only the forward direction is included.
"""
if np.sum(rad) == 0: return
name = 'Cyclic_Ether_Formation'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
for ringsize in range(4, 10):
motif = ['X' for i in range(ringsize)]
motif[-2] = 'O'
motif[-3] = 'O'
motif[0] = 'C'
for rad_site in np.nonzero(rad)[0]:
rxns += find_motif.start_motif(motif, natom, bond, atom, rad_site, self.species.atom_eqv)
for instance in range(len(rxns)):
rxns[instance] = rxns[instance][:-2] #cut off OR
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[-2], inst[-3]})} or self.prod_bonds != {frozenset({inst[0], inst[-3]})}:
# new = 0
return 0
def search_Intra_R_Add_Endocyclic_F(self, natom, atom, bond, rad):
"""
This is an RMG class.
*R~~~~~~~~R=R ==> R~~~~~~~~R*-R
|___________|
"""
if np.sum(rad) == 0: return
name = 'Intra_R_Add_Endocyclic_F'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
for ringsize in self.ringrange:
motif = ['X' for i in range(ringsize)]
instances = []
for rad_site in np.nonzero(rad)[0]:
instances += find_motif.start_motif(motif, natom, bond, atom, rad_site, self.species.atom_eqv)
bondpattern = ['X' for i in range(ringsize-1)]
bondpattern[-1] = 2
for instance in instances:
if find_motif.bondfilter(instance, bond, bondpattern) == 0:
rxns += [instance]
bondpattern[-1] = 3
for instance in instances:
if find_motif.bondfilter(instance, bond, bondpattern) == 0:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset()} or self.prod_bonds != {frozenset({inst[0], inst[-1]})}:
# new = 0
return 0
def search_Intra_R_Add_ExoTetCyclic_F(self, natom, atom, bond, rad):
"""
This is an RMG class.
*R~~~~~~~~R-R ==> R~~~~~~~~R + R*
|________|
"""
if np.sum(rad) == 0: return
name = 'Intra_R_Add_ExoTetCyclic_F'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
for ringsize in self.ringrange:
motif = ['X' for i in range(ringsize + 1)]
for rad_site in np.nonzero(rad)[0]:
rxns += find_motif.start_motif(motif, natom, bond, atom, rad_site, self.species.atom_eqv)
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[-1], inst[-2]})} or self.prod_bonds != {frozenset({inst[0], inst[-2]})}:
# new = 0
return 0
def search_Intra_R_Add_Exocyclic_F(self, natom, atom, bond, rad):
"""
This is an RMG class.
*R~~~~~~~~R=R ==> R~~~~~~~~R-R*
|________|
"""
if np.sum(rad) == 0: return
name = 'Intra_R_Add_Exocyclic_F'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
for ringsize in self.ringrange:
motif = ['X' for i in range(ringsize + 1)]
instances = []
for rad_site in np.nonzero(rad)[0]:
instances += find_motif.start_motif(motif, natom, bond, atom, rad_site, self.species.atom_eqv)
bondpattern = ['X' for i in range(ringsize)]
bondpattern[-1] = 2
for instance in instances:
if find_motif.bondfilter(instance, bond, bondpattern) == 0:
rxns += [instance]
bondpattern[-1] = 3
for instance in instances:
if find_motif.bondfilter(instance, bond, bondpattern) == 0:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=-2)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset()} or self.prod_bonds != {frozenset({inst[0], inst[-2]})}:
# new = 0
return 0
def search_Intra_RH_Add_Exocyclic_F(self, natom, atom, bond, rad):
"""
This is an RMG class.
The general scheme is:
H-R~~~~~~R=R ==> R~~~~~~R-R-H
| |
------
The special case of this reaction is Korcel_step1:
R R OH
R / \ /
\ / \C=O C
| ==> / \
O H | O
\ / / \ /
O R O
Implemented as:
--O--O--
| |
O=C~~~~~~~~C-O-O-H ==> HO-C~~~~~~~~C
| |
R R
The carbonyl dangling R and the
tail H are included, but are not counted as the ring size, but these two atoms are kept
because they are needed in the geometry manipulation step.
"""
if len(self.species.cycle_chain) > 0: return
name = 'Intra_RH_Add_Exocyclic_F'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
for ringsize in self.ringrange:
motif = ['X' for i in range(ringsize+2)]
motif[-1] = 'H'
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
bondpattern = ['X' for i in range(ringsize+1)]
bondpattern[0] = 2
for instance in instances:
if find_motif.bondfilter(instance, bond, bondpattern) == 0:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[-1], inst[-2]})} or self.prod_bonds != {frozenset({inst[1], inst[-2]}), frozenset({inst[-1], inst[0]})}:
# new = 0
return 0
def search_Intra_RH_Add_Exocyclic_R(self, natom, atom, bond, rad):
"""
This is an RMG class.
H
|
H-R~~~~~~~R=R <== R~~~~~~~R-R
|_______|
This is for the reverse direction.
"""
name = 'Intra_RH_Add_Exocyclic_R'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
if len(self.species.cycle_chain) == 0: return
if np.sum(rad) != 0: return
for ci in self.species.cycle_chain:
motif = ['X' for i in range(len(ci) + 2)]
motif[-1] = 'H'
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
# check if there is a bond between the first and second to last atom
for instance in instances:
if bond[instance[0]][instance[-3]] > 0:
rxns += [instance[-4:]]
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[-1], inst[-2]}), frozenset({inst[-3], inst[0]})} or self.prod_bonds != {frozenset({inst[-1], inst[0]})}:
# new = 0
return 0
def search_Retro_Ene(self, natom, atom, bond, rad):
"""
This is not an RMG class.
R-R-R-R=R ==> R=R + R=R-R
"""
if np.sum(rad) != 0: return
name = 'Retro_Ene'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
motif = ['X' for i in range(6)]
motif[-1] = 'H'
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
bondpattern = ['X' for i in range(5)]
bondpattern[0] = 2
for instance in instances:
if find_motif.bondfilter(instance, bond, bondpattern) == 0:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[2], inst[3]})} or self.prod_bonds != {frozenset()}:
# new = 0
return 0
    def search_Korcek_step2_odd(self, natom, atom, bond, rad):
        """
        Korcek step 2 for cyclic peroxides originating with odd number of atoms in the cycle.
        Ring breaks at O-O and then forms 1 three-ringatom and (ringsize-3)/2 two-ringatom
        fragments.
        The three-ringatom fragment needs to have a H atom transfer.
        Numbering:
        0 1 2 3 4 5 6 7 8
        O-X-X-X-X-X-X-X-O
        if the fragment is set to 2 2 3 2, then it breaks like
        O-X X-X X-X-X X-X
        and the X-X-X will have 0, 1, or 2 possible H transfers.
        If the H transfer is not possible, the path is abandoned.
        The instance name is:
        all atoms in the chain, the middle atom in the triplet,
        and the atom to which the H is migrates in the triplet
        and the hydrogen itself
        """
        name = 'Korcek_step2_odd'
        if not name in self.reactions:
            self.reactions[name] = []
        rxns = [] #reactions found with the current resonance isomer
        for ringsize in range(5, 15, 2): # odd number of atoms in the ring
            # O-terminated chain of ring atoms
            motif = ['X' for i in range(ringsize)]
            motif[-1] = 'O'
            motif[0] = 'O'
            korcek_chain = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
            # filter clockwise and anti clockwise hits
            # (every O...O path through the ring is found in both directions)
            korcek_chain_filt = []
            for kch in korcek_chain:
                k = copy.deepcopy(kch) # need in order to prevent changes to korcek_chain with reverse()
                l = copy.deepcopy(kch)
                l.reverse()
                if k not in korcek_chain_filt and l not in korcek_chain_filt:
                    korcek_chain_filt.append(kch)
            for ins in korcek_chain_filt:
                if bond[ins[0]][ins[-1]] == 1: # it is a ring
                    # fragmentation pattern: one 3-atom fragment, rest 2-atom
                    fragment = [2] * int((ringsize - 3) / 2 + 1)
                    fragment[0] = 3 # [3, 2, 2, ...]
                    for ii in range(len(fragment)): # loop over all possible 2/3 fragmentation
                        threefrag = ins[ii * 2 : ii * 2 + 3] # atoms in the 3-long fragment
                        for at in range(natom):
                            if bond[threefrag[1]][at] == 1 and atom[at] == 'H': # there is H on the middle atom
                                # if there are 2 hydrogens, they are treated separately, as they are not
                                # in general equivalent due to the ring
                                ins_full = ins + [threefrag[1]] + [threefrag[0]] + [at] # H adds to the first atom of this fragment
                                rxns += [ins_full]
                                ins_full = ins + [threefrag[1]] + [threefrag[2]] + [at] # H adds to the second atom of this fragment
                                rxns += [ins_full]
        self.new_reaction(rxns, name, full=True)
        # for n, inst in enumerate(rxns):
        #     new = 1
        #     #filter for the same reactions
        #     for instance in self.reactions[name]:
        #         if inst == instance:
        #             new = 0
        #     # filter for specific reaction after this # TODO
        #     #if self.one_reaction_fam and new:
        #     #    if ring_var[n] == 7:
        #     #        if (not {frozenset({inst[-2], inst[-3]}), frozenset({inst[0], inst[1]})}.issubset(self.reac_bonds)) or self.prod_bonds != {frozenset()}:
        #     #            new = 0
        #     #    if ring_var[n] == 8:
        #     #        # TODO this is an incomplete check
        #     #        if self.reac_bonds != {frozenset({inst[-2], inst[-3]}), frozenset({inst[-4], inst[-5]}), frozenset({inst[0], inst[1]})}:
        #     #            new = 0
        #     if new:
        #         self.reactions[name].append(inst)
        return 0
def search_Korcek_step2_even(self, natom, atom, bond, rad):
"""
Korcek step 2 for cyclic peroxides with even number of atoms in the ring.
Still, the 4 membered ring equals a 2,2 cycloaddition and is not considered here.
Ring breaks at O-O and then at every second bond, no H shift is needed.
"""
name = 'Korcek_step2_even'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
for ringsize in range(6, 14, 2): # even number of atoms in the ring
motif = ['X' for i in range(ringsize)]
motif[-1] = 'O'
motif[0] = 'O'
korcek_chain = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
# filter clockwise and anti clockwise hits
korcek_chain_filt = []
for kch in korcek_chain:
k = copy.deepcopy(kch) # need in order to prevent changes to korcek_chain with reverse()
l = copy.deepcopy(kch)
l.reverse()
if k not in korcek_chain_filt and l not in korcek_chain_filt:
korcek_chain_filt.append(kch)
for ins in korcek_chain_filt:
if bond[ins[0]][ins[-1]] == 1: # it is a ring
rxns += [ins]
self.new_reaction(rxns, name, full=True)
# for n, inst in enumerate(rxns):
# new = 1
# #filter for the same reactions
# for instance in self.reactions[name]:
# if inst == instance:
# new = 0
# # filter for specific reaction after this
# #if self.one_reaction_fam and new:
# # if ring_var[n] == 7:
# # if (not {frozenset({inst[-2], inst[-3]}), frozenset({inst[0], inst[1]})}.issubset(self.reac_bonds)) or self.prod_bonds != {frozenset()}:
# # new = 0
# # if ring_var[n] == 8:
# # # TODO this is an incomplete check
# # if self.reac_bonds != {frozenset({inst[-2], inst[-3]}), frozenset({inst[-4], inst[-5]}), frozenset({inst[0], inst[1]})}:
# # new = 0
# if new:
# self.reactions[name].append(inst)
return 0
def search_Korcek_step2(self, natom, atom, bond, rad):
"""
Generalized Korcek step
The 4 membered ring equals a 2,2 cycloaddition and is not considered here (no H shift involved)
The 5 membered ring proceeds through a 6 membered transition state (including a 1,2 H migration):
--O--O--
| |
HO-C---C----C-R ==> RCOOH + R3CC(R)O
| / \ |
R R R R
6-membered ring: TODO
Only the forward direction is included.
"""
name = 'Korcek_step2'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
ring_var = [] # a helper variable to temporarily mark the ring size within this function
for ringsize in range(5, 6):
motif = ['X' for i in range(ringsize + 1)]
#motif[-1] = 'H' # deleted because atom types are no longer checked
korcek_chain = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for ins in korcek_chain:
if bond[ins[0]][ins[-2]] == 1:
rxns += [ins]
ring_var.append(ringsize)
self.new_reaction(rxns, name, a=0, b=-1)
# for n, inst in enumerate(rxns):
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if ring_var[n] == 7:
# if (not {frozenset({inst[-2], inst[-3]}), frozenset({inst[0], inst[1]})}.issubset(self.reac_bonds)) or self.prod_bonds != {frozenset()}:
# new = 0
# if ring_var[n] == 8:
# # TODO this is an incomplete check
# if self.reac_bonds != {frozenset({inst[-2], inst[-3]}), frozenset({inst[-4], inst[-5]}), frozenset({inst[0], inst[1]})}:
# new = 0
return 0
def search_r22_cycloaddition(self, natom, atom, bond, rad):
"""
This is an RMG class.
R R R---R
|| + || <== | |
R R R---R
N.B.: only the reverse direction is available. Also, the 3 related RMG classes are treated as one.
"""
name = 'r22_cycloaddition'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
if not any([len(ci) == 4 for ci in self.species.cycle_chain]): return
for ci in self.species.cycle_chain:
if len(ci) == 4:
# there are two ways to slice a 4-mem ring
ring1 = ci
ring2 = np.ndarray.tolist(np.roll(ring1, 1))
# FIXME only works for 1 cycle
rxns += [ring1]
rxns += [ring2]
self.new_reaction(rxns, name, a=0, b=1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# # TODO need to make sure that these are the bonds that are broken, see the reaction details
# if self.reac_bonds != {frozenset({inst[0], inst[1]}), frozenset({inst[2], inst[3]})} or self.prod_bonds != {frozenset()}:
# new = 0
return 0
def search_r12_cycloaddition(self, natom, atom, bond, rad):
"""
This is an RMG class.
R--R
R=R + R: <== \ /
R
N.B.: only the reverse direction is available.
"""
name = 'r12_cycloaddition'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
if not any([len(ci) == 3 for ci in self.species.cycle_chain]): return
for ci in self.species.cycle_chain:
if len(ci) == 3:
# there are three ways to slice a 3-mem ring
ring1 = self.species.cycle_chain
ring2 = np.ndarray.tolist(np.roll(ring1, 1))
ring3 = np.ndarray.tolist(np.roll(ring1, 2))
# FIXME only works for 1 cycle
rxns += ring1
rxns += ring2
rxns += ring3
self.new_reaction(rxns, name, a=0, b=1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# # TODO need to make sure that these are the bonds that are broken, see the reaction details
# if self.reac_bonds != {frozenset({inst[0], inst[2]}), frozenset({inst[1], inst[2]})} or self.prod_bonds != {frozenset()}:
# new = 0
return 0
def search_r12_insertion_R(self, natom, atom, bond, rad):
"""
This is an RMG class.
X
|
X-P + R-R <== R-P-R
"""
#if np.sum(rad) != 0: return
name = 'r12_insertion_R'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
motif = ['X','X','X']
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for instance in instances:
#if all([atom[atomi] != 'H' for atomi in instance]):
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=1, c=2)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != set({frozenset({inst[0], inst[1]}), frozenset({inst[1], inst[2]})}) or self.prod_bonds != {frozenset({inst[0], inst[2]})}:
# new = 0
return 0
def search_r13_insertion_CO2(self, natom, atom, bond, rad):
"""
This is an RMG class.
O
||
O=C=O + R-R <== R-C-O-R
"""
#if np.sum(rad) != 0: return
name = 'r13_insertion_CO2'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
motif = ['X','C','O','X']
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for instance in instances:
for atomi in range(natom):
if not atomi in instance:
if atom[atomi] == 'O':
if bond[atomi][instance[1]] == 2:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[0], inst[1]}), frozenset({inst[2], inst[3]})} or self.prod_bonds != {frozenset({inst[0], inst[3]})}:
# new = 0
return 0
def search_r13_insertion_ROR(self, natom, atom, bond, rad):
"""
This is an RMG class.
R1-O-R2 + R=R <== R1-R-R-O-R2
"""
#if np.sum(rad) != 0: return
name = 'r13_insertion_ROR'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
motif = ['X','X','X','O']
rxns = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != set({frozenset({inst[0], inst[1]}), frozenset({inst[2], inst[3]})}) or self.prod_bonds != {frozenset({inst[0], inst[3]})}:
# new = 0
return 0
    def search_Diels_alder_addition(self, natom, atom, bond, rad):
        """
        This is an RMG class.
        R          R
        //        /  \
        R   R    R    R
        |  + ||  <==  ||   |
        R   R    R    R
        \\        \  /
        R          R
        N.B.: only the reverse direction is available.

        Looks for 6-membered rings that contain exactly one double bond and
        registers the ring rotated so that the double bond sits on the bond
        between the last and first atom of the instance.
        """
        name = 'Diels_alder_addition'
        if not name in self.reactions:
            self.reactions[name] = []
        rxns = [] #reactions found with the current resonance isomer
        if not any([len(ci) == 6 for ci in self.species.cycle_chain]): return
        for ci in self.species.cycle_chain:
            if len(ci) == 6:
                bondsum = 0
                # sum all six ring bond orders and remember where the double
                # bond sits (wrapping around at the last position)
                for index, atomi in enumerate(ci):
                    if index < 5:
                        atomj = ci[index + 1]
                    else:
                        atomj = ci[0]
                    bondsum += bond[atomi][atomj]
                    if bond[atomi][atomj] == 2:
                        start = atomi  # NOTE(review): assigned but never used
                        startindex = index
                # five single bonds + one double bond sum to 7
                # NOTE(review): this `return` aborts the whole search on the
                # first non-matching 6-ring instead of continuing to the next
                # ring -- presumably acceptable given the single-cycle FIXME
                # below, but confirm.
                if bondsum != 7: return # exactly one double bond
                # rotate the ring so the double bond lands on the closing bond
                ring = np.ndarray.tolist(np.roll(ci, 6 - startindex))
                rxns += [ring] # FIXME only works for 1 cycle
        self.new_reaction(rxns, name, a=0, b=1)
        # # filter for specific reaction after this
        # if self.one_reaction_fam and new:
        # if self.reac_bonds != set({frozenset({inst[2], inst[3]}), frozenset({inst[4], inst[5]})}) or self.prod_bonds != {frozenset()}:
        # new = 0
        return 0
def search_Intra_Diels_alder_R(self, natom, atom, bond, rad):
"""
This is an RMG class.
TODO it seems like this is the forward reaction, but the naming is confusing.
C
/ \\
C C
C=C-C=C~~~C=C <== | |
C C
\ //
C
"""
name = 'Intra_Diels_alder_R'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
for ringsize in self.ringrange: # TODO what is the meaning of these larger rings?
motif = ['X' for i in range(ringsize + 4)]
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
bondpattern = ['X' for i in range(ringsize + 3)]
bondpattern[0] = 2
bondpattern[2] = 2
bondpattern[-1] = 2
for instance in instances:
if find_motif.bondfilter(instance, bond, bondpattern) == 0:
#inst = instance[:4] + instance[-2:]
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset()} or self.prod_bonds != {frozenset({inst[0], inst[-1]})}:
# new = 0
return 0
def search_ketoenol(self, natom, atom, bond, rad):
"""
This is an RMG class.
R=R-O-R <==> R-R-R=O
"""
name = 'ketoenol'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
# enol to keto
motif = ['C', 'C', 'O', 'X']
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
# keto to enol
motif = ['O', 'C', 'C', 'X']
instances += find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
bondpattern = [2, 'X', 'X', 'X']
for instance in instances:
if find_motif.bondfilter(instance, bond, bondpattern) == 0:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=1, c=2, d=3)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[2], inst[3]})} or self.prod_bonds != {frozenset({inst[0], inst[1]})}:
# new = 0
return 0
def search_HO2_Elimination_from_PeroxyRadical(self, natom, atom, bond, rad):
"""
This is an RMG class.
H-R-R-O-O* ==> R=R + HO2
N.B.: only the forward direction is available.
"""
if np.sum(rad) == 0: return
name = 'HO2_Elimination_from_PeroxyRadical'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
motif = ['H', 'X', 'X', 'O', 'O']
rxns += find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != set({frozenset({inst[0], inst[1]}), frozenset({inst[2], inst[3]})}) or self.prod_bonds != {frozenset({inst[0], inst[4]})}:
# new = 0
return 0
def search_R_Addition_COm3_R(self, natom, atom, bond, rad):
"""
This is an RMG class.
C#O + R* <== R-C*=O
N.B.: only the reverse direction is available.
"""
if np.sum(rad) == 0: return
name = 'R_Addition_COm3_R'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
motif = ['X', 'C', 'O']
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for instance in instances:
bondpattern = [1, 2]
if find_motif.bondfilter(instance, bond, bondpattern) == 0:
if rad[instance[1]] == 1:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=1, c=2)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[0], inst[1]})} or self.prod_bonds != {frozenset()}:
# new = 0
return 0
def search_R_Addition_MultipleBond(self, natom, atom, bond, rad):
"""
This is an RMG class.
R=R + R* <== R*-R-R
N.B.: only the reverse direction is available.
"""
if np.sum(rad) == 0: return
name = 'R_Addition_MultipleBond'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
motif = ['X', 'X', 'X']
for rad_site in np.nonzero(rad)[0]:
rxns += find_motif.start_motif(motif, natom, bond, atom, rad_site, self.species.atom_eqv)
self.new_reaction(rxns, name, a=0, b=1, c=2)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[1], inst[2]})} or self.prod_bonds != {frozenset()}:
# new = 0
return 0
def search_12_shift_S_F(self, natom, atom, bond, rad):
"""
This is an RMG class.
"""
if np.sum(rad) != 1: return
name = '12_shift_S_F'
if not name in self.reactions:
self.reactions[name] = []
motif = ['X','S','X']
rxns = []
for rad_site in np.nonzero(rad)[0]:
rxns += find_motif.start_motif(motif, natom, bond, atom, rad_site, self.species.atom_eqv)
#filter for identical reactions
for inst in rxns:
new = 1
for instance in self.reactions[name]:
if inst[0] == instance[0] and inst[1] == instance[1] and inst[2] == instance[2]:
new = 0
# filter for specific reaction after this
if self.one_reaction_fam and new:
if self.reac_bonds != {frozenset({inst[1], inst[2]})} or self.prod_bonds != {frozenset()}:
new = 0
if new:
self.reactions[name].append(inst)
return 0
def search_12_shift_S_R(self, natom, atom, bond, rad):
"""
This is an RMG class.
C-S-R* <== *S-R-C
TODO: why not forward??
"""
if np.sum(rad) != 1: return
name = '12_shift_S_R'
if not name in self.reactions:
self.reactions[name] = []
motif = ['S','X','X']
rxns = []
for rad_site in np.nonzero(rad)[0]:
rxns += find_motif.start_motif(motif, natom, bond, atom, rad_site, self.species.atom_eqv)
for inst in rxns:
new = 1
# filter for identical reactions
for instance in self.reactions[name]:
if inst[0] == instance[0] and inst[1] == instance[1] and inst[2] == instance[2]:
new = 0
# filter for specific reaction after this
if self.one_reaction_fam and new:
if self.reac_bonds != {frozenset({inst[0], inst[1]})} or self.prod_bonds != {frozenset({inst[0], inst[2]})}:
new = 0
if new:
self.reactions[name].append(inst)
return 0
def search_r13_insertion_RSR(self, natom, atom, bond, rad):
"""
This is an RMG class.
R-S-R + R1=R2 <== R-R1-R2-S-R
"""
#if np.sum(rad) != 0: return
name = 'r13_insertion_RSR'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
motif = ['X','X','X','S']
rxns = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != set({frozenset({inst[0], inst[1]}), frozenset({inst[0], inst[1]})}) or self.prod_bonds != {frozenset({inst[0], inst[3]})}:
# new = 0
return 0
def search_R_Addition_CSm_R(self, natom, atom, bond, rad):
"""
This is an RMG class.
C#S + R* <== R-C*=S
N.B.: only the reverse direction is available.
"""
if np.sum(rad) == 0: return
name = 'R_Addition_CSm_R'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
motif = ['X', 'C', 'S']
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for instance in instances:
bondpattern = [1, 2]
if find_motif.bondfilter(instance, bond, bondpattern) == 0:
if rad[instance[1]] == 1:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=1, c=2)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[0], inst[1]})} or self.prod_bonds != {frozenset()}:
# new = 0
return 0
def search_r14_birad_scission(self, natom, atom, bond, rad):
"""
This is an RMG class.
It is now renamed to 1,4_Linear_birad_scission on the RMG website,
*R-R-R-R* ==> R=R + R=R
Problematic reaction because of the biradical character.
"""
if np.sum(rad) != 2: return
name = 'r14_birad_scission'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
motif = ['X','X','X','X']
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for instance in instances:
if rad[instance[0]] == 1 and rad[instance[-1]] == 1:
rxns += [instance]
self.new_reaction(rxns, name, a=1, b=2)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[1], inst[2]})} or self.prod_bonds != {frozenset()}:
# new = 0
return 0
def search_r14_cyclic_birad_scission_R(self, natom, atom, bond, rad):
"""
This is an RMG class.
R1-R*~~~~~~R*-R2 <== R1=R~~~~~~R=R2
|______________|
(this one bond)
TODO forward?
"""
if np.sum(rad) != 0: return
name = 'r14_cyclic_birad_scission_R'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
for ringsize in range(5, 9):
motif = ['X' for i in range(ringsize)]
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
bondpattern = ['X' for i in range(ringsize - 1)]
bondpattern[0] = 2
bondpattern[-1] = 2
for instance in instances:
if find_motif.bondfilter(instance, bond, bondpattern) == 0:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset()} or self.prod_bonds != {frozenset({inst[0], inst[-1]})}:
# new = 0
return 0
def search_birad_recombination_F(self, natom, atom, bond, rad):
"""
This is an RMG class.
*R~~~~~~~~R* ==> R~~~~~~~~R
|________|
"""
if np.sum(rad) != 2: return
name = 'birad_recombination_F'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
for ringsize in self.ringrange:
motif = ['X' for i in range(ringsize)]
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for instance in instances:
if rad[instance[0]] == 1 and rad[instance[-1]] == 1:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset()} or self.prod_bonds != {frozenset({inst[0], inst[-1]})}:
# new = 0
return 0
def search_birad_recombination_R(self, natom, atom, bond, rad):
"""
This is an RMG class.
*R~~~~~~~~R* <== R~~~~~~~~R
|________|
"""
if np.sum(rad) != 0: return
if len(self.species.cycle_chain) == 0: return
name = 'birad_recombination_R'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
motif = ['X','X']
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for instance in instances:
if instance[0] in self.cycle and instance[1] in self.cycle :
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[0], inst[1]})} or self.prod_bonds != {frozenset()}:
# new = 0
return 0
def search_Intra_disproportionation_F(self, natom, atom, bond, rad):
"""
This is an RMG class.
*R~~~~~R*-R-H ==> H-R~~~~~R=R
"""
if np.sum(rad) != 2: return
name = 'Intra_disproportionation_F'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
for ringsize in range(5, 9):
motif = ['X' for i in range(ringsize)]
motif[-1] = 'H'
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for instance in instances:
if rad[instance[0]] == 1 and rad[instance[-3]] == 1:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[-1], inst[-2]})} or self.prod_bonds != {frozenset({inst[0], inst[-1]})}:
# new = 0
return 0
def search_Intra_disproportionation_R(self, natom, atom, bond, rad):
"""
This is an RMG class.
*R~~~~~R*-R-H <== H-R~~~~~R=R
"""
if np.sum(rad) != 0: return
name = 'Intra_disproportionation_R'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
for ringsize in range(5, 9):
motif = ['X' for i in range(ringsize)]
motif[-1] = 'H'
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
bondpattern = ['X' for i in range(ringsize - 1)]
bondpattern[0] = 2
for instance in instances:
if find_motif.bondfilter(instance, bond, bondpattern) == 0:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[-1], inst[-2]})} or self.prod_bonds != {frozenset({inst[0], inst[-1]})}:
# new = 0
return 0
def search_beta_delta(self, natom, atom, bond, rad):
"""
This is not an RMG class.
A*-B-C-D-E ==> A=B + C=D + E*
It is the parallel breaking of not just the beta but also of the gamma bond, resulting in two unsaturated bonds and a radical.
"""
if np.sum(rad) == 0: return
name = 'beta_delta'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
motif = ['X', 'X', 'X', 'X', 'X']
for rad_site in np.nonzero(rad)[0]:
rxns += find_motif.start_motif(motif, natom, bond, atom, rad_site, self.species.atom_eqv)
self.new_reaction(rxns, name, a=0, b=1, c=2, d=3, e=4)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[1], inst[2]}), frozenset({inst[3], inst[4]})} or self.prod_bonds != {frozenset()}:
# new = 0
return 0
def search_h2_elim(self, natom, atom, bond, rad):
"""
This is not an RMG class.
H H
| |
X - X ==> X=X + H2
"""
name = 'h2_elim'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
motif = ['H','X','X','H']
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for instance in instances:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=-1, cross=True)
# # filter for the same reactions
# for instance in self.reactions[name]:
# if inst[0] == instance[0] and inst[-1] == instance[-1]:
# new = 0
# if inst[0] == instance[-1] and inst[-1] == instance[0]:
# new = 0
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[0], inst[1]}), frozenset({inst[2], inst[3]})} or self.prod_bonds != {frozenset({inst[0], inst[3]})}:
# new = 0
return 0
def search_hom_sci(self, natom, atom, bond, rad):
"""
This is not an RMG class.
R-R ==> R + R
"""
name = 'hom_sci'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] # reactions found with the current resonance isomer
motif = ['X','X']
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for instance in instances:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=1, cross=True)
# for inst in rxns:
# new = 1
# # filter for the same reactions
# for instance in self.reactions[name]:
# if inst[0] == instance[1] and inst[1] == instance[0]:
# new = 0
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[0], inst[1]}), frozenset({inst[2], inst[3]})} or self.prod_bonds != {frozenset({inst[0], inst[3]})}:
# new = 0
return 0
def search_barrierless_saddle(self, natom, atom, bond, rad):
"""
This is not an RMG class.
R - R ==> R + R
Attempts to find a saddle point for a nominally barrierless reaction.
"""
name = 'barrierless_saddle'
if not name in self.reactions:
self.reactions[name] = []
if self.barrierless_saddle is not None:
rxns = self.barrierless_saddle # defined by the user
else:
return 0
self.new_reaction(rxns, name, a=0, b=-1, cross=True)
# for inst in rxns:
# new = 1
# # filter for the same reactions
# for instance in self.reactions[name]:
# if inst[0] == instance[0] and inst[-1] == instance[-1]:
# new = 0
# if inst[0] == instance[-1] and inst[-1] == instance[0]:
# new = 0
# # no filter for specific reaction after this, this is a specific reaction already
return 0
def reaction_matrix(self, reac_list, reac_id):
"""
Create arrays to store all reactions for species.
input:
reac_list: atom motifs from individual searches
reac_id: reaction name (e.g., HO2_Elimination_from_PeroxyRadical) from individual searc functions
Every reaction type just makes the below arrays longer, generated as reactions are found.
generated:
reac_type: reaction class identifier
reac_inst: reaction instance defined by the important atoms
reac_step: the step at which the search is at
reac_scan_energy: for each reaction the energy as a function of steps, only used for scanning type searches, e.g. R_Addition_MultipleBond
rec_ts_done: the last calculations is submitted in the sequence
reac_ts_geom: the geometry of the TS
reac_ts_freq: the freqencies of the TS
reac_name: the base name of the file to run - created for each reaction later
"""
self.species.reac_type += [reac_id for i in range(len(reac_list))]
self.species.reac_inst += reac_list
self.species.reac_step += [0 for i in range(len(reac_list))]
self.species.reac_scan_energy += [[] for i in range(len(reac_list))]
self.species.reac_ts_done += [0 for i in range(len(reac_list))]
self.species.reac_ts_geom += [0 for i in range(len(reac_list))]
self.species.reac_ts_freq += [0 for i in range(len(reac_list))]
for i in range(len(reac_list)):
if reac_id == 'intra_H_migration':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(IntraHMigration(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'intra_H_migration_suprafacial':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(IntraHMigrationSuprafacial(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'intra_R_migration':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(IntraRMigration(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'intra_OH_migration':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(IntraOHMigration(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'intra_OH_migration_Exocyclic_F':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][1] + 1) + '_' + str(reac_list[i][-2] + 1) + '_' + str(reac_list[i][-1]) # last element is cis/trans (-1, -2)
self.species.reac_name.append(name)
self.species.reac_obj.append(IntraOHMigrationExocyclicF(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'cpd_H_migration':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1) + '_' + str(reac_list[i][-2] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(CpdHMigration(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'Intra_RH_Add_Endocyclic_F':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(len(reac_list[i])) + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-2] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(IntraRHAddEndoF(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'Intra_RH_Add_Endocyclic_R':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(IntraRHAddEndoR(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'Cyclic_Ether_Formation':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(CyclicEtherFormation(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'Intra_RH_Add_Exocyclic_F':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(IntraRHAddExoF(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'Intra_RH_Add_Exocyclic_R':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(IntraRHAddExoR(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'Retro_Ene':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(RetroEne(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'Intra_R_Add_Endocyclic_F':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(IntraRAddEndocyclicF(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'Intra_R_Add_ExoTetCyclic_F':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-2] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(IntraRAddExoTetCyclicF(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'Intra_R_Add_Exocyclic_F':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-2] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(IntraRAddExocyclicF(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'Korcek_step2_odd':
name = str(self.species.chemid) + '_' + reac_id
for j in range(len(reac_list[i])):
name += '_' + str(reac_list[i][j] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(KorcekStep2Odd(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'Korcek_step2_even':
name = str(self.species.chemid) + '_' + reac_id
for j in range(len(reac_list[i])):
name += '_' + str(reac_list[i][j] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(KorcekStep2Even(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'Korcek_step2':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(KorcekStep2(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'r22_cycloaddition':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(R22Cycloaddition(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'r12_cycloaddition':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(R12Cycloaddition(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'r12_insertion_R':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1) + '_' + str(reac_list[i][2] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(R12Insertion(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'r13_insertion_CO2':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(R13InsertionCO2(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'r13_insertion_ROR':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1) + '_' + str(reac_list[i][2] + 1) + '_' + str(reac_list[i][3] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(R13InsertionROR(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'r14_birad_scission':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][1] + 1) + '_' + str(reac_list[i][2] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(R14BiradScission(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'r14_cyclic_birad_scission_R':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(R14CyclicBiradScission(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'birad_recombination_F':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(BiradRecombinationF(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'birad_recombination_R':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(BiradRecombinationR(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'Intra_disproportionation_F':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(IntraDisproportionationF(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'Intra_disproportionation_R':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(IntraDisproportionationR(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'Diels_alder_addition':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(DielsAlder(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'Intra_Diels_alder_R':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(IntraDielsAlder(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'ketoenol':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1) + '_' + str(reac_list[i][2] + 1) + '_' + str(reac_list[i][3] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(KetoEnol(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'HO2_Elimination_from_PeroxyRadical':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][-1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(HO2Elimination(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'R_Addition_COm3_R':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1) + '_' + str(reac_list[i][2] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(RAdditionCO(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'R_Addition_MultipleBond':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1) + '_' + str(reac_list[i][2] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(RAdditionMultipleBond(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == '12_shift_S_F':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1) + '_' + str(reac_list[i][2] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(S12ShiftF(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == '12_shift_S_R':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1) + '_' + str(reac_list[i][2] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(S12ShiftR(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'R_Addition_CSm_R':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1) + '_' + str(reac_list[i][2] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(RAdditionCS(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'r13_insertion_RSR':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1) + '_' + str(reac_list[i][2] + 1) + '_' + str(reac_list[i][3] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(R13InsertionRSR(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'beta_delta':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1) + '_' + str(reac_list[i][2] + 1) + '_' + str(reac_list[i][3] + 1) + '_' + str(reac_list[i][4] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(BetaDelta(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'h2_elim':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][3] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(H2Elim(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'hom_sci':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(HS(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'barrierless_saddle':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(reac_list[i][0] + 1) + '_' + str(reac_list[i][1] + 1)
self.species.reac_name.append(name)
self.species.reac_obj.append(BarrierlessSaddle(self.species, self.qc, self.par, reac_list[i], name))
elif reac_id == 'combinatorial':
name = str(self.species.chemid) + '_' + reac_id + '_' + str(i)
self.species.reac_name.append(name)
self.species.reac_obj.append(Combinatorial(self.species, self.qc, self.par, reac_list[i], name))
else:
self.species.reac_name.append(0)
return 0
def clean_rigid(self, name, instances, pivot1, pivot2):
    """
    Drop instances whose rigid backbone would block atom transfer.

    An instance is removed when every bond along the motif (except the
    final one) is a partial or full double bond — i.e., the backbone is
    rigid — and the two pivot atoms sit farther apart than the cutoff,
    e.g., H transfer across a large rigid ring.
    Pivots require manual determination for each reaction family where
    this matters; not applied to all families.

    Returns the filtered list of instances.
    """
    cutoff = 3.  # Angstrom
    keep = [True for _ in instances]
    for idx, motif in enumerate(instances):
        # Rigid backbone: every consecutive pair (bar the last bond) shows
        # bond order > 1 in the maximum-bond matrix.
        rigid = all(self.species.maxbond[motif[k]][motif[k + 1]] > 1
                    for k in range(len(motif) - 2))
        if not rigid:
            continue
        pivot_dist = np.linalg.norm(self.species.geom[motif[pivot1]]
                                      - self.species.geom[motif[pivot2]])
        if pivot_dist <= cutoff:
            continue
        keep[idx] = False
        numbers = [at + 1 for at in motif]
        logging.info(f'{name} reaction {numbers} over rigid backbone with cutoff {cutoff} A is removed.')
    return list(np.array(instances)[keep])
def new_reaction(self, rxns, name, a=None, b=None, c=None, d=None, e=None, length=None, full=False, cross=False):
    """
    Append every instance of rxns not yet present in self.reactions[name].

    Two instances are considered identical when they agree on all of the
    enabled checks:
    - a, b, c, d, e: equality at the given index (check only done when the
      index is not None)
    - length: same number of elements (any non-None value enables the check)
    - full: element-wise equality across the instances
    - cross: equality at positions a and b, also when interchanged

    Always returns 0.  (The original docstring claimed 1/0 depending on
    novelty, but no code path ever returned 1.)
    """
    for inst in rxns:
        new = True
        for instance in self.reactions[name]:
            if cross:
                if inst[a] == instance[a] and inst[b] == instance[b]:
                    new = False
                    break
                if inst[a] == instance[b] and inst[b] == instance[a]:
                    new = False
                    break
            if a is not None and inst[a] != instance[a]:
                continue
            if b is not None and inst[b] != instance[b]:
                continue
            if c is not None and inst[c] != instance[c]:
                continue
            if d is not None and inst[d] != instance[d]:
                continue
            if e is not None and inst[e] != instance[e]:
                continue
            if length is not None and len(inst) != len(instance):
                continue
            # zip stops at the shorter sequence, so a stored instance
            # shorter than inst no longer raises IndexError (the original
            # indexed instance with inst's indices).
            if full and any(x != y for x, y in zip(inst, instance)):
                continue
            # All enabled checks matched: inst duplicates this instance.
            new = False
        if new:
            self.reactions[name].append(inst)
    return 0
def main():
    """
    Find reaction patterns
    """
    # NOTE(review): placeholder entry point — the body is only a docstring,
    # so running this module standalone performs no work.


if __name__ == "__main__":
    main()
| [
"reactions.reac_r14_birad_scission.R14BiradScission",
"reactions.reac_intra_OH_migration_Exocyclic_F.IntraOHMigrationExocyclicF",
"reactions.reac_h2_elim.H2Elim",
"reactions.reac_birad_recombination_R.BiradRecombinationR",
"reactions.reac_R_Addition_CSm_R.RAdditionCS",
"reactions.reac_birad_recombination_... | [((9306, 9354), 'logging.info', 'logging.info', (['"""\tFound the following reactions:"""'], {}), "('\\tFound the following reactions:')\n", (9318, 9354), False, 'import logging\n'), ((9920, 9996), 'kinbot.bond_combinations.generate_all_product_bond_matrices', 'bond_combinations.generate_all_product_bond_matrices', (['self.species', 'self.par'], {}), '(self.species, self.par)\n', (9972, 9996), False, 'from kinbot import bond_combinations\n'), ((13257, 13332), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (13279, 13332), False, 'from kinbot import find_motif\n'), ((36489, 36564), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (36511, 36564), False, 'from kinbot import find_motif\n'), ((48727, 48802), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (48749, 48802), False, 'from kinbot import find_motif\n'), ((49830, 49905), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (49852, 49905), False, 'from kinbot import find_motif\n'), ((50991, 51066), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (51013, 51066), False, 'from kinbot import find_motif\n'), ((55185, 55260), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, 
natom, bond, atom, -1, self.species.atom_eqv)\n', (55207, 55260), False, 'from kinbot import find_motif\n'), ((55343, 55418), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (55365, 55418), False, 'from kinbot import find_motif\n'), ((56524, 56599), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (56546, 56599), False, 'from kinbot import find_motif\n'), ((57512, 57587), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (57534, 57587), False, 'from kinbot import find_motif\n'), ((61806, 61881), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (61828, 61881), False, 'from kinbot import find_motif\n'), ((62781, 62856), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (62803, 62856), False, 'from kinbot import find_motif\n'), ((64007, 64082), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (64029, 64082), False, 'from kinbot import find_motif\n'), ((67519, 67594), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (67541, 67594), False, 'from kinbot import find_motif\n'), 
((71898, 71973), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (71920, 71973), False, 'from kinbot import find_motif\n'), ((73070, 73145), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (73092, 73145), False, 'from kinbot import find_motif\n'), ((10810, 10821), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (10816, 10821), True, 'import numpy as np\n'), ((14408, 14419), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (14414, 14419), True, 'import numpy as np\n'), ((18295, 18306), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (18301, 18306), True, 'import numpy as np\n'), ((21441, 21452), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (21447, 21452), True, 'import numpy as np\n'), ((22889, 22900), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (22895, 22900), True, 'import numpy as np\n'), ((23320, 23395), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (23342, 23395), False, 'from kinbot import find_motif\n'), ((24420, 24431), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (24426, 24431), True, 'import numpy as np\n'), ((24851, 24926), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (24873, 24926), False, 'from kinbot import find_motif\n'), ((25933, 25944), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (25939, 25944), True, 'import numpy as np\n'), ((26338, 26413), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 
'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (26360, 26413), False, 'from kinbot import find_motif\n'), ((27414, 27425), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (27420, 27425), True, 'import numpy as np\n'), ((28661, 28672), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (28667, 28672), True, 'import numpy as np\n'), ((30154, 30165), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (30160, 30165), True, 'import numpy as np\n'), ((31192, 31203), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (31198, 31203), True, 'import numpy as np\n'), ((33877, 33952), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (33899, 33952), False, 'from kinbot import find_motif\n'), ((35163, 35174), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (35169, 35174), True, 'import numpy as np\n'), ((35339, 35414), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (35361, 35414), False, 'from kinbot import find_motif\n'), ((36179, 36190), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (36185, 36190), True, 'import numpy as np\n'), ((38357, 38432), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (38379, 38432), False, 'from kinbot import find_motif\n'), ((41702, 41777), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (41724, 41777), False, 'from kinbot import find_motif\n'), ((44388, 44463), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 
'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (44410, 44463), False, 'from kinbot import find_motif\n'), ((53995, 54070), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (54017, 54070), False, 'from kinbot import find_motif\n'), ((56215, 56226), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (56221, 56226), True, 'import numpy as np\n'), ((57225, 57236), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (57231, 57236), True, 'import numpy as np\n'), ((58380, 58391), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (58386, 58391), True, 'import numpy as np\n'), ((58676, 58691), 'numpy.nonzero', 'np.nonzero', (['rad'], {}), '(rad)\n', (58686, 58691), True, 'import numpy as np\n'), ((58716, 58802), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', 'rad_site', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, rad_site, self.species.\n atom_eqv)\n', (58738, 58802), False, 'from kinbot import find_motif\n'), ((59248, 59259), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (59254, 59259), True, 'import numpy as np\n'), ((59479, 59494), 'numpy.nonzero', 'np.nonzero', (['rad'], {}), '(rad)\n', (59489, 59494), True, 'import numpy as np\n'), ((59519, 59605), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', 'rad_site', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, rad_site, self.species.\n atom_eqv)\n', (59541, 59605), False, 'from kinbot import find_motif\n'), ((60381, 60392), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (60387, 60392), True, 'import numpy as np\n'), ((60612, 60627), 'numpy.nonzero', 'np.nonzero', (['rad'], {}), '(rad)\n', (60622, 60627), True, 'import numpy as np\n'), ((60652, 60738), 'kinbot.find_motif.start_motif', 
'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', 'rad_site', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, rad_site, self.species.\n atom_eqv)\n', (60674, 60738), False, 'from kinbot import find_motif\n'), ((62495, 62506), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (62501, 62506), True, 'import numpy as np\n'), ((63708, 63719), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (63714, 63719), True, 'import numpy as np\n'), ((64797, 64808), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (64803, 64808), True, 'import numpy as np\n'), ((65145, 65220), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (65167, 65220), False, 'from kinbot import find_motif\n'), ((66073, 66084), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (66079, 66084), True, 'import numpy as np\n'), ((66418, 66493), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (66440, 66493), False, 'from kinbot import find_motif\n'), ((67181, 67192), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (67187, 67192), True, 'import numpy as np\n'), ((68235, 68246), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (68241, 68246), True, 'import numpy as np\n'), ((68610, 68685), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (68632, 68685), False, 'from kinbot import find_motif\n'), ((69364, 69375), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (69370, 69375), True, 'import numpy as np\n'), ((69760, 69835), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, 
bond, atom, -1, self.species.atom_eqv)\n', (69782, 69835), False, 'from kinbot import find_motif\n'), ((70751, 70762), 'numpy.sum', 'np.sum', (['rad'], {}), '(rad)\n', (70757, 70762), True, 'import numpy as np\n'), ((71020, 71035), 'numpy.nonzero', 'np.nonzero', (['rad'], {}), '(rad)\n', (71030, 71035), True, 'import numpy as np\n'), ((71060, 71146), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', 'rad_site', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, rad_site, self.species.\n atom_eqv)\n', (71082, 71146), False, 'from kinbot import find_motif\n'), ((8390, 8477), 'kinbot.bond_combinations.generate_ts', 'bond_combinations.generate_ts', (['self.reac_bonds', 'self.prod_bonds', 'self.species.bond'], {}), '(self.reac_bonds, self.prod_bonds, self.\n species.bond)\n', (8419, 8477), False, 'from kinbot import bond_combinations\n'), ((9245, 9269), 'logging.error', 'logging.error', (['"""Exiting"""'], {}), "('Exiting')\n", (9258, 9269), False, 'import logging\n'), ((9286, 9296), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9294, 9296), False, 'import sys\n'), ((11092, 11167), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (11114, 11167), False, 'from kinbot import find_motif\n'), ((11477, 11552), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (11499, 11552), False, 'from kinbot import find_motif\n'), ((14769, 14784), 'numpy.nonzero', 'np.nonzero', (['rad'], {}), '(rad)\n', (14779, 14784), True, 'import numpy as np\n'), ((14818, 14904), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', 'rad_site', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, rad_site, self.species.\n 
atom_eqv)\n', (14840, 14904), False, 'from kinbot import find_motif\n'), ((18633, 18708), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (18655, 18708), False, 'from kinbot import find_motif\n'), ((21715, 21790), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', '(-1)', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, -1, self.species.atom_eqv)\n', (21737, 21790), False, 'from kinbot import find_motif\n'), ((27853, 27868), 'numpy.nonzero', 'np.nonzero', (['rad'], {}), '(rad)\n', (27863, 27868), True, 'import numpy as np\n'), ((27897, 27983), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', 'rad_site', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, rad_site, self.species.\n atom_eqv)\n', (27919, 27983), False, 'from kinbot import find_motif\n'), ((29032, 29047), 'numpy.nonzero', 'np.nonzero', (['rad'], {}), '(rad)\n', (29042, 29047), True, 'import numpy as np\n'), ((29081, 29167), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', 'rad_site', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, rad_site, self.species.\n atom_eqv)\n', (29103, 29167), False, 'from kinbot import find_motif\n'), ((30512, 30527), 'numpy.nonzero', 'np.nonzero', (['rad'], {}), '(rad)\n', (30522, 30527), True, 'import numpy as np\n'), ((30556, 30642), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', 'rad_site', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, rad_site, self.species.\n atom_eqv)\n', (30578, 30642), False, 'from kinbot import find_motif\n'), ((31574, 31589), 'numpy.nonzero', 'np.nonzero', (['rad'], {}), '(rad)\n', (31584, 31589), True, 'import numpy as np\n'), ((31623, 31709), 'kinbot.find_motif.start_motif', 
'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', 'rad_site', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, rad_site, self.species.\n atom_eqv)\n', (31645, 31709), False, 'from kinbot import find_motif\n'), ((36689, 36739), 'kinbot.find_motif.bondfilter', 'find_motif.bondfilter', (['instance', 'bond', 'bondpattern'], {}), '(instance, bond, bondpattern)\n', (36710, 36739), False, 'from kinbot import find_motif\n'), ((38580, 38598), 'copy.deepcopy', 'copy.deepcopy', (['kch'], {}), '(kch)\n', (38593, 38598), False, 'import copy\n'), ((38686, 38704), 'copy.deepcopy', 'copy.deepcopy', (['kch'], {}), '(kch)\n', (38699, 38704), False, 'import copy\n'), ((41925, 41943), 'copy.deepcopy', 'copy.deepcopy', (['kch'], {}), '(kch)\n', (41938, 41943), False, 'import copy\n'), ((42031, 42049), 'copy.deepcopy', 'copy.deepcopy', (['kch'], {}), '(kch)\n', (42044, 42049), False, 'import copy\n'), ((55510, 55560), 'kinbot.find_motif.bondfilter', 'find_motif.bondfilter', (['instance', 'bond', 'bondpattern'], {}), '(instance, bond, bondpattern)\n', (55531, 55560), False, 'from kinbot import find_motif\n'), ((57672, 57722), 'kinbot.find_motif.bondfilter', 'find_motif.bondfilter', (['instance', 'bond', 'bondpattern'], {}), '(instance, bond, bondpattern)\n', (57693, 57722), False, 'from kinbot import find_motif\n'), ((62941, 62991), 'kinbot.find_motif.bondfilter', 'find_motif.bondfilter', (['instance', 'bond', 'bondpattern'], {}), '(instance, bond, bondpattern)\n', (62962, 62991), False, 'from kinbot import find_motif\n'), ((93677, 93696), 'numpy.array', 'np.array', (['instances'], {}), '(instances)\n', (93685, 93696), True, 'import numpy as np\n'), ((12039, 12054), 'numpy.nonzero', 'np.nonzero', (['rad'], {}), '(rad)\n', (12049, 12054), True, 'import numpy as np\n'), ((12092, 12178), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', 'rad_site', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, rad_site, 
self.species.\n atom_eqv)\n', (12114, 12178), False, 'from kinbot import find_motif\n'), ((16653, 16683), 'numpy.roll', 'np.roll', (['cycle', '(5 - startindex)'], {}), '(cycle, 5 - startindex)\n', (16660, 16683), True, 'import numpy as np\n'), ((16850, 16870), 'numpy.roll', 'np.roll', (['ring_rev', '(1)'], {}), '(ring_rev, 1)\n', (16857, 16870), True, 'import numpy as np\n'), ((19185, 19200), 'numpy.nonzero', 'np.nonzero', (['rad'], {}), '(rad)\n', (19195, 19200), True, 'import numpy as np\n'), ((19238, 19324), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', 'rad_site', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, rad_site, self.species.\n atom_eqv)\n', (19260, 19324), False, 'from kinbot import find_motif\n'), ((19597, 19612), 'numpy.nonzero', 'np.nonzero', (['rad'], {}), '(rad)\n', (19607, 19612), True, 'import numpy as np\n'), ((19650, 19736), 'kinbot.find_motif.start_motif', 'find_motif.start_motif', (['motif', 'natom', 'bond', 'atom', 'rad_site', 'self.species.atom_eqv'], {}), '(motif, natom, bond, atom, rad_site, self.species.\n atom_eqv)\n', (19672, 19736), False, 'from kinbot import find_motif\n'), ((23543, 23593), 'kinbot.find_motif.bondfilter', 'find_motif.bondfilter', (['instance', 'bond', 'bondpattern'], {}), '(instance, bond, bondpattern)\n', (23564, 23593), False, 'from kinbot import find_motif\n'), ((25074, 25124), 'kinbot.find_motif.bondfilter', 'find_motif.bondfilter', (['instance', 'bond', 'bondpattern'], {}), '(instance, bond, bondpattern)\n', (25095, 25124), False, 'from kinbot import find_motif\n'), ((29312, 29362), 'kinbot.find_motif.bondfilter', 'find_motif.bondfilter', (['instance', 'bond', 'bondpattern'], {}), '(instance, bond, bondpattern)\n', (29333, 29362), False, 'from kinbot import find_motif\n'), ((29511, 29561), 'kinbot.find_motif.bondfilter', 'find_motif.bondfilter', (['instance', 'bond', 'bondpattern'], {}), '(instance, bond, bondpattern)\n', (29532, 29561), False, 'from 
kinbot import find_motif\n'), ((31852, 31902), 'kinbot.find_motif.bondfilter', 'find_motif.bondfilter', (['instance', 'bond', 'bondpattern'], {}), '(instance, bond, bondpattern)\n', (31873, 31902), False, 'from kinbot import find_motif\n'), ((32059, 32109), 'kinbot.find_motif.bondfilter', 'find_motif.bondfilter', (['instance', 'bond', 'bondpattern'], {}), '(instance, bond, bondpattern)\n', (32080, 32109), False, 'from kinbot import find_motif\n'), ((34101, 34151), 'kinbot.find_motif.bondfilter', 'find_motif.bondfilter', (['instance', 'bond', 'bondpattern'], {}), '(instance, bond, bondpattern)\n', (34122, 34151), False, 'from kinbot import find_motif\n'), ((46171, 46188), 'numpy.roll', 'np.roll', (['ring1', '(1)'], {}), '(ring1, 1)\n', (46178, 46188), True, 'import numpy as np\n'), ((47551, 47568), 'numpy.roll', 'np.roll', (['ring1', '(1)'], {}), '(ring1, 1)\n', (47558, 47568), True, 'import numpy as np\n'), ((47612, 47629), 'numpy.roll', 'np.roll', (['ring1', '(2)'], {}), '(ring1, 2)\n', (47619, 47629), True, 'import numpy as np\n'), ((52732, 52759), 'numpy.roll', 'np.roll', (['ci', '(6 - startindex)'], {}), '(ci, 6 - startindex)\n', (52739, 52759), True, 'import numpy as np\n'), ((54286, 54336), 'kinbot.find_motif.bondfilter', 'find_motif.bondfilter', (['instance', 'bond', 'bondpattern'], {}), '(instance, bond, bondpattern)\n', (54307, 54336), False, 'from kinbot import find_motif\n'), ((65429, 65479), 'kinbot.find_motif.bondfilter', 'find_motif.bondfilter', (['instance', 'bond', 'bondpattern'], {}), '(instance, bond, bondpattern)\n', (65450, 65479), False, 'from kinbot import find_motif\n'), ((70012, 70062), 'kinbot.find_motif.bondfilter', 'find_motif.bondfilter', (['instance', 'bond', 'bondpattern'], {}), '(instance, bond, bondpattern)\n', (70033, 70062), False, 'from kinbot import find_motif\n'), ((76651, 76719), 'reactions.reac_intra_H_migration.IntraHMigration', 'IntraHMigration', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), 
'(self.species, self.qc, self.par, reac_list[i], name)\n', (76666, 76719), False, 'from reactions.reac_intra_H_migration import IntraHMigration\n'), ((93342, 93436), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.species.geom[instance[pivot1]] - self.species.geom[instance[pivot2]])'], {}), '(self.species.geom[instance[pivot1]] - self.species.geom[\n instance[pivot2]])\n', (93356, 93436), True, 'import numpy as np\n'), ((93559, 93666), 'logging.info', 'logging.info', (['f"""{name} reaction {numbers} over rigid backbone with cutoff {cutoff} A is removed."""'], {}), "(\n f'{name} reaction {numbers} over rigid backbone with cutoff {cutoff} A is removed.'\n )\n", (93571, 93666), False, 'import logging\n'), ((77010, 77089), 'reactions.reac_intra_H_migration_suprafacial.IntraHMigrationSuprafacial', 'IntraHMigrationSuprafacial', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (77036, 77089), False, 'from reactions.reac_intra_H_migration_suprafacial import IntraHMigrationSuprafacial\n'), ((77368, 77436), 'reactions.reac_intra_R_migration.IntraRMigration', 'IntraRMigration', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (77383, 77436), False, 'from reactions.reac_intra_R_migration import IntraRMigration\n'), ((77716, 77785), 'reactions.reac_intra_OH_migration.IntraOHMigration', 'IntraOHMigration', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (77732, 77785), False, 'from reactions.reac_intra_OH_migration import IntraOHMigration\n'), ((78145, 78224), 'reactions.reac_intra_OH_migration_Exocyclic_F.IntraOHMigrationExocyclicF', 'IntraOHMigrationExocyclicF', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (78171, 78224), False, 'from 
reactions.reac_intra_OH_migration_Exocyclic_F import IntraOHMigrationExocyclicF\n'), ((78535, 78601), 'reactions.reac_cpd_H_migration.CpdHMigration', 'CpdHMigration', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (78548, 78601), False, 'from reactions.reac_cpd_H_migration import CpdHMigration\n'), ((78919, 78987), 'reactions.reac_Intra_RH_Add_Endocyclic_F.IntraRHAddEndoF', 'IntraRHAddEndoF', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (78934, 78987), False, 'from reactions.reac_Intra_RH_Add_Endocyclic_F import IntraRHAddEndoF\n'), ((79273, 79341), 'reactions.reac_Intra_RH_Add_Endocyclic_R.IntraRHAddEndoR', 'IntraRHAddEndoR', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (79288, 79341), False, 'from reactions.reac_Intra_RH_Add_Endocyclic_R import IntraRHAddEndoR\n'), ((79625, 79698), 'reactions.reac_Cyclic_Ether_Formation.CyclicEtherFormation', 'CyclicEtherFormation', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (79645, 79698), False, 'from reactions.reac_Cyclic_Ether_Formation import CyclicEtherFormation\n'), ((79984, 80051), 'reactions.reac_Intra_RH_Add_Exocyclic_F.IntraRHAddExoF', 'IntraRHAddExoF', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (79998, 80051), False, 'from reactions.reac_Intra_RH_Add_Exocyclic_F import IntraRHAddExoF\n'), ((80336, 80403), 'reactions.reac_Intra_RH_Add_Exocyclic_R.IntraRHAddExoR', 'IntraRHAddExoR', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (80350, 80403), False, 'from reactions.reac_Intra_RH_Add_Exocyclic_R import 
IntraRHAddExoR\n'), ((80674, 80735), 'reactions.reac_Retro_Ene.RetroEne', 'RetroEne', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (80682, 80735), False, 'from reactions.reac_Retro_Ene import RetroEne\n'), ((81021, 81094), 'reactions.reac_Intra_R_Add_Endocyclic_F.IntraRAddEndocyclicF', 'IntraRAddEndocyclicF', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (81041, 81094), False, 'from reactions.reac_Intra_R_Add_Endocyclic_F import IntraRAddEndocyclicF\n'), ((81416, 81491), 'reactions.reac_Intra_R_Add_ExoTetCyclic_F.IntraRAddExoTetCyclicF', 'IntraRAddExoTetCyclicF', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (81438, 81491), False, 'from reactions.reac_Intra_R_Add_ExoTetCyclic_F import IntraRAddExoTetCyclicF\n'), ((81776, 81848), 'reactions.reac_Intra_R_Add_Exocyclic_F.IntraRAddExocyclicF', 'IntraRAddExocyclicF', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (81795, 81848), False, 'from reactions.reac_Intra_R_Add_Exocyclic_F import IntraRAddExocyclicF\n'), ((82169, 82236), 'reactions.reac_Korcek_step2_odd.KorcekStep2Odd', 'KorcekStep2Odd', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (82183, 82236), False, 'from reactions.reac_Korcek_step2_odd import KorcekStep2Odd\n'), ((82558, 82626), 'reactions.reac_Korcek_step2_even.KorcekStep2Even', 'KorcekStep2Even', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (82573, 82626), False, 'from reactions.reac_Korcek_step2_even import KorcekStep2Even\n'), ((82900, 82964), 'reactions.reac_Korcek_step2.KorcekStep2', 'KorcekStep2', 
(['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (82911, 82964), False, 'from reactions.reac_Korcek_step2 import KorcekStep2\n'), ((83242, 83311), 'reactions.reac_r22_cycloaddition.R22Cycloaddition', 'R22Cycloaddition', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (83258, 83311), False, 'from reactions.reac_r22_cycloaddition import R22Cycloaddition\n'), ((83589, 83658), 'reactions.reac_r12_cycloaddition.R12Cycloaddition', 'R12Cycloaddition', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (83605, 83658), False, 'from reactions.reac_r12_cycloaddition import R12Cycloaddition\n'), ((83967, 84032), 'reactions.reac_r12_insertion_R.R12Insertion', 'R12Insertion', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (83979, 84032), False, 'from reactions.reac_r12_insertion_R import R12Insertion\n'), ((84311, 84379), 'reactions.reac_r13_insertion_CO2.R13InsertionCO2', 'R13InsertionCO2', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (84326, 84379), False, 'from reactions.reac_r13_insertion_CO2 import R13InsertionCO2\n'), ((84723, 84791), 'reactions.reac_r13_insertion_ROR.R13InsertionROR', 'R13InsertionROR', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (84738, 84791), False, 'from reactions.reac_r13_insertion_ROR import R13InsertionROR\n'), ((85070, 85139), 'reactions.reac_r14_birad_scission.R14BiradScission', 'R14BiradScission', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (85086, 85139), False, 'from 
reactions.reac_r14_birad_scission import R14BiradScission\n'), ((85428, 85503), 'reactions.reac_r14_cyclic_birad_scission_R.R14CyclicBiradScission', 'R14CyclicBiradScission', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (85450, 85503), False, 'from reactions.reac_r14_cyclic_birad_scission_R import R14CyclicBiradScission\n'), ((85786, 85858), 'reactions.reac_birad_recombination_F.BiradRecombinationF', 'BiradRecombinationF', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (85805, 85858), False, 'from reactions.reac_birad_recombination_F import BiradRecombinationF\n'), ((86140, 86212), 'reactions.reac_birad_recombination_R.BiradRecombinationR', 'BiradRecombinationR', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (86159, 86212), False, 'from reactions.reac_birad_recombination_R import BiradRecombinationR\n'), ((86500, 86577), 'reactions.reac_Intra_disproportionation_F.IntraDisproportionationF', 'IntraDisproportionationF', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (86524, 86577), False, 'from reactions.reac_Intra_disproportionation_F import IntraDisproportionationF\n'), ((86865, 86942), 'reactions.reac_Intra_disproportionation_R.IntraDisproportionationR', 'IntraDisproportionationR', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (86889, 86942), False, 'from reactions.reac_Intra_disproportionation_R import IntraDisproportionationR\n'), ((87223, 87286), 'reactions.reac_Diels_alder_addition.DielsAlder', 'DielsAlder', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (87233, 
87286), False, 'from reactions.reac_Diels_alder_addition import DielsAlder\n'), ((87567, 87635), 'reactions.reac_Intra_Diels_alder_R.IntraDielsAlder', 'IntraDielsAlder', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (87582, 87635), False, 'from reactions.reac_Intra_Diels_alder_R import IntraDielsAlder\n'), ((87972, 88033), 'reactions.reac_ketoenol.KetoEnol', 'KetoEnol', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (87980, 88033), False, 'from reactions.reac_ketoenol import KetoEnol\n'), ((88329, 88396), 'reactions.reac_HO2_Elimination_from_PeroxyRadical.HO2Elimination', 'HO2Elimination', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (88343, 88396), False, 'from reactions.reac_HO2_Elimination_from_PeroxyRadical import HO2Elimination\n'), ((88707, 88771), 'reactions.reac_R_Addition_COm3_R.RAdditionCO', 'RAdditionCO', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (88718, 88771), False, 'from reactions.reac_R_Addition_COm3_R import RAdditionCO\n'), ((89088, 89162), 'reactions.reac_R_Addition_MultipleBond.RAdditionMultipleBond', 'RAdditionMultipleBond', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (89109, 89162), False, 'from reactions.reac_R_Addition_MultipleBond import RAdditionMultipleBond\n'), ((89468, 89530), 'reactions.reac_12_shift_S_F.S12ShiftF', 'S12ShiftF', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (89477, 89530), False, 'from reactions.reac_12_shift_S_F import S12ShiftF\n'), ((89836, 89898), 'reactions.reac_12_shift_S_R.S12ShiftR', 'S12ShiftR', 
(['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (89845, 89898), False, 'from reactions.reac_12_shift_S_R import S12ShiftR\n'), ((90208, 90272), 'reactions.reac_R_Addition_CSm_R.RAdditionCS', 'RAdditionCS', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (90219, 90272), False, 'from reactions.reac_R_Addition_CSm_R import RAdditionCS\n'), ((90616, 90684), 'reactions.reac_r13_insertion_RSR.R13InsertionRSR', 'R13InsertionRSR', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (90631, 90684), False, 'from reactions.reac_r13_insertion_RSR import R13InsertionRSR\n'), ((91054, 91116), 'reactions.reac_beta_delta.BetaDelta', 'BetaDelta', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (91063, 91116), False, 'from reactions.reac_beta_delta import BetaDelta\n'), ((91384, 91443), 'reactions.reac_h2_elim.H2Elim', 'H2Elim', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (91390, 91443), False, 'from reactions.reac_h2_elim import H2Elim\n'), ((91711, 91766), 'reactions.reac_homolytic_scission.HS', 'HS', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (91713, 91766), False, 'from reactions.reac_homolytic_scission import HS\n'), ((92045, 92115), 'reactions.reac_barrierless_saddle.BarrierlessSaddle', 'BarrierlessSaddle', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (92062, 92115), False, 'from reactions.reac_barrierless_saddle import BarrierlessSaddle\n'), ((92338, 92404), 'reactions.reac_combinatorial.Combinatorial', 
'Combinatorial', (['self.species', 'self.qc', 'self.par', 'reac_list[i]', 'name'], {}), '(self.species, self.qc, self.par, reac_list[i], name)\n', (92351, 92404), False, 'from reactions.reac_combinatorial import Combinatorial\n')] |
import numpy as np
import cv2
import time
import math
def distance_estimate(alt, deviation):
    """Convert a pixel deviation to meters, given the altitude in meters.

    A linear calibration maps altitude (in cm) to a cm-per-pixel scale;
    the resulting centimeter distance is converted back to meters.
    """
    altitude_cm = alt * 100
    slope = 0.002
    intercept = -0.0129
    cm_per_pixel = slope * altitude_cm + intercept
    return deviation * cm_per_pixel / 100
# Capture frames from the default camera (device index 0).
cap = cv2.VideoCapture(0)
distance = 0.7  # passed as 'alt' to distance_estimate; presumably altitude in meters -- TODO confirm
while True:
    ret, frame = cap.read()
    if ret == True:
        # Flip the image horizontally (mirror view)
        frame = cv2.flip(frame, 1)
        # cv2.imshow("frame",frame)
        # Mask green color: threshold in HSV space
        frame_hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
        low_green = np.array([25, 52, 72])  # lower HSV bound for "green"
        high_green = np.array([102, 255, 255])  # upper HSV bound
        mask = cv2.inRange(frame_hsv, low_green, high_green)
        cv2.imshow("mask",mask)
        frame_bgr = cv2.cvtColor(frame_hsv,cv2.COLOR_HSV2BGR)
        frame_green = cv2.bitwise_and(frame_bgr, frame_bgr, mask=mask)
        # Show final mask
        # cv2.imshow("green mask", frame_green)
        # Find the center of mass of the masked (green) pixels.
        # NOTE(review): with no green pixel present the averages are over
        # empty selections (NaN); int(cX)/int(cY) are only used inside the
        # pixel-count guard below, which keeps that case out.
        white_pixels = np.where(mask==255)
        cX = np.average(white_pixels[1])
        cY = np.average(white_pixels[0])
        # Small noise elimination: require more than 1000 green pixels
        if len(white_pixels[0]) > 1000:
            # Create a black image (assumes 640x480 frames -- TODO confirm)
            black_img = np.zeros((480,640,1),np.uint8)
            # Draw a filled white circle (radius 85) around the detected center
            cv2.circle(black_img, (int(cX),int(cY)), 85, (255,255,255), thickness=-1, lineType=8, shift=0)
            # Keep only the green pixels that fall inside that circle
            intersection = cv2.bitwise_and(black_img, mask)
            cv2.imshow("intersection",intersection)
            intersection_length = np.where(intersection==255)
            # Noise elimination II: same 1000-pixel threshold on the refined blob
            if len(intersection_length[0]) > 1000:
                intersection_cX= np.average(intersection_length[1])
                intersection_cY= np.average(intersection_length[0])
                # Calculate deviations from the frame center (320, 240)
                x = intersection_cX-320
                y = 240-intersection_cY
                # NOTE(review): 'deviation' is computed but never used below
                deviation = math.sqrt((x)*(x) + (y)*(y))
                # Draw the center
                cv2.circle(frame, (int(intersection_cX),int(intersection_cY)), 10, (255,255,255), thickness=-1, lineType=8, shift=0)
                # Draw deviation line from frame center to blob center
                cv2.line(frame, (320, 240), (int(intersection_cX),int(intersection_cY)), (255, 255, 255), 2)
                cv2.imshow("frame updated", frame)
                # Get deviation in meters at x-axis
                x = distance_estimate(distance, x)
                # Get deviation in meters at y-axis
                y = distance_estimate(distance, y)
                print("(" + str(x) + ", " + str(y) + ")")
    # Quit on 'q'
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
"cv2.flip",
"numpy.average",
"numpy.where",
"cv2.inRange",
"cv2.bitwise_and",
"math.sqrt",
"cv2.imshow",
"numpy.array",
"numpy.zeros",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.waitKey"
] | [((223, 242), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (239, 242), False, 'import cv2\n'), ((2689, 2712), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2710, 2712), False, 'import cv2\n'), ((361, 379), 'cv2.flip', 'cv2.flip', (['frame', '(1)'], {}), '(frame, 1)\n', (369, 379), False, 'import cv2\n'), ((463, 501), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (475, 501), False, 'import cv2\n'), ((521, 543), 'numpy.array', 'np.array', (['[25, 52, 72]'], {}), '([25, 52, 72])\n', (529, 543), True, 'import numpy as np\n'), ((565, 590), 'numpy.array', 'np.array', (['[102, 255, 255]'], {}), '([102, 255, 255])\n', (573, 590), True, 'import numpy as np\n'), ((606, 651), 'cv2.inRange', 'cv2.inRange', (['frame_hsv', 'low_green', 'high_green'], {}), '(frame_hsv, low_green, high_green)\n', (617, 651), False, 'import cv2\n'), ((660, 684), 'cv2.imshow', 'cv2.imshow', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (670, 684), False, 'import cv2\n'), ((705, 747), 'cv2.cvtColor', 'cv2.cvtColor', (['frame_hsv', 'cv2.COLOR_HSV2BGR'], {}), '(frame_hsv, cv2.COLOR_HSV2BGR)\n', (717, 747), False, 'import cv2\n'), ((769, 817), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame_bgr', 'frame_bgr'], {'mask': 'mask'}), '(frame_bgr, frame_bgr, mask=mask)\n', (784, 817), False, 'import cv2\n'), ((942, 963), 'numpy.where', 'np.where', (['(mask == 255)'], {}), '(mask == 255)\n', (950, 963), True, 'import numpy as np\n'), ((975, 1002), 'numpy.average', 'np.average', (['white_pixels[1]'], {}), '(white_pixels[1])\n', (985, 1002), True, 'import numpy as np\n'), ((1016, 1043), 'numpy.average', 'np.average', (['white_pixels[0]'], {}), '(white_pixels[0])\n', (1026, 1043), True, 'import numpy as np\n'), ((1185, 1218), 'numpy.zeros', 'np.zeros', (['(480, 640, 1)', 'np.uint8'], {}), '((480, 640, 1), np.uint8)\n', (1193, 1218), True, 'import numpy as np\n'), ((1383, 1415), 'cv2.bitwise_and', 
'cv2.bitwise_and', (['black_img', 'mask'], {}), '(black_img, mask)\n', (1398, 1415), False, 'import cv2\n'), ((1428, 1468), 'cv2.imshow', 'cv2.imshow', (['"""intersection"""', 'intersection'], {}), "('intersection', intersection)\n", (1438, 1468), False, 'import cv2\n'), ((1502, 1531), 'numpy.where', 'np.where', (['(intersection == 255)'], {}), '(intersection == 255)\n', (1510, 1531), True, 'import numpy as np\n'), ((1650, 1684), 'numpy.average', 'np.average', (['intersection_length[1]'], {}), '(intersection_length[1])\n', (1660, 1684), True, 'import numpy as np\n'), ((1718, 1752), 'numpy.average', 'np.average', (['intersection_length[0]'], {}), '(intersection_length[0])\n', (1728, 1752), True, 'import numpy as np\n'), ((1918, 1942), 'math.sqrt', 'math.sqrt', (['(x * x + y * y)'], {}), '(x * x + y * y)\n', (1927, 1942), False, 'import math\n'), ((2294, 2328), 'cv2.imshow', 'cv2.imshow', (['"""frame updated"""', 'frame'], {}), "('frame updated', frame)\n", (2304, 2328), False, 'import cv2\n'), ((2622, 2636), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2633, 2636), False, 'import cv2\n')] |
import pandas
import numpy as np
import matplotlib.pyplot as plt
import csv
import scipy.signal
filename = 'ExerciseClassifier.csv'

# Read the accelerometer log once and split it into per-axis sample lists.
# (The original code re-iterated the already-exhausted csv.reader twice
# more -- those loops were no-ops -- and also performed an unused
# pandas.read_csv of the same file.)
q1 = []  # x-axis acceleration samples
q2 = []  # y-axis acceleration samples
q3 = []  # z-axis acceleration samples
with open(filename, 'r') as f:
    for row in csv.reader(f):
        q1.append(float(row[0]))
        q2.append(float(row[1]))
        q3.append(float(row[2]))

from sklearn import tree

# Training set: one mean-acceleration triple (x, y, z) per exercise type,
# aligned index-for-index with 'labels'.
features = [
    [2.204656364, -9.422429091, -2.457790909],    # Vertical Raises
    [-0.4940326891, 0.2694644326, 0.2694640419],  # Bicep Curls
    [-1.777014894, -9.113344681, 4.069014894],    # Push Ups
    [0.6229222074, 0.1332611733, -0.1534995221],  # Sit Ups
]
labels = ['Vertical Raises', 'Bicep Curls', 'Push Ups', 'Sit Ups']
clf = tree.DecisionTreeClassifier()
clf = clf.fit(features, labels)

# Classify this recording by its mean acceleration on each axis.
x = np.mean(q1)
y = np.mean(q2)
z = np.mean(q3)
g = clf.predict([[x, y, z]])
print(g)

# Neighbourhood size ('order') for local-maximum detection, per exercise.
# The original re-ran clf.predict four more times in an if/elif chain.
order_by_exercise = {
    'Push Ups': 1,
    'Sit Ups': 4,
    'Vertical Raises': 1,
    'Bicep Curls': 5,
}
iot = order_by_exercise[g[0]]

# Count repetitions as local maxima of the x-axis signal.
tik = scipy.signal.argrelextrema(
    np.array(q1),
    comparator=np.greater, order=iot
)
# print('Peaks are:', tik[0])
print("{} counts".format(len(tik[0])))
| [
"numpy.mean",
"numpy.array",
"sklearn.tree.DecisionTreeClassifier",
"pandas.read_csv"
] | [((196, 234), 'pandas.read_csv', 'pandas.read_csv', (['filename'], {'names': 'names'}), '(filename, names=names)\n', (211, 234), False, 'import pandas\n'), ((1198, 1227), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (1225, 1227), False, 'from sklearn import tree\n'), ((1270, 1281), 'numpy.mean', 'np.mean', (['q1'], {}), '(q1)\n', (1277, 1281), True, 'import numpy as np\n'), ((1289, 1300), 'numpy.mean', 'np.mean', (['q2'], {}), '(q2)\n', (1296, 1300), True, 'import numpy as np\n'), ((1308, 1319), 'numpy.mean', 'np.mean', (['q3'], {}), '(q3)\n', (1315, 1319), True, 'import numpy as np\n'), ((1673, 1685), 'numpy.array', 'np.array', (['q1'], {}), '(q1)\n', (1681, 1685), True, 'import numpy as np\n')] |
import cv2
import numpy as np
class Drawer:
def __init__(self, width = 200, height = 200):
self.img = np.zeros((height, width,1), np.uint8)
self.fn = None
self.radius = 5
def on_key(self, fn):
self.fn = fn
def clear(self):
self.img = np.zeros(self.img.shape, np.uint8)
def get(self, width = 0, height = 0):
if width <= 0:
width = self.img.shape[1]
if height <= 0:
height = self.img.shape[0]
if width != self.img.shape[1] or height != self.img.shape[0]:
dim = (width, height)
img = cv2.resize(self.img, dim, interpolation = cv2.INTER_AREA)
cv2.imshow('www', img)
else:
img = self.img
return img
def run(self, name = 'drawer'):
self.drawing = False
self.ix = -1
self.iy = -1
cv2.namedWindow(name)
cv2.setMouseCallback(name, self.__on_mouse)
while(1):
cv2.imshow(name, self.img)
key = cv2.waitKey(30) & 0xFF
if key == 27:
break
elif key != 0 and key != 255 and self.fn != None:
self.fn(key)
cv2.destroyAllWindows()
def __on_mouse(self, event, x, y, flag, param):
if event == cv2.EVENT_LBUTTONDOWN:
self.drawing = True
self.ix = x
self.iy = y
elif event == cv2.EVENT_MOUSEMOVE:
if self.drawing == True:
cv2.circle(self.img ,(x,y), self.radius, (255,0,0), -1)
elif event == cv2.EVENT_LBUTTONUP:
self.drawing = False
cv2.circle(self.img, (x,y), self.radius, (255,0,0), -1) | [
"cv2.setMouseCallback",
"cv2.imshow",
"numpy.zeros",
"cv2.circle",
"cv2.destroyAllWindows",
"cv2.resize",
"cv2.waitKey",
"cv2.namedWindow"
] | [((115, 153), 'numpy.zeros', 'np.zeros', (['(height, width, 1)', 'np.uint8'], {}), '((height, width, 1), np.uint8)\n', (123, 153), True, 'import numpy as np\n'), ((289, 323), 'numpy.zeros', 'np.zeros', (['self.img.shape', 'np.uint8'], {}), '(self.img.shape, np.uint8)\n', (297, 323), True, 'import numpy as np\n'), ((894, 915), 'cv2.namedWindow', 'cv2.namedWindow', (['name'], {}), '(name)\n', (909, 915), False, 'import cv2\n'), ((924, 967), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['name', 'self.__on_mouse'], {}), '(name, self.__on_mouse)\n', (944, 967), False, 'import cv2\n'), ((1214, 1237), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1235, 1237), False, 'import cv2\n'), ((623, 678), 'cv2.resize', 'cv2.resize', (['self.img', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(self.img, dim, interpolation=cv2.INTER_AREA)\n', (633, 678), False, 'import cv2\n'), ((693, 715), 'cv2.imshow', 'cv2.imshow', (['"""www"""', 'img'], {}), "('www', img)\n", (703, 715), False, 'import cv2\n'), ((998, 1024), 'cv2.imshow', 'cv2.imshow', (['name', 'self.img'], {}), '(name, self.img)\n', (1008, 1024), False, 'import cv2\n'), ((1043, 1058), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (1054, 1058), False, 'import cv2\n'), ((1511, 1569), 'cv2.circle', 'cv2.circle', (['self.img', '(x, y)', 'self.radius', '(255, 0, 0)', '(-1)'], {}), '(self.img, (x, y), self.radius, (255, 0, 0), -1)\n', (1521, 1569), False, 'import cv2\n'), ((1656, 1714), 'cv2.circle', 'cv2.circle', (['self.img', '(x, y)', 'self.radius', '(255, 0, 0)', '(-1)'], {}), '(self.img, (x, y), self.radius, (255, 0, 0), -1)\n', (1666, 1714), False, 'import cv2\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.