code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from linlearn import BinaryClassifier, MultiClassifier
from linlearn.robust_means import Holland_catoni_estimator, gmom, alg2
import numpy as np
import gzip
import logging
import pickle
from datetime import datetime
import sys
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from scipy.special import logsumexp, softmax
import os
import itertools
from tqdm import tqdm
import joblib
import time
def ensure_directory(directory):
    """Create *directory* (and any missing parents) if it does not exist.

    Uses ``exist_ok=True`` instead of the original check-then-create pair
    (``os.path.exists`` followed by ``os.makedirs``), which had a race:
    another process could create the directory between the two calls and
    make ``makedirs`` raise ``FileExistsError``.
    """
    os.makedirs(directory, exist_ok=True)
# --- Experiment setup: logging, output switches and hyper-parameters ---
ensure_directory('exp_archives/')
# Log simultaneously to a file and to stdout.
file_handler = logging.FileHandler(filename='exp_archives/classif_exp.log')
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [file_handler, stdout_handler]
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S", handlers=handlers
)
save_results = False  # pickle the results DataFrame at the end of the run
save_fig= True  # save the final seaborn figure as a PDF
dataset="MNIST"
logging.info(64*"=")
logging.info("Running new experiment session ON GPU with dataset : %s" % dataset)
logging.info(64*"=")
m_SVRG = 50  # SVRG inner-loop length (only used by the commented-out SVRG variant)
step_size = 0.01  # base step size shared by all optimizers below
max_iter = 10  # number of outer iterations / recorded parameter snapshots
fit_intercept = True
n_samples = 1000  # training subsample size (slicing with None would keep the full set)
n_repeats = 2  # independent repetitions of the whole experiment
# NOTE(review): "max_ter" in the message below is a typo for "max_iter";
# it is a runtime string, so it is left unchanged here.
logging.info("Parameters are : n_repeats = %d , n_samples = %d , max_ter = %d , fit_intercept=%r , m_SVRG = %d" % (n_repeats, n_samples or 0, max_iter, fit_intercept, m_SVRG))
if not save_results:
    logging.info("WARNING : results will NOT be saved at the end of this session")
def _images(path):
"""Return images loaded locally."""
with gzip.open(path) as f:
# First 16 bytes are magic_number, n_imgs, n_rows, n_cols
pixels = np.frombuffer(f.read(), 'B', offset=16)
return pixels.reshape(-1, 784).astype('float64') / 255
def _labels(path):
"""Return labels loaded locally."""
with gzip.open(path) as f:
# First 8 bytes are magic_number, n_labels
integer_labels = np.frombuffer(f.read(), 'B', offset=8)
def _onehot(integer_labels):
"""Return matrix whose rows are onehot encodings of integers."""
n_rows = len(integer_labels)
n_cols = integer_labels.max() + 1
onehot = np.zeros((n_rows, n_cols), dtype='uint8')
onehot[np.arange(n_rows), integer_labels] = 1
return onehot
return _onehot(integer_labels)
# Local paths to the gzipped MNIST IDX files.
mnist_train_images_file = "mnist_data/train-images-idx3-ubyte.gz"
mnist_train_labels_file = "mnist_data/train-labels-idx1-ubyte.gz"
mnist_test_images_file = "mnist_data/t10k-images-idx3-ubyte.gz"
mnist_test_labels_file = "mnist_data/t10k-labels-idx1-ubyte.gz"
logging.info("loading data ...")
# Keep only the first n_samples training points; the test set is used in full.
X_train = _images(mnist_train_images_file)[:n_samples]
y_train = _labels(mnist_train_labels_file)[:n_samples]
X_test = _images(mnist_test_images_file)
y_test = _labels(mnist_test_labels_file)
def l1_apply_single(x, t):
    """Soft-thresholding operator: shrink *x* toward zero by threshold *t*.

    For t >= 0 this is the proximal operator of t*|x|: values in [-t, t]
    are mapped to 0.0, values outside are moved t closer to zero.
    """
    return max(x - t, 0.0) + min(x + t, 0.0)
def sample_objectives(X, y, w, fit_intercept=fit_intercept, lnlearn=False):
    """Per-sample multinomial logistic losses at parameters *w*.

    The last class is taken as reference (its score is fixed at 0, hence the
    appended zero column).  ``lnlearn=True`` means *w* is a linlearn-style
    (intercept, weights) pair; otherwise the intercept is row 0 of a stacked
    array.  Returns a length-n array of losses.
    """
    if not fit_intercept:
        intercept, weights = 0, w
    elif lnlearn:
        intercept, weights = w[0], w[1]
    else:
        intercept, weights = w[0, :], w[1:, :]
    n = X.shape[0]
    scores = np.hstack((X @ weights + intercept, np.zeros((n, 1))))
    true_class = np.argmax(y, axis=1)
    return logsumexp(scores, axis=1) - scores[np.arange(n), true_class]
def objective(X, y, w, fit_intercept=fit_intercept, lnlearn=False):
    """Mean multinomial logistic loss over the dataset (see sample_objectives)."""
    per_sample = sample_objectives(X, y, w, fit_intercept=fit_intercept, lnlearn=lnlearn)
    return per_sample.mean()
def gradient(X, y, w, fit_intercept=fit_intercept):
    """Full-batch gradient of the mean multinomial logistic loss at *w*.

    The last class is the reference (zero score column appended); the
    gradient is taken with respect to the remaining class columns only.
    With an intercept, row 0 of the result is the intercept gradient.
    """
    n = X.shape[0]
    if fit_intercept:
        scores = X @ w[1:, :] + w[0, :]
    else:
        scores = X @ w
    scores = np.hstack((scores, np.zeros((n, 1))))
    # Softmax residual against the one-hot labels, reference class dropped.
    residual = (softmax(scores, axis=1) - y)[:, :-1]
    if fit_intercept:
        return np.vstack((residual.sum(axis=0), X.T @ residual)) / n
    return (X.T @ residual) / n
def sample_gradients(X, y, w, fit_intercept=fit_intercept):
    """Per-sample gradients of the multinomial logistic loss at *w*.

    Returns an (n, d [+1], K-1) tensor: one gradient per sample, with the
    intercept gradient prepended as an extra row when ``fit_intercept``.
    """
    n = X.shape[0]
    if fit_intercept:
        scores = X @ w[1:, :] + w[0, :]
    else:
        scores = X @ w
    scores = np.hstack((scores, np.zeros((n, 1))))
    # Softmax residual against the one-hot labels, reference class dropped.
    residual = (softmax(scores, axis=1) - y)[:, :-1]
    per_sample = np.einsum("ij, ik->ijk", X, residual)
    if fit_intercept:
        return np.concatenate((residual[:, np.newaxis, :], per_sample), axis=1)
    return per_sample
# def train_loss(w): return objective(X_train, y_train, w, fit_intercept=fit_intercept)
# def test_loss(w): return objective(X_test, y_test, w, fit_intercept=fit_intercept)
#
# def linlearn_train_loss(w): return objective(X_train, y_train, w, fit_intercept=fit_intercept, lnlearn=True)
# def linlearn_test_loss(w): return objective(X_test, y_test, w, fit_intercept=fit_intercept, lnlearn=True)
# linlearn_tracked_funs = [linlearn_train_loss, linlearn_test_loss]
# Algorithms implemented by linlearn's MultiClassifier; for these, objective()
# must unpack a linlearn-style (intercept, weights) pair instead of a stacked array.
linlearn_algorithms = ["mom_cgd", "catoni_cgd", "tmean_cgd"]
def train_loss(w, algo_name=""):
    """Training objective at *w*; algo_name selects the linlearn parameter layout."""
    return objective(X_train, y_train, w, fit_intercept=fit_intercept, lnlearn=algo_name in linlearn_algorithms)
def test_loss(w, algo_name=""):
    """Test objective at *w*; algo_name selects the linlearn parameter layout."""
    return objective(X_test, y_test, w, fit_intercept=fit_intercept, lnlearn=algo_name in linlearn_algorithms)
tracked_funs = [train_loss, test_loss]
class Record(object):
    """Fixed-capacity, append-only log of recorded values.

    ``shape == 1`` allocates a flat float buffer of length ``capacity``
    (one scalar per update); any other ``shape`` allocates one array slot
    of that shape per update.
    """

    def __init__(self, shape, capacity):
        if shape == 1:
            buffer_shape = (capacity,)
        else:
            buffer_shape = tuple([capacity] + list(shape))
        self.record = np.zeros(buffer_shape)
        self.cursor = 0  # index of the next free slot

    def update(self, value):
        """Store *value* in the next free slot and advance the cursor."""
        slot = self.cursor
        self.record[slot] = value
        self.cursor = slot + 1

    def __len__(self):
        """Capacity, i.e. the first dimension of the underlying buffer."""
        return self.record.shape[0]
def tmean_cgd(X_train, y_train, batch_size=500):
    """Coordinate gradient descent with trimmed-mean gradient estimates.

    Fixed: the original passed ``tracked_funs=linlearn_tracked_funs`` where
    ``linlearn_tracked_funs`` is only defined in commented-out code, so any
    call raised NameError.  The function now records parameter and wall-clock
    snapshots via trackers, consistently with ``mom_cgd``/``catoni_cgd``.

    Returns (param_record, time_record) with one snapshot per iteration.
    """
    mom_logreg = MultiClassifier(tol=1e-17, max_iter=max_iter, strategy="tmean", fit_intercept=fit_intercept,
                                  thresholding=False, step_size=step_size*batch_size/1000,
                                  loss="multilogistic", batch_size=batch_size)
    param_record = Record((X_train.shape[1] + int(fit_intercept), y_train.shape[1] - 1), max_iter)
    time_record = Record(1, max_iter)
    if fit_intercept:
        # linlearn reports (intercept, weights); stack them into one array.
        param_tracker = lambda w: param_record.update(np.vstack(w))
    else:
        param_tracker = lambda w: param_record.update(w)
    mom_logreg.fit(X_train, y_train, trackers=[param_tracker, lambda _: time_record.update(time.time())])
    return param_record, time_record
# def catoni_cgd(X_train, y_train, batch_size=500):
# mom_logreg = MultiClassifier(tol=1e-17, max_iter=max_iter, strategy="catoni", fit_intercept=fit_intercept,
# thresholding=False, step_size=step_size*batch_size/1000, loss="multilogistic", batch_size=batch_size)
# mom_logreg.fit(X_train, y_train, tracked_funs=linlearn_tracked_funs)
#
# n_iter = len(mom_logreg.optimization_result_.tracked_funs[0])
# n_batches = X_train.shape[0] // batch_size + int(X_train.shape[0] % batch_size > 0)
# gradient_counts = [(i // n_batches)*X_train.shape[0] + (i % n_batches)*batch_size for i in range(n_iter)]
#
# return mom_logreg.optimization_result_.tracked_funs + [gradient_counts]
def catoni_cgd(X_train, y_train, l1_penalty=1):
    """Coordinate gradient descent with Catoni robust gradient estimates.

    Fits an l1-penalized multinomial logistic model with linlearn and
    returns (param_record, time_record), one snapshot per iteration.
    """
    clf = MultiClassifier(tol=1e-17, max_iter=max_iter, strategy="catoni",
                          fit_intercept=fit_intercept, penalty="l1", C=l1_penalty,
                          step_size=step_size, loss="multilogistic")
    n_features, n_classes = X_train.shape[1], y_train.shape[1]
    param_record = Record((n_features + int(fit_intercept), n_classes - 1), max_iter)
    time_record = Record(1, max_iter)
    if fit_intercept:
        # linlearn reports (intercept, weights); stack them into one array.
        track_params = lambda w: param_record.update(np.vstack(w))
    else:
        track_params = lambda w: param_record.update(w)
    track_time = lambda _: time_record.update(time.time())
    clf.fit(X_train, y_train, trackers=[track_params, track_time])
    return param_record, time_record
# def mom_cgd(X_train, y_train, batch_size=500):
# mom_logreg = MultiClassifier(tol=1e-17, max_iter=max_iter, strategy="mom", fit_intercept=fit_intercept,
# thresholding=False, step_size=step_size*batch_size/1000, loss="multilogistic", batch_size=batch_size)
# mom_logreg.fit(X_train, y_train, tracked_funs=linlearn_tracked_funs)
#
# n_iter = len(mom_logreg.optimization_result_.tracked_funs[0])
# n_batches = X_train.shape[0] // batch_size + int(X_train.shape[0] % batch_size > 0)
# gradient_counts = [(i // n_batches) * X_train.shape[0] + (i % n_batches) * batch_size for i in
# range(n_iter)]
#
# return mom_logreg.optimization_result_.tracked_funs + [gradient_counts]
def mom_cgd(X_train, y_train, l1_penalty=1):
    """Coordinate gradient descent with median-of-means gradient estimates.

    Fits an l1-penalized multinomial logistic model with linlearn and
    returns (param_record, time_record), one snapshot per iteration.
    """
    clf = MultiClassifier(tol=1e-17, max_iter=max_iter, strategy="mom",
                          fit_intercept=fit_intercept, penalty="l1", C=l1_penalty,
                          step_size=step_size, loss="multilogistic")
    n_features, n_classes = X_train.shape[1], y_train.shape[1]
    param_record = Record((n_features + int(fit_intercept), n_classes - 1), max_iter)
    time_record = Record(1, max_iter)
    if fit_intercept:
        # linlearn reports (intercept, weights); stack them into one array.
        track_params = lambda w: param_record.update(np.vstack(w))
    else:
        track_params = lambda w: param_record.update(w)
    track_time = lambda _: time_record.update(time.time())
    clf.fit(X_train, y_train, trackers=[track_params, track_time])
    return param_record, time_record
# def SVRG(X, y, grad, m, w0=None, T=max_iter, fit_intercept=fit_intercept, tracked_funs=tracked_funs):
# if w0 is None:
# w0 = np.zeros((X.shape[1] + int(fit_intercept), y.shape[1]-1))
# w_tilde = w0
# wt = w0
# step = step_size*(X.shape[0]/m + 2)/1000
# tracks = [[obj(w0)] for obj in tracked_funs] + [[0]]
# for i in tqdm(range((T*500)//(X.shape[0] + 2*m) + 1), desc="SVRG"):
# mu = grad(X, y, w_tilde, fit_intercept=fit_intercept)
# additional_gradients = X.shape[0]
# for j in range(m):
# ind = np.random.randint(X.shape[0])
# X_ind, y_ind = X[ind:ind+1,:], y[ind:ind+1,:]
# wt -= step*(grad(X_ind, y_ind, wt, fit_intercept=fit_intercept) - grad(X_ind, y_ind, w_tilde, fit_intercept=fit_intercept) + mu)
# additional_gradients += 2
# for idx, obj in enumerate(tracked_funs):
# tracks[idx].append(obj(wt))
# tracks[-1].append(tracks[-1][-1] + additional_gradients)
# additional_gradients = 0
# w_tilde = wt
# return tracks
def SVRG(X, y, w0=None, T=max_iter, fit_intercept=fit_intercept):
    """Stochastic Variance Reduced Gradient for the multinomial logistic loss.

    Each outer iteration computes the full gradient ``mu`` at the anchor
    ``w_tilde``, then runs n inner stochastic steps using the SVRG control
    variate ``g(wt) - g(w_tilde) + mu``.

    Fixed: ``wt`` and ``w_tilde`` previously aliased the same array (and the
    caller's ``w0``); since the update is in place, the correction term
    ``g(wt) - g(w_tilde)`` was identically zero (plain stale-gradient SGD)
    and the caller's ``w0`` was mutated.  Independent copies restore the
    SVRG update and leave ``w0`` untouched.

    Returns (param_record, time_record) with one snapshot per outer iteration.
    """
    if w0 is None:
        w0 = np.zeros((X.shape[1] + int(fit_intercept), y.shape[1] - 1))
    w_tilde = w0.copy()  # anchor point for the full-gradient term
    wt = w0.copy()       # current iterate, updated in place
    step = step_size / (X.shape[0])
    m = X.shape[0]       # inner-loop length: one epoch per outer iteration
    param_record = Record((X.shape[1] + int(fit_intercept), y.shape[1] - 1), max_iter)
    time_record = Record(1, max_iter)
    for i in tqdm(range(T), desc="SVRG"):
        mu = gradient(X, y, w_tilde, fit_intercept=fit_intercept)
        for j in range(m):
            ind = np.random.randint(X.shape[0])
            X_ind, y_ind = X[ind:ind + 1, :], y[ind:ind + 1]
            wt -= step * (gradient(X_ind, y_ind, wt, fit_intercept=fit_intercept)
                          - gradient(X_ind, y_ind, w_tilde, fit_intercept=fit_intercept) + mu)
        w_tilde = wt.copy()  # re-anchor at the end of the epoch
        param_record.update(wt)
        time_record.update(time.time())
    return param_record, time_record
def SGD(X, y, w0=None, T=max_iter, fit_intercept=fit_intercept):
    """Plain stochastic gradient descent on the multinomial logistic loss.

    One uniformly sampled single-example gradient step per iteration.

    Fixed: the iterate previously aliased the caller's ``w0`` and was
    updated in place, silently mutating the caller's array; it now works
    on a copy.

    Returns (param_record, time_record) with one snapshot per iteration.
    """
    if w0 is None:
        w0 = np.zeros((X.shape[1] + int(fit_intercept), y.shape[1] - 1))
    wt = w0.copy()
    step = step_size / (X.shape[0])
    param_record = Record((X.shape[1] + int(fit_intercept), y.shape[1] - 1), max_iter)
    time_record = Record(1, max_iter)
    for i in tqdm(range(T), desc="SGD"):
        index = np.random.randint(X.shape[0])
        wt -= step * gradient(X[index:index + 1, :], y[index:index + 1, :], wt, fit_intercept=fit_intercept)
        param_record.update(wt)
        time_record.update(time.time())
    return param_record, time_record
def Holland_gd(X, y, w0=None, T=max_iter, fit_intercept=fit_intercept):
    """Gradient descent with Holland's Catoni-type robust mean estimator.

    At each iteration the per-sample gradients are computed and each
    coordinate of the descent direction is the Holland-Catoni robust mean
    of that coordinate across samples.

    Fixed: the iterate previously aliased the caller's ``w0`` and was
    updated in place, silently mutating the caller's array; it now works
    on a copy.

    Returns (param_record, time_record) with one snapshot per iteration.
    """
    if w0 is None:
        w0 = np.zeros((X.shape[1] + int(fit_intercept), y.shape[1] - 1))
    wt = w0.copy()
    param_record = Record((X.shape[1] + int(fit_intercept), y.shape[1] - 1), max_iter)
    time_record = Record(1, max_iter)
    for i in tqdm(range(T), desc="Holland"):
        gradients = sample_gradients(X, y, wt, fit_intercept=fit_intercept)
        # Robust mean, coordinate by coordinate, over the sample axis.
        catoni_avg_grad = np.zeros_like(wt)
        for k in range(wt.shape[0]):
            for l in range(wt.shape[1]):
                catoni_avg_grad[k, l] = Holland_catoni_estimator(gradients[:, k, l])
        wt -= step_size * catoni_avg_grad
        param_record.update(wt)
        time_record.update(time.time())
    return param_record, time_record
# def Prasad_heavyTails_gd(X, y, w0=None, T=max_iter, fit_intercept=fit_intercept, delta=0.01):
# if w0 is None:
# w0 = np.zeros((X.shape[1] + int(fit_intercept), y.shape[1]-1))
# n_blocks = 1 + int(3.5 * np.log(1 / delta))
# block_size = batch_size // n_blocks
# wt = w0
# step = step_size*batch_size/1000
# tracks = [[obj(wt)] for obj in tracked_funs] + [[0]]
# for i in tqdm(range(T), desc = "prasad_heavy_tail"):
# indices = np.random.randint(X.shape[0], size=batch_size)
# gradients = sample_gradients(X[indices,:], y[indices,:], wt, fit_intercept=fit_intercept)
# permutation = np.random.permutation(batch_size)
# block_means = []
# for j in range(n_blocks):
# block_means.append(np.mean(gradients[permutation[j * block_size:(j + 1) * block_size], :], axis=0).reshape(-1))
# grad = gmom(np.array(block_means)).reshape(wt.shape)
# wt -= step * grad
#
# for idx, obj in enumerate(tracked_funs):
# tracks[idx].append(obj(wt))
# tracks[-1].append(tracks[-1][-1] + batch_size)
#
# return tracks
def Prasad_heavyTails_gd(X, y, w0=None, T=max_iter, fit_intercept=fit_intercept, delta=0.01, l1_penalty=0):
    """Gradient descent with a geometric-median-of-means gradient estimate.

    The per-sample gradients are split into ``1 + int(3.5*log(1/delta))``
    random blocks; the block means are combined with the geometric median
    (``gmom``).  After each step an l1 soft-thresholding (prox) step is
    applied to the non-intercept coordinates.

    Fixed: the iterate previously aliased the caller's ``w0`` and was
    updated in place, silently mutating the caller's array; it now works
    on a copy.

    Returns (param_record, time_record) with one snapshot per iteration.
    """
    if w0 is None:
        w0 = np.zeros((X.shape[1] + int(fit_intercept), y.shape[1] - 1))
    n_blocks = 1 + int(3.5 * np.log(1 / delta))
    block_size = X.shape[0] // n_blocks
    wt = w0.copy()
    param_record = Record((X.shape[1] + int(fit_intercept), y.shape[1] - 1), max_iter)
    time_record = Record(1, max_iter)
    for i in tqdm(range(T), desc="prasad_heavy_tail"):
        gradients = sample_gradients(X, y, wt, fit_intercept=fit_intercept)
        permutation = np.random.permutation(X.shape[0])
        block_means = []
        for j in range(n_blocks):
            block_means.append(np.mean(gradients[permutation[j * block_size:(j + 1) * block_size], :], axis=0).reshape(-1))
        grad = gmom(np.array(block_means)).reshape(wt.shape)
        wt -= step_size * grad
        # Proximal l1 step on the weight rows only (skip the intercept row).
        for k in range(int(fit_intercept), wt.shape[0]):
            for l in range(wt.shape[1]):
                wt[k, l] = l1_apply_single(wt[k, l], l1_penalty * step_size)
        param_record.update(wt)
        time_record.update(time.time())
    return param_record, time_record
# def Lecue_gd(X, y, w0=None, T=max_iter, batch_size=500, fit_intercept=fit_intercept, tracked_funs=tracked_funs, n_blocks=21):
# if w0 is None:
# w0 = np.zeros((X.shape[1] + int(fit_intercept), y.shape[1]-1))
#
# def argmedian(x):
# return np.argpartition(x, len(x) // 2)[len(x) // 2]
# block_size = batch_size // n_blocks
# wt = w0
# step = step_size*batch_size/1000
# tracks = [[obj(wt)] for obj in tracked_funs] + [[0]]
# for i in tqdm(range(T), desc = "Lecue"):
# indices = np.random.randint(X.shape[0], size=batch_size)
# objectives = sample_objectives(X[indices,:], y[indices,:], wt, fit_intercept=fit_intercept)
#
# perm = np.random.permutation(len(indices))
# means = [
# np.mean(objectives[perm[j * block_size: (j + 1) * block_size]])
# for j in range(n_blocks)
# ]
# argmed = argmedian(means)
# indices = perm[argmed * block_size: (argmed + 1) * block_size]
# X_subset, y_subset = X[indices, :], y[indices]
#
#
# grad = gradient(X_subset, y_subset, wt, fit_intercept=fit_intercept)
# wt -= step * grad
#
# for idx, obj in enumerate(tracked_funs):
# tracks[idx].append(obj(wt))
# tracks[-1].append(tracks[-1][-1] + batch_size)
#
# return tracks
def Lecue_gd(X, y, w0=None, T=max_iter, fit_intercept=fit_intercept, n_blocks=21):
    """Median-of-means gradient descent (Lecue-style).

    At each step the per-sample losses are split into ``n_blocks`` random
    blocks; the block whose mean loss is the median is selected and the
    descent direction is the gradient restricted to that block.
    ``n_blocks`` must be uneven so the median is a single block.

    Fixed: the iterate previously aliased the caller's ``w0`` and was
    updated in place, silently mutating the caller's array; it now works
    on a copy.

    Returns (param_record, time_record) with one snapshot per iteration.
    """
    if w0 is None:
        w0 = np.zeros((X.shape[1] + int(fit_intercept), y.shape[1] - 1))
    def argmedian(x):
        # Index of the median element (valid since n_blocks is odd).
        return np.argpartition(x, len(x) // 2)[len(x) // 2]
    block_size = X.shape[0] // n_blocks
    wt = w0.copy()
    param_record = Record((X.shape[1] + int(fit_intercept), y.shape[1] - 1), max_iter)
    time_record = Record(1, max_iter)
    for i in tqdm(range(T), desc="Lecue"):
        objectives = sample_objectives(X, y, wt, fit_intercept=fit_intercept)
        perm = np.random.permutation(X.shape[0])
        means = [
            np.mean(objectives[perm[j * block_size: (j + 1) * block_size]])
            for j in range(n_blocks)
        ]
        argmed = argmedian(means)
        indices = perm[argmed * block_size: (argmed + 1) * block_size]
        X_subset, y_subset = X[indices, :], y[indices]
        grad = gradient(X_subset, y_subset, wt, fit_intercept=fit_intercept)
        wt -= step_size * grad
        param_record.update(wt)
        time_record.update(time.time())
    return param_record, time_record
# This one is too computationally heavy
# def Prasad_outliers_gd(X, y, w0=None, step=0.01, T=max_iter, batch_size=100, fit_intercept=fit_intercept, tracked_funs=tracked_funs, eps = 0.01, delta=0.01):
# if w0 is None:
# w0 = np.zeros((X.shape[1] + int(fit_intercept), y.shape[1]-1))
# wt = w0
# tracks = [[obj(wt)] for obj in tracked_funs] + [[0]]
# for i in range(T):
# print("iter")
# indices = np.random.randint(X.shape[0], size=batch_size)
# gradients = sample_gradients(X[indices,:], y[indices,:], wt, fit_intercept=fit_intercept)
# grad = alg2(gradients.reshape((batch_size, -1)), eps, delta)[0]
# wt -= step * grad.reshape(wt.shape)
#
# for idx, obj in enumerate(tracked_funs):
# tracks[idx].append(obj(wt))
# tracks[-1].append(tracks[-1][-1] + batch_size)
#
# return tracks
# Metrics evaluated on every recorded parameter snapshot.
metrics = [train_loss, test_loss]
def run_repetition(rep):
    """Run every optimizer once and evaluate the metrics on their iterates.

    Returns five parallel lists (repetition id, algorithm name, metric name,
    metric value, elapsed time) ready to be concatenated into a DataFrame.
    """
    col_try, col_algo, col_metric, col_time, col_val = [], [], [], [], []
    outputs = {}
    def announce(x):
        # Progress marker on stdout (safe under joblib workers).
        print(str(rep)+" : "+x+" done")
    outputs["SGD"] = SGD(X_train, y_train)
    announce("SGD")
    outputs["SVRG"] = SVRG(X_train, y_train)#, gradient, m_SVRG)
    announce("SVRG")
    outputs["lecue_gd"] = Lecue_gd(X_train, y_train)
    announce("lecue_gd")
    outputs["Prasad_heavytails"] = Prasad_heavyTails_gd(X_train, y_train)
    # NOTE(review): the label below says "gmom_gd" but announces the
    # Prasad_heavyTails_gd run (which is gmom-based); runtime string kept as-is.
    announce("gmom_gd")
    outputs["mom_cgd"] = mom_cgd(X_train, y_train)
    announce("mom_cgd")
    outputs["Holland_gd"] = Holland_gd(X_train, y_train)
    announce("Holland_gd")
    outputs["catoni_cgd"] = catoni_cgd(X_train, y_train)
    announce("catoni_cgd")
    logging.info("computing objective history")
    # Each output is a (param_record, time_record) pair: evaluate every metric
    # on every stored iterate; report time relative to the first snapshot.
    for alg in outputs.keys():
        for ind_metric, metric in enumerate(metrics):
            for i in range(max_iter):
                col_try.append(rep)
                col_algo.append(alg)
                col_metric.append(metric.__name__)
                col_val.append(metric(outputs[alg][0].record[i]))
                col_time.append(outputs[alg][1].record[i] - outputs[alg][1].record[0])
    print("repetition done")
    return col_try, col_algo, col_metric, col_val, col_time
# --- Driver: run repetitions (parallel when enough cores), plot and save ---
if os.cpu_count() > 8:
    #logging.info("precompiling linlearn code")
    logging.info("running parallel repetitions")
    results = joblib.Parallel(n_jobs=-1)(joblib.delayed(run_repetition)(rep) for rep in range(n_repeats))
else:
    results = [run_repetition(rep) for rep in range(n_repeats)]
# Flatten the per-repetition column lists into single columns.
col_try = list(itertools.chain.from_iterable([x[0] for x in results]))
col_algo = list(itertools.chain.from_iterable([x[1] for x in results]))
col_metric = list(itertools.chain.from_iterable([x[2] for x in results]))
col_val = list(itertools.chain.from_iterable([x[3] for x in results]))
col_time = list(itertools.chain.from_iterable([x[4] for x in results]))
data = pd.DataFrame({"repeat":col_try, "algorithm":col_algo, "metric":col_metric, "value":col_val, "time" : col_time})
if save_results:
    logging.info("Saving results ...")
    now = datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
    filename = "classif_"+dataset+"_results_" + now + ".pickle"
    ensure_directory("exp_archives/classif/")
    with open("exp_archives/classif/" + filename, "wb") as f:
        pickle.dump({"datetime": now, "results": data}, f)
    logging.info("Saved results in file %s" % filename)
# One subplot per metric, each showing value-vs-time curves per algorithm.
g = sns.FacetGrid(
    data, col="metric", height=4, legend_out=True
)
g.map(
    sns.lineplot,
    "time",
    "value",
    "algorithm",
    #lw=4,
)#.set(yscale="log")#, xlabel="", ylabel="")
#g.set_titles(col_template="{col_name}")
axes = g.axes.flatten()
# for i, dataset in enumerate(df["dataset"].unique()):
#     axes[i].set_xticklabels([0, 1, 2, 5, 10, 20, 50], fontsize=14)
#     axes[i].set_title(dataset, fontsize=18)
plt.legend(
    list(data["algorithm"].unique()),
    #bbox_to_anchor=(0.3, 0.7, 1.0, 0.0),
    loc="upper center",
    #ncol=1,
    #borderaxespad=0.0,
    #fontsize=14,
)
#g.fig.subplots_adjust(top=0.9)
#g.fig.suptitle('n=%d , noise=%s , $\\sigma$ = %.2f, block_size=%.2f, w_star_dist=%s' % (n_samples, noise_dist, noise_sigma[noise_dist], MOMreg_block_size, w_star_dist))
plt.show()
if save_fig:
    now = datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
    ensure_directory("exp_archives/classif/")
    #specs = 'n%d_%s%.2f_block_size=%.2f_w_dist=%s' % (n_samples, noise_dist, noise_sigma[noise_dist], MOMreg_block_size, w_star_dist)
    fig_file_name = "exp_archives/classif/" + dataset + now + ".pdf"
    g.fig.savefig(fname=fig_file_name, bbox_inches='tight')
    logging.info("Saved figure into file : %s" % fig_file_name)
| [
"logging.StreamHandler",
"gzip.open",
"numpy.log",
"numpy.array",
"numpy.einsum",
"os.cpu_count",
"logging.info",
"scipy.special.logsumexp",
"numpy.arange",
"linlearn.robust_means.Holland_catoni_estimator",
"os.path.exists",
"numpy.mean",
"itertools.chain.from_iterable",
"logging.FileHandl... | [((575, 635), 'logging.FileHandler', 'logging.FileHandler', ([], {'filename': '"""exp_archives/classif_exp.log"""'}), "(filename='exp_archives/classif_exp.log')\n", (594, 635), False, 'import logging\n'), ((653, 686), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (674, 686), False, 'import logging\n'), ((730, 855), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""', 'handlers': 'handlers'}), "(level=logging.INFO, format='%(asctime)s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S', handlers=handlers)\n", (749, 855), False, 'import logging\n'), ((913, 935), 'logging.info', 'logging.info', (["(64 * '=')"], {}), "(64 * '=')\n", (925, 935), False, 'import logging\n'), ((934, 1019), 'logging.info', 'logging.info', (["('Running new experiment session ON GPU with dataset : %s' % dataset)"], {}), "('Running new experiment session ON GPU with dataset : %s' %\n dataset)\n", (946, 1019), False, 'import logging\n'), ((1016, 1038), 'logging.info', 'logging.info', (["(64 * '=')"], {}), "(64 * '=')\n", (1028, 1038), False, 'import logging\n'), ((1136, 1321), 'logging.info', 'logging.info', (["('Parameters are : n_repeats = %d , n_samples = %d , max_ter = %d , fit_intercept=%r , m_SVRG = %d'\n % (n_repeats, n_samples or 0, max_iter, fit_intercept, m_SVRG))"], {}), "(\n 'Parameters are : n_repeats = %d , n_samples = %d , max_ter = %d , fit_intercept=%r , m_SVRG = %d'\n % (n_repeats, n_samples or 0, max_iter, fit_intercept, m_SVRG))\n", (1148, 1321), False, 'import logging\n'), ((2517, 2549), 'logging.info', 'logging.info', (['"""loading data ..."""'], {}), "('loading data ...')\n", (2529, 2549), False, 'import logging\n'), ((20270, 20388), 'pandas.DataFrame', 'pd.DataFrame', (["{'repeat': col_try, 'algorithm': col_algo, 'metric': col_metric, 'value':\n col_val, 'time': col_time}"], {}), "({'repeat': col_try, 
'algorithm': col_algo, 'metric':\n col_metric, 'value': col_val, 'time': col_time})\n", (20282, 20388), True, 'import pandas as pd\n'), ((20789, 20849), 'seaborn.FacetGrid', 'sns.FacetGrid', (['data'], {'col': '"""metric"""', 'height': '(4)', 'legend_out': '(True)'}), "(data, col='metric', height=4, legend_out=True)\n", (20802, 20849), True, 'import seaborn as sns\n'), ((21595, 21605), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21603, 21605), True, 'import matplotlib.pyplot as plt\n'), ((1338, 1416), 'logging.info', 'logging.info', (['"""WARNING : results will NOT be saved at the end of this session"""'], {}), "('WARNING : results will NOT be saved at the end of this session')\n", (1350, 1416), False, 'import logging\n'), ((5694, 5900), 'linlearn.MultiClassifier', 'MultiClassifier', ([], {'tol': '(1e-17)', 'max_iter': 'max_iter', 'strategy': '"""tmean"""', 'fit_intercept': 'fit_intercept', 'thresholding': '(False)', 'step_size': '(step_size * batch_size / 1000)', 'loss': '"""multilogistic"""', 'batch_size': 'batch_size'}), "(tol=1e-17, max_iter=max_iter, strategy='tmean',\n fit_intercept=fit_intercept, thresholding=False, step_size=step_size *\n batch_size / 1000, loss='multilogistic', batch_size=batch_size)\n", (5709, 5900), False, 'from linlearn import BinaryClassifier, MultiClassifier\n'), ((7128, 7301), 'linlearn.MultiClassifier', 'MultiClassifier', ([], {'tol': '(1e-17)', 'max_iter': 'max_iter', 'strategy': '"""catoni"""', 'fit_intercept': 'fit_intercept', 'penalty': '"""l1"""', 'C': 'l1_penalty', 'step_size': 'step_size', 'loss': '"""multilogistic"""'}), "(tol=1e-17, max_iter=max_iter, strategy='catoni',\n fit_intercept=fit_intercept, penalty='l1', C=l1_penalty, step_size=\n step_size, loss='multilogistic')\n", (7143, 7301), False, 'from linlearn import BinaryClassifier, MultiClassifier\n'), ((8575, 8746), 'linlearn.MultiClassifier', 'MultiClassifier', ([], {'tol': '(1e-17)', 'max_iter': 'max_iter', 'strategy': '"""mom"""', 'fit_intercept': 
'fit_intercept', 'penalty': '"""l1"""', 'C': 'l1_penalty', 'step_size': 'step_size', 'loss': '"""multilogistic"""'}), "(tol=1e-17, max_iter=max_iter, strategy='mom', fit_intercept\n =fit_intercept, penalty='l1', C=l1_penalty, step_size=step_size, loss=\n 'multilogistic')\n", (8590, 8746), False, 'from linlearn import BinaryClassifier, MultiClassifier\n'), ((19069, 19112), 'logging.info', 'logging.info', (['"""computing objective history"""'], {}), "('computing objective history')\n", (19081, 19112), False, 'import logging\n'), ((19606, 19620), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (19618, 19620), False, 'import os\n'), ((19678, 19722), 'logging.info', 'logging.info', (['"""running parallel repetitions"""'], {}), "('running parallel repetitions')\n", (19690, 19722), False, 'import logging\n'), ((19916, 19970), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['[x[0] for x in results]'], {}), '([x[0] for x in results])\n', (19945, 19970), False, 'import itertools\n'), ((19988, 20042), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['[x[1] for x in results]'], {}), '([x[1] for x in results])\n', (20017, 20042), False, 'import itertools\n'), ((20062, 20116), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['[x[2] for x in results]'], {}), '([x[2] for x in results])\n', (20091, 20116), False, 'import itertools\n'), ((20133, 20187), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['[x[3] for x in results]'], {}), '([x[3] for x in results])\n', (20162, 20187), False, 'import itertools\n'), ((20205, 20259), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['[x[4] for x in results]'], {}), '([x[4] for x in results])\n', (20234, 20259), False, 'import itertools\n'), ((20404, 20438), 'logging.info', 'logging.info', (['"""Saving results ..."""'], {}), "('Saving results ...')\n", (20416, 20438), False, 'import logging\n'), ((20731, 20782), 'logging.info', 'logging.info', 
(["('Saved results in file %s' % filename)"], {}), "('Saved results in file %s' % filename)\n", (20743, 20782), False, 'import logging\n'), ((21990, 22049), 'logging.info', 'logging.info', (["('Saved figure into file : %s' % fig_file_name)"], {}), "('Saved figure into file : %s' % fig_file_name)\n", (22002, 22049), False, 'import logging\n'), ((466, 491), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (480, 491), False, 'import os\n'), ((501, 523), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (512, 523), False, 'import os\n'), ((1486, 1501), 'gzip.open', 'gzip.open', (['path'], {}), '(path)\n', (1495, 1501), False, 'import gzip\n'), ((1760, 1775), 'gzip.open', 'gzip.open', (['path'], {}), '(path)\n', (1769, 1775), False, 'import gzip\n'), ((2100, 2141), 'numpy.zeros', 'np.zeros', (['(n_rows, n_cols)'], {'dtype': '"""uint8"""'}), "((n_rows, n_cols), dtype='uint8')\n", (2108, 2141), True, 'import numpy as np\n'), ((3247, 3272), 'scipy.special.logsumexp', 'logsumexp', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (3256, 3272), False, 'from scipy.special import logsumexp, softmax\n'), ((3638, 3661), 'scipy.special.softmax', 'softmax', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (3645, 3661), False, 'from scipy.special import logsumexp, softmax\n'), ((4145, 4168), 'scipy.special.softmax', 'softmax', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (4152, 4168), False, 'from scipy.special import logsumexp, softmax\n'), ((4385, 4428), 'numpy.einsum', 'np.einsum', (['"""ij, ik->ijk"""', 'X', 'sftmax[:, :-1]'], {}), "('ij, ik->ijk', X, sftmax[:, :-1])\n", (4394, 4428), True, 'import numpy as np\n'), ((11567, 11596), 'numpy.random.randint', 'np.random.randint', (['X.shape[0]'], {}), '(X.shape[0])\n', (11584, 11596), True, 'import numpy as np\n'), ((12254, 12271), 'numpy.zeros_like', 'np.zeros_like', (['wt'], {}), '(wt)\n', (12267, 12271), True, 'import numpy as np\n'), ((14304, 14337), 
'numpy.random.permutation', 'np.random.permutation', (['X.shape[0]'], {}), '(X.shape[0])\n', (14325, 14337), True, 'import numpy as np\n'), ((16840, 16873), 'numpy.random.permutation', 'np.random.permutation', (['X.shape[0]'], {}), '(X.shape[0])\n', (16861, 16873), True, 'import numpy as np\n'), ((19737, 19763), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (19752, 19763), False, 'import joblib\n'), ((20675, 20725), 'pickle.dump', 'pickle.dump', (["{'datetime': now, 'results': data}", 'f'], {}), "({'datetime': now, 'results': data}, f)\n", (20686, 20725), False, 'import pickle\n'), ((3153, 3178), 'numpy.zeros', 'np.zeros', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (3161, 3178), True, 'import numpy as np\n'), ((3597, 3622), 'numpy.zeros', 'np.zeros', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (3605, 3622), True, 'import numpy as np\n'), ((4104, 4129), 'numpy.zeros', 'np.zeros', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (4112, 4129), True, 'import numpy as np\n'), ((5368, 5386), 'numpy.zeros', 'np.zeros', (['capacity'], {}), '(capacity)\n', (5376, 5386), True, 'import numpy as np\n'), ((10817, 10846), 'numpy.random.randint', 'np.random.randint', (['X.shape[0]'], {}), '(X.shape[0])\n', (10834, 10846), True, 'import numpy as np\n'), ((11132, 11143), 'time.time', 'time.time', ([], {}), '()\n', (11141, 11143), False, 'import time\n'), ((11759, 11770), 'time.time', 'time.time', ([], {}), '()\n', (11768, 11770), False, 'import time\n'), ((12537, 12548), 'time.time', 'time.time', ([], {}), '()\n', (12546, 12548), False, 'import time\n'), ((14846, 14857), 'time.time', 'time.time', ([], {}), '()\n', (14855, 14857), False, 'import time\n'), ((16904, 16966), 'numpy.mean', 'np.mean', (['objectives[perm[j * block_size:(j + 1) * block_size]]'], {}), '(objectives[perm[j * block_size:(j + 1) * block_size]])\n', (16911, 16966), True, 'import numpy as np\n'), ((17343, 17354), 'time.time', 'time.time', ([], {}), '()\n', (17352, 
17354), False, 'import time\n'), ((20449, 20463), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (20461, 20463), False, 'from datetime import datetime\n'), ((21631, 21645), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (21643, 21645), False, 'from datetime import datetime\n'), ((2157, 2174), 'numpy.arange', 'np.arange', (['n_rows'], {}), '(n_rows)\n', (2166, 2174), True, 'import numpy as np\n'), ((4307, 4350), 'numpy.einsum', 'np.einsum', (['"""ij, ik->ijk"""', 'X', 'sftmax[:, :-1]'], {}), "('ij, ik->ijk', X, sftmax[:, :-1])\n", (4316, 4350), True, 'import numpy as np\n'), ((7534, 7546), 'numpy.vstack', 'np.vstack', (['w'], {}), '(w)\n', (7543, 7546), True, 'import numpy as np\n'), ((8978, 8990), 'numpy.vstack', 'np.vstack', (['w'], {}), '(w)\n', (8987, 8990), True, 'import numpy as np\n'), ((12389, 12433), 'linlearn.robust_means.Holland_catoni_estimator', 'Holland_catoni_estimator', (['gradients[:, k, l]'], {}), '(gradients[:, k, l])\n', (12413, 12433), False, 'from linlearn.robust_means import Holland_catoni_estimator, gmom, alg2\n'), ((13952, 13969), 'numpy.log', 'np.log', (['(1 / delta)'], {}), '(1 / delta)\n', (13958, 13969), True, 'import numpy as np\n'), ((19764, 19794), 'joblib.delayed', 'joblib.delayed', (['run_repetition'], {}), '(run_repetition)\n', (19778, 19794), False, 'import joblib\n'), ((3200, 3221), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (3209, 3221), True, 'import numpy as np\n'), ((3223, 3243), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (3232, 3243), True, 'import numpy as np\n'), ((14541, 14562), 'numpy.array', 'np.array', (['block_means'], {}), '(block_means)\n', (14549, 14562), True, 'import numpy as np\n'), ((7707, 7718), 'time.time', 'time.time', ([], {}), '()\n', (7716, 7718), False, 'import time\n'), ((9151, 9162), 'time.time', 'time.time', ([], {}), '()\n', (9160, 9162), False, 'import time\n'), ((14428, 14507), 'numpy.mean', 'np.mean', 
(['gradients[permutation[j * block_size:(j + 1) * block_size], :]'], {'axis': '(0)'}), '(gradients[permutation[j * block_size:(j + 1) * block_size], :], axis=0)\n', (14435, 14507), True, 'import numpy as np\n')] |
import numpy
import matplotlib
import matplotlib.pyplot as plt
def plot_progress_kmeans(iteration, x_array, centroid_history, idx_history):
    """
    Display the progress of k-means as it runs. Intended for 2D data only.

    Data points are colored by their assigned centroid, and each centroid's
    trajectory up to the current iteration is drawn as a connected line so
    the movement between iterations is visible.

    Parameters
    ----------
    iteration : int
        Current k-means iteration number. Used by the matplotlib animation.
    x_array : array_like
        The dataset, an (m x n) matrix. Since only 2D data is supported,
        n should equal 2.
    centroid_history : array_like
        Computed centroids for every iteration, shape (iters, K, 2).
    idx_history : list
        Assigned centroid indices for every iteration.
    """
    _, num_centroids, _ = centroid_history.shape
    plt.gcf().clf()
    cmap = plt.cm.rainbow
    norm = matplotlib.colors.Normalize(vmin=0, vmax=2)
    for centroid_id in range(num_centroids):
        # Trajectory of this centroid over all iterations seen so far.
        trail = numpy.stack(
            [step[centroid_id, :] for step in centroid_history[:iteration + 1]],
            axis=0)
        plt.plot(trail[:, 0], trail[:, 1], '-Xk', mec='k', lw=2, ms=10,
                 mfc=cmap(norm(centroid_id)), mew=2)
    plt.scatter(x_array[:, 0], x_array[:, 1], c=idx_history[iteration],
                cmap=cmap, marker='o', s=8**2, linewidths=1)
    plt.grid(b=True, which='major', axis='both', linestyle='--', linewidth=0.5)
    plt.title('Iteration number %d' % (iteration + 1))
    plt.xlabel("Feature 1")
    plt.ylabel("Feature 2")
    plt.tight_layout()
    return plt
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel",
"numpy.stack",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title"
] | [((1049, 1092), 'matplotlib.colors.Normalize', 'matplotlib.colors.Normalize', ([], {'vmin': '(0)', 'vmax': '(2)'}), '(vmin=0, vmax=2)\n', (1076, 1092), False, 'import matplotlib\n'), ((1447, 1522), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""major"""', 'axis': '"""both"""', 'linestyle': '"""--"""', 'linewidth': '(0.5)'}), "(b=True, which='major', axis='both', linestyle='--', linewidth=0.5)\n", (1455, 1522), True, 'import matplotlib.pyplot as plt\n'), ((1527, 1577), 'matplotlib.pyplot.title', 'plt.title', (["('Iteration number %d' % (iteration + 1))"], {}), "('Iteration number %d' % (iteration + 1))\n", (1536, 1577), True, 'import matplotlib.pyplot as plt\n'), ((1580, 1603), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Feature 1"""'], {}), "('Feature 1')\n", (1590, 1603), True, 'import matplotlib.pyplot as plt\n'), ((1608, 1631), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Feature 2"""'], {}), "('Feature 2')\n", (1618, 1631), True, 'import matplotlib.pyplot as plt\n'), ((1636, 1654), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1652, 1654), True, 'import matplotlib.pyplot as plt\n'), ((1149, 1221), 'numpy.stack', 'numpy.stack', (['[c[k, :] for c in centroid_history[:iteration + 1]]'], {'axis': '(0)'}), '([c[k, :] for c in centroid_history[:iteration + 1]], axis=0)\n', (1160, 1221), False, 'import numpy\n'), ((1330, 1449), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_array[:, 0]', 'x_array[:, 1]'], {'c': 'idx_history[iteration]', 'cmap': 'cmap', 'marker': '"""o"""', 's': '(8 ** 2)', 'linewidths': '(1)'}), "(x_array[:, 0], x_array[:, 1], c=idx_history[iteration], cmap=\n cmap, marker='o', s=8 ** 2, linewidths=1)\n", (1341, 1449), True, 'import matplotlib.pyplot as plt\n'), ((996, 1005), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1003, 1005), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
from synthtext.config import load_cfg
class Curvature(object):
    """Samples a random parabolic curve y = a * x**2 and its derivative.

    The coefficient ``a`` is drawn as ``a[1] * N(0, 1) + sgn * a[0]``, where
    the sign flips to -1 with probability ``p_sgn``. The attributes ``a`` and
    ``p_sgn`` are populated onto the instance by ``load_cfg``.
    """

    # NOTE: these were previously lambdas assigned to names (PEP 8 E731);
    # plain methods keep the exact same call interface and behavior.
    def curve(self, a):
        """Return the parabola x -> a * x**2 for the given coefficient."""
        return lambda x: a * x * x

    def differential(self, a):
        """Return the derivative x -> 2 * a * x for the given coefficient."""
        return lambda x: 2 * a * x

    def __init__(self):
        # load_cfg attaches configuration attributes (e.g. self.a, self.p_sgn).
        load_cfg(self)

    def sample_curvature(self):
        """
        Returns the functions for the curve and differential for a and b
        """
        sgn = 1.0
        if np.random.rand() < self.p_sgn:
            sgn = -1
        a = self.a[1] * np.random.randn() + sgn * self.a[0]
        return {
            'curve': self.curve(a),
            'diff': self.differential(a),
        }
| [
"synthtext.config.load_cfg",
"numpy.random.randn",
"numpy.random.rand"
] | [((222, 236), 'synthtext.config.load_cfg', 'load_cfg', (['self'], {}), '(self)\n', (230, 236), False, 'from synthtext.config import load_cfg\n'), ((396, 412), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (410, 412), True, 'import numpy as np\n'), ((473, 490), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (488, 490), True, 'import numpy as np\n')] |
"""
Integrated gradient saliency maps
Created on 04/30/2020
@author: RH
"""
import saliency
import os
import sys
import cv2
import numpy as np
import tensorflow as tf
import data_input2 as data_input
# image to double
def im2double(im):
    """Min-max normalize an image into the [0, 1] range as float."""
    as_float = im.astype('float')
    return cv2.normalize(as_float, None, 0.0, 1.0, cv2.NORM_MINMAX)
# image to jpg
def py_map2jpg(imgmap):
    """Convert a [0, 1] heat map into a JET-colormapped uint8 image."""
    scaled = np.round(imgmap * 255).astype(np.uint8)
    return cv2.applyColorMap(scaled, cv2.COLORMAP_JET)
def infer(model, classes, dropout=0.3):
    """
    Build the inference graph for the requested architecture.

    Parameters:
        model (str): Architecture code: 'I1'-'I6' for the Inception family,
            'R18'/'R34'/'R50'/'R101'/'R152' for ResNet. Any other value
            falls back to Inception-V1.
        classes (int): Number of output classes.
        dropout (float): Dropout keep probability passed to the builder
            (networks are constructed with is_training=False).

    Returns:
        tuple: (x_in, y_in, logits, nett, ww, pred, neuron_selector, ny) —
            input/label placeholders, network tensors, softmax prediction,
            and the neuron selector + selected logit used for saliency.
    """
    # image input
    x_in = tf.placeholder(tf.float32, name="x")
    x_in_reshape = tf.reshape(x_in, [-1, 299, 299, 3])
    # label input
    y_in = tf.placeholder(dtype=tf.int32, name="y")
    # Keyword arguments shared by every architecture builder; previously
    # this identical trio was copy-pasted into all twelve branches.
    common = dict(num_classes=classes, is_training=False,
                  dropout_keep_prob=dropout)
    if model == 'I1':
        import InceptionV1
        logits, nett, ww = InceptionV1.googlenet(x_in_reshape, scope='GoogleNet', **common)
        print('Using Inception-V1')
    elif model == 'I2':
        import InceptionV2
        logits, nett, ww = InceptionV2.inceptionv2(x_in_reshape, scope='InceptionV2', **common)
        print('Using Inception-V2')
    elif model == 'I3':
        import InceptionV3
        logits, nett, ww = InceptionV3.inceptionv3(x_in_reshape, scope='InceptionV3', **common)
        print('Using Inception-V3')
    elif model == 'I4':
        import InceptionV4
        logits, nett, ww = InceptionV4.inceptionv4(x_in_reshape, scope='InceptionV4', **common)
        print('Using Inception-V4')
    elif model == 'I5':
        import InceptionV5
        logits, nett, ww = InceptionV5.inceptionresnetv1(x_in_reshape, scope='InceptionResV1', **common)
        print('Using Inception-Resnet-V1')
    elif model == 'I6':
        import InceptionV6
        logits, nett, ww = InceptionV6.inceptionresnetv2(x_in_reshape, scope='InceptionResV2', **common)
        print('Using Inception-Resnet-V2')
    elif model == 'R18':
        from Scripts.Legacy import ResNet
        logits, nett, ww = ResNet.resnet(x_in_reshape, mode=18, scope='ResNet18', **common)
        print('Using ResNet18')
    elif model == 'R34':
        from Scripts.Legacy import ResNet
        logits, nett, ww = ResNet.resnet(x_in_reshape, mode=34, scope='ResNet34', **common)
        print('Using ResNet34')
    elif model == 'R50':
        from Scripts.Legacy import ResNet
        logits, nett, ww = ResNet.resnet(x_in_reshape, mode=50, scope='ResNet50', **common)
        print('Using ResNet50')
    elif model == 'R101':
        from Scripts.Legacy import ResNet
        logits, nett, ww = ResNet.resnet(x_in_reshape, mode=101, scope='ResNet101', **common)
        print('Using ResNet101')
    elif model == 'R152':
        from Scripts.Legacy import ResNet
        logits, nett, ww = ResNet.resnet(x_in_reshape, mode=152, scope='ResNet152', **common)
        print('Using ResNet152')
    else:
        import InceptionV1
        logits, nett, ww = InceptionV1.googlenet(x_in_reshape, scope='GoogleNet', **common)
        print('Using Default: Inception-V1')
    pred = tf.nn.softmax(logits, name="prediction")
    neuron_selector = tf.placeholder(tf.int32)
    ny = logits[0][neuron_selector]
    return x_in, y_in, logits, nett, ww, pred, neuron_selector, ny
def reconstruct(X, model, classs, modelpath, outpath, do=0.3, bs =64):
    """Restore a trained model and write integrated-gradient saliency maps.

    For every test image, computes a SmoothGrad-smoothed integrated-gradient
    mask, converts it to a JET heat map, and writes a side-by-side montage
    (overlay | original | heat map) to ``outpath``.

    Parameters:
        X: dataset object exposing ``data(train=False)`` returning an
            iterator, a file handle/list, and a placeholder (see __main__).
        model (str): architecture code understood by ``infer``.
        classs (int): number of output classes.
        modelpath (str): checkpoint prefix to restore (``.meta`` appended).
        outpath (str): directory/prefix for the output PNG files.
        do (float): dropout keep probability forwarded to ``infer``.
        bs (int): batch size (currently unused inside this function).
    """
    graph = tf.Graph()
    with graph.as_default():
        # Build the inference graph; only placeholders/tensors needed for
        # saliency are used below.
        x_in_, y_in_, logits_, nett_, ww_, pred_, neuron_selector_, ny_ = infer(model=model, classes=classs, dropout=do)
    with tf.Session(graph=graph,
                    config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
        tf.global_variables_initializer().run()
        # Restore the trained weights into this session.
        saver = tf.train.import_meta_graph(str(modelpath+'.meta'))
        saver.restore(sess, modelpath)
        itr, file, ph = X.data(train=False)
        next_element = itr.get_next()
        # Separate session drives the data pipeline iterator.
        with tf.Session() as sessa:
            sessa.run(itr.initializer, feed_dict={ph: file})
            ct = 0
            while True:
                try:
                    x, y = sessa.run(next_element)
                    for mm in range(np.shape(x)[0]):
                        # NOTE(review): the saliency API expects an output
                        # *tensor* as the third argument; `y` here is the
                        # numpy label batch — ny_ looks intended. Confirm.
                        grad = saliency.IntegratedGradients(graph, sess, y, x_in_)
                        img = x[mm, :, :, :]
                        # Baseline is a white image.
                        baseline = np.zeros(img.shape)
                        baseline.fill(255)
                        # NOTE(review): passes the whole batch `x` although
                        # the loop iterates per-image; `img` looks intended.
                        smoothgrad_mask_3d = grad.GetSmoothedMask(x, feed_dict={
                            neuron_selector_: 1}, x_steps=25, x_baseline=baseline)
                        # Call the visualization methods to convert the 3D tensors to 2D grayscale.
                        smoothgrad_mask_grayscale = saliency.VisualizeImageGrayscale(smoothgrad_mask_3d)
                        smoothgrad_mask_grayscale = im2double(smoothgrad_mask_grayscale)
                        smoothgrad_mask_grayscale = py_map2jpg(smoothgrad_mask_grayscale)
                        # Scale image and mask to [0, 255] and blend 50/50.
                        sa = im2double(img) * 255
                        sb = im2double(smoothgrad_mask_grayscale) * 255
                        scurHeatMap = sa * 0.5 + sb * 0.5
                        # Montage: [overlay | original | heat map].
                        sab = np.hstack((sa, sb))
                        sfull = np.hstack((scurHeatMap, sab))
                        cv2.imwrite(str(outpath + str(ct) + '.png'), sfull)
                        ct += 1
                except tf.errors.OutOfRangeError:
                    # Dataset iterator exhausted: all test images processed.
                    print("Done!")
                    break
if __name__ == "__main__":
    # Replace the 'PATH TO ...' placeholders with real locations before
    # running: the test tfrecords file, the checkpoint prefix, and an
    # output directory for the saliency PNGs.
    THE = data_input.DataSet(64, 10000, ep=1, cls=2, mode='test', filename='PATH TO test.tfrecords')
    reconstruct(THE, 'I3', 2, 'PATH TO trained model', 'PATH TO output dir', do=0.3, bs=64)
| [
"InceptionV5.inceptionresnetv1",
"numpy.hstack",
"Scripts.Legacy.ResNet.resnet",
"InceptionV1.googlenet",
"tensorflow.nn.softmax",
"data_input2.DataSet",
"tensorflow.Graph",
"tensorflow.placeholder",
"tensorflow.Session",
"saliency.IntegratedGradients",
"InceptionV4.inceptionv4",
"tensorflow.C... | [((426, 472), 'cv2.applyColorMap', 'cv2.applyColorMap', (['heatmap_x', 'cv2.COLORMAP_JET'], {}), '(heatmap_x, cv2.COLORMAP_JET)\n', (443, 472), False, 'import cv2\n'), ((544, 580), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""x"""'}), "(tf.float32, name='x')\n", (558, 580), True, 'import tensorflow as tf\n'), ((600, 635), 'tensorflow.reshape', 'tf.reshape', (['x_in', '[-1, 299, 299, 3]'], {}), '(x_in, [-1, 299, 299, 3])\n', (610, 635), True, 'import tensorflow as tf\n'), ((666, 706), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'name': '"""y"""'}), "(dtype=tf.int32, name='y')\n", (680, 706), True, 'import tensorflow as tf\n'), ((6150, 6190), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'name': '"""prediction"""'}), "(logits, name='prediction')\n", (6163, 6190), True, 'import tensorflow as tf\n'), ((6214, 6238), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {}), '(tf.int32)\n', (6228, 6238), True, 'import tensorflow as tf\n'), ((6428, 6438), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6436, 6438), True, 'import tensorflow as tf\n'), ((8749, 8844), 'data_input2.DataSet', 'data_input.DataSet', (['(64)', '(10000)'], {'ep': '(1)', 'cls': '(2)', 'mode': '"""test"""', 'filename': '"""PATH TO test.tfrecords"""'}), "(64, 10000, ep=1, cls=2, mode='test', filename=\n 'PATH TO test.tfrecords')\n", (8767, 8844), True, 'import data_input2 as data_input\n'), ((784, 909), 'InceptionV1.googlenet', 'InceptionV1.googlenet', (['x_in_reshape'], {'num_classes': 'classes', 'is_training': '(False)', 'dropout_keep_prob': 'dropout', 'scope': '"""GoogleNet"""'}), "(x_in_reshape, num_classes=classes, is_training=False,\n dropout_keep_prob=dropout, scope='GoogleNet')\n", (805, 909), False, 'import InceptionV1\n'), ((377, 399), 'numpy.round', 'np.round', (['(imgmap * 255)'], {}), '(imgmap * 255)\n', (385, 399), True, 'import numpy as np\n'), ((1216, 1346), 'InceptionV2.inceptionv2', 
'InceptionV2.inceptionv2', (['x_in_reshape'], {'num_classes': 'classes', 'is_training': '(False)', 'dropout_keep_prob': 'dropout', 'scope': '"""InceptionV2"""'}), "(x_in_reshape, num_classes=classes, is_training=\n False, dropout_keep_prob=dropout, scope='InceptionV2')\n", (1239, 1346), False, 'import InceptionV2\n'), ((1660, 1790), 'InceptionV3.inceptionv3', 'InceptionV3.inceptionv3', (['x_in_reshape'], {'num_classes': 'classes', 'is_training': '(False)', 'dropout_keep_prob': 'dropout', 'scope': '"""InceptionV3"""'}), "(x_in_reshape, num_classes=classes, is_training=\n False, dropout_keep_prob=dropout, scope='InceptionV3')\n", (1683, 1790), False, 'import InceptionV3\n'), ((7009, 7021), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7019, 7021), True, 'import tensorflow as tf\n'), ((2104, 2234), 'InceptionV4.inceptionv4', 'InceptionV4.inceptionv4', (['x_in_reshape'], {'num_classes': 'classes', 'is_training': '(False)', 'dropout_keep_prob': 'dropout', 'scope': '"""InceptionV4"""'}), "(x_in_reshape, num_classes=classes, is_training=\n False, dropout_keep_prob=dropout, scope='InceptionV4')\n", (2127, 2234), False, 'import InceptionV4\n'), ((6657, 6725), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(True)'}), '(allow_soft_placement=True, log_device_placement=True)\n', (6671, 6725), True, 'import tensorflow as tf\n'), ((6748, 6781), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6779, 6781), True, 'import tensorflow as tf\n'), ((2548, 2686), 'InceptionV5.inceptionresnetv1', 'InceptionV5.inceptionresnetv1', (['x_in_reshape'], {'num_classes': 'classes', 'is_training': '(False)', 'dropout_keep_prob': 'dropout', 'scope': '"""InceptionResV1"""'}), "(x_in_reshape, num_classes=classes,\n is_training=False, dropout_keep_prob=dropout, scope='InceptionResV1')\n", (2577, 2686), False, 'import InceptionV5\n'), ((3032, 3170), 'InceptionV6.inceptionresnetv2', 
'InceptionV6.inceptionresnetv2', (['x_in_reshape'], {'num_classes': 'classes', 'is_training': '(False)', 'dropout_keep_prob': 'dropout', 'scope': '"""InceptionResV2"""'}), "(x_in_reshape, num_classes=classes,\n is_training=False, dropout_keep_prob=dropout, scope='InceptionResV2')\n", (3061, 3170), False, 'import InceptionV6\n'), ((7320, 7371), 'saliency.IntegratedGradients', 'saliency.IntegratedGradients', (['graph', 'sess', 'y', 'x_in_'], {}), '(graph, sess, y, x_in_)\n', (7348, 7371), False, 'import saliency\n'), ((7517, 7536), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (7525, 7536), True, 'import numpy as np\n'), ((7918, 7970), 'saliency.VisualizeImageGrayscale', 'saliency.VisualizeImageGrayscale', (['smoothgrad_mask_3d'], {}), '(smoothgrad_mask_3d)\n', (7950, 7970), False, 'import saliency\n'), ((8384, 8403), 'numpy.hstack', 'np.hstack', (['(sa, sb)'], {}), '((sa, sb))\n', (8393, 8403), True, 'import numpy as np\n'), ((8440, 8469), 'numpy.hstack', 'np.hstack', (['(scurHeatMap, sab)'], {}), '((scurHeatMap, sab))\n', (8449, 8469), True, 'import numpy as np\n'), ((3532, 3657), 'Scripts.Legacy.ResNet.resnet', 'ResNet.resnet', (['x_in_reshape'], {'mode': '(18)', 'num_classes': 'classes', 'is_training': '(False)', 'dropout_keep_prob': 'dropout', 'scope': '"""ResNet18"""'}), "(x_in_reshape, mode=18, num_classes=classes, is_training=False,\n dropout_keep_prob=dropout, scope='ResNet18')\n", (3545, 3657), False, 'from Scripts.Legacy import ResNet\n'), ((7268, 7279), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (7276, 7279), True, 'import numpy as np\n'), ((3985, 4110), 'Scripts.Legacy.ResNet.resnet', 'ResNet.resnet', (['x_in_reshape'], {'mode': '(34)', 'num_classes': 'classes', 'is_training': '(False)', 'dropout_keep_prob': 'dropout', 'scope': '"""ResNet34"""'}), "(x_in_reshape, mode=34, num_classes=classes, is_training=False,\n dropout_keep_prob=dropout, scope='ResNet34')\n", (3998, 4110), False, 'from Scripts.Legacy import ResNet\n'), ((4438, 
4563), 'Scripts.Legacy.ResNet.resnet', 'ResNet.resnet', (['x_in_reshape'], {'mode': '(50)', 'num_classes': 'classes', 'is_training': '(False)', 'dropout_keep_prob': 'dropout', 'scope': '"""ResNet50"""'}), "(x_in_reshape, mode=50, num_classes=classes, is_training=False,\n dropout_keep_prob=dropout, scope='ResNet50')\n", (4451, 4563), False, 'from Scripts.Legacy import ResNet\n'), ((4892, 5020), 'Scripts.Legacy.ResNet.resnet', 'ResNet.resnet', (['x_in_reshape'], {'mode': '(101)', 'num_classes': 'classes', 'is_training': '(False)', 'dropout_keep_prob': 'dropout', 'scope': '"""ResNet101"""'}), "(x_in_reshape, mode=101, num_classes=classes, is_training=\n False, dropout_keep_prob=dropout, scope='ResNet101')\n", (4905, 5020), False, 'from Scripts.Legacy import ResNet\n'), ((5349, 5477), 'Scripts.Legacy.ResNet.resnet', 'ResNet.resnet', (['x_in_reshape'], {'mode': '(152)', 'num_classes': 'classes', 'is_training': '(False)', 'dropout_keep_prob': 'dropout', 'scope': '"""ResNet152"""'}), "(x_in_reshape, mode=152, num_classes=classes, is_training=\n False, dropout_keep_prob=dropout, scope='ResNet152')\n", (5362, 5477), False, 'from Scripts.Legacy import ResNet\n'), ((5775, 5900), 'InceptionV1.googlenet', 'InceptionV1.googlenet', (['x_in_reshape'], {'num_classes': 'classes', 'is_training': '(False)', 'dropout_keep_prob': 'dropout', 'scope': '"""GoogleNet"""'}), "(x_in_reshape, num_classes=classes, is_training=False,\n dropout_keep_prob=dropout, scope='GoogleNet')\n", (5796, 5900), False, 'import InceptionV1\n')] |
"""
python utilities for neuron
"""
# internal python imports
import os
# third party imports
import numpy as np
import matplotlib
# local (our) imports
def get_backend():
    """
    Report the backend currently in use. Defaults to 'tensorflow'; set the
    NEURITE_BACKEND environment variable to 'pytorch' to switch.
    """
    backend = os.environ.get('NEURITE_BACKEND')
    if backend == 'pytorch':
        return 'pytorch'
    return 'tensorflow'
def softmax(x, axis):
    """
    Softmax of a numpy array along a given dimension.

    Shifts the input by its maximum along `axis` before exponentiating so
    large values do not overflow to inf/nan; the result is unchanged
    because softmax is invariant to additive shifts.
    """
    shifted = x - np.max(x, axis=axis, keepdims=True)
    exp_x = np.exp(shifted)
    return exp_x / np.sum(exp_x, axis=axis, keepdims=True)
def rebase_lab(labels):
    """
    Rebase labels and return lookup table (LUT) to convert to new labels in
    interval [0, N[ as: LUT[label_map]. Be sure to pass all possible labels.
    """
    unique_labels = np.unique(labels)  # sorted ascending
    assert np.issubdtype(unique_labels.dtype, np.integer), 'non-integer data'
    # Forward LUT: old label value -> new index in [0, N[.
    lab_to_ind = np.zeros(np.max(unique_labels) + 1, dtype='int_')
    lab_to_ind[unique_labels] = np.arange(len(unique_labels))
    # Reverse LUT: new index -> original label value.
    return lab_to_ind, unique_labels
def load_fs_lut(filename):
    """
    Read a label lookup-table from file. The file is expected to define
    the anatomical name and color for each label ID, one entry per line:

    ```
    ID AnatomicalName R G B
    ```

    Parameters:
        filename (str): File to load.

    Returns:
        dict: Label lookup dictionary.
    """
    label_table = {}
    with open(filename, 'r') as fh:
        for raw in fh:
            stripped = raw.rstrip()
            # Skip blank lines and comment lines.
            if not stripped or stripped.startswith('#'):
                continue
            tokens = stripped.split()
            sid = int(tokens[0])
            entry = {'name': tokens[1]}
            if len(tokens) > 2:
                entry['color'] = [int(c) for c in tokens[2:5]]
            label_table[sid] = entry
    return label_table
def seg_to_rgb_fs_lut(seg, label_table):
    """
    Convert a hard segmentation into an RGB color image using a
    freesurfer-style label lookup-table dictionary.

    Parameters:
        seg (ndarray): Hard segmentation array.
        label_table (dict): Label lookup.

    Returns:
        ndarray: RGB (3-frame) image with shape of input seg.
    """
    color_seg = np.zeros((*seg.shape, 3), dtype='uint8')
    for sid in np.unique(seg):
        entry = label_table.get(sid)
        # Labels missing from the table keep the black default.
        if entry is None:
            continue
        color_seg[seg == sid] = entry['color']
    return color_seg
def fs_lut_to_cmap(lut):
    """
    Convert a freesurfer LUT to a matplotlib colormap.

    example
    lut = ne.py.utils.load_fs_lut('/path/to/seg32_labels.lut')
    fs_cmap = ne.py.utils.fs_lut_to_cmap(lut)

    Args:
        lut (dict/str): path to a LUT file, or a dict keyed by integer label
            ids where each value holds a 'color' entry — a 3-element list of
            RGB components in [0, 255].

    Returns:
        matplotlib ListedColormap built from the LUT colors.
    """
    if isinstance(lut, str):
        lut = load_fs_lut(lut)
    max_label = np.array(list(lut.keys())).max()
    # Rows for label ids absent from the LUT stay black.
    rgb = np.zeros((max_label + 1, 3), dtype='float')
    for label_id, entry in lut.items():
        rgb[label_id] = entry['color']
    return matplotlib.colors.ListedColormap(rgb / 255)
| [
"numpy.unique",
"os.environ.get",
"matplotlib.colors.ListedColormap",
"numpy.exp",
"numpy.issubdtype",
"numpy.zeros",
"numpy.max",
"numpy.array"
] | [((786, 803), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (795, 803), True, 'import numpy as np\n'), ((826, 865), 'numpy.issubdtype', 'np.issubdtype', (['labels.dtype', 'np.integer'], {}), '(labels.dtype, np.integer)\n', (839, 865), True, 'import numpy as np\n'), ((2274, 2288), 'numpy.unique', 'np.unique', (['seg'], {}), '(seg)\n', (2283, 2288), True, 'import numpy as np\n'), ((2305, 2345), 'numpy.zeros', 'np.zeros', (['(*seg.shape, 3)'], {'dtype': '"""uint8"""'}), "((*seg.shape, 3), dtype='uint8')\n", (2313, 2345), True, 'import numpy as np\n'), ((3279, 3322), 'matplotlib.colors.ListedColormap', 'matplotlib.colors.ListedColormap', (['(rgb / 255)'], {}), '(rgb / 255)\n', (3311, 3322), False, 'import matplotlib\n'), ((522, 531), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (528, 531), True, 'import numpy as np\n'), ((352, 385), 'os.environ.get', 'os.environ.get', (['"""NEURITE_BACKEND"""'], {}), "('NEURITE_BACKEND')\n", (366, 385), False, 'import os\n'), ((541, 550), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (547, 550), True, 'import numpy as np\n'), ((913, 927), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (919, 927), True, 'import numpy as np\n'), ((3165, 3179), 'numpy.array', 'np.array', (['keys'], {}), '(keys)\n', (3173, 3179), True, 'import numpy as np\n')] |
'''
Author: <NAME> (@abodh_ltd)
MSEE, South Dakota State University
Last updated: August 26, 2020
'''
import numpy as np
import torch
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import pdb
from datetime import date, datetime
import os
import time
from data_loading import loading, separate_dataset, freq_data
from model import Net, Simple1DCNN
from utils import accuracy, testing
from torch.utils.data import DataLoader
# resets weights for different learning rates
def weight_init(m):
    """Re-initialize a module's parameters if it is a Linear layer.

    Non-Linear modules are left untouched; used to reset the network
    between runs (e.g. when sweeping learning rates).
    """
    if not isinstance(m, torch.nn.Linear):
        return
    m.reset_parameters()
if __name__ == '__main__':
# manual seed to reproduce same results every time
torch.manual_seed(0); np.random.seed(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# setting the parameters
network = 'CNN' # you can choose to train on MLP or CNN
epoch = 200 # number of epochs -> in 1 epoch all of the training data are used
mini_batch = 30 # number of mini-batches -> subset of the training data
learning_rate = 1e-3 # SGD learning rate -> considers SGD as optimizer
momentum = 0.5 # SGD momentum term -> considers SGD as optimizer
n_hidden1 = 25 # number of hidden units in first hidden layer (for MLP)
n_hidden2 = 25 # number of hidden units in second hidden layer (for MLP)
n_output = 1 # number of output units
frac_train = 0.80 # fraction of data to be used as training set
dropout_rate = 0.2 # dropout rate -> remember to set dropout_decision as True
weight_initializer = 0.05 # weight initializer -> initializes between [-x,x)
dropout_decision = False # do you want to dropout or not?
w_lambda = 0.0005 # weight decay parameter
tolerance = 0.1 # tolerance for the estimated value
# -> 0.1 means the output around 10% is considers to be correct
save_figs = False # set True if you want to save figs and data
save_model = False # set True when you want to save models for specific conditions
load_model = False # set True when you want to load the saved models for specific conditions
# update the location of your data files in data_path to match the path of the input data
# data_path = "..\\..\\Neural-Network-Regression\\data files\\other data\\varying both_M_P_posneg_pulse" \
# "\\manipulated\\"
data_path = "..\\..\\matlab files\\0.2Hz\\manipulated\\" # set the path of the data file
# loading the data
dataset = freq_data(data_path) # loads data from the freq_data class (dataset class -> awesome in pytorch)
print('the length of the dataset = ', len(dataset))
train_num = int(frac_train * len(dataset)) # number of data for training
test_num = len(dataset) - train_num # number of data for validating
max_batches = epoch * int(train_num / mini_batch)
'''' brute force search '''
# hidden = [10, 25, 50, 60]
# lr = [1e-4, 1e-3, 1e-2, 1e-1]
# decay = [1e-4, 5e-4, 1e-3, 1e-2]
# batch = [10, 20, 30, 50]
# results = np.zeros((len(hidden) * len(lr) * len(decay) * len(batch), 6))
# cnt = 0
# for n_hidden1 in hidden:
# n_hidden2 = n_hidden1
# for learning_rate in lr:
# for w_lambda in decay:
# for mini_batch in batch:
# creating a unique folder to save the output files
str(date.today().strftime("%d/%m/%Y"))
output_path = "../../Neural-Network-Regression/log/testing_models/" + str(date.today().strftime("%b-%d-%Y")) + \
str(datetime.now().strftime("-%H.%M.%S-")) \
+ "h{}_lr{}_lam{}_bat_{}".format(n_hidden1, learning_rate, w_lambda, mini_batch)
try:
os.mkdir(output_path) # creates a directory based on current date and time
except OSError:
print("Creation of the directory %s failed" % output_path)
# creating models folder if save_model is set to true
if (save_model):
os.mkdir(output_path + '/models')
if (load_model):
# path to the saved model from where it needs to be loaded
model_path = output_path + '/models'
else:
model_path = ' '
###################################################################################################################
################### 2. creating the model #######################
# splitting into training and validation dataset
training, validation = torch.utils.data.random_split(dataset, (train_num, test_num))
# load separate training and validating dataset -> repeat !!! dataset and dataloader are awesome in pytorch)
train_loader = DataLoader(training, batch_size=mini_batch, shuffle=True)
validation_loader = DataLoader(validation, batch_size=mini_batch, shuffle=False)
# these initializations are for MLP network
n_inp = len(training[0][0])
n_hid1 = n_hidden1
n_hid2 = n_hidden2
n_out = n_output
# call your neural network model right here
if (network == 'CNN'):
net = Simple1DCNN().double().to(device)
else:
net = Net(n_inp, n_hid1, n_hid2, n_out, dropout_rate, weight_initializer, dropout_decision).to(device)
print(net) # prints the architecture of your current NN model
##################################################################################################################
############# 3. Training the model #######################
net = net.train() # set the network to training mode
# net.apply(weight_init)
criterion = torch.nn.MSELoss() # set the loss criterion
optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, momentum=momentum, weight_decay=w_lambda)
print("Starting training \n")
# print("h{}_lr{}_lam{}_bat_{}".format(n_hidden1, learning_rate, w_lambda, mini_batch))
print("####################################################################### \n")
weight_ih = [] # storing the weights from input to hidden
weight_ho = [] # storing the weights from hidden unit to output unit
train_losses = [] # storing the training losses
val_losses = [] # storing the validation losses
test_losses = [] # storing the validation losses
min_val_RMSE = 1e5 # initializing to find min validation RMSE
min_R_epoch = 1e5 # initializing to find the epoch with min validation RMSE
counter = [] # to store the different epochs that gives validation accuracy > 90%
t_correct = []
t_acc = []
v_acc = []
# uncomment below to test on different learning rates
###### important: comment out the criterion, optimizer, and net initialized above #####
# learning_rates = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
# lr_train_loss = []
# lr_val_loss = []
# for iter_learn, l_rate in enumerate(learning_rates):
# torch.manual_seed(1); np.random.seed(1)
# # net = net.train()
# net.apply(weight_init)
# optimizer = torch.optim.SGD(net.parameters(), lr=l_rate, momentum=0.5)
# weight_ho = []
t0 = time.time()
for ep in range(epoch):
train_loss = [] # saves the batch training loss for each epoch
t_item = 0
t_correct = []
for batch_idx, (data, target) in enumerate(train_loader):
t_item += len(target)
data, target = data.to(device), target.to(device) # passing the variables to gpu
if (network == 'CNN'):
X = data.double().unsqueeze(1) # pytorch input should be float -> awkward right?
Y = target.double().view(-1, 1) # converting into 2-d tensor
else:
X = data.float() # pytorch input should be float -> awkward right?
Y = target.float().view(-1, 1) # converting into 2-d tensor
optimizer.zero_grad() # making the gradient zero before optimizing
oupt = net(X) # neural network output
loss_obj = criterion(oupt, Y) # loss calculation
loss_obj.backward() # back propagation
optimizer.step() # remember w(t) = w(t-1) - alpha*cost ??
# if not, we are just updating weights here
# saving the weights from input to hidden and hidden to output
# weight_ih.append(np.reshape(net.hid1.weight.data.clone().cpu().numpy(), (1, n_inp * n_hid1)))
# weight_ho.append(np.reshape(net.oupt.weight.data.clone().cpu().numpy(), (1, n_hid2 * n_out)))
train_loss.append(loss_obj.item()) # batch losses -> length = number of batches in an epoch
correct = torch.sum((torch.abs(oupt - Y) < torch.abs(0.1 * Y)))
t_correct.append(correct)
if (network == 'CNN'):
weight_ho.append((net.fc3.weight.data.clone().cpu().numpy()))
else:
weight_ho.append(np.reshape(net.oupt.weight.data.clone().cpu().numpy(), (1, n_hid2 * n_out)))
t_result = ((sum(t_correct)).item() / t_item)
t_acc.append(t_result)
# getting the training loss for each epoch
train_loss_avg = sum(train_loss) / len(train_loss) # batch averaging
train_losses.append([train_loss_avg]) # saving average batch loss for each epoch
# testing validation set after training all the batches
net = net.eval() # set the network to evaluation mode
val_acc, val_RMSE, vali_loss = accuracy(net, validation_loader, tolerance, criterion, device, network, eval=False)
val_losses.append([vali_loss.item()]) # validation loss on entire samples for each epoch
v_acc.append(val_acc.item() / 100)
# find the epoch that gives minimum validation loss
if val_RMSE < min_val_RMSE:
min_val_RMSE = val_RMSE
min_R_epoch = ep
# set the network to training mode after validation
net = net.train()
# if we are willing to test the models on testing data that gives validation accuracy > 90%
# if (save_model) and val_RMSE <= 0.65:
if (save_model) and val_RMSE<=0.25:
counter.append((ep))
torch.save(net.state_dict(), output_path + '/models/model{}.pth'.format(ep))
print("epoch = %d" % ep, end="")
print(" train loss = %7.4f" % train_loss_avg, end="")
print(" val_accuracy = %0.2f%%" % val_acc, end="")
print(" val_RMSE = %7.4f" % val_RMSE, end="")
print(" val_loss = %7.4f" % vali_loss.item()) # similar to RMSE, can comment out if unnecessary
# uncomment below if you are testing for different learning rates
# plotted loss after each learning rate test
# print(" min RMSE = {} at {} batch \n".format(min_RMSE, min_R_epoch))
# weight_ho = np.reshape(weight_ho, (np.shape(weight_ho)[0], np.shape(weight_ho)[2]))
# weights_ho_num = int(np.shape(weight_ho)[1])
# for i in range(0, weights_ho_num):
# plt.plot(weight_ho[:, i])
# plt.grid(linestyle='-', linewidth=0.5)
# plt.xticks(fontsize=12)
# plt.yticks(fontsize=12)
# plt.ylabel("weights from hidden to output layer", **axis_font)
# plt.xlabel("Number of batches in entire epochs", **axis_font)
# plt.xlim(0, epoch)
# plt.rcParams['agg.path.chunksize'] = 10000
# plt.savefig('C:/Users/abodh/Box Sync/Box Sync/Spring 2020/inertia project/Neural-Network-Regression/output/'
# 'output_feb19/iteration{}'.format(iter_learn), dpi=600, bbox_inches='tight')
# plt.close()
# averaged the loss to test on learning rates
# lr_train_loss.append([sum(train_losses) / len(train_losses)])
# lr_val_loss.append([sum(val_losses) / len(val_losses)])
# train_losses.append([loss_obj.item()])
# val_losses.append([vali_loss.item()])
print("####################################################################### \n")
print("Training complete \n")
print("Time taken = {}".format(time.time() - t0))
print(" min RMSE = {} at {} epoch \n".format(min_val_RMSE, min_R_epoch))
print("####################################################################### \n")
# results[cnt, 0] = min_val_RMSE
# results[cnt, 1] = min_R_epoch
# results[cnt, 2] = n_hidden1
# results[cnt, 3] = learning_rate
# results[cnt, 4] = w_lambda
# results[cnt, 5] = mini_batch
# cnt += 1
if (save_figs):
np.savetxt(output_path + '/train_losses.csv', train_losses, delimiter=',')
np.savetxt(output_path + '/val_losses.csv', val_losses, delimiter=',')
###################################################################################################################
################### 4. Evaluating the model (validation) #######################
net = net.eval() # set eval mode
acc_val, val_RMSE, _ = accuracy(net, validation_loader, tolerance, criterion, device, network, eval=True)
print('validation accuracy with {} tolerance = {:.2f} and RMSE = {:.6f}\n'
.format(tolerance, acc_val, val_RMSE))
###################################################################################################################
################### 5. Using the model (testing) #######################
test_loss, test_RMSE = testing(model_path, data_path, counter, net, criterion, device, network, load_model)
###################################################################################################################
################### 6. Plotting the results #######################
# Set the font dictionaries (for plot title and axis titles)
title_font = {'fontname': 'Arial', 'size': '16', 'color': 'black', 'weight': 'normal',
'verticalalignment': 'bottom'} # Bottom vertical alignment for more space
axis_font = {'fontname': 'Arial', 'size': '16'}
# uncomment below to plot the losses along with different learning rates
# losses = np.squeeze(lr_train_loss)
# val_losses = np.squeeze(lr_val_loss)
# Plot the training loss and validation loss for different learning rates
# pdb.set_trace()
# plt.semilogx(np.array(learning_rates), losses, label='training loss/total Loss')
# plt.semilogx(np.array(learning_rates), val_losses, label='validation cost/total Loss')
# plt.ylabel('Cost\ Total Loss')
# plt.xlabel('learning rate')
# plt.legend()
# plt.savefig(output_path + '/losses{}'.format(iter_learn), dpi=600, bbox_inches='tight')
# pdb.set_trace()
label_graph = ['train_loss', 'val_loss', 'fitted_train_loss', 'fitted_val_loss', 'test_loss']
losses = np.squeeze(train_losses)
val_losses = np.squeeze(val_losses)
t_x = np.arange(len(losses))
v_x = np.arange(len(val_losses))
plt.figure()
plt.plot(t_x, losses, label=label_graph[0], c='blue', linewidth='5')
plt.plot(v_x, val_losses, label=label_graph[1], c='green', linewidth='2')
# uncomment below if you want to have a vertical line at your best epoch
# plt.axvline(x=min_R_epoch, color='r', linestyle='--', linewidth=3)
plt.ylabel("Mean Squared Error", **axis_font)
plt.xlabel("Number of epochs", **axis_font)
plt.xlim(0, len(t_x))
plt.grid(linestyle='-', linewidth=0.5)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.rcParams['agg.path.chunksize'] = 1000
plt.legend()
if (save_figs):
plt.savefig(output_path + '/batch_loss.png', dpi=600, bbox_inches='tight')
# plt.show()
plt.close()
plt.figure()
plt.plot(t_x, t_acc, label='training accuracy', c='blue', linewidth='5')
plt.plot(v_x, v_acc, label='validation accuracy', c='green', linewidth='2')
# uncomment below if you want to have a vertical line at your best epoch
# plt.axvline(x=min_R_epoch, color='r', linestyle='--', linewidth=3)
plt.ylabel("Accuracy", **axis_font)
plt.xlabel("Number of epochs", **axis_font)
plt.xlim(0, len(t_x))
plt.grid(linestyle='-', linewidth=0.5)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.rcParams['agg.path.chunksize'] = 1000
plt.legend()
if (save_figs):
plt.savefig(output_path + '/accuracy.png', dpi=600, bbox_inches='tight')
# plt.show()
plt.close()
# uncomment below to plot polyfit on losses
# # t_x = np.arange(len(losses))
# plt.scatter(t_x, losses, label=label_graph[0], marker = 'x', c = '#1f77b4', alpha = 0.5)
# poly = np.polyfit(t_x, losses, 4)
# losses = np.poly1d(poly)(t_x)
# plt.plot(t_x, losses, label=label_graph[2], c = 'red', linewidth = '5')
#
# v_x = np.arange(len(val_losses))
# plt.scatter(v_x, val_losses, label=label_graph[1], marker = '>', c = '#9467bd', alpha = 0.5)
# poly = np.polyfit(v_x, val_losses, 4)
# val_losses = np.poly1d(poly)(v_x)
# plt.plot(v_x, val_losses, label=label_graph[3], c = 'green', linewidth = '5')
#
# plt.ylabel("Mean Squared Error", **axis_font)
# plt.xlabel("Number of epochs", **axis_font)
# # plt.title("Batch training loss vs number of batch", **title_font)
# plt.grid(linestyle='-', linewidth=0.5)
# plt.xticks(fontsize=12)
# plt.yticks(fontsize=12)
# plt.rcParams['agg.path.chunksize'] = 1000
# plt.legend()
# plt.savefig('./batch_loss.png', dpi=600, bbox_inches='tight')
# plt.show()
# plt.close()
# uncomment below to plot input to hidden weights
# weight_ih = np.reshape(weight_ih, (np.shape(weight_ih)[0], np.shape(weight_ih)[2]))
# weights_ih_num = int(np.shape(weight_ih)[1])
# for i in range(0, weights_ih_num):
# plt.plot(weight_ih[:, i])
# plt.grid(linestyle='-', linewidth=0.5)
# plt.xticks(fontsize=12)
# plt.yticks(fontsize=12)
# plt.ylabel("weights from input to hidden layer", **axis_font)
# plt.xlabel("Number of batches in entire epochs", **axis_font)
# plt.xlim(0, max_batches)
# plt.rcParams['agg.path.chunksize'] = 10000
# plt.savefig(output_path + '/wih.png', dpi=600, bbox_inches='tight')
# plt.show()
# plt.close()
plt.figure()
weight_ho = np.reshape(weight_ho, (np.shape(weight_ho)[0], np.shape(weight_ho)[2]))
weights_ho_num = int(np.shape(weight_ho)[1])
for i in range(0, weights_ho_num):
plt.plot(weight_ho[:, i])
plt.grid(linestyle='-', linewidth=0.5)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.ylabel("weights from hidden to output layer", **axis_font)
plt.xlabel("Number of epochs", **axis_font)
plt.xlim(0, epoch)
plt.rcParams['agg.path.chunksize'] = 10000
if (save_figs):
plt.savefig(output_path + '/who', dpi=600, bbox_inches='tight')
# plt.show()
plt.close()
# # finally saving the results
# np.savetxt('../../Neural-Network-Regression/log/testing_models/results.csv', results, delimiter=',')
# best_idx = np.argmax(results[:, 0])
# print ("the best result is given with hid: {} "
# "lr: {} lambda: {} batch_size: {} with an min RMSE of: {}"
# " at epoch: {}".format(results[best_idx,2], results[best_idx,3],
# results[best_idx,4], results[best_idx, 5],
# results[best_idx,0], results[best_idx,1])) | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"torch.nn.MSELoss",
"torch.cuda.is_available",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"model.Simple1DCNN",
"matplotlib.pyplot.yticks",
"numpy.random.seed",
"os.mkdir",
"utils.testing",
"torch.abs"... | [((185, 206), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (199, 206), False, 'import matplotlib\n'), ((672, 692), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (689, 692), False, 'import torch\n'), ((695, 712), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (709, 712), True, 'import numpy as np\n'), ((2590, 2610), 'data_loading.freq_data', 'freq_data', (['data_path'], {}), '(data_path)\n', (2599, 2610), False, 'from data_loading import loading, separate_dataset, freq_data\n'), ((4523, 4584), 'torch.utils.data.random_split', 'torch.utils.data.random_split', (['dataset', '(train_num, test_num)'], {}), '(dataset, (train_num, test_num))\n', (4552, 4584), False, 'import torch\n'), ((4718, 4775), 'torch.utils.data.DataLoader', 'DataLoader', (['training'], {'batch_size': 'mini_batch', 'shuffle': '(True)'}), '(training, batch_size=mini_batch, shuffle=True)\n', (4728, 4775), False, 'from torch.utils.data import DataLoader\n'), ((4800, 4860), 'torch.utils.data.DataLoader', 'DataLoader', (['validation'], {'batch_size': 'mini_batch', 'shuffle': '(False)'}), '(validation, batch_size=mini_batch, shuffle=False)\n', (4810, 4860), False, 'from torch.utils.data import DataLoader\n'), ((5620, 5638), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (5636, 5638), False, 'import torch\n'), ((7112, 7123), 'time.time', 'time.time', ([], {}), '()\n', (7121, 7123), False, 'import time\n'), ((12895, 12981), 'utils.accuracy', 'accuracy', (['net', 'validation_loader', 'tolerance', 'criterion', 'device', 'network'], {'eval': '(True)'}), '(net, validation_loader, tolerance, criterion, device, network,\n eval=True)\n', (12903, 12981), False, 'from utils import accuracy, testing\n'), ((13341, 13429), 'utils.testing', 'testing', (['model_path', 'data_path', 'counter', 'net', 'criterion', 'device', 'network', 'load_model'], {}), '(model_path, data_path, counter, net, criterion, device, network,\n load_model)\n', 
(13348, 13429), False, 'from utils import accuracy, testing\n'), ((14693, 14717), 'numpy.squeeze', 'np.squeeze', (['train_losses'], {}), '(train_losses)\n', (14703, 14717), True, 'import numpy as np\n'), ((14735, 14757), 'numpy.squeeze', 'np.squeeze', (['val_losses'], {}), '(val_losses)\n', (14745, 14757), True, 'import numpy as np\n'), ((14834, 14846), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14844, 14846), True, 'import matplotlib.pyplot as plt\n'), ((14851, 14919), 'matplotlib.pyplot.plot', 'plt.plot', (['t_x', 'losses'], {'label': 'label_graph[0]', 'c': '"""blue"""', 'linewidth': '"""5"""'}), "(t_x, losses, label=label_graph[0], c='blue', linewidth='5')\n", (14859, 14919), True, 'import matplotlib.pyplot as plt\n'), ((14924, 14997), 'matplotlib.pyplot.plot', 'plt.plot', (['v_x', 'val_losses'], {'label': 'label_graph[1]', 'c': '"""green"""', 'linewidth': '"""2"""'}), "(v_x, val_losses, label=label_graph[1], c='green', linewidth='2')\n", (14932, 14997), True, 'import matplotlib.pyplot as plt\n'), ((15152, 15197), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean Squared Error"""'], {}), "('Mean Squared Error', **axis_font)\n", (15162, 15197), True, 'import matplotlib.pyplot as plt\n'), ((15202, 15245), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of epochs"""'], {}), "('Number of epochs', **axis_font)\n", (15212, 15245), True, 'import matplotlib.pyplot as plt\n'), ((15276, 15314), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linestyle': '"""-"""', 'linewidth': '(0.5)'}), "(linestyle='-', linewidth=0.5)\n", (15284, 15314), True, 'import matplotlib.pyplot as plt\n'), ((15319, 15342), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (15329, 15342), True, 'import matplotlib.pyplot as plt\n'), ((15347, 15370), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (15357, 15370), True, 'import matplotlib.pyplot as plt\n'), ((15421, 15433), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15431, 15433), True, 'import matplotlib.pyplot as plt\n'), ((15558, 15569), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15567, 15569), True, 'import matplotlib.pyplot as plt\n'), ((15575, 15587), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15585, 15587), True, 'import matplotlib.pyplot as plt\n'), ((15592, 15664), 'matplotlib.pyplot.plot', 'plt.plot', (['t_x', 't_acc'], {'label': '"""training accuracy"""', 'c': '"""blue"""', 'linewidth': '"""5"""'}), "(t_x, t_acc, label='training accuracy', c='blue', linewidth='5')\n", (15600, 15664), True, 'import matplotlib.pyplot as plt\n'), ((15669, 15744), 'matplotlib.pyplot.plot', 'plt.plot', (['v_x', 'v_acc'], {'label': '"""validation accuracy"""', 'c': '"""green"""', 'linewidth': '"""2"""'}), "(v_x, v_acc, label='validation accuracy', c='green', linewidth='2')\n", (15677, 15744), True, 'import matplotlib.pyplot as plt\n'), ((15899, 15934), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy', **axis_font)\n", (15909, 15934), True, 'import matplotlib.pyplot as plt\n'), ((15939, 15982), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of epochs"""'], {}), "('Number of epochs', **axis_font)\n", (15949, 15982), True, 'import matplotlib.pyplot as plt\n'), ((16013, 16051), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linestyle': '"""-"""', 'linewidth': '(0.5)'}), "(linestyle='-', linewidth=0.5)\n", (16021, 16051), True, 'import matplotlib.pyplot as plt\n'), ((16056, 16079), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (16066, 16079), True, 'import matplotlib.pyplot as plt\n'), ((16084, 16107), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (16094, 16107), True, 'import matplotlib.pyplot as plt\n'), ((16158, 16170), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16168, 16170), True, 'import matplotlib.pyplot 
as plt\n'), ((16293, 16304), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16302, 16304), True, 'import matplotlib.pyplot as plt\n'), ((18117, 18129), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18127, 18129), True, 'import matplotlib.pyplot as plt\n'), ((18344, 18382), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linestyle': '"""-"""', 'linewidth': '(0.5)'}), "(linestyle='-', linewidth=0.5)\n", (18352, 18382), True, 'import matplotlib.pyplot as plt\n'), ((18387, 18410), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (18397, 18410), True, 'import matplotlib.pyplot as plt\n'), ((18415, 18438), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (18425, 18438), True, 'import matplotlib.pyplot as plt\n'), ((18443, 18505), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""weights from hidden to output layer"""'], {}), "('weights from hidden to output layer', **axis_font)\n", (18453, 18505), True, 'import matplotlib.pyplot as plt\n'), ((18510, 18553), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of epochs"""'], {}), "('Number of epochs', **axis_font)\n", (18520, 18553), True, 'import matplotlib.pyplot as plt\n'), ((18558, 18576), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'epoch'], {}), '(0, epoch)\n', (18566, 18576), True, 'import matplotlib.pyplot as plt\n'), ((18737, 18748), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (18746, 18748), True, 'import matplotlib.pyplot as plt\n'), ((3788, 3809), 'os.mkdir', 'os.mkdir', (['output_path'], {}), '(output_path)\n', (3796, 3809), False, 'import os\n'), ((4038, 4071), 'os.mkdir', 'os.mkdir', (["(output_path + '/models')"], {}), "(output_path + '/models')\n", (4046, 4071), False, 'import os\n'), ((9433, 9520), 'utils.accuracy', 'accuracy', (['net', 'validation_loader', 'tolerance', 'criterion', 'device', 'network'], {'eval': '(False)'}), '(net, validation_loader, tolerance, criterion, device, 
network,\n eval=False)\n', (9441, 9520), False, 'from utils import accuracy, testing\n'), ((12464, 12538), 'numpy.savetxt', 'np.savetxt', (["(output_path + '/train_losses.csv')", 'train_losses'], {'delimiter': '""","""'}), "(output_path + '/train_losses.csv', train_losses, delimiter=',')\n", (12474, 12538), True, 'import numpy as np\n'), ((12547, 12617), 'numpy.savetxt', 'np.savetxt', (["(output_path + '/val_losses.csv')", 'val_losses'], {'delimiter': '""","""'}), "(output_path + '/val_losses.csv', val_losses, delimiter=',')\n", (12557, 12617), True, 'import numpy as np\n'), ((15462, 15536), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_path + '/batch_loss.png')"], {'dpi': '(600)', 'bbox_inches': '"""tight"""'}), "(output_path + '/batch_loss.png', dpi=600, bbox_inches='tight')\n", (15473, 15536), True, 'import matplotlib.pyplot as plt\n'), ((16199, 16271), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_path + '/accuracy.png')"], {'dpi': '(600)', 'bbox_inches': '"""tight"""'}), "(output_path + '/accuracy.png', dpi=600, bbox_inches='tight')\n", (16210, 16271), True, 'import matplotlib.pyplot as plt\n'), ((18314, 18339), 'matplotlib.pyplot.plot', 'plt.plot', (['weight_ho[:, i]'], {}), '(weight_ho[:, i])\n', (18322, 18339), True, 'import matplotlib.pyplot as plt\n'), ((18652, 18715), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_path + '/who')"], {'dpi': '(600)', 'bbox_inches': '"""tight"""'}), "(output_path + '/who', dpi=600, bbox_inches='tight')\n", (18663, 18715), True, 'import matplotlib.pyplot as plt\n'), ((749, 774), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (772, 774), False, 'import torch\n'), ((18243, 18262), 'numpy.shape', 'np.shape', (['weight_ho'], {}), '(weight_ho)\n', (18251, 18262), True, 'import numpy as np\n'), ((3457, 3469), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3467, 3469), False, 'from datetime import date, datetime\n'), ((5158, 5247), 'model.Net', 'Net', (['n_inp', 'n_hid1', 
'n_hid2', 'n_out', 'dropout_rate', 'weight_initializer', 'dropout_decision'], {}), '(n_inp, n_hid1, n_hid2, n_out, dropout_rate, weight_initializer,\n dropout_decision)\n', (5161, 5247), False, 'from model import Net, Simple1DCNN\n'), ((12022, 12033), 'time.time', 'time.time', ([], {}), '()\n', (12031, 12033), False, 'import time\n'), ((18169, 18188), 'numpy.shape', 'np.shape', (['weight_ho'], {}), '(weight_ho)\n', (18177, 18188), True, 'import numpy as np\n'), ((18193, 18212), 'numpy.shape', 'np.shape', (['weight_ho'], {}), '(weight_ho)\n', (18201, 18212), True, 'import numpy as np\n'), ((8653, 8672), 'torch.abs', 'torch.abs', (['(oupt - Y)'], {}), '(oupt - Y)\n', (8662, 8672), False, 'import torch\n'), ((8675, 8693), 'torch.abs', 'torch.abs', (['(0.1 * Y)'], {}), '(0.1 * Y)\n', (8684, 8693), False, 'import torch\n'), ((3631, 3645), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3643, 3645), False, 'from datetime import date, datetime\n'), ((5100, 5113), 'model.Simple1DCNN', 'Simple1DCNN', ([], {}), '()\n', (5111, 5113), False, 'from model import Net, Simple1DCNN\n'), ((3570, 3582), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3580, 3582), False, 'from datetime import date, datetime\n')] |
import datetime
import h5py
import numpy as np
import os
import pytest
import random
import string
import time
import tempfile
import unittest
from labrad import types as T
from labrad import units as U
from twisted.internet import task
from datavault import backend, errors
def _unique_filename(suffix='.hdf5'):
    """Return a fresh temporary-file path ending in *suffix*.

    The file itself is not created; tests that use the name are expected
    to clean it up via _remove_file_if_exists.
    """
    # NOTE(review): tempfile.mktemp is race-prone in general, but these
    # tests only need a name that does not yet exist on disk.
    return tempfile.mktemp(suffix=suffix, prefix='dvtest')
def _remove_file_if_exists(name):
    """Best-effort deletion of *name*; any OSError (e.g. missing file) is ignored."""
    try:
        os.remove(name)  # os.remove is the documented alias of os.unlink
    except OSError:
        pass
class _TestCase(unittest.TestCase):
    """Base test case that adds a numpy array-equality assertion."""

    def assert_arrays_equal(self, first, second):
        """Fail unless *first* and *second* compare element-wise equal."""
        message = 'Arrays not equal.\nfirst: {}\nsecond: {}'.format(first, second)
        self.assertTrue(np.array_equal(first, second), msg=message)
class UtilityMethodsTest(_TestCase):
    """Tests for the module-level helper functions in datavault.backend."""

    def test_time_to_str(self):
        stamp = datetime.datetime(2012, 9, 21, 3, 14, 15)
        self.assertEqual('2012-09-21, 03:14:15', backend.time_to_str(stamp))

    def test_time_from_str(self):
        parsed = backend.time_from_str('2012-09-21, 03:14:15')
        self.assertEqual(datetime.datetime(2012, 9, 21, 3, 14, 15), parsed)

    def test_labrad_urlencode(self):
        encoded = backend.labrad_urlencode('foo.bar/baz')
        self.assertEqual(
            'data:application/labrad;base64,'
            'AAAAAXMAAAAPAAAAC2Zvby5iYXIvYmF6',
            encoded)

    def test_labrad_urldecode(self):
        decoded = backend.labrad_urldecode(
            'data:application/labrad;base64,'
            'AAAAAXMAAAAPAAAAC2Zvby5iYXIvYmF6')
        self.assertEqual('foo.bar/baz', decoded)

    def test_labrad_urldecode_incorrect_prefix(self):
        # Missing the 'data:application/labrad' prefix -> must be rejected.
        bad_url = 'labrad;base64,AAAAAXMAAAAPAAAAC2Zvby5iYXIvYmF6'
        self.assertRaises(ValueError, backend.labrad_urldecode, bad_url)
class _MockFile(object):
    """Minimal file stand-in that only records whether close() was called."""
    def __init__(self):
        # SelfClosingFile only needs an object exposing close(); is_open
        # lets the tests observe whether the timeout actually closed us.
        self.is_open = True
    def close(self):
        self.is_open = False
class _MockFileOpener(object):
    """Callable opener stub: records its call args and hands out a _MockFile."""
    def __init__(self):
        # file stays None until the opener is invoked, which lets tests
        # detect whether SelfClosingFile opened anything at all.
        self.file = None
        self.args = None
        self.kwargs = None
    def __call__(self, *args, **kwargs):
        # Capture exactly what SelfClosingFile passed through.
        self.args = args
        self.kwargs = kwargs
        self.file = _MockFile()
        return self.file
class SelfClosingFileTest(_TestCase):
    """Tests for the SelfClosingFile."""
    def setUp(self):
        # Use twisted's task.Clock as a fake reactor so the close timeout
        # can be advanced deterministically instead of sleeping.
        self.close_timeout_sec = 1
        self.opener = _MockFileOpener()
        self.clock = task.Clock()
        self.file = backend.SelfClosingFile(opener=self.opener,
                                            timeout=self.close_timeout_sec,
                                            open_args=('1', '2', '3'),
                                            open_kw={'a': 'b', 'c': 'd'},
                                            reactor=self.clock)
    def test_file_doesnt_open_if_no_touch(self):
        # With touch=False, construction alone must not invoke the opener.
        opener = _MockFileOpener()
        self.file = backend.SelfClosingFile(opener=opener,
                                            timeout=self.close_timeout_sec,
                                            open_args=('1', '2', '3'),
                                            open_kw={'a': 'b', 'c': 'd'},
                                            touch=False)
        self.assertTrue(opener.file is None, msg='File was opened on init')
    def test_file_opens_on_init(self):
        # Default construction (touch implied) opens immediately and passes
        # open_args/open_kw through to the opener verbatim.
        self.assertTrue(self.opener.file.is_open, msg='File not opened on init')
        self.assertEqual(
            self.opener.args, ('1', '2', '3'), 'File args not set')
        self.assertEqual(
            self.opener.kwargs, {'a': 'b', 'c': 'd'}, 'File kwargs not set')
    def test_closes_file_after_timeout(self):
        self.close_callback_called = False
        def onCloseCallback(self_closing_file):
            self.close_callback_called = True
        self.file.onClose(onCloseCallback)
        # Advance clock to close the self closing file.
        self.clock.advance(self.close_timeout_sec)
        self.assertFalse(self.opener.file.is_open,
                         msg='File not closed after timeout')
        self.assertTrue(self.close_callback_called,
                        msg='Registered callback not called!')
# Dependent and Independent variables used for testing IniData and HDF5MetaData.
# Two scalar independent axes (frequency and temperature).
_INDEPENDENTS = [
    backend.Independent(
        label='FirstVariable',
        shape=(1,),
        datatype='v',
        unit='Ghz'),
    backend.Independent(
        label='SecondVariable',
        shape=(1,),
        datatype='v',
        unit='Kelvin')]
# A single scalar dependent column ('Cents' measured in Dollars).
_DEPENDENTS = [
    backend.Dependent(
        label='Cents',
        legend='OnlyDependent',
        shape=(1,),
        datatype='v',
        unit='Dollars')]
class _MetadataTest(_TestCase):
    """Shared metadata tests; subclasses supply the backend via get_data().

    The tests below run against whatever metadata object get_data() returns,
    so both IniData and HDF5MetaData are exercised with the same assertions.
    """
    def run(self, result):
        """Prevents the base test class from running tests."""
        # issubclass(_MetadataTest, type(self)) is True only when self is an
        # instance of _MetadataTest itself (or a superclass), so the shared
        # tests execute only on concrete subclasses.
        if issubclass(_MetadataTest, type(self)):
            return
        super(_MetadataTest, self).run(result)
    def get_data(self):
        """Returns metadata instance to test."""
        pass
    def test_initialize_independents(self):
        # Only independents: row/transpose types reflect just their units.
        data = self.get_data()
        data.initialize_info('FooTitle', _INDEPENDENTS, [])
        self.assertEqual(data.getRowType(), '*(v[Ghz],v[Kelvin])')
        self.assertEqual(data.getTransposeType(), '(*v[Ghz],*v[Kelvin])')
        self.assertEqual(data.getIndependents(), _INDEPENDENTS)
        self.assertEqual(data.getDependents(), [])
    def test_initialize_dependents(self):
        # Only dependents: single-column row type.
        data = self.get_data()
        data.initialize_info('FooTitle', [], _DEPENDENTS)
        self.assertEqual(data.getRowType(), '*(v[Dollars])')
        self.assertEqual(data.getTransposeType(), '(*v[Dollars])')
        self.assertEqual(data.getDependents(), _DEPENDENTS)
        self.assertEqual(data.getIndependents(), [])
    def test_initialize_info(self):
        # Independents come before dependents in the row type.
        data = self.get_data()
        data.initialize_info('FooTitle', _INDEPENDENTS, _DEPENDENTS)
        self.assertEqual(data.getRowType(), '*(v[Ghz],v[Kelvin],v[Dollars])')
        self.assertEqual(
            data.getTransposeType(), '(*v[Ghz],*v[Kelvin],*v[Dollars])')
        self.assertEqual(data.getDependents(), _DEPENDENTS)
        self.assertEqual(data.getIndependents(), _INDEPENDENTS)
    def test_add_param(self):
        data = self.get_data()
        data.initialize_info('FooTitle', _INDEPENDENTS, _DEPENDENTS)
        self.assertEqual(data.getParamNames(), [])
        # Parameter values round-trip arbitrary labrad-serializable tuples.
        param = (True, np.int32(100), U.Complex(1.j + 0xdeadbeef, U.inch))
        data.addParam('Param1', param)
        self.assertEqual(data.getParamNames(), ['Param1'])
        self.assertEqual(data.getParameter('Param1'), param)
        # Lookup is case-sensitive by default...
        self.assertRaises(
            errors.BadParameterError,
            data.getParameter,
            'param1')
        # ...but can be relaxed explicitly.
        self.assertEqual(
            data.getParameter('param1', case_sensitive=False), param)
    def test_add_param_already_added(self):
        data = self.get_data()
        data.initialize_info('FooTitle', _INDEPENDENTS, _DEPENDENTS)
        self.assertEqual(data.getParamNames(), [])
        param = (True, np.int32(100), U.Complex(1.j + 0xdeadbeef, U.inch))
        data.addParam('Param1', param)
        # Re-adding the same name must be rejected, not overwritten.
        self.assertRaises(
            errors.ParameterInUseError,
            data.addParam,
            'Param1', param)
    def test_add_comment(self):
        data = self.get_data()
        data.initialize_info('FooTitle', _INDEPENDENTS, _DEPENDENTS)
        self.assertEqual(data.numComments(), 0)
        data.addComment('foo user', 'bar comment')
        self.assertEqual(data.numComments(), 1)
        # Comment tuples are (timestamp, user, text); index 0 is the time.
        comments, _ = data.getComments(None, 0)
        self.assertEqual(len(comments), 1)
        self.assertEqual(comments[0][1], 'foo user')
        self.assertEqual(comments[0][2], 'bar comment')
    def test_iterate_get_comments(self):
        # getComments(limit, start) pages through comments and returns the
        # next start position alongside the fetched slice.
        data = self.get_data()
        data.initialize_info('FooTitle', _INDEPENDENTS, _DEPENDENTS)
        for i in range(3):
            data.addComment('user {}'.format(i), '{}'.format(i))
        self.assertEqual(data.getComments(0, 0), ([], 0))
        comments_0, next_pos = data.getComments(1, 0)
        self.assertEqual(next_pos, 1)
        self.assertEqual(len(comments_0), 1)
        self.assertEqual(comments_0[0][1], 'user 0')
        self.assertEqual(comments_0[0][2], '0')
        comments_1, next_pos = data.getComments(2, 1)
        self.assertEqual(next_pos, 3)
        self.assertEqual(len(comments_1), 2)
        self.assertEqual(comments_1[0][1], 'user 1')
        self.assertEqual(comments_1[0][2], '1')
        self.assertEqual(comments_1[1][1], 'user 2')
        self.assertEqual(comments_1[1][2], '2')
class IniDataTest(_MetadataTest):
    """Runs the shared metadata tests plus INI-specific load/save tests."""

    # Canonical on-disk INI content consumed by the test_load_* tests below.
    _TEST_INI_FILE = '''
    [General]
    title=TestTitle
    created=2012-09-21, 03:14:15
    accessed=2012-09-22, 03:14:15
    modified=2012-09-23, 03:14:15
    independent=2
    dependent=1
    parameters=1
    Comments=2
    [Independent 1]
    label=FirstVariable
    units=GHz
    [Independent 2]
    label=SecondVariable
    units=Kelvin
    [Dependent 1]
    label=OnlyDependent
    units=Dollars
    category=Cents
    [Parameter 1]
    label=A Parameter
    data=[12,3,{'a':0}]
    [Comments]
    c0=('2012-09-24, 03:14:15','bar','baz')
    c1=('2012-09-25, 03:14:15','fizz','buzz')
    '''
    def setUp(self):
        self.infofilename = _unique_filename(suffix='.ini')
    def tearDown(self):
        _remove_file_if_exists(self.infofilename)
    def get_data(self):
        """Return a fresh IniData for the shared _MetadataTest tests."""
        return backend.IniData()
    def load_test_data(self):
        """Write _TEST_INI_FILE to disk and return an IniData loaded from it."""
        # First save the test data to a file.
        # FIX: the original used the Python 2 `file()` builtin, which was
        # removed in Python 3; `open()` behaves identically on both.
        with open(self.infofilename, 'w') as f:
            f.write(self._TEST_INI_FILE)
        # Now load it into a new IniData
        data = backend.IniData()
        data.infofile = self.infofilename
        data.load()
        return data
    def test_load_dtype(self):
        data = self.load_test_data()
        # Two independents + one dependent -> three float64 columns.
        self.assertEqual(
            data.dtype,
            np.dtype([('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')]))
    def test_load_independents(self):
        data = self.load_test_data()
        independents = data.getIndependents()
        self.assertEqual(len(independents), 2)
        self.assertEqual(independents[0].label, 'FirstVariable')
        self.assertEqual(independents[0].unit, 'GHz')
        self.assertEqual(independents[1].label, 'SecondVariable')
        self.assertEqual(independents[1].unit, 'Kelvin')
    def test_load_dependents(self):
        data = self.load_test_data()
        dependents = data.getDependents()
        self.assertEqual(len(dependents), 1)
        self.assertEqual(dependents[0].label, 'Cents')
        self.assertEqual(dependents[0].unit, 'Dollars')
        self.assertEqual(dependents[0].legend, 'OnlyDependent')
    def test_load_parameters(self):
        data = self.load_test_data()
        parameters = data.getParamNames()
        self.assertEqual(len(parameters), 1)
        self.assertEqual(parameters[0], 'A Parameter')
        # The data= entry is evaluated back into Python objects.
        self.assertEqual(data.getParameter('A Parameter'), [12, 3, {'a': 0}])
    def test_load_comments(self):
        data = self.load_test_data()
        self.assertEqual(data.numComments(), 2)
        comments, next_comment_position = data.getComments(None, 0)
        self.assertEqual(next_comment_position, 2)
        self.assertEqual(len(comments), 2)
        self.assertEqual(
            comments[0],
            (datetime.datetime(2012, 9, 24, 3, 14, 15), 'bar', 'baz'))
        self.assertEqual(
            comments[1],
            (datetime.datetime(2012, 9, 25, 3, 14, 15), 'fizz', 'buzz'))
    def test_load_rowtype(self):
        data = self.load_test_data()
        self.assertEqual(data.getRowType(), '*(v[GHz],v[Kelvin],v[Dollars])')
    def test_load_transpose_type(self):
        data = self.load_test_data()
        self.assertEqual(
            data.getTransposeType(), '(*v[GHz],*v[Kelvin],*v[Dollars])')
    def test_add_complicated_param(self):
        # Nested containers (tuple of str/dict/list) must round-trip.
        data = backend.IniData()
        data.initialize_info('FooTitle', _INDEPENDENTS, _DEPENDENTS)
        self.assertEqual(data.getParamNames(), [])
        data.addParam('Param1', ('really', {'complex': 0xdeadbeef}, ['data']))
        self.assertEqual(data.getParamNames(), ['Param1'])
        self.assertEqual(
            data.getParameter('Param1'),
            ('really', {'complex': 0xdeadbeef}, ['data']))
        self.assertRaises(
            errors.BadParameterError,
            data.getParameter,
            'param1')
        self.assertEqual(
            data.getParameter('param1', case_sensitive=False),
            ('really', {'complex': 0xdeadbeef}, ['data']))
    def test_save_reload(self):
        """End-to-end: save an IniData to disk and load it into a new one."""
        # Generate some data to save.
        data_to_save = backend.IniData()
        data_to_save.initialize_info(
            'FooTitle', _INDEPENDENTS, _DEPENDENTS)
        data_to_save.addComment('foo user', 'bar comment')
        data_to_save.addParam('Param1', [100])
        # Save it.
        data_to_save.infofile = self.infofilename
        data_to_save.save()
        # Create a new IniData and read the saved file.
        data = backend.IniData()
        data.infofile = self.infofilename
        data.load()
        # Check that it's all there.
        self.assertEqual(data.getDependents(), _DEPENDENTS)
        self.assertEqual(data.getIndependents(), _INDEPENDENTS)
        comments, _ = data.getComments(1, 0)
        self.assertEqual(len(comments), 1)
        self.assertEqual(comments[0][1], 'foo user')
        self.assertEqual(comments[0][2], 'bar comment')
        self.assertEqual(data.getParamNames(), ['Param1'])
        self.assertEqual(data.getParameter('Param1'), [100])
        self.assertEqual(data.getRowType(), '*(v[Ghz],v[Kelvin],v[Dollars])')
        self.assertEqual(
            data.getTransposeType(), '(*v[Ghz],*v[Kelvin],*v[Dollars])')
class _MockAttrs(dict):
    """Mock Attributes class for use in the _MockDataset."""
    def create(self, name, data, dtype):
        # Mirrors h5py's AttributeManager.create: store the value under
        # `name` as a numpy array of the requested dtype.
        self[name] = np.asarray(data, dtype=dtype)
class _MockDataset(object):
    """Mock Dataset class to use in the HDF5MetaDataTest."""
    def __init__(self):
        # Only the .attrs mapping is exercised by HDF5MetaData in these tests.
        self.attrs = _MockAttrs()
class HDF5MetaDataTest(_MetadataTest):
    """Runs the shared metadata tests against the HDF5 metadata backend."""

    def get_data(self):
        """Return an HDF5MetaData wired to an in-memory mock dataset."""
        meta = backend.HDF5MetaData()
        meta.dataset = _MockDataset()
        return meta
class _BackendDataTestCase(_TestCase):
    """Base class adding a helper that reads backend data several ways."""
    def assert_data_in_backend(self, backend_data, expected_data):
        """Checks that the backend data contains the expected data.

        Exercises both the .data attribute (when present) and getData
        paging. Note that expected_data should be at least 2 rows.
        """
        # Read using .data.
        if hasattr(backend_data, 'data'):
            read_data = backend_data.data
            self.assert_arrays_equal(read_data, expected_data)
        # Read using getData for all data.
        read_data, next_pos = backend_data.getData(None, 0, False, None)
        self.assert_arrays_equal(read_data, expected_data)
        self.assertEqual(next_pos, len(expected_data))
        # Read using getData for first row of data.
        read_data, next_pos = backend_data.getData(1, 0, False, None)
        self.assert_arrays_equal(read_data[0], expected_data[0])
        self.assertEqual(next_pos, 1)
        # Read using getData for the second row of data (start position 1).
        read_data, next_pos = backend_data.getData(1, 1, False, None)
        self.assert_arrays_equal(read_data[0], expected_data[1])
        self.assertEqual(next_pos, 2)
class CsvListDataTest(_BackendDataTestCase):
    """Tests for the CSV-backed list data store."""

    def setUp(self):
        self.filename = _unique_filename(suffix='.raw')
        # Fake reactor so any deferred file-closing is deterministic.
        self.clock = task.Clock()
        self.data = self.get_backend_data()
        # Initialize the metadata.
        self.data.initialize_info('FooTitle', _INDEPENDENTS, _DEPENDENTS)
    def tearDown(self):
        # CsvListData keeps metadata in a sibling .ini file; remove both.
        _remove_file_if_exists(self.filename)
        _remove_file_if_exists(self.filename[:-4] + '.ini')
    def get_backend_data(self):
        """Return a CsvListData bound to this test's file and mock reactor."""
        return backend.CsvListData(self.filename, reactor=self.clock)
    def test_empty_data_read(self):
        read_data = self.data.data
        self.assertEqual(read_data, [])
    def test_add_data_then_read(self):
        self.data.addData([[1, 2, 3]])
        self.data.addData([[4, 5, 6]])
        self.assert_data_in_backend(self.data, [[1, 2, 3], [4, 5, 6]])
    def test_add_data_wrong_number_of_columns(self):
        # Rows must match the 2-independent + 1-dependent layout (3 columns).
        # FIX: this test was defined twice in the original class; the
        # identical duplicate (which silently shadowed this one) is removed.
        self.assertRaises(errors.BadDataError, self.data.addData, [(1, 2)])
        self.assertRaises(
            errors.BadDataError, self.data.addData, [(1, 2, 3, 4)])
    def test_read_from_file(self):
        # Add some data and save it to a file.
        self.data.addData([[1, 2, 3], [4, 5, 6]])
        self.data.save()
        del self.data
        # Load it back.
        data = self.get_backend_data()
        data.load()
        self.assert_data_in_backend(data, [[1, 2, 3], [4, 5, 6]])
        self.assertTrue(data.hasMore(0))
        self.assertTrue(data.hasMore(1))
        self.assertFalse(data.hasMore(2))
class _BackendDataTest(_BackendDataTestCase):
    """Base tests for data backends."""

    def run(self, result):
        """Prevents the base test class from running tests."""
        # Only concrete subclasses should execute these shared tests.
        if issubclass(_BackendDataTest, type(self)):
            return
        super(_BackendDataTest, self).run(result)

    def get_backend_data(self, filename):
        # Subclasses override this to return a concrete backend instance.
        return None

    def test_add_data_then_read(self):
        row_dtype = [('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')]
        first = np.recarray((1,), dtype=row_dtype)
        first[0] = (1, 2, 3)
        second = np.recarray((1,), dtype=row_dtype)
        second[0] = (4, 5, 6)
        self.data.addData(first)
        self.data.addData(second)
        self.assert_data_in_backend(self.data, [[1, 2, 3], [4, 5, 6]])

    def test_add_recarray_data_then_read(self):
        rows = np.recarray(
            (2,), dtype=[('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')])
        rows[0] = (1, 2, 3)
        rows[1] = (4, 5, 6)
        self.data.addData(rows)
        self.assert_data_in_backend(self.data, [[1, 2, 3], [4, 5, 6]])

    def test_read_from_file(self):
        # Persist two rows, drop the in-memory object, then reload from disk.
        rows = np.recarray(
            (2,), dtype=[('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')])
        rows[0] = (1, 2, 3)
        rows[1] = (4, 5, 6)
        self.data.addData(rows)
        self.data.save()
        del self.data
        reloaded = self.get_backend_data(self.filename)
        reloaded.load()
        self.assert_data_in_backend(reloaded, [[1, 2, 3], [4, 5, 6]])
        self.assertTrue(reloaded.hasMore(0))
        self.assertTrue(reloaded.hasMore(1))
        self.assertFalse(reloaded.hasMore(2))

    def test_get_data_transpose(self):
        rows = np.recarray(
            (2,), dtype=[('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')])
        rows[0] = (1, 2, 3)
        rows[1] = (4, 5, 6)
        self.data.addData(rows)
        # Transposed reads are not supported by generic backends.
        self.assertRaises(
            RuntimeError, self.data.getData, None, 0, True, None)
class CsvNumpyDataTest(_BackendDataTest):
    """Tests for the numpy-backed CSV data store."""

    def setUp(self):
        self.filename = _unique_filename(suffix='raw')
        self.files_to_remove = []
        self.clock = task.Clock()
        self.data = self.get_backend_data(self.filename)
        # Initialize the metadata.
        self.data.initialize_info('FooTitle', _INDEPENDENTS, _DEPENDENTS)

    def tearDown(self):
        for leftover in self.files_to_remove:
            _remove_file_if_exists(leftover)
            _remove_file_if_exists(leftover[:-4] + '.ini')

    def get_backend_data(self, filename):
        # Track every created file so tearDown can remove it (plus the .ini).
        self.files_to_remove.append(filename)
        return backend.CsvNumpyData(filename, reactor=self.clock)

    def test_empty_data_read(self):
        contents = self.data.data
        self.assertEqual(contents.dtype, np.dtype(float))
        self.assertEqual(contents.size, 0)
        self.assertEqual(contents[0].size, 0)

    def test_add_data_wrong_number_of_columns(self):
        # Three columns are declared; shorter or longer rows are rejected.
        self.assertRaises(errors.BadDataError, self.data.addData, [(1, 2)])
        self.assertRaises(
            errors.BadDataError, self.data.addData, [(1, 2, 3, 4)])
class ExtendedHDF5DataTest(_BackendDataTest):
    """Tests for the extended HDF5 data backend."""

    def setUp(self):
        self.filename = _unique_filename(suffix='.hdf5')
        self.files_to_remove = []
        self.clock = task.Clock()
        self.data = self.get_backend_data(self.filename)
        # Initialize the metadata.
        self.data.initialize_info('FooTitle', _INDEPENDENTS, _DEPENDENTS)

    def tearDown(self):
        for name in self.files_to_remove:
            _remove_file_if_exists(name)

    def get_backend_data(self, filename):
        # Track every created file so tearDown can remove it.
        self.files_to_remove.append(filename)
        fh = backend.SelfClosingFile(
            h5py.File, open_args=(filename, 'a'), reactor=self.clock)
        return backend.ExtendedHDF5Data(fh)

    def test_empty_data_read(self):
        read_data, _ = self.data.getData(None, 0, False, None)
        self.assertEqual(read_data, [])

    def test_get_data_transpose(self):
        data_to_add = np.recarray(
            (2,),
            dtype=[('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')])
        data_to_add[0] = (1, 2, 3)
        data_to_add[1] = (4, 5, 6)
        self.data.addData(data_to_add)
        actual, next_pos = self.data.getData(None, 0, True, None)
        self.assertEqual(next_pos, 2)
        self.assertEqual(len(actual), 3)
        self.assert_arrays_equal(actual, [[1, 4], [2, 5], [3, 6]])

    def test_initialize_info_bad_vars(self):
        # 'f' is not a valid independent datatype for this backend.
        bad_independents = [
            backend.Independent(
                label='FirstVariable',
                shape=(1,),
                datatype='f',
                unit='Ghz')]
        data = self.get_backend_data(self.filename)
        self.assertRaises(
            RuntimeError,
            data.initialize_info,
            'FooTitle',
            bad_independents,
            [])
        # 't' is not a valid dependent datatype either.
        bad_dependents = [
            backend.Dependent(
                label='Cents',
                legend='OnlyDependent',
                shape=(1,),
                datatype='t',
                unit='Dollars')]
        self.assertRaises(
            RuntimeError,
            data.initialize_info,
            'FooTitle',
            bad_dependents,
            [])

    def test_initialize_type_i(self):
        # Integer datatype maps to a 32-bit int column.
        name = _unique_filename()
        data = self.get_backend_data(name)
        independent = backend.Independent(
            label='NewVariable',
            shape=(1,),
            datatype='i',
            unit='')
        data.initialize_info('Foo', [independent], [])
        self.assertEqual(data.dtype, np.dtype([('f0', '<i4')]))

    def test_initialize_type_t(self):
        # Time datatype maps to a 64-bit int column.
        name = _unique_filename()
        data = self.get_backend_data(name)
        independent = backend.Independent(
            label='NewVariable',
            shape=(1,),
            datatype='t',
            unit='')
        data.initialize_info('Foo', [independent], [])
        self.assertEqual(data.dtype, np.dtype([('f0', '<i8')]))

    def test_initialize_type_c(self):
        # Complex datatype maps to a 128-bit complex column.
        name = _unique_filename()
        data = self.get_backend_data(name)
        independent = backend.Independent(
            label='NewVariable',
            shape=(1,),
            datatype='c',
            unit='')
        data.initialize_info('Foo', [independent], [])
        self.assertEqual(data.dtype, np.dtype([('f0', '<c16')]))

    def test_initialize_type_v(self):
        # Value datatype maps to a 64-bit float column.
        name = _unique_filename()
        data = self.get_backend_data(name)
        independent = backend.Independent(
            label='NewVariable',
            shape=(1,),
            datatype='v',
            unit='')
        data.initialize_info('Foo', [independent], [])
        self.assertEqual(data.dtype, np.dtype([('f0', '<f8')]))

    def test_initialize_type_s(self):
        # Renamed from a duplicate 'test_initialize_type_v' that shadowed the
        # real 'v' test above; this one exercises the string datatype 's',
        # which maps to an object column.
        name = _unique_filename()
        data = self.get_backend_data(name)
        independent = backend.Independent(
            label='NewVariable',
            shape=(1,),
            datatype='s',
            unit='')
        data.initialize_info('Foo', [independent], [])
        self.assertEqual(data.dtype, np.dtype([('f0', 'O')]))

    def test_initialize_type_unknown(self):
        # Unrecognized datatype codes are rejected.
        name = _unique_filename()
        data = self.get_backend_data(name)
        independent = backend.Independent(
            label='NewVariable',
            shape=(1,),
            datatype='x',
            unit='')
        self.assertRaises(
            RuntimeError,
            data.initialize_info,
            'FooTitle',
            [independent],
            [])

    def test_add_complex_data_array_then_read(self):
        name = _unique_filename()
        data = self.get_backend_data(name)
        independent = backend.Independent(
            label='NewVariable',
            shape=(2, 2),
            datatype='c',
            unit='V')
        data.initialize_info('Foo', [independent], [])
        data_entry = np.recarray(
            (1,),
            dtype=[('f0', '<c16', (2, 2))])
        data_entry[0][0][0] = [0j, 1j]
        data_entry[0][0][1] = [1j, 0j]
        data.addData(data_entry)
        added_data, _ = data.getData(None, 0, False, None)
        self.assert_arrays_equal(added_data[0][0], [[0j, 1j], [1j, 0j]])

    def test_add_complex_data_array_then_read_objs(self):
        name = _unique_filename()
        data = self.get_backend_data(name)
        independent = backend.Independent(
            label='NewVariable',
            shape=(1,),
            datatype='s',
            unit='')
        data.initialize_info('Foo', [independent], [])
        data_entry = np.recarray(
            (1,),
            dtype=[('f0', 'O')])
        data_entry[0] = ({'a': 0},)
        data.addData(data_entry)
        added_data, _ = data.getData(None, 0, False, None)
        # Object cells come back as their string representation.
        self.assertEqual(added_data[0][0], "{'a': 0}")

    def test_add_string_array_column(self):
        # String columns with an array shape are not supported.
        name = _unique_filename()
        data = self.get_backend_data(name)
        independent = backend.Independent(
            label='NewVariable',
            shape=(2, 0),
            datatype='s',
            unit='')
        self.assertRaises(
            ValueError,
            data.initialize_info,
            'FooTitle',
            [independent],
            [])
class SimpleHDF5DataTest(_BackendDataTest):
    """Tests for the simple (flat) HDF5 data backend."""

    def setUp(self):
        self.filename = _unique_filename()
        self.filenames_to_remove = []
        self.clock = task.Clock()
        self.data = self.get_backend_data(self.filename)
        # Initialize the metadata.
        self.data.initialize_info('FooTitle', _INDEPENDENTS, _DEPENDENTS)

    def tearDown(self):
        for leftover in self.filenames_to_remove:
            _remove_file_if_exists(leftover)

    def get_backend_data(self, filename):
        # Track every created file so tearDown can remove it.
        self.filenames_to_remove.append(filename)
        handle = backend.SelfClosingFile(
            h5py.File, open_args=(filename, 'a'), reactor=self.clock)
        return backend.SimpleHDF5Data(handle)

    def test_empty_data_read(self):
        rows, _ = self.data.getData(None, 0, False, None)
        self.assertEqual(rows.dtype, np.dtype(float))
        self.assertEqual(rows.size, 0)
if __name__ == '__main__':
    # Run this module's tests directly with verbose output.
    pytest.main(['-v', __file__])
| [
"datavault.backend.CsvListData",
"numpy.int32",
"labrad.units.Complex",
"datavault.backend.Independent",
"datavault.backend.time_to_str",
"datavault.backend.IniData",
"datetime.datetime",
"twisted.internet.task.Clock",
"datavault.backend.labrad_urldecode",
"numpy.asarray",
"datavault.backend.lab... | [((329, 376), 'tempfile.mktemp', 'tempfile.mktemp', ([], {'prefix': '"""dvtest"""', 'suffix': 'suffix'}), "(prefix='dvtest', suffix=suffix)\n", (344, 376), False, 'import tempfile\n'), ((4492, 4577), 'datavault.backend.Independent', 'backend.Independent', ([], {'label': '"""FirstVariable"""', 'shape': '(1,)', 'datatype': '"""v"""', 'unit': '"""Ghz"""'}), "(label='FirstVariable', shape=(1,), datatype='v', unit='Ghz'\n )\n", (4511, 4577), False, 'from datavault import backend, errors\n'), ((4611, 4700), 'datavault.backend.Independent', 'backend.Independent', ([], {'label': '"""SecondVariable"""', 'shape': '(1,)', 'datatype': '"""v"""', 'unit': '"""Kelvin"""'}), "(label='SecondVariable', shape=(1,), datatype='v', unit=\n 'Kelvin')\n", (4630, 4700), False, 'from datavault import backend, errors\n'), ((4751, 4853), 'datavault.backend.Dependent', 'backend.Dependent', ([], {'label': '"""Cents"""', 'legend': '"""OnlyDependent"""', 'shape': '(1,)', 'datatype': '"""v"""', 'unit': '"""Dollars"""'}), "(label='Cents', legend='OnlyDependent', shape=(1,),\n datatype='v', unit='Dollars')\n", (4768, 4853), False, 'from datavault import backend, errors\n'), ((27736, 27765), 'pytest.main', 'pytest.main', (["['-v', __file__]"], {}), "(['-v', __file__])\n", (27747, 27765), False, 'import pytest\n'), ((430, 445), 'os.unlink', 'os.unlink', (['name'], {}), '(name)\n', (439, 445), False, 'import os\n'), ((845, 886), 'datetime.datetime', 'datetime.datetime', (['(2012)', '(9)', '(21)', '(3)', '(14)', '(15)'], {}), '(2012, 9, 21, 3, 14, 15)\n', (862, 886), False, 'import datetime\n'), ((904, 929), 'datavault.backend.time_to_str', 'backend.time_to_str', (['time'], {}), '(time)\n', (923, 929), False, 'from datavault import backend, errors\n'), ((1112, 1146), 'datavault.backend.time_from_str', 'backend.time_from_str', (['time_string'], {}), '(time_string)\n', (1133, 1146), False, 'from datavault import backend, errors\n'), ((1166, 1207), 'datetime.datetime', 
'datetime.datetime', (['(2012)', '(9)', '(21)', '(3)', '(14)', '(15)'], {}), '(2012, 9, 21, 3, 14, 15)\n', (1183, 1207), False, 'import datetime\n'), ((1341, 1377), 'datavault.backend.labrad_urlencode', 'backend.labrad_urlencode', (['url_string'], {}), '(url_string)\n', (1365, 1377), False, 'from datavault import backend, errors\n'), ((1700, 1736), 'datavault.backend.labrad_urldecode', 'backend.labrad_urldecode', (['url_string'], {}), '(url_string)\n', (1724, 1736), False, 'from datavault import backend, errors\n'), ((2645, 2657), 'twisted.internet.task.Clock', 'task.Clock', ([], {}), '()\n', (2655, 2657), False, 'from twisted.internet import task\n'), ((2678, 2839), 'datavault.backend.SelfClosingFile', 'backend.SelfClosingFile', ([], {'opener': 'self.opener', 'timeout': 'self.close_timeout_sec', 'open_args': "('1', '2', '3')", 'open_kw': "{'a': 'b', 'c': 'd'}", 'reactor': 'self.clock'}), "(opener=self.opener, timeout=self.close_timeout_sec,\n open_args=('1', '2', '3'), open_kw={'a': 'b', 'c': 'd'}, reactor=self.clock\n )\n", (2701, 2839), False, 'from datavault import backend, errors\n'), ((3112, 3256), 'datavault.backend.SelfClosingFile', 'backend.SelfClosingFile', ([], {'opener': 'opener', 'timeout': 'self.close_timeout_sec', 'open_args': "('1', '2', '3')", 'open_kw': "{'a': 'b', 'c': 'd'}", 'touch': '(False)'}), "(opener=opener, timeout=self.close_timeout_sec,\n open_args=('1', '2', '3'), open_kw={'a': 'b', 'c': 'd'}, touch=False)\n", (3135, 3256), False, 'from datavault import backend, errors\n'), ((9570, 9587), 'datavault.backend.IniData', 'backend.IniData', ([], {}), '()\n', (9585, 9587), False, 'from datavault import backend, errors\n'), ((9808, 9825), 'datavault.backend.IniData', 'backend.IniData', ([], {}), '()\n', (9823, 9825), False, 'from datavault import backend, errors\n'), ((12051, 12068), 'datavault.backend.IniData', 'backend.IniData', ([], {}), '()\n', (12066, 12068), False, 'from datavault import backend, errors\n'), ((12813, 12830), 
'datavault.backend.IniData', 'backend.IniData', ([], {}), '()\n', (12828, 12830), False, 'from datavault import backend, errors\n'), ((13195, 13212), 'datavault.backend.IniData', 'backend.IniData', ([], {}), '()\n', (13210, 13212), False, 'from datavault import backend, errors\n'), ((14081, 14110), 'numpy.asarray', 'np.asarray', (['data'], {'dtype': 'dtype'}), '(data, dtype=dtype)\n', (14091, 14110), True, 'import numpy as np\n'), ((14342, 14364), 'datavault.backend.HDF5MetaData', 'backend.HDF5MetaData', ([], {}), '()\n', (14362, 14364), False, 'from datavault import backend, errors\n'), ((15675, 15687), 'twisted.internet.task.Clock', 'task.Clock', ([], {}), '()\n', (15685, 15687), False, 'from twisted.internet import task\n'), ((16020, 16074), 'datavault.backend.CsvListData', 'backend.CsvListData', (['self.filename'], {'reactor': 'self.clock'}), '(self.filename, reactor=self.clock)\n', (16039, 16074), False, 'from datavault import backend, errors\n'), ((17700, 17770), 'numpy.recarray', 'np.recarray', (['(1,)'], {'dtype': "[('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')]"}), "((1,), dtype=[('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')])\n", (17711, 17770), True, 'import numpy as np\n'), ((17841, 17911), 'numpy.recarray', 'np.recarray', (['(1,)'], {'dtype': "[('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')]"}), "((1,), dtype=[('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')])\n", (17852, 17911), True, 'import numpy as np\n'), ((18167, 18237), 'numpy.recarray', 'np.recarray', (['(2,)'], {'dtype': "[('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')]"}), "((2,), dtype=[('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')])\n", (18178, 18237), True, 'import numpy as np\n'), ((18528, 18598), 'numpy.recarray', 'np.recarray', (['(2,)'], {'dtype': "[('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')]"}), "((2,), dtype=[('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')])\n", (18539, 18598), True, 'import numpy as np\n'), ((19132, 19202), 'numpy.recarray', 'np.recarray', (['(2,)'], {'dtype': "[('f0', '<f8'), ('f1', 
'<f8'), ('f2', '<f8')]"}), "((2,), dtype=[('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')])\n", (19143, 19202), True, 'import numpy as np\n'), ((19667, 19679), 'twisted.internet.task.Clock', 'task.Clock', ([], {}), '()\n', (19677, 19679), False, 'from twisted.internet import task\n'), ((20113, 20163), 'datavault.backend.CsvNumpyData', 'backend.CsvNumpyData', (['filename'], {'reactor': 'self.clock'}), '(filename, reactor=self.clock)\n', (20133, 20163), False, 'from datavault import backend, errors\n'), ((20793, 20805), 'twisted.internet.task.Clock', 'task.Clock', ([], {}), '()\n', (20803, 20805), False, 'from twisted.internet import task\n'), ((21182, 21268), 'datavault.backend.SelfClosingFile', 'backend.SelfClosingFile', (['h5py.File'], {'open_args': "(filename, 'a')", 'reactor': 'self.clock'}), "(h5py.File, open_args=(filename, 'a'), reactor=self.\n clock)\n", (21205, 21268), False, 'from datavault import backend, errors\n'), ((21292, 21320), 'datavault.backend.ExtendedHDF5Data', 'backend.ExtendedHDF5Data', (['fh'], {}), '(fh)\n', (21316, 21320), False, 'from datavault import backend, errors\n'), ((21523, 21593), 'numpy.recarray', 'np.recarray', (['(2,)'], {'dtype': "[('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')]"}), "((2,), dtype=[('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')])\n", (21534, 21593), True, 'import numpy as np\n'), ((22898, 22973), 'datavault.backend.Independent', 'backend.Independent', ([], {'label': '"""NewVariable"""', 'shape': '(1,)', 'datatype': '"""i"""', 'unit': '""""""'}), "(label='NewVariable', shape=(1,), datatype='i', unit='')\n", (22917, 22973), False, 'from datavault import backend, errors\n'), ((23280, 23355), 'datavault.backend.Independent', 'backend.Independent', ([], {'label': '"""NewVariable"""', 'shape': '(1,)', 'datatype': '"""t"""', 'unit': '""""""'}), "(label='NewVariable', shape=(1,), datatype='t', unit='')\n", (23299, 23355), False, 'from datavault import backend, errors\n'), ((23662, 23737), 'datavault.backend.Independent', 
'backend.Independent', ([], {'label': '"""NewVariable"""', 'shape': '(1,)', 'datatype': '"""c"""', 'unit': '""""""'}), "(label='NewVariable', shape=(1,), datatype='c', unit='')\n", (23681, 23737), False, 'from datavault import backend, errors\n'), ((24045, 24120), 'datavault.backend.Independent', 'backend.Independent', ([], {'label': '"""NewVariable"""', 'shape': '(1,)', 'datatype': '"""v"""', 'unit': '""""""'}), "(label='NewVariable', shape=(1,), datatype='v', unit='')\n", (24064, 24120), False, 'from datavault import backend, errors\n'), ((24427, 24502), 'datavault.backend.Independent', 'backend.Independent', ([], {'label': '"""NewVariable"""', 'shape': '(1,)', 'datatype': '"""s"""', 'unit': '""""""'}), "(label='NewVariable', shape=(1,), datatype='s', unit='')\n", (24446, 24502), False, 'from datavault import backend, errors\n'), ((24813, 24888), 'datavault.backend.Independent', 'backend.Independent', ([], {'label': '"""NewVariable"""', 'shape': '(1,)', 'datatype': '"""x"""', 'unit': '""""""'}), "(label='NewVariable', shape=(1,), datatype='x', unit='')\n", (24832, 24888), False, 'from datavault import backend, errors\n'), ((25245, 25323), 'datavault.backend.Independent', 'backend.Independent', ([], {'label': '"""NewVariable"""', 'shape': '(2, 2)', 'datatype': '"""c"""', 'unit': '"""V"""'}), "(label='NewVariable', shape=(2, 2), datatype='c', unit='V')\n", (25264, 25323), False, 'from datavault import backend, errors\n'), ((25449, 25498), 'numpy.recarray', 'np.recarray', (['(1,)'], {'dtype': "[('f0', '<c16', (2, 2))]"}), "((1,), dtype=[('f0', '<c16', (2, 2))])\n", (25460, 25498), True, 'import numpy as np\n'), ((25925, 26000), 'datavault.backend.Independent', 'backend.Independent', ([], {'label': '"""NewVariable"""', 'shape': '(1,)', 'datatype': '"""s"""', 'unit': '""""""'}), "(label='NewVariable', shape=(1,), datatype='s', unit='')\n", (25944, 26000), False, 'from datavault import backend, errors\n'), ((26126, 26164), 'numpy.recarray', 'np.recarray', (['(1,)'], 
{'dtype': "[('f0', 'O')]"}), "((1,), dtype=[('f0', 'O')])\n", (26137, 26164), True, 'import numpy as np\n'), ((26517, 26594), 'datavault.backend.Independent', 'backend.Independent', ([], {'label': '"""NewVariable"""', 'shape': '(2, 0)', 'datatype': '"""s"""', 'unit': '""""""'}), "(label='NewVariable', shape=(2, 0), datatype='s', unit='')\n", (26536, 26594), False, 'from datavault import backend, errors\n'), ((26966, 26978), 'twisted.internet.task.Clock', 'task.Clock', ([], {}), '()\n', (26976, 26978), False, 'from twisted.internet import task\n'), ((27363, 27449), 'datavault.backend.SelfClosingFile', 'backend.SelfClosingFile', (['h5py.File'], {'open_args': "(filename, 'a')", 'reactor': 'self.clock'}), "(h5py.File, open_args=(filename, 'a'), reactor=self.\n clock)\n", (27386, 27449), False, 'from datavault import backend, errors\n'), ((27473, 27499), 'datavault.backend.SimpleHDF5Data', 'backend.SimpleHDF5Data', (['fh'], {}), '(fh)\n', (27495, 27499), False, 'from datavault import backend, errors\n'), ((604, 633), 'numpy.array_equal', 'np.array_equal', (['first', 'second'], {}), '(first, second)\n', (618, 633), True, 'import numpy as np\n'), ((6627, 6640), 'numpy.int32', 'np.int32', (['(100)'], {}), '(100)\n', (6635, 6640), True, 'import numpy as np\n'), ((6642, 6678), 'labrad.units.Complex', 'U.Complex', (['(1.0j + 3735928559)', 'U.inch'], {}), '(1.0j + 3735928559, U.inch)\n', (6651, 6678), True, 'from labrad import units as U\n'), ((7271, 7284), 'numpy.int32', 'np.int32', (['(100)'], {}), '(100)\n', (7279, 7284), True, 'import numpy as np\n'), ((7286, 7322), 'labrad.units.Complex', 'U.Complex', (['(1.0j + 3735928559)', 'U.inch'], {}), '(1.0j + 3735928559, U.inch)\n', (7295, 7322), True, 'from labrad import units as U\n'), ((10039, 10094), 'numpy.dtype', 'np.dtype', (["[('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')]"], {}), "([('f0', '<f8'), ('f1', '<f8'), ('f2', '<f8')])\n", (10047, 10094), True, 'import numpy as np\n'), ((20278, 20293), 'numpy.dtype', 'np.dtype', 
(['float'], {}), '(float)\n', (20286, 20293), True, 'import numpy as np\n'), ((22028, 22113), 'datavault.backend.Independent', 'backend.Independent', ([], {'label': '"""FirstVariable"""', 'shape': '(1,)', 'datatype': '"""f"""', 'unit': '"""Ghz"""'}), "(label='FirstVariable', shape=(1,), datatype='f', unit='Ghz'\n )\n", (22047, 22113), False, 'from datavault import backend, errors\n'), ((22424, 22526), 'datavault.backend.Dependent', 'backend.Dependent', ([], {'label': '"""Cents"""', 'legend': '"""OnlyDependent"""', 'shape': '(1,)', 'datatype': '"""t"""', 'unit': '"""Dollars"""'}), "(label='Cents', legend='OnlyDependent', shape=(1,),\n datatype='t', unit='Dollars')\n", (22441, 22526), False, 'from datavault import backend, errors\n'), ((23115, 23140), 'numpy.dtype', 'np.dtype', (["[('f0', '<i4')]"], {}), "([('f0', '<i4')])\n", (23123, 23140), True, 'import numpy as np\n'), ((23497, 23522), 'numpy.dtype', 'np.dtype', (["[('f0', '<i8')]"], {}), "([('f0', '<i8')])\n", (23505, 23522), True, 'import numpy as np\n'), ((23879, 23905), 'numpy.dtype', 'np.dtype', (["[('f0', '<c16')]"], {}), "([('f0', '<c16')])\n", (23887, 23905), True, 'import numpy as np\n'), ((24262, 24287), 'numpy.dtype', 'np.dtype', (["[('f0', '<f8')]"], {}), "([('f0', '<f8')])\n", (24270, 24287), True, 'import numpy as np\n'), ((24644, 24667), 'numpy.dtype', 'np.dtype', (["[('f0', 'O')]"], {}), "([('f0', 'O')])\n", (24652, 24667), True, 'import numpy as np\n'), ((27642, 27657), 'numpy.dtype', 'np.dtype', (['float'], {}), '(float)\n', (27650, 27657), True, 'import numpy as np\n'), ((11485, 11526), 'datetime.datetime', 'datetime.datetime', (['(2012)', '(9)', '(24)', '(3)', '(14)', '(15)'], {}), '(2012, 9, 24, 3, 14, 15)\n', (11502, 11526), False, 'import datetime\n'), ((11607, 11648), 'datetime.datetime', 'datetime.datetime', (['(2012)', '(9)', '(25)', '(3)', '(14)', '(15)'], {}), '(2012, 9, 25, 3, 14, 15)\n', (11624, 11648), False, 'import datetime\n')] |
# encoding=utf-8
import numpy as np
import pyqtgraph.opengl as gl
from pyqtgraph.Qt import QtCore, QtGui
class plot3d(object):
    """Simple 3D point-cloud viewer built on a pyqtgraph GLViewWidget.

    Wraps a GLViewWidget in a plain Qt widget (``self.view``) with a
    'Snap' button that saves the current view to '1.jpg'.
    """
    def __init__(self, title='null'):
        """
        :param title: window title for the viewer widget.
        """
        self.glview = gl.GLViewWidget()
        coord = gl.GLAxisItem()
        coord.setSize(1, 1, 1)
        # self.glview.addItem(coord)
        self.glview.setMinimumSize(QtCore.QSize(600, 500))
        # Pan/orient the camera: look from behind along x, level, 5 units away.
        self.glview.pan(1, 0, 0)
        self.glview.setCameraPosition(azimuth=180)
        self.glview.setCameraPosition(elevation=0)
        self.glview.setCameraPosition(distance=5)
        # Items added via add_item, so clear() can remove them later.
        self.items = []
        self.view = QtGui.QWidget()
        self.view.window().setWindowTitle(title)
        hlayout = QtGui.QHBoxLayout()
        snap_btn = QtGui.QPushButton('&Snap')
        def take_snap():
            # Grab the current GL framebuffer and write it to disk.
            qimg = self.glview.readQImage()
            qimg.save('1.jpg')
        snap_btn.clicked.connect(take_snap)
        hlayout.addWidget(snap_btn)
        hlayout.addStretch()
        layout = QtGui.QVBoxLayout()
        layout.addLayout(hlayout)
        layout.addWidget(self.glview)
        self.view.setLayout(layout)
    def add_item(self, item):
        """Add a GL item to the view and remember it for clear().

        :param item: any pyqtgraph.opengl item (scatter, line, mesh, ...).
        :return: None
        """
        self.glview.addItem(item)
        self.items.append(item)
    def clear(self):
        # Remove every item previously registered through add_item.
        for it in self.items:
            self.glview.removeItem(it)
        self.items.clear()
    def add_points(self, points, colors):
        """Add a scatter plot of points to the view.

        :param points: (N, 3) array of xyz positions.
        :param colors: per-point RGBA colors accepted by GLScatterPlotItem.
        :return: None
        """
        points_item = gl.GLScatterPlotItem(pos=points, size=1.5, color=colors)
        self.add_item(points_item)
    def add_line(self, p1, p2, color, width=3):
        # Draw a single line segment from p1 to p2 (each an xyz triple).
        lines = np.array([[p1[0], p1[1], p1[2]],
                          [p2[0], p2[1], p2[2]]])
        lines_item = gl.GLLinePlotItem(pos=lines, mode='lines',
                                       color=color, width=width, antialias=True)
        self.add_item(lines_item)
    def plot_bbox_mesh(self, gt_boxes3d, color=(0, 1, 0, 1)):
        """Draw the 12 edges of a 3D bounding box as line segments.

        :param gt_boxes3d: (8, 3) array of box corner coordinates; corners
            0-3 form one face and 4-7 the opposite face (matching edge order).
        :param color: RGBA edge color.
        :return: None
        """
        b = gt_boxes3d
        for k in range(0, 4):
            # Edge on the first face.
            i, j = k, (k + 1) % 4
            self.add_line([b[i, 0], b[i, 1], b[i, 2]], [b[j, 0], b[j, 1], b[j, 2]], color)
            # Corresponding edge on the opposite face.
            i, j = k + 4, (k + 1) % 4 + 4
            self.add_line([b[i, 0], b[i, 1], b[i, 2]], [b[j, 0], b[j, 1], b[j, 2]], color)
            # Edge connecting the two faces.
            i, j = k, k + 4
            self.add_line([b[i, 0], b[i, 1], b[i, 2]], [b[j, 0], b[j, 1], b[j, 2]], color)
def value_to_rgb(pc_inte):
    """Map intensity values to RGB colors (low = red, high = green).

    Intensities are min-max normalized (with a +0.1 offset so the minimum
    is not exactly zero); the blue channel is always zero since the
    normalized ratio never exceeds 1.

    :param pc_inte: array of per-point intensity values.
    :return: (N, 3) array of RGB values in [0, 1].
    """
    lo, hi = np.min(pc_inte), np.max(pc_inte)
    ratio = (pc_inte - lo + 0.1) / (hi - lo + 0.1)
    red = np.maximum(1 - ratio, 0)
    blue = np.maximum(ratio - 1, 0)
    green = 1 - blue - red
    return np.stack([red, green, blue]).transpose()
def view_points_cloud(pc=None):
    """Open a 3D viewer window showing the point cloud ``pc``.

    Falls back to 1024 uniformly random points when ``pc`` is None.
    Blocks in the Qt event loop and returns its exit code.
    """
    app = QtGui.QApplication([])
    viewer = plot3d()
    if pc is None:
        pc = np.random.rand(1024, 3)
    # Opaque white color for every point.
    colors = np.ones([pc.shape[0], 4])
    viewer.add_points(pc, colors)
    viewer.view.show()
    return app.exec()
if __name__ == '__main__':
    # KITTI-style binary point cloud: rows of (x, y, z, intensity) float32.
    raw = np.fromfile("./000010.bin", dtype=np.float32, count=-1)
    point_cloud = raw.reshape([-1, 4])
    view_points_cloud(point_cloud)
| [
"pyqtgraph.Qt.QtGui.QVBoxLayout",
"pyqtgraph.opengl.GLLinePlotItem",
"numpy.ones",
"pyqtgraph.Qt.QtGui.QWidget",
"pyqtgraph.Qt.QtGui.QPushButton",
"numpy.random.rand",
"pyqtgraph.opengl.GLScatterPlotItem",
"pyqtgraph.opengl.GLViewWidget",
"numpy.max",
"numpy.array",
"pyqtgraph.Qt.QtGui.QApplicat... | [((2740, 2764), 'numpy.maximum', 'np.maximum', (['(1 - ratio)', '(0)'], {}), '(1 - ratio, 0)\n', (2750, 2764), True, 'import numpy as np\n'), ((2777, 2801), 'numpy.maximum', 'np.maximum', (['(ratio - 1)', '(0)'], {}), '(ratio - 1, 0)\n', (2787, 2801), True, 'import numpy as np\n'), ((2910, 2932), 'pyqtgraph.Qt.QtGui.QApplication', 'QtGui.QApplication', (['[]'], {}), '([])\n', (2928, 2932), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((3028, 3053), 'numpy.ones', 'np.ones', (['[pc.shape[0], 4]'], {}), '([pc.shape[0], 4])\n', (3035, 3053), True, 'import numpy as np\n'), ((236, 253), 'pyqtgraph.opengl.GLViewWidget', 'gl.GLViewWidget', ([], {}), '()\n', (251, 253), True, 'import pyqtgraph.opengl as gl\n'), ((270, 285), 'pyqtgraph.opengl.GLAxisItem', 'gl.GLAxisItem', ([], {}), '()\n', (283, 285), True, 'import pyqtgraph.opengl as gl\n'), ((643, 658), 'pyqtgraph.Qt.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (656, 658), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((726, 745), 'pyqtgraph.Qt.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', ([], {}), '()\n', (743, 745), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((765, 791), 'pyqtgraph.Qt.QtGui.QPushButton', 'QtGui.QPushButton', (['"""&Snap"""'], {}), "('&Snap')\n", (782, 791), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((1020, 1039), 'pyqtgraph.Qt.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (1037, 1039), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((1578, 1634), 'pyqtgraph.opengl.GLScatterPlotItem', 'gl.GLScatterPlotItem', ([], {'pos': 'points', 'size': '(1.5)', 'color': 'colors'}), '(pos=points, size=1.5, color=colors)\n', (1598, 1634), True, 'import pyqtgraph.opengl as gl\n'), ((1735, 1791), 'numpy.array', 'np.array', (['[[p1[0], p1[1], p1[2]], [p2[0], p2[1], p2[2]]]'], {}), '([[p1[0], p1[1], p1[2]], [p2[0], p2[1], p2[2]]])\n', (1743, 1791), True, 'import numpy as np\n'), ((1839, 1927), 'pyqtgraph.opengl.GLLinePlotItem', 
'gl.GLLinePlotItem', ([], {'pos': 'lines', 'mode': '"""lines"""', 'color': 'color', 'width': 'width', 'antialias': '(True)'}), "(pos=lines, mode='lines', color=color, width=width,\n antialias=True)\n", (1856, 1927), True, 'import pyqtgraph.opengl as gl\n'), ((2632, 2647), 'numpy.min', 'np.min', (['pc_inte'], {}), '(pc_inte)\n', (2638, 2647), True, 'import numpy as np\n'), ((2649, 2664), 'numpy.max', 'np.max', (['pc_inte'], {}), '(pc_inte)\n', (2655, 2664), True, 'import numpy as np\n'), ((2988, 3011), 'numpy.random.rand', 'np.random.rand', (['(1024)', '(3)'], {}), '(1024, 3)\n', (3002, 3011), True, 'import numpy as np\n'), ((389, 411), 'pyqtgraph.Qt.QtCore.QSize', 'QtCore.QSize', (['(600)', '(500)'], {}), '(600, 500)\n', (401, 411), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((2834, 2853), 'numpy.stack', 'np.stack', (['[r, g, b]'], {}), '([r, g, b])\n', (2842, 2853), True, 'import numpy as np\n')] |
import pandas as pd
from tabulate import tabulate
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy.interpolate import interp1d
#sns.set()
#sns.color_palette("mako")
# Load per-run results and map internal method ids to display names.
df = pd.read_csv('bias_classifier.csv')
df = df.replace({'vanilla': 'Vanilla', 'end': 'EnD', 'rebias': 'ReBias', 'rubi': 'RUBi', 'learned-mixin': 'LearnedMixin'})
print(df.head())
print(df.groupby(['crit', 'rho']).count())
# Distinct debiasing methods (crit) and bias levels (rho) in the results.
crits = df.crit.unique()
rhos = df.rho.unique()
# Per-method lists of mean/std accuracies, one entry per rho value.
accuracy_bias = {}
std_bias = {}
accuracy_target = {}
std_target = {}
# Aggregate repeated runs into per-(method, rho) mean/std, in percent.
for crit in crits:
    for rho in rhos:
        print('\n', crit, rho)
        # Accuracy of predicting the bias (private) attribute and the target.
        acc_bias = df.loc[(df.crit == crit) & (df.rho == rho)]['unbiased.accuracy.bias'].values
        acc_target = df.loc[(df.crit == crit) & (df.rho == rho)]['unbiased.accuracy.f_target'].values
        # Lazily create the per-method lists on first encounter.
        if crit not in accuracy_bias:
            accuracy_bias[crit] = []
            accuracy_target[crit] = []
            std_bias[crit] = []
            std_target[crit] = []
        accuracy_bias[crit].append(np.mean(acc_bias)*100.)
        accuracy_target[crit].append(np.mean(acc_target)*100.)
        std_bias[crit].append(np.std(acc_bias)*100.)
        std_target[crit].append(np.std(acc_target)*100.)
# Font-size presets (points); only BIGGER_SIZE is actually used below.
SMALL_SIZE = 24
MEDIUM_SIZE = 26
BIGGER_SIZE = 28
from matplotlib import rc
rc('axes', titlesize=18)    # fontsize of the axes title
rc('axes', labelsize=18)    # fontsize of the x and y labels
rc('xtick', labelsize=18)    # fontsize of the tick labels
rc('ytick', labelsize=18)    # fontsize of the tick labels
rc('legend', fontsize=14)    # legend fontsize
rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
rc('font', size=18)
#rc('font', family='Times New Roman')
# Render all text through LaTeX (requires a working TeX installation).
rc('text', usetex=True)
#plt.rcParams['font.family'] = 'Times New Roman'
#plt.rcParams.update({
# 'font.size': 12,
# 'text.usetex': True,
# 'text.latex.preamble': r'\usepackage{amsfonts}'
#})
# fig = plt.figure()
# ax = fig.add_subplot(111)
# for crit in crits:
# mean, std = np.array(accuracy_bias[crit]), np.array(std_bias[crit])
# ax.plot(mean[::-1], label=crit, marker='o', linestyle='-.',)
# ax.fill_between(x=range(len(mean)), y1=(mean-std)[::-1], y2=(mean+std)[::-1], alpha=0.25)
# plt.xticks(range(len(rhos)), rhos[::-1])
# ax.legend()
# ax.spines['right'].set_color('none')
# ax.spines['top'].set_color('none')
# ax.text(-0.125, -5, 'High Bias', fontstyle='italic')
# ax.text(2.75, -5, 'Low Bias', fontstyle='italic')
# ax.set_xlabel(r'$\rho$')
# ax.set_ylabel('Bias Accuracy')
# plt.savefig('bias_acc.pdf', format='pdf', dpi=300)
# plt.show()
# Polar "fan" plot: one angular sector per rho, radius = private-class
# accuracy, one curve per method.
crits = ['RUBi', 'Vanilla', 'ReBias', 'LearnedMixin', 'EnD']
fig = plt.figure(figsize=(7,7))
ax = fig.add_subplot(111, projection='polar')
# Angular layout: first rho at angle_offset degrees, then one sector of
# angle_width degrees per additional rho.
angle_offset = 15
angle_width = 50
for i, crit in enumerate(crits):
    # NOTE(review): ax._get_lines.prop_cycler is a private matplotlib API
    # (removed in matplotlib >= 3.8) — used here to keep colors consistent.
    color=next(ax._get_lines.prop_cycler)['color']
    mean, std = np.array(accuracy_bias[crit]), np.array(std_bias[crit])
    x = rhos[::-1]
    y = mean[::-1]
    # Place the four rho values at fixed angles (radians).
    x = np.radians([angle_offset, angle_offset+angle_width, angle_offset+angle_width*2, angle_offset+angle_width*3])
    ax.scatter(x, y, label=crit, c=color, alpha=0.5)
    # Smooth quadratic interpolation between the four measured points.
    f = interp1d(x, y, kind='quadratic')
    xnew = np.linspace(np.radians(angle_offset), np.radians(angle_offset+angle_width*3))
    ax.plot(xnew, f(xnew), c=color, linestyle='--')
    ax.fill_between(x=xnew, y1=f(xnew), y2=f(xnew)-f(xnew), alpha=0.05) #1./float(len(crits)-(i-0.8)))
#color=next(ax._get_lines.prop_cycler)['color']
#mean, std = np.array([10.]*4), np.array([0.]*4)
#x = range(len(rhos))
#y = mean[::-1]
#xnew = np.linspace(0, len(rhos)-1)
#ax.plot(xnew, [10.]*len(xnew), c='gray', linestyle='--', label='Unbiased')
ax.legend(bbox_to_anchor=(1.0,1.15), markerscale=2)
# Label each sector with its rho value (reversed: high bias first).
thetaticks = np.arange(angle_offset, angle_offset+200, 50)
ax.set_thetagrids(thetaticks, labels=rhos[::-1])
ax.tick_params(axis='x', pad=15)
ax.set_ylim(0, 110)
# Restrict the polar plot to the fan covering the four sectors.
ax.set_thetamin(angle_offset)
ax.set_thetamax(angle_offset+angle_width*3)
ax.spines['polar'].set_color('none')
ax.yaxis.set_label_position('right')
ax.xaxis.set_label_position('bottom')
ax.text(np.radians(-50), ax.get_rmax()/5, 'Private Class Accuracy', rotation=15)
ax.text(np.radians(60), ax.get_rmax(), r'$\rho$')
plt.tight_layout()
plt.savefig('bias_acc_polar.pdf', format='pdf', dpi=300, bbox_inches='tight', pad_inches=0)
plt.show()
fig = plt.figure(figsize=(8,8))
ax = plt.subplot(111)
sizes = np.array([1, 2.5, 5.,10])*40
for crit in crits:
print(rhos)
print(accuracy_bias[crit])
ax.scatter(accuracy_bias[crit], accuracy_target[crit], alpha=0.5, label=crit, s=sizes)
#plt.xticks(range(len(rhos)), rhos)
ax.plot(1, 50, ">k", transform=ax.get_yaxis_transform(), clip_on=False)
ax.plot(50, 1, "^k", transform=ax.get_xaxis_transform(), clip_on=False)
ax.legend(markerscale=1)
ax.text(1, 100, 'Privacy preserving', fontstyle='italic')
ax.text(90, 1, 'Privacy leakage', fontstyle='italic')
ax.set_xlabel('Private Class Accuracy', loc='right')
ax.set_ylabel('Target Accuracy', loc='bottom', labelpad=-60)
ax.set_xlim(-5, 105)
ax.set_ylim(-5, 105)
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')
#plt.tight_layout()
plt.savefig('scatter.pdf', format='pdf', dpi=300, bbox_inches='tight', pad_inches=0)
plt.show()
print(accuracy_bias, std_bias)
crits = ['Vanilla', 'RUBi', 'ReBias', 'LearnedMixin', 'EnD']
table = []
for crit in crits:
entry = [crit]
accs = accuracy_bias[crit]
accs = [f'{a:.2f}' for a in accs]
entry.extend(accs)
table.append(entry)
table = tabulate(table, headers=['Method', '0.99', '0.995', '0.997', '0.999'], tablefmt='latex_booktabs')
print(table)
print(plt.rcParams['axes.prop_cycle'].by_key()['color'])
| [
"numpy.radians",
"tabulate.tabulate",
"numpy.mean",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"scipy.interpolate.interp1d",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.rc",
"matplotlib.pyplot.tight_layout",
"numpy.std",
"matplotlib.pyplot.subplot",
"numpy.arange",
"matplot... | [((207, 241), 'pandas.read_csv', 'pd.read_csv', (['"""bias_classifier.csv"""'], {}), "('bias_classifier.csv')\n", (218, 241), True, 'import pandas as pd\n'), ((1326, 1350), 'matplotlib.rc', 'rc', (['"""axes"""'], {'titlesize': '(18)'}), "('axes', titlesize=18)\n", (1328, 1350), False, 'from matplotlib import rc\n'), ((1384, 1408), 'matplotlib.rc', 'rc', (['"""axes"""'], {'labelsize': '(18)'}), "('axes', labelsize=18)\n", (1386, 1408), False, 'from matplotlib import rc\n'), ((1445, 1470), 'matplotlib.rc', 'rc', (['"""xtick"""'], {'labelsize': '(18)'}), "('xtick', labelsize=18)\n", (1447, 1470), False, 'from matplotlib import rc\n'), ((1504, 1529), 'matplotlib.rc', 'rc', (['"""ytick"""'], {'labelsize': '(18)'}), "('ytick', labelsize=18)\n", (1506, 1529), False, 'from matplotlib import rc\n'), ((1563, 1588), 'matplotlib.rc', 'rc', (['"""legend"""'], {'fontsize': '(14)'}), "('legend', fontsize=14)\n", (1565, 1588), False, 'from matplotlib import rc\n'), ((1610, 1645), 'matplotlib.rc', 'rc', (['"""figure"""'], {'titlesize': 'BIGGER_SIZE'}), "('figure', titlesize=BIGGER_SIZE)\n", (1612, 1645), False, 'from matplotlib import rc\n'), ((1678, 1697), 'matplotlib.rc', 'rc', (['"""font"""'], {'size': '(18)'}), "('font', size=18)\n", (1680, 1697), False, 'from matplotlib import rc\n'), ((1736, 1759), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (1738, 1759), False, 'from matplotlib import rc\n'), ((2686, 2712), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (2696, 2712), True, 'import matplotlib.pyplot as plt\n'), ((3774, 3821), 'numpy.arange', 'np.arange', (['angle_offset', '(angle_offset + 200)', '(50)'], {}), '(angle_offset, angle_offset + 200, 50)\n', (3783, 3821), True, 'import numpy as np\n'), ((4243, 4261), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4259, 4261), True, 'import matplotlib.pyplot as plt\n'), ((4262, 4358), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bias_acc_polar.pdf"""'], {'format': '"""pdf"""', 'dpi': '(300)', 'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "('bias_acc_polar.pdf', format='pdf', dpi=300, bbox_inches=\n 'tight', pad_inches=0)\n", (4273, 4358), True, 'import matplotlib.pyplot as plt\n'), ((4354, 4364), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4362, 4364), True, 'import matplotlib.pyplot as plt\n'), ((4373, 4399), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (4383, 4399), True, 'import matplotlib.pyplot as plt\n'), ((4404, 4420), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (4415, 4420), True, 'import matplotlib.pyplot as plt\n'), ((5266, 5354), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""scatter.pdf"""'], {'format': '"""pdf"""', 'dpi': '(300)', 'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "('scatter.pdf', format='pdf', dpi=300, bbox_inches='tight',\n pad_inches=0)\n", (5277, 5354), True, 'import matplotlib.pyplot as plt\n'), ((5351, 5361), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5359, 5361), True, 'import matplotlib.pyplot as plt\n'), ((5630, 5731), 'tabulate.tabulate', 'tabulate', (['table'], {'headers': "['Method', '0.99', '0.995', '0.997', '0.999']", 'tablefmt': '"""latex_booktabs"""'}), "(table, headers=['Method', '0.99', '0.995', '0.997', '0.999'],\n tablefmt='latex_booktabs')\n", (5638, 5731), False, 'from tabulate import tabulate\n'), ((3007, 3130), 'numpy.radians', 'np.radians', (['[angle_offset, angle_offset + angle_width, angle_offset + angle_width * 2, \n angle_offset + angle_width * 3]'], {}), '([angle_offset, angle_offset + angle_width, angle_offset + \n angle_width * 2, angle_offset + angle_width * 3])\n', (3017, 3130), True, 'import numpy as np\n'), ((3182, 3214), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'y'], {'kind': '"""quadratic"""'}), "(x, y, kind='quadratic')\n", (3190, 3214), False, 'from 
scipy.interpolate import interp1d\n'), ((4119, 4134), 'numpy.radians', 'np.radians', (['(-50)'], {}), '(-50)\n', (4129, 4134), True, 'import numpy as np\n'), ((4200, 4214), 'numpy.radians', 'np.radians', (['(60)'], {}), '(60)\n', (4210, 4214), True, 'import numpy as np\n'), ((4430, 4457), 'numpy.array', 'np.array', (['[1, 2.5, 5.0, 10]'], {}), '([1, 2.5, 5.0, 10])\n', (4438, 4457), True, 'import numpy as np\n'), ((2895, 2924), 'numpy.array', 'np.array', (['accuracy_bias[crit]'], {}), '(accuracy_bias[crit])\n', (2903, 2924), True, 'import numpy as np\n'), ((2926, 2950), 'numpy.array', 'np.array', (['std_bias[crit]'], {}), '(std_bias[crit])\n', (2934, 2950), True, 'import numpy as np\n'), ((3238, 3262), 'numpy.radians', 'np.radians', (['angle_offset'], {}), '(angle_offset)\n', (3248, 3262), True, 'import numpy as np\n'), ((3264, 3306), 'numpy.radians', 'np.radians', (['(angle_offset + angle_width * 3)'], {}), '(angle_offset + angle_width * 3)\n', (3274, 3306), True, 'import numpy as np\n'), ((1050, 1067), 'numpy.mean', 'np.mean', (['acc_bias'], {}), '(acc_bias)\n', (1057, 1067), True, 'import numpy as np\n'), ((1111, 1130), 'numpy.mean', 'np.mean', (['acc_target'], {}), '(acc_target)\n', (1118, 1130), True, 'import numpy as np\n'), ((1167, 1183), 'numpy.std', 'np.std', (['acc_bias'], {}), '(acc_bias)\n', (1173, 1183), True, 'import numpy as np\n'), ((1222, 1240), 'numpy.std', 'np.std', (['acc_target'], {}), '(acc_target)\n', (1228, 1240), True, 'import numpy as np\n')] |
import cloudpickle
import os
import numpy as np
import transformations as tf
import zlib, cPickle as pickle
#### For ZMQ ####
def send_zipped_pickle(socket, obj, flags=0, protocol=-1):
"""pickle an object, and zip the pickle before sending it"""
p = pickle.dumps(obj, protocol)
z = zlib.compress(p)
return socket.send(z, flags=flags)
def recv_zipped_pickle(socket, flags=0, protocol=-1):
"""inverse of send_zipped_pickle"""
z = socket.recv(flags)
p = zlib.decompress(z)
return pickle.loads(p)
def send_array(socket, A, flags=0, copy=True, track=False):
"""send a numpy array with metadata"""
md = dict(
dtype = str(A.dtype),
shape = A.shape,
)
socket.send_json(md, flags|zmq.SNDMORE)
return socket.send(A, flags, copy=copy, track=track)
def recv_array(socket, flags=0, copy=True, track=False):
"""recv a numpy array"""
md = socket.recv_json(flags=flags)
msg = socket.recv(flags=flags, copy=copy, track=track)
buf = buffer(msg)
A = np.frombuffer(buf, dtype=md['dtype'])
return A.reshape(md['shape'])
def quaternion_log(q):
u = q[:3]
v = q[3]
if np.linalg.norm(u) == 0:
return np.array([0,0,0])
else:
if v > 0.999:
v = 0.999
return np.arccos(v) * (u / np.linalg.norm(u))
def quaternion_dist(q1, q2):
conjugate_product = tf.transformations.quaternion_multiply(q1, tf.transformations.quaternion_conjugate(q2))
if all(conjugate_product == np.array([0,0,0,-1])):
return 2*np.pi
else:
return 2 * np.linalg.norm(quaternion_log(conjugate_product))
def get_p2(p1, Mp1p2):
Mp1 = tf.transformations.quaternion_matrix(p1[3:])
Mp1[:3,3] = p1[:3]
Mp2 = Mp1.dot(Mp1p2)
p2_quat = tf.transformations.quaternion_from_matrix(Mp2)
p2 = np.concatenate([Mp2[:3,3], p2_quat])
return p2
def pos_distance(pos1, pos2):
pos_dist = np.linalg.norm(pos1 - pos2)
return pos_dist
def quat_distance(q1, q2):
quat_dist_arg = 2 * np.inner(q1, q2) - 1
quat_dist_arg = np.modf(quat_dist_arg)[0]
if np.abs(quat_dist_arg) > 0.99 or np.abs(quat_dist_arg) < 0.05:
quat_distance = 0.
else:
quat_distance = np.arccos(quat_dist_arg)
return quat_distance
def get_object_goal_pose(object_pose, goal_rel_pose):
assert len(object_pose) == len(goal_rel_pose) == 7
Ms = tf.quaternion_matrix(object_pose[3:])
Ms[:3,3] = object_pose[:3]
M_rel = tf.quaternion_matrix(goal_rel_pose[3:])
M_rel[:3,3] = goal_rel_pose[:3]
M = Ms.dot(M_rel)
quat_M = tf.quaternion_from_matrix(M)
pose_M = np.concatenate([M[:3,3], quat_M])
return pose_M
def pose_distance(p1, p2):
assert len(p1) == len(p2) == 7
pos_dist = pos_distance(p1[:3], p2[:3])
quat_dist = quat_distance(p1[3:], p2[3:])
return pos_dist, quat_dist
def load_policy_and_preprocessor(loading_config):
policy = None
state_preprocessor = None
config_restore_path = loading_config['training_config_restore_path']
if config_restore_path is not None:
training_config = cloudpickle.loads(open(config_restore_path, 'rb').read())
#### load policy ####
policy_restore_path = loading_config['policy_restore_path']
if policy_restore_path is not None:
policy_config = training_config.get(['Actor', 'config'])
policy_config['obs_dim'] = loading_config['state_space']['shape'][0]
policy_config['action_dim'] = loading_config['action_space']['shape'][0]
policy_config['action_space'] = loading_config['action_space']
policy = training_config.get(['Actor', 'type'])(policy_config)
policy.restore(model_dir=policy_restore_path, model_name='policy')
print("Loaded policy from {}".format(os.path.join(policy_restore_path, 'policy')))
#### load state preprocessor ####
state_preprocessor_restore_path = loading_config['state_preprocessor_restore_path']
if state_preprocessor_restore_path is not None:
state_preprocessor_config = training_config.get(['Preprocessors', 'state_preprocessor', 'config'])
state_preprocessor_config['dim'] = loading_config['state_space']['shape'][0]
state_preprocessor_type = training_config.get(['Preprocessors', 'state_preprocessor', 'type'])
if state_preprocessor_type is not None:
state_preprocessor = state_preprocessor_type(state_preprocessor_config)
state_preprocessor.restore_preprocessor(state_preprocessor_restore_path)
return policy, state_preprocessor
| [
"numpy.abs",
"numpy.arccos",
"transformations.transformations.quaternion_from_matrix",
"cPickle.loads",
"os.path.join",
"zlib.compress",
"transformations.quaternion_matrix",
"transformations.quaternion_from_matrix",
"numpy.array",
"cPickle.dumps",
"transformations.transformations.quaternion_matr... | [((260, 287), 'cPickle.dumps', 'pickle.dumps', (['obj', 'protocol'], {}), '(obj, protocol)\n', (272, 287), True, 'import zlib, cPickle as pickle\n'), ((296, 312), 'zlib.compress', 'zlib.compress', (['p'], {}), '(p)\n', (309, 312), False, 'import zlib, cPickle as pickle\n'), ((482, 500), 'zlib.decompress', 'zlib.decompress', (['z'], {}), '(z)\n', (497, 500), False, 'import zlib, cPickle as pickle\n'), ((512, 527), 'cPickle.loads', 'pickle.loads', (['p'], {}), '(p)\n', (524, 527), True, 'import zlib, cPickle as pickle\n'), ((1024, 1061), 'numpy.frombuffer', 'np.frombuffer', (['buf'], {'dtype': "md['dtype']"}), "(buf, dtype=md['dtype'])\n", (1037, 1061), True, 'import numpy as np\n'), ((1660, 1704), 'transformations.transformations.quaternion_matrix', 'tf.transformations.quaternion_matrix', (['p1[3:]'], {}), '(p1[3:])\n', (1696, 1704), True, 'import transformations as tf\n'), ((1769, 1815), 'transformations.transformations.quaternion_from_matrix', 'tf.transformations.quaternion_from_matrix', (['Mp2'], {}), '(Mp2)\n', (1810, 1815), True, 'import transformations as tf\n'), ((1826, 1863), 'numpy.concatenate', 'np.concatenate', (['[Mp2[:3, 3], p2_quat]'], {}), '([Mp2[:3, 3], p2_quat])\n', (1840, 1863), True, 'import numpy as np\n'), ((1924, 1951), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos1 - pos2)'], {}), '(pos1 - pos2)\n', (1938, 1951), True, 'import numpy as np\n'), ((2398, 2435), 'transformations.quaternion_matrix', 'tf.quaternion_matrix', (['object_pose[3:]'], {}), '(object_pose[3:])\n', (2418, 2435), True, 'import transformations as tf\n'), ((2480, 2519), 'transformations.quaternion_matrix', 'tf.quaternion_matrix', (['goal_rel_pose[3:]'], {}), '(goal_rel_pose[3:])\n', (2500, 2519), True, 'import transformations as tf\n'), ((2593, 2621), 'transformations.quaternion_from_matrix', 'tf.quaternion_from_matrix', (['M'], {}), '(M)\n', (2618, 2621), True, 'import transformations as tf\n'), ((2635, 2669), 
'numpy.concatenate', 'np.concatenate', (['[M[:3, 3], quat_M]'], {}), '([M[:3, 3], quat_M])\n', (2649, 2669), True, 'import numpy as np\n'), ((1159, 1176), 'numpy.linalg.norm', 'np.linalg.norm', (['u'], {}), '(u)\n', (1173, 1176), True, 'import numpy as np\n'), ((1198, 1217), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1206, 1217), True, 'import numpy as np\n'), ((1422, 1465), 'transformations.transformations.quaternion_conjugate', 'tf.transformations.quaternion_conjugate', (['q2'], {}), '(q2)\n', (1461, 1465), True, 'import transformations as tf\n'), ((2065, 2087), 'numpy.modf', 'np.modf', (['quat_dist_arg'], {}), '(quat_dist_arg)\n', (2072, 2087), True, 'import numpy as np\n'), ((2222, 2246), 'numpy.arccos', 'np.arccos', (['quat_dist_arg'], {}), '(quat_dist_arg)\n', (2231, 2246), True, 'import numpy as np\n'), ((1285, 1297), 'numpy.arccos', 'np.arccos', (['v'], {}), '(v)\n', (1294, 1297), True, 'import numpy as np\n'), ((1500, 1523), 'numpy.array', 'np.array', (['[0, 0, 0, -1]'], {}), '([0, 0, 0, -1])\n', (1508, 1523), True, 'import numpy as np\n'), ((2024, 2040), 'numpy.inner', 'np.inner', (['q1', 'q2'], {}), '(q1, q2)\n', (2032, 2040), True, 'import numpy as np\n'), ((2099, 2120), 'numpy.abs', 'np.abs', (['quat_dist_arg'], {}), '(quat_dist_arg)\n', (2105, 2120), True, 'import numpy as np\n'), ((2131, 2152), 'numpy.abs', 'np.abs', (['quat_dist_arg'], {}), '(quat_dist_arg)\n', (2137, 2152), True, 'import numpy as np\n'), ((1305, 1322), 'numpy.linalg.norm', 'np.linalg.norm', (['u'], {}), '(u)\n', (1319, 1322), True, 'import numpy as np\n'), ((3806, 3849), 'os.path.join', 'os.path.join', (['policy_restore_path', '"""policy"""'], {}), "(policy_restore_path, 'policy')\n", (3818, 3849), False, 'import os\n')] |
"""
These tests are the test fits that come with C mpfit
"""
import mpyfit
import unittest
import numpy
class LinearFunction( unittest.TestCase):
@staticmethod
def func(p, args):
x, y, error = args
return (y - p[0] - p[1]*x)/error
def test_fit(self):
x = numpy.array([-1.7237128E+00,1.8712276E+00,-9.6608055E-01,
-2.8394297E-01,1.3416969E+00,1.3757038E+00,
-1.3703436E+00,4.2581975E-02,-1.4970151E-01,
8.2065094E-01])
y = numpy.array([1.9000429E-01,6.5807428E+00,1.4582725E+00,
2.7270851E+00,5.5969253E+00,5.6249280E+00,
0.787615,3.2599759E+00,2.9771762E+00,
4.5936475E+00])
error = 0.07 * numpy.ones(x.shape)
p = [1.0, 1.0]
pactual = [3.20, 1.78]
args = (x, y, error)
p, result = mpyfit.fit(self.func, p, args)
dof = result['nfunc'] - result['nfree']
self.assertEqual(dof, 8)
self.assertEqual(result['status'][0], 1)
self.assertAlmostEqual(result['bestnorm'], 2.756285)
self.assertTrue(abs(p[0] - pactual[0]) < result['parerrors'][0])
self.assertTrue(abs(p[1] - pactual[1]) < result['parerrors'][1])
class QuadraticFunction(unittest.TestCase):
@staticmethod
def func(p, args):
x, y, error = args
return (y - p[0] - p[1]*x - p[2]*x*x)/error
def setUp(self):
self.p = numpy.ones((3,))
self.pactual = [4.7, 0.0, 6.2]
self.x = numpy.asarray([-1.7237128E+00,1.8712276E+00,-9.6608055E-01,
-2.8394297E-01,1.3416969E+00,1.3757038E+00,
-1.3703436E+00,4.2581975E-02,-1.4970151E-01,
8.2065094E-01])
self.y = numpy.asarray([2.3095947E+01,2.6449392E+01,1.0204468E+01,
5.40507,1.5787588E+01,1.6520903E+01,
1.5971818E+01,4.7668524E+00,4.9337711E+00,
8.7348375E+00])
self.error = 0.2 * numpy.ones(self.x.shape)
def test_fit(self):
args = (self.x, self.y, self.error)
p, result = mpyfit.fit(self.func, self.p, args)
dof = result['nfunc'] - result['nfree']
self.assertEqual(result['status'][0], 1)
self.assertEqual(dof, 7)
self.assertTrue(abs(p[0] - self.pactual[0]) < result['parerrors'][0])
self.assertFalse(abs(p[1] - self.pactual[1]) < result['parerrors'][1])
self.assertTrue(abs(p[2] - self.pactual[2]) < result['parerrors'][2])
def test_frozen(self):
args = (self.x, self.y, self.error)
# Fix the second term
parinfo = [{}, {'fixed': True}, {}]
self.p[1] = 0
p, result = mpyfit.fit(self.func, self.p, args=args, parinfo=parinfo)
dof = result['nfunc'] - result['nfree']
self.assertEqual(result['status'][0], 1)
self.assertEqual(dof, 8)
self.assertTrue(abs(p[0] - self.pactual[0]) < result['parerrors'][0])
self.assertEqual(p[1], 0.0)
self.assertTrue(abs(p[2] - self.pactual[2]) < result['parerrors'][2])
class GaussFunction(unittest.TestCase):
@staticmethod
def func(p, args):
x, y, error = args
xc = x - p[2]
return (y - p[1] * numpy.exp(-0.5*(x-p[2])*(x-p[2]) / (p[3]*p[3])) -
p[0]) / error
def setUp(self):
self.x = numpy.array([-1.7237128E+00,1.8712276E+00,-9.6608055E-01,
-2.8394297E-01,1.3416969E+00,1.3757038E+00,
-1.3703436E+00,4.2581975E-02,-1.4970151E-01,
8.2065094E-01])
self.y = numpy.array([-4.4494256E-02,8.7324673E-01,7.4443483E-01,
4.7631559E+00,1.7187297E-01,1.1639182E-01,
1.5646480E+00,5.2322268E+00,4.2543168E+00,
6.2792623E-01])
self.p = numpy.ones((4,))
self.p[0] = 0
self.pactual = [0.0, 4.7, 0.0, 0.5]
self.error = 0.5 * numpy.ones(self.x.shape)
def test_fit(self):
args = (self.x, self.y, self.error)
p, result = mpyfit.fit(self.func, self.p, args)
dof = result['nfunc'] - result['nfree']
self.assertEqual(result['status'][0], 1)
self.assertEqual(dof, 6)
self.assertFalse(abs(p[0] - self.pactual[0]) < result['parerrors'][0])
self.assertTrue(abs(p[1] - self.pactual[1]) < result['parerrors'][1])
self.assertTrue(abs(p[2] - self.pactual[2]) < result['parerrors'][2])
self.assertFalse(abs(p[3] - self.pactual[3]) < result['parerrors'][3])
def test_freeze(self):
self.p[2] = 0
self.p[3] = 0.1
parinfo = [{'fixed': True}, {}, {'fixed': True}, {}]
args = (self.x, self.y, self.error)
p, result = mpyfit.fit(self.func, self.p, args, parinfo=parinfo)
dof = result['nfunc'] - result['nfree']
self.assertEqual(result['status'][0], 1)
self.assertEqual(result['nfunc'] - result['nfree'], 8)
self.assertEqual(p[0], 0.0)
self.assertTrue(abs(p[1] - self.pactual[1]) < 1.5*result['parerrors'][1])
self.assertEqual(p[2], 0.0)
self.assertTrue(abs(p[3] - self.pactual[3]) < result['parerrors'][3])
def test_limit(self):
self.p[2] = 0
self.p[3] = 0.1
parinfo = [{'fixed': True}, {}, {'fixed': True},
{'limits': (-0.2, 0.3)}]
args = (self.x, self.y, self.error)
p, result = mpyfit.fit(self.func, self.p, args, parinfo=parinfo)
dof = result['nfunc'] - result['nfree']
self.assertEqual(result['status'][0], 1)
self.assertEqual(result['nfunc'] - result['nfree'], 8)
self.assertEqual(p[0], 0.0)
self.assertEqual(result['parerrors'][0], 0)
self.assertAlmostEqual(p[1], 5.533173)
self.assertAlmostEqual(result['parerrors'][1], 0.3395397)
self.assertEqual(p[2], 0.0)
self.assertEqual(result['parerrors'][2], 0)
self.assertTrue(p[3] >= -0.2 and p[3] <= 0.3)
if __name__ == '__main__':
unittest.main()
| [
"numpy.ones",
"mpyfit.fit",
"numpy.asarray",
"numpy.exp",
"numpy.array",
"unittest.main"
] | [((6204, 6219), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6217, 6219), False, 'import unittest\n'), ((296, 435), 'numpy.array', 'numpy.array', (['[-1.7237128, 1.8712276, -0.96608055, -0.28394297, 1.3416969, 1.3757038, -\n 1.3703436, 0.042581975, -0.14970151, 0.82065094]'], {}), '([-1.7237128, 1.8712276, -0.96608055, -0.28394297, 1.3416969, \n 1.3757038, -1.3703436, 0.042581975, -0.14970151, 0.82065094])\n', (307, 435), False, 'import numpy\n'), ((546, 673), 'numpy.array', 'numpy.array', (['[0.19000429, 6.5807428, 1.4582725, 2.7270851, 5.5969253, 5.624928, 0.787615,\n 3.2599759, 2.9771762, 4.5936475]'], {}), '([0.19000429, 6.5807428, 1.4582725, 2.7270851, 5.5969253, \n 5.624928, 0.787615, 3.2599759, 2.9771762, 4.5936475])\n', (557, 673), False, 'import numpy\n'), ((921, 951), 'mpyfit.fit', 'mpyfit.fit', (['self.func', 'p', 'args'], {}), '(self.func, p, args)\n', (931, 951), False, 'import mpyfit\n'), ((1496, 1512), 'numpy.ones', 'numpy.ones', (['(3,)'], {}), '((3,))\n', (1506, 1512), False, 'import numpy\n'), ((1569, 1710), 'numpy.asarray', 'numpy.asarray', (['[-1.7237128, 1.8712276, -0.96608055, -0.28394297, 1.3416969, 1.3757038, -\n 1.3703436, 0.042581975, -0.14970151, 0.82065094]'], {}), '([-1.7237128, 1.8712276, -0.96608055, -0.28394297, 1.3416969, \n 1.3757038, -1.3703436, 0.042581975, -0.14970151, 0.82065094])\n', (1582, 1710), False, 'import numpy\n'), ((1847, 1975), 'numpy.asarray', 'numpy.asarray', (['[23.095947, 26.449392, 10.204468, 5.40507, 15.787588, 16.520903, 15.971818,\n 4.7668524, 4.9337711, 8.7348375]'], {}), '([23.095947, 26.449392, 10.204468, 5.40507, 15.787588, \n 16.520903, 15.971818, 4.7668524, 4.9337711, 8.7348375])\n', (1860, 1975), False, 'import numpy\n'), ((2238, 2273), 'mpyfit.fit', 'mpyfit.fit', (['self.func', 'self.p', 'args'], {}), '(self.func, self.p, args)\n', (2248, 2273), False, 'import mpyfit\n'), ((2828, 2885), 'mpyfit.fit', 'mpyfit.fit', (['self.func', 'self.p'], {'args': 'args', 'parinfo': 'parinfo'}), 
'(self.func, self.p, args=args, parinfo=parinfo)\n', (2838, 2885), False, 'import mpyfit\n'), ((3488, 3627), 'numpy.array', 'numpy.array', (['[-1.7237128, 1.8712276, -0.96608055, -0.28394297, 1.3416969, 1.3757038, -\n 1.3703436, 0.042581975, -0.14970151, 0.82065094]'], {}), '([-1.7237128, 1.8712276, -0.96608055, -0.28394297, 1.3416969, \n 1.3757038, -1.3703436, 0.042581975, -0.14970151, 0.82065094])\n', (3499, 3627), False, 'import numpy\n'), ((3758, 3893), 'numpy.array', 'numpy.array', (['[-0.044494256, 0.87324673, 0.74443483, 4.7631559, 0.17187297, 0.11639182, \n 1.564648, 5.2322268, 4.2543168, 0.62792623]'], {}), '([-0.044494256, 0.87324673, 0.74443483, 4.7631559, 0.17187297, \n 0.11639182, 1.564648, 5.2322268, 4.2543168, 0.62792623])\n', (3769, 3893), False, 'import numpy\n'), ((4024, 4040), 'numpy.ones', 'numpy.ones', (['(4,)'], {}), '((4,))\n', (4034, 4040), False, 'import numpy\n'), ((4248, 4283), 'mpyfit.fit', 'mpyfit.fit', (['self.func', 'self.p', 'args'], {}), '(self.func, self.p, args)\n', (4258, 4283), False, 'import mpyfit\n'), ((4929, 4981), 'mpyfit.fit', 'mpyfit.fit', (['self.func', 'self.p', 'args'], {'parinfo': 'parinfo'}), '(self.func, self.p, args, parinfo=parinfo)\n', (4939, 4981), False, 'import mpyfit\n'), ((5614, 5666), 'mpyfit.fit', 'mpyfit.fit', (['self.func', 'self.p', 'args'], {'parinfo': 'parinfo'}), '(self.func, self.p, args, parinfo=parinfo)\n', (5624, 5666), False, 'import mpyfit\n'), ((797, 816), 'numpy.ones', 'numpy.ones', (['x.shape'], {}), '(x.shape)\n', (807, 816), False, 'import numpy\n'), ((2124, 2148), 'numpy.ones', 'numpy.ones', (['self.x.shape'], {}), '(self.x.shape)\n', (2134, 2148), False, 'import numpy\n'), ((4134, 4158), 'numpy.ones', 'numpy.ones', (['self.x.shape'], {}), '(self.x.shape)\n', (4144, 4158), False, 'import numpy\n'), ((3369, 3426), 'numpy.exp', 'numpy.exp', (['(-0.5 * (x - p[2]) * (x - p[2]) / (p[3] * p[3]))'], {}), '(-0.5 * (x - p[2]) * (x - p[2]) / (p[3] * p[3]))\n', (3378, 3426), False, 'import 
numpy\n')] |
import os
import pandas as pd
import numpy as np
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
import matplotlib.pyplot as plt
import arviz as az
def make_dir_if_necessary(directory):
if not os.path.exists(directory):
os.makedirs(directory)
class IterativeDict:
"""
Structure useful to save metrics for different models over different trainings
Saved in a nested dictionnary
Structure:
model_name ==> metric_name ==> table [n_trainings, ...]
"""
def __init__(self, model_names):
self.values = {key: {} for key in model_names}
def set_values(self, model_name, metric_name, values):
if metric_name not in self.values[model_name]:
self.values[model_name][metric_name] = [values]
else:
self.values[model_name][metric_name].append(values)
def to_df(self):
return pd.DataFrame(self.values)
def plot_traj(history, x=None, **plot_params):
"""
:param history: (n_sim, n_x_values) array
:param x: associated x values used for plotting
:param plot_params: Plot parameters fed to plt.plot
:return:
"""
plot_params = {} if plot_params is None else plot_params
history_np = np.array(history)
theta_mean = np.mean(history_np, axis=0)
theta_std = np.std(history_np, axis=0)
n_iter = len(theta_mean)
x = np.arange(n_iter) if x is None else x
plt.plot(x, theta_mean, **plot_params)
plt.fill_between(
x=x, y1=theta_mean - theta_std, y2=theta_mean + theta_std, alpha=0.25
)
def plot_identity():
xmin, xmax = plt.xlim()
vals = np.linspace(xmin, xmax, 50)
plt.plot(vals, vals, "--", label="identity")
def plot_precision_recall(y_test, y_score, label=""):
average_precision = average_precision_score(y_test, y_score)
precision, recall, _ = precision_recall_curve(y_test, y_score)
# In matplotlib < 1.5, plt.fill_between does not have a 'step' argument
step_kwargs = {"step": "post"}
legend = "{0} PR curve: AP={1:0.2f}".format(label, average_precision)
plt.step(recall, precision, color="b", alpha=0.2, where="post", label=legend)
plt.fill_between(recall, precision, alpha=0.2, **step_kwargs)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
def compute_hdi(arr, credible_interval=0.64):
"""
Given array of (simulations, dimensions) computes Highest Density Intervals
Sample dimension should be first dimension
:param arr: Array of shape (n_samples, n_genes)
:param credible_interval:
:return:
"""
return az.hpd(arr, credible_interval=credible_interval)
def demultiply(arr1, arr2, factor=2):
"""
Suppose you have at disposal
arr1 ~ p(h|x_a)
arr2 ~ p(h|x_b)
Then artificially increase the sizes on respective arrays
so that you can sample from
p(f(h1, h2) | x_a, x_b) under the right assumptions
:param arr1:
:param arr2:
:param factor:
:return:
"""
assert arr1.shape == arr2.shape
n_original = len(arr1)
idx_1 = np.random.choice(n_original, size=n_original * factor, replace=True)
idx_2 = np.random.choice(n_original, size=n_original * factor, replace=True)
return arr1[idx_1], arr2[idx_2]
def predict_de_genes(posterior_probas: np.ndarray, desired_fdr: float):
"""
:param posterior_probas: Shape (n_samples, n_genes)
:param desired_fdr:
:return:
"""
assert posterior_probas.ndim == 1
sorted_genes = np.argsort(-posterior_probas)
sorted_pgs = posterior_probas[sorted_genes]
cumulative_fdr = (1.0 - sorted_pgs).cumsum() / (1.0 + np.arange(len(sorted_pgs)))
d = (cumulative_fdr <= desired_fdr).sum() - 1
pred_de_genes = sorted_genes[:d]
is_pred_de = np.zeros_like(cumulative_fdr).astype(bool)
is_pred_de[pred_de_genes] = True
return is_pred_de
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"numpy.mean",
"os.path.exists",
"arviz.hpd",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"sklearn.metrics.... | [((1263, 1280), 'numpy.array', 'np.array', (['history'], {}), '(history)\n', (1271, 1280), True, 'import numpy as np\n'), ((1298, 1325), 'numpy.mean', 'np.mean', (['history_np'], {'axis': '(0)'}), '(history_np, axis=0)\n', (1305, 1325), True, 'import numpy as np\n'), ((1342, 1368), 'numpy.std', 'np.std', (['history_np'], {'axis': '(0)'}), '(history_np, axis=0)\n', (1348, 1368), True, 'import numpy as np\n'), ((1449, 1487), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'theta_mean'], {}), '(x, theta_mean, **plot_params)\n', (1457, 1487), True, 'import matplotlib.pyplot as plt\n'), ((1493, 1584), 'matplotlib.pyplot.fill_between', 'plt.fill_between', ([], {'x': 'x', 'y1': '(theta_mean - theta_std)', 'y2': '(theta_mean + theta_std)', 'alpha': '(0.25)'}), '(x=x, y1=theta_mean - theta_std, y2=theta_mean + theta_std,\n alpha=0.25)\n', (1509, 1584), True, 'import matplotlib.pyplot as plt\n'), ((1635, 1645), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (1643, 1645), True, 'import matplotlib.pyplot as plt\n'), ((1657, 1684), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(50)'], {}), '(xmin, xmax, 50)\n', (1668, 1684), True, 'import numpy as np\n'), ((1689, 1733), 'matplotlib.pyplot.plot', 'plt.plot', (['vals', 'vals', '"""--"""'], {'label': '"""identity"""'}), "(vals, vals, '--', label='identity')\n", (1697, 1733), True, 'import matplotlib.pyplot as plt\n'), ((1814, 1854), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['y_test', 'y_score'], {}), '(y_test, y_score)\n', (1837, 1854), False, 'from sklearn.metrics import average_precision_score\n'), ((1882, 1921), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y_test', 'y_score'], {}), '(y_test, y_score)\n', (1904, 1921), False, 'from sklearn.metrics import precision_recall_curve\n'), ((2113, 2190), 'matplotlib.pyplot.step', 'plt.step', (['recall', 'precision'], {'color': '"""b"""', 'alpha': '(0.2)', 'where': '"""post"""', 'label': 
'legend'}), "(recall, precision, color='b', alpha=0.2, where='post', label=legend)\n", (2121, 2190), True, 'import matplotlib.pyplot as plt\n'), ((2195, 2256), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['recall', 'precision'], {'alpha': '(0.2)'}), '(recall, precision, alpha=0.2, **step_kwargs)\n', (2211, 2256), True, 'import matplotlib.pyplot as plt\n'), ((2261, 2281), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (2271, 2281), True, 'import matplotlib.pyplot as plt\n'), ((2286, 2309), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (2296, 2309), True, 'import matplotlib.pyplot as plt\n'), ((2314, 2335), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (2322, 2335), True, 'import matplotlib.pyplot as plt\n'), ((2340, 2360), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (2348, 2360), True, 'import matplotlib.pyplot as plt\n'), ((2659, 2707), 'arviz.hpd', 'az.hpd', (['arr'], {'credible_interval': 'credible_interval'}), '(arr, credible_interval=credible_interval)\n', (2665, 2707), True, 'import arviz as az\n'), ((3138, 3206), 'numpy.random.choice', 'np.random.choice', (['n_original'], {'size': '(n_original * factor)', 'replace': '(True)'}), '(n_original, size=n_original * factor, replace=True)\n', (3154, 3206), True, 'import numpy as np\n'), ((3219, 3287), 'numpy.random.choice', 'np.random.choice', (['n_original'], {'size': '(n_original * factor)', 'replace': '(True)'}), '(n_original, size=n_original * factor, replace=True)\n', (3235, 3287), True, 'import numpy as np\n'), ((3565, 3594), 'numpy.argsort', 'np.argsort', (['(-posterior_probas)'], {}), '(-posterior_probas)\n', (3575, 3594), True, 'import numpy as np\n'), ((254, 279), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (268, 279), False, 'import os\n'), ((289, 311), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', 
(300, 311), False, 'import os\n'), ((927, 952), 'pandas.DataFrame', 'pd.DataFrame', (['self.values'], {}), '(self.values)\n', (939, 952), True, 'import pandas as pd\n'), ((1407, 1424), 'numpy.arange', 'np.arange', (['n_iter'], {}), '(n_iter)\n', (1416, 1424), True, 'import numpy as np\n'), ((3833, 3862), 'numpy.zeros_like', 'np.zeros_like', (['cumulative_fdr'], {}), '(cumulative_fdr)\n', (3846, 3862), True, 'import numpy as np\n')] |
from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout
from keras.losses import mean_squared_error
from keras.models import Model, clone_model, load_model
from keras.optimizers import SGD, Adam, RMSprop
import numpy as np
class RandomAgent(object):
    """Baseline agent: random board evaluations and random legal moves."""
    def __init__(self, color=1):
        # Side the agent plays; kept for interface parity with other agents.
        self.color = color
    def predict(self, board_layer):
        """Return a uniformly random evaluation in {-1.0, -0.8, ..., 0.8}."""
        return np.random.randint(-5, 5) / 5
    def select_move(self, board):
        """Pick one of the board's legal moves uniformly at random."""
        legal_moves = list(board.generate_legal_moves())
        return np.random.choice(legal_moves)
class GreedyAgent(object):
    """Material-counting baseline agent.

    Evaluates a board by summing standard piece values from the piece-plane
    encoding and scaling into a roughly [-1, 1] range.
    """
    def __init__(self, color=-1):
        # Sign of the evaluation: +1 plays the maximizing side, -1 the minimizing side.
        self.color = color
    def predict(self, layer_board, noise=True):
        """Return a material-based evaluation of the board.

        Args:
            layer_board: array of shape (1, planes, 8, 8); plane 0 holds pawns,
                plane 1 rooks, planes 2-3 minor pieces, plane 4 queens
                (planes beyond index 4 are ignored here).
            noise: when True, add tiny Gaussian noise (std 1e-3) to break ties.

        Returns:
            float: self.color * material / 40, plus optional noise.
        """
        layer_board1 = layer_board[0, :, :, :]
        pawns = 1 * np.sum(layer_board1[0, :, :])
        rooks = 5 * np.sum(layer_board1[1, :, :])
        minor = 3 * np.sum(layer_board1[2:4, :, :])
        queen = 9 * np.sum(layer_board1[4, :, :])
        maxscore = 40  # rough upper bound on total material, used for scaling
        material = pawns + rooks + minor + queen
        board_value = self.color * material / maxscore
        # Bug fix: 'added_noise' was previously assigned only inside the
        # `if noise:` branch, so predict(..., noise=False) raised NameError.
        added_noise = np.random.randn() / 1e3 if noise else 0.0
        return board_value + added_noise
class Agent(object):
    """Neural-network agent that learns a scalar value for a chess position.

    The network maps an (8, 8, 8) board tensor to a single value and is
    trained either by temporal-difference bootstrapping (``TD_update``,
    against a frozen copy of the network created by ``fix_model``) or by
    Monte Carlo returns (``MC_update``).
    """
    def __init__(self, lr=0.003, network='big'):
        """
        Args:
            lr: float, learning rate for the RMSprop optimizer.
            network: str, architecture selector: 'simple', 'super_simple',
                'alt', 'big' (default) or any other value for the full
                convolutional network built by init_network.
        """
        self.optimizer = RMSprop(lr=lr)
        self.model = Model()  # placeholder; replaced by the init_* call below
        self.proportional_error = False
        if network == 'simple':
            self.init_simple_network()
        elif network == 'super_simple':
            self.init_super_simple_network()
        elif network == 'alt':
            self.init_altnet()
        elif network == 'big':
            self.init_bignet()
        else:
            self.init_network()
    def fix_model(self):
        """Freeze a copy of the current network for bootstrapping.

        The fixed model supplies the successor-state values in TD_update so
        the learning target does not move while the live model trains.
        Must be called before TD_update.
        """
        self.fixed_model = clone_model(self.model)
        self.fixed_model.compile(optimizer=self.optimizer, loss='mse', metrics=['mae'])
        self.fixed_model.set_weights(self.model.get_weights())
    def init_network(self):
        """Build the full convolutional value network (the fallback architecture)."""
        layer_state = Input(shape=(8, 8, 8), name='state')
        openfile = Conv2D(3, (8, 1), padding='valid', activation='relu', name='fileconv')(layer_state)  # 3,8,1
        openrank = Conv2D(3, (1, 8), padding='valid', activation='relu', name='rankconv')(layer_state)  # 3,1,8
        quarters = Conv2D(3, (4, 4), padding='valid', activation='relu', name='quarterconv', strides=(4, 4))(
            layer_state)  # 3,2,2
        large = Conv2D(8, (6, 6), padding='valid', activation='relu', name='largeconv')(layer_state)  # 8,2,2
        board1 = Conv2D(16, (3, 3), padding='valid', activation='relu', name='board1')(layer_state)  # 16,6,6
        board2 = Conv2D(20, (3, 3), padding='valid', activation='relu', name='board2')(board1)  # 20,4,4
        board3 = Conv2D(24, (3, 3), padding='valid', activation='relu', name='board3')(board2)  # 24,2,2
        flat_file = Flatten()(openfile)
        flat_rank = Flatten()(openrank)
        flat_quarters = Flatten()(quarters)
        flat_large = Flatten()(large)
        flat_board = Flatten()(board1)
        flat_board3 = Flatten()(board3)
        dense1 = Concatenate(name='dense_bass')(
            [flat_file, flat_rank, flat_quarters, flat_large, flat_board, flat_board3])
        dropout1 = Dropout(rate=0.1)(dense1)
        dense2 = Dense(128, activation='sigmoid')(dropout1)
        dense3 = Dense(64, activation='sigmoid')(dense2)
        # training=True keeps dropout active at inference time, which is what
        # lets predict_distribution sample a distribution of value estimates.
        dropout3 = Dropout(rate=0.1)(dense3, training=True)
        dense4 = Dense(32, activation='sigmoid')(dropout3)
        dropout4 = Dropout(rate=0.1)(dense4, training=True)
        value_head = Dense(1)(dropout4)
        self.model = Model(inputs=layer_state,
                           outputs=[value_head])
        self.model.compile(optimizer=self.optimizer,
                           loss=[mean_squared_error]
                           )
    def init_simple_network(self):
        """Build a small three-conv value network."""
        layer_state = Input(shape=(8, 8, 8), name='state')
        conv1 = Conv2D(8, (3, 3), activation='sigmoid')(layer_state)
        conv2 = Conv2D(6, (3, 3), activation='sigmoid')(conv1)
        conv3 = Conv2D(4, (3, 3), activation='sigmoid')(conv2)
        flat4 = Flatten()(conv3)
        dense5 = Dense(24, activation='sigmoid')(flat4)
        dense6 = Dense(8, activation='sigmoid')(dense5)
        value_head = Dense(1)(dense6)
        self.model = Model(inputs=layer_state,
                           outputs=value_head)
        self.model.compile(optimizer=self.optimizer,
                           loss=mean_squared_error
                           )
    def init_super_simple_network(self):
        """Build a minimal one-conv value network."""
        layer_state = Input(shape=(8, 8, 8), name='state')
        conv1 = Conv2D(8, (3, 3), activation='sigmoid')(layer_state)
        flat4 = Flatten()(conv1)
        dense5 = Dense(10, activation='sigmoid')(flat4)
        value_head = Dense(1)(dense5)
        self.model = Model(inputs=layer_state,
                           outputs=value_head)
        self.model.compile(optimizer=self.optimizer,
                           loss=mean_squared_error
                           )
    def init_altnet(self):
        """Build a 1x1-conv alternative value network."""
        layer_state = Input(shape=(8, 8, 8), name='state')
        conv1 = Conv2D(6, (1, 1), activation='sigmoid')(layer_state)
        flat2 = Flatten()(conv1)
        dense3 = Dense(128, activation='sigmoid')(flat2)
        value_head = Dense(1)(dense3)
        self.model = Model(inputs=layer_state,
                           outputs=value_head)
        self.model.compile(optimizer=self.optimizer,
                           loss=mean_squared_error
                           )
    def init_bignet(self):
        """Build the multi-scale convolutional value network (default)."""
        layer_state = Input(shape=(8, 8, 8), name='state')
        # Parallel convolutions at several receptive-field sizes, plus
        # full-rank and full-file convolutions, concatenated before the head.
        conv_xs = Conv2D(4, (1, 1), activation='relu')(layer_state)
        conv_s = Conv2D(8, (2, 2), strides=(1, 1), activation='relu')(layer_state)
        conv_m = Conv2D(12, (3, 3), strides=(2, 2), activation='relu')(layer_state)
        conv_l = Conv2D(16, (4, 4), strides=(2, 2), activation='relu')(layer_state)
        conv_xl = Conv2D(20, (8, 8), activation='relu')(layer_state)
        conv_rank = Conv2D(3, (1, 8), activation='relu')(layer_state)
        conv_file = Conv2D(3, (8, 1), activation='relu')(layer_state)
        f_xs = Flatten()(conv_xs)
        f_s = Flatten()(conv_s)
        f_m = Flatten()(conv_m)
        f_l = Flatten()(conv_l)
        f_xl = Flatten()(conv_xl)
        f_r = Flatten()(conv_rank)
        f_f = Flatten()(conv_file)
        dense1 = Concatenate(name='dense_bass')([f_xs, f_s, f_m, f_l, f_xl, f_r, f_f])
        dense2 = Dense(256, activation='sigmoid')(dense1)
        dense3 = Dense(128, activation='sigmoid')(dense2)
        dense4 = Dense(56, activation='sigmoid')(dense3)
        dense5 = Dense(64, activation='sigmoid')(dense4)
        dense6 = Dense(32, activation='sigmoid')(dense5)
        value_head = Dense(1)(dense6)
        self.model = Model(inputs=layer_state,
                           outputs=value_head)
        self.model.compile(optimizer=self.optimizer,
                           loss=mean_squared_error
                           )
    def predict_distribution(self, states, batch_size=256):
        """Estimate a predictive distribution of the value of each state.

        Each state is duplicated so a single batched forward pass produces
        several predictions per state (stochastic only for architectures
        whose dropout runs with training=True, i.e. init_network).

        :param states: list of distinct states
        :param batch_size: total number of predictions to spread over states
        :return: (mean_pred, std_pred, upper_bound) per-state arrays, with
            upper_bound = mean_pred + 2 * std_pred
        """
        # Robustness fix: guarantee at least one prediction per state.
        # The old int(batch_size / len(states)) was 0 when
        # len(states) > batch_size, making np.stack fail on an empty list.
        predictions_per_state = max(1, batch_size // len(states))
        state_batch = []
        for state in states:
            state_batch = state_batch + [state for x in range(predictions_per_state)]
        state_batch = np.stack(state_batch, axis=0)
        predictions = self.model.predict(state_batch)
        predictions = predictions.reshape(len(states), predictions_per_state)
        mean_pred = np.mean(predictions, axis=1)
        std_pred = np.std(predictions, axis=1)
        upper_bound = mean_pred + 2 * std_pred
        return mean_pred, std_pred, upper_bound
    def predict(self, board_layer):
        """Return the network's value prediction for a board tensor."""
        return self.model.predict(board_layer)
    def TD_update(self, states, rewards, sucstates, episode_active, gamma=0.9):
        """
        Update the SARSA-network using samples from the minibatch
        Args:
            states: board states
            rewards: immediate rewards
            sucstates: successor board states
            episode_active: 1.0 while the episode continues, 0.0 at terminal
                states (masks out the bootstrap term)
            gamma: float, discount factor for the bootstrapped value
        Returns:
            td_errors: np.array
                array of temporal difference errors
        """
        # Bootstrap from the frozen copy (see fix_model), not the live model.
        suc_state_values = self.fixed_model.predict(sucstates)
        V_target = np.array(rewards) + np.array(episode_active) * gamma * np.squeeze(suc_state_values)
        # Perform a step of minibatch Gradient Descent.
        self.model.fit(x=states, y=V_target, epochs=1, verbose=0)
        V_state = self.model.predict(states)  # the expected future returns
        td_errors = V_target - np.squeeze(V_state)
        return td_errors
    def MC_update(self, states, returns):
        """
        Update network using a monte carlo playout
        Args:
            states: starting states
            returns: discounted future rewards
        Returns:
            td_errors: np.array
                array of temporal difference errors
        """
        # Bug fix: epochs was 0, so model.fit performed no training at all
        # and MC_update silently never updated the network.
        self.model.fit(x=states, y=returns, epochs=1, verbose=0)
        V_state = np.squeeze(self.model.predict(states))
        td_errors = returns - V_state
        return td_errors
| [
"numpy.mean",
"keras.layers.Conv2D",
"keras.layers.Flatten",
"keras.models.clone_model",
"numpy.random.choice",
"keras.layers.Concatenate",
"numpy.squeeze",
"numpy.stack",
"keras.layers.Input",
"numpy.random.randint",
"numpy.sum",
"keras.models.Model",
"numpy.array",
"numpy.std",
"keras.... | [((523, 546), 'numpy.random.choice', 'np.random.choice', (['moves'], {}), '(moves)\n', (539, 546), True, 'import numpy as np\n'), ((1270, 1284), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': 'lr'}), '(lr=lr)\n', (1277, 1284), False, 'from keras.optimizers import SGD, Adam, RMSprop\n'), ((1306, 1313), 'keras.models.Model', 'Model', ([], {}), '()\n', (1311, 1313), False, 'from keras.models import Model, clone_model, load_model\n'), ((1835, 1858), 'keras.models.clone_model', 'clone_model', (['self.model'], {}), '(self.model)\n', (1846, 1858), False, 'from keras.models import Model, clone_model, load_model\n'), ((2061, 2097), 'keras.layers.Input', 'Input', ([], {'shape': '(8, 8, 8)', 'name': '"""state"""'}), "(shape=(8, 8, 8), name='state')\n", (2066, 2097), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((3682, 3729), 'keras.models.Model', 'Model', ([], {'inputs': 'layer_state', 'outputs': '[value_head]'}), '(inputs=layer_state, outputs=[value_head])\n', (3687, 3729), False, 'from keras.models import Model, clone_model, load_model\n'), ((3951, 3987), 'keras.layers.Input', 'Input', ([], {'shape': '(8, 8, 8)', 'name': '"""state"""'}), "(shape=(8, 8, 8), name='state')\n", (3956, 3987), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((4388, 4433), 'keras.models.Model', 'Model', ([], {'inputs': 'layer_state', 'outputs': 'value_head'}), '(inputs=layer_state, outputs=value_head)\n', (4393, 4433), False, 'from keras.models import Model, clone_model, load_model\n'), ((4658, 4694), 'keras.layers.Input', 'Input', ([], {'shape': '(8, 8, 8)', 'name': '"""state"""'}), "(shape=(8, 8, 8), name='state')\n", (4663, 4694), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((4913, 4958), 'keras.models.Model', 'Model', ([], {'inputs': 'layer_state', 'outputs': 'value_head'}), '(inputs=layer_state, outputs=value_head)\n', (4918, 4958), False, 'from 
keras.models import Model, clone_model, load_model\n'), ((5169, 5205), 'keras.layers.Input', 'Input', ([], {'shape': '(8, 8, 8)', 'name': '"""state"""'}), "(shape=(8, 8, 8), name='state')\n", (5174, 5205), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((5426, 5471), 'keras.models.Model', 'Model', ([], {'inputs': 'layer_state', 'outputs': 'value_head'}), '(inputs=layer_state, outputs=value_head)\n', (5431, 5471), False, 'from keras.models import Model, clone_model, load_model\n'), ((5682, 5718), 'keras.layers.Input', 'Input', ([], {'shape': '(8, 8, 8)', 'name': '"""state"""'}), "(shape=(8, 8, 8), name='state')\n", (5687, 5718), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6918, 6963), 'keras.models.Model', 'Model', ([], {'inputs': 'layer_state', 'outputs': 'value_head'}), '(inputs=layer_state, outputs=value_head)\n', (6923, 6963), False, 'from keras.models import Model, clone_model, load_model\n'), ((7549, 7578), 'numpy.stack', 'np.stack', (['state_batch'], {'axis': '(0)'}), '(state_batch, axis=0)\n', (7557, 7578), True, 'import numpy as np\n'), ((7731, 7759), 'numpy.mean', 'np.mean', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (7738, 7759), True, 'import numpy as np\n'), ((7779, 7806), 'numpy.std', 'np.std', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (7785, 7806), True, 'import numpy as np\n'), ((386, 410), 'numpy.random.randint', 'np.random.randint', (['(-5)', '(5)'], {}), '(-5, 5)\n', (403, 410), True, 'import numpy as np\n'), ((754, 783), 'numpy.sum', 'np.sum', (['layer_board1[0, :, :]'], {}), '(layer_board1[0, :, :])\n', (760, 783), True, 'import numpy as np\n'), ((804, 833), 'numpy.sum', 'np.sum', (['layer_board1[1, :, :]'], {}), '(layer_board1[1, :, :])\n', (810, 833), True, 'import numpy as np\n'), ((854, 885), 'numpy.sum', 'np.sum', (['layer_board1[2:4, :, :]'], {}), '(layer_board1[2:4, :, :])\n', (860, 885), True, 'import numpy 
as np\n'), ((906, 935), 'numpy.sum', 'np.sum', (['layer_board1[4, :, :]'], {}), '(layer_board1[4, :, :])\n', (912, 935), True, 'import numpy as np\n'), ((2118, 2188), 'keras.layers.Conv2D', 'Conv2D', (['(3)', '(8, 1)'], {'padding': '"""valid"""', 'activation': '"""relu"""', 'name': '"""fileconv"""'}), "(3, (8, 1), padding='valid', activation='relu', name='fileconv')\n", (2124, 2188), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((2230, 2300), 'keras.layers.Conv2D', 'Conv2D', (['(3)', '(1, 8)'], {'padding': '"""valid"""', 'activation': '"""relu"""', 'name': '"""rankconv"""'}), "(3, (1, 8), padding='valid', activation='relu', name='rankconv')\n", (2236, 2300), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((2342, 2435), 'keras.layers.Conv2D', 'Conv2D', (['(3)', '(4, 4)'], {'padding': '"""valid"""', 'activation': '"""relu"""', 'name': '"""quarterconv"""', 'strides': '(4, 4)'}), "(3, (4, 4), padding='valid', activation='relu', name='quarterconv',\n strides=(4, 4))\n", (2348, 2435), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((2483, 2554), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(6, 6)'], {'padding': '"""valid"""', 'activation': '"""relu"""', 'name': '"""largeconv"""'}), "(8, (6, 6), padding='valid', activation='relu', name='largeconv')\n", (2489, 2554), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((2595, 2664), 'keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'padding': '"""valid"""', 'activation': '"""relu"""', 'name': '"""board1"""'}), "(16, (3, 3), padding='valid', activation='relu', name='board1')\n", (2601, 2664), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((2705, 2774), 'keras.layers.Conv2D', 'Conv2D', (['(20)', '(3, 3)'], {'padding': '"""valid"""', 'activation': '"""relu"""', 'name': '"""board2"""'}), "(20, (3, 3), 
padding='valid', activation='relu', name='board2')\n", (2711, 2774), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((2810, 2879), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(3, 3)'], {'padding': '"""valid"""', 'activation': '"""relu"""', 'name': '"""board3"""'}), "(24, (3, 3), padding='valid', activation='relu', name='board3')\n", (2816, 2879), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((2919, 2928), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2926, 2928), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((2959, 2968), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2966, 2968), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((3003, 3012), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3010, 3012), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((3044, 3053), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3051, 3053), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((3083, 3092), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3090, 3092), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((3123, 3132), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3130, 3132), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((3159, 3189), 'keras.layers.Concatenate', 'Concatenate', ([], {'name': '"""dense_bass"""'}), "(name='dense_bass')\n", (3170, 3189), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((3298, 3315), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.1)'}), '(rate=0.1)\n', (3305, 3315), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((3341, 3373), 'keras.layers.Dense', 'Dense', (['(128)'], 
{'activation': '"""sigmoid"""'}), "(128, activation='sigmoid')\n", (3346, 3373), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((3401, 3432), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""sigmoid"""'}), "(64, activation='sigmoid')\n", (3406, 3432), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((3460, 3477), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.1)'}), '(rate=0.1)\n', (3467, 3477), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((3518, 3549), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""sigmoid"""'}), "(32, activation='sigmoid')\n", (3523, 3549), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((3579, 3596), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.1)'}), '(rate=0.1)\n', (3586, 3596), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((3642, 3650), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (3647, 3650), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((4004, 4043), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(3, 3)'], {'activation': '"""sigmoid"""'}), "(8, (3, 3), activation='sigmoid')\n", (4010, 4043), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((4073, 4112), 'keras.layers.Conv2D', 'Conv2D', (['(6)', '(3, 3)'], {'activation': '"""sigmoid"""'}), "(6, (3, 3), activation='sigmoid')\n", (4079, 4112), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((4136, 4175), 'keras.layers.Conv2D', 'Conv2D', (['(4)', '(3, 3)'], {'activation': '"""sigmoid"""'}), "(4, (3, 3), activation='sigmoid')\n", (4142, 4175), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((4199, 4208), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4206, 
4208), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((4233, 4264), 'keras.layers.Dense', 'Dense', (['(24)'], {'activation': '"""sigmoid"""'}), "(24, activation='sigmoid')\n", (4238, 4264), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((4289, 4319), 'keras.layers.Dense', 'Dense', (['(8)'], {'activation': '"""sigmoid"""'}), "(8, activation='sigmoid')\n", (4294, 4319), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((4349, 4357), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (4354, 4357), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((4711, 4750), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(3, 3)'], {'activation': '"""sigmoid"""'}), "(8, (3, 3), activation='sigmoid')\n", (4717, 4750), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((4780, 4789), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4787, 4789), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((4814, 4845), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""sigmoid"""'}), "(10, activation='sigmoid')\n", (4819, 4845), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((4874, 4882), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (4879, 4882), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((5222, 5261), 'keras.layers.Conv2D', 'Conv2D', (['(6)', '(1, 1)'], {'activation': '"""sigmoid"""'}), "(6, (1, 1), activation='sigmoid')\n", (5228, 5261), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((5291, 5300), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5298, 5300), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((5325, 5357), 'keras.layers.Dense', 
'Dense', (['(128)'], {'activation': '"""sigmoid"""'}), "(128, activation='sigmoid')\n", (5330, 5357), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((5387, 5395), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (5392, 5395), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((5737, 5773), 'keras.layers.Conv2D', 'Conv2D', (['(4)', '(1, 1)'], {'activation': '"""relu"""'}), "(4, (1, 1), activation='relu')\n", (5743, 5773), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((5804, 5856), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(2, 2)'], {'strides': '(1, 1)', 'activation': '"""relu"""'}), "(8, (2, 2), strides=(1, 1), activation='relu')\n", (5810, 5856), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((5887, 5940), 'keras.layers.Conv2D', 'Conv2D', (['(12)', '(3, 3)'], {'strides': '(2, 2)', 'activation': '"""relu"""'}), "(12, (3, 3), strides=(2, 2), activation='relu')\n", (5893, 5940), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((5971, 6024), 'keras.layers.Conv2D', 'Conv2D', (['(16)', '(4, 4)'], {'strides': '(2, 2)', 'activation': '"""relu"""'}), "(16, (4, 4), strides=(2, 2), activation='relu')\n", (5977, 6024), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6056, 6093), 'keras.layers.Conv2D', 'Conv2D', (['(20)', '(8, 8)'], {'activation': '"""relu"""'}), "(20, (8, 8), activation='relu')\n", (6062, 6093), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6127, 6163), 'keras.layers.Conv2D', 'Conv2D', (['(3)', '(1, 8)'], {'activation': '"""relu"""'}), "(3, (1, 8), activation='relu')\n", (6133, 6163), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6197, 6233), 'keras.layers.Conv2D', 'Conv2D', (['(3)', '(8, 1)'], {'activation': 
'"""relu"""'}), "(3, (8, 1), activation='relu')\n", (6203, 6233), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6263, 6272), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6270, 6272), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6296, 6305), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6303, 6305), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6328, 6337), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6335, 6337), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6360, 6369), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6367, 6369), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6393, 6402), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6400, 6402), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6426, 6435), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6433, 6435), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6461, 6470), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6468, 6470), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6500, 6530), 'keras.layers.Concatenate', 'Concatenate', ([], {'name': '"""dense_bass"""'}), "(name='dense_bass')\n", (6511, 6530), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6587, 6619), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""sigmoid"""'}), "(256, activation='sigmoid')\n", (6592, 6619), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6645, 6677), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""sigmoid"""'}), "(128, activation='sigmoid')\n", (6650, 6677), False, 'from keras.layers import Input, Dense, 
Flatten, Concatenate, Conv2D, Dropout\n'), ((6703, 6734), 'keras.layers.Dense', 'Dense', (['(56)'], {'activation': '"""sigmoid"""'}), "(56, activation='sigmoid')\n", (6708, 6734), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6760, 6791), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""sigmoid"""'}), "(64, activation='sigmoid')\n", (6765, 6791), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6817, 6848), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""sigmoid"""'}), "(32, activation='sigmoid')\n", (6822, 6848), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((6879, 6887), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (6884, 6887), False, 'from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout\n'), ((8467, 8484), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (8475, 8484), True, 'import numpy as np\n'), ((8781, 8800), 'numpy.squeeze', 'np.squeeze', (['V_state'], {}), '(V_state)\n', (8791, 8800), True, 'import numpy as np\n'), ((1107, 1124), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (1122, 1124), True, 'import numpy as np\n'), ((8522, 8550), 'numpy.squeeze', 'np.squeeze', (['suc_state_values'], {}), '(suc_state_values)\n', (8532, 8550), True, 'import numpy as np\n'), ((8487, 8511), 'numpy.array', 'np.array', (['episode_active'], {}), '(episode_active)\n', (8495, 8511), True, 'import numpy as np\n')] |
from make_datasets_spark import DatasetConverter
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from pyspark.sql.functions import udf
from pyspark.sql.types import ArrayType, DoubleType, StringType, IntegerType
from pyspark.sql.window import Window
from pyspark.sql.functions import dense_rank
import os
import json
import logging
import datetime
import numpy as np
# Module-level logger named after this module, per the standard logging convention.
logger = logging.getLogger(__name__)
class LocalDatasetConverter(DatasetConverter):
    """Spark converter for an event-log dataset with per-assessment targets.

    Reads train/test transaction files, harmonizes their schemas, converts
    timestamps to fractional days, extracts the 'correct' flag from JSON
    event data, and truncates each client's event sequence at the
    assessment time when attaching targets. Helper methods such as
    spark_read_file / load_source_data / get_encoder / collect_lists come
    from the DatasetConverter base class (not visible here).
    """
    def load_transactions(self):
        """Load train + test transactions into one DataFrame.

        Side effect: caches the combined frame on ``self.source_df`` so
        that ``load_target`` can reuse it.
        """
        file_name_train, file_name_test = self.config.trx_files
        df_train = self.spark_read_file(self.path_to_file(file_name_train))
        df_test = self.spark_read_file(self.path_to_file(file_name_test))
        logger.info(f'Loaded {df_train.count()} records from "{file_name_train}"')
        logger.info(f'Loaded {df_test.count()} records from "{file_name_test}"')
        # Align schemas: add any train-only column to test as nulls before union.
        for col in df_train.columns:
            if col not in df_test.columns:
                df_test = df_test.withColumn(col, F.lit(None))
                logger.info(f'Test extended with "{col}" column')
        df = df_train.union(df_test)
        # timestamp to float
        frmt = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"
        col_event_time = self.config.cols_event_time[0]
        df = df.withColumn(col_event_time, F.unix_timestamp(col_event_time, frmt))
        # Seconds since epoch -> fractional days since epoch.
        df = df.withColumn(col_event_time, F.col(col_event_time) / (24 * 60 * 60))
        df = df.withColumn('event_time', df[col_event_time])
        # Process key == 'correct' in json data
        # Missing key becomes the literal string 'None' (all values stringified).
        udf_function = udf(lambda x: str(json.loads(x).get('correct', 'None')), StringType())
        df = df.withColumn('correct', udf_function('event_data'))
        self.source_df = df
        return df
    def load_target(self):
        """Build the target frame joined with assessment-start events.

        Requires ``load_transactions`` to have run first (uses self.source_df).
        NOTE(review): event_type == 'Assessment' with event_code == 2000
        appears to mark the start of an assessment — confirm with the data spec.
        """
        df_target = self.load_source_data(self.config.target_files)
        df_target = df_target.select([self.config.col_client_id, self.config.col_target[0]])
        # Filter & Merge with source dataframe
        filtered_df = (
            self.source_df
            .where((F.col('event_type') == 'Assessment') & (F.col('event_code') == 2000))
            .select(['installation_id', self.config.col_client_id, self.config.cols_event_time[0]])
        )
        df_target = df_target.join(filtered_df, on=[self.config.col_client_id], how='left')
        return df_target
    def trx_to_features(self, df_data, print_dataset_info,
                        col_client_id, cols_event_time, cols_category, cols_log_norm, max_trx_count):
        """Encode categorical columns and collect per-installation sequences.

        Overrides the base-class hook; ``print_dataset_info`` and
        ``cols_log_norm`` are accepted for signature compatibility but
        unused in this implementation. Returns a persisted DataFrame of
        per-'installation_id' event lists.
        """
        encoders = {col: self.get_encoder(df_data, col) for col in cols_category}
        for col in cols_category:
            df_data = self.encode_col(df_data, col, encoders[col])
        used_columns = cols_category + ['event_time', 'installation_id']
        features = df_data.select(used_columns)
        features = self.remove_long_trx(features, max_trx_count, 'installation_id')
        features = self.collect_lists(features, 'installation_id')
        features.persist()
        return features
    def update_with_target(self, features, df_target, col_client_id, col_target):
        """Attach targets and cut every sequence at the assessment timestamp.

        For each target row, finds how many events happened up to the
        assessment time and slices all sequence columns to that length.
        """
        data = df_target.join(features, on=['installation_id'], how='left')
        # Find index for find last timestamp in event_time sequences
        def get_index(event_time, timestamp):
            # +1 so the slice [0:index] includes the event at `timestamp`
            # (event_time is assumed sorted ascending — TODO confirm upstream).
            return int(np.searchsorted(np.array(event_time), timestamp)) + 1
        udf_function = udf(get_index, IntegerType())
        data = data.withColumn('index', udf_function('event_time', self.config.cols_event_time[0]))
        # Slice transactions by index
        # These columns are assumed already label-encoded to ints by
        # trx_to_features — TODO confirm for 'correct'.
        cols_to_slice = ['event_id', 'event_code', 'event_type','title', 'world', 'correct']
        for col in cols_to_slice:
            udf_function = udf(lambda seq, index: seq[0: index], ArrayType(IntegerType()))
            data = data.withColumn(col, udf_function(col, 'index'))
        udf_function = udf(lambda seq, index: seq[0: index], ArrayType(DoubleType()))
        data = data.withColumn('event_time', udf_function('event_time', 'index'))
        # Update trx_count since transaction were cutted by index
        udf_function = udf(lambda seq: len(seq), IntegerType())
        data = data.withColumn('trx_count', udf_function('event_time'))
        # Remove useless columns
        data = data.drop('index', self.config.cols_event_time[0])
        return data
# Script entry point: run the conversion pipeline (run() presumably
# comes from the DatasetConverter base class — not visible here).
if __name__ == '__main__':
    LocalDatasetConverter().run()
| [
"logging.getLogger",
"pyspark.sql.functions.lit",
"json.loads",
"pyspark.sql.types.DoubleType",
"pyspark.sql.functions.unix_timestamp",
"pyspark.sql.types.IntegerType",
"pyspark.sql.functions.col",
"numpy.array",
"pyspark.sql.types.StringType"
] | [((402, 429), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (419, 429), False, 'import logging\n'), ((1314, 1352), 'pyspark.sql.functions.unix_timestamp', 'F.unix_timestamp', (['col_event_time', 'frmt'], {}), '(col_event_time, frmt)\n', (1330, 1352), True, 'import pyspark.sql.functions as F\n'), ((1635, 1647), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1645, 1647), False, 'from pyspark.sql.types import ArrayType, DoubleType, StringType, IntegerType\n'), ((3470, 3483), 'pyspark.sql.types.IntegerType', 'IntegerType', ([], {}), '()\n', (3481, 3483), False, 'from pyspark.sql.types import ArrayType, DoubleType, StringType, IntegerType\n'), ((4205, 4218), 'pyspark.sql.types.IntegerType', 'IntegerType', ([], {}), '()\n', (4216, 4218), False, 'from pyspark.sql.types import ArrayType, DoubleType, StringType, IntegerType\n'), ((1397, 1418), 'pyspark.sql.functions.col', 'F.col', (['col_event_time'], {}), '(col_event_time)\n', (1402, 1418), True, 'import pyspark.sql.functions as F\n'), ((3983, 3995), 'pyspark.sql.types.DoubleType', 'DoubleType', ([], {}), '()\n', (3993, 3995), False, 'from pyspark.sql.types import ArrayType, DoubleType, StringType, IntegerType\n'), ((1022, 1033), 'pyspark.sql.functions.lit', 'F.lit', (['None'], {}), '(None)\n', (1027, 1033), True, 'import pyspark.sql.functions as F\n'), ((3827, 3840), 'pyspark.sql.types.IntegerType', 'IntegerType', ([], {}), '()\n', (3838, 3840), False, 'from pyspark.sql.types import ArrayType, DoubleType, StringType, IntegerType\n'), ((3393, 3413), 'numpy.array', 'np.array', (['event_time'], {}), '(event_time)\n', (3401, 3413), True, 'import numpy as np\n'), ((1596, 1609), 'json.loads', 'json.loads', (['x'], {}), '(x)\n', (1606, 1609), False, 'import json\n'), ((2091, 2110), 'pyspark.sql.functions.col', 'F.col', (['"""event_type"""'], {}), "('event_type')\n", (2096, 2110), True, 'import pyspark.sql.functions as F\n'), ((2131, 2150), 'pyspark.sql.functions.col', 
'F.col', (['"""event_code"""'], {}), "('event_code')\n", (2136, 2150), True, 'import pyspark.sql.functions as F\n')] |
import numpy as np
def normalize_orders(image_data, trace, half_window=10, n=100):
    """
    Build a per-order normalization image for a 2d spectrograph frame.

    :param image_data: numpy.ndarray
        2d image; axis 0 is the row (y) direction.
    :param trace: object exposing trace.data['centers'], an iterable of 1d
        arrays giving the y center of each diffraction order at every x
        column (xwavecal.utils.trace_utils.Trace in the original pipeline).
    :param half_window: int
        the number of pixels above and below a diffraction order to try and normalize
    :param n: int
        the n brightest pixels of those near the trace will have their median computed, then
        all the pixels near the trace will be divided by that median.
    :return: normalization_factor: numpy.ndarray
        The array that normalizes each diffraction order of image_data to 1.
        Pixels not near any order are left at +inf, so dividing by the
        factor sends them to zero.
    """
    # Bug fix: the original computed np.max(image_data.shape[0]), a no-op
    # reduction of a scalar; the row count is what was meant.
    max_row = image_data.shape[0]
    # Row index of every pixel, shaped (rows, 1) so it broadcasts against a
    # (width,) array of per-column trace centers.  (The original built full
    # x/y meshgrids and never used x.)
    y = np.arange(image_data.shape[0])[:, np.newaxis]
    normalization_factor = np.full(image_data.shape, np.inf, dtype=float)
    for single_order in trace.data['centers']:
        # Rows within half_window of the order, clipped to the detector.
        roi = slice(int(max(0, np.min(single_order) - half_window)),
                    int(min(np.max(single_order) + half_window, max_row)))
        near_trace = np.where(np.isclose(y[roi] - single_order, 0, atol=half_window))
        # Median of the n brightest pixels near this trace.
        median_of_n_brightest = np.median(np.sort(image_data[roi][near_trace])[::-1][:n])
        # roi is a basic slice, so normalization_factor[roi] is a view and
        # this fancy-indexed assignment writes through to the full array.
        normalization_factor[roi][near_trace] = median_of_n_brightest
    return normalization_factor
| [
"numpy.ones_like",
"numpy.isclose",
"numpy.sort",
"numpy.max",
"numpy.min",
"numpy.arange"
] | [((653, 680), 'numpy.max', 'np.max', (['image_data.shape[0]'], {}), '(image_data.shape[0])\n', (659, 680), True, 'import numpy as np\n'), ((704, 734), 'numpy.arange', 'np.arange', (['image_data.shape[1]'], {}), '(image_data.shape[1])\n', (713, 734), True, 'import numpy as np\n'), ((736, 766), 'numpy.arange', 'np.arange', (['image_data.shape[0]'], {}), '(image_data.shape[0])\n', (745, 766), True, 'import numpy as np\n'), ((1061, 1115), 'numpy.isclose', 'np.isclose', (['(y[roi] - single_order)', '(0)'], {'atol': 'half_window'}), '(y[roi] - single_order, 0, atol=half_window)\n', (1071, 1115), True, 'import numpy as np\n'), ((795, 819), 'numpy.ones_like', 'np.ones_like', (['image_data'], {}), '(image_data)\n', (807, 819), True, 'import numpy as np\n'), ((1159, 1195), 'numpy.sort', 'np.sort', (['image_data[roi][near_trace]'], {}), '(image_data[roi][near_trace])\n', (1166, 1195), True, 'import numpy as np\n'), ((921, 941), 'numpy.min', 'np.min', (['single_order'], {}), '(single_order)\n', (927, 941), True, 'import numpy as np\n'), ((987, 1007), 'numpy.max', 'np.max', (['single_order'], {}), '(single_order)\n', (993, 1007), True, 'import numpy as np\n')] |
import numpy as np
def init(A, b, c, rank, m):
A_b = A[:, (m-rank):]
A_n = A[:, :(m-rank)]
c_b = c[:, (m-rank):]
c_n = c[:, :(m-rank)]
A_b_inv = np.linalg.pinv(A_b)
b_ = A_b_inv @ b
tmp = c_b @ A_b_inv
ZC = tmp @ A_n - c_n
F = tmp @ b
# Get the Simplex Tabel
T1 = np.vstack((A_b_inv @ A_n, ZC))
T2 = np.vstack((A_b, np.zeros_like(ZC)))
T3 = np.vstack((b_, F))
T = np.hstack((T1, T2, T3))
return T
def main(T, rank, m):
    '''
    Run simplex iterations on tableau T (modified in place) until optimal.

    :param T: simplex tableau; the left m - rank columns are non-basic, the
        next `rank` columns basic, the last column is the right-hand side.
        The last row holds the reduced costs (z_j - c_j) and the objective.
    :param rank: the number of basic vectors or variables
    :param m: total number of variables (columns of A)
    :return: the optimal objective value
    :raises Exception: when no positive ratio exists (unbounded problem).
    '''
    while True:
        F = T[-1, -1]
        ZC = T[-1, :(m - rank)]
        # Optimality: no reduced cost is positive.
        if np.max(ZC) <= 0:
            return F
        # Entering variable: column with the most positive reduced cost.
        main_col = np.argmax(T[-1, :(m-rank)])
        # Leaving variable: minimum positive ratio test.
        tmp = T[:-1, -1] / T[:-1, main_col]
        index, min_v = -1, 1e10
        for i in range(tmp.shape[0]):
            if tmp[i] < min_v and tmp[i] > 0:
                index, min_v = i, tmp[i]
        main_row = index
        if main_row == -1:
            # Every ratio is non-positive: the objective is unbounded.
            # (The original also had an unreachable `return False` here.)
            raise Exception('No solution!')
        # Gauss-Jordan pivot on (main_row, main_col).
        T[main_row, :] = T[main_row, :] / T[main_row, main_col]
        for i in range(T.shape[0]):
            if i == main_row:
                continue
            alpha = T[i, main_col] / T[main_row, main_col]
            T[i, :] = T[i, :] - alpha * T[main_row, :]
        # Swap the entering column into the basic block.  Bug fix: the
        # original took a *view* (`turn = T[:, main_col]`), so the first
        # assignment clobbered the saved column and the "swap" duplicated
        # one column instead of exchanging them.
        turn = T[:, main_col].copy()
        T[:, main_col] = T[:, (m-rank+main_row-1)]
        T[:, (m-rank+main_row-1)] = turn
if __name__ == '__main__':
    # Input format: each row of the text file is [coefficients | rhs].
    # The first row carries the objective coefficients c (its last entry,
    # aligned with the b column, is dropped); the remaining rows are [A | b].
    data = np.loadtxt('data/data-1.txt')
    n, m = data.shape
    # C: row-vector
    c = data[0, :-1]
    c = c[np.newaxis, :]
    # B: col-vector
    b = data[1:n, -1]
    b = b[:, np.newaxis]
    # A: matrix
    A = data[1:n, :-1]
    # The trailing `rank` columns of A are taken as the initial basis (see
    # init); the assert requires A to have at least as many columns as rows.
    rank = min(n-1, m-1)
    assert rank == n-1
    # From here on m counts variables only (the rhs column is excluded).
    m = m-1
    T = init(A, b, c, rank, m)
    Final = main(T, rank, m)
print('The min value of object function is ', Final) | [
"numpy.linalg.pinv",
"numpy.hstack",
"numpy.argmax",
"numpy.max",
"numpy.vstack",
"numpy.loadtxt",
"numpy.zeros_like"
] | [((168, 187), 'numpy.linalg.pinv', 'np.linalg.pinv', (['A_b'], {}), '(A_b)\n', (182, 187), True, 'import numpy as np\n'), ((313, 343), 'numpy.vstack', 'np.vstack', (['(A_b_inv @ A_n, ZC)'], {}), '((A_b_inv @ A_n, ZC))\n', (322, 343), True, 'import numpy as np\n'), ((398, 416), 'numpy.vstack', 'np.vstack', (['(b_, F)'], {}), '((b_, F))\n', (407, 416), True, 'import numpy as np\n'), ((425, 448), 'numpy.hstack', 'np.hstack', (['(T1, T2, T3)'], {}), '((T1, T2, T3))\n', (434, 448), True, 'import numpy as np\n'), ((1635, 1664), 'numpy.loadtxt', 'np.loadtxt', (['"""data/data-1.txt"""'], {}), "('data/data-1.txt')\n", (1645, 1664), True, 'import numpy as np\n'), ((794, 821), 'numpy.argmax', 'np.argmax', (['T[-1, :m - rank]'], {}), '(T[-1, :m - rank])\n', (803, 821), True, 'import numpy as np\n'), ((369, 386), 'numpy.zeros_like', 'np.zeros_like', (['ZC'], {}), '(ZC)\n', (382, 386), True, 'import numpy as np\n'), ((705, 715), 'numpy.max', 'np.max', (['ZC'], {}), '(ZC)\n', (711, 715), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# @author <NAME>
# @date 1 Feb 2019
import numpy as np
import pandas as pd
import itertools
from sklearn import svm
from data_handling import *
from data_stats import *
# Data paths
proj_dir = '/Users/nikhil/code/git_repos/compare-surf-tools/'
data_dir = proj_dir + 'data/'
demograph_file = 'ABIDE_Phenotype.csv'
ants_file = 'ABIDE_ants_thickness_data.csv'
fs53_file = 'ABIDE_fs5.3_thickness.csv'
fs51_file = 'cortical_fs5.1_measuresenigma_thickavg.csv'
fs60_lh_file = 'aparc_lh_thickness_table.txt'
fs60_rh_file = 'aparc_rh_thickness_table.txt'
# Global Vars
subject_ID_col = 'SubjID'
# NOTE(review): standardize_*_data, combine_processed_data,
# cross_correlations and getStatModelPerf come from the star imports of
# data_handling / data_stats -- confirm their signatures there.
# test_1: stdize data
test_name = 'test_1: stdize data'
print('\n ------------- Running {} -------------'.format(test_name))
# Demographics and Dx
demograph = pd.read_csv(data_dir + demograph_file)
demograph = demograph.rename(columns={'Subject_ID':subject_ID_col})
# ANTs
ants_data = pd.read_csv(data_dir + ants_file, header=2)
print('shape of ants data {}'.format(ants_data.shape))
ants_data_std = standardize_ants_data(ants_data, subject_ID_col)
print('shape of stdized ants data {}'.format(ants_data_std.shape))
print(list(ants_data_std.columns)[:5])
print('')
# FS
fs53_data = pd.read_csv(data_dir + fs53_file)
# bug fix: the log message said 'fs51' for fs53 data (copy-paste slip)
print('shape of fs53 data {}'.format(fs53_data.shape))
fs53_data_std = standardize_fs_data(fs53_data, subject_ID_col)
print('shape of stdized fs53 data {}'.format(fs53_data_std.shape))
print(list(fs53_data_std.columns[:5]))
print('')
fs51_data = pd.read_csv(data_dir + fs51_file)
print('shape of fs51 data {}'.format(fs51_data.shape))
fs51_data_std = standardize_fs_data(fs51_data, subject_ID_col)
print('shape of stdized fs51 data {}'.format(fs51_data_std.shape))
print(list(fs51_data_std.columns[:5]))
print('')
# FreeSurfer 6.0 ships one thickness table per hemisphere; merge the two.
fs60_lh_data = pd.read_csv(data_dir + fs60_lh_file, delim_whitespace=True)
fs60_rh_data = pd.read_csv(data_dir + fs60_rh_file, delim_whitespace=True)
print('shape of fs60 data l: {}, r: {}'.format(fs60_lh_data.shape,fs60_rh_data.shape))
fs60_data_std = standardize_fs60_data(fs60_lh_data, fs60_rh_data, subject_ID_col)
# bug fix: the log message said 'fs51' for fs60 data (copy-paste slip)
print('shape of stdized fs60 data {}'.format(fs60_data_std.shape))
print(list(fs60_data_std.columns[:5]))
# test_2: create master df
test_name = 'test_2: create master df'
print('\n ------------- Running {} -------------'.format(test_name))
data_dict = {'ants' : ants_data_std,
             'fs60' : fs60_data_std,
             'fs53' : fs53_data_std,
             'fs51' : fs51_data_std}
na_action = 'drop' # options: ignore, drop; anything else will not use the dataframe for analysis.
master_df, common_subs, common_roi_cols = combine_processed_data(data_dict, subject_ID_col, na_action)
# Add demographic columns to the master_df
useful_demograph = demograph[[subject_ID_col,'SEX','AGE_AT_SCAN','DX_GROUP']]
master_df = pd.merge(master_df, useful_demograph, how='left', on=subject_ID_col)
print('master df shape after adding demographic info {}'.format(master_df.shape))
# test_3: compute cross correlation
test_name = 'test_3: compute cross correlation'
print('\n ------------- Running {} -------------'.format(test_name))
# Compare every pair of pipelines on the shared subjects / ROI columns.
possible_pairs = list(itertools.combinations(data_dict.keys(), 2))
for pair in possible_pairs:
    pipe1 = pair[0]
    pipe2 = pair[1]
    df1 = master_df[master_df['pipeline']==pipe1][[subject_ID_col]+common_roi_cols]
    df2 = master_df[master_df['pipeline']==pipe2][[subject_ID_col]+common_roi_cols]
    xcorr = cross_correlations(df1,df2,subject_ID_col)
    print('Avg cross correlation between {} & {} = {:4.2f}\n'.format(pipe1,pipe2,np.mean(xcorr)))
# # test_4: compute ML perf
# test_name = 'test_4: compute ML perf'
# print('\n ------------- Running {} -------------'.format(test_name))
# input_cols = common_roi_cols
# outcome_col = 'DX_GROUP'
# clf = svm.SVC(kernel='linear')
# ml_perf = getClassiferPerf(master_df,input_cols,outcome_col,clf)
# test_5: compute stats_models perf
test_name = 'test_5: compute stats_models perf'
print('\n ------------- Running {} -------------'.format(test_name))
roi_cols = common_roi_cols
covar_cols = ['SEX','AGE_AT_SCAN']
outcome_col = 'DX_GROUP'
stat_model = 'logit'
sm_perf = getStatModelPerf(master_df,roi_cols,covar_cols,outcome_col,stat_model)
print('Shape of the stats_models results df {}'.format(sm_perf.shape))
| [
"numpy.mean",
"pandas.merge",
"pandas.read_csv"
] | [((779, 817), 'pandas.read_csv', 'pd.read_csv', (['(data_dir + demograph_file)'], {}), '(data_dir + demograph_file)\n', (790, 817), True, 'import pandas as pd\n'), ((906, 949), 'pandas.read_csv', 'pd.read_csv', (['(data_dir + ants_file)'], {'header': '(2)'}), '(data_dir + ants_file, header=2)\n', (917, 949), True, 'import pandas as pd\n'), ((1204, 1237), 'pandas.read_csv', 'pd.read_csv', (['(data_dir + fs53_file)'], {}), '(data_dir + fs53_file)\n', (1215, 1237), True, 'import pandas as pd\n'), ((1485, 1518), 'pandas.read_csv', 'pd.read_csv', (['(data_dir + fs51_file)'], {}), '(data_dir + fs51_file)\n', (1496, 1518), True, 'import pandas as pd\n'), ((1769, 1828), 'pandas.read_csv', 'pd.read_csv', (['(data_dir + fs60_lh_file)'], {'delim_whitespace': '(True)'}), '(data_dir + fs60_lh_file, delim_whitespace=True)\n', (1780, 1828), True, 'import pandas as pd\n'), ((1844, 1903), 'pandas.read_csv', 'pd.read_csv', (['(data_dir + fs60_rh_file)'], {'delim_whitespace': '(True)'}), '(data_dir + fs60_rh_file, delim_whitespace=True)\n', (1855, 1903), True, 'import pandas as pd\n'), ((2799, 2867), 'pandas.merge', 'pd.merge', (['master_df', 'useful_demograph'], {'how': '"""left"""', 'on': 'subject_ID_col'}), "(master_df, useful_demograph, how='left', on=subject_ID_col)\n", (2807, 2867), True, 'import pandas as pd\n'), ((3547, 3561), 'numpy.mean', 'np.mean', (['xcorr'], {}), '(xcorr)\n', (3554, 3561), True, 'import numpy as np\n')] |
"""Convert the 5 CIFAR-10 training batches into a pickled list of images.

Each CIFAR-10 row is 3072 values: 1024 red, then 1024 green, then 1024
blue, each plane stored row-major as a 32x32 grid.
"""
# Bug fix: `import cPickle` is Python-2-only; fall back to pickle on 3.
try:
    import cPickle as pickle
except ImportError:
    import pickle
import numpy as np
import cv2  # kept from the original script; not used here


def _to_image(flat):
    """Reshape one 3072-long CIFAR row into a (32, 32, 3) RGB array.

    Replaces the original per-pixel Python loops with a single vectorized
    reshape: out[r, c] == [flat[r*32+c], flat[1024+r*32+c], flat[2048+r*32+c]].
    The .copy() detaches the result from the batch buffer, matching the
    original's independent np.array(...) per image.
    """
    return np.asarray(flat).reshape(3, 32, 32).transpose(1, 2, 0).copy()


def _convert_batches():
    """Read cifar/data_batch_1..5 and dump all images to collection.pickle."""
    repository = []
    for i in range(1, 6):
        name = 'cifar/data_batch_' + str(i)
        with open(name, 'rb') as fo:
            data = pickle.load(fo)
        for row in data.get('data'):
            repository.append(_to_image(row))
    with open('collection.pickle', 'wb') as f:
        pickle.dump(repository, f)


if __name__ == '__main__':
    # Guarded so importing this module no longer triggers the conversion.
    _convert_batches()
| [
"numpy.array",
"cPickle.dump",
"cPickle.load"
] | [((709, 736), 'cPickle.dump', 'cPickle.dump', (['repository', 'f'], {}), '(repository, f)\n', (721, 736), False, 'import cPickle\n'), ((173, 189), 'cPickle.load', 'cPickle.load', (['fo'], {}), '(fo)\n', (185, 189), False, 'import cPickle\n'), ((644, 658), 'numpy.array', 'np.array', (['repo'], {}), '(repo)\n', (652, 658), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# encoding: utf-8
# The MIT License
# Copyright (c) 2019 Ina (<NAME> & <NAME> - http://www.ina.fr/)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import dlib, cv2
import numpy as np
import pandas as pd
import os
import csv
from .face_utils import extract_left_eye_center, extract_right_eye_center, get_rotation_matrix, crop_image
#import sklearn.externals.joblib as extjoblib
import joblib as jblib
from keras_vggface.vggface import VGGFace
from keras_vggface import utils
from keras.preprocessing import image
def write_to_video(frames_list, file_name, fps):
    """
    Writes a list of frames into a video using MP4V encoding.

    Parameters:
        frames_list (list): List of the frames to write, as RGB arrays of
            identical shape (height, width, 3).
        file_name (string): video output path
        fps (int) : Number of frames per second used in output video
    """
    # Bug fix (naming only): numpy frames are (height, width, ...); the
    # original bound shape[0] to `frame_width` and shape[1] to
    # `frame_height`.  The tuple passed to VideoWriter carried the right
    # values by accident; the names are now correct.  cv2.VideoWriter
    # expects frameSize as (width, height).
    frame_height = frames_list[0].shape[0]
    frame_width = frames_list[0].shape[1]
    fourcc = cv2.VideoWriter_fourcc(*'MP4V')
    out = cv2.VideoWriter(file_name, fourcc,
                          fps, (frame_width, frame_height))
    for frame in frames_list:
        # Frames are handled as RGB in this module; OpenCV writes BGR.
        out.write(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    out.release()
def _get_bbox_pts(detections, face_idx, frame_width, frame_height):
x1 = int(detections[0, 0, face_idx, 3] * frame_width)
y1 = int(detections[0, 0, face_idx, 4] * frame_height)
x2 = int(detections[0, 0, face_idx, 5] * frame_width)
y2 = int(detections[0, 0, face_idx, 6] * frame_height)
width = x2 - x1
height = y2 - y1
max_size = max(width, height)
x1, x2 = max(0, (x1 + x2) // 2 - max_size // 2), min(frame_width, (x1 + x2) // 2 + max_size // 2)
y1, y2 = max(0, (y1 + y2) // 2 - max_size // 2), min(frame_height, (y1 + y2) // 2 + max_size // 2)
return x1, y1, x2, y2
def info2csv(df, csv_path):
    """
    Dump *df* to a CSV file, omitting the DataFrame index.

    Parameters:
        df (DataFrame): Dataframe to be written to csv.
        csv_path (string): CSV output path.
    """
    df.to_csv(path_or_buf=csv_path, index=False)
def _label_decision_fun(x):
if x>0:
return 'm'
else:
return 'f'
def _smooth_labels(df):
if len(df) == 0:
df['smoothed_decision'] = []
df['smoothed_label'] = []
return df
byfaceid = pd.DataFrame(df.groupby('faceid')['decision'].mean())
byfaceid.rename(columns = {'decision':'smoothed_decision'}, inplace=True)
new_df = df.merge(byfaceid, on= 'faceid')
new_df['smoothed_label'] = new_df['smoothed_decision'].map(_label_decision_fun)
return new_df
def _match_bbox_tracker(bbox, tracker):
# bbox info
x = bbox.left()
y = bbox.top()
width = bbox.width()
height = bbox.height()
x_center = x + 0.5 * width
y_center = y + 0.5 * height
# tracker info
tracked_position = tracker.get_position()
t_x = int(tracked_position.left())
t_y = int(tracked_position.top())
t_w = int(tracked_position.width())
t_h = int(tracked_position.height())
t_x_center = t_x + 0.5 * t_w
t_y_center = t_y + 0.5 * t_h
return ( ( t_x <= x_center <= (t_x + t_w)) and
( t_y <= y_center <= (t_y + t_h)) and
( x <= t_x_center <= (x + width)) and
( y <= t_y_center <= (y + height)))
def is_tracker_pos_in_frame(tracker, frame):
    """Return True if the tracker's box overlaps the frame area at all."""
    height, width, _ = frame.shape
    pos = tracker.get_position()
    overlaps_x = pos.right() > 0 and pos.left() < width
    overlaps_y = pos.top() < height and pos.bottom() > 0
    return overlaps_x and overlaps_y
class GenderVideo:
    """
    This is a class regrouping all phases of a pipeline designed for gender classification from video.

    Attributes:
        face_detector: Face detection model (OpenCV DNN, TensorFlow graph).
        align_predictor: Face alignment model (dlib 68-landmark predictor).
        gender_svm: Gender SVM classifier model.
        vgg_feature_extractor: VGGFace neural model used for feature extraction.
        threshold: quality of face detection considered acceptable, value between 0 and 1.
    """
    def __init__(self, threshold = 0.65, verbose = False):
        """
        The constructor for GenderVideo class.

        Parameters:
            threshold (float): quality of face detection considered acceptable, value between 0 and 1.
            verbose (bool): stored on the instance; not read by the methods in this class.
        """
        # All model files are expected in a 'models/' directory next to this file.
        p = os.path.dirname(os.path.realpath(__file__)) + '/models/'
        self.face_detector = cv2.dnn.readNetFromTensorflow(p + "opencv_face_detector_uint8.pb",
                                                      p + "opencv_face_detector.pbtxt")
        self.align_predictor = dlib.shape_predictor(p +'shape_predictor_68_face_landmarks.dat')
        self.gender_svm = jblib.load(p + 'svm_classifier.joblib')
        self.vgg_feature_extractor = VGGFace(include_top = False, input_shape = (224, 224, 3), pooling ='avg')
        self.threshold = threshold
        self.verbose = verbose
    def _gender_from_face(self, img):
        """
        Classify gender from a single face image.

        Face is supposed to be aligned and cropped and resized to 224*224
        it is for regulard detection __call__
        we should check if it is done in the tracking implementation
        (NOTE(review): _process_tracked_face resizes but does not align.)

        Returns:
            (label, decision_value): SVM label and its decision-function
            value rounded to 3 decimals.
        """
        # VGGFace preprocessing, then a batch dimension for predict().
        img = image.img_to_array(img)
        img = utils.preprocess_input(img, version=1)
        img = np.expand_dims(img, axis=0)
        features = self.vgg_feature_extractor.predict(img)
        label = self.gender_svm.predict(features)[0]
        decision_value = round(self.gender_svm.decision_function(features)[0], 3)
        return label, decision_value
    def _process_tracked_face(self, cur_tracker, frame):
        """
        Crop the current tracker position out of *frame*, resize it to
        224x224 and classify it.

        Returns:
            (t_x, t_y, t_w, t_h, label, decision_value)
        """
        ## There is no rotation in this function... results may be suspicious
        tracked_position = cur_tracker.get_position()
        #print('tracked position', tracked_position)
        #print('frame_shape', frame.shape)
        # print('cur_tracker', cur_tracker)
        t_x = int(tracked_position.left())
        t_y = int(tracked_position.top())
        t_w = int(tracked_position.width())
        t_h = int(tracked_position.height())
        # print('tracked face: id, x, y, w, h', face_id, t_x, t_y, t_w, t_h)
        # Clip the top-left corner at 0; the slice end clips itself.
        copy_img = frame[max(0, t_y):(t_y + t_h), max(0, t_x):(t_x + t_w)]
        #print('simage shape', copy_img.shape)
        copy_img = cv2.resize(copy_img, (224,224))
        label, decision_value = self._gender_from_face(copy_img)
        return (t_x, t_y, t_w, t_h, label, decision_value)
    def align_and_crop_face(self, img, rect_list, desired_width, desired_height):
        """
        Aligns and resizes face to desired shape.

        NOTE(review): the return statement sits outside the loop, so when
        rect_list holds several rectangles only the *last* one's crop and
        eye positions are returned (callers in this class pass a single
        rectangle).

        Parameters:
            img : Image to be aligned and resized.
            rect_list: Bounding box coordinates tuples.
            desired_width: output image width.
            desired_height: output image height.
        Returns:
            cropped_img: Image aligned and resized (RGB<->BGR channels reversed).
            left_eye: left eye position coordinates.
            right_eye: right eye position coordinates.
        """
        for j, det in enumerate(rect_list):
            # Rotate the whole image so the eye line is horizontal, then crop.
            shape = self.align_predictor(img, det)
            left_eye = extract_left_eye_center(shape)
            right_eye = extract_right_eye_center(shape)
            M = get_rotation_matrix(left_eye, right_eye)
            rotated_img = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]), flags=cv2.INTER_CUBIC)
            cropped = crop_image(rotated_img, det)
            try:
                cropped_res = cv2.resize(cropped, (desired_width, desired_height))
            except:
                # NOTE(review): bare except; resize fails e.g. on an empty
                # crop -- fall back to resizing the whole rotated image.
                print('except in align_and_crop_faces', det)
                print(img.shape)
                cropped_res = cv2.resize(rotated_img,(desired_width, desired_height))
            # Reverse the channel order (BGR <-> RGB).
            cropped_img = cropped_res[:, :, ::-1]
        return cropped_img, left_eye, right_eye
    def detect_faces_from_image(self, img, desired_width,
                                desired_height, bbox_scaling=1.1):
        """
        Detect faces from an image

        Parameters:
            img (array): Image to detect faces from.
            desired_width (int): desired output width of the image.
            desired_height (int): desired output height of the image.
            bbox_scaling (float): scaling factor to the bounding box around the face.
        Returns:
            faces_data (list) : List containing :
                - the bounding box after scaling
                - image cropped around the face and resized
                - left eye coordinates
                - right eye coordinates
                - index of the face in the image
                - face detection confidence score
        """
        n_face = 0
        faces_data = []
        frame_height = img.shape[0]
        frame_width = img.shape[1]
        # 300x300 mean-subtracted blob, the input size of the OpenCV face net.
        blob = cv2.dnn.blobFromImage(img, 1.0, (300, 300), [104, 117, 123], True, False)
        self.face_detector.setInput(blob)
        detections = self.face_detector.forward()
        for i in range(detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > self.threshold:
                n_face += 1
                bbox = _get_bbox_pts(detections, i, frame_width, frame_height)
                # Grow the box by bbox_scaling around the origin: shrink the
                # top-left by the fractional part, scale the bottom-right.
                # NOTE(review): the comprehension variable shadows the loop's
                # `i` (harmless in py3 comprehension scope, but confusing).
                x1, y1 = [int(i * abs(bbox_scaling//1 - bbox_scaling%1)) for i in bbox[:2]]
                x2, y2 = [int(i*bbox_scaling) for i in bbox[2:]]
                if x1 < x2 and y1 < y2:
                    dets = [dlib.rectangle(x1, y1, x2, y2)]
                else:
                    # Degenerate box: fall back to the whole frame.
                    dets = [dlib.rectangle(0, 0, frame_width, frame_height)]
                face_img, left_eye, right_eye = self.align_and_crop_face(img, dets, desired_width,
                                                                         desired_height)
                face_data = [dets, face_img, left_eye, right_eye,
                             'face_%d' % n_face, confidence]
                faces_data.append(face_data)
        return faces_data
    def detect_with_tracking(self, video_path, k_frames, subsamp_coeff = 1, offset = -1):
        """
        Pipeline for gender classification from videos using correlation filters based tracking (dlib's).

        Parameters:
            video_path (string): Path for input video.
            k_frames (int) : Number of frames for which continue tracking the faces without renewing face detection.
            subsamp_coeff (int) : only 1/subsamp_coeff frames will be processed
            offset (float) : Time in milliseconds to skip at the beginning of the video.
        Returns:
            info (DataFrame): A Dataframe with frame and face information (coordinates, decision function, smoothed and non smoothed labels)
        """
        # Detection happens every k_frames; both counters must line up with
        # the subsampling step.
        assert (k_frames % subsamp_coeff) == 0
        font = cv2.FONT_HERSHEY_SIMPLEX  # NOTE(review): unused in this method
        current_face_id = 0
        face_trackers = {}
        confidence = {}
        info = []
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            raise Exception("Video file does not exist or is invalid")
        while cap.isOpened() :
            ret, frame = cap.read()
            if not ret:
                break
            # skip frames until offset is reached or for subsampling reasons
            if (cap.get(cv2.CAP_PROP_POS_MSEC) < offset) or (cap.get(cv2.CAP_PROP_POS_FRAMES) % subsamp_coeff != 0):
                continue
            #if ((cap.get(cv2.CAP_PROP_POS_FRAMES)) % 1000 == 0) or True:
            #    print(cap.get(cv2.CAP_PROP_POS_FRAMES))
            #    print('dface trackers before update', face_trackers)
            # track faces in current frame
            face_ids_to_delete = []
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            for fid in face_trackers:
                # Drop trackers with low correlation quality or that drifted
                # out of the frame.
                tracking_quality = face_trackers[fid].update(frame)
                if (tracking_quality < 7) or (not is_tracker_pos_in_frame(face_trackers[fid], frame)):
                    face_ids_to_delete.append(fid)
            for fid in face_ids_to_delete:
                face_trackers.pop(fid)
            #print('dface trackers after update', face_trackers)
            # detect faces every k frames
            if (cap.get(cv2.CAP_PROP_POS_FRAMES) % k_frames)==0:
                faces_info = self.detect_faces_from_image(frame,
                                                desired_width=224, desired_height=224)
                if faces_info:
                    for element in faces_info:
                        bbox = element[0][0]
                        confidence[ current_face_id ] = round(element[5], 3)
                        matched_fid = None
                        # match detected face to previously tracked faces
                        for fid in face_trackers:
                            ## TODO/BUG: several elements may match using this condition
                            ## This loop should be debugged to use the closest match found,
                            ## instead of the last match found
                            if _match_bbox_tracker(bbox, face_trackers[fid]):
                                matched_fid = fid
                        # if detected face is not corresponding to previously tracked faces
                        # create a new face id and a new face tracker
                        # BUG: in the current implementation, the newly detected face bounding box
                        # is not used to update the tracker bounding box
                        if matched_fid is None:
                            tracker = dlib.correlation_tracker()
                            tracker.start_track(frame, bbox)
                            face_trackers[ current_face_id ] = tracker
                            current_face_id += 1
            #print('dface trackers after face detection ', face_trackers)
            # delete invalide face positions
            face_ids_to_delete = []
            for fid in face_trackers:
                if not is_tracker_pos_in_frame(face_trackers[fid], frame):
                    face_ids_to_delete.append(fid)
            for fid in face_ids_to_delete:
                face_trackers.pop(fid)
            # process faces based on position found in trackers
            for fid in face_trackers:
                t_x, t_y, t_w, t_h, label, decision_value = self._process_tracked_face(face_trackers[fid], frame)
                t_bbox = dlib.rectangle(t_x, t_y, t_x+t_w, t_y+t_h)
                info.append([
                    cap.get(cv2.CAP_PROP_POS_FRAMES), fid, t_bbox, (t_w, t_h), label,
                    decision_value, confidence[fid]
                ])
        cap.release()
        track_res = pd.DataFrame.from_records(info, columns = ['frame', 'faceid', 'bb', 'size','label', 'decision', 'conf'])
        # Average each face's decision values across its track.
        info = _smooth_labels(track_res)
        return info
    def __call__(self, video_path, subsamp_coeff = 1 ,  offset = -1):
        """
        Pipeline function for gender classification from videos without tracking.

        Parameters:
            video_path (string): Path for input video.
            subsamp_coeff (int) : only 1/subsamp_coeff frames will be processed
            offset (float) : Time in milliseconds to skip at the beginning of the video.
        Returns:
            info: A Dataframe with frame and face information (coordinates, decision function,labels..)
        """
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            raise Exception("Video file does not exist or is invalid")
        info = []
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            # skip frames until offset is reached or for subsampling reasons
            if (cap.get(cv2.CAP_PROP_POS_MSEC) < offset) or (cap.get(cv2.CAP_PROP_POS_FRAMES) % subsamp_coeff != 0):
                continue
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            faces_info = self.detect_faces_from_image(frame,
                                            desired_width=224, desired_height=224)
            if faces_info:
                for element in faces_info:
                    label, decision_value = self._gender_from_face(element[1])
                    bounding_box = element[0][0]
                    detection_score = round(element[5], 3)
                    bbox_length = bounding_box.bottom() - bounding_box.top()
                    info.append([
                        cap.get(cv2.CAP_PROP_POS_FRAMES), bounding_box, (bbox_length, bbox_length), label,
                        decision_value, detection_score
                    ])
        cap.release()
        info = pd.DataFrame.from_records(info, columns = ['frame', 'bb', 'size','label', 'decision', 'conf'])
        return info
| [
"cv2.dnn.blobFromImage",
"keras.preprocessing.image.img_to_array",
"pandas.DataFrame.from_records",
"cv2.warpAffine",
"cv2.dnn.readNetFromTensorflow",
"keras_vggface.vggface.VGGFace",
"dlib.rectangle",
"keras_vggface.utils.preprocess_input",
"dlib.correlation_tracker",
"dlib.shape_predictor",
"c... | [((1953, 1984), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MP4V'"], {}), "(*'MP4V')\n", (1975, 1984), False, 'import dlib, cv2\n'), ((1995, 2063), 'cv2.VideoWriter', 'cv2.VideoWriter', (['file_name', 'fourcc', 'fps', '(frame_height, frame_width)'], {}), '(file_name, fourcc, fps, (frame_height, frame_width))\n', (2010, 2063), False, 'import dlib, cv2\n'), ((5476, 5580), 'cv2.dnn.readNetFromTensorflow', 'cv2.dnn.readNetFromTensorflow', (["(p + 'opencv_face_detector_uint8.pb')", "(p + 'opencv_face_detector.pbtxt')"], {}), "(p + 'opencv_face_detector_uint8.pb', p +\n 'opencv_face_detector.pbtxt')\n", (5505, 5580), False, 'import dlib, cv2\n'), ((5667, 5732), 'dlib.shape_predictor', 'dlib.shape_predictor', (["(p + 'shape_predictor_68_face_landmarks.dat')"], {}), "(p + 'shape_predictor_68_face_landmarks.dat')\n", (5687, 5732), False, 'import dlib, cv2\n'), ((5758, 5797), 'joblib.load', 'jblib.load', (["(p + 'svm_classifier.joblib')"], {}), "(p + 'svm_classifier.joblib')\n", (5768, 5797), True, 'import joblib as jblib\n'), ((5835, 5903), 'keras_vggface.vggface.VGGFace', 'VGGFace', ([], {'include_top': '(False)', 'input_shape': '(224, 224, 3)', 'pooling': '"""avg"""'}), "(include_top=False, input_shape=(224, 224, 3), pooling='avg')\n", (5842, 5903), False, 'from keras_vggface.vggface import VGGFace\n'), ((6242, 6265), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (6260, 6265), False, 'from keras.preprocessing import image\n'), ((6280, 6318), 'keras_vggface.utils.preprocess_input', 'utils.preprocess_input', (['img'], {'version': '(1)'}), '(img, version=1)\n', (6302, 6318), False, 'from keras_vggface import utils\n'), ((6333, 6360), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (6347, 6360), True, 'import numpy as np\n'), ((7353, 7385), 'cv2.resize', 'cv2.resize', (['copy_img', '(224, 224)'], {}), '(copy_img, (224, 224))\n', (7363, 7385), False, 'import dlib, cv2\n'), 
((10062, 10135), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['img', '(1.0)', '(300, 300)', '[104, 117, 123]', '(True)', '(False)'], {}), '(img, 1.0, (300, 300), [104, 117, 123], True, False)\n', (10083, 10135), False, 'import dlib, cv2\n'), ((12255, 12283), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (12271, 12283), False, 'import dlib, cv2\n'), ((16159, 16266), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['info'], {'columns': "['frame', 'faceid', 'bb', 'size', 'label', 'decision', 'conf']"}), "(info, columns=['frame', 'faceid', 'bb', 'size',\n 'label', 'decision', 'conf'])\n", (16184, 16266), True, 'import pandas as pd\n'), ((16929, 16957), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (16945, 16957), False, 'import dlib, cv2\n'), ((18249, 18346), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['info'], {'columns': "['frame', 'bb', 'size', 'label', 'decision', 'conf']"}), "(info, columns=['frame', 'bb', 'size', 'label',\n 'decision', 'conf'])\n", (18274, 18346), True, 'import pandas as pd\n'), ((2142, 2180), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (2154, 2180), False, 'import dlib, cv2\n'), ((8410, 8485), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(img.shape[1], img.shape[0])'], {'flags': 'cv2.INTER_CUBIC'}), '(img, M, (img.shape[1], img.shape[0]), flags=cv2.INTER_CUBIC)\n', (8424, 8485), False, 'import dlib, cv2\n'), ((13072, 13110), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (13084, 13110), False, 'import dlib, cv2\n'), ((17456, 17494), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (17468, 17494), False, 'import dlib, cv2\n'), ((5406, 5432), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (5422, 5432), False, 'import os\n'), ((8601, 8653), 
'cv2.resize', 'cv2.resize', (['cropped', '(desired_width, desired_height)'], {}), '(cropped, (desired_width, desired_height))\n', (8611, 8653), False, 'import dlib, cv2\n'), ((15871, 15917), 'dlib.rectangle', 'dlib.rectangle', (['t_x', 't_y', '(t_x + t_w)', '(t_y + t_h)'], {}), '(t_x, t_y, t_x + t_w, t_y + t_h)\n', (15885, 15917), False, 'import dlib, cv2\n'), ((8798, 8854), 'cv2.resize', 'cv2.resize', (['rotated_img', '(desired_width, desired_height)'], {}), '(rotated_img, (desired_width, desired_height))\n', (8808, 8854), False, 'import dlib, cv2\n'), ((10715, 10745), 'dlib.rectangle', 'dlib.rectangle', (['x1', 'y1', 'x2', 'y2'], {}), '(x1, y1, x2, y2)\n', (10729, 10745), False, 'import dlib, cv2\n'), ((10797, 10844), 'dlib.rectangle', 'dlib.rectangle', (['(0)', '(0)', 'frame_width', 'frame_height'], {}), '(0, 0, frame_width, frame_height)\n', (10811, 10844), False, 'import dlib, cv2\n'), ((14948, 14974), 'dlib.correlation_tracker', 'dlib.correlation_tracker', ([], {}), '()\n', (14972, 14974), False, 'import dlib, cv2\n')] |
import sys
import cPickle as cp
import random
import numpy as np
import networkx as nx
from tqdm import tqdm
import argparse
# Command-line interface.  Note: parse_args() runs at import time, so this
# module can only be executed as a script.
parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', help='Save directory.')
parser.add_argument('--max_n', type=int, help='Upper bound on graph size.')
parser.add_argument('--min_n', type=int, help='Lower bound on graph size.')
parser.add_argument('--num_graph', type=int, help='Number of graphs to generate')
parser.add_argument('--p', type=float, help='Connectivity parameter.')
parser.add_argument('--n_comp', type=int, help='Number of connected components.')
args = parser.parse_args()
def get_component():
    """Draw a connected Erdos-Renyi graph whose size lies in [min_n, max_n].

    The node count is sampled uniformly, G(n, p) is generated, and the
    resulting connected components are stitched together with one random
    bridge edge per consecutive pair so the final graph is connected.
    Reads the module-level globals ``min_n``, ``max_n`` and ``p``.
    """
    node_count = min_n + np.random.randint(max_n - min_n + 1)
    graph = nx.erdos_renyi_graph(n=node_count, p=p)
    pieces = list(nx.connected_component_subgraphs(graph))
    random.shuffle(pieces)
    for idx in range(1, len(pieces)):
        u = random.choice(pieces[idx - 1].nodes())
        v = random.choice(pieces[idx].nodes())
        graph.add_edge(u, v)
    assert nx.is_connected(graph)
    return graph
if __name__ == '__main__':
    # Bind CLI arguments to module-level names; get_component() reads
    # max_n / min_n / p as globals.
    max_n = args.max_n
    min_n = args.min_n
    p = args.p
    n_comp = args.n_comp
    # The output file name encodes every generation parameter.
    fout_name = '%s/ncomp-%d-nrange-%d-%d-n_graph-%d-p-%.2f.pkl' % (args.save_dir, n_comp, min_n, max_n, args.num_graph, p)
    print('Final Output: ' + fout_name)
    print("Generating graphs...")
    # Split the requested size bounds evenly across the components
    # (integer division), so each component is drawn in the reduced range.
    min_n = min_n // n_comp
    max_n = max_n // n_comp
    for i in tqdm(range(args.num_graph)):
        for j in range(n_comp):
            g = get_component()
            if j == 0:
                g_all = g
            else:
                # disjoint_union relabels nodes, keeping the components separate.
                g_all = nx.disjoint_union(g_all, g)
        assert nx.number_connected_components(g_all) == n_comp
        # Append mode: the file accumulates one pickled graph per iteration.
        with open(fout_name, 'ab') as fout:
            cp.dump(g_all, fout, cp.HIGHEST_PROTOCOL)
| [
"cPickle.dump",
"networkx.disjoint_union",
"random.shuffle",
"argparse.ArgumentParser",
"networkx.connected_component_subgraphs",
"networkx.is_connected",
"numpy.random.randint",
"networkx.number_connected_components",
"networkx.erdos_renyi_graph"
] | [((135, 160), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (158, 160), False, 'import argparse\n'), ((792, 826), 'networkx.erdos_renyi_graph', 'nx.erdos_renyi_graph', ([], {'n': 'cur_n', 'p': 'p'}), '(n=cur_n, p=p)\n', (812, 826), True, 'import networkx as nx\n'), ((893, 914), 'random.shuffle', 'random.shuffle', (['comps'], {}), '(comps)\n', (907, 914), False, 'import random\n'), ((1078, 1096), 'networkx.is_connected', 'nx.is_connected', (['g'], {}), '(g)\n', (1093, 1096), True, 'import networkx as nx\n'), ((739, 775), 'numpy.random.randint', 'np.random.randint', (['(max_n - min_n + 1)'], {}), '(max_n - min_n + 1)\n', (756, 775), True, 'import numpy as np\n'), ((852, 887), 'networkx.connected_component_subgraphs', 'nx.connected_component_subgraphs', (['g'], {}), '(g)\n', (884, 887), True, 'import networkx as nx\n'), ((1735, 1772), 'networkx.number_connected_components', 'nx.number_connected_components', (['g_all'], {}), '(g_all)\n', (1765, 1772), True, 'import networkx as nx\n'), ((1840, 1881), 'cPickle.dump', 'cp.dump', (['g_all', 'fout', 'cp.HIGHEST_PROTOCOL'], {}), '(g_all, fout, cp.HIGHEST_PROTOCOL)\n', (1847, 1881), True, 'import cPickle as cp\n'), ((1692, 1719), 'networkx.disjoint_union', 'nx.disjoint_union', (['g_all', 'g'], {}), '(g_all, g)\n', (1709, 1719), True, 'import networkx as nx\n')] |
from autolens import decorator_util
import numpy as np
from autolens.data.array.util import mask_util
@decorator_util.jit()
def centres_from_shape_pixel_scales_and_origin(shape, pixel_scales, origin):
    """Return the (y, x) centre of a 2D array in pixel units, shifted by an origin.

    Parameters
    ----------
    shape : (int, int)
        The (y, x) shape of the 2D array in pixels.
    pixel_scales : (float, float)
        The (y, x) arc-second to pixel conversion scales.
    origin : (float, float)
        The (y, x) arc-second origin the centre is shifted towards.

    Returns
    -------
    tuple (float, float)
        The (y, x) centre of the array including the origin shift.

    Examples
    --------
    centres = centres_from_shape_pixel_scales_and_origin(shape=(5, 5), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
    """
    # The y shift is added and the x shift subtracted because the y pixel
    # axis runs opposite to the y arc-second axis.
    half_y = float(shape[0] - 1) / 2
    half_x = float(shape[1] - 1) / 2
    return (half_y + origin[0] / pixel_scales[0],
            half_x - origin[1] / pixel_scales[1])
@decorator_util.jit()
def regular_grid_2d_from_shape_pixel_scales_and_origin(shape, pixel_scales, origin=(0.0, 0.0)):
    """Compute the (y, x) arc-second coordinates at the centre of every pixel.

    The grid is returned as an array of shape (shape[0], shape[1], 2) whose
    indexes match the original 2D array; y coordinates sit at index 0 of the
    trailing dimension, x coordinates at index 1. Pixel [0, 0] is the
    top-left corner of the grid.

    Parameters
    ----------
    shape : (int, int)
        The (y, x) shape of the 2D array the grid is computed for.
    pixel_scales : (float, float)
        The (y, x) arc-second to pixel scales of the 2D array.
    origin : (float, float)
        The (y, x) origin the grid is shifted around.

    Returns
    -------
    ndarray
        The (shape[0], shape[1], 2) grid of pixel-centre coordinates.
    """
    grid_2d = np.zeros((shape[0], shape[1], 2))
    centres = centres_from_shape_pixel_scales_and_origin(shape=shape, pixel_scales=pixel_scales, origin=origin)
    for row in range(shape[0]):
        # Row indexes increase downwards while y arc-seconds increase
        # upwards, hence the sign flip; hoisted since it is constant per row.
        y_arcsec = -(row - centres[0]) * pixel_scales[0]
        for col in range(shape[1]):
            grid_2d[row, col, 0] = y_arcsec
            grid_2d[row, col, 1] = (col - centres[1]) * pixel_scales[1]
    return grid_2d
@decorator_util.jit()
def regular_grid_1d_from_shape_pixel_scales_and_origin(shape, pixel_scales, origin=(0.0, 0.0)):
    """Compute pixel-centre (y, x) arc-second coordinates, flattened to 1D.

    Identical to the 2D variant, but the coordinates are packed row-major
    into an array of shape (shape[0] * shape[1], 2); y coordinates sit at
    index 0 of the second dimension, x coordinates at index 1.

    Parameters
    ----------
    shape : (int, int)
        The (y, x) shape of the 2D array the grid is computed for.
    pixel_scales : (float, float)
        The (y, x) arc-second to pixel scales of the 2D array.
    origin : (float, float)
        The (y, x) origin the grid is shifted around.

    Returns
    -------
    ndarray
        The (shape[0] * shape[1], 2) grid of pixel-centre coordinates.
    """
    grid_1d = np.zeros((shape[0] * shape[1], 2))
    centres = centres_from_shape_pixel_scales_and_origin(shape=shape, pixel_scales=pixel_scales, origin=origin)
    for row in range(shape[0]):
        # Sign flip: row indexes grow downwards, y arc-seconds grow upwards.
        y_arcsec = -(row - centres[0]) * pixel_scales[0]
        row_offset = row * shape[1]
        for col in range(shape[1]):
            grid_1d[row_offset + col, 0] = y_arcsec
            grid_1d[row_offset + col, 1] = (col - centres[1]) * pixel_scales[1]
    return grid_1d
@decorator_util.jit()
def regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask, pixel_scales, origin=(0.0, 0.0)):
    """Compute pixel-centre (y, x) arc-second coordinates of every unmasked pixel.

    Pixels are scanned row-major from the top-left corner; the first
    unmasked pixel is index 0 of the result. The output has shape
    (total_unmasked_pixels, 2) with y at index 0 and x at index 1 of the
    second dimension.

    Parameters
    ----------
    mask : ndarray
        2D array of bools where *False* marks an unmasked (included) pixel.
    pixel_scales : (float, float)
        The (y, x) arc-second to pixel scales of the 2D mask array.
    origin : (float, float)
        The (y, x) origin the grid is shifted around.

    Returns
    -------
    ndarray
        The (total_unmasked_pixels, 2) grid of coordinates.

    Examples
    --------
    mask = np.array([[True, False, True],
                     [False, False, False]
                     [True, False, True]])
    grid = regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask=mask, pixel_scales=(0.5, 0.5))
    """
    # Compute the full grid once, then gather the unmasked entries from it.
    grid_2d = regular_grid_2d_from_shape_pixel_scales_and_origin(mask.shape, pixel_scales, origin)
    total_regular_pixels = mask_util.total_regular_pixels_from_mask(mask)
    regular_grid = np.zeros(shape=(total_regular_pixels, 2))
    index = 0
    for row in range(mask.shape[0]):
        for col in range(mask.shape[1]):
            if not mask[row, col]:
                regular_grid[index, :] = grid_2d[row, col]
                index += 1
    return regular_grid
@decorator_util.jit()
def sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(mask, pixel_scales, sub_grid_size, origin=(0.0, 0.0)):
    """ For the sub-grid, every unmasked pixel of a 2D mask array of shape (rows, columns) is divided into a finer \
    uniform grid of shape (sub_grid_size, sub_grid_size). This routine computes the (y,x) arc second coordinates at \
    the centre of every sub-pixel defined by this grid.
    Coordinates are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0. \
    Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second \
    sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
    The sub-grid is returned on an array of shape (total_unmasked_pixels*sub_grid_size**2, 2). y coordinates are \
    stored in the 0 index of the second dimension, x coordinates in the 1 index.
    Parameters
    ----------
    mask : ndarray
        A 2D array of bools, where *False* values mean unmasked and are therefore included as part of the calculated \
        regular grid.
    pixel_scales : (float, float)
        The (y,x) arc-second to pixel scales of the 2D mask array.
    sub_grid_size : int
        The size of the sub-grid that each pixel of the 2D mask array is divided into.
    origin : (float, float)
        The (y,x) origin of the 2D array, which the sub-grid is shifted around.
    Returns
    --------
    ndarray
        A sub grid of (y,x) arc-second coordinates at the centre of every pixel unmasked pixel on the 2D mask \
        array. The sub grid array has dimensions (total_unmasked_pixels*sub_grid_size**2, 2).
    Examples
    --------
    mask = np.array([[True, False, True],
                     [False, False, False]
                     [True, False, True]])
    sub_grid_1d = sub_grid_1d_from_mask_pixel_scales_and_origin(mask=mask, pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
    """
    total_sub_pixels = mask_util.total_sub_pixels_from_mask_and_sub_grid_size(mask, sub_grid_size)
    sub_grid = np.zeros(shape=(total_sub_pixels, 2))
    centres_arc_seconds = centres_from_shape_pixel_scales_and_origin(shape=mask.shape, pixel_scales=pixel_scales,
                                                                    origin=origin)
    sub_index = 0
    # Half-width of a pixel and the uniform spacing between sub-pixel
    # centres; the (sub_grid_size + 1) divisor places the sub-centres
    # strictly inside the pixel, away from its edges.
    y_sub_half = pixel_scales[0] / 2
    y_sub_step = pixel_scales[0] / (sub_grid_size + 1)
    x_sub_half = pixel_scales[1] / 2
    x_sub_step = pixel_scales[1] / (sub_grid_size + 1)
    for y in range(mask.shape[0]):
        for x in range(mask.shape[1]):
            if not mask[y, x]:
                # Pixel-centre offset from the array centre, in arc-seconds.
                y_arcsec = (y - centres_arc_seconds[0]) * pixel_scales[0]
                x_arcsec = (x - centres_arc_seconds[1]) * pixel_scales[1]
                for y1 in range(sub_grid_size):
                    for x1 in range(sub_grid_size):
                        # Leading minus flips the y axis so positive y is up.
                        sub_grid[sub_index, 0] = -(y_arcsec - y_sub_half + (y1 + 1) * y_sub_step)
                        sub_grid[sub_index, 1] = x_arcsec - x_sub_half + (x1 + 1) * x_sub_step
                        sub_index += 1
    return sub_grid
@decorator_util.jit()
def grid_arc_seconds_1d_to_grid_pixels_1d(grid_arc_seconds_1d, shape, pixel_scales, origin=(0.0, 0.0)):
    """Convert (y, x) arc-second coordinates to fractional (y, x) pixel coordinates.

    Pixel coordinates are floats including the decimal offset from each
    pixel's top-left corner. Pixel [0, 0] corresponds to the highest y and
    lowest x arc-second coordinate on the grid. Coordinates are shifted by
    the origin before conversion.

    Parameters
    ----------
    grid_arc_seconds_1d : ndarray
        The (total_pixels, 2) grid of arc-second coordinates to convert.
    shape : (int, int)
        The (y, x) shape of the original 2D array.
    pixel_scales : (float, float)
        The (y, x) arc-second to pixel scales of the original 2D array.
    origin : (float, float)
        The (y, x) origin the arc-second grid is shifted to.

    Returns
    -------
    ndarray
        The (total_pixels, 2) grid of pixel-value coordinates.
    """
    grid_pixels = np.zeros((grid_arc_seconds_1d.shape[0], 2))
    centres = centres_from_shape_pixel_scales_and_origin(shape=shape, pixel_scales=pixel_scales, origin=origin)
    for idx in range(grid_arc_seconds_1d.shape[0]):
        y_arcsec = grid_arc_seconds_1d[idx, 0]
        x_arcsec = grid_arc_seconds_1d[idx, 1]
        # The y axis is negated (pixel rows grow downwards) and 0.5 shifts
        # the value so pixel centres land on half-integer coordinates.
        grid_pixels[idx, 0] = -y_arcsec / pixel_scales[0] + centres[0] + 0.5
        grid_pixels[idx, 1] = x_arcsec / pixel_scales[1] + centres[1] + 0.5
    return grid_pixels
@decorator_util.jit()
def grid_arc_seconds_1d_to_grid_pixel_centres_1d(grid_arc_seconds_1d, shape, pixel_scales, origin=(0.0, 0.0)):
    """Convert (y, x) arc-second coordinates to integer (y, x) pixel indexes.

    Each coordinate is mapped to the pixel containing it; pixel [0, 0]
    corresponds to the highest y and lowest x arc-second coordinate.
    Coordinates are shifted by the origin before conversion.

    Parameters
    ----------
    grid_arc_seconds_1d : ndarray
        The (total_pixels, 2) grid of arc-second coordinates to convert.
    shape : (int, int)
        The (y, x) shape of the original 2D array.
    pixel_scales : (float, float)
        The (y, x) arc-second to pixel scales of the original 2D array.
    origin : (float, float)
        The (y, x) origin the arc-second grid is shifted to.

    Returns
    -------
    ndarray
        The (total_pixels, 2) grid of integer-valued pixel indexes.
    """
    grid_pixels = np.zeros((grid_arc_seconds_1d.shape[0], 2))
    centres = centres_from_shape_pixel_scales_and_origin(shape=shape, pixel_scales=pixel_scales, origin=origin)
    for idx in range(grid_arc_seconds_1d.shape[0]):
        # The +0.5 offset combined with int() truncation snaps the
        # fractional pixel coordinate to the containing pixel's index.
        grid_pixels[idx, 0] = int(-grid_arc_seconds_1d[idx, 0] / pixel_scales[0] + centres[0] + 0.5)
        grid_pixels[idx, 1] = int(grid_arc_seconds_1d[idx, 1] / pixel_scales[1] + centres[1] + 0.5)
    return grid_pixels
@decorator_util.jit()
def grid_arc_seconds_1d_to_grid_pixel_indexes_1d(grid_arc_seconds_1d, shape, pixel_scales, origin=(0.0, 0.0)):
    """Convert (y, x) arc-second coordinates to flat (row-major) pixel indexes.

    Index 0 is the top-left pixel, indexes increase rightwards along each
    row and then downwards row by row (e.g. with 10 columns, 2D pixel
    [1, 0] has flat index 10). Coordinates are shifted by the origin before
    conversion.

    Parameters
    ----------
    grid_arc_seconds_1d : ndarray
        The (total_pixels, 2) grid of arc-second coordinates to convert.
    shape : (int, int)
        The (y, x) shape of the original 2D array.
    pixel_scales : (float, float)
        The (y, x) arc-second to pixel scales of the original 2D array.
    origin : (float, float)
        The (y, x) origin the arc-second grid is shifted to.

    Returns
    -------
    ndarray
        The (total_pixels,) array of flat pixel indexes.
    """
    # First snap every coordinate to its containing (row, col) pixel, then
    # flatten with the usual row-major formula row * n_cols + col.
    grid_pixels = grid_arc_seconds_1d_to_grid_pixel_centres_1d(grid_arc_seconds_1d=grid_arc_seconds_1d, shape=shape,
                                                              pixel_scales=pixel_scales, origin=origin)
    grid_pixel_indexes = np.zeros(grid_pixels.shape[0])
    for idx in range(grid_pixels.shape[0]):
        row = grid_pixels[idx, 0]
        col = grid_pixels[idx, 1]
        grid_pixel_indexes[idx] = int(row * shape[1] + col)
    return grid_pixel_indexes
@decorator_util.jit()
def grid_pixels_1d_to_grid_arc_seconds_1d(grid_pixels_1d, shape, pixel_scales, origin=(0.0, 0.0)):
    """Convert a grid of (y,x) pixel coordinates to a grid of (y,x) arc-second values.

    This is the inverse of grid_arc_seconds_1d_to_grid_pixels_1d: pixel [0,0]
    is the top-left corner (highest y / lowest x arc-second value) and the
    result is shifted by the origin after conversion.

    Parameters
    ----------
    grid_pixels_1d: ndarray
        The grid of (y,x) coordinates in pixel values which is converted to arc-second coordinates.
    shape : (int, int)
        The (y,x) shape of the original 2D array the arc-second coordinates were computed on.
    pixel_scales : (float, float)
        The (y,x) arc-second to pixel scales of the original 2D array.
    origin : (float, float)
        The (y,x) origin of the grid, which the arc-second grid is shifted.

    Returns
    --------
    ndarray
        A grid of 1d arc-second coordinates with dimensions (total_pixels, 2).

    Examples
    --------
    grid_pixels_1d = np.array([[0,0], [0,1], [1,0], [1,1])
    grid_pixels_1d = grid_pixels_1d_to_grid_arc_seconds_1d(grid_pixels_1d=grid_pixels_1d, shape=(2,2),
                                                           pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
    """
    grid_arc_seconds = np.zeros((grid_pixels_1d.shape[0], 2))
    centres_arc_seconds = centres_from_shape_pixel_scales_and_origin(shape=shape, pixel_scales=pixel_scales,
                                                                    origin=origin)
    for i in range(grid_arc_seconds.shape[0]):
        # The -0.5 undoes the half-pixel offset of the forward conversion;
        # the leading minus on y flips the axis back (positive y is up).
        grid_arc_seconds[i, 0] = -(grid_pixels_1d[i, 0] - centres_arc_seconds[0] - 0.5) * pixel_scales[0]
        grid_arc_seconds[i, 1] = (grid_pixels_1d[i, 1] - centres_arc_seconds[1] - 0.5) * pixel_scales[1]
    return grid_arc_seconds | [
"numpy.zeros",
"autolens.data.array.util.mask_util.total_sub_pixels_from_mask_and_sub_grid_size",
"autolens.data.array.util.mask_util.total_regular_pixels_from_mask",
"autolens.decorator_util.jit"
] | [((106, 126), 'autolens.decorator_util.jit', 'decorator_util.jit', ([], {}), '()\n', (124, 126), False, 'from autolens import decorator_util\n'), ((1270, 1290), 'autolens.decorator_util.jit', 'decorator_util.jit', ([], {}), '()\n', (1288, 1290), False, 'from autolens import decorator_util\n'), ((3165, 3185), 'autolens.decorator_util.jit', 'decorator_util.jit', ([], {}), '()\n', (3183, 3185), False, 'from autolens import decorator_util\n'), ((5048, 5068), 'autolens.decorator_util.jit', 'decorator_util.jit', ([], {}), '()\n', (5066, 5068), False, 'from autolens import decorator_util\n'), ((7179, 7199), 'autolens.decorator_util.jit', 'decorator_util.jit', ([], {}), '()\n', (7197, 7199), False, 'from autolens import decorator_util\n'), ((10340, 10360), 'autolens.decorator_util.jit', 'decorator_util.jit', ([], {}), '()\n', (10358, 10360), False, 'from autolens import decorator_util\n'), ((12666, 12686), 'autolens.decorator_util.jit', 'decorator_util.jit', ([], {}), '()\n', (12684, 12686), False, 'from autolens import decorator_util\n'), ((14931, 14951), 'autolens.decorator_util.jit', 'decorator_util.jit', ([], {}), '()\n', (14949, 14951), False, 'from autolens import decorator_util\n'), ((17221, 17241), 'autolens.decorator_util.jit', 'decorator_util.jit', ([], {}), '()\n', (17239, 17241), False, 'from autolens import decorator_util\n'), ((2761, 2794), 'numpy.zeros', 'np.zeros', (['(shape[0], shape[1], 2)'], {}), '((shape[0], shape[1], 2))\n', (2769, 2794), True, 'import numpy as np\n'), ((4624, 4658), 'numpy.zeros', 'np.zeros', (['(shape[0] * shape[1], 2)'], {}), '((shape[0] * shape[1], 2))\n', (4632, 4658), True, 'import numpy as np\n'), ((6824, 6870), 'autolens.data.array.util.mask_util.total_regular_pixels_from_mask', 'mask_util.total_regular_pixels_from_mask', (['mask'], {}), '(mask)\n', (6864, 6870), False, 'from autolens.data.array.util import mask_util\n'), ((6890, 6931), 'numpy.zeros', 'np.zeros', ([], {'shape': '(total_regular_pixels, 2)'}), 
'(shape=(total_regular_pixels, 2))\n', (6898, 6931), True, 'import numpy as np\n'), ((9198, 9273), 'autolens.data.array.util.mask_util.total_sub_pixels_from_mask_and_sub_grid_size', 'mask_util.total_sub_pixels_from_mask_and_sub_grid_size', (['mask', 'sub_grid_size'], {}), '(mask, sub_grid_size)\n', (9252, 9273), False, 'from autolens.data.array.util import mask_util\n'), ((9290, 9327), 'numpy.zeros', 'np.zeros', ([], {'shape': '(total_sub_pixels, 2)'}), '(shape=(total_sub_pixels, 2))\n', (9298, 9327), True, 'import numpy as np\n'), ((12139, 12182), 'numpy.zeros', 'np.zeros', (['(grid_arc_seconds_1d.shape[0], 2)'], {}), '((grid_arc_seconds_1d.shape[0], 2))\n', (12147, 12182), True, 'import numpy as np\n'), ((14394, 14437), 'numpy.zeros', 'np.zeros', (['(grid_arc_seconds_1d.shape[0], 2)'], {}), '((grid_arc_seconds_1d.shape[0], 2))\n', (14402, 14437), True, 'import numpy as np\n'), ((17029, 17059), 'numpy.zeros', 'np.zeros', (['grid_pixels.shape[0]'], {}), '(grid_pixels.shape[0])\n', (17037, 17059), True, 'import numpy as np\n'), ((18811, 18849), 'numpy.zeros', 'np.zeros', (['(grid_pixels_1d.shape[0], 2)'], {}), '((grid_pixels_1d.shape[0], 2))\n', (18819, 18849), True, 'import numpy as np\n')] |
""" Helper module with methods for one-hot sequence encoding and generators to
to enable whole genome iteration """
import h5py
import numpy as np
import tensorflow as tf
from collections import defaultdict
class Sequence:
    """ Methods for manipulation of DNA Sequence """
    # NOTE: the docstring above was previously placed after `dic`, where it
    # was a no-op expression statement and Sequence.__doc__ stayed None.
    # One-hot column assigned to each base; characters not listed here
    # (e.g. 'N') produce an all-zero row.
    dic = {
        "A": 0,
        "T": 1,
        "G": 2,
        "C": 3
    }

    def __init__(self):
        pass

    @staticmethod
    def map(buf, seqlength):
        """One-hot encode a buffer of DNA strings.

        Parameters
        ----------
        buf : list of str
            DNA sequences; the encoded length is taken from the first entry.
        seqlength : int
            Unused; kept so the call signature matches Chromatin.map.

        Returns
        -------
        ndarray of shape (len(buf), len(buf[0]), 4), dtype int.
        """
        numSeq = len(buf)
        seqLen = len(buf[0])
        seqMatrixs = np.zeros((numSeq, seqLen, 4), dtype=int)
        for i in range(numSeq):
            dnaSeq = buf[i].upper()
            # Slicing to seqLen truncates sequences longer than the first
            # (same as the original loop bound), while shorter sequences are
            # now zero-padded instead of raising an uncaught IndexError.
            for j, base in enumerate(dnaSeq[:seqLen]):
                column = Sequence.dic.get(base)
                if column is not None:
                    seqMatrixs[i, j, column] = 1
        return seqMatrixs

    @staticmethod
    def add_to_buffer(buf, line):
        """Append the whitespace-stripped line to the buffer in place."""
        buf.append(line.strip())
class Chromatin:
    """ Methods for manipulating discrete chromatin tag counts/ domain calls"""

    def __init__(self):
        pass

    @staticmethod
    def map(buf, seqlen):
        """Return the buffered rows of chromatin values as a numpy array.

        `seqlen` is unused; the parameter exists so the call signature
        matches Sequence.map.
        """
        return np.array(buf)

    @staticmethod
    def add_to_buffer(buf, line):
        """Parse a whitespace-separated line of numbers and append it in place."""
        buf.append([float(token) for token in line.split()])
def assign_handler(dtype):
    """Return the handler class for the given input file type.

    "seq" selects Sequence (one-hot DNA encoding); any other value selects
    Chromatin (numeric chromatin values).
    """
    return Sequence if dtype == "seq" else Chromatin
def train_generator(h5file, filename, batchsize, seqlen, dtype, iterflag):
    """ A generator to return a batch of training data, while iterating over the file in a loop.

    Yields arrays produced by the handler's map() over batches of
    `batchsize` lines read from `filename`. When iterflag == "repeat" the
    file pointer is rewound at EOF and iteration continues indefinitely;
    otherwise the final (possibly short) batch is yielded and the
    generator stops.

    NOTE(review): `h5file` is unused here — presumably kept so the
    signature matches train_generator_h5; confirm against callers.
    """
    handler = assign_handler(dtype)
    with open(filename, "r") as fp:
        line_index = 0
        buf = [] # buf is my feature buffer
        while True:
            for line in fp:
                if line_index < batchsize:
                    handler.add_to_buffer(buf, line)
                    line_index += 1
                else:
                    # Buffer full: emit it, then start a new one with the
                    # current line (which has not been consumed yet).
                    yield handler.map(buf, seqlen)
                    buf = [] # clean buffer
                    handler.add_to_buffer(buf, line)
                    line_index = 1 # reset line index
            if iterflag == "repeat":
                # reset file pointer; the partial buffer is carried over and
                # topped up with lines from the start of the file.
                fp.seek(0)
            else:
                # Flush the final partial batch before stopping.
                yield handler.map(buf, seqlen)
                break
def train_generator_h5(h5file, dspath, batchsize, seqlen, dtype, iterflag):
    """ A generator to return a batch of training data, while iterating over the file in a loop.

    Loads the whole dataset at `dspath` from `h5file` into memory, then
    yields consecutive slices of `batchsize` rows. With
    iterflag == "repeat", a batch that would run past the end wraps around
    to the start of the dataset; otherwise the short tail batch is yielded
    and iteration stops. `seqlen` and `dtype` are unused here.
    """
    with h5py.File(h5file, 'r', libver='latest', swmr=True) as h5:
        # [:] materialises the dataset as an in-memory numpy array.
        ds = h5[dspath][:]
        num_samples = ds.shape[0]
        dim = len(ds.shape)
        start_index = 0
        end_index = 0
        while True:
            start_index = end_index
            end_index += batchsize
            if end_index >= num_samples:
                if iterflag == "repeat":
                    # reset: stitch the tail of the dataset to enough rows
                    # from the front to fill a complete batch.
                    c1 = ds[start_index:num_samples]
                    end_index = batchsize - c1.shape[0]
                    c2 = ds[0: end_index]
                    # vstack for 2D+ data, concatenate for 1D labels.
                    chunk = np.vstack([c1, c2]) if dim>1 \
                        else np.concatenate([c1, c2])
                    yield chunk
                else:
                    yield ds[start_index:num_samples]
                    break
            else:
                yield ds[start_index:end_index]
def train_TFRecord_dataset(dspath, batchsize, dataflag):
    """Build a shuffled, batched tf.data pipeline from a TFRecord file.

    Parameters
    ----------
    dspath : dict
        Must contain "TFRecord" (the record file path(s)) and
        "chromatin_tracks" (the names of the per-example chromatin features).
    batchsize : int
        Number of examples per batch; the final remainder is dropped.
    dataflag : str
        "seqonly" yields (seq, label) tuples; any other value yields
        ({"seq": ..., "chrom_input": ...}, label) pairs.

    Returns
    -------
    tf.data.Dataset
        The parsed, shuffled, batched and prefetched dataset.
    """
    # A plain dict is sufficient: the previous defaultdict() had no default
    # factory, so it already behaved exactly like a dict.
    feature_description = {
        "seq": tf.io.FixedLenFeature([], tf.string),
        "label": tf.io.FixedLenFeature([], tf.int64),
    }
    for ct in dspath["chromatin_tracks"]:
        feature_description[ct] = tf.io.FixedLenFeature([], tf.string)

    def _parse_function(example_proto, flag="seqonly"):
        # Parse the serialized tf.train.Example using the feature dictionary.
        example_message = tf.io.parse_single_example(example_proto, feature_description)
        seq = tf.io.parse_tensor(example_message["seq"], out_type=tf.int64)
        combined_chromatin_data = []
        for ct in dspath["chromatin_tracks"]:
            combined_chromatin_data.append(
                tf.io.parse_tensor(example_message[ct], out_type=tf.float64))
        combined_chromatin_data = tf.concat(combined_chromatin_data, axis=0)
        label = example_message["label"]
        if flag == "seqonly":
            return (seq, label)
        else:
            return {"seq": seq, "chrom_input": combined_chromatin_data}, label

    def _parse_function_wrapper(example_proto):
        # Bind the caller's dataflag into the per-element parse function.
        return _parse_function(example_proto, dataflag)

    files = tf.data.Dataset.from_tensors(dspath["TFRecord"])
    parsed_dataset = (files.interleave(tf.data.TFRecordDataset, num_parallel_calls=tf.data.AUTOTUNE)
                      .shuffle(100)
                      .map(_parse_function_wrapper, num_parallel_calls=tf.data.AUTOTUNE)
                      .batch(batchsize, drop_remainder=True)
                      .prefetch(tf.data.AUTOTUNE))
    return parsed_dataset
| [
"tensorflow.data.Dataset.from_tensors",
"tensorflow.io.parse_single_example",
"tensorflow.io.parse_tensor",
"h5py.File",
"numpy.array",
"numpy.zeros",
"tensorflow.io.FixedLenFeature",
"collections.defaultdict",
"tensorflow.concat",
"numpy.vstack",
"numpy.concatenate"
] | [((3844, 3857), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (3855, 3857), False, 'from collections import defaultdict\n'), ((3891, 3927), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (3912, 3927), True, 'import tensorflow as tf\n'), ((3963, 3998), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (3984, 3998), True, 'import tensorflow as tf\n'), ((5083, 5131), 'tensorflow.data.Dataset.from_tensors', 'tf.data.Dataset.from_tensors', (["dspath['TFRecord']"], {}), "(dspath['TFRecord'])\n", (5111, 5131), True, 'import tensorflow as tf\n'), ((577, 617), 'numpy.zeros', 'np.zeros', (['(numSeq, seqLen, 4)'], {'dtype': 'int'}), '((numSeq, seqLen, 4), dtype=int)\n', (585, 617), True, 'import numpy as np\n'), ((1268, 1281), 'numpy.array', 'np.array', (['buf'], {}), '(buf)\n', (1276, 1281), True, 'import numpy as np\n'), ((2790, 2840), 'h5py.File', 'h5py.File', (['h5file', '"""r"""'], {'libver': '"""latest"""', 'swmr': '(True)'}), "(h5file, 'r', libver='latest', swmr=True)\n", (2799, 2840), False, 'import h5py\n'), ((4075, 4111), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (4096, 4111), True, 'import tensorflow as tf\n'), ((4275, 4337), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['example_proto', 'feature_description'], {}), '(example_proto, feature_description)\n', (4301, 4337), True, 'import tensorflow as tf\n'), ((4390, 4432), 'tensorflow.io.parse_tensor', 'tf.io.parse_tensor', (['seq'], {'out_type': 'tf.int64'}), '(seq, out_type=tf.int64)\n', (4408, 4432), True, 'import tensorflow as tf\n'), ((4728, 4770), 'tensorflow.concat', 'tf.concat', (['combined_chromatin_data'], {'axis': '(0)'}), '(combined_chromatin_data, axis=0)\n', (4737, 4770), True, 'import tensorflow as tf\n'), ((4587, 4638), 'tensorflow.io.parse_tensor', 'tf.io.parse_tensor', 
(['ct_message'], {'out_type': 'tf.float64'}), '(ct_message, out_type=tf.float64)\n', (4605, 4638), True, 'import tensorflow as tf\n'), ((3372, 3391), 'numpy.vstack', 'np.vstack', (['[c1, c2]'], {}), '([c1, c2])\n', (3381, 3391), True, 'import numpy as np\n'), ((3432, 3456), 'numpy.concatenate', 'np.concatenate', (['[c1, c2]'], {}), '([c1, c2])\n', (3446, 3456), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
# Imputation of missing values for numerical variables
# by the mode (the value that appears most often in a set of data values).
class ImputerNumericalVariable(BaseEstimator, TransformerMixin):
    """Impute missing numerical values with the mode learned from the training data."""

    def __init__(self, variables=None):
        self.variables = variables

    def fit(self, X, y=None):
        # Learn and store the mode of each configured variable.
        self.variable_mode_dict = {}
        for variable in self.variables:
            self.variable_mode_dict[variable] = X[variable].mode()[0]
        return self

    def transform(self, X):
        X = X.copy()
        for variable in self.variables:
            # Assign the result back instead of fillna(..., inplace=True):
            # in-place fillna on a column selection is chained assignment,
            # which pandas copy-on-write silently ignores.
            X[variable] = X[variable].fillna(self.variable_mode_dict[variable])
        return X
# Imputation of missing values for categorical variables.
# Replace missing values with new label: "missing_value".
class ImputerCategoricalVariable(BaseEstimator, TransformerMixin):
    """Replace missing categorical values with the explicit label "missing_value"."""

    def __init__(self, variables=None):
        self.variables = variables

    def fit(self, X, y=None):
        # Stateless transformer: there is nothing to learn from the data.
        return self

    def transform(self, X):
        X = X.copy()
        for column in self.variables:
            X[column] = X[column].fillna("missing_value")
        return X
# Logarithm transformation of non-normal distributed variables.
class TransformerLogarithm(BaseEstimator, TransformerMixin):
    """Apply the natural logarithm to each configured variable."""

    def __init__(self, variables=None):
        self.variables = variables

    def fit(self, X, y=None):
        # Stateless transformer: nothing to fit.
        return self

    def transform(self, X):
        X = X.copy()
        for column in self.variables:
            X[column] = np.log(X[column])
        return X
# Get the time elapsed between variable and the year in which the house was sold
class ProcessorTemporalVariable(BaseEstimator, TransformerMixin):
    """Replace a temporal variable with its elapsed time relative to a reference variable."""

    def __init__(self, variables=None, related_variable=None):
        # A single variable name is expected; it is wrapped in a list so
        # transform() can iterate uniformly.
        self.variables = [variables]
        # Store under the same name as the __init__ parameter: scikit-learn's
        # get_params()/clone() look attributes up by parameter name, so the
        # previous `self.related_variables` attribute broke estimator cloning.
        self.related_variable = related_variable

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        X = X.copy()
        for variable in self.variables:
            # Elapsed time: reference column minus the temporal column.
            X[variable] = X[self.related_variable] - X[variable]
        return X
# Replace rare labels (which appear only in a small proportion of the observations) by the string "rare_label".
class EncoderRareLabel(BaseEstimator, TransformerMixin):
    """Group infrequent category labels under the single label "rare_label"."""

    def __init__(self, tolerance, variables=None):
        self.variables = variables
        self.tolerance = tolerance

    def fit(self, X, y=None):
        # Record, per variable, the labels whose relative frequency is at
        # least `tolerance`; everything else is treated as rare.
        self.rare_label_dict = {}
        for variable in self.variables:
            # float(len(X)) replaces np.float(len(X)): the np.float alias was
            # removed in NumPy 1.24, so the original call crashes there.
            frequent_var = pd.Series(X[variable].value_counts() / float(len(X)))
            self.rare_label_dict[variable] = list(frequent_var[frequent_var >= self.tolerance].index)
        return self

    def transform(self, X):
        X = X.copy()
        for variable in self.variables:
            X[variable] = np.where(X[variable].isin(self.rare_label_dict[variable]), X[variable], "rare_label")
        return X
# Drop unnecessary variables.
class DropSelectedVariable(BaseEstimator, TransformerMixin):
    """Drop a fixed set of columns from the frame."""

    def __init__(self, variables=None):
        # Column names to remove.
        self.variables = variables

    def fit(self, X, y=None):
        # Nothing to learn.
        return self

    def transform(self, X):
        # Copy first to mirror the sibling transformers, then drop the columns.
        return X.copy().drop(self.variables, axis=1)
# Transform the strings of the categorical variables into numbers.
class EncoderCategoricalVariable(BaseEstimator, TransformerMixin):
    """Ordinally encode categorical columns by ascending mean of the target.

    During fit, labels are ranked by the mean target value they co-occur
    with; the label with the smallest mean receives 0, the next 1, and so on.
    """

    def __init__(self, variables=None):
        self.variables = variables

    def fit(self, X, y):
        # Join features and target so per-label target means can be grouped.
        temp = pd.concat([X, y], axis=1)
        temp.columns = list(X.columns) + ['target']
        self.ordered_labels_dict = {}
        for column in self.variables:
            ranked = temp.groupby([column])['target'].mean().sort_values(ascending=True).index
            self.ordered_labels_dict[column] = {label: rank for rank, label in enumerate(ranked, 0)}
        return self

    def transform(self, X):
        X = X.copy()
        for column in self.variables:
            # Series.map leaves labels unseen during fit as NaN.
            X[column] = X[column].map(self.ordered_labels_dict[column])
        return X
| [
"numpy.log",
"pandas.concat"
] | [((3572, 3597), 'pandas.concat', 'pd.concat', (['[X, y]'], {'axis': '(1)'}), '([X, y], axis=1)\n', (3581, 3597), True, 'import pandas as pd\n'), ((1605, 1624), 'numpy.log', 'np.log', (['X[variable]'], {}), '(X[variable])\n', (1611, 1624), True, 'import numpy as np\n')] |
import tensorflow as tf
# Taken from https://www.tensorflow.org/guide/eager
# Eager execution works nicely with NumPy. NumPy operations accept tf.Tensor arguments.
# TensorFlow math operations convert Python objects and NumPy arrays to tf.Tensor objects.
# The tf.Tensor.numpy method returns the object's value as a NumPy ndarray.
# NOTE(review): enable_eager_execution() exists only in TensorFlow 1.x;
# in TF 2.x eager mode is the default and this call was removed.
tf.enable_eager_execution()
a = tf.constant([
    [1, 2],
    [3, 4]]
)
print(a)
# Broadcasting support: the scalar 1 is added to every element of `a`.
b = tf.add(a, 1)
print(b)
# Operator overloading is supported (element-wise multiplication here).
print(a * b)
# Use NumPy values: NumPy ops accept tf.Tensor arguments directly.
import numpy as np
c = np.multiply(a, b)
print(c)
| [
"numpy.multiply",
"tensorflow.add",
"tensorflow.constant",
"tensorflow.enable_eager_execution"
] | [((333, 360), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (358, 360), True, 'import tensorflow as tf\n'), ((365, 394), 'tensorflow.constant', 'tf.constant', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (376, 394), True, 'import tensorflow as tf\n'), ((442, 454), 'tensorflow.add', 'tf.add', (['a', '(1)'], {}), '(a, 1)\n', (448, 454), True, 'import tensorflow as tf\n'), ((558, 575), 'numpy.multiply', 'np.multiply', (['a', 'b'], {}), '(a, b)\n', (569, 575), True, 'import numpy as np\n')] |
import logging
import numpy
from PIL import Image
from dirs import dest
# Module-level logger; getLogger() with no name returns the root logger.
log = logging.getLogger()
def save_builtin(img, path):
    """Save *img* through its own save() method under a '-builtinsave' suffix.

    Kept as an alternative code path: having blender save was turning the
    image back to black.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    target = str(path).replace('.png', '-builtinsave.png')
    img.filepath = target
    img.file_format = "PNG"
    img.save()
    log.info(f'wrote {path}')
def save(img, path, logPrefix=''):
    """Convert a flat float RGBA pixel buffer to 8-bit RGB and write it to *path* with PIL."""
    path.parent.mkdir(parents=True, exist_ok=True)
    log.info(f'{logPrefix}{path.relative_to(dest)}: preparing image data')
    # Flat float pixel list -> (rows, cols, RGBA) array.
    # NOTE(review): the reshape uses (size[0], size[1], 4); if img.size is
    # (width, height) this is only correct for square images — confirm
    # against non-square inputs.
    ar = numpy.array(img.pixels).reshape((img.size[0], img.size[1], 4))
    # Reverse the row order (source rows presumably run bottom-to-top — verify).
    ar = ar[::-1, :, :]
    # if img.colorspace_settings.name == 'sRGB':
    # ar = ar**(1 / 2.2) # unverified
    img = Image.fromarray((ar * 255).astype(numpy.uint8))
    # quality=70 applies when the target format supports it (e.g. JPEG).
    img.convert('RGB').save(path, quality=70)
    log.info(f'{logPrefix}{path.relative_to(dest)}: saved.')
# then use https://github.com/BinomialLLC/basis_universal
# then bjs loads with https://doc.babylonjs.com/advanced_topics/mutliPlatTextures#basis-file-format
| [
"logging.getLogger",
"numpy.array"
] | [((79, 98), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (96, 98), False, 'import logging\n'), ((555, 578), 'numpy.array', 'numpy.array', (['img.pixels'], {}), '(img.pixels)\n', (566, 578), False, 'import numpy\n')] |
import sys
import os, subprocess
import pickle
import configparser
from datetime import datetime, timezone, timedelta
from ast import literal_eval as le

import numpy as np
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
from matplotlib import dates as md
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.preprocessing import normalize, scale
import sklearn.metrics as metrics
import pytz

import stat_tools as st
def localToUTCtimestamp(t, local_tz):
    """Interpret naive datetime *t* as wall time in *local_tz*; return its UTC epoch timestamp."""
    # is_dst=None makes pytz raise on ambiguous or non-existent local times
    # instead of silently guessing.
    aware = local_tz.localize(t, is_dst=None)
    return aware.astimezone(pytz.utc).timestamp()
def UTCtimestampTolocal(ts, local_tz):
    """Convert epoch timestamp *ts* to an aware datetime expressed in *local_tz*."""
    utc_moment = datetime.fromtimestamp(ts, tz=pytz.timezone("UTC"))
    return utc_moment.astimezone(local_tz)
# Load run configuration; the config path may be given as the first CLI
# argument and falls back to ./config.conf otherwise (any failure reading
# sys.argv, including a missing argument, lands in the except branch).
try:
    try:
        config_path = sys.argv[1]
    except Exception:
        config_path = "./config.conf"
    cp = configparser.ConfigParser()
    cp.read(config_path)
    # literal_eval turns the quoted config strings into Python values.
    inpath=le(cp["paths"]["feature_path"])
    GHI_path=le(cp["paths"]["GHI_path"])
    forecast_path=le(cp["paths"]["forecast_path"])
    lead_minutes=le(cp["forecast"]["lead_minutes"])
    days=le(cp["forecast"]["days"])
    #lead_minutes=[1,3,5,10,15,30,45];
    #sensors = np.arange(99,100)
    try:
        sensors = le(cp["forecast"]["sensors"])
    except Exception:
        GHI_Coor = le(cp["GHI_sensors"]["GHI_Coor"]) #if sensor list isn't provided, forecast for all GHI points
        sensors = range(0,len(GHI_Coor))
    try:
        forecast_timezone=pytz.timezone(cp["forecast"]["forecast_timezone"])
        print("Using camera timezone: %s" % str(forecast_timezone))
    except Exception:
        forecast_timezone=pytz.timezone("utc")
        print("Error processsing forecast timezone config, assuming UTC")
except KeyError as e:
    # Missing config sections/keys are reported but execution continues,
    # which will raise NameError later if required names never got bound.
    print("Error loading config: %s" % e)
# Ensure the forecast output directory exists before writing any results.
if not os.path.isdir(forecast_path):
    try:
        # makedirs also creates missing parent directories; forecast_path
        # carries a trailing separator, hence the [:-1].
        os.makedirs(forecast_path[:-1])
    except OSError:
        # Trap only filesystem errors — the original bare `except:` would
        # also have hidden programming errors such as NameError.
        print('Cannot create directory,', forecast_path[:-1])
plt.ioff() #Turn off interactive plotting for running automatically
# Main loop: for each requested day, build per-sensor feature/target arrays,
# run the pickled SVR model per lead time, write forecast CSVs and plots,
# and accumulate MAE/MSE for the model and a persistence baseline.
for day in days:
    MAE, MSE = [], []
    MAE2, MSE2 = [], []
    print("Predicting for " + day)
    if not os.path.isdir(forecast_path+day[:8]):
        try:
            subprocess.call(['mkdir', forecast_path+day[:8]])
        except:
            print('Cannot create directory,',forecast_path+day[:8])
            continue
    if not os.path.isdir(forecast_path+day[:8] + "/plots"):
        try:
            os.mkdir(forecast_path+day[:8] + "/plots")
        except:
            print('Cannot create directory,', forecast_path+day[:8] + "/plots")
    # One pass per forecast lead time (minutes ahead).
    for forward in lead_minutes:
        timestamp, DataX, DataY = {},{},{}
        MAE_period, MSE_period = [], []
        MAE2_period, MSE2_period = [], []
        for sensor in sensors:
            timestamp[sensor] = []
            DataX[sensor] = []
            DataY[sensor] = []
            try:
                x = np.genfromtxt(inpath+day[:8]+'/GHI'+str(sensor)+'.csv',delimiter=',',skip_header=1); # < ORIGINAL
                #x = np.genfromtxt(inpath+'/GHI'+str(sensor)+'.csv',delimiter=','); # Temp change to allow running of old data in dhuang3
                x = x[x[:,0]==forward]; #Take all rows where forecast period == forward
                #if sensor == 26: # Temp added for 2018-09-22 test with location 99
                #    with np.load(GHI_path+day[:6]+'/GHI_'+str(99)+'.npz') as data: #
                #        ty, y = data['timestamp'], data['ghi'] #
                #else: #
                with np.load(GHI_path+day[:6]+'/GHI_'+str(sensor)+'.npz') as data: # < ORIGINAL
                    ty, y = data['timestamp'], data['ghi'] # < ORIGINAL
                #ty -= 3600 #Add an hour (testing only!)
                x = x[x[:,1]<=ty[-1]] #Take all "feature" elements where timestamp is less than last GHI timestamp
                tx=x[:,1].copy(); #Create copy of feature timestamps
                itx = ((tx-ty[0]+30)//60).astype(int) #Create array of relative time based on first GHI timestamp, add 30 secs, floor to minutes, convert to int
                print("len(x): %i\tlen y: %i\n" % (len(tx), len(ty)))
                try:
                    print("tx: %i\ty: %i\titx: %i\n" % (tx[0],ty[0],itx[0]))
                except IndexError:
                    pass
                x[:,1] = (y[itx]) #Select x values corresponding to times in itx
                DataX[sensor] += [x[:,1:]] #Append timestamp and x values to DataX (does NOT copy forecast period "forward" column)
                DataY[sensor] += [(y[itx + forward])] #Get future actual GHI
                timestamp[sensor] += [tx];
                DataX[sensor] = np.vstack(DataX[sensor]) #stack time series for all GHI locations vertically
                DataY[sensor] = np.hstack(DataY[sensor]) #stack time series for persistence horizontally
                timestamp[sensor] = np.hstack(timestamp[sensor]) #stack timestamps horizontally
                #print( DataX[sensor] )
                #print(DataY[sensor], DataX[sensor][:,0])
                #try:
                mk = (DataY[sensor] > 0) & (DataX[sensor][:,0] > 0) #create boolean list where persistence value and timestamp are both >0
                DataX[sensor] = DataX[sensor][mk] #take subset selected above
                # cf2 column (was 15 but we dropped the leadminutes column already)
                cld_frac = np.copy(DataX[sensor][:,14])
                DataX[sensor][:,0]/=400; #scale GHI by 400? (note: data in *.npz is already scaled?)
                DataX[sensor][:,1:-1] = scale(DataX[sensor][:,1:-1]); #normalize other x values
            except ValueError as e:
                print("Skipping sensor %i, %s" % (sensor, str(e)))
                continue #This will get thrown if there's no GHI data and DataY is filled with NaNs
            except IndexError as e:
                print("Skipping sensor %i, %s" % (sensor, str(e)))
                continue
            except FileNotFoundError as e:
                print("Skipping sensor %i, %s" % (sensor, str(e)))
                continue
            # DataX[:,1:] = normalize(DataX[:,1:],axis=0);
            DataY[sensor] = DataY[sensor][mk] #take subset to match x values
            timestamp[sensor] = timestamp[sensor][mk] #take subset to match x values
            print("%i minute forecast, location %i" % (forward, sensor))
            print("\t",DataX[sensor].shape,DataY[sensor].shape)
            print('\tMean GHI:', np.nanmean(DataY[sensor]))
            with open('optimal_model{:02d}.mod99'.format(forward),'rb') as fmod:
                SVR_linear = pickle.load(fmod) #Load model
            # until a model is trained using the max_ghi column
            # - (which requires processing another month) -
            # just drop that column before predicting
            testY_hat = SVR_linear.predict(DataX[sensor][:,0:-1]) #Run model
            testY_per = DataX[sensor][:,0]*400 #Create persistence model (and rescale)
            max_ghi = DataX[sensor][:,-1]
            # use the theoretical maximum ghi if the sky is clear
            testY_hat[cld_frac < 0.05] = max_ghi[cld_frac < 0.05]
            # use a persistence estimate if the sky is totally overcast
            # since nothing is liable to change
            # eventually, we'll want to do something else so we don't need ghi sensor data
            testY_hat[cld_frac > 0.95] = testY_per[cld_frac > 0.95]
            #ts_offset = datetime(2018,1,1) #fix offset
            #ts_offset.replace(tzinfo=timezone.utc)
            #ts_fixed = (timestamp[sensor]+ts_offset.timestamp()-dt.timedelta(hours=5)
            #ts_fixed = timestamp[sensor] + (ts_offset.timestamp()-(3600*5))
            #ts_fixed = (datetime.strptime(timestamp[sensor], '%Y-%m-%d %H:%M:%S')-datetime(2018,1,1)).total_seconds()+(3600*5)
            #md_timestamp = md.epoch2num(ts_fixed)
            #ts_str = ts_fixed.astype(object)
            #ts_str = [datetime.fromtimestamp(ts).strftime("%m/%d/%Y %H:%M:%S") for ts in ts_str]
            #print(ts_str)
            ts_fixed = timestamp[sensor] #np.vectorize(UTCtimestampTolocal)(timestamp[sensor], forecast_timezone)
            md_timestamp = md.epoch2num(ts_fixed)
            #np.savetxt(forecast_path + "forecast_" + str(sensor) + "_" + str(forward) + "min.csv", np.column_stack((ts_fixed, DataX[sensor])), header="Timestamp,DateTime String, RawForecast",delimiter=",")
            np.savetxt(forecast_path + day[:8] + "/ML_forecast_" + str(sensor) + "_" + str(forward) + "min.csv", np.column_stack((ts_fixed, DataY[sensor], testY_hat, testY_per)), header="Timestamp,Actual_GHI,Forecast_GHI,Persistence_GHI",delimiter=",")
            # Plot actual vs. forecast vs. persistence for this sensor/lead time.
            xfmt = md.DateFormatter('%H:%M', tz=forecast_timezone) #pytz.timezone('US/Eastern'))
            plt.figure();
            ax=plt.gca()
            ax.xaxis.set_major_formatter(xfmt)
            #xlocator = md.MinuteLocator(byminute=[0], interval = 1)
            #ax.xaxis.set_major_locator(xlocator)
            plt.title("Actual vs. Forecast Irradiance "+day,y=1.08, fontsize=16)
            plt.xlabel('Hour (%s)' % str(forecast_timezone))
            plt.ylabel('Irradiance (W/m^2)');
            plt.plot(md_timestamp,DataY[sensor], "-", linewidth=1, label='Actual GHI');
            plt.plot(md_timestamp,testY_hat, "-", linewidth=1, label='Cloud-Tracking Forecast');
            plt.plot(md_timestamp,testY_per, "-", linewidth=1, label='Persistence Forecast');
            plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', ncol=3, borderaxespad=0., fontsize="small");
            plt.tight_layout()
            plt.savefig(forecast_path + day[:8] + "/plots/GHI_ActualvsForecast_"+ str(sensor) + "_" + str(forward) + ".png")
            plt.close()
            #plt.show();
            #plt.figure(); plt.plot(testY_hat); plt.plot(DataY); plt.show();
            # bins=np.unique(timestamp)
            # Y = st.bin_average(DataY,timestamp,bins);
            # Yhat = st.bin_average(testY_hat,timestamp,bins);
            # plt.figure(); plt.plot(Yhat); plt.plot(Y); plt.show();
            MAE_period += [metrics.mean_absolute_error(DataY[sensor], testY_hat)]
            MSE_period += [metrics.mean_squared_error(DataY[sensor], testY_hat)]
            MAE2_period += [metrics.mean_absolute_error(DataY[sensor], testY_per)]
            MSE2_period += [metrics.mean_squared_error(DataY[sensor], testY_per)]
            # MAE = metrics.mean_absolute_error(Y, Yhat)
            # MSE = metrics.mean_squared_error(Y, Yhat)
            # print("##################################")
        #except Exception as e:
        #    raise
        #    print("Exception: %s" % str(e))
        # No sensor produced usable data for this lead time: record NaNs.
        if not len(MAE_period):
            MAE += [np.nan]
            MSE += [np.nan]
            MAE2 += [np.nan]
            MSE2 += [np.nan]
            continue
        # Average the per-sensor errors for this lead time.
        MAE += [sum(MAE_period)/len(MAE_period)]
        MSE += [sum(MSE_period)/len(MSE_period)]
        MAE2 += [sum(MAE2_period)/len(MAE2_period)]
        MSE2 += [sum(MSE2_period)/len(MSE2_period)]
    print('MAE and MAE2:', MAE, MAE2)
    # Summary plot: MAE of the model vs. persistence across lead times.
    plt.figure();
    plt.title("Cloud-Tracking vs. Persistent Model MAE "+day,y=1.08, fontsize=16)
    plt.plot(lead_minutes,MAE,label='Cloud-Tracking Forecast');
    plt.plot(lead_minutes,MAE2,label='Persistent Model');
    plt.xlabel('Forecast Lead Time (Minutes)');
    plt.ylabel('Mean Absolute Error (W/m^2)');
    plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', ncol=3, borderaxespad=0., fontsize="small");
    plt.tight_layout()
    plt.savefig(forecast_path + day[:8] + "/plots/MAE_CloudvsPersist.png")
    plt.close()
    #plt.show();
| [
"configparser.ConfigParser",
"matplotlib.pyplot.ylabel",
"numpy.hstack",
"numpy.column_stack",
"numpy.nanmean",
"pytz.timezone",
"matplotlib.dates.epoch2num",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"os.path.isdir",
"os.mkdir",
"subprocess.call",
"n... | [((37, 58), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (51, 58), False, 'import matplotlib\n'), ((2059, 2069), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (2067, 2069), True, 'from matplotlib import pyplot as plt\n'), ((917, 944), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (942, 944), False, 'import configparser\n'), ((982, 1013), 'ast.literal_eval', 'le', (["cp['paths']['feature_path']"], {}), "(cp['paths']['feature_path'])\n", (984, 1013), True, 'from ast import literal_eval as le\n'), ((1027, 1054), 'ast.literal_eval', 'le', (["cp['paths']['GHI_path']"], {}), "(cp['paths']['GHI_path'])\n", (1029, 1054), True, 'from ast import literal_eval as le\n'), ((1073, 1105), 'ast.literal_eval', 'le', (["cp['paths']['forecast_path']"], {}), "(cp['paths']['forecast_path'])\n", (1075, 1105), True, 'from ast import literal_eval as le\n'), ((1124, 1158), 'ast.literal_eval', 'le', (["cp['forecast']['lead_minutes']"], {}), "(cp['forecast']['lead_minutes'])\n", (1126, 1158), True, 'from ast import literal_eval as le\n'), ((1168, 1194), 'ast.literal_eval', 'le', (["cp['forecast']['days']"], {}), "(cp['forecast']['days'])\n", (1170, 1194), True, 'from ast import literal_eval as le\n'), ((1898, 1926), 'os.path.isdir', 'os.path.isdir', (['forecast_path'], {}), '(forecast_path)\n', (1911, 1926), False, 'import os, subprocess\n'), ((12043, 12055), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12053, 12055), True, 'from matplotlib import pyplot as plt\n'), ((12061, 12146), 'matplotlib.pyplot.title', 'plt.title', (["('Cloud-Tracking vs. Persistent Model MAE ' + day)"], {'y': '(1.08)', 'fontsize': '(16)'}), "('Cloud-Tracking vs. 
Persistent Model MAE ' + day, y=1.08, fontsize=16\n )\n", (12070, 12146), True, 'from matplotlib import pyplot as plt\n'), ((12143, 12203), 'matplotlib.pyplot.plot', 'plt.plot', (['lead_minutes', 'MAE'], {'label': '"""Cloud-Tracking Forecast"""'}), "(lead_minutes, MAE, label='Cloud-Tracking Forecast')\n", (12151, 12203), True, 'from matplotlib import pyplot as plt\n'), ((12208, 12262), 'matplotlib.pyplot.plot', 'plt.plot', (['lead_minutes', 'MAE2'], {'label': '"""Persistent Model"""'}), "(lead_minutes, MAE2, label='Persistent Model')\n", (12216, 12262), True, 'from matplotlib import pyplot as plt\n'), ((12267, 12309), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Forecast Lead Time (Minutes)"""'], {}), "('Forecast Lead Time (Minutes)')\n", (12277, 12309), True, 'from matplotlib import pyplot as plt\n'), ((12315, 12356), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean Absolute Error (W/m^2)"""'], {}), "('Mean Absolute Error (W/m^2)')\n", (12325, 12356), True, 'from matplotlib import pyplot as plt\n'), ((12363, 12480), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '"""lower left"""', 'ncol': '(3)', 'borderaxespad': '(0.0)', 'fontsize': '"""small"""'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc='lower left', ncol=3,\n borderaxespad=0.0, fontsize='small')\n", (12373, 12480), True, 'from matplotlib import pyplot as plt\n'), ((12478, 12496), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12494, 12496), True, 'from matplotlib import pyplot as plt\n'), ((12501, 12571), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(forecast_path + day[:8] + '/plots/MAE_CloudvsPersist.png')"], {}), "(forecast_path + day[:8] + '/plots/MAE_CloudvsPersist.png')\n", (12512, 12571), True, 'from matplotlib import pyplot as plt\n'), ((12576, 12587), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12585, 12587), True, 'from matplotlib import pyplot as plt\n'), ((1301, 1330), 'ast.literal_eval', 
'le', (["cp['forecast']['sensors']"], {}), "(cp['forecast']['sensors'])\n", (1303, 1330), True, 'from ast import literal_eval as le\n'), ((1550, 1600), 'pytz.timezone', 'pytz.timezone', (["cp['forecast']['forecast_timezone']"], {}), "(cp['forecast']['forecast_timezone'])\n", (1563, 1600), False, 'import pytz\n'), ((1945, 1973), 'os.mkdir', 'os.mkdir', (['forecast_path[:-1]'], {}), '(forecast_path[:-1])\n', (1953, 1973), False, 'import os, subprocess\n'), ((2246, 2284), 'os.path.isdir', 'os.path.isdir', (['(forecast_path + day[:8])'], {}), '(forecast_path + day[:8])\n', (2259, 2284), False, 'import os, subprocess\n'), ((2476, 2525), 'os.path.isdir', 'os.path.isdir', (["(forecast_path + day[:8] + '/plots')"], {}), "(forecast_path + day[:8] + '/plots')\n", (2489, 2525), False, 'import os, subprocess\n'), ((716, 736), 'pytz.timezone', 'pytz.timezone', (['"""UTC"""'], {}), "('UTC')\n", (729, 736), False, 'import pytz\n'), ((1372, 1405), 'ast.literal_eval', 'le', (["cp['GHI_sensors']['GHI_Coor']"], {}), "(cp['GHI_sensors']['GHI_Coor'])\n", (1374, 1405), True, 'from ast import literal_eval as le\n'), ((1717, 1737), 'pytz.timezone', 'pytz.timezone', (['"""utc"""'], {}), "('utc')\n", (1730, 1737), False, 'import pytz\n'), ((2309, 2360), 'subprocess.call', 'subprocess.call', (["['mkdir', forecast_path + day[:8]]"], {}), "(['mkdir', forecast_path + day[:8]])\n", (2324, 2360), False, 'import os, subprocess\n'), ((2550, 2594), 'os.mkdir', 'os.mkdir', (["(forecast_path + day[:8] + '/plots')"], {}), "(forecast_path + day[:8] + '/plots')\n", (2558, 2594), False, 'import os, subprocess\n'), ((9030, 9052), 'matplotlib.dates.epoch2num', 'md.epoch2num', (['ts_fixed'], {}), '(ts_fixed)\n', (9042, 9052), True, 'from matplotlib import dates as md\n'), ((9584, 9631), 'matplotlib.dates.DateFormatter', 'md.DateFormatter', (['"""%H:%M"""'], {'tz': 'forecast_timezone'}), "('%H:%M', tz=forecast_timezone)\n", (9600, 9631), True, 'from matplotlib import dates as md\n'), ((9674, 9686), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9684, 9686), True, 'from matplotlib import pyplot as plt\n'), ((9703, 9712), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9710, 9712), True, 'from matplotlib import pyplot as plt\n'), ((9891, 9962), 'matplotlib.pyplot.title', 'plt.title', (["('Actual vs. Forecast Irradiance ' + day)"], {'y': '(1.08)', 'fontsize': '(16)'}), "('Actual vs. Forecast Irradiance ' + day, y=1.08, fontsize=16)\n", (9900, 9962), True, 'from matplotlib import pyplot as plt\n'), ((10033, 10065), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Irradiance (W/m^2)"""'], {}), "('Irradiance (W/m^2)')\n", (10043, 10065), True, 'from matplotlib import pyplot as plt\n'), ((10079, 10154), 'matplotlib.pyplot.plot', 'plt.plot', (['md_timestamp', 'DataY[sensor]', '"""-"""'], {'linewidth': '(1)', 'label': '"""Actual GHI"""'}), "(md_timestamp, DataY[sensor], '-', linewidth=1, label='Actual GHI')\n", (10087, 10154), True, 'from matplotlib import pyplot as plt\n'), ((10167, 10256), 'matplotlib.pyplot.plot', 'plt.plot', (['md_timestamp', 'testY_hat', '"""-"""'], {'linewidth': '(1)', 'label': '"""Cloud-Tracking Forecast"""'}), "(md_timestamp, testY_hat, '-', linewidth=1, label=\n 'Cloud-Tracking Forecast')\n", (10175, 10256), True, 'from matplotlib import pyplot as plt\n'), ((10264, 10350), 'matplotlib.pyplot.plot', 'plt.plot', (['md_timestamp', 'testY_per', '"""-"""'], {'linewidth': '(1)', 'label': '"""Persistence Forecast"""'}), "(md_timestamp, testY_per, '-', linewidth=1, label=\n 'Persistence Forecast')\n", (10272, 10350), True, 'from matplotlib import pyplot as plt\n'), ((10358, 10475), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '"""lower left"""', 'ncol': '(3)', 'borderaxespad': '(0.0)', 'fontsize': '"""small"""'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc='lower left', ncol=3,\n borderaxespad=0.0, fontsize='small')\n", (10368, 10475), True, 'from matplotlib import pyplot 
as plt\n'), ((10481, 10499), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10497, 10499), True, 'from matplotlib import pyplot as plt\n'), ((10637, 10648), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10646, 10648), True, 'from matplotlib import pyplot as plt\n'), ((5219, 5243), 'numpy.vstack', 'np.vstack', (['DataX[sensor]'], {}), '(DataX[sensor])\n', (5228, 5243), True, 'import numpy as np\n'), ((5343, 5367), 'numpy.hstack', 'np.hstack', (['DataY[sensor]'], {}), '(DataY[sensor])\n', (5352, 5367), True, 'import numpy as np\n'), ((5467, 5495), 'numpy.hstack', 'np.hstack', (['timestamp[sensor]'], {}), '(timestamp[sensor])\n', (5476, 5495), True, 'import numpy as np\n'), ((6002, 6031), 'numpy.copy', 'np.copy', (['DataX[sensor][:, 14]'], {}), '(DataX[sensor][:, 14])\n', (6009, 6031), True, 'import numpy as np\n'), ((6205, 6234), 'sklearn.preprocessing.scale', 'scale', (['DataX[sensor][:, 1:-1]'], {}), '(DataX[sensor][:, 1:-1])\n', (6210, 6234), False, 'from sklearn.preprocessing import normalize, scale\n'), ((7209, 7234), 'numpy.nanmean', 'np.nanmean', (['DataY[sensor]'], {}), '(DataY[sensor])\n', (7219, 7234), True, 'import numpy as np\n'), ((7347, 7364), 'pickle.load', 'pickle.load', (['fmod'], {}), '(fmod)\n', (7358, 7364), False, 'import pickle\n'), ((9412, 9476), 'numpy.column_stack', 'np.column_stack', (['(ts_fixed, DataY[sensor], testY_hat, testY_per)'], {}), '((ts_fixed, DataY[sensor], testY_hat, testY_per))\n', (9427, 9476), True, 'import numpy as np\n'), ((11019, 11072), 'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['DataY[sensor]', 'testY_hat'], {}), '(DataY[sensor], testY_hat)\n', (11046, 11072), True, 'import sklearn.metrics as metrics\n'), ((11101, 11153), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['DataY[sensor]', 'testY_hat'], {}), '(DataY[sensor], testY_hat)\n', (11127, 11153), True, 'import sklearn.metrics as metrics\n'), ((11183, 11236), 
'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['DataY[sensor]', 'testY_per'], {}), '(DataY[sensor], testY_per)\n', (11210, 11236), True, 'import sklearn.metrics as metrics\n'), ((11266, 11318), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['DataY[sensor]', 'testY_per'], {}), '(DataY[sensor], testY_per)\n', (11292, 11318), True, 'import sklearn.metrics as metrics\n')] |
# Copyright (c) 2018 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import numpy
import random
from common import TestCase
from annoy import AnnoyIndex
def dot_metric(a, b):
    """Negated dot product, so larger dot products rank as *smaller* distances."""
    similarity = numpy.dot(a, b)
    return -similarity
def recall(retrieved, relevant):
    """Fraction of the distinct *relevant* items that appear in *retrieved*."""
    relevant_set = set(relevant)
    hits = relevant_set.intersection(retrieved)
    return len(hits) / len(relevant_set)
class DotIndexTest(TestCase):
    """Tests for Annoy indices built with the 'dot' (dot-product) metric."""
    def test_get_nns_by_vector(self):
        # Neighbours are ranked by descending dot product; for every query
        # below the dot products order the items as 2 > 1 > 0.
        f = 2
        i = AnnoyIndex(f, 'dot')
        i.add_item(0, [2, 2])
        i.add_item(1, [3, 2])
        i.add_item(2, [3, 3])
        i.build(10)
        self.assertEqual(i.get_nns_by_vector([4, 4], 3), [2, 1, 0])
        self.assertEqual(i.get_nns_by_vector([1, 1], 3), [2, 1, 0])
        self.assertEqual(i.get_nns_by_vector([4, 2], 3), [2, 1, 0])
    def test_get_nns_by_item(self):
        # Same ranking as above, but querying by a stored item's id.
        f = 2
        i = AnnoyIndex(f, 'dot')
        i.add_item(0, [2, 2])
        i.add_item(1, [3, 2])
        i.add_item(2, [3, 3])
        i.build(10)
        self.assertEqual(i.get_nns_by_item(0, 3), [2, 1, 0])
        self.assertEqual(i.get_nns_by_item(2, 3), [2, 1, 0])
    def test_dist(self):
        # Pairwise distances are queried without calling build(); with the
        # 'dot' metric get_distance returns the dot product of the vectors.
        f = 2
        i = AnnoyIndex(f, 'dot')
        i.add_item(0, [0, 1])
        i.add_item(1, [1, 1])
        i.add_item(2, [0, 0])
        self.assertAlmostEqual(i.get_distance(0, 1), 1.0)
        self.assertAlmostEqual(i.get_distance(1, 2), 0.0)
    def recall_at(self, n, n_trees=10, n_points=1000, n_rounds=5):
        # Average recall@n of the approximate index against an exact
        # brute-force ranking, over n_rounds sets of Gaussian points.
        # the best movie/variable name
        total_recall = 0.
        for r in range(n_rounds):
            # create random points at distance x
            f = 10
            idx = AnnoyIndex(f, 'dot')
            data = numpy.array([
                [random.gauss(0, 1) for z in range(f)]
                for j in range(n_points)
            ])
            # Exact top-n per query, ranked by the brute-force dot metric.
            expected_results = [
                sorted(
                    range(n_points),
                    key=lambda j: dot_metric(data[i], data[j])
                )[:n]
                for i in range(n_points)
            ]
            for i, vec in enumerate(data):
                idx.add_item(i, vec)
            idx.build(n_trees)
            for i in range(n_points):
                nns = idx.get_nns_by_vector(data[i], n)
                total_recall += recall(nns, expected_results[i])
        return total_recall / float(n_rounds * n_points)
    def test_recall_at_10(self):
        value = self.recall_at(10)
        self.assertGreaterEqual(value, 0.65)
    def test_recall_at_100(self):
        value = self.recall_at(100)
        self.assertGreaterEqual(value, 0.95)
    def test_recall_at_1000(self):
        value = self.recall_at(1000)
        self.assertGreaterEqual(value, 0.99)
    def test_recall_at_1000_fewer_trees(self):
        value = self.recall_at(1000, n_trees=4)
        self.assertGreaterEqual(value, 0.99)
    def test_get_nns_with_distances(self):
        # include_distances=True returns (ids, distances) pairs; with the
        # 'dot' metric the distances are the raw dot products.
        f = 3
        i = AnnoyIndex(f, 'dot')
        i.add_item(0, [0, 0, 2])
        i.add_item(1, [0, 1, 1])
        i.add_item(2, [1, 0, 0])
        i.build(10)
        l, d = i.get_nns_by_item(0, 3, -1, True)
        self.assertEqual(l, [0, 1, 2])
        self.assertAlmostEqual(d[0], 4.0)
        self.assertAlmostEqual(d[1], 2.0)
        self.assertAlmostEqual(d[2], 0.0)
        l, d = i.get_nns_by_vector([2, 2, 2], 3, -1, True)
        self.assertEqual(l, [0, 1, 2])
        self.assertAlmostEqual(d[0], 4.0)
        self.assertAlmostEqual(d[1], 4.0)
        self.assertAlmostEqual(d[2], 2.0)
    def test_include_dists(self):
        # A vector and its negation: the positive dot product comes first.
        f = 40
        i = AnnoyIndex(f, 'dot')
        v = numpy.random.normal(size=f)
        i.add_item(0, v)
        i.add_item(1, -v)
        i.build(10)
        indices, dists = i.get_nns_by_item(0, 2, 10, True)
        self.assertEqual(indices, [0, 1])
        self.assertAlmostEqual(dists[0], numpy.dot(v, v))
    def test_distance_consistency(self):
        # get_distance must agree with the dot product of the stored vectors.
        n, f = 1000, 3
        i = AnnoyIndex(f, 'dot')
        for j in range(n):
            i.add_item(j, numpy.random.normal(size=f))
        i.build(10)
        for a in random.sample(range(n), 100):
            indices, dists = i.get_nns_by_item(a, 100, include_distances=True)
            for b, dist in zip(indices, dists):
                self.assertAlmostEqual(dist, numpy.dot(
                    i.get_item_vector(a),
                    i.get_item_vector(b)
                ))
                self.assertAlmostEqual(dist, i.get_distance(a, b))
| [
"numpy.random.normal",
"numpy.dot",
"random.gauss",
"annoy.AnnoyIndex"
] | [((695, 710), 'numpy.dot', 'numpy.dot', (['a', 'b'], {}), '(a, b)\n', (704, 710), False, 'import numpy\n'), ((934, 954), 'annoy.AnnoyIndex', 'AnnoyIndex', (['f', '"""dot"""'], {}), "(f, 'dot')\n", (944, 954), False, 'from annoy import AnnoyIndex\n'), ((1333, 1353), 'annoy.AnnoyIndex', 'AnnoyIndex', (['f', '"""dot"""'], {}), "(f, 'dot')\n", (1343, 1353), False, 'from annoy import AnnoyIndex\n'), ((1639, 1659), 'annoy.AnnoyIndex', 'AnnoyIndex', (['f', '"""dot"""'], {}), "(f, 'dot')\n", (1649, 1659), False, 'from annoy import AnnoyIndex\n'), ((3412, 3432), 'annoy.AnnoyIndex', 'AnnoyIndex', (['f', '"""dot"""'], {}), "(f, 'dot')\n", (3422, 3432), False, 'from annoy import AnnoyIndex\n'), ((4054, 4074), 'annoy.AnnoyIndex', 'AnnoyIndex', (['f', '"""dot"""'], {}), "(f, 'dot')\n", (4064, 4074), False, 'from annoy import AnnoyIndex\n'), ((4087, 4114), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': 'f'}), '(size=f)\n', (4106, 4114), False, 'import numpy\n'), ((4423, 4443), 'annoy.AnnoyIndex', 'AnnoyIndex', (['f', '"""dot"""'], {}), "(f, 'dot')\n", (4433, 4443), False, 'from annoy import AnnoyIndex\n'), ((2121, 2141), 'annoy.AnnoyIndex', 'AnnoyIndex', (['f', '"""dot"""'], {}), "(f, 'dot')\n", (2131, 2141), False, 'from annoy import AnnoyIndex\n'), ((4329, 4344), 'numpy.dot', 'numpy.dot', (['v', 'v'], {}), '(v, v)\n', (4338, 4344), False, 'import numpy\n'), ((4497, 4524), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': 'f'}), '(size=f)\n', (4516, 4524), False, 'import numpy\n'), ((2193, 2211), 'random.gauss', 'random.gauss', (['(0)', '(1)'], {}), '(0, 1)\n', (2205, 2211), False, 'import random\n')] |
import numpy as np

# Two rows of primes stacked into a single 2x5 integer ndarray.
arr_1 = [2, 3, 5, 7, 11]
arr_2 = [13, 17, 19, 23, 29]
arr_3 = np.array((arr_1, arr_2))
print(arr_3)
| [
"numpy.array"
] | [((83, 107), 'numpy.array', 'np.array', (['[arr_1, arr_2]'], {}), '([arr_1, arr_2])\n', (91, 107), True, 'import numpy as np\n')] |
import chainer
import chainer.functions as F
import numpy as np
import argparse
from chainer import cuda, serializers
from pathlib import Path
from model import Generator, Discriminator, VGG, SAGenerator, SAGeneratorWithGuide
from utils import set_optimizer
from dataset import DataLoader, RefineDataset
from evaluation import Evaluation
# Use CuPy as the array backend and pin all subsequent work to GPU 0.
xp = cuda.cupy
cuda.get_device(0).use()
class LossCalculator:
    """Collection of stateless loss functions used during training."""

    def __init__(self):
        pass

    @staticmethod
    def content_loss(y, t):
        """L1 (mean absolute error) between a generated batch and its target."""
        return F.mean_absolute_error(y, t)

    @staticmethod
    def perceptual_loss(vgg, y, t):
        """MSE between the VGG feature maps of generated and target batches."""
        generated_features = vgg(y)
        target_features = vgg(t)
        return F.mean_squared_error(generated_features, target_features)

    @staticmethod
    def dis_hinge_loss(y_list, t_list):
        """Hinge adversarial loss for the discriminator over paired fake/real outputs."""
        total = 0
        for fake_out, real_out in zip(y_list, t_list):
            real_term = F.mean(F.relu(1. - real_out))
            fake_term = F.mean(F.relu(1. + fake_out))
            total += real_term + fake_term
        return total

    @staticmethod
    def gen_hinge_loss(y_list):
        """Hinge adversarial loss for the generator: maximize discriminator outputs."""
        total = 0
        for fake_out in y_list:
            total += -F.mean(fake_out)
        return total

    @staticmethod
    def positive_enforcing_loss(y):
        """Negative of the summed per-channel MSE between each RGB plane and its scalar mean."""
        total = 0
        batch, _, height, width = y.shape
        for channel in range(3):
            plane = y[:, channel, :, :]
            channel_mean = F.mean(plane)
            broadcast_mean = channel_mean * chainer.as_variable(
                xp.ones(shape=(batch, height, width)).astype(xp.float32))
            total += F.mean_squared_error(plane, broadcast_mean)
        return -total
def train(epochs,
          iterations,
          batchsize,
          validsize,
          data_path,
          sketch_path,
          digi_path,
          extension,
          img_size,
          outdir,
          modeldir,
          pretrained_epoch,
          adv_weight,
          enf_weight,
          sn,
          bn,
          activ):
    """Train the guided self-attention colorization GAN.

    Alternates discriminator and generator hinge-loss updates; once
    ``epoch > pretrained_epoch`` the adversarial/enforcing weights are
    overridden (0.1 / 0.0). At the start of every epoch the generator is
    checkpointed and evaluated on a fixed validation batch.

    NOTE(review): runs entirely on GPU device 0 (module-level cuda setup);
    assumes all model/data tensors are chainer Variables — confirm shapes
    against DataLoader.
    """
    # Dataset Definition
    dataloader = DataLoader(data_path, sketch_path, digi_path,
                            extension=extension, img_size=img_size)
    print(dataloader)
    color_valid, line_valid, mask_valid, ds_valid = dataloader(validsize, mode="valid")

    # Model & Optimizer Definition
    generator = SAGeneratorWithGuide(attn_type="sa", bn=bn, activ=activ)
    #generator = SAGenerator(attn_type="sa", base=64)
    generator.to_gpu()
    gen_opt = set_optimizer(generator)

    discriminator = Discriminator(sn=sn)
    discriminator.to_gpu()
    dis_opt = set_optimizer(discriminator)

    vgg = VGG()
    vgg.to_gpu()
    vgg_opt = set_optimizer(vgg)
    # VGG is used as a frozen feature extractor only.
    vgg.base.disable_update()

    # Loss Function Definition
    lossfunc = LossCalculator()

    # Evaluation Definition
    evaluator = Evaluation()

    for epoch in range(epochs):
        sum_loss = 0
        for batch in range(0, iterations, batchsize):
            color, line, mask, mask_ds = dataloader(batchsize)
            # Line art and mask are concatenated channel-wise as generator input.
            line_input = F.concat([line, mask])
            extractor = vgg(mask, extract=True)
            extractor = F.average_pooling_2d(extractor, 3, 2, 1)
            # Stop gradients from flowing back into VGG.
            extractor.unchain_backward()

            # After the pretraining phase, fix the loss weights.
            if epoch > pretrained_epoch:
                adv_weight = 0.1
                enf_weight = 0.0

            # Discriminator update
            fake, _ = generator(line_input, mask_ds, extractor)
            y_dis = discriminator(fake, extractor)
            t_dis = discriminator(color, extractor)
            loss = adv_weight * lossfunc.dis_hinge_loss(y_dis, t_dis)
            # Detach the fake sample so the generator is not updated here.
            fake.unchain_backward()
            discriminator.cleargrads()
            loss.backward()
            dis_opt.update()
            loss.unchain_backward()

            # Generator update
            fake, guide = generator(line_input, mask_ds, extractor)
            y_dis = discriminator(fake, extractor)
            loss = adv_weight * lossfunc.gen_hinge_loss(y_dis)
            loss += enf_weight * lossfunc.positive_enforcing_loss(fake)
            loss += lossfunc.content_loss(fake, color)
            # The intermediate "guide" output gets a slightly smaller weight.
            loss += 0.9 * lossfunc.content_loss(guide, color)
            loss += lossfunc.perceptual_loss(vgg, fake, color)
            generator.cleargrads()
            loss.backward()
            gen_opt.update()
            loss.unchain_backward()

            sum_loss += loss.data

            # Checkpoint and validate once per epoch (first batch).
            if batch == 0:
                serializers.save_npz(f"{modeldir}/generator_{epoch}.model", generator)
                extractor = vgg(line_valid, extract=True)
                extractor = F.average_pooling_2d(extractor, 3, 2, 1)
                extractor.unchain_backward()
                line_valid_input = F.concat([line_valid, mask_valid])
                with chainer.using_config('train', False):
                    y_valid, guide_valid = generator(line_valid_input, ds_valid, extractor)
                y_valid = y_valid.data.get()
                c_valid = color_valid.data.get()
                input_valid = line_valid_input.data.get()
                guide_valid = guide_valid.data.get()
                evaluator(y_valid, c_valid, input_valid, guide_valid, outdir, epoch, validsize)

        print(f"epoch: {epoch}")
        print(f"loss: {sum_loss / iterations}")
def train_refine(epochs,
                 iterations,
                 batchsize,
                 validsize,
                 data_path,
                 sketch_path,
                 digi_path,
                 st_path,
                 extension,
                 img_size,
                 crop_size,
                 outdir,
                 modeldir,
                 adv_weight,
                 enf_weight):
    """Refinement-stage training on spatially-transformed color hints.

    Same alternating GAN scheme as ``train`` but driven by RefineDataset
    (adds a color-mask hint); no perceptual or enforcing loss is used here.
    Checkpoints and evaluates the generator at the start of every epoch.

    NOTE(review): ``enf_weight`` is accepted for signature parity with the
    caller but is unused in this stage.
    """
    # Dataset Definition
    dataloader = RefineDataset(data_path, sketch_path, digi_path, st_path,
                              extension=extension, img_size=img_size, crop_size=crop_size)
    print(dataloader)
    color_valid, line_valid, mask_valid, ds_valid, cm_valid = dataloader(validsize, mode="valid")

    # Model & Optimizer Definition
    generator = SAGeneratorWithGuide(attn_type="sa", base=64, bn=True, activ=F.relu)
    generator.to_gpu()
    gen_opt = set_optimizer(generator)

    discriminator = Discriminator()
    discriminator.to_gpu()
    dis_opt = set_optimizer(discriminator)

    vgg = VGG()
    vgg.to_gpu()
    vgg_opt = set_optimizer(vgg)
    # VGG is used as a frozen feature extractor only.
    vgg.base.disable_update()

    # Loss Function Definition
    lossfunc = LossCalculator()

    # Evaluation Definition
    evaluator = Evaluation()

    iteration = 0
    for epoch in range(epochs):
        sum_dis_loss = 0
        sum_gen_loss = 0
        for batch in range(0, iterations, batchsize):
            iteration += 1
            color, line, mask, mask_ds, color_mask = dataloader(batchsize)
            line_input = F.concat([line, mask])
            # Features come from the color-mask hint in the refine stage.
            extractor = vgg(color_mask, extract=True)
            extractor = F.average_pooling_2d(extractor, 3, 2, 1)
            extractor.unchain_backward()

            # Discriminator update
            fake, _ = generator(line_input, mask_ds, extractor)
            y_dis = discriminator(fake, extractor)
            t_dis = discriminator(color, extractor)
            loss = adv_weight * lossfunc.dis_hinge_loss(y_dis, t_dis)
            fake.unchain_backward()
            discriminator.cleargrads()
            loss.backward()
            dis_opt.update()
            loss.unchain_backward()

            sum_dis_loss += loss.data

            # Generator update
            fake, guide = generator(line_input, mask_ds, extractor)
            y_dis = discriminator(fake, extractor)
            loss = adv_weight * lossfunc.gen_hinge_loss(y_dis)
            loss += lossfunc.content_loss(fake, color)
            loss += 0.9 * lossfunc.content_loss(guide, color)
            generator.cleargrads()
            loss.backward()
            gen_opt.update()
            loss.unchain_backward()

            sum_gen_loss += loss.data

            # Checkpoint and validate once per epoch (first batch).
            if batch == 0:
                serializers.save_npz(f"{modeldir}/generator_{epoch}.model", generator)
                extractor = vgg(cm_valid, extract=True)
                extractor = F.average_pooling_2d(extractor, 3, 2, 1)
                extractor.unchain_backward()
                line_valid_input = F.concat([line_valid, mask_valid])
                with chainer.using_config('train', False):
                    y_valid, guide_valid = generator(line_valid_input, ds_valid, extractor)
                y_valid = y_valid.data.get()
                c_valid = color_valid.data.get()
                input_valid = line_valid_input.data.get()
                cm_val = cm_valid.data.get()
                guide_valid = guide_valid.data.get()
                # Show the line-art channels next to the color-mask hint.
                input_valid = np.concatenate([input_valid[:, 3:6], cm_val], axis=1)
                evaluator(y_valid, c_valid, input_valid, guide_valid, outdir, epoch, validsize)

        # BUG FIX: the original printed the undefined name `gen_loss`,
        # raising NameError at the end of the first epoch; the accumulated
        # generator loss is `sum_gen_loss`.
        print(f"iter: {iteration} dis loss: {sum_dis_loss} gen loss: {sum_gen_loss}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="RAM")
    parser.add_argument('--e', type=int, default=1000, help="the number of epochs")
    parser.add_argument('--i', type=int, default=2000, help="the number of iterations")
    parser.add_argument('--b', type=int, default=16, help="batch size")
    parser.add_argument('--v', type=int, default=5, help="valid size")
    parser.add_argument('--outdir', type=Path, default='outdir', help="output directory")
    parser.add_argument('--modeldir', type=Path, default='modeldir', help="model output directory")
    parser.add_argument('--ext', type=str, default='.jpg', help="extension of training images")
    parser.add_argument('--size', type=int, default=256, help="size of training images")
    parser.add_argument('--isize', type=int, default=512, help="the overall size")
    parser.add_argument('--pre', type=int, default=100, help="epochs of pretraining")
    parser.add_argument('--enf', type=float, default=0.001, help="the weight of content loss")
    parser.add_argument('--a', type=float, default=0.01, help="the weight of adversarial loss")
    parser.add_argument('--sn', action="store_true", help="enable spectral normalization")
    parser.add_argument('--bn', action="store_true", help="enable batch normalization in G")
    parser.add_argument('--act', default=F.relu, help="activation function in G")
    parser.add_argument('--data_path', type=Path, help="path which contains color images")
    parser.add_argument('--sketch_path', type=Path, help="path which contains sketches")
    parser.add_argument('--digi_path', type=Path, help="path which contains digitals")
    parser.add_argument('--st_path', type=Path, help="path which contains spatial transformers")
    parser.add_argument('--type', type=str, default='normal', help="type of training")
    args = parser.parse_args()

    outdir = args.outdir
    outdir.mkdir(exist_ok=True)
    modeldir = args.modeldir
    modeldir.mkdir(exist_ok=True)

    if args.type == "normal":
        train(args.e, args.i, args.b, args.v, args.data_path, args.sketch_path, args.digi_path,
              args.ext, args.size, outdir, modeldir, args.pre, args.a, args.enf,
              args.sn, args.bn, args.act)
    elif args.type == "refine":
        train_refine(args.e, args.i, args.b, args.v, args.data_path, args.sketch_path,
                     args.digi_path, args.st_path, args.ext, args.isize, args.size,
                     outdir, modeldir, args.a, args.enf)
    else:
        # BUG FIX: the original raised AttributeError unconditionally, even
        # after a successful run; it is only meaningful for an unknown --type.
        raise AttributeError
| [
"chainer.functions.mean_squared_error",
"chainer.functions.relu",
"dataset.RefineDataset",
"argparse.ArgumentParser",
"model.Discriminator",
"chainer.cuda.get_device",
"chainer.functions.concat",
"utils.set_optimizer",
"model.SAGeneratorWithGuide",
"chainer.serializers.save_npz",
"evaluation.Eva... | [((1946, 2036), 'dataset.DataLoader', 'DataLoader', (['data_path', 'sketch_path', 'digi_path'], {'extension': 'extension', 'img_size': 'img_size'}), '(data_path, sketch_path, digi_path, extension=extension, img_size\n =img_size)\n', (1956, 2036), False, 'from dataset import DataLoader, RefineDataset\n'), ((2222, 2278), 'model.SAGeneratorWithGuide', 'SAGeneratorWithGuide', ([], {'attn_type': '"""sa"""', 'bn': 'bn', 'activ': 'activ'}), "(attn_type='sa', bn=bn, activ=activ)\n", (2242, 2278), False, 'from model import Generator, Discriminator, VGG, SAGenerator, SAGeneratorWithGuide\n'), ((2370, 2394), 'utils.set_optimizer', 'set_optimizer', (['generator'], {}), '(generator)\n', (2383, 2394), False, 'from utils import set_optimizer\n'), ((2416, 2436), 'model.Discriminator', 'Discriminator', ([], {'sn': 'sn'}), '(sn=sn)\n', (2429, 2436), False, 'from model import Generator, Discriminator, VGG, SAGenerator, SAGeneratorWithGuide\n'), ((2478, 2506), 'utils.set_optimizer', 'set_optimizer', (['discriminator'], {}), '(discriminator)\n', (2491, 2506), False, 'from utils import set_optimizer\n'), ((2518, 2523), 'model.VGG', 'VGG', ([], {}), '()\n', (2521, 2523), False, 'from model import Generator, Discriminator, VGG, SAGenerator, SAGeneratorWithGuide\n'), ((2555, 2573), 'utils.set_optimizer', 'set_optimizer', (['vgg'], {}), '(vgg)\n', (2568, 2573), False, 'from utils import set_optimizer\n'), ((2713, 2725), 'evaluation.Evaluation', 'Evaluation', ([], {}), '()\n', (2723, 2725), False, 'from evaluation import Evaluation\n'), ((5638, 5761), 'dataset.RefineDataset', 'RefineDataset', (['data_path', 'sketch_path', 'digi_path', 'st_path'], {'extension': 'extension', 'img_size': 'img_size', 'crop_size': 'crop_size'}), '(data_path, sketch_path, digi_path, st_path, extension=\n extension, img_size=img_size, crop_size=crop_size)\n', (5651, 5761), False, 'from dataset import DataLoader, RefineDataset\n'), ((5960, 6028), 'model.SAGeneratorWithGuide', 
'SAGeneratorWithGuide', ([], {'attn_type': '"""sa"""', 'base': '(64)', 'bn': '(True)', 'activ': 'F.relu'}), "(attn_type='sa', base=64, bn=True, activ=F.relu)\n", (5980, 6028), False, 'from model import Generator, Discriminator, VGG, SAGenerator, SAGeneratorWithGuide\n'), ((6066, 6090), 'utils.set_optimizer', 'set_optimizer', (['generator'], {}), '(generator)\n', (6079, 6090), False, 'from utils import set_optimizer\n'), ((6112, 6127), 'model.Discriminator', 'Discriminator', ([], {}), '()\n', (6125, 6127), False, 'from model import Generator, Discriminator, VGG, SAGenerator, SAGeneratorWithGuide\n'), ((6169, 6197), 'utils.set_optimizer', 'set_optimizer', (['discriminator'], {}), '(discriminator)\n', (6182, 6197), False, 'from utils import set_optimizer\n'), ((6209, 6214), 'model.VGG', 'VGG', ([], {}), '()\n', (6212, 6214), False, 'from model import Generator, Discriminator, VGG, SAGenerator, SAGeneratorWithGuide\n'), ((6246, 6264), 'utils.set_optimizer', 'set_optimizer', (['vgg'], {}), '(vgg)\n', (6259, 6264), False, 'from utils import set_optimizer\n'), ((6404, 6416), 'evaluation.Evaluation', 'Evaluation', ([], {}), '()\n', (6414, 6416), False, 'from evaluation import Evaluation\n'), ((8935, 8977), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""RAM"""'}), "(description='RAM')\n", (8958, 8977), False, 'import argparse\n'), ((355, 373), 'chainer.cuda.get_device', 'cuda.get_device', (['(0)'], {}), '(0)\n', (370, 373), False, 'from chainer import cuda, serializers\n'), ((503, 530), 'chainer.functions.mean_absolute_error', 'F.mean_absolute_error', (['y', 't'], {}), '(y, t)\n', (524, 530), True, 'import chainer.functions as F\n'), ((675, 711), 'chainer.functions.mean_squared_error', 'F.mean_squared_error', (['y_feat', 't_feat'], {}), '(y_feat, t_feat)\n', (695, 711), True, 'import chainer.functions as F\n'), ((1350, 1360), 'chainer.functions.mean', 'F.mean', (['ch'], {}), '(ch)\n', (1356, 1360), True, 'import chainer.functions as F\n'), 
((1471, 1501), 'chainer.functions.mean_squared_error', 'F.mean_squared_error', (['ch', 'mean'], {}), '(ch, mean)\n', (1491, 1501), True, 'import chainer.functions as F\n'), ((2922, 2944), 'chainer.functions.concat', 'F.concat', (['[line, mask]'], {}), '([line, mask])\n', (2930, 2944), True, 'import chainer.functions as F\n'), ((3018, 3058), 'chainer.functions.average_pooling_2d', 'F.average_pooling_2d', (['extractor', '(3)', '(2)', '(1)'], {}), '(extractor, 3, 2, 1)\n', (3038, 3058), True, 'import chainer.functions as F\n'), ((6700, 6722), 'chainer.functions.concat', 'F.concat', (['[line, mask]'], {}), '([line, mask])\n', (6708, 6722), True, 'import chainer.functions as F\n'), ((6802, 6842), 'chainer.functions.average_pooling_2d', 'F.average_pooling_2d', (['extractor', '(3)', '(2)', '(1)'], {}), '(extractor, 3, 2, 1)\n', (6822, 6842), True, 'import chainer.functions as F\n'), ((1096, 1105), 'chainer.functions.mean', 'F.mean', (['y'], {}), '(y)\n', (1102, 1105), True, 'import chainer.functions as F\n'), ((4326, 4396), 'chainer.serializers.save_npz', 'serializers.save_npz', (['f"""{modeldir}/generator_{epoch}.model"""', 'generator'], {}), "(f'{modeldir}/generator_{epoch}.model', generator)\n", (4346, 4396), False, 'from chainer import cuda, serializers\n'), ((4484, 4524), 'chainer.functions.average_pooling_2d', 'F.average_pooling_2d', (['extractor', '(3)', '(2)', '(1)'], {}), '(extractor, 3, 2, 1)\n', (4504, 4524), True, 'import chainer.functions as F\n'), ((4605, 4639), 'chainer.functions.concat', 'F.concat', (['[line_valid, mask_valid]'], {}), '([line_valid, mask_valid])\n', (4613, 4639), True, 'import chainer.functions as F\n'), ((7910, 7980), 'chainer.serializers.save_npz', 'serializers.save_npz', (['f"""{modeldir}/generator_{epoch}.model"""', 'generator'], {}), "(f'{modeldir}/generator_{epoch}.model', generator)\n", (7930, 7980), False, 'from chainer import cuda, serializers\n'), ((8066, 8106), 'chainer.functions.average_pooling_2d', 'F.average_pooling_2d', 
(['extractor', '(3)', '(2)', '(1)'], {}), '(extractor, 3, 2, 1)\n', (8086, 8106), True, 'import chainer.functions as F\n'), ((8187, 8221), 'chainer.functions.concat', 'F.concat', (['[line_valid, mask_valid]'], {}), '([line_valid, mask_valid])\n', (8195, 8221), True, 'import chainer.functions as F\n'), ((8655, 8708), 'numpy.concatenate', 'np.concatenate', (['[input_valid[:, 3:6], cm_val]'], {'axis': '(1)'}), '([input_valid[:, 3:6], cm_val], axis=1)\n', (8669, 8708), True, 'import numpy as np\n'), ((884, 899), 'chainer.functions.relu', 'F.relu', (['(1.0 - t)'], {}), '(1.0 - t)\n', (890, 899), True, 'import chainer.functions as F\n'), ((909, 924), 'chainer.functions.relu', 'F.relu', (['(1.0 + y)'], {}), '(1.0 + y)\n', (915, 924), True, 'import chainer.functions as F\n'), ((4662, 4698), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (4682, 4698), False, 'import chainer\n'), ((8244, 8280), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (8264, 8280), False, 'import chainer\n')] |
# -*- coding:utf-8 -*-
# author: gfjiangly
# time: 2019/5/6 18:40
# e-mail: <EMAIL>
# software: PyCharm
"""
General Dataset Classes
"""
import random
import torch
import torch.utils.data as data
import cv2
import numpy as np
class AnnotationTransform(object):
    """Transform an annotation into a list of normalized bbox coords + label.

    Each annotation entry is a string "x1,y1,x2,y2,label" with 1-based pixel
    coordinates. Coordinates are shifted to 0-based, normalized by the image
    size, and clamped into [0, 1].

    Arguments:
        (none) — the transform is stateless.
    """

    def __init__(self):
        pass

    def __call__(self, target, width, height, ignore_class=False):
        """
        Arguments:
            target (iterable of str): annotation strings "x1,y1,x2,y2,label"
            width (int): image width used for normalization
            height (int): image height used for normalization
            ignore_class (bool): if True, force every label index to 0
        Returns:
            list of [xmin, ymin, xmax, ymax, label_ind] with coords in [0, 1]
        """
        scale = np.array([width, height, width, height]).astype(np.float32)
        res = []
        for box in target:
            box = box.split(',')
            # Shift to 0-based pixel coordinates.
            box[:4] = list(map(lambda x: float(x) - 1., box[:4]))
            # Drop degenerate boxes narrower or shorter than 2 pixels.
            if box[2] - box[0] < 2. or box[3] - box[1] < 2.:
                continue
            box[4] = int(box[4])
            bbox = np.array(box[0:4]) / scale
            # BUG FIX: removed the original dead check
            # `if bbox.any() < 0. or bbox.any() > 1.: pass` — it compared a
            # bool to floats (always False) and its body did nothing.
            # Clamp normalized coordinates into [0, 1].
            bbox[bbox < 0] = 0.
            bbox[bbox > 1] = 1.
            bbox = list(bbox)
            if ignore_class:
                box[4] = 0
            bbox.append(box[4])
            res += [bbox]  # [xmin, ymin, xmax, ymax, label_ind]
        return res  # [[xmin, ymin, xmax, ymax, label_ind], ... ]
class GeneralDataset(data.Dataset):
    """ELEVATOR Detection Dataset Object.

    input is image, target is annotation

    Arguments:
        root (string): filepath to the dataset root folder.
        data_list (list of str): label files; each line is
            "<image path> x1,y1,x2,y2,label [x1,y1,x2,y2,label ...]"
        transform (callable, optional): transformation to perform on the
            input image
        target_transform (callable, optional): transformation to perform on
            the target `annotation`
        dataset_name (string, optional): which dataset to load
    """

    def __init__(self, root, data_list,
                 transform=None, target_transform=AnnotationTransform(),
                 dataset_name='ELEVATOR'):
        self.root = root
        self.transform = transform
        self.target_transform = target_transform
        self.name = dataset_name
        self.label_files = data_list
        self.ids = list()
        # Each line of a label file describes one sample; files are
        # gbk-encoded (paths may contain Chinese characters).
        for label_file in self.label_files:
            with open(self.root + label_file, 'r', encoding='gbk') as f:
                self.ids += f.readlines()

    def __getitem__(self, index):
        try:
            # Discard samples that fail to load.
            im, gt, h, w = self.pull_item(index)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. For corrupt samples or loading
            # errors, substitute a randomly chosen image instead.
            new_index = random.randint(0, len(self) - 1)
            return self[new_index]
        return im, gt

    def __len__(self):
        return len(self.ids)

    def pull_item(self, index):
        """Load one sample: returns (CHW tensor, target, height, width)."""
        img_id = self.ids[index]
        img_id = img_id.split()
        if len(img_id) > 1:
            target = img_id[1:]
        else:
            print("waring: !image ", img_id[0], "has no box")
            target = None
        # cv2.imread returns None when the path cannot be read (e.g. it
        # contains non-ASCII characters); the resulting AttributeError is
        # handled by __getitem__'s fallback.
        img = cv2.imread(img_id[0])
        if img is None:
            print("waring: !can't read image ", img_id[0])
        height, width, channels = img.shape
        if self.target_transform is not None:
            # target_transform must implement __call__.
            target = self.target_transform(target, width, height, ignore_class=False)
        if self.transform is not None:
            target = np.array(target)
            img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
            # BGR -> RGB
            img = img[:, :, (2, 1, 0)]
            target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
        return torch.from_numpy(img).permute(2, 0, 1), target, height, width

    def pull_image(self, index):
        '''Returns the original image object at index in PIL form

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            PIL img
        '''
        img_id = self.ids[index]
        return cv2.imread(img_id.split()[0], cv2.IMREAD_COLOR)

    def pull_anno(self, index):
        '''Returns the original annotation of image at index

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to get annotation of
        Return:
            list: [img_id, [(label, bbox coords),...]]
                eg: ('001718', [('dog', (96, 13, 438, 332))])
        '''
        img_id = self.ids[index]
        return img_id

    def pull_tensor(self, index):
        '''Returns the original image at an index in tensor form

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            tensorized version of img, squeezed
        '''
        return torch.Tensor(self.pull_image(index)).unsqueeze_(0)
| [
"numpy.array",
"torch.from_numpy",
"cv2.imread",
"numpy.expand_dims"
] | [((3869, 3890), 'cv2.imread', 'cv2.imread', (['img_id[0]'], {}), '(img_id[0])\n', (3879, 3890), False, 'import cv2\n'), ((4260, 4276), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (4268, 4276), True, 'import numpy as np\n'), ((1086, 1126), 'numpy.array', 'np.array', (['[width, height, width, height]'], {}), '([width, height, width, height])\n', (1094, 1126), True, 'import numpy as np\n'), ((1519, 1537), 'numpy.array', 'np.array', (['box[0:4]'], {}), '(box[0:4])\n', (1527, 1537), True, 'import numpy as np\n'), ((4501, 4531), 'numpy.expand_dims', 'np.expand_dims', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (4515, 4531), True, 'import numpy as np\n'), ((4549, 4570), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (4565, 4570), False, 'import torch\n')] |
# %load code/engram_functions.py
# Import dependencies
import xlrd
import numpy as np
from sympy.utilities.iterables import multiset_permutations
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
def permute_optimize_keys(fixed_letters, fixed_letter_indices, open_letter_indices,
                          all_letters, keys, data_matrix, bigrams, bigram_frequencies,
                          min_score=0, verbose=False):
    """
    Permute the unassigned letters over the open key positions, optimize the
    layout, and return the winner together with the permutations tried.
    """
    submatrix = select_keys(data_matrix, keys, verbose=False)

    # Collect one free letter per open position, in all_letters order.
    n_open = len(open_letter_indices)
    free_letters = []
    for candidate in all_letters:
        if candidate in fixed_letters:
            continue
        free_letters.append(candidate)
        if len(free_letters) == n_open:
            break

    letter_permutations = permute_letters(free_letters, verbose)
    if verbose:
        print("{0} permutations".format(len(letter_permutations)))

    top_permutation, top_score = optimize_layout(
        np.array([]), submatrix, bigrams, bigram_frequencies,
        letter_permutations, open_letter_indices,
        fixed_letters, fixed_letter_indices, min_score, verbose)
    return top_permutation, top_score, letter_permutations
def permute_optimize(starting_permutation, letters, all_letters, all_keys,
                     data_matrix, bigrams, bigram_frequencies, min_score=0, verbose=False):
    """
    Split a partially-filled layout into fixed and open positions, permute
    fresh letters over the open ones, and return the optimized layout.
    """
    submatrix = select_keys(data_matrix, all_keys, verbose=False)

    open_positions, fixed_positions = [], []
    open_letters, fixed_letters = [], []
    taken = []
    for position, letter in enumerate(letters):
        if letter.strip():
            # Non-blank entries stay where they are.
            fixed_positions.append(position)
            fixed_letters.append(letter)
        else:
            # Blank entry: assign the first still-unused letter to this slot.
            open_positions.append(position)
            for candidate in all_letters:
                if candidate not in letters and candidate not in taken:
                    open_letters.append(candidate)
                    taken.append(candidate)
                    break

    letter_permutations = permute_letters(open_letters, verbose)
    if verbose:
        print("{0} permutations".format(len(letter_permutations)))
    top_permutation, top_score = optimize_layout(
        starting_permutation, submatrix, bigrams,
        bigram_frequencies, letter_permutations, open_positions,
        fixed_letters, fixed_positions, min_score, verbose)
    return top_permutation, top_score
def select_keys(data_matrix, keys, verbose=False):
    """
    Extract the pairwise submatrix for the given (1-based) key numbers and
    min-max rescale it so that its maximum is exactly 1.
    """
    # Pull out all pairwise entries for the selected keys at once.
    idx = [k - 1 for k in keys]
    Select = data_matrix[np.ix_(idx, idx)].astype(float)

    # Min-max scale to the range [min/max, 1].
    lo = np.min(Select)
    hi = np.max(Select)
    newMin = lo / hi
    newMax = 1.0
    Select = newMin + (Select - lo) * (newMax - newMin) / (hi - lo)

    if verbose:
        # Heatmap of array
        heatmap(data=Select, title="Matrix heatmap", xlabel="Key 1", ylabel="Key 2", print_output=False)
        plt.show()
    return Select
def permute_letters(letters, verbose=False):
    """
    Return every distinct permutation of the letters as a 2-D numpy array
    (practical only for roughly 8-10 letters).
    """
    perms = [list(p) for p in multiset_permutations(letters)]
    return np.array(perms)
def score_layout(data_matrix, letters, bigrams, bigram_frequencies, verbose=False):
    """
    Compute the score for a given letter-key layout.

    Builds the layout's bigram-frequency matrix, min-max scales it so its
    maximum is 1, and returns the mean elementwise product with data_matrix.
    """
    # Create a matrix of bigram frequencies:
    nletters = len(letters)
    F2 = np.zeros((nletters, nletters))
    # Find the bigram frequency for each ordered pair of letters in the permutation:
    for i1 in range(nletters):
        for i2 in range(nletters):
            bigram = letters[i1] + letters[i2]
            i2gram = np.where(bigrams == bigram)
            if np.size(i2gram) > 0:
                F2[i1, i2] = bigram_frequencies[i2gram][0]
    # Normalize the matrix with min-max scaling to a range with max 1:
    newMax = 1
    minF2 = np.min(F2)
    maxF2 = np.max(F2)
    newMin2 = minF2 / maxF2
    # BUG FIX: the original referenced the undefined name `newMin` here,
    # raising NameError; `newMin2` is the intended lower bound.
    F2 = newMin2 + (F2 - minF2) * (newMax - newMin2) / (maxF2 - minF2)
    # Compute the score for this permutation:
    score = np.average(data_matrix * F2)
    if verbose:
        print("Score for letter permutation {0}: {1}".format(letters, score))
    return score
def tally_bigrams(input_text, bigrams, normalize=True, verbose=False):
    """
    Count how often each bigram in `bigrams` occurs in `input_text`,
    optionally min-max scaling the counts so that the maximum is 1.
    """
    chars = [str.upper(ch) for ch in input_text]
    counts = np.zeros(len(bigrams))
    # Slide a two-character window over the text and tally known bigrams.
    for left, right in zip(chars[:-1], chars[1:]):
        hit = np.where(bigrams == left + right)
        if np.size(hit) > 0:
            counts[hit] += 1
    if normalize:
        # Min-max scale to the range [min/max, 1].
        lo, hi = np.min(counts), np.max(counts)
        scaled_min = lo / hi
        counts = scaled_min + (counts - lo) * (1 - scaled_min) / (hi - lo)
    if verbose:
        print("Bigram frequencies for input: {0}".format(counts))
    return counts
def tally_layout_samefinger_bigrams(layout, bigrams, bigram_frequencies, nkeys=32, verbose=False):
    """
    Tally the number of same-finger bigrams within (a list of 24 letters representing) a layout:
    ['P','Y','O','U','C','I','E','A','G','K','J','X','M','D','L','B','R','T','N','S','H','V','W','F']

    For each key pair typed by the same finger, both letter orders are
    checked against `bigrams`, and matching frequencies are rescaled by
    max_frequency / max(bigram_frequencies).

    NOTE(review): nkeys must be 24 or 32; any other value leaves
    same_finger_keys undefined and raises NameError below.
    """
    if nkeys == 32:
        #  Left:            Right:
        #   1  2  3  4   25 28   13 14 15 16  31
        #   5  6  7  8   26 29   17 18 19 20  32
        #   9 10 11 12   27 30   21 22 23 24
        # 1-based key numbers; each sublist is a pair struck by one finger.
        same_finger_keys = [[1,5],[5,9],[1,9], [2,6],[6,10],[2,10],
                            [3,7],[7,11],[3,11], [4,8],[8,12],[4,12],
                            [25,26],[26,27],[25,27], [28,29],[29,30],[28,30], [31,32],
                            [4,25],[4,26],[4,27], [8,25],[8,26],[8,27], [12,25],[12,26],[12,27],
                            [13,28],[13,29],[13,30], [17,28],[17,29],[17,30], [21,28],[21,29],[21,30],
                            [31,16],[31,20],[31,24], [32,16],[32,20],[32,24],
                            [13,17],[17,21],[13,21], [14,18],[18,22],[14,22],
                            [15,19],[19,23],[15,23], [16,20],[20,24],[16,24]]
    elif nkeys == 24:
        #   1  2  3  4     13 14 15 16
        #   5  6  7  8     17 18 19 20
        #   9 10 11 12     21 22 23 24
        same_finger_keys = [[1,5],[5,9],[1,9], [2,6],[6,10],[2,10],
                            [3,7],[7,11],[3,11], [4,8],[8,12],[4,12],
                            [13,17],[17,21],[13,21], [14,18],[18,22],[14,22],
                            [15,19],[19,23],[15,23], [16,20],[20,24],[16,24]]
    layout = [str.upper(x) for x in layout]
    # Presumably the corpus-wide total bigram count used to turn relative
    # frequencies back into absolute counts — TODO confirm.
    max_frequency = 1.00273E+11
    samefinger_bigrams = []
    samefinger_bigram_counts = []
    for bigram_keys in same_finger_keys:
        # Check both letter orders for the key pair (keys are 1-based).
        bigram1 = layout[bigram_keys[0]-1] + layout[bigram_keys[1]-1]
        bigram2 = layout[bigram_keys[1]-1] + layout[bigram_keys[0]-1]
        i2gram1 = np.where(bigrams == bigram1)
        i2gram2 = np.where(bigrams == bigram2)
        if np.size(i2gram1) > 0:
            samefinger_bigrams.append(bigram1)
            samefinger_bigram_counts.append(max_frequency * bigram_frequencies[i2gram1] / np.max(bigram_frequencies))
        if np.size(i2gram2) > 0:
            samefinger_bigrams.append(bigram2)
            samefinger_bigram_counts.append(max_frequency * bigram_frequencies[i2gram2] / np.max(bigram_frequencies))
    samefinger_bigrams_total = np.sum([x[0] for x in samefinger_bigram_counts])
    if verbose:
        print("    Total same-finger bigram frequencies: {0:15.0f}".format(samefinger_bigrams_total))
    return samefinger_bigrams, samefinger_bigram_counts, samefinger_bigrams_total
def tally_layout_bigram_rolls(layout, bigrams, bigram_frequencies, nkeys=32, verbose=False):
    """
    Tally the number of bigrams that engage little-to-index finger inward rolls
    for (a list of 24 or 32 letters representing) a layout,
    within the four columns of one hand, or any column across two hands.

    layout = ['P','Y','O','U','C','I','E','A','G','K','J','X','L','D','B','V','N','T','R','S','H','M','W','F']
    bigram_rolls, bigram_roll_counts, bigram_rolls_total = tally_layout_bigram_rolls(layout, bigrams, bigram_frequencies, nkeys=24, verbose=True)

    NOTE(review): nkeys must be 24 or 32; any other value leaves roll_keys
    undefined and raises NameError below.
    """
    if nkeys == 32:
        #  Left:            Right:
        #   1  2  3  4   25 28   13 14 15 16  31
        #   5  6  7  8   26 29   17 18 19 20  32
        #   9 10 11 12   27 30   21 22 23 24
        # 1-based key pairs considered an inward roll within one hand.
        roll_keys = [[1,2],[2,3],[3,4], [5,6],[6,7],[7,8], [9,10],[10,11],[11,12],
                     [16,15],[15,14],[14,13], [20,19],[19,18],[18,17], [24,23],[23,22],[22,21],
                     [1,3],[2,4],[1,4], [5,7],[6,8],[5,8], [9,11],[10,12],[9,12],
                     [16,14],[15,13],[16,13], [20,18],[19,17],[20,17], [24,22],[23,21],[24,21],
                     [1,6],[1,7],[1,8],[2,7],[2,8],[3,8],
                     [5,2],[5,3],[5,4],[6,3],[6,4],[7,4],
                     [5,10],[5,11],[5,12],[6,11],[6,12],[7,12],
                     [9,6],[9,7],[9,8],[10,7],[10,8],[11,8],
                     [16,19],[16,18],[16,17],[15,18],[15,17],[14,17],
                     [20,15],[20,14],[20,13],[19,14],[19,13],[18,13],
                     [20,23],[20,22],[20,21],[19,22],[19,21],[18,21],
                     [24,19],[24,18],[24,17],[23,18],[23,17],[22,17],
                     [1,10],[1,11],[1,12],[2,11],[2,12],[3,12],
                     [9,2],[9,3],[9,4],[10,3],[10,4],[11,4],
                     [16,23],[16,22],[16,21],[15,22],[15,21],[14,21],
                     [24,15],[24,14],[24,13],[23,14],[23,13],[22,13]]
        # Any left-hand key followed by any right-hand key and vice versa.
        for i in [1,2,3,4,5,6,7,8,9,10,11,12, 25,26,27]:
            for j in [13,14,15,16,17,18,19,20,21,22,23,24, 28,29,30,31,32]:
                roll_keys.append([i,j])
        for i in [13,14,15,16,17,18,19,20,21,22,23,24, 28,29,30,31,32]:
            for j in [1,2,3,4,5,6,7,8,9,10,11,12, 25,26,27]:
                roll_keys.append([i,j])
    elif nkeys == 24:
        #   1  2  3  4     13 14 15 16
        #   5  6  7  8     17 18 19 20
        #   9 10 11 12     21 22 23 24
        roll_keys = [[1,2],[2,3],[3,4], [5,6],[6,7],[7,8], [9,10],[10,11],[11,12],
                     [16,15],[15,14],[14,13], [20,19],[19,18],[18,17], [24,23],[23,22],[22,21],
                     [1,3],[2,4],[1,4], [5,7],[6,8],[5,8], [9,11],[10,12],[9,12],
                     [16,14],[15,13],[16,13], [20,18],[19,17],[20,17], [24,22],[23,21],[24,21],
                     [1,6],[1,7],[1,8],[2,7],[2,8],[3,8], [5,2],[5,3],[5,4],[6,3],[6,4],[7,4],
                     [5,10],[5,11],[5,12],[6,11],[6,12],[7,12], [9,6],[9,7],[9,8],[10,7],[10,8],[11,8],
                     [16,19],[16,18],[16,17],[15,18],[15,17],[14,17], [20,15],[20,14],[20,13],[19,14],[19,13],[18,13],
                     [20,23],[20,22],[20,21],[19,22],[19,21],[18,21], [24,19],[24,18],[24,17],[23,18],[23,17],[22,17],
                     [1,10],[1,11],[1,12],[2,11],[2,12],[3,12], [9,2],[9,3],[9,4],[10,3],[10,4],[11,4],
                     [16,23],[16,22],[16,21],[15,22],[15,21],[14,21], [24,15],[24,14],[24,13],[23,14],[23,13],[22,13]]
        # BUG FIX: cross-hand pairs used 0-based ranges (range(0,12) /
        # range(12,24)) while every key number here is 1-based and indexed
        # via layout[key-1]; key 0 wrapped around to layout[-1] and all
        # pairs involving key 24 were missed. Use 1-based ranges, matching
        # the explicit lists in the nkeys == 32 branch.
        for i in range(1, 13):
            for j in range(13, 25):
                roll_keys.append([i, j])
        for i in range(13, 25):
            for j in range(1, 13):
                roll_keys.append([i, j])
    layout = [str.upper(x) for x in layout]
    # Presumably the corpus-wide total bigram count used to turn relative
    # frequencies back into absolute counts — TODO confirm.
    max_frequency = 1.00273E+11
    bigram_rolls = []
    bigram_roll_counts = []
    for bigram_keys in roll_keys:
        # Check both letter orders for the key pair (keys are 1-based).
        bigram1 = layout[bigram_keys[0]-1] + layout[bigram_keys[1]-1]
        bigram2 = layout[bigram_keys[1]-1] + layout[bigram_keys[0]-1]
        i2gram1 = np.where(bigrams == bigram1)
        i2gram2 = np.where(bigrams == bigram2)
        if np.size(i2gram1) > 0:
            bigram_rolls.append(bigram1)
            bigram_roll_counts.append(max_frequency * bigram_frequencies[i2gram1] / np.max(bigram_frequencies))
        if np.size(i2gram2) > 0:
            bigram_rolls.append(bigram2)
            bigram_roll_counts.append(max_frequency * bigram_frequencies[i2gram2] / np.max(bigram_frequencies))
    bigram_rolls_total = np.sum([x[0] for x in bigram_roll_counts])
    if verbose:
        print("    Total bigram inward roll frequencies: {0:15.0f}".format(bigram_rolls_total))
    return bigram_rolls, bigram_roll_counts, bigram_rolls_total
def optimize_layout(starting_permutation, data_matrix, bigrams, bigram_frequencies, letter_permutations,
                    open_positions, fixed_letters, fixed_positions=[], min_score=0, verbose=False):
    """
    Score every candidate letter permutation and return the best layout.

    For each permutation, open positions receive the permuted letters and
    fixed positions keep their fixed letters; the layout is scored as the
    mean elementwise product of data_matrix with the min-max-scaled
    bigram-frequency matrix. Returns (best letters array, best score);
    permutations scoring <= min_score leave starting_permutation in place.
    """
    top_permutation = starting_permutation
    top_score = min_score
    use_score_function = False
    nletters = len(open_positions) + len(fixed_positions)
    # NOTE(review): F2 is reused across permutations without being reset, so
    # entries for bigrams absent from a later permutation keep stale values
    # from earlier ones — confirm whether this is intended.
    F2 = np.zeros((nletters, nletters))
    # Loop through the permutations of the selected letters:
    for p in letter_permutations:
        letters = np.array(['W' for x in range(nletters)])  # KEEP to initialize!
        for imove, open_position in enumerate(open_positions):
            letters[open_position] = p[imove]
        for ifixed, fixed_position in enumerate(fixed_positions):
            letters[fixed_position] = fixed_letters[ifixed]
        # Compute the score for this permutation:
        if use_score_function:
            score = score_layout(data_matrix, letters, bigrams, bigram_frequencies, verbose=False)
        else:
            # Find the bigram frequency for each ordered pair of letters in the permutation:
            for i1 in range(nletters):
                for i2 in range(nletters):
                    bigram = letters[i1] + letters[i2]
                    i2gram = np.where(bigrams == bigram)
                    if np.size(i2gram) > 0:
                        F2[i1, i2] = bigram_frequencies[i2gram][0]
            # Normalize the matrix with min-max scaling to a range with max 1:
            newMax = 1
            minF2 = np.min(F2)
            maxF2 = np.max(F2)
            newMin2 = minF2 / maxF2
            # BUG FIX: the original referenced the undefined name `newMin`
            # here, raising NameError; `newMin2` is the intended lower bound.
            F = newMin2 + (F2 - minF2) * (newMax - newMin2) / (maxF2 - minF2)
            # Compute the score for this permutation:
            score = np.average(data_matrix * F)
        if score > top_score:
            top_score = score
            top_permutation = letters
    if verbose:
        if top_score == min_score:
            print("top_score = min_score")
        print("{0:0.8f}".format(top_score))
        print(*top_permutation)
    return top_permutation, top_score
def exchange_letters(letters, fixed_letter_indices, all_letters, all_keys, data_matrix,
                     bigrams, bigram_frequencies, verbose=True):
    """
    Exchange letters, 8 keys at a time (8! = 40,320) selected twice in 14 different ways.

    Key indices:
         0  1  2  3     12 13 14 15
         4  5  6  7     16 17 18 19
         8  9 10 11     20 21 22 23

    1. Top rows
         0  1  2  3    12 13 14 15
    2. Bottom rows
         8  9 10 11    20 21 22 23
    3. Top and bottom rows on the right side
        12 13 14 15
        20 21 22 23
    4. Top and bottom rows on the left side
         0  1  2  3
         8  9 10 11
    5. Top right and bottom left rows
        12 13 14 15
         8  9 10 11
    6. Top left and bottom right rows
         0  1  2  3
        20 21 22 23
    7. Center of the top and bottom rows on both sides
         1  2 13 14
         9 10 21 22
    8. The eight corners
         0  3 12 15
         8 11 20 23
    9. Left half of the top and bottom rows on both sides
         0  1 12 13
         8  9 20 21
    10. Right half of the top and bottom rows on both sides
         2  3 14 15
        10 11 22 23
    11. Left half of non-home rows on the left and right half of the same rows on the right
         0  1 14 15
         8  9 22 23
    12. Right half of non-home rows on the left and left half of the same rows on the right
         2  3 12 13
        10 11 20 21
    13. Top center and lower sides
         1  2 13 14
         8 11 20 23
    14. Top sides and lower center
         0  3 12 15
         9 10 21 22
    15. Repeat 1-14

    Returns (top_permutation, top_score).

    NOTE(review): the ``all_letters`` and ``all_keys`` parameters are never used;
    the module-level ``letters24`` and ``keys24`` are passed to permute_optimize
    instead — confirm this is intentional.
    """
    # Score of the starting layout; only strictly better permutations replace it.
    # score_layout and permute_optimize are defined elsewhere in this module.
    top_score = score_layout(data_matrix, letters, bigrams, bigram_frequencies, verbose=False)
    print('Initial score: {0}'.format(top_score))
    print(*letters)
    # Alias, not a copy: the first iteration below blanks entries of the caller's
    # `letters` in place, before top_permutation is re-bound to the optimizer's result.
    top_permutation = letters
    # The 14 groups of 8 key indices described in the docstring above.
    lists_of_open_indices = [
        [0,1,2,3,12,13,14,15],
        [8,9,10,11,20,21,22,23],
        [12,13,14,15,20,21,22,23],
        [0,1,2,3,8,9,10,11],
        [12,13,14,15,8,9,10,11],
        [0,1,2,3,20,21,22,23],
        [1,2,13,14,9,10,21,22],
        [0,3,12,15,8,11,20,23],
        [0,1,12,13,8,9,20,21],
        [2,3,14,15,10,11,22,23],
        [0,1,14,15,8,9,22,23],
        [2,3,12,13,10,11,20,21],
        [1,2,8,11,13,14,20,23],
        [0,3,9,10,12,15,21,22]
    ]
    # Human-readable label for each group, printed when verbose.
    lists_of_print_statements = [
        '1. Top rows',
        '2. Bottom rows',
        '3. Top and bottom rows on the right side',
        '4. Top and bottom rows on the left side',
        '5. Top right and bottom left rows',
        '6. Top left and bottom right rows',
        '7. Center of the top and bottom rows on both sides',
        '8. The eight corners',
        '9. Left half of the top and bottom rows on both sides',
        '10. Right half of the top and bottom rows on both sides',
        '11. Left half of non-home rows on the left and right half of the same rows on the right',
        '12. Right half of non-home rows on the left and left half of the same rows on the right',
        '13. Top center and lower sides',
        '14. Top sides and lower center'
    ]
    # Run the full battery of 14 exchanges twice (Set 1, then Set 2).
    for istep in [1,2]:
        if istep == 1:
            s = "Set 1: 14 letter exchanges: "
        elif istep == 2:
            s = "Set 2: 14 letter exchanges: "
        for ilist, open_indices in enumerate(lists_of_open_indices):
            print_statement = lists_of_print_statements[ilist]
            if verbose:
                print('{0} {1}'.format(s, print_statement))
            # Keep the current best as the fallback, then free the selected
            # (non-fixed) keys so permute_optimize can try every permutation.
            starting_permutation = top_permutation.copy()
            for open_index in open_indices:
                if open_index not in fixed_letter_indices:
                    top_permutation[open_index] = ''
            top_permutation, top_score = permute_optimize(starting_permutation, top_permutation, letters24,
                                                          keys24, data_matrix, bigrams, bigram_frequencies,
                                                          min_score=top_score, verbose=True)
        if verbose:
            print('')
            print('    -------- DONE --------')
            print('')
    return top_permutation, top_score
def rank_within_epsilon(numbers, epsilon, factor=False, verbose=True):
    """
    Rank numbers in descending order, giving equal rank to numbers that lie
    within epsilon (or within a multiplicative factor) of each other.

    Parameters
    ----------
    numbers : array-like of scores
    epsilon : absolute tolerance, or multiplicative factor when ``factor`` is True
    factor : if True, the lower bound for a tie group is ``num * epsilon``
             instead of ``num - epsilon``
    verbose : print "(rank)  value" per sorted number

    Returns
    -------
    numbers_sorted : numbers in descending order
    ranks_sorted : list of int ranks aligned with ``numbers_sorted``
    Isort : argsort indices mapping the input to descending order

    Example (ranks are reported in descending-sorted order):
    numbers = np.array([10,9,8,7,6]); epsilon = 1
    -> ranks_sorted == [1, 1, 2, 2, 3]

    Note: uses the builtin ``int`` instead of ``np.int``, which was removed
    in NumPy 1.24.
    """
    numbers = np.array(numbers)
    Isort = np.argsort(-numbers)
    numbers_sorted = numbers[Isort]
    count = 1
    ranks = np.zeros(np.size(numbers))
    for i, num in enumerate(numbers_sorted):
        if ranks[i] == 0:
            if factor:
                lower_bound = num * epsilon
            else:
                lower_bound = num - epsilon
            # All numbers in [lower_bound, num] share this rank.
            bounded_nums1 = num >= numbers_sorted
            bounded_nums2 = numbers_sorted >= lower_bound
            bounded_nums = bounded_nums1 * bounded_nums2
            count += 1
            for ibounded, bounded_num in enumerate(bounded_nums):
                if bounded_num == True:
                    ranks[ibounded] = count
    # Renumber the raw group labels to consecutive ranks 1, 2, 3, ...
    uranks = np.unique(ranks)
    nranks = np.size(uranks)
    new_ranks = ranks.copy()
    new_count = 0
    for rank in uranks:
        new_count += 1
        same_ranks = ranks == rank
        for isame, same_rank in enumerate(same_ranks):
            if same_rank == True:
                new_ranks[isame] = new_count
    #ranks_sorted = new_ranks[Isort]
    ranks_sorted = [int(x) for x in new_ranks]
    if verbose:
        for i, num in enumerate(numbers_sorted):
            print("    ({0})    {1}".format(int(ranks_sorted[i]), num))
    return numbers_sorted, ranks_sorted, Isort
def print_matrix_info(matrix_data, matrix_label, nkeys, nlines=10):
    """
    Print the extreme values of a key-pair matrix and the 1-based
    (row -> column) key-number pairs that attain them.

    Parameters
    ----------
    matrix_data : (nkeys, nkeys) array of key-pair values
    matrix_label : name used in the printed headers
    nkeys : number of keys per matrix dimension
    nlines : how many minimum and how many maximum entries to list

    Note: uses the builtin ``int`` instead of ``np.int``, which was removed
    in NumPy 1.24.
    """
    print("{0} min = {1}, max = {2}".format(matrix_label, np.min(matrix_data), np.max(matrix_data)))
    matrix_flat = matrix_data.flatten()
    argsort = np.argsort(matrix_flat)
    print("{0} key number pairs with minimum values:".format(matrix_label))
    for x in argsort[0:nlines]:
        if x % nkeys == 0:
            # Flat index on a row boundary: ceil alone would undershoot the row.
            min_row = int(np.ceil(x / nkeys)) + 1
            min_col = 1
        else:
            min_row = int(np.ceil(x / nkeys))
            min_col = x - nkeys * (min_row-1) + 1
        print("    {0} -> {1} ({2})".format(min_row, min_col, matrix_flat[x]))
    print("{0} key number pairs with maximum values:".format(matrix_label))
    max_sort = argsort[-nlines::]
    # Walk the largest entries from biggest to smallest.
    for x in max_sort[::-1]:
        if x % nkeys == 0:
            max_row = int(np.ceil(x / nkeys)) + 1
            max_col = 1
        else:
            max_row = int(np.ceil(x / nkeys))
            max_col = x - nkeys * (max_row-1) + 1
        print("    {0} -> {1} ({2})".format(max_row, max_col, matrix_flat[x]))
def heatmap(data, title="", xlabel="", ylabel="", x_axis_labels=[], y_axis_labels=[], print_output=True):
    """
    Render *data* as a seaborn heatmap and optionally save it to
    "<title>_heatmap.png".
    """
    # Fresh figure so repeated calls do not draw on top of each other;
    # each cell is separated via the linewidths parameter.
    plt.figure()
    vmin, vmax = np.min(data), np.max(data)
    ax = sns.heatmap(
        data,
        xticklabels=x_axis_labels,
        yticklabels=y_axis_labels,
        linewidths=1,
        cmap="viridis",
        square=True,
        vmin=vmin,
        vmax=vmax,
    )
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    ax.set_xticklabels(x_axis_labels)  #, rotation=75)
    ax.set_yticklabels(y_axis_labels)
    if print_output:
        ax.figure.savefig("{0}_heatmap.png".format(title))
def histmap(data, title="", print_output=True):
    """
    Plot a histogram of *data* and optionally save it to
    "<title>_histogram.png".

    Bug fix: the distplot return value was never assigned, so saving the
    figure raised NameError on the undefined ``sns_plot`` when
    print_output=True.
    """
    sns_plot = sns.distplot(data)
    plt.title(title)
    if print_output:
        sns_plot.figure.savefig("{0}_histogram.png".format(title))
def print_layout24(layout):
    """
    Print a 24-key layout as three rows of two 4-key hand blocks
    (left-hand keys 0-11, right-hand keys 12-23).
    """
    hand_rows = [(layout[0:4], layout[12:16]),
                 (layout[4:8], layout[16:20]),
                 (layout[8:12], layout[20:24])]
    for left_keys, right_keys in hand_rows:
        print('    {0}         {1}'.format(' '.join(left_keys),
                                           ' '.join(right_keys)))
def print_layout24_instances(layout, letters24, instances24, bigrams, bigram_frequencies):
    """
    Print billions of instances per letter (not Z or Q) in layout form,
    followed by left/right hand totals and same-finger / roll tallies.

    layout = ['P','Y','O','U','C','I','E','A','G','K','J','X','M','D','L','B','R','T','N','S','H','V','W','F']
    print_layout24_instances(layout, letters24, instances24, bigrams, bigram_frequencies)

    NOTE(review): relies on the module-level ``instances_denominator`` and on
    ``tally_layout_samefinger_bigrams`` / ``tally_layout_bigram_rolls``
    defined elsewhere in this module.
    """
    # Per-key instance counts (and their scaled string form) in layout order.
    layout_instances = []
    layout_instances_strings = []
    for letter in layout:
        index = letters24.index(letter)
        layout_instances.append(instances24[index])
        layout_instances_strings.append('{0:3.0f}'.format(instances24[index]/instances_denominator))
    # Same two-hand, three-row arrangement as print_layout24.
    print('    {0}         {1}'.format(' '.join(layout_instances_strings[0:4]),
                                       ' '.join(layout_instances_strings[12:16])))
    print('    {0}         {1}'.format(' '.join(layout_instances_strings[4:8]),
                                       ' '.join(layout_instances_strings[16:20])))
    print('    {0}         {1}'.format(' '.join(layout_instances_strings[8:12]),
                                       ' '.join(layout_instances_strings[20:24])))
    # Hand totals; report the percentage by which the heavier hand exceeds the lighter.
    left_sum = np.sum(layout_instances[0:12])
    right_sum = np.sum(layout_instances[12:24])
    pL = ''
    pR = ''
    if left_sum > right_sum:
        pL = ' ({0:3.2f}%)'.format(100 * (left_sum - right_sum) / right_sum)
    elif right_sum > left_sum:
        pR = ' ({0:3.2f}%)'.format(100 * (right_sum - left_sum) / left_sum)
    print('\n    left:  {0}{1}    right: {2}{3}'.format(left_sum, pL, right_sum, pR))
    tally_layout_samefinger_bigrams(layout, bigrams, bigram_frequencies, nkeys=24, verbose=True)
    tally_layout_bigram_rolls(layout, bigrams, bigram_frequencies, nkeys=24, verbose=True)
def print_bigram_frequency(input_pair, bigrams, bigram_frequencies):
    """
    Print the frequency of each adjacent letter pair, in both orders.

    >>> print_bigram_frequency(['t','h'], bigrams, bigram_frequencies)
    """
    # Normalize to uppercase strings, then look up each adjacent pair.
    chars = [str.upper(str(item)) for item in input_pair]
    for first, second in zip(chars[:-1], chars[1:]):
        forward = first + second
        backward = second + first
        hit_forward = np.where(bigrams == forward)
        hit_backward = np.where(bigrams == backward)
        if np.size(hit_forward) > 0:
            forward_freq = bigram_frequencies[hit_forward[0][0]]
            print("{0}: {1:3.2f}".format(forward, forward_freq))
        if np.size(hit_backward) > 0:
            backward_freq = bigram_frequencies[hit_backward[0][0]]
            print("{0}: {1:3.2f}".format(backward, backward_freq))
| [
"matplotlib.pyplot.ylabel",
"numpy.argsort",
"numpy.array",
"seaborn.distplot",
"numpy.where",
"matplotlib.pyplot.xlabel",
"numpy.max",
"sympy.utilities.iterables.multiset_permutations",
"numpy.min",
"numpy.ceil",
"numpy.average",
"numpy.size",
"matplotlib.pyplot.title",
"numpy.int",
"ma... | [((3034, 3058), 'numpy.zeros', 'np.zeros', (['(nkeys, nkeys)'], {}), '((nkeys, nkeys))\n', (3042, 3058), True, 'import numpy as np\n'), ((3810, 3840), 'sympy.utilities.iterables.multiset_permutations', 'multiset_permutations', (['letters'], {}), '(letters)\n', (3831, 3840), False, 'from sympy.utilities.iterables import multiset_permutations\n'), ((3906, 3935), 'numpy.array', 'np.array', (['letter_permutations'], {}), '(letter_permutations)\n', (3914, 3935), True, 'import numpy as np\n'), ((4235, 4265), 'numpy.zeros', 'np.zeros', (['(nletters, nletters)'], {}), '((nletters, nletters))\n', (4243, 4265), True, 'import numpy as np\n'), ((4706, 4716), 'numpy.min', 'np.min', (['F2'], {}), '(F2)\n', (4712, 4716), True, 'import numpy as np\n'), ((4729, 4739), 'numpy.max', 'np.max', (['F2'], {}), '(F2)\n', (4735, 4739), True, 'import numpy as np\n'), ((4898, 4926), 'numpy.average', 'np.average', (['(data_matrix * F2)'], {}), '(data_matrix * F2)\n', (4908, 4926), True, 'import numpy as np\n'), ((8553, 8601), 'numpy.sum', 'np.sum', (['[x[0] for x in samefinger_bigram_counts]'], {}), '([x[0] for x in samefinger_bigram_counts])\n', (8559, 8601), True, 'import numpy as np\n'), ((13238, 13280), 'numpy.sum', 'np.sum', (['[x[0] for x in bigram_roll_counts]'], {}), '([x[0] for x in bigram_roll_counts])\n', (13244, 13280), True, 'import numpy as np\n'), ((13899, 13929), 'numpy.zeros', 'np.zeros', (['(nletters, nletters)'], {}), '((nletters, nletters))\n', (13907, 13929), True, 'import numpy as np\n'), ((20710, 20727), 'numpy.array', 'np.array', (['numbers'], {}), '(numbers)\n', (20718, 20727), True, 'import numpy as np\n'), ((20740, 20760), 'numpy.argsort', 'np.argsort', (['(-numbers)'], {}), '(-numbers)\n', (20750, 20760), True, 'import numpy as np\n'), ((21402, 21418), 'numpy.unique', 'np.unique', (['ranks'], {}), '(ranks)\n', (21411, 21418), True, 'import numpy as np\n'), ((21432, 21447), 'numpy.size', 'np.size', (['uranks'], {}), '(uranks)\n', (21439, 21447), True, 
'import numpy as np\n'), ((22266, 22289), 'numpy.argsort', 'np.argsort', (['matrix_flat'], {}), '(matrix_flat)\n', (22276, 22289), True, 'import numpy as np\n'), ((23452, 23464), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (23462, 23464), True, 'import matplotlib.pyplot as plt\n'), ((23665, 23681), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (23674, 23681), True, 'import matplotlib.pyplot as plt\n'), ((23686, 23704), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (23696, 23704), True, 'import matplotlib.pyplot as plt\n'), ((23709, 23727), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (23719, 23727), True, 'import matplotlib.pyplot as plt\n'), ((24023, 24041), 'seaborn.distplot', 'sns.distplot', (['data'], {}), '(data)\n', (24035, 24041), True, 'import seaborn as sns\n'), ((24046, 24062), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (24055, 24062), True, 'import matplotlib.pyplot as plt\n'), ((25700, 25730), 'numpy.sum', 'np.sum', (['layout_instances[0:12]'], {}), '(layout_instances[0:12])\n', (25706, 25730), True, 'import numpy as np\n'), ((25747, 25778), 'numpy.sum', 'np.sum', (['layout_instances[12:24]'], {}), '(layout_instances[12:24])\n', (25753, 25778), True, 'import numpy as np\n'), ((1069, 1081), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1077, 1081), True, 'import numpy as np\n'), ((3291, 3305), 'numpy.min', 'np.min', (['Select'], {}), '(Select)\n', (3297, 3305), True, 'import numpy as np\n'), ((3308, 3322), 'numpy.max', 'np.max', (['Select'], {}), '(Select)\n', (3314, 3322), True, 'import numpy as np\n'), ((3598, 3608), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3606, 3608), True, 'import matplotlib.pyplot as plt\n'), ((5578, 5605), 'numpy.where', 'np.where', (['(bigrams == bigram)'], {}), '(bigrams == bigram)\n', (5586, 5605), True, 'import numpy as np\n'), ((8049, 8077), 'numpy.where', 'np.where', (['(bigrams == 
bigram1)'], {}), '(bigrams == bigram1)\n', (8057, 8077), True, 'import numpy as np\n'), ((8096, 8124), 'numpy.where', 'np.where', (['(bigrams == bigram2)'], {}), '(bigrams == bigram2)\n', (8104, 8124), True, 'import numpy as np\n'), ((12764, 12792), 'numpy.where', 'np.where', (['(bigrams == bigram1)'], {}), '(bigrams == bigram1)\n', (12772, 12792), True, 'import numpy as np\n'), ((12811, 12839), 'numpy.where', 'np.where', (['(bigrams == bigram2)'], {}), '(bigrams == bigram2)\n', (12819, 12839), True, 'import numpy as np\n'), ((20832, 20848), 'numpy.size', 'np.size', (['numbers'], {}), '(numbers)\n', (20839, 20848), True, 'import numpy as np\n'), ((21769, 21778), 'numpy.int', 'np.int', (['x'], {}), '(x)\n', (21775, 21778), True, 'import numpy as np\n'), ((26751, 26779), 'numpy.where', 'np.where', (['(bigrams == bigram1)'], {}), '(bigrams == bigram1)\n', (26759, 26779), True, 'import numpy as np\n'), ((26798, 26826), 'numpy.where', 'np.where', (['(bigrams == bigram2)'], {}), '(bigrams == bigram2)\n', (26806, 26826), True, 'import numpy as np\n'), ((4486, 4513), 'numpy.where', 'np.where', (['(bigrams == bigram)'], {}), '(bigrams == bigram)\n', (4494, 4513), True, 'import numpy as np\n'), ((5617, 5632), 'numpy.size', 'np.size', (['i2gram'], {}), '(i2gram)\n', (5624, 5632), True, 'import numpy as np\n'), ((5787, 5796), 'numpy.min', 'np.min', (['F'], {}), '(F)\n', (5793, 5796), True, 'import numpy as np\n'), ((5799, 5808), 'numpy.max', 'np.max', (['F'], {}), '(F)\n', (5805, 5808), True, 'import numpy as np\n'), ((8136, 8152), 'numpy.size', 'np.size', (['i2gram1'], {}), '(i2gram1)\n', (8143, 8152), True, 'import numpy as np\n'), ((8334, 8350), 'numpy.size', 'np.size', (['i2gram2'], {}), '(i2gram2)\n', (8341, 8350), True, 'import numpy as np\n'), ((12851, 12867), 'numpy.size', 'np.size', (['i2gram1'], {}), '(i2gram1)\n', (12858, 12867), True, 'import numpy as np\n'), ((13037, 13053), 'numpy.size', 'np.size', (['i2gram2'], {}), '(i2gram2)\n', (13044, 13053), True, 'import 
numpy as np\n'), ((15057, 15067), 'numpy.min', 'np.min', (['F2'], {}), '(F2)\n', (15063, 15067), True, 'import numpy as np\n'), ((15088, 15098), 'numpy.max', 'np.max', (['F2'], {}), '(F2)\n', (15094, 15098), True, 'import numpy as np\n'), ((15288, 15315), 'numpy.average', 'np.average', (['(data_matrix * F)'], {}), '(data_matrix * F)\n', (15298, 15315), True, 'import numpy as np\n'), ((22169, 22188), 'numpy.min', 'np.min', (['matrix_data'], {}), '(matrix_data)\n', (22175, 22188), True, 'import numpy as np\n'), ((22190, 22209), 'numpy.max', 'np.max', (['matrix_data'], {}), '(matrix_data)\n', (22196, 22209), True, 'import numpy as np\n'), ((23628, 23640), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (23634, 23640), True, 'import numpy as np\n'), ((23647, 23659), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (23653, 23659), True, 'import numpy as np\n'), ((26838, 26854), 'numpy.size', 'np.size', (['i2gram1'], {}), '(i2gram1)\n', (26845, 26854), True, 'import numpy as np\n'), ((26983, 26999), 'numpy.size', 'np.size', (['i2gram2'], {}), '(i2gram2)\n', (26990, 26999), True, 'import numpy as np\n'), ((3411, 3425), 'numpy.max', 'np.max', (['Select'], {}), '(Select)\n', (3417, 3425), True, 'import numpy as np\n'), ((3428, 3442), 'numpy.min', 'np.min', (['Select'], {}), '(Select)\n', (3434, 3442), True, 'import numpy as np\n'), ((4529, 4544), 'numpy.size', 'np.size', (['i2gram'], {}), '(i2gram)\n', (4536, 4544), True, 'import numpy as np\n'), ((22545, 22563), 'numpy.ceil', 'np.ceil', (['(x / nkeys)'], {}), '(x / nkeys)\n', (22552, 22563), True, 'import numpy as np\n'), ((23007, 23025), 'numpy.ceil', 'np.ceil', (['(x / nkeys)'], {}), '(x / nkeys)\n', (23014, 23025), True, 'import numpy as np\n'), ((3372, 3386), 'numpy.min', 'np.min', (['Select'], {}), '(Select)\n', (3378, 3386), True, 'import numpy as np\n'), ((5869, 5878), 'numpy.max', 'np.max', (['F'], {}), '(F)\n', (5875, 5878), True, 'import numpy as np\n'), ((5881, 5890), 'numpy.min', 'np.min', (['F'], {}), 
'(F)\n', (5887, 5890), True, 'import numpy as np\n'), ((8295, 8321), 'numpy.max', 'np.max', (['bigram_frequencies'], {}), '(bigram_frequencies)\n', (8301, 8321), True, 'import numpy as np\n'), ((8493, 8519), 'numpy.max', 'np.max', (['bigram_frequencies'], {}), '(bigram_frequencies)\n', (8499, 8519), True, 'import numpy as np\n'), ((12998, 13024), 'numpy.max', 'np.max', (['bigram_frequencies'], {}), '(bigram_frequencies)\n', (13004, 13024), True, 'import numpy as np\n'), ((13184, 13210), 'numpy.max', 'np.max', (['bigram_frequencies'], {}), '(bigram_frequencies)\n', (13190, 13210), True, 'import numpy as np\n'), ((14797, 14824), 'numpy.where', 'np.where', (['(bigrams == bigram)'], {}), '(bigrams == bigram)\n', (14805, 14824), True, 'import numpy as np\n'), ((21913, 21936), 'numpy.int', 'np.int', (['ranks_sorted[i]'], {}), '(ranks_sorted[i])\n', (21919, 21936), True, 'import numpy as np\n'), ((22454, 22472), 'numpy.ceil', 'np.ceil', (['(x / nkeys)'], {}), '(x / nkeys)\n', (22461, 22472), True, 'import numpy as np\n'), ((22916, 22934), 'numpy.ceil', 'np.ceil', (['(x / nkeys)'], {}), '(x / nkeys)\n', (22923, 22934), True, 'import numpy as np\n'), ((5835, 5844), 'numpy.min', 'np.min', (['F'], {}), '(F)\n', (5841, 5844), True, 'import numpy as np\n'), ((14848, 14863), 'numpy.size', 'np.size', (['i2gram'], {}), '(i2gram)\n', (14855, 14863), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
class CorrectBias(object):
    """
    Distance-based bias correction of NPV estimates from a mean reservoir model.

    The bias-correction factor alpha for a new control x is estimated as a
    weighted average of sampled partial correction factors beta (ratio of an
    individual-realization NPV to the mean-model NPV), weighted by a
    Gaspari-Cohn taper of the Manhattan distance between drilling-sequence
    encodings.
    """

    # Canonical well order used to encode a drilling sequence as a permutation.
    _REF_ORDER = ['OP_1', 'OP_2', 'OP_3', 'OP_4', 'OP_5', 'WI_1', 'WI_2', 'WI_3']

    def __init__(self, taper_width, npv_observations):
        # Localization length of the Gaspari-Cohn taper.
        self.taper_width = taper_width
        locations, betas = self.get_data(npv_observations)
        self.sampled_controls_loc = locations
        self.sampled_controls_beta = betas
        # Global (non-localized) estimate of the correction factor alpha.
        self.est_mean_alpha = np.mean(self.sampled_controls_beta)

    def get_data(self, obs_data):
        """
        obs_data columns: "control" = random control (stringified list of wells);
        "individual_realization" = NPV from a single model realization;
        "mean_model" = NPV from the mean model.

        Returns (sampled_controls_loc, sampled_controls_beta).
        """
        raw_controls = obs_data["control"]
        parsed_controls = []
        for row in range(raw_controls.size):
            # "['OP_1', 'WI_3']" -> ['OP_1', 'WI_3'] (strip brackets, spaces, quotes).
            body = raw_controls[row][1:-1]
            tokens = body.split(",")
            parsed_controls.append([token.replace(" ", "")[1:-1] for token in tokens])
        # Partial correction factor beta per sampled control.
        sampled_controls_beta = obs_data["individual_realization"].to_numpy() / obs_data["mean_model"].to_numpy()
        # Well-position based location for every sampled random drilling sequence.
        sampled_controls_loc = self.get_loc_multiple(parsed_controls)
        return sampled_controls_loc, sampled_controls_beta

    def get_loc_multiple(self, action_list):
        """
        Encode each drilling sequence as a permutation vector over the
        reference well order; wells not drilled all share rank len(seq)+1,
        and an empty sequence (parsed as ['']) maps to all ones.
        """
        ref_order = self._REF_ORDER
        n_wells = len(ref_order)
        all_locations = np.zeros((len(action_list), n_wells))
        for row, sequence in enumerate(action_list):
            if sequence[0] == '':
                all_locations[row, :] = [1] * n_wells
            else:
                encoding = np.array([len(sequence) + 1] * n_wells)
                drilled = np.array([ref_order.index(well) for well in sequence])
                encoding[drilled] = np.array(range(1, len(sequence) + 1))
                all_locations[row, :] = encoding
        return all_locations

    def get_loc_single(self, seq_action):
        """
        Encode one drilling sequence as a (1, 8) permutation vector.

        ref. ['OP_1', 'OP_2', 'OP_3', 'OP_4', 'OP_5', 'WI_1', 'WI_2', 'WI_3'] -> [1,...,8];
        input ['WI_3','OP_5','OP_3','OP_1','OP_4','WI_2','OP_2','WI_1']
        -> [[4, 7, 3, 5, 2, 8, 6, 1]].
        """
        ref_order = self._REF_ORDER
        n_wells = len(ref_order)
        loc_seq = np.zeros((1, n_wells))
        if seq_action == []:
            loc_seq[0, :] = [1] * n_wells
        else:
            encoding = np.array([len(seq_action) + 1] * n_wells)
            drilled = np.array([ref_order.index(well) for well in seq_action])
            encoding[drilled] = np.array(range(1, len(seq_action) + 1))
            loc_seq[0, :] = encoding
        return loc_seq

    def gaspari_cohn(self, distance_values):
        """
        Gaspari-Cohn fifth-order piecewise taper: 1 at distance 0, zero
        beyond twice the taper width.
        """
        z = distance_values / self.taper_width
        weights = np.zeros(distance_values.shape[0], )
        inner = np.where(z <= 1)[0]
        within = np.where(z <= 2)[0]
        ring = np.setdiff1d(within, inner)
        weights[inner] = - (z[inner] ** 5) / 4 + (z[inner] ** 4) / 2 + (z[inner] ** 3) * (5 / 8) - (
                z[inner] ** 2) * (5 / 3) + 1
        weights[ring] = (z[ring] ** 5) / 12 - (z[ring] ** 4) / 2 + (z[ring] ** 3) * (5 / 8) + (
                z[ring] ** 2) * (5 / 3) - z[ring] * 5 + 4 - (z[ring] ** -1) * (2 / 3)
        return weights

    def cal_distance(self, est_control_loc):
        """
        Manhattan distance between the estimation point's encoding and the
        encoding of every sampled control.
        """
        per_well_gaps = np.abs(self.sampled_controls_loc - est_control_loc)
        return np.sum(per_well_gaps, axis=1)

    def cal_alpha_loc(self, weight_values):
        """
        Weighted-average (local) estimate of alpha plus the effective sample
        size n_eff = (sum w)^2 / sum(w^2).
        """
        total_weight = np.sum(weight_values, axis=0)
        n_eff = np.sum(weight_values) ** 2 / np.sum(weight_values ** 2)
        est_alpha_loc = np.sum(weight_values * self.sampled_controls_beta) / total_weight
        return est_alpha_loc, n_eff

    def local_estimate(self, seq_action, initial_approx):
        """
        Localized estimate of the expected NPV for seq_action, where
        initial_approx = J(x, mean model).

        Returns (expected_value_loc, est_alpha_loc, est_n_eff).
        """
        seq_loc = self.get_loc_single(seq_action)
        distances = self.cal_distance(seq_loc)
        weights = self.gaspari_cohn(distances)
        # n_eff is only needed by the regularized variant below.
        est_alpha_loc, est_n_eff = self.cal_alpha_loc(weights)
        return est_alpha_loc * initial_approx, est_alpha_loc, est_n_eff

    def regularized_estimate(self, seq_action, initial_approx, var_alpha, var_beta):
        """
        Regularized estimate blending the localized value with the global
        mean correction factor; requires variances of alpha and beta.
        """
        expected_value_loc, alpha_loc, n_eff = self.local_estimate(seq_action, initial_approx)
        gamma = 1 + var_beta / (var_alpha * n_eff)
        return (expected_value_loc + gamma * self.est_mean_alpha) / (1 + gamma)
if __name__ == '__main__':
    """
    Bias-correction method - computing an approximation of expected value
    Main idea: (Paper: https://link.springer.com/article/10.1007/s10596-020-10017-y)
        E(J(x_j,m)) ≈ \alpha * J(x_j, m_ref)  (eg., m_ref = \bar m)
        \alpha(x_j) = G(\beta_{1},\beta_{2}, ... ,\beta_n , x_j).
    alpha: bias-correction factor
    beta: partial correction factor:\beta_{ij} =\beta(\mathbf{x_i,m_j,\bar m}) = \frac{J(\mathbf{x_i,m_j})}{J(\mathbf{x_i, \bar m})}.
    Three possible ways to estimate bias correction factor \alpha
        1. pure distance-based localization
        2. regularized localization
        3. Covariance-based optimal weights
    (The latter two require additional information about the correction factor (variance of bias-correction factor),
    """
    # npv from single realization/ mean model (100 random drilling sequences)
    # col_1 = drilling sequences (x_i), col_2 = J(x_i,m_i), col_3 =J(x_i,\bar m)
    # NOTE: the CSV must provide columns "control", "individual_realization"
    # and "mean_model" (see CorrectBias.get_data).
    npv_obs = pd.read_csv("npv_obs_example.csv")
    # current_control: x_j (eg., specific drilling sequence)
    current_control = ['WI_3', 'OP_5', 'OP_3', 'OP_1', 'OP_4', 'WI_2', 'OP_2', 'WI_1']
    # initial approximation obtained from reference model (mean model): J(x_j, \bar m)
    npv_mean_model = 6870454326
    # depend on application / distance measure (for reference, the maximum Manhattan distance between 8 wells is 32)
    taper_length = 25
    # estimate expected value using bias-corrected mean model (eg., distance-based localization)
    approx_update = CorrectBias(taper_length, npv_obs)
    expected_NPV_loc, alpha_loc, n_eff = approx_update.local_estimate(current_control, npv_mean_model)
    # Report the estimation point, the localized expected NPV and the
    # estimated correction factor.
    print("---------------------------------------------------------------------------------------------")
    print("Estimation Point:", current_control, "\nNPV from mean model:", npv_mean_model)
    print("------------------------ Bias-correction method (distance-based localization) ---------------")
    print("Approximation of expected NPV:", expected_NPV_loc,
          "\nEstimation of bias correction factor (alpha):", alpha_loc,
          "\nEnsemble size / number of sampled controls:", 100)
| [
"numpy.mean",
"numpy.abs",
"numpy.reshape",
"pandas.read_csv",
"numpy.where",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.setdiff1d"
] | [((7861, 7895), 'pandas.read_csv', 'pd.read_csv', (['"""npv_obs_example.csv"""'], {}), "('npv_obs_example.csv')\n", (7872, 7895), True, 'import pandas as pd\n'), ((288, 323), 'numpy.mean', 'np.mean', (['self.sampled_controls_beta'], {}), '(self.sampled_controls_beta)\n', (295, 323), True, 'import numpy as np\n'), ((3047, 3063), 'numpy.zeros', 'np.zeros', (['(1, 8)'], {}), '((1, 8))\n', (3055, 3063), True, 'import numpy as np\n'), ((3180, 3200), 'numpy.array', 'np.array', (['temp_order'], {}), '(temp_order)\n', (3188, 3200), True, 'import numpy as np\n'), ((3278, 3297), 'numpy.array', 'np.array', (['upd_order'], {}), '(upd_order)\n', (3286, 3297), True, 'import numpy as np\n'), ((3943, 3977), 'numpy.zeros', 'np.zeros', (['distance_values.shape[0]'], {}), '(distance_values.shape[0])\n', (3951, 3977), True, 'import numpy as np\n'), ((4122, 4152), 'numpy.setdiff1d', 'np.setdiff1d', (['index_2', 'index_1'], {}), '(index_2, index_1)\n', (4134, 4152), True, 'import numpy as np\n'), ((4897, 4948), 'numpy.abs', 'np.abs', (['(self.sampled_controls_loc - est_control_loc)'], {}), '(self.sampled_controls_loc - est_control_loc)\n', (4903, 4948), True, 'import numpy as np\n'), ((4978, 5003), 'numpy.sum', 'np.sum', (['dist_each'], {'axis': '(1)'}), '(dist_each, axis=1)\n', (4984, 5003), True, 'import numpy as np\n'), ((5166, 5195), 'numpy.sum', 'np.sum', (['weight_values'], {'axis': '(0)'}), '(weight_values, axis=0)\n', (5172, 5195), True, 'import numpy as np\n'), ((3636, 3657), 'numpy.array', 'np.array', (['order_index'], {}), '(order_index)\n', (3644, 3657), True, 'import numpy as np\n'), ((3731, 3761), 'numpy.reshape', 'np.reshape', (['temp_order', '(1, 8)'], {}), '(temp_order, (1, 8))\n', (3741, 3761), True, 'import numpy as np\n'), ((4045, 4061), 'numpy.where', 'np.where', (['(z <= 1)'], {}), '(z <= 1)\n', (4053, 4061), True, 'import numpy as np\n'), ((4083, 4099), 'numpy.where', 'np.where', (['(z <= 2)'], {}), '(z <= 2)\n', (4091, 4099), True, 'import numpy as np\n'), 
((5241, 5267), 'numpy.sum', 'np.sum', (['(weight_values ** 2)'], {}), '(weight_values ** 2)\n', (5247, 5267), True, 'import numpy as np\n'), ((5292, 5342), 'numpy.sum', 'np.sum', (['(weight_values * self.sampled_controls_beta)'], {}), '(weight_values * self.sampled_controls_beta)\n', (5298, 5342), True, 'import numpy as np\n'), ((1940, 1960), 'numpy.array', 'np.array', (['temp_order'], {}), '(temp_order)\n', (1948, 1960), True, 'import numpy as np\n'), ((2054, 2073), 'numpy.array', 'np.array', (['upd_order'], {}), '(upd_order)\n', (2062, 2073), True, 'import numpy as np\n'), ((2306, 2327), 'numpy.array', 'np.array', (['order_index'], {}), '(order_index)\n', (2314, 2327), True, 'import numpy as np\n'), ((2409, 2439), 'numpy.reshape', 'np.reshape', (['temp_order', '(1, 8)'], {}), '(temp_order, (1, 8))\n', (2419, 2439), True, 'import numpy as np\n'), ((5212, 5233), 'numpy.sum', 'np.sum', (['weight_values'], {}), '(weight_values)\n', (5218, 5233), True, 'import numpy as np\n')] |
import numpy as np
def expected_calibration_error(y_pred, proba, y_true, n_bins=10):
    """
    Expected Calibration Error (ECE) over equal-width confidence bins.

    Parameters
    ----------
    y_pred : array of predicted labels
    proba : array of confidences in [0, 1], one per prediction
    y_true : array of true labels, same shape as y_pred
    n_bins : number of equal-width bins partitioning [0, 1]

    Returns
    -------
    Sum over bins of (bin size / N) * |accuracy(bin) - mean confidence(bin)|.

    Bug fix: bins are half-open on the left, so a confidence of exactly 0
    previously fell into no bin and was silently dropped; it is now
    included in the first bin.
    """
    intervals = np.linspace(0, 1, n_bins+1)
    accuracy = (y_pred == y_true).astype(int)
    num_predictions = y_pred.shape[0]
    error = 0
    for i, (lower, upper) in enumerate(zip(intervals[:-1], intervals[1:])):
        mask = np.logical_and(proba > lower, proba <= upper)
        if i == 0:
            # Close the first bin on the left so proba == 0 is counted.
            mask = np.logical_or(mask, proba == lower)
        bin_size = mask.sum()
        if bin_size > 0:
            proba_bin_mean = proba[mask].mean()
            acc_bin_mean = accuracy[mask].mean()
            error += bin_size / num_predictions * np.abs(acc_bin_mean - proba_bin_mean)
    return error
| [
"numpy.abs",
"numpy.linspace",
"numpy.logical_and"
] | [((104, 133), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(n_bins + 1)'], {}), '(0, 1, n_bins + 1)\n', (115, 133), True, 'import numpy as np\n'), ((307, 352), 'numpy.logical_and', 'np.logical_and', (['(proba > lower)', '(proba <= upper)'], {}), '(proba > lower, proba <= upper)\n', (321, 352), True, 'import numpy as np\n'), ((556, 593), 'numpy.abs', 'np.abs', (['(acc_bin_mean - proba_bin_mean)'], {}), '(acc_bin_mean - proba_bin_mean)\n', (562, 593), True, 'import numpy as np\n')] |
from dolo.numeric.tensor import sdot,mdot
import numpy as np
TOL = 1e-10
# credits : second_order_solver is adapted from <NAME>'s port of Uhlig's Toolkit.
def second_order_solver(FF, GG, HH):
    """
    Solve the linear rational-expectations system FF x_{t+1} + GG x_t + HH x_{t-1} = 0
    via a generalized Schur (QZ) decomposition (adapted from Sven Schreiber's
    port of Uhlig's Toolkit).

    Returns
    -------
    [eigenvalues, PP] : the m_states selected stable generalized eigenvalues
    and the real stable transition matrix PP.

    Raises
    ------
    BKError
        If the Blanchard-Kahn conditions fail (wrong number of stable roots).
    """
    from scipy.linalg import qz
    from dolo.numeric.extern.qz import qzdiv

    from numpy import array, mat, c_, r_, eye, zeros, real_if_close, diag, allclose, where, diagflat
    from numpy.linalg import solve

    Psi_mat = array(FF)
    Gamma_mat = array(-GG)
    Theta_mat = array(-HH)
    m_states = FF.shape[0]

    # Build the 2m x 2m generalized eigenvalue problem Delta v = lambda Xi v
    # (companion form of the second-order system).
    Xi_mat = r_[c_[Gamma_mat, Theta_mat],
                c_[eye(m_states), zeros((m_states, m_states))]]

    Delta_mat = r_[c_[Psi_mat, zeros((m_states, m_states))],
                   c_[zeros((m_states, m_states)), eye(m_states)]]

    AAA, BBB, Q, Z = qz(Delta_mat, Xi_mat)

    Delta_up, Xi_up, UUU, VVV = [real_if_close(mm) for mm in (AAA, BBB, Q, Z)]

    # Generalized eigenvalues; clamp (near-)zero denominators to TOL.
    Xi_eigval = diag(Xi_up)/where(diag(Delta_up)>TOL, diag(Delta_up), TOL)

    # Reorder so the m_states smallest-modulus (stable) roots come first.
    Xi_sortindex = abs(Xi_eigval).argsort()
    # (Xi_sortabs doesn't really seem to be needed)
    Xi_sortval = Xi_eigval[Xi_sortindex]
    Xi_select = slice(0, m_states)
    stake = (abs(Xi_sortval[Xi_select])).max() + TOL

    Delta_up, Xi_up, UUU, VVV = qzdiv(stake, Delta_up, Xi_up, UUU, VVV)

    # Blanchard-Kahn conditions as explicit checks: the original wrapped
    # asserts in a bare ``except:``, which both disappears under ``python -O``
    # and swallows KeyboardInterrupt/SystemExit.
    # All unused roots must be unstable ...
    if not abs(Xi_sortval[m_states]) > (1 - TOL):
        raise BKError('generic')
    # ... and all used roots must be stable.
    if not abs(Xi_sortval[Xi_select]).max() < 1 + TOL:
        raise BKError('generic')

    # check for unit roots anywhere
    # assert (abs((abs(Xi_sortval) - 1)) > TOL).all()

    VVVH = VVV.T
    VVV_2_1 = VVVH[m_states:2*m_states, :m_states]
    VVV_2_2 = VVVH[m_states:2*m_states, m_states:2*m_states]

    PP = - solve(VVV_2_1, VVV_2_2)

    # slightly different check than in the original toolkit:
    assert allclose(real_if_close(PP), PP.real)
    PP = PP.real
    ## end of solve_qz!

    return [Xi_sortval[Xi_select], PP]
def solve_sylvester(A,B,C,D,Ainv = None):
    """Solve the multilinear Sylvester-type equation ``A X + B X [C,...,C] + D = 0``.

    ``X`` is a multilinear function whose dimension is determined by ``D``.
    The inverse of ``A`` can be optionally specified as an argument to avoid
    recomputing the solve.

    Parameters
    ----------
    A, B : ndarray
        Square coefficient matrices.
    C : ndarray
        Matrix whose Kronecker powers multiply X on the right.
    D : ndarray
        Constant term; its ndim determines the order of the equation.
    Ainv : ndarray, optional
        Precomputed inverse of ``A``.

    Returns
    -------
    ndarray
        The solution ``X`` reshaped to ``(n_c,) + (n_v,)*n_d``.
    """
    import slycot

    n_d = D.ndim - 1
    n_v = C.shape[1]
    # BUGFIX: use floor division; under Python 3 true division yields a float
    # and numpy's reshape rejects non-integer dimensions.
    n_c = D.size // n_v**n_d

    # import dolo.config
    # opts = dolo.config.use_engine
    # if opts['sylvester']:
    #     DD = D.flatten().reshape( n_c, n_v**n_d)
    #     [err,XX] = dolo.config.engine.engine.feval(2,'gensylv',n_d,A,B,C,-DD)
    #     X = XX.reshape( (n_c,)+(n_v,)*(n_d))

    DD = D.reshape( n_c, n_v**n_d )

    if n_d == 1:
        CC = C
    else:
        CC = np.kron(C,C)
    for i in range(n_d-2):
        CC = np.kron(CC,C)

    # BUGFIX: ``Ainv != None`` performs an elementwise comparison on numpy
    # arrays and raises ValueError when used in a boolean context; identity
    # comparison is the correct test.
    if Ainv is not None:
        Q = sdot(Ainv,B)
        S = sdot(Ainv,DD)
    else:
        Q = np.linalg.solve(A,B)
        S = np.linalg.solve(A,DD)

    n = n_c
    m = n_v**n_d

    XX = slycot.sb04qd(n,m,Q,CC,-S)

    X = XX.reshape( (n_c,)+(n_v,)*(n_d) )

    return X
class BKError(Exception):
    """Signals a Blanchard-Kahn condition failure of the given kind."""

    def __init__(self, type):
        # Keep the failure kind so callers can inspect it.
        self.type = type

    def __str__(self):
        return f'Blanchard-Kahn error ({self.type})'
| [
"numpy.eye",
"numpy.linalg.solve",
"scipy.linalg.qz",
"dolo.numeric.tensor.sdot",
"dolo.numeric.extern.qz.qzdiv",
"numpy.diag",
"numpy.kron",
"numpy.array",
"numpy.zeros",
"numpy.diagflat",
"numpy.real_if_close",
"slycot.sb04qd"
] | [((422, 431), 'numpy.array', 'array', (['FF'], {}), '(FF)\n', (427, 431), False, 'from numpy import array, mat, c_, r_, eye, zeros, real_if_close, diag, allclose, where, diagflat\n'), ((448, 458), 'numpy.array', 'array', (['(-GG)'], {}), '(-GG)\n', (453, 458), False, 'from numpy import array, mat, c_, r_, eye, zeros, real_if_close, diag, allclose, where, diagflat\n'), ((475, 485), 'numpy.array', 'array', (['(-HH)'], {}), '(-HH)\n', (480, 485), False, 'from numpy import array, mat, c_, r_, eye, zeros, real_if_close, diag, allclose, where, diagflat\n'), ((787, 808), 'scipy.linalg.qz', 'qz', (['Delta_mat', 'Xi_mat'], {}), '(Delta_mat, Xi_mat)\n', (789, 808), False, 'from scipy.linalg import qz\n'), ((1249, 1288), 'dolo.numeric.extern.qz.qzdiv', 'qzdiv', (['stake', 'Delta_up', 'Xi_up', 'UUU', 'VVV'], {}), '(stake, Delta_up, Xi_up, UUU, VVV)\n', (1254, 1288), False, 'from dolo.numeric.extern.qz import qzdiv\n'), ((1665, 1696), 'numpy.diagflat', 'diagflat', (['Xi_sortval[Xi_select]'], {}), '(Xi_sortval[Xi_select])\n', (1673, 1696), False, 'from numpy import array, mat, c_, r_, eye, zeros, real_if_close, diag, allclose, where, diagflat\n'), ((3053, 3083), 'slycot.sb04qd', 'slycot.sb04qd', (['n', 'm', 'Q', 'CC', '(-S)'], {}), '(n, m, Q, CC, -S)\n', (3066, 3083), False, 'import slycot\n'), ((848, 865), 'numpy.real_if_close', 'real_if_close', (['mm'], {}), '(mm)\n', (861, 865), False, 'from numpy import array, mat, c_, r_, eye, zeros, real_if_close, diag, allclose, where, diagflat\n'), ((915, 926), 'numpy.diag', 'diag', (['Xi_up'], {}), '(Xi_up)\n', (919, 926), False, 'from numpy import array, mat, c_, r_, eye, zeros, real_if_close, diag, allclose, where, diagflat\n'), ((1888, 1911), 'numpy.linalg.solve', 'solve', (['VVV_2_1', 'VVV_2_2'], {}), '(VVV_2_1, VVV_2_2)\n', (1893, 1911), False, 'from numpy.linalg import solve\n'), ((1998, 2015), 'numpy.real_if_close', 'real_if_close', (['PP'], {}), '(PP)\n', (2011, 2015), False, 'from numpy import array, mat, c_, r_, eye, 
zeros, real_if_close, diag, allclose, where, diagflat\n'), ((2796, 2809), 'numpy.kron', 'np.kron', (['C', 'C'], {}), '(C, C)\n', (2803, 2809), True, 'import numpy as np\n'), ((2849, 2863), 'numpy.kron', 'np.kron', (['CC', 'C'], {}), '(CC, C)\n', (2856, 2863), True, 'import numpy as np\n'), ((2897, 2910), 'dolo.numeric.tensor.sdot', 'sdot', (['Ainv', 'B'], {}), '(Ainv, B)\n', (2901, 2910), False, 'from dolo.numeric.tensor import sdot, mdot\n'), ((2922, 2936), 'dolo.numeric.tensor.sdot', 'sdot', (['Ainv', 'DD'], {}), '(Ainv, DD)\n', (2926, 2936), False, 'from dolo.numeric.tensor import sdot, mdot\n'), ((2958, 2979), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'B'], {}), '(A, B)\n', (2973, 2979), True, 'import numpy as np\n'), ((2991, 3013), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'DD'], {}), '(A, DD)\n', (3006, 3013), True, 'import numpy as np\n'), ((953, 967), 'numpy.diag', 'diag', (['Delta_up'], {}), '(Delta_up)\n', (957, 967), False, 'from numpy import array, mat, c_, r_, eye, zeros, real_if_close, diag, allclose, where, diagflat\n'), ((933, 947), 'numpy.diag', 'diag', (['Delta_up'], {}), '(Delta_up)\n', (937, 947), False, 'from numpy import array, mat, c_, r_, eye, zeros, real_if_close, diag, allclose, where, diagflat\n'), ((583, 596), 'numpy.eye', 'eye', (['m_states'], {}), '(m_states)\n', (586, 596), False, 'from numpy import array, mat, c_, r_, eye, zeros, real_if_close, diag, allclose, where, diagflat\n'), ((598, 625), 'numpy.zeros', 'zeros', (['(m_states, m_states)'], {}), '((m_states, m_states))\n', (603, 625), False, 'from numpy import array, mat, c_, r_, eye, zeros, real_if_close, diag, allclose, where, diagflat\n'), ((669, 696), 'numpy.zeros', 'zeros', (['(m_states, m_states)'], {}), '((m_states, m_states))\n', (674, 696), False, 'from numpy import array, mat, c_, r_, eye, zeros, real_if_close, diag, allclose, where, diagflat\n'), ((722, 749), 'numpy.zeros', 'zeros', (['(m_states, m_states)'], {}), '((m_states, m_states))\n', (727, 749), 
False, 'from numpy import array, mat, c_, r_, eye, zeros, real_if_close, diag, allclose, where, diagflat\n'), ((751, 764), 'numpy.eye', 'eye', (['m_states'], {}), '(m_states)\n', (754, 764), False, 'from numpy import array, mat, c_, r_, eye, zeros, real_if_close, diag, allclose, where, diagflat\n')] |
# -*- coding: utf-8 -*-
"""Collection of mathematical functions."""
import numpy as np
from .core import (angle, besselI0, besselI1, besselK0, besselK1, cos,
cot, det, dot, exp, exp10, imag, log, log10, max, median,
min, pow, rand, randn, real, rms, round, rrms, sign,
sin, sqrt, sum, toComplex, unique)
def symlog(x, tol=None, linearSpread=0):
    """Symmetric bi-logarithmic transformation (as used in matplotlib).

    Maps signed values logarithmically while keeping their sign. Absolute
    values at or below the threshold are mapped to (near) zero, or spread
    linearly when ``linearSpread`` is positive.

    Parameters
    ----------
    x : iterable
        array to be transformed
    tol : float [None]
        tolerance for minimum values to be treated zero (or linear);
        defaults to the smallest absolute value in ``x``
    linearSpread : float
        define how wide linear transformation is done (0-not, 1-one decade)
    """
    if tol is None:
        tol = np.abs(x).min()
    scaled = np.log10(1 + np.abs(x / tol))
    return np.sign(x) * (scaled + 0.5 * linearSpread)
| [
"numpy.abs",
"numpy.sign"
] | [((1011, 1021), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (1018, 1021), True, 'import numpy as np\n'), ((988, 997), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (994, 997), True, 'import numpy as np\n'), ((1038, 1053), 'numpy.abs', 'np.abs', (['(x / tol)'], {}), '(x / tol)\n', (1044, 1053), True, 'import numpy as np\n')] |
#! /usr/bin/env python
"""Basic Model Interface implementation for the pyMarshMorpho2D model."""
import numpy as np
from bmipy import Bmi
from pymarshmorpho2d import MarshEvolver
from landlab import load_params
# Fallback parameters used by ``MarshMorphoBmi.initialize`` when neither a
# config file nor a dict is supplied.
_DEFAULT_PARAMETERS = {
    'rel_sl_rise_rate': 0.001 / 365.0,  # presumably an annual rate of 0.001 spread per day — TODO confirm units
    'tidal_range': 3.1,  # NOTE(review): units not stated here; presumably meters — verify
}
class MarshMorphoBmi(Bmi):
    """Simulate tidal marsh evolution through the CSDMS Basic Model Interface."""

    _name = "MarshMorphoBmi"
    _input_var_names = ("topographic__elevation",)
    _output_var_names = ("topographic__elevation",)

    def __init__(self):
        """Create a MarshMorphoBmi model that is ready for initialization."""
        self._model = None
        self._values = {}      # var name -> value array
        self._var_units = {}   # var name -> unit string
        self._var_loc = {}     # var name -> grid element ("node", ...)
        self._grids = {}       # grid id -> list of var names
        self._grid_type = {}   # grid id -> BMI grid-type string
        self._start_time = 0.0
        self._end_time = np.finfo("d").max  # effectively "no end time"
        self._time_units = "d"

    def initialize(self, filename=None):
        """Initialize the MarshMorphoBmi model.

        Parameters
        ----------
        filename : str, optional
            Path to name of input file, or dict.
        """
        if isinstance(filename, str):
            p = load_params(filename)
        elif isinstance(filename, dict):
            p = filename
        else:
            p = _DEFAULT_PARAMETERS
        # Work on a copy so the ``pop`` calls below never mutate the caller's
        # dict or the module-level _DEFAULT_PARAMETERS.
        p = dict(p)
        if ('number_of_node_rows' in p and 'number_of_node_columns' in p):
            p['grid_size'] = (p['number_of_node_rows'],
                              p['number_of_node_columns'])
            p.pop('number_of_node_rows')
            p.pop('number_of_node_columns')

        # Instantiate model and get handle to grid.
        # BUGFIX: the original referenced the undefined name MarshMorphoModel;
        # the model class imported at the top of this file is MarshEvolver.
        self._model = MarshEvolver(**p)
        self.grid = self._model.grid  # Landlab grid as public attribute

        #self._values = {"node_state": self.grid.at_node['node_state']}
        #self._var_units = {"node_state": "-"}
        #self._var_loc = {"node_state": "node"}
        #self._grids = {0: ["node_state"]}
        #self._grid_type = {0: "unstructured"}
        #self._initialized = True

    def update(self):
        """Advance forward for one year."""
        self._model.run(to=self._model.current_time + 1.0)

    def update_frac(self, time_frac):
        """Update model by a fraction of a time step.

        Parameters
        ----------
        time_frac : float
            Fraction of a year.
        """
        self._model.run(to=self._model.current_time + time_frac)

    def update_until(self, then):
        """Update model until a particular time.

        Parameters
        ----------
        then : float
            Time to run model until.
        """
        self._model.run(to=then)

    def finalize(self):
        """Finalize model."""
        self._model = None

    def get_var_type(self, var_name):
        """Data type of variable.

        Parameters
        ----------
        var_name : str
            Name of variable as CSDMS Standard Name.

        Returns
        -------
        str
            Data type.
        """
        return str(self.get_value_ptr(var_name).dtype)

    def get_var_units(self, var_name):
        """Get units of variable.

        Parameters
        ----------
        var_name : str
            Name of variable as CSDMS Standard Name.

        Returns
        -------
        str
            Variable units.
        """
        return self._var_units[var_name]

    def get_var_nbytes(self, var_name):
        """Get size, in bytes, of the variable's value array.

        Parameters
        ----------
        var_name : str
            Name of variable as CSDMS Standard Name.

        Returns
        -------
        int
            Size of data array in bytes.
        """
        return self.get_value_ptr(var_name).nbytes

    def get_var_itemsize(self, name):
        """Size, in bytes, of one element of the variable's array."""
        return np.dtype(self.get_var_type(name)).itemsize

    def get_var_location(self, name):
        """Grid element ("node", "edge", or "face") where the variable lives."""
        return self._var_loc[name]

    def get_var_grid(self, var_name):
        """Grid id for a variable.

        Parameters
        ----------
        var_name : str
            Name of variable as CSDMS Standard Name.

        Returns
        -------
        int
            Grid id.
        """
        for grid_id, var_name_list in self._grids.items():
            if var_name in var_name_list:
                return grid_id

    def get_grid_rank(self, grid_id):
        """Rank of grid.

        Parameters
        ----------
        grid_id : int
            Identifier of a grid.

        Returns
        -------
        int
            Rank of grid.
        """
        return 2

    def get_grid_size(self, grid_id):
        """Size of grid.

        Parameters
        ----------
        grid_id : int
            Identifier of a grid.

        Returns
        -------
        int
            Size of grid.
        """
        return self.grid.number_of_nodes

    def get_value_ptr(self, var_name):
        """Reference to values.

        Parameters
        ----------
        var_name : str
            Name of variable as CSDMS Standard Name.

        Returns
        -------
        array_like
            Value array.
        """
        return self._values[var_name]

    def get_value(self, var_name):
        """Copy of values.

        Parameters
        ----------
        var_name : str
            Name of variable as CSDMS Standard Name.

        Returns
        -------
        array_like
            Copy of values.
        """
        return self.get_value_ptr(var_name).copy()

    def get_value_at_indices(self, var_name, indices):
        """Get values at particular indices.

        Parameters
        ----------
        var_name : str
            Name of variable as CSDMS Standard Name.
        indices : array_like
            Array of indices.

        Returns
        -------
        array_like
            Values at indices.
        """
        return self.get_value_ptr(var_name).take(indices)

    def set_value(self, var_name, src):
        """Set model values.

        Parameters
        ----------
        var_name : str
            Name of variable as CSDMS Standard Name.
        src : array_like
            Array of new values.
        """
        val = self.get_value_ptr(var_name)
        val[:] = src

    def set_value_at_indices(self, name, inds, src):
        """Set model values at particular indices.

        Parameters
        ----------
        name : str
            Name of variable as CSDMS Standard Name.
        inds : array_like
            Array of indices.
        src : array_like
            Array of new values.
        """
        val = self.get_value_ptr(name)
        val.flat[inds] = src

    def get_component_name(self):
        """Name of the component."""
        return self._name

    def get_input_item_count(self):
        """Number of input variables."""
        return len(self._input_var_names)

    def get_output_item_count(self):
        """Number of output variables."""
        return len(self._output_var_names)

    def get_input_var_names(self):
        """Get names of input variables."""
        return self._input_var_names

    def get_output_var_names(self):
        """Get names of output variables."""
        return self._output_var_names

    def get_grid_shape(self, grid_id):
        """Number of rows and columns of uniform rectilinear grid."""
        # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        return np.array([self.grid.number_of_node_rows,
                         self.grid.number_of_node_columns],
                        dtype=int)

    def get_grid_spacing(self, grid_id):
        """Spacing of rows and columns of uniform rectilinear grid."""
        return 1.0

    def get_grid_origin(self, grid_id):
        """Origin of uniform rectilinear grid."""
        return np.zeros(2)

    def get_grid_type(self, grid_id):
        """Type of grid."""
        return self._grid_type[grid_id]

    def get_start_time(self):
        """Start time of model."""
        return 0.0

    def get_end_time(self):
        """End time of model."""
        return self._model.run_duration

    def get_current_time(self):
        """Current model time."""
        return self._model.current_time

    def get_time_step(self):
        """Nominal time step (the model itself does not use a fixed step)."""
        return 1.0

    def get_time_units(self):
        """Units of model time."""
        return "y"

    def get_grid_edge_count(self, grid):
        """Number of edges (Landlab links) in the grid."""
        return self.grid.number_of_links

    def get_grid_edge_nodes(self, grid, edge_nodes):
        """Fill ``edge_nodes`` with the node pairs of every edge."""
        edge_nodes[:] = self.grid.nodes_at_link.flatten()
        return 0

    def get_grid_face_count(self, grid):
        """Number of faces (Landlab cells) in the grid."""
        return self.grid.number_of_cells

    def get_grid_node_count(self, grid):
        """Number of nodes in the grid."""
        return self.grid.number_of_nodes

    def get_grid_nodes_per_face(self, grid, nodes_per_face):
        """Fill ``nodes_per_face`` with the node count of each face (6 here)."""
        # BUGFIX: np.int removed in NumPy 1.24; use the builtin int dtype.
        nodes_per_face[:] = 6 + np.zeros(self.grid.number_of_cells,
                                         dtype=int)
        return 0

    def get_grid_x(self, grid, x):
        """Fill ``x`` with node x-coordinates."""
        x[:] = self.grid.x_of_node
        return 0

    def get_grid_y(self, grid, y):
        """Fill ``y`` with node y-coordinates."""
        y[:] = self.grid.y_of_node
        return 0

    # To implement, Landlab HexModelGrid first needs
    # a nodes_at_cell property
    def get_grid_face_nodes(self, grid, face_nodes):
        raise NotImplementedError("get_grid_node_count")

    def get_grid_face_edges(self, grid, face_edges):
        raise NotImplementedError("get_grid_node_count")

    def get_grid_z(self, grid, z):
        raise NotImplementedError("get_grid_z")
| [
"numpy.finfo",
"numpy.array",
"numpy.zeros",
"landlab.load_params"
] | [((7366, 7459), 'numpy.array', 'np.array', (['[self.grid.number_of_node_rows, self.grid.number_of_node_columns]'], {'dtype': 'np.int'}), '([self.grid.number_of_node_rows, self.grid.number_of_node_columns],\n dtype=np.int)\n', (7374, 7459), True, 'import numpy as np\n'), ((7744, 7755), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (7752, 7755), True, 'import numpy as np\n'), ((831, 844), 'numpy.finfo', 'np.finfo', (['"""d"""'], {}), "('d')\n", (839, 844), True, 'import numpy as np\n'), ((1157, 1178), 'landlab.load_params', 'load_params', (['filename'], {}), '(filename)\n', (1168, 1178), False, 'from landlab import load_params\n'), ((8732, 8781), 'numpy.zeros', 'np.zeros', (['self.grid.number_of_cells'], {'dtype': 'np.int'}), '(self.grid.number_of_cells, dtype=np.int)\n', (8740, 8781), True, 'import numpy as np\n')] |
"""
Authors: <NAME>.
Copyright:
Copyright (c) 2021 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import onnx
from onnx import helper
import pytest
# Athos DIR
import sys, os
import optparse
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
from tests.utils import (
ONNXConfig,
Compiler,
assert_almost_equal,
make_onnx_graph,
run_onnx,
Frontend,
)
# TODO: add conv autopad, convtranspose autopad, convtranspose_dilations, fix grouped conv dims
@pytest.mark.parametrize(
    "a_shape, kernel_shape, pads, strides, output_shape, group",
    [
        pytest.param(
            [1, 1, 5, 5],
            [1, 1, 3, 3],
            [1, 1, 1, 1],
            [1, 1],
            [1, 1, 5, 5],
            1,
            id="conv2d_pad",
        ),
        pytest.param(
            [1, 1, 5, 5],
            [1, 1, 3, 3],
            [0, 0, 0, 0],
            [1, 1],
            [1, 1, 3, 3],
            1,
            id="conv2d_nopad",
        ),
        pytest.param(
            [1, 1, 7, 5],
            [1, 1, 3, 3],
            [1, 1, 1, 1],
            [2, 2],
            [1, 1, 4, 3],
            1,
            id="conv2d_strides_pad",
        ),
        pytest.param(
            [1, 1, 7, 5],
            [1, 1, 3, 3],
            [0, 0, 0, 0],
            [2, 2],
            [1, 1, 3, 2],
            1,
            id="conv2d_strides_nopad",
        ),
        pytest.param(
            [1, 1, 7, 5],
            [1, 1, 3, 3],
            [1, 0, 1, 0],
            [2, 2],
            [1, 1, 4, 2],
            1,
            marks=pytest.mark.skip(reason="Seedot reshape typecheck assertion"),
            id="conv2d_strides_assymetric_pad",
        ),  # padding only along H dimension
        # a_shape, kernel_shape, pads, strides, output_shape",
        pytest.param(
            [1, 2, 4, 16, 16],
            [2, 2, 3, 3, 3],
            [1, 1, 1, 1, 1, 1],
            [1, 1, 1],
            [1, 2, 4, 16, 16],
            1,
            id="conv3d_pad",
        ),
        pytest.param(
            [1, 2, 4, 16, 16],
            [2, 2, 3, 3, 3],
            [0, 0, 0, 0, 0, 0],
            [1, 1, 1],
            [1, 2, 2, 14, 14],
            1,
            id="conv3d_nopad",
        ),
        pytest.param(
            [1, 2, 4, 16, 16],
            [2, 2, 3, 3, 3],
            [1, 1, 1, 1, 1, 1],
            [2, 2, 2],
            [1, 2, 2, 8, 8],
            1,
            id="conv3d_strides_pad",
        ),
        pytest.param(
            [1, 2, 4, 16, 16],
            [2, 2, 3, 3, 3],
            [0, 0, 0, 0, 0, 0],
            [2, 2, 2],
            [1, 2, 1, 7, 7],
            1,
            id="conv3d_strides_nopad",
        ),
        pytest.param(
            [1, 4, 5, 5],
            [1, 1, 3, 3],
            [0, 0, 0, 0],
            [1, 1],
            [1, 1, 3, 3],
            4,
            id="conv2d_grouped",
            marks=pytest.mark.skip(reason="fix test dims"),
        ),
    ],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_conv(
    test_dir, backend, a_shape, kernel_shape, pads, strides, output_shape, group, dtype
):
    """Compile an ONNX Conv node for the MPC backend and compare its output
    against plain onnxruntime execution on random inputs."""
    Op = "Conv"
    # The parametrized shapes are always rank 4 (2-D conv) or rank 5 (3-D conv),
    # so ``version`` is always assigned below.
    if len(a_shape) == 4:
        version = 2  # 2d
    elif len(a_shape) == 5:
        version = 3  # 3d
    if version == 3 and backend in ["2PC_HE", "2PC_OT"]:
        pytest.skip("[conv3d] Missing Support in SCI")
    a = np.random.randn(*a_shape).astype(dtype)
    kernel = np.random.randn(*kernel_shape).astype(dtype)
    # Only need this for its shape
    out = np.zeros(output_shape).astype(dtype)
    # Spatial (H, W[, D]) part of the kernel shape expected by the ONNX node.
    hw_kernel_shape = kernel_shape[-version:]
    node = onnx.helper.make_node(
        Op,
        inputs=["a", "kernel"],
        outputs=["output"],
        kernel_shape=hw_kernel_shape,
        pads=pads,
        strides=strides,
        group=group,
        # Default values for other attributes: dilations=[1, 1], groups=1
    )
    graph = make_onnx_graph(
        node,
        inputs=[a],
        outputs=[out],
        tensors=[kernel],
        tensor_names=["kernel"],
        name=Op + "_test",
    )
    expected_output = run_onnx(graph, [a])
    config = ONNXConfig(backend).parse_io(graph)
    config.config["scale"] = 12
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    mpc_output = compiler.compile_and_run([a])
    # MPC fixed-point arithmetic only matches float execution approximately.
    assert_almost_equal(
        model_output=expected_output, mpc_tensor=mpc_output, precision=2
    )
    return
@pytest.mark.parametrize(
    "a_shape, kernel_shape, pads, strides, output_shape, output_padding",
    [
        pytest.param(
            [1, 1, 3, 3],
            [1, 2, 3, 3],
            [0, 0, 0, 0],
            [1, 1],
            [1, 2, 5, 5],
            False,
            id="convtranspose2d_nopad",
        ),
        pytest.param(
            [1, 1, 3, 3],
            [1, 2, 3, 3],
            [1, 1, 1, 1],
            [1, 1],
            [1, 2, 3, 3],
            False,
            id="convtranspose2d_pad",
        ),
        pytest.param(
            [1, 1, 3, 3],
            [1, 2, 3, 3],
            [0, 0, 0, 0],
            [3, 2],
            [1, 2, 10, 8],
            True,
            id="convtranspose2d_output_padding",
        ),
        pytest.param(
            [1, 1, 3, 4, 5],
            [1, 2, 3, 3, 3],
            [0, 0, 0, 0, 0, 0],
            [1, 1, 1],
            [1, 2, 5, 6, 7],
            False,
            id="convtranspose3d_nopad",
        ),
        pytest.param(
            [1, 1, 3, 4, 5],
            [1, 2, 3, 3, 3],
            [1, 1, 1, 1, 1, 1],
            [1, 1, 1],
            [1, 2, 3, 4, 5],
            False,
            id="convtranspose3d_pad",
        ),
    ],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_convtranspose(
    test_dir,
    backend,
    a_shape,
    kernel_shape,
    pads,
    strides,
    output_shape,
    output_padding,
    dtype,
):
    """Compile an ONNX ConvTranspose node for the MPC backend and compare its
    output against plain onnxruntime execution on random inputs."""
    Op = "ConvTranspose"
    # The parametrized shapes are always rank 4 (2-D) or rank 5 (3-D),
    # so ``version`` is always assigned below.
    if len(a_shape) == 4:
        version = 2  # 2d
    elif len(a_shape) == 5:
        version = 3  # 3d
    if version == 3 and backend in ["2PC_HE", "2PC_OT"]:
        pytest.skip("[conv3dtranspose] Missing Support in SCI")
    a = np.random.randn(*a_shape).astype(dtype)
    kernel = np.random.randn(*kernel_shape).astype(dtype)
    # Only need this for its shape
    out = np.zeros(output_shape).astype(dtype)
    hw_kernel_shape = kernel_shape[-version:]
    # The padded and non-padded variants differed only in the optional
    # output_padding attribute, so build the node once with optional kwargs
    # instead of duplicating the whole call.
    extra_attrs = {"output_padding": [1, 1]} if output_padding else {}
    node = onnx.helper.make_node(
        Op,
        inputs=["a", "kernel"],
        outputs=["output"],
        kernel_shape=hw_kernel_shape,
        pads=pads,
        strides=strides,
        # Default values for other attributes: dilations=[1, 1], groups=1
        **extra_attrs,
    )
    graph = make_onnx_graph(
        node,
        inputs=[a],
        outputs=[out],
        tensors=[kernel],
        tensor_names=["kernel"],
        name=Op + "_test",
    )
    expected_output = run_onnx(graph, [a])
    config = ONNXConfig(backend).parse_io(graph)
    config.config["scale"] = 12
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    mpc_output = compiler.compile_and_run([a])
    # MPC fixed-point arithmetic only matches float execution approximately.
    assert_almost_equal(
        model_output=expected_output, mpc_tensor=mpc_output, precision=2
    )
    return
| [
"tests.utils.assert_almost_equal",
"onnx.helper.make_node",
"tests.utils.make_onnx_graph",
"pytest.mark.skip",
"pytest.param",
"tests.utils.Compiler",
"pytest.mark.parametrize",
"tests.utils.run_onnx",
"os.path.dirname",
"numpy.zeros",
"tests.utils.ONNXConfig",
"pytest.skip",
"numpy.random.r... | [((4021, 4066), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (4044, 4066), False, 'import pytest\n'), ((6702, 6747), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (6725, 6747), False, 'import pytest\n'), ((4656, 4800), 'onnx.helper.make_node', 'onnx.helper.make_node', (['Op'], {'inputs': "['a', 'kernel']", 'outputs': "['output']", 'kernel_shape': 'hw_kernel_shape', 'pads': 'pads', 'strides': 'strides', 'group': 'group'}), "(Op, inputs=['a', 'kernel'], outputs=['output'],\n kernel_shape=hw_kernel_shape, pads=pads, strides=strides, group=group)\n", (4677, 4800), False, 'import onnx\n'), ((4947, 5061), 'tests.utils.make_onnx_graph', 'make_onnx_graph', (['node'], {'inputs': '[a]', 'outputs': '[out]', 'tensors': '[kernel]', 'tensor_names': "['kernel']", 'name': "(Op + '_test')"}), "(node, inputs=[a], outputs=[out], tensors=[kernel],\n tensor_names=['kernel'], name=Op + '_test')\n", (4962, 5061), False, 'from tests.utils import ONNXConfig, Compiler, assert_almost_equal, make_onnx_graph, run_onnx, Frontend\n'), ((5135, 5155), 'tests.utils.run_onnx', 'run_onnx', (['graph', '[a]'], {}), '(graph, [a])\n', (5143, 5155), False, 'from tests.utils import ONNXConfig, Compiler, assert_almost_equal, make_onnx_graph, run_onnx, Frontend\n'), ((5252, 5300), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir', 'Frontend.ONNX'], {}), '(graph, config, test_dir, Frontend.ONNX)\n', (5260, 5300), False, 'from tests.utils import ONNXConfig, Compiler, assert_almost_equal, make_onnx_graph, run_onnx, Frontend\n'), ((5352, 5441), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (5371, 5441), False, 'from tests.utils import ONNXConfig, 
Compiler, assert_almost_equal, make_onnx_graph, run_onnx, Frontend\n'), ((8086, 8200), 'tests.utils.make_onnx_graph', 'make_onnx_graph', (['node'], {'inputs': '[a]', 'outputs': '[out]', 'tensors': '[kernel]', 'tensor_names': "['kernel']", 'name': "(Op + '_test')"}), "(node, inputs=[a], outputs=[out], tensors=[kernel],\n tensor_names=['kernel'], name=Op + '_test')\n", (8101, 8200), False, 'from tests.utils import ONNXConfig, Compiler, assert_almost_equal, make_onnx_graph, run_onnx, Frontend\n'), ((8274, 8294), 'tests.utils.run_onnx', 'run_onnx', (['graph', '[a]'], {}), '(graph, [a])\n', (8282, 8294), False, 'from tests.utils import ONNXConfig, Compiler, assert_almost_equal, make_onnx_graph, run_onnx, Frontend\n'), ((8391, 8439), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir', 'Frontend.ONNX'], {}), '(graph, config, test_dir, Frontend.ONNX)\n', (8399, 8439), False, 'from tests.utils import ONNXConfig, Compiler, assert_almost_equal, make_onnx_graph, run_onnx, Frontend\n'), ((8491, 8580), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (8510, 8580), False, 'from tests.utils import ONNXConfig, Compiler, assert_almost_equal, make_onnx_graph, run_onnx, Frontend\n'), ((1242, 1267), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1257, 1267), False, 'import sys, os\n'), ((4361, 4407), 'pytest.skip', 'pytest.skip', (['"""[conv3d] Missing Support in SCI"""'], {}), "('[conv3d] Missing Support in SCI')\n", (4372, 4407), False, 'import pytest\n'), ((1624, 1724), 'pytest.param', 'pytest.param', (['[1, 1, 5, 5]', '[1, 1, 3, 3]', '[1, 1, 1, 1]', '[1, 1]', '[1, 1, 5, 5]', '(1)'], {'id': '"""conv2d_pad"""'}), "([1, 1, 5, 5], [1, 1, 3, 3], [1, 1, 1, 1], [1, 1], [1, 1, 5, 5],\n 1, id='conv2d_pad')\n", (1636, 1724), False, 'import pytest\n'), ((1825, 1927), 
'pytest.param', 'pytest.param', (['[1, 1, 5, 5]', '[1, 1, 3, 3]', '[0, 0, 0, 0]', '[1, 1]', '[1, 1, 3, 3]', '(1)'], {'id': '"""conv2d_nopad"""'}), "([1, 1, 5, 5], [1, 1, 3, 3], [0, 0, 0, 0], [1, 1], [1, 1, 3, 3],\n 1, id='conv2d_nopad')\n", (1837, 1927), False, 'import pytest\n'), ((2028, 2136), 'pytest.param', 'pytest.param', (['[1, 1, 7, 5]', '[1, 1, 3, 3]', '[1, 1, 1, 1]', '[2, 2]', '[1, 1, 4, 3]', '(1)'], {'id': '"""conv2d_strides_pad"""'}), "([1, 1, 7, 5], [1, 1, 3, 3], [1, 1, 1, 1], [2, 2], [1, 1, 4, 3],\n 1, id='conv2d_strides_pad')\n", (2040, 2136), False, 'import pytest\n'), ((2237, 2347), 'pytest.param', 'pytest.param', (['[1, 1, 7, 5]', '[1, 1, 3, 3]', '[0, 0, 0, 0]', '[2, 2]', '[1, 1, 3, 2]', '(1)'], {'id': '"""conv2d_strides_nopad"""'}), "([1, 1, 7, 5], [1, 1, 3, 3], [0, 0, 0, 0], [2, 2], [1, 1, 3, 2],\n 1, id='conv2d_strides_nopad')\n", (2249, 2347), False, 'import pytest\n'), ((2846, 2968), 'pytest.param', 'pytest.param', (['[1, 2, 4, 16, 16]', '[2, 2, 3, 3, 3]', '[1, 1, 1, 1, 1, 1]', '[1, 1, 1]', '[1, 2, 4, 16, 16]', '(1)'], {'id': '"""conv3d_pad"""'}), "([1, 2, 4, 16, 16], [2, 2, 3, 3, 3], [1, 1, 1, 1, 1, 1], [1, 1,\n 1], [1, 2, 4, 16, 16], 1, id='conv3d_pad')\n", (2858, 2968), False, 'import pytest\n'), ((3069, 3193), 'pytest.param', 'pytest.param', (['[1, 2, 4, 16, 16]', '[2, 2, 3, 3, 3]', '[0, 0, 0, 0, 0, 0]', '[1, 1, 1]', '[1, 2, 2, 14, 14]', '(1)'], {'id': '"""conv3d_nopad"""'}), "([1, 2, 4, 16, 16], [2, 2, 3, 3, 3], [0, 0, 0, 0, 0, 0], [1, 1,\n 1], [1, 2, 2, 14, 14], 1, id='conv3d_nopad')\n", (3081, 3193), False, 'import pytest\n'), ((3294, 3422), 'pytest.param', 'pytest.param', (['[1, 2, 4, 16, 16]', '[2, 2, 3, 3, 3]', '[1, 1, 1, 1, 1, 1]', '[2, 2, 2]', '[1, 2, 2, 8, 8]', '(1)'], {'id': '"""conv3d_strides_pad"""'}), "([1, 2, 4, 16, 16], [2, 2, 3, 3, 3], [1, 1, 1, 1, 1, 1], [2, 2,\n 2], [1, 2, 2, 8, 8], 1, id='conv3d_strides_pad')\n", (3306, 3422), False, 'import pytest\n'), ((3523, 3653), 'pytest.param', 'pytest.param', (['[1, 2, 4, 16, 
16]', '[2, 2, 3, 3, 3]', '[0, 0, 0, 0, 0, 0]', '[2, 2, 2]', '[1, 2, 1, 7, 7]', '(1)'], {'id': '"""conv3d_strides_nopad"""'}), "([1, 2, 4, 16, 16], [2, 2, 3, 3, 3], [0, 0, 0, 0, 0, 0], [2, 2,\n 2], [1, 2, 1, 7, 7], 1, id='conv3d_strides_nopad')\n", (3535, 3653), False, 'import pytest\n'), ((7102, 7157), 'pytest.skip', 'pytest.skip', (['"""[conv3dtranspose] Missing Support in SCI"""'], {}), "('[conv3dtranspose] Missing Support in SCI')\n", (7113, 7157), False, 'import pytest\n'), ((7437, 7568), 'onnx.helper.make_node', 'onnx.helper.make_node', (['Op'], {'inputs': "['a', 'kernel']", 'outputs': "['output']", 'kernel_shape': 'hw_kernel_shape', 'pads': 'pads', 'strides': 'strides'}), "(Op, inputs=['a', 'kernel'], outputs=['output'],\n kernel_shape=hw_kernel_shape, pads=pads, strides=strides)\n", (7458, 7568), False, 'import onnx\n'), ((7750, 7908), 'onnx.helper.make_node', 'onnx.helper.make_node', (['Op'], {'inputs': "['a', 'kernel']", 'outputs': "['output']", 'kernel_shape': 'hw_kernel_shape', 'pads': 'pads', 'strides': 'strides', 'output_padding': '[1, 1]'}), "(Op, inputs=['a', 'kernel'], outputs=['output'],\n kernel_shape=hw_kernel_shape, pads=pads, strides=strides,\n output_padding=[1, 1])\n", (7771, 7908), False, 'import onnx\n'), ((5579, 5694), 'pytest.param', 'pytest.param', (['[1, 1, 3, 3]', '[1, 2, 3, 3]', '[0, 0, 0, 0]', '[1, 1]', '[1, 2, 5, 5]', '(False)'], {'id': '"""convtranspose2d_nopad"""'}), "([1, 1, 3, 3], [1, 2, 3, 3], [0, 0, 0, 0], [1, 1], [1, 2, 5, 5],\n False, id='convtranspose2d_nopad')\n", (5591, 5694), False, 'import pytest\n'), ((5795, 5908), 'pytest.param', 'pytest.param', (['[1, 1, 3, 3]', '[1, 2, 3, 3]', '[1, 1, 1, 1]', '[1, 1]', '[1, 2, 3, 3]', '(False)'], {'id': '"""convtranspose2d_pad"""'}), "([1, 1, 3, 3], [1, 2, 3, 3], [1, 1, 1, 1], [1, 1], [1, 2, 3, 3],\n False, id='convtranspose2d_pad')\n", (5807, 5908), False, 'import pytest\n'), ((6009, 6134), 'pytest.param', 'pytest.param', (['[1, 1, 3, 3]', '[1, 2, 3, 3]', '[0, 0, 0, 0]', '[3, 2]', 
'[1, 2, 10, 8]', '(True)'], {'id': '"""convtranspose2d_output_padding"""'}), "([1, 1, 3, 3], [1, 2, 3, 3], [0, 0, 0, 0], [3, 2], [1, 2, 10, 8\n ], True, id='convtranspose2d_output_padding')\n", (6021, 6134), False, 'import pytest\n'), ((6234, 6368), 'pytest.param', 'pytest.param', (['[1, 1, 3, 4, 5]', '[1, 2, 3, 3, 3]', '[0, 0, 0, 0, 0, 0]', '[1, 1, 1]', '[1, 2, 5, 6, 7]', '(False)'], {'id': '"""convtranspose3d_nopad"""'}), "([1, 1, 3, 4, 5], [1, 2, 3, 3, 3], [0, 0, 0, 0, 0, 0], [1, 1, 1\n ], [1, 2, 5, 6, 7], False, id='convtranspose3d_nopad')\n", (6246, 6368), False, 'import pytest\n'), ((6468, 6600), 'pytest.param', 'pytest.param', (['[1, 1, 3, 4, 5]', '[1, 2, 3, 3, 3]', '[1, 1, 1, 1, 1, 1]', '[1, 1, 1]', '[1, 2, 3, 4, 5]', '(False)'], {'id': '"""convtranspose3d_pad"""'}), "([1, 1, 3, 4, 5], [1, 2, 3, 3, 3], [1, 1, 1, 1, 1, 1], [1, 1, 1\n ], [1, 2, 3, 4, 5], False, id='convtranspose3d_pad')\n", (6480, 6600), False, 'import pytest\n'), ((4417, 4442), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (4432, 4442), True, 'import numpy as np\n'), ((4470, 4500), 'numpy.random.randn', 'np.random.randn', (['*kernel_shape'], {}), '(*kernel_shape)\n', (4485, 4500), True, 'import numpy as np\n'), ((4561, 4583), 'numpy.zeros', 'np.zeros', (['output_shape'], {}), '(output_shape)\n', (4569, 4583), True, 'import numpy as np\n'), ((5169, 5188), 'tests.utils.ONNXConfig', 'ONNXConfig', (['backend'], {}), '(backend)\n', (5179, 5188), False, 'from tests.utils import ONNXConfig, Compiler, assert_almost_equal, make_onnx_graph, run_onnx, Frontend\n'), ((7167, 7192), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (7182, 7192), True, 'import numpy as np\n'), ((7220, 7250), 'numpy.random.randn', 'np.random.randn', (['*kernel_shape'], {}), '(*kernel_shape)\n', (7235, 7250), True, 'import numpy as np\n'), ((7311, 7333), 'numpy.zeros', 'np.zeros', (['output_shape'], {}), '(output_shape)\n', (7319, 7333), True, 'import numpy as np\n'), 
((8308, 8327), 'tests.utils.ONNXConfig', 'ONNXConfig', (['backend'], {}), '(backend)\n', (8318, 8327), False, 'from tests.utils import ONNXConfig, Compiler, assert_almost_equal, make_onnx_graph, run_onnx, Frontend\n'), ((2619, 2680), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Seedot reshape typecheck assertion"""'}), "(reason='Seedot reshape typecheck assertion')\n", (2635, 2680), False, 'import pytest\n'), ((3958, 3998), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""fix test dims"""'}), "(reason='fix test dims')\n", (3974, 3998), False, 'import pytest\n')] |
import numpy as np
class Graph:
    """Graph model for recognizing a string as a sequence of alphabet letters.

    One "object" (column group) exists per pixel column of ``string``; each
    object holds one vertex per alphabet letter.  Vertex weights are stored
    in a 2-D matrix, edge weights in a 4-D array indexed as
    (object_from, object_to, label_from, label_to); unset edges are +inf.
    """
    def __init__(self, alphabet, string):
        self.alphabet = alphabet
        self.string = string
        self.objects_number = string.shape[1]
        n_labels = len(alphabet)
        self.vertex_weights = np.zeros((self.objects_number, n_labels))
        # Every edge starts at +inf; init_edges() fills in the feasible ones.
        self.edge_weights = np.inf * np.ones(
            (self.objects_number, self.objects_number, n_labels, n_labels))
        # label index -> letter, in the alphabet's iteration order
        self.mapping = dict(enumerate(self.alphabet))
    def init_edges(self):
        """Set edge weights to the squared pixel distance of each letter placement."""
        for end in reversed(range(self.objects_number)):
            for label, letter in self.mapping.items():
                glyph = self.alphabet[letter]
                width = glyph.shape[1]
                if end - width + 1 < 0:
                    continue
                window = self.string[:, end - width + 1:end + 1]
                self.edge_weights[end - width, end, :, label] = np.sum((glyph - window) ** 2)
| [
"numpy.sum",
"numpy.arange"
] | [((977, 1007), 'numpy.arange', 'np.arange', (['self.objects_number'], {}), '(self.objects_number)\n', (986, 1007), True, 'import numpy as np\n'), ((1302, 1370), 'numpy.sum', 'np.sum', (['((char_image - self.string[:, i - char_width + 1:i + 1]) ** 2)'], {}), '((char_image - self.string[:, i - char_width + 1:i + 1]) ** 2)\n', (1308, 1370), True, 'import numpy as np\n')] |
import dataclasses
import itertools
import operator
import xml.etree.ElementTree as ET
from abc import ABC, abstractmethod
import collections
from copy import deepcopy
from functools import reduce
from typing import Optional, Callable, List, Union, Iterable
import more_itertools.more
import numpy as np
from more_itertools import collapse, first
from pytest import approx
import botbowl
import botbowl.core.forward_model as forward_model
import botbowl.core.pathfinding.python_pathfinding as pf
import botbowl.core.procedure as procedures
from botbowl import Skill, BBDieResult
from examples.tree_search.hashmap import HashMap, create_gamestate_hash
from tests.util import only_fixed_rolls
# accumulated_prob_2d_roll[t] = P(2d6 >= t); e.g. index 12 -> 1/36, index 7 -> 21/36.
# Indices 0-2 are 36/36 since two dice always show at least 2.
accumulated_prob_2d_roll = np.array([36, 36, 36, 35, 33, 30, 26, 21, 15, 10, 6, 3, 1]) / 36
# Named vector of the hand-crafted state heuristic's components; also used as
# the per-component weights passed to Node.to_xml / SearchTree.to_xml.
HeuristicVector = collections.namedtuple('HeuristicVector', ['score',
                                                           'tv_on_pitch',
                                                           'ball_position',
                                                           'ball_carried',
                                                           'ball_marked'])
@dataclasses.dataclass
class MCTS_Info:
    """Per-state search statistics stored on ActionNode.info for SearchTree users."""
    probabilities: np.ndarray  # presumably prior probabilities over `actions` — not read in this module
    actions: List[botbowl.Action]  # candidate actions; `action_values` and `visits` are indexed like this list
    action_values: np.ndarray  # accumulated value vector per action (divided by `visits` for the mean)
    visits: np.ndarray  # visit count per action
    heuristic: np.ndarray  # heuristic feature vector of this state (see HeuristicVector)
    reward: np.ndarray  # reward vector of this state — TODO confirm semantics against producer
    state_value: float
class Node(ABC):
    """Abstract base class for search-tree nodes.

    A node stores the forward-model steps (``change_log``) that lead from its
    parent's game state to its own, so the shared game object can be moved
    back and forth along the tree without copying states.
    """
    parent: Optional['Node']
    children: List['Node']
    change_log: List[botbowl.core.forward_model.Step]  # steps from parent's state to this node's state
    step_nbr: int  # forward model's step count
    top_proc: str  # str() of the active procedure at node creation, or "GAME_OVER"
    def __init__(self, game: botbowl.Game, parent: Optional['Node']):
        self.step_nbr = game.get_step()
        self.parent = parent
        self.children = []
        # Steps recorded since the parent's step number; a root has none.
        self.change_log = game.trajectory.action_log[parent.step_nbr:] if parent is not None else []
        self.top_proc = str(game.get_procedure()) if not game.state.game_over else "GAME_OVER"
        assert parent is None or len(self.change_log) > 0
    def _connect_child(self, child_node: 'Node'):
        assert child_node.parent is self
        self.children.append(child_node)
    def __repr__(self):
        # Fixed: the old implementation derived the class name with
        # str(type(self)).split(".")[-1], which left a trailing "'>", and the
        # f-string never closed the parenthesis.
        return f"{type(self).__name__}({self.step_nbr=}, {self.top_proc})"
    @staticmethod
    def format_proc(proc) -> str:
        """Return str(proc) truncated at the first '(' (drops the argument details)."""
        index_first_parenthesis = str(proc).find('(')
        return str(proc)[:index_first_parenthesis]
    @abstractmethod
    def to_xml(self, parent, weights):
        """Append an XML representation of this subtree to `parent`."""
        pass
class ActionNode(Node):
    """Tree node for a state where a team must choose an action."""
    team: botbowl.Team  # team to act; home team if the game is over
    explored_actions: List[botbowl.Action]  # expanded actions, parallel to `children`
    is_home: bool
    turn: int
    info: Optional[MCTS_Info]  # Only purpose is to store information for users of SearchTree
    simple_hash: str  # cheap state hash; presumably the HashMap bucket key — verify in hashmap module
    def __init__(self, game: botbowl.Game, parent: Optional[Node]):
        super().__init__(game, parent)
        self.team = game.active_team
        if game.state.game_over:
            self.team = game.state.home_team
        self.is_home = self.team is game.state.home_team
        assert self.is_home or self.team is game.state.away_team or game.state.game_over
        self.explored_actions = []
        self.turn = self.team.state.turn
        self.info = None
        self.simple_hash = create_gamestate_hash(game)
    @property
    def depth(self):
        """Number of ActionNode ancestors (ChanceNodes are not counted)."""
        return len(list(filter(lambda n: type(n) is ActionNode, self.get_all_parents(include_self=False))))
    def connect_child(self, child_node: Node, action: botbowl.Action):
        """Attach `child_node` as the outcome of taking `action` from this node."""
        super()._connect_child(child_node)
        self.explored_actions.append(action)
    def get_child_action(self, child: Node) -> botbowl.Action:
        """Return the action that leads from this node to `child`."""
        assert child in self.children
        return self.explored_actions[self.children.index(child)]
    def make_root(self):
        """Detach from the parent and drop the change log — this node becomes the tree root."""
        self.parent = None
        self.change_log.clear()
    def get_all_parents(self, include_self) -> Iterable[Node]:
        """Yield ancestors from here up to the root, optionally starting with self."""
        if include_self:
            yield self
        node = self.parent
        while node is not None:
            yield node
            node = node.parent
        return
    def get_children_from_action(self, action: botbowl.Action) -> Iterable['ActionNode']:
        """Return the ActionNodes reachable via `action` (ChanceNodes collapsed); [] if unexplored."""
        if action not in self.explored_actions:
            return []
        child = self.children[self.explored_actions.index(action)]
        return get_action_node_children(child)
    def get_accum_prob(self, *, end_node=None):
        """
        :param end_node: node where search ends, if None (default) it ends at the root of the tree
        :returns: accumulated probability from chance nodes
        """
        node = self
        prob = 1.0
        while node.parent is not end_node:
            if isinstance(node.parent, ChanceNode):
                prob *= node.parent.get_child_prob(node)
            node = node.parent
        return prob
    def __repr__(self):
        team = "home" if self.is_home else "away"
        return f"ActionNode({team}, {self.top_proc}, depth={self.depth}, acc_prob={self.get_accum_prob():.3f}, " \
               f"len(children)={len(self.children)})"
    @staticmethod
    def format_action(action: botbowl.Action) -> str:
        """Format an action as 'ACTION_TYPE [position]' for XML output."""
        pos_str = "" if action.position is None else f" {action.position}"
        return f"{action.action_type.name}{pos_str}"
    def to_xml(self, parent: Union[ET.Element, ET.SubElement], weights: HeuristicVector):
        team = "home" if self.is_home else "away"
        tag_attributes = {'proc': Node.format_proc(self.top_proc),
                          'team': team,
                          'num_actions': str(len(self.explored_actions))}
        this_tag = ET.SubElement(parent, 'action_node',
                                 attrib=tag_attributes)
        for action, child_node in zip(self.explored_actions, self.children):
            a_index = self.info.actions.index(action)
            visits = self.info.visits[a_index]
            # mean weighted action value: (weights · accumulated values) / visit count
            action_values = np.dot(weights, self.info.action_values[a_index]) / visits
            action_tag_attributes = {'action': ActionNode.format_action(action),
                                     'visits': str(visits),
                                     'action_values': f'{action_values:.3f}'}
            action_tag = ET.SubElement(this_tag, 'action', attrib=action_tag_attributes)
            child_node.to_xml(action_tag, weights)
def get_action_node_children(node: Node) -> Iterable[ActionNode]:
    """Collect every ActionNode reachable from `node` without crossing another ActionNode."""
    if isinstance(node, ActionNode):
        return [node]
    if isinstance(node, ChanceNode):
        nested = (get_action_node_children(child) for child in node.children)
        return more_itertools.collapse(nested)
    raise ValueError()
class ChanceNode(Node):
    """
    Node holding the possible outcomes of a dice roll, one child per outcome.

    If no children are connected yet, the game can be stepped from here until
    actions become available; conceptually the node could then be converted
    into an ActionNode.
    """
    child_probability: List[float]
    def __init__(self, game: botbowl.Game, parent: Optional[Node]):
        super().__init__(game, parent)
        self.child_probability = []
    def connect_child(self, child_node: Node, prob: float):
        """Attach `child_node` as an outcome occurring with probability `prob`."""
        super()._connect_child(child_node)
        self.child_probability.append(prob)
    def get_child_prob(self, child_node: Node) -> float:
        """Return the probability of the outcome represented by `child_node`."""
        assert child_node in self.children
        index = self.children.index(child_node)
        return self.child_probability[index]
    def to_xml(self, parent: Union[ET.Element, ET.SubElement], weights: HeuristicVector):
        node_tag = ET.SubElement(parent, 'chance_node',
                              attrib={'proc': Node.format_proc(self.top_proc)})
        for probability, child in zip(self.child_probability, self.children):
            child: Union[ChanceNode, ActionNode]
            outcome_tag = ET.SubElement(node_tag, 'outcome', attrib={'p': f"{probability:.2f}"})
            child.to_xml(outcome_tag, weights)
class SearchTree:
    """Search tree over game states, sharing ONE game object moved via the forward model."""
    game: botbowl.Game  # private deep copy; moved around the tree with forward()/revert()
    root_node: ActionNode
    all_action_nodes: HashMap  # all ActionNodes in the tree, bucketed by state hash
    current_node: ActionNode  # node whose state `game` currently holds
    on_every_action_node: Callable[['SearchTree', ActionNode], None]  # hook run on every new ActionNode
    def __init__(self, game, on_every_action_node=None):
        self.game = deepcopy(game)
        self.game.home_agent.human = True
        self.game.away_agent.human = True
        if not self.game.trajectory.enabled:
            self.game.enable_forward_model()
        # NOTE(review): root node is built from the caller's game, not the deep
        # copy — presumably equivalent at this point; confirm step counts match.
        self.root_node = ActionNode(game, None)
        self.all_action_nodes = HashMap([self.root_node])
        self.current_node = self.root_node
        self.on_every_action_node = on_every_action_node
        if self.on_every_action_node is not None:
            self.on_every_action_node(self, self.root_node)
    def set_new_root(self, game: botbowl.Game) -> None:
        """Re-root the tree at the node matching `game`'s state, or rebuild if none matches."""
        if self.game is game:
            raise ValueError("Can't search the tree for its own game object.")
        target_node = ActionNode(game, None)
        found_node = None
        # compare with all nodes that have the same hash
        for node in self.all_action_nodes[target_node]:
            self.set_game_to_node(node)
            diff = self.game.state.compare(game.state)
            # report entries are cosmetic and allowed to differ
            diff = filter(lambda d: d[:13] != 'state.reports', diff)
            diff = list(diff)
            if len(diff) == 0:
                found_node = node
                break
        if found_node is None:
            # no matching state in the tree: start over from scratch
            self.__init__(game, self.on_every_action_node)
        else:
            self.root_node = found_node
            self.root_node.make_root()
            self.set_game_to_node(self.root_node)
            self.all_action_nodes = HashMap()
            self._look_for_action_nodes(self.root_node)  # add all children to the 'self.all_action_nodes'
    def set_game_to_node(self, target_node: ActionNode) -> None:
        """Uses forward model to set self.game to the state of Node"""
        assert self.current_node.step_nbr == self.game.get_step(), \
            f"gamestate {self.game.get_step()} and SearchTree {self.current_node.step_nbr} are not synced, big fault!"
        if target_node is self.current_node:
            return
        if target_node is self.root_node:
            self.game.revert(self.root_node.step_nbr)
            self.current_node = target_node
            return
        assert target_node in self.all_action_nodes, "target node is not in SearchTree, major fault"
        # Case 1: current_node is an ancestor of target_node -> forward only.
        if self.current_node.step_nbr < target_node.step_nbr \
                and self.current_node in itertools.takewhile(lambda n: n.step_nbr >= self.current_node.step_nbr,
                                                             target_node.get_all_parents(include_self=False)):
            # forward current_node -> target_node
            nodes_to_forward = itertools.takewhile(lambda n: n is not self.current_node,
                                                    target_node.get_all_parents(include_self=True))
            for node in reversed(list(nodes_to_forward)):
                self.game.forward(node.change_log)
        # Case 2: target_node is an ancestor of current_node -> revert only.
        elif self.current_node.step_nbr > target_node.step_nbr \
                and target_node in itertools.takewhile(lambda n: n.step_nbr >= target_node.step_nbr,
                                                       self.current_node.get_all_parents(include_self=False)):
            self.game.revert(target_node.step_nbr)
        else:  # not in same branch. We need to revert back to a common node and the forward to target
            current_node_parents = set(self.current_node.get_all_parents(include_self=False))
            first_common_node = more_itertools.first_true(iterable=target_node.get_all_parents(include_self=True),
                                                          pred=lambda n: n in current_node_parents)
            self.game.revert(first_common_node.step_nbr)
            nodes_to_forward = itertools.takewhile(lambda n: n is not first_common_node,
                                                   target_node.get_all_parents(include_self=True))
            for node in reversed(list(nodes_to_forward)):
                self.game.forward(node.change_log)
        self.current_node = target_node
        assert target_node.step_nbr == self.game.get_step(), f"{target_node.step_nbr} != {self.game.get_step()}"
    def expand_action_node(self, node: ActionNode, action: botbowl.Action) -> List[ActionNode]:
        """Expand `action` at `node`; returns the new ActionNodes created by the expansion."""
        assert action not in node.explored_actions, f"{action} has already been explored in this node"
        assert node in self.all_action_nodes, f"{node} is not in all_action_nodes"
        self.set_game_to_node(node)
        new_node = expand_action(self.game, action, node)
        node.connect_child(new_node, action)
        self.set_game_to_node(self.root_node)
        # find all newly added action nodes
        return self._look_for_action_nodes(new_node)
    def _look_for_action_nodes(self, node: Node) -> List[ActionNode]:
        """Recursively register every ActionNode under `node`; returns those found."""
        new_action_nodes = []
        if isinstance(node, ActionNode):
            assert node not in self.all_action_nodes
            new_action_nodes.append(node)
            self.all_action_nodes.add(node)
            if self.on_every_action_node is not None:
                self.on_every_action_node(self, node)
        for child_node in node.children:
            new_action_nodes.extend(self._look_for_action_nodes(child_node))
        return new_action_nodes
    def to_xml(self, weights: HeuristicVector = None) -> ET.ElementTree:
        """Serialize the whole tree to XML; default weights count only the score component."""
        if weights is None:
            weights = HeuristicVector(score=1, tv_on_pitch=0, ball_position=0, ball_carried=0, ball_marked=0)
        root = ET.Element('search_tree')
        self.root_node.to_xml(root, weights)
        if hasattr(ET, 'indent'):  # ET.indent exists from Python 3.9
            ET.indent(root)
        return ET.ElementTree(root)
def expand_action(game: botbowl.Game, action: botbowl.Action, parent: ActionNode) -> Node:
    """Apply `action` to `game` and expand the resulting state into a subtree.

    :param game: game object used for the calculation; restored to its
        original state by the time expansion completes.
    :param action: the action to evaluate.
    :param parent: node representing the state before the action.
    :returns: subtree describing every possible outcome of the action;
        outcome probabilities sum to 1.0.
    Not called recursively.
    """
    # noinspection PyProtectedMember
    assert game._is_action_allowed(action)
    assert game.trajectory.enabled
    game.config.fast_mode = False
    with only_fixed_rolls(game):
        game.step(action)
    return expand_none_action(game, parent)
def get_expanding_function(proc, moving_handled, pickup_handled) -> Optional[Callable[[botbowl.Game, Node], Node]]:
    """Return the expansion handler for `proc`, or None if plain stepping applies.

    `moving_handled` / `pickup_handled` suppress the Dodge/GFI and Pickup
    handlers for procedures already covered by an enclosing expansion.
    """
    proc_type = type(proc)
    if proc_type in {procedures.Dodge, procedures.GFI}:
        return None if moving_handled else expand_moving
    if proc_type is procedures.Pickup:
        handle = (not pickup_handled) and proc.roll is None
        return expand_pickup if handle else None
    if proc_type is procedures.Block:
        handle = proc.roll is None and proc.gfi is False
        return expand_block if handle else None
    # Procedures that are always expanded by a dedicated handler.
    simple_handlers = {
        procedures.Armor: expand_armor,
        procedures.Injury: expand_injury,
        procedures.Bounce: expand_bounce,
        procedures.Catch: expand_catch,
        procedures.ThrowIn: expand_throw_in,
        procedures.PreKickoff: handle_ko_wakeup,
        procedures.ClearBoard: handle_sweltering_heat,
    }
    return simple_handlers.get(proc_type)
# saved for later
# procedures.Foul
# procedures.KickoffTable
# procedures.PassAttempt
# procedures.Intercept
# procedures.Scatter
def expand_none_action(game: botbowl.Game, parent: Node, moving_handled=False, pickup_handled=False) -> Node:
    """Step the game forward until actions are available, branching on dice rolls.

    :param game: the game state is changed during expansion but restored to the state of `parent`
    :param parent: shall represent the current state of `game`; game is restored to parent.step_nbr
    :param moving_handled: suppress the Dodge/GFI expansion (already handled by caller)
    :param pickup_handled: suppress the Pickup expansion (already handled by caller)
    :returns: a subclass of Node:
        - ChanceNode in a nestled structure with multiple ActionNode as leaf nodes, or
        - ActionNode if there is only one possible outcome.
    Called recursively.
    """
    while len(game.state.available_actions) == 0 and not game.state.game_over:
        proc = game.get_procedure()
        expand_func = get_expanding_function(proc, moving_handled, pickup_handled)
        if expand_func is not None:
            # Delegate branching to the procedure-specific handler; no fixed
            # rolls may be pending before or after it.
            assert len(botbowl.D6.FixedRolls) == 0
            return_node = expand_func(game, parent)
            assert len(botbowl.D6.FixedRolls) == 0
            game.revert(parent.step_nbr)
            return return_node
        try:
            # only_fixed_rolls with no fixes presumably asserts the step
            # consumes no random rolls — verify in tests.util.
            with only_fixed_rolls(game):
                game.step()
        except AttributeError as e:
            raise e
    action_node = ActionNode(game, parent)
    game.revert(parent.step_nbr)
    assert parent.step_nbr == game.get_step()
    return action_node
def expand_throw_in(game: botbowl.Game, parent: Node) -> Node:
    """Resolve a ThrowIn with fixed, middling dice and continue expansion.

    The throw-in direction/distance is not branched on — a single fixed
    outcome is used so the tree does not explode. The distance fixes depend
    on the configured throw-in die.
    """
    # noinspection PyTypeChecker
    active_proc: procedures.ThrowIn = game.get_procedure()
    assert type(active_proc) is procedures.ThrowIn
    d6_fixes = []
    d3_fixes = [2]  # direction roll
    if game.config.throw_in_dice == "2d6":
        d6_fixes = [3, 4]
    elif game.config.throw_in_dice == "d6":
        d6_fixes = [4]
    elif game.config.throw_in_dice == "d3":
        # BUG FIX: was `d3_fixes.append = [1]`, which assigns to the bound
        # method and raises AttributeError at runtime.
        d3_fixes.append(1)  # distance roll is sampled after direction roll
    with only_fixed_rolls(game, d3=d3_fixes, d6=d6_fixes):
        game.step()
    assert active_proc is not game.get_procedure()
    return expand_none_action(game, parent)
def expand_bounce(game: botbowl.Game, parent: Node) -> Node:
    """Branch a ball Bounce, grouping the eight directions by tackle-zone count.

    Squares with the same (home_tz, away_tz) signature (or 'out') are treated
    as equivalent; one representative square is sampled per group and the
    group's probability is count/num_squares.
    """
    # noinspection PyTypeChecker
    active_proc: procedures.Bounce = game.get_procedure()
    assert type(active_proc) is procedures.Bounce
    new_parent = ChanceNode(game, parent)
    ball_pos = active_proc.piece.position
    # todo: consider ball bouncing out.
    sq_to_num_tz = {}
    for sq in game.get_adjacent_squares(ball_pos, occupied=False, out=True):
        if sq.out_of_bounds:
            sq_to_num_tz[sq] = 'out'
        else:
            home_tz = len(game.get_adjacent_players(sq, team=game.state.home_team, standing=True))
            away_tz = len(game.get_adjacent_players(sq, team=game.state.away_team, standing=True))
            sq_to_num_tz[sq] = (home_tz, away_tz)
    num_squares = len(sq_to_num_tz)
    if not (num_squares > 0):
        raise AssertionError(f"num_squares should be non-zero! ball_pos={ball_pos}")
    # invert the mapping: tackle-zone signature -> list of squares
    num_tz_to_sq = {}
    for sq, num_tz in sq_to_num_tz.items():
        num_tz_to_sq.setdefault(num_tz, []).append(sq)
    for num_tz, count in collections.Counter(sq_to_num_tz.values()).items():
        possible_squares = num_tz_to_sq[num_tz]
        # sample one representative square for this equivalence group
        square = np.random.choice(possible_squares, 1)[0]
        roll = botbowl.D8.d8_from_xy[(square.x - ball_pos.x, square.y - ball_pos.y)]
        expand_with_fixes(game, new_parent, probability=count / num_squares, d8=[roll])
        assert game.get_step() == new_parent.step_nbr
    sum_prob = sum(new_parent.child_probability)  # NOTE(review): unused since the normalization below is commented out
    # new_parent.child_probability = [prob/sum_prob for prob in new_parent.child_probability]
    assert sum(new_parent.child_probability) == approx(1.0, abs=1e-9)
    assert game.get_step() == new_parent.step_nbr
    return new_parent
def expand_pickup(game: botbowl.Game, parent: Node) -> Node:
    """Branch a Pickup roll into success/failure weighted by the pickup probability."""
    # noinspection PyTypeChecker
    active_proc: procedures.Pickup = game.get_procedure()
    assert type(active_proc) is procedures.Pickup
    assert active_proc.roll is None
    probability_success = game.get_pickup_prob(active_proc.player, active_proc.ball.position)
    new_parent = ChanceNode(game, parent)
    # SUCCESS SCENARIO
    with only_fixed_rolls(game, d6=[6]):
        game.step()
    success_node = expand_none_action(game, new_parent, pickup_handled=True)
    new_parent.connect_child(success_node, probability_success)
    assert game.get_step() == new_parent.step_nbr
    # FAILURE SCENARIO
    fixes = [1]
    if active_proc.player.has_skill(Skill.SURE_HANDS):
        fixes.append(1)  # extra failed roll for the Sure Hands reroll
    with only_fixed_rolls(game, d6=fixes):
        while len(botbowl.D6.FixedRolls) > 0:
            game.step()
    fail_node = expand_none_action(game, new_parent, pickup_handled=True)
    new_parent.connect_child(fail_node, 1 - probability_success)
    assert game.get_step() == new_parent.step_nbr
    return new_parent
def expand_moving(game: botbowl.Game, parent: Node) -> Node:
    """Expand a Dodge/GFI procedure of a move action into success/failure branches.

    Computes the probability of completing the remaining path, samples a single
    failure point weighted by roll difficulty, and creates two children on a
    ChanceNode: the whole path succeeding, and failure at the sampled step.
    """
    # noinspection PyTypeChecker
    active_proc: Union[procedures.GFI, procedures.Dodge] = game.get_procedure()
    assert type(active_proc) is procedures.Dodge or type(active_proc) is procedures.GFI
    move_action_proc: procedures.MoveAction = first(proc for proc in reversed(game.state.stack.items)
                                                    if isinstance(proc, procedures.MoveAction))
    is_blitz = type(move_action_proc) is procedures.BlitzAction
    is_handoff = type(move_action_proc) is procedures.HandoffAction
    player = move_action_proc.player
    if move_action_proc.steps is not None:
        final_step = move_action_proc.steps[-1]
    else:
        if is_blitz:
            block_proc: procedures.Block = first(
                filter(lambda proc: type(proc) is procedures.Block, game.state.stack.items))
            final_step = block_proc.defender.position
        elif is_handoff:
            raise ValueError()
        else:
            final_step = active_proc.position
    is_pickup = game.get_ball().position == final_step and not game.get_ball().is_carried
    path = move_action_proc.paths[final_step]
    if len(path.rolls) != len(path.steps):
        raise AssertionError("wrong!")
    """
    This block of code sets two important variables:
    probability_success - probability of the remaining path
    rolls - list[int] - the remaining rolls of the path
    Normal case we just fetch this from the path object. If we're in a rerolled proc, it's nasty...
    """
    if active_proc.roll is None:
        probability_success = path.prob
        rolls = list(collapse(path.rolls))
        if is_pickup:
            # remove the pickup roll and probability
            rolls.pop()
            probability_success /= game.get_pickup_prob(active_proc.player, final_step)
    else:
        with only_fixed_rolls(game):
            game.step()
        new_proc = game.get_procedure()
        if type(new_proc) not in {procedures.GFI, procedures.Dodge}:
            assert not active_proc.reroll.use_reroll
            return expand_none_action(game, parent)
        # if we get here, it means that a reroll was used.
        assert new_proc is active_proc
        assert active_proc.roll is None
        assert active_proc.reroll is None
        current_step = active_proc.position
        try:
            assert player.position.distance(current_step) == 1 or is_pickup or is_blitz
        except AssertionError as e:
            raise e
        i = 0
        while path.steps[i] != current_step:
            i += 1
        remaining_current_step_rolls = path.rolls[i][:]
        if is_pickup and current_step == final_step:
            remaining_current_step_rolls.pop()
        # Count how many of the current step's rolls are still pending on the stack.
        num_current_step_remaining_rolls = 0
        gfi_proc = game.get_proc(procedures.GFI)
        dodge_proc = game.get_proc(procedures.Dodge)
        block_proc = game.get_proc(procedures.Block)
        if dodge_proc is not None:
            num_current_step_remaining_rolls += 1
        if gfi_proc is not None and block_proc is None:
            num_current_step_remaining_rolls += 1
        remaining_current_step_rolls = remaining_current_step_rolls[
                                       len(remaining_current_step_rolls) - num_current_step_remaining_rolls:]
        probability_success = reduce(operator.mul, map(lambda d: (7 - d) / 6, remaining_current_step_rolls), 1.0)
        rolls = list(collapse(remaining_current_step_rolls))
        if current_step != final_step:
            # Re-run the pathfinder from the current square for the tail of the path.
            step_count = game.get_step()
            if block_proc is not None:
                player.state.moves -= 1
            if player.position != current_step:
                try:
                    game.move(player, current_step)
                except AssertionError as e:
                    raise e
            new_path = pf.get_safest_path(game, player, final_step, blitz=is_blitz)
            game.revert(step_count)
            # try:
            #     # assert new_path.steps == path.steps[-len(new_path):] this assert can't be made because of small randomness in pathfinder
            #     assert list(collapse(new_path.rolls)) == list(collapse(path.rolls[-len(new_path):])), f"{new_path.rolls} != {path.rolls[-len(new_path):]}"
            # except AssertionError as e:
            #     raise e
            try:
                if new_path is not None:
                    rolls.extend(collapse(new_path.rolls))
                    probability_success *= new_path.prob
            except AttributeError as e:
                raise e
            if is_pickup:
                # remove the pickup roll and probability
                rolls.pop()
                probability_success /= game.get_pickup_prob(active_proc.player, final_step)
    try:
        # Sample which remaining roll fails, weighted by roll difficulty.
        p = np.array(rolls) / sum(rolls)
        index_of_failure = np.random.choice(range(len(rolls)), 1, p=p)[0]
    except ValueError as e:
        raise e
    # STEP UNTIL FAILURE (possibly no steps at all)
    with only_fixed_rolls(game, d6=[6] * index_of_failure):
        while len(botbowl.D6.FixedRolls) > 0:
            if len(game.get_available_actions()) > 0:
                raise AttributeError("wrong")
            game.step()
    new_parent = ChanceNode(game, parent)
    debug_step_count = game.get_step()
    # SUCCESS SCENARIO
    with only_fixed_rolls(game, d6=[6] * (len(rolls) - index_of_failure)):
        while len(botbowl.D6.FixedRolls) > 0:
            if type(game.get_procedure()) not in {procedures.GFI, procedures.Block, procedures.Dodge, procedures.Move,
                                                  procedures.MoveAction, procedures.BlitzAction, procedures.HandoffAction}:
                raise AttributeError("wrong")
            if len(game.get_available_actions()) > 0:
                raise AttributeError("wrong")
            if type(game.get_procedure()) is procedures.Block and not game.get_procedure().gfi:
                raise AttributeError("wrong")
            game.step()
    success_node = expand_none_action(game, new_parent, moving_handled=True)
    new_parent.connect_child(success_node, probability_success)
    assert debug_step_count == game.get_step()
    # FAILURE SCENARIO
    fail_rolls = [1]
    if type(game.get_procedure()) is procedures.Dodge and player.can_use_skill(Skill.DODGE):
        fail_rolls.append(1)  # the Dodge-skill reroll must fail too
    with only_fixed_rolls(game, d6=fail_rolls):
        while len(botbowl.D6.FixedRolls) > 0:
            if len(game.get_available_actions()) > 0:
                raise AttributeError("wrong")
            game.step()
    if type(game.get_procedure()) is procedures.Reroll and len(game.get_available_actions()) == 0:
        with only_fixed_rolls(game):
            game.step()
    # BUG FIX: this sanity check used `is {procedures.Dodge, procedures.GFI}`,
    # which compares the type to a set literal and is always False (dead check).
    if type(game.get_procedure()) in {procedures.Dodge, procedures.GFI}:
        raise ValueError()
    fail_node = expand_none_action(game, new_parent, moving_handled=True)
    new_parent.connect_child(fail_node, 1 - probability_success)
    assert debug_step_count == game.get_step()
    return new_parent
def expand_armor(game: botbowl.Game, parent: Node) -> Node:
    """Branch an Armor roll into 'broken' and 'not broken' outcomes."""
    # noinspection PyTypeChecker
    proc: procedures.Armor = game.get_procedure()
    assert not proc.foul
    break_prob = accumulated_prob_2d_roll[proc.player.get_av() + 1]
    chance_node = ChanceNode(game, parent)
    # Forced high roll = armor broken, forced low roll = armor holds.
    expand_with_fixes(game, chance_node, break_prob, d6=[6, 6])
    expand_with_fixes(game, chance_node, 1 - break_prob, d6=[1, 1])
    return chance_node
def expand_injury(game: botbowl.Game, parent: Node) -> Node:
    """Branch an Injury roll into KO (player removed) and Stun outcomes."""
    # noinspection PyTypeChecker
    proc: procedures.Injury = game.get_procedure()
    assert not proc.foul
    if proc.in_crowd:
        # Crowd injuries are not branched: resolve straight to KO.
        with only_fixed_rolls(game, d6=[5, 4]):
            game.step()
        return expand_none_action(game, parent)
    removal_prob = accumulated_prob_2d_roll[8]
    chance_node = ChanceNode(game, parent)
    expand_with_fixes(game, chance_node, removal_prob, d6=[5, 4])      # KO
    expand_with_fixes(game, chance_node, 1 - removal_prob, d6=[1, 1])  # Stun
    return chance_node
def expand_block(game: botbowl.Game, parent: Node) -> Node:
    """Branch a Block roll into the four outcome groups (def down / push / both down / att down).

    Outcome counts on a d6 block die are adjusted for Block/Dodge skills and
    crowd surfing, then converted into probabilities for the multi-die roll
    (the weaker side choosing when `dice` is negative).
    """
    proc: botbowl.Block = game.get_procedure()
    assert type(proc) is botbowl.Block
    assert not proc.gfi, "Can't handle GFI:s here =( "
    assert proc.roll is None
    attacker: botbowl.Player = proc.attacker
    defender: botbowl.Player = proc.defender
    dice = game.num_block_dice(attacker, defender)  # negative means opponent chooses
    num_dice = abs(dice)
    # initialize as 1d block without skills
    dice_outcomes = np.array([2, 2, 1, 1], dtype=int)  # die faces per outcome group (sums to 6)
    DEF_DOWN, NOONE_DOWN, ALL_DOWN, ATT_DOWN = (0, 1, 2, 3)
    die_results = ([BBDieResult.DEFENDER_DOWN, BBDieResult.DEFENDER_STUMBLES],
                   [BBDieResult.PUSH],
                   [BBDieResult.BOTH_DOWN],
                   [BBDieResult.ATTACKER_DOWN])
    # Block skill: BOTH_DOWN moves out of ALL_DOWN into the skill-holder's good group.
    who_has_block = (attacker.has_skill(Skill.BLOCK), defender.has_skill(Skill.BLOCK))
    if any(who_has_block):
        dice_outcomes[ALL_DOWN] = 0
        die_results[ALL_DOWN].clear()
        if who_has_block == (True, True):  # both
            dice_outcomes[NOONE_DOWN] += 1
            die_results[NOONE_DOWN].append(BBDieResult.BOTH_DOWN)
        elif who_has_block == (True, False):  # only attacker
            dice_outcomes[DEF_DOWN] += 1
            die_results[DEF_DOWN].append(BBDieResult.BOTH_DOWN)
        elif who_has_block == (False, True):  # only defender
            dice_outcomes[ATT_DOWN] += 1
            die_results[ATT_DOWN].append(BBDieResult.BOTH_DOWN)
    # Crowd surf: a push removes the defender; Dodge turns stumbles into pushes.
    crowd_surf: bool = game.get_push_squares(attacker.position, defender.position)[0].out_of_bounds
    if crowd_surf:
        dice_outcomes[DEF_DOWN] += 2
        dice_outcomes[NOONE_DOWN] -= 2
        die_results[DEF_DOWN].append(BBDieResult.PUSH)
        die_results[NOONE_DOWN].remove(BBDieResult.PUSH)
    elif defender.has_skill(Skill.DODGE):  # and not attacker.has_skill(Skill.TACKLE):
        dice_outcomes[DEF_DOWN] -= 1
        dice_outcomes[NOONE_DOWN] += 1
        die_results[DEF_DOWN].remove(BBDieResult.DEFENDER_STUMBLES)
        die_results[NOONE_DOWN].append(BBDieResult.DEFENDER_STUMBLES)
    # Convert outcome counts to probabilities. The chooser picks the best
    # available group, so evaluate best-first (reversed when opponent chooses):
    # P(group) = P(no better group) * P(at least one die in this group).
    prob = np.zeros(4)
    probability_left = 1.0
    available_dice = 6
    evaluation_order = [DEF_DOWN, NOONE_DOWN, ALL_DOWN, ATT_DOWN]
    if dice < 0:
        evaluation_order = reversed(evaluation_order)
    for i in evaluation_order:
        prob[i] = probability_left * (1 - (1 - dice_outcomes[i] / available_dice) ** num_dice)
        available_dice -= dice_outcomes[i]
        probability_left -= prob[i]
    assert available_dice == 0 and probability_left == approx(0) and prob.sum() == approx(1)
    new_parent = ChanceNode(game, parent)
    for prob, die_res in zip(prob, die_results):  # NOTE(review): `prob` rebinds the array
        if prob == approx(0) or len(die_res) == 0:
            assert prob == approx(0) and len(die_res) == 0
            continue
        expand_with_fixes(game, new_parent, prob,
                          block_dice=np.random.choice(die_res, num_dice))
    assert sum(new_parent.child_probability) == approx(1.0)
    return new_parent
def expand_catch(game: botbowl.Game, parent: Node) -> Node:
    """Branch a Catch roll into success/failure weighted by the catch probability."""
    # noinspection PyTypeChecker
    proc: procedures.Catch = game.get_procedure()
    assert type(proc) is procedures.Catch
    if not proc.player.can_catch():
        # No roll possible: just step past the procedure.
        with only_fixed_rolls(game):
            game.step()
        assert game.get_procedure() is not proc
        return expand_none_action(game, parent)
    if proc.roll is not None:
        # A roll already happened (e.g. reroll decision); step once and re-check.
        with only_fixed_rolls(game):
            game.step()
        if game.get_procedure() is not proc:
            # If the catch proc was removed from the stack, we just continue
            return expand_none_action(game, parent)
    p_catch = game.get_catch_prob(proc.player, accurate=proc.accurate, handoff=proc.handoff)
    new_parent = ChanceNode(game, parent)
    assert proc.player.can_catch()
    assert proc.roll is None
    # Success scenario
    expand_with_fixes(game, new_parent, p_catch, d6=[6])
    # Failure scenario (two failed rolls if the Catch skill grants a reroll)
    if proc.player.has_skill(Skill.CATCH):
        expand_with_fixes(game, new_parent, 1 - p_catch, d6=[1, 1])
    else:
        expand_with_fixes(game, new_parent, 1 - p_catch, d6=[1])
    return new_parent
def handle_sweltering_heat(game: botbowl.Game, parent: Node) -> Node:
    """Step past ClearBoard, fixing sweltering-heat rolls so no player is removed."""
    # noinspection PyTypeChecker
    proc: procedures.ClearBoard = game.get_procedure()
    assert type(proc) is procedures.ClearBoard
    heat = game.state.weather == botbowl.WeatherType.SWELTERING_HEAT
    d6_fixes = [6] * len(game.get_players_on_pitch()) if heat else []
    with only_fixed_rolls(game, d6=d6_fixes):
        game.step()
    return expand_none_action(game, parent)
def handle_ko_wakeup(game: botbowl.Game, parent: Node) -> Node:
    """Step through PreKickoff with fixed rolls so no knocked-out player wakes up."""
    # noinspection PyTypeChecker
    proc: procedures.PreKickoff = game.get_procedure()
    assert type(proc) is procedures.PreKickoff
    knocked_out = game.get_knocked_out(proc.team)
    with only_fixed_rolls(game, d6=[1] * len(knocked_out)):
        while game.get_procedure() is proc:
            game.step()
    assert game.get_procedure() is not proc
    return expand_none_action(game, parent)
def expand_with_fixes(game, parent, probability, **fixes):
    """Step the game with fixed dice, expand the result, and attach it to `parent`.

    :param probability: probability assigned to the new child on `parent`.
    :param fixes: dice fixes forwarded to only_fixed_rolls
        (e.g. d6=[...], d8=[...], block_dice=[...]).
    """
    with only_fixed_rolls(game, **fixes):
        game.step()
    child = expand_none_action(game, parent)
    parent.connect_child(child, probability)
| [
"pytest.approx",
"tests.util.only_fixed_rolls",
"collections.namedtuple",
"numpy.random.choice",
"examples.tree_search.hashmap.create_gamestate_hash",
"xml.etree.ElementTree.indent",
"more_itertools.collapse",
"numpy.array",
"numpy.zeros",
"xml.etree.ElementTree.Element",
"xml.etree.ElementTree.... | [((806, 925), 'collections.namedtuple', 'collections.namedtuple', (['"""HeuristicVector"""', "['score', 'tv_on_pitch', 'ball_position', 'ball_carried', 'ball_marked']"], {}), "('HeuristicVector', ['score', 'tv_on_pitch',\n 'ball_position', 'ball_carried', 'ball_marked'])\n", (828, 925), False, 'import collections\n'), ((722, 781), 'numpy.array', 'np.array', (['[36, 36, 36, 35, 33, 30, 26, 21, 15, 10, 6, 3, 1]'], {}), '([36, 36, 36, 35, 33, 30, 26, 21, 15, 10, 6, 3, 1])\n', (730, 781), True, 'import numpy as np\n'), ((29200, 29233), 'numpy.array', 'np.array', (['[2, 2, 1, 1]'], {'dtype': 'int'}), '([2, 2, 1, 1], dtype=int)\n', (29208, 29233), True, 'import numpy as np\n'), ((30811, 30822), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (30819, 30822), True, 'import numpy as np\n'), ((3257, 3284), 'examples.tree_search.hashmap.create_gamestate_hash', 'create_gamestate_hash', (['game'], {}), '(game)\n', (3278, 3284), False, 'from examples.tree_search.hashmap import HashMap, create_gamestate_hash\n'), ((5617, 5676), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['parent', '"""action_node"""'], {'attrib': 'tag_attributes'}), "(parent, 'action_node', attrib=tag_attributes)\n", (5630, 5676), True, 'import xml.etree.ElementTree as ET\n'), ((7553, 7612), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['parent', '"""chance_node"""'], {'attrib': 'tag_attributes'}), "(parent, 'chance_node', attrib=tag_attributes)\n", (7566, 7612), True, 'import xml.etree.ElementTree as ET\n'), ((8160, 8174), 'copy.deepcopy', 'deepcopy', (['game'], {}), '(game)\n', (8168, 8174), False, 'from copy import deepcopy\n'), ((8431, 8456), 'examples.tree_search.hashmap.HashMap', 'HashMap', (['[self.root_node]'], {}), '([self.root_node])\n', (8438, 8456), False, 'from examples.tree_search.hashmap import HashMap, create_gamestate_hash\n'), ((13567, 13592), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""search_tree"""'], {}), "('search_tree')\n", 
(13577, 13592), True, 'import xml.etree.ElementTree as ET\n'), ((13716, 13736), 'xml.etree.ElementTree.ElementTree', 'ET.ElementTree', (['root'], {}), '(root)\n', (13730, 13736), True, 'import xml.etree.ElementTree as ET\n'), ((14324, 14346), 'tests.util.only_fixed_rolls', 'only_fixed_rolls', (['game'], {}), '(game)\n', (14340, 14346), False, 'from tests.util import only_fixed_rolls\n'), ((17545, 17593), 'tests.util.only_fixed_rolls', 'only_fixed_rolls', (['game'], {'d3': 'd3_fixes', 'd6': 'd6_fixes'}), '(game, d3=d3_fixes, d6=d6_fixes)\n', (17561, 17593), False, 'from tests.util import only_fixed_rolls\n'), ((19350, 19372), 'pytest.approx', 'approx', (['(1.0)'], {'abs': '(1e-09)'}), '(1.0, abs=1e-09)\n', (19356, 19372), False, 'from pytest import approx\n'), ((19855, 19885), 'tests.util.only_fixed_rolls', 'only_fixed_rolls', (['game'], {'d6': '[6]'}), '(game, d6=[6])\n', (19871, 19885), False, 'from tests.util import only_fixed_rolls\n'), ((20228, 20260), 'tests.util.only_fixed_rolls', 'only_fixed_rolls', (['game'], {'d6': 'fixes'}), '(game, d6=fixes)\n', (20244, 20260), False, 'from tests.util import only_fixed_rolls\n'), ((25627, 25676), 'tests.util.only_fixed_rolls', 'only_fixed_rolls', (['game'], {'d6': '([6] * index_of_failure)'}), '(game, d6=[6] * index_of_failure)\n', (25643, 25676), False, 'from tests.util import only_fixed_rolls\n'), ((26996, 27033), 'tests.util.only_fixed_rolls', 'only_fixed_rolls', (['game'], {'d6': 'fail_rolls'}), '(game, d6=fail_rolls)\n', (27012, 27033), False, 'from tests.util import only_fixed_rolls\n'), ((31708, 31719), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (31714, 31719), False, 'from pytest import approx\n'), ((33723, 33758), 'tests.util.only_fixed_rolls', 'only_fixed_rolls', (['game'], {'d6': 'd6_fixes'}), '(game, d6=d6_fixes)\n', (33739, 33758), False, 'from tests.util import only_fixed_rolls\n'), ((6220, 6283), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['this_tag', '"""action"""'], {'attrib': 
'action_tag_attributes'}), "(this_tag, 'action', attrib=action_tag_attributes)\n", (6233, 6283), True, 'import xml.etree.ElementTree as ET\n'), ((7769, 7832), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['this_tag', '"""outcome"""'], {'attrib': "{'p': f'{prob:.2f}'}"}), "(this_tag, 'outcome', attrib={'p': f'{prob:.2f}'})\n", (7782, 7832), True, 'import xml.etree.ElementTree as ET\n'), ((9573, 9582), 'examples.tree_search.hashmap.HashMap', 'HashMap', ([], {}), '()\n', (9580, 9582), False, 'from examples.tree_search.hashmap import HashMap, create_gamestate_hash\n'), ((13685, 13700), 'xml.etree.ElementTree.indent', 'ET.indent', (['root'], {}), '(root)\n', (13694, 13700), True, 'import xml.etree.ElementTree as ET\n'), ((18886, 18923), 'numpy.random.choice', 'np.random.choice', (['possible_squares', '(1)'], {}), '(possible_squares, 1)\n', (18902, 18923), True, 'import numpy as np\n'), ((22229, 22249), 'more_itertools.collapse', 'collapse', (['path.rolls'], {}), '(path.rolls)\n', (22237, 22249), False, 'from more_itertools import collapse, first\n'), ((22462, 22484), 'tests.util.only_fixed_rolls', 'only_fixed_rolls', (['game'], {}), '(game)\n', (22478, 22484), False, 'from tests.util import only_fixed_rolls\n'), ((24055, 24093), 'more_itertools.collapse', 'collapse', (['remaining_current_step_rolls'], {}), '(remaining_current_step_rolls)\n', (24063, 24093), False, 'from more_itertools import collapse, first\n'), ((24472, 24532), 'botbowl.core.pathfinding.python_pathfinding.get_safest_path', 'pf.get_safest_path', (['game', 'player', 'final_step'], {'blitz': 'is_blitz'}), '(game, player, final_step, blitz=is_blitz)\n', (24490, 24532), True, 'import botbowl.core.pathfinding.python_pathfinding as pf\n'), ((25418, 25433), 'numpy.array', 'np.array', (['rolls'], {}), '(rolls)\n', (25426, 25433), True, 'import numpy as np\n'), ((27317, 27339), 'tests.util.only_fixed_rolls', 'only_fixed_rolls', (['game'], {}), '(game)\n', (27333, 27339), False, 'from tests.util import 
only_fixed_rolls\n'), ((28360, 28393), 'tests.util.only_fixed_rolls', 'only_fixed_rolls', (['game'], {'d6': '[5, 4]'}), '(game, d6=[5, 4])\n', (28376, 28393), False, 'from tests.util import only_fixed_rolls\n'), ((31272, 31281), 'pytest.approx', 'approx', (['(0)'], {}), '(0)\n', (31278, 31281), False, 'from pytest import approx\n'), ((31300, 31309), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (31306, 31309), False, 'from pytest import approx\n'), ((31979, 32001), 'tests.util.only_fixed_rolls', 'only_fixed_rolls', (['game'], {}), '(game)\n', (31995, 32001), False, 'from tests.util import only_fixed_rolls\n'), ((32167, 32189), 'tests.util.only_fixed_rolls', 'only_fixed_rolls', (['game'], {}), '(game)\n', (32183, 32189), False, 'from tests.util import only_fixed_rolls\n'), ((33246, 33290), 'tests.util.only_fixed_rolls', 'only_fixed_rolls', (['game'], {'d6': '([6] * num_players)'}), '(game, d6=[6] * num_players)\n', (33262, 33290), False, 'from tests.util import only_fixed_rolls\n'), ((33339, 33361), 'tests.util.only_fixed_rolls', 'only_fixed_rolls', (['game'], {}), '(game)\n', (33355, 33361), False, 'from tests.util import only_fixed_rolls\n'), ((34015, 34046), 'tests.util.only_fixed_rolls', 'only_fixed_rolls', (['game'], {}), '(game, **fixes)\n', (34031, 34046), False, 'from tests.util import only_fixed_rolls\n'), ((5916, 5965), 'numpy.dot', 'np.dot', (['weights', 'self.info.action_values[a_index]'], {}), '(weights, self.info.action_values[a_index])\n', (5922, 5965), True, 'import numpy as np\n'), ((16758, 16780), 'tests.util.only_fixed_rolls', 'only_fixed_rolls', (['game'], {}), '(game)\n', (16774, 16780), False, 'from tests.util import only_fixed_rolls\n'), ((31422, 31431), 'pytest.approx', 'approx', (['(0)'], {}), '(0)\n', (31428, 31431), False, 'from pytest import approx\n'), ((31622, 31657), 'numpy.random.choice', 'np.random.choice', (['die_res', 'num_dice'], {}), '(die_res, num_dice)\n', (31638, 31657), True, 'import numpy as np\n'), ((31481, 31490), 
'pytest.approx', 'approx', (['(0)'], {}), '(0)\n', (31487, 31490), False, 'from pytest import approx\n'), ((25045, 25069), 'more_itertools.collapse', 'collapse', (['new_path.rolls'], {}), '(new_path.rolls)\n', (25053, 25069), False, 'from more_itertools import collapse, first\n')] |
import numpy as np
from .muscle_simulation_stepupdate import step_update_state
class MuscleTendonComplex:
    """Hill-type muscle-tendon complex (MTC) spanning one or more joints.

    The MTC is described by a contractile element (CE, length ``lce``) in
    series with an elastic tendon (SE, length ``lse``); total length is
    ``lmtc = lce + lse``.  Joint angles are mapped to MTC length through a
    per-joint lever arm model that is either constant (``offsetCorr == 0``)
    or angle-dependent with a cosine-shaped lever (``offsetCorr == 1``).
    All ``init_*`` attributes snapshot the constructor inputs so that
    :meth:`reset_state` can restore them later.
    """
    def __init__(self, nameMuscle, frcmax, vmax, lslack, lopt,
                 lce, r, phiref, phimaxref, rho, dirAng, phiScale,
                 offsetCorr, timestep, angJoi, eref=0.04, act=0.01,
                 tau=0.01, w=0.56, c=0.05, N=1.5, K=5.0, stim=0.0,
                 vce=0.0, frcmtc=0.0, lmtc=0.0):
        # Snapshot every input as an ``init_*`` attribute (scalars coerced to
        # float, arrays copied via astype) so reset_state() can restore them.
        #   nameMuscle : identifier string
        #   frcmax     : maximum isometric force [N]
        #   vmax       : maximum contraction velocity [lopt/s]
        #   lslack     : tendon slack length [m]
        #   lopt       : optimum CE fiber length [m]
        #   r, phiref, phimaxref, rho, phiScale : per-joint lever geometry
        #   dirAng     : +1/-1 sign of dMTC-length/dAngle per joint
        #   offsetCorr : 0 = constant lever, 1 = angle-dependent lever
        #   angJoi     : current joint angle(s) [rad]
        #   eref, tau, w, c, N, K : presumably Hill-model shape constants
        #     (tendon reference strain, activation time constant, FL/FV
        #     curve parameters) -- consumed by step_update_state; TODO confirm
        self.init_nameMuscle = nameMuscle
        self.init_frcmax = float(frcmax)
        self.init_vmax = float(vmax)
        self.init_eref = float(eref)
        self.init_lslack = float(lslack)
        self.init_lopt = float(lopt)
        self.init_tau = float(tau)
        self.init_w = float(w)
        self.init_c = float(c)
        self.init_N = float(N)
        self.init_K = float(K)
        self.init_stim = float(stim)
        self.init_act = float(act)
        self.init_lmtc = float(lmtc)
        self.init_lce = float(lce)
        self.init_vce = float(vce)
        self.init_frcmtc = float(frcmtc)
        self.init_r = r.astype('float')
        self.init_phiref = phiref.astype('float')
        self.init_phimaxref = phimaxref.astype('float')
        self.init_rho = rho.astype('float')
        self.init_dirAng = dirAng.astype('float')
        self.init_phiScale = phiScale.astype('float')
        self.init_offsetCorr = offsetCorr.astype('int')
        self.init_timestep = float(timestep)
        self.init_angJoi = angJoi.astype('float')
        # Copy the init_* snapshot into the live state attributes.
        self.reset_state()
        # NOTE(review): MR looks like a metabolic-rate (or similar) constant
        # used by step_update_state -- not reset by reset_state; confirm.
        self.MR = float(0.01)
        # Number of joints this muscle spans (1 = mono-, 2 = bi-articular).
        self.typeMuscle = int(self.angJoi.size)
        self.levelArm = np.zeros(self.typeMuscle).astype('float')
        # tmpL accumulates each joint's contribution to the MTC length
        # deviation from the reference posture.
        tmpL = np.zeros(self.typeMuscle)
        for i in range(0, self.typeMuscle):
            if self.offsetCorr[i] == 0:
                # Constant lever arm: length change is linear in the angle
                # deviation from phiref.
                tmpL[i] = self.dirAng[i] * (self.angJoi[i] - self.phiref[i]) * self.r[i] * self.rho[i]
                self.levelArm[i] = self.r[i]
            elif self.offsetCorr[i] == 1:
                # Angle-dependent lever arm: lever varies as
                # cos((phi - phimaxref) * phiScale), so the integrated length
                # change is the difference of sines below.
                tmp1 = np.sin((self.phiref[i] - self.phimaxref[i]) * self.phiScale[i])
                tmp2 = np.sin((self.angJoi[i] - self.phimaxref[i]) * self.phiScale[i])
                tmpL[i] = self.dirAng[i] * (tmp2 - tmp1) * self.r[i] * self.rho[i] / self.phiScale[i]
                self.levelArm[i] = np.cos((self.angJoi[i] - self.phimaxref[i]) * self.phiScale[i]) * self.r[i]
            else:
                raise ValueError('Invalid muscle level arm offset correction type. ')
        # Total MTC length at the current posture; at phi == phiref this is
        # exactly lslack + lopt (tmpL == 0).
        self.lmtc = self.lslack + self.lopt + np.sum(tmpL)
        # Initialize CE at the length that leaves the tendon exactly slack.
        # NOTE(review): this overwrites the lce/lmtc restored by reset_state();
        # a later reset_state() call reverts to the raw constructor values,
        # not these computed ones -- confirm that is intended.
        self.lce = self.lmtc - self.lslack
        self.lse = float(self.lmtc - self.lce)
        # unitless (normalized) lengths: tendon strain ratio and CE length
        # relative to optimum
        self.Lse = float(self.lse / self.lslack)
        self.Lce = float(self.lce / self.lopt)
        # Half-step (midpoint) predictors for activation and CE length, used
        # by the integrator in step_update_state.
        self.actsubstep = float((self.stim - self.act) * self.timestep / 2.0 / self.tau + self.act)
        self.lcesubstep = float(self.vce * self.timestep / 2.0 + self.lce)
        # Running averages / frame counter (diagnostics).
        self.lce_avg = float(self.lce)
        self.vce_avg = float(self.vce)
        self.frcmtc_avg = float(0)
        self.act_avg = float(self.act)
        self.frame = 0
    def stepUpdateState(self, angJoi):
        """
        Muscle Tendon Complex Dynamics
        update muscle states based on the muscle dynamics
        Muscle state stim has to be updated outside before this function is called

        Delegates the full state transition to ``step_update_state``; the
        40-element tuple below is unpacked positionally, so its order MUST
        match the return order of ``step_update_state`` exactly.  ``angJoi``
        is the new joint angle vector; every other argument is the current
        attribute value.
        """
        self.frcmax, self.vmax, self.eref, self.lslack, self.lopt, self.tau, \
        self.w, self.c, self.N, self.K, self.stim, self.act, self.lmtc, self.lce, \
        self.vce, self.frcmtc, \
        self.r, self.phiref, \
        self.phimaxref, self.rho, \
        self.dirAng, self.phiScale, \
        self.angJoi, self.levelArm, self.offsetCorr, \
        self.timestep, self.MR, self.typeMuscle, \
        self.lse, self.Lse, self.Lce, self.actsubstep, \
        self.lcesubstep, self.lce_avg, self.vce_avg, self.frcmtc_avg, self.act_avg, self.frame = \
            step_update_state(
                self.frcmax, self.vmax, self.eref, self.lslack, self.lopt, self.tau,
                self.w, self.c, self.N, self.K, self.stim, self.act, self.lmtc, self.lce,
                self.vce, self.frcmtc,
                self.r, self.phiref,
                self.phimaxref, self.rho,
                self.dirAng, self.phiScale,
                angJoi, self.levelArm, self.offsetCorr,
                self.timestep, self.MR, self.typeMuscle,
                self.lse, self.Lse, self.Lce, self.actsubstep,
                self.lcesubstep, self.lce_avg, self.vce_avg, self.frcmtc_avg, self.act_avg, self.frame)
    def reset_state(self):
        """Restore live state from the ``init_*`` snapshot taken in __init__.

        Also zeroes the frame counter and running averages.  NOTE(review):
        derived quantities (lse, Lse, Lce, actsubstep, lcesubstep, levelArm)
        are NOT recomputed here; they keep their previous values until the
        next stepUpdateState call -- confirm that is intended.
        """
        self.frame = int(0)
        self.lce_avg = float(0)
        self.frcmtc_avg = float(0)
        self.act_avg = float(0)
        self.vce_avg = float(0)
        self.nameMuscle = self.init_nameMuscle
        self.frcmax = self.init_frcmax
        self.vmax = self.init_vmax
        self.eref = self.init_eref
        self.lslack = self.init_lslack
        self.lopt = self.init_lopt
        self.tau = self.init_tau
        self.w = self.init_w
        self.c = self.init_c
        self.N = self.init_N
        self.K = self.init_K
        self.stim = self.init_stim
        self.act = self.init_act
        self.lmtc = self.init_lmtc
        self.lce = self.init_lce
        self.vce = self.init_vce
        self.frcmtc = self.init_frcmtc
        self.r = self.init_r
        self.phiref = self.init_phiref
        self.phimaxref = self.init_phimaxref
        self.rho = self.init_rho
        self.dirAng = self.init_dirAng
        self.phiScale = self.init_phiScale
        self.offsetCorr = self.init_offsetCorr
        self.timestep = self.init_timestep
        self.angJoi = self.init_angJoi
class TIA(MuscleTendonComplex):
    """
    Tibialis Anterior (TIA).  Situated on the lateral side of the tibia; in
    the real human it dorsiflexes the ankle, inverts/adducts the foot and
    supports the medial arch.  In this model it is the mono-articular
    sagittal-plane ankle dorsiflexor.
    """

    def __init__(self, angAnk, timestep):
        f_max = 800      # maximum isometric force [N]
        l_opt = 0.06     # optimum fiber length CE [m]
        v_max = 12.0     # maximum contraction velocity [lopt/s]
        l_slack = 0.24   # tendon slack length [m]
        # Ankle attachment geometry: the lever arm falls from lever_max at
        # phi_lever_max down to lever_min at phi_lever_min.
        lever_max = 0.04                   # [m] maximum lever contribution
        lever_min = 0.01                   # [m] minimum lever contribution
        phi_lever_max = 80 * np.pi / 180   # [rad] angle of maximum lever contribution
        phi_lever_min = 180 * np.pi / 180  # [rad] angle of minimum lever contribution
        phi_ref = 110 * np.pi / 180        # [rad] reference angle (MTU length = lopt + lslack)
        phi_scale = np.arccos(lever_min / lever_max) / (phi_lever_min - phi_lever_max)
        attach_rho = 0.7
        super().__init__(
            nameMuscle="TIA",
            frcmax=f_max,
            vmax=v_max,
            lslack=l_slack,
            lopt=l_opt,
            lce=l_opt,
            r=np.array((lever_max,)),
            phiref=np.array((phi_ref,)),
            phimaxref=np.array((phi_lever_max,)),
            rho=np.array((attach_rho,)),
            dirAng=np.array((1.0,)),    # dorsiflexion: larger angle -> longer MTC
            phiScale=np.array((phi_scale,)),
            offsetCorr=np.array((1,)),  # angle-dependent lever arm
            timestep=timestep,
            angJoi=np.array((angAnk,)),
        )
class SOL(MuscleTendonComplex):
    """
    Soleus (SOL).  Lies in the superficial posterior compartment of the leg
    and, together with GAS, plantarflexes the ankle.  Modelled here as the
    mono-articular sagittal-plane ankle plantarflexor.
    """

    def __init__(self, angAnk, timestep):
        f_max = 4000     # maximum isometric force [N]
        l_opt = 0.04     # optimum fiber length CE [m]
        v_max = 6.0      # maximum contraction velocity [lopt/s]
        l_slack = 0.26   # tendon slack length [m]
        # Ankle attachment geometry (angle-dependent lever arm).
        lever_max = 0.06                   # [m] maximum lever contribution
        lever_min = 0.02                   # [m] minimum lever contribution
        phi_lever_max = 100 * np.pi / 180  # [rad] angle of maximum lever contribution
        phi_lever_min = 180 * np.pi / 180  # [rad] angle of minimum lever contribution
        phi_ref = 90 * np.pi / 180         # [rad] reference angle (MTU length = lopt + lslack)
        attach_rho = 0.5
        phi_scale = np.arccos(lever_min / lever_max) / (phi_lever_min - phi_lever_max)
        super().__init__(
            nameMuscle="SOL",
            frcmax=f_max,
            vmax=v_max,
            lslack=l_slack,
            lopt=l_opt,
            lce=l_opt,
            r=np.array((lever_max,)),
            phiref=np.array((phi_ref,)),
            phimaxref=np.array((phi_lever_max,)),
            rho=np.array((attach_rho,)),
            dirAng=np.array((-1.0,)),   # plantarflexor: larger angle -> shorter MTC
            phiScale=np.array((phi_scale,)),
            offsetCorr=np.array((1,)),  # angle-dependent lever arm
            timestep=timestep,
            angJoi=np.array((angAnk,)),
        )
class GAS(MuscleTendonComplex):
    """
    Gastrocnemius (GAS).  The bulk at the back of the lower leg; a
    bi-articular muscle running from behind the knee to the heel, assisting
    both ankle plantarflexion and knee flexion.  Modelled here as a
    bi-articular MTU spanning knee (flexion) and ankle (plantarflexion) in
    the sagittal plane.
    """

    def __init__(self, angKne, angAnk, timestep):
        f_max = 1500     # maximum isometric force [N]
        l_opt = 0.05     # optimum fiber length CE [m]
        v_max = 12.0     # maximum contraction velocity [lopt/s]
        l_slack = 0.40   # tendon slack length [m]
        # Knee attachment (angle-dependent lever arm).
        kne_lever_max = 0.05                   # [m] maximum lever contribution
        kne_lever_min = 0.02                   # [m] minimum lever contribution
        kne_phi_lever_max = 140 * np.pi / 180  # [rad] angle of maximum lever contribution
        kne_phi_lever_min = 45 * np.pi / 180   # [rad] angle of minimum lever contribution
        kne_phi_ref = 165 * np.pi / 180        # [rad] reference angle
        kne_rho = 0.7
        kne_scale = np.arccos(kne_lever_min / kne_lever_max) / (kne_phi_lever_min - kne_phi_lever_max)
        # Ankle attachment (angle-dependent lever arm).
        ank_lever_max = 0.06                   # [m] maximum lever contribution
        ank_lever_min = 0.02                   # [m] minimum lever contribution
        ank_phi_lever_max = 100 * np.pi / 180  # [rad] angle of maximum lever contribution
        ank_phi_lever_min = 180 * np.pi / 180  # [rad] angle of minimum lever contribution
        ank_phi_ref = 80 * np.pi / 180         # [rad] reference angle
        ank_rho = 0.7
        ank_scale = np.arccos(ank_lever_min / ank_lever_max) / (ank_phi_lever_min - ank_phi_lever_max)
        super().__init__(
            nameMuscle="GAS",
            frcmax=f_max,
            vmax=v_max,
            lslack=l_slack,
            lopt=l_opt,
            lce=l_opt,
            r=np.array((kne_lever_max, ank_lever_max)),
            phiref=np.array((kne_phi_ref, ank_phi_ref)),
            phimaxref=np.array((kne_phi_lever_max, ank_phi_lever_max)),
            rho=np.array((kne_rho, ank_rho)),
            dirAng=np.array((1.0, -1.0)),  # knee flexion lengthens, ankle plantarflexion shortens
            phiScale=np.array((kne_scale, ank_scale)),
            offsetCorr=np.array((1, 1)),   # both joints use angle-dependent levers
            timestep=timestep,
            angJoi=np.array((angKne, angAnk)),
        )
class BFSH(MuscleTendonComplex):
    """
    Biceps Femoris Short Head(BFSH): This is a part of hamstring muscle in the real hu-
    man and is responsible for knee flexion. Here BFSH is modelled as muscle contributing
    to the knee flexion actuation in the sagittal plane.
    """
    def __init__(self, angKne, timestep):
        frcmax = 350  # maximum isometric force [N]
        lopt = 0.12  # optimum fiber length CE [m]
        vmax = 12.0  # 6 # maximum contraction velocity [lopt/s]
        lslack = 0.10  # tendon slack length [m]
        # BFSH group attachment: constant lever arm, so no offset correction.
        rBFSH = 0.04  # [m] constant lever contribution
        phirefBFSH = 160 * np.pi / 180  # [rad] reference angle at which MTU length equals lopt + lslack
        rhoBFSH = 0.7  # sum of lopt and lslack
        r = np.array((rBFSH,))
        phiref = np.array((phirefBFSH,))
        phimaxref = np.array((0.0,))  # unused when offsetCorr == 0
        rho = np.array((rhoBFSH,))
        dirAng = np.array((1.0,))  # knee angle increase lengthens the MTC
        offsetCorr = np.array((0,))
        phiScale = np.array((0.0,))
        lce = lopt
        # BUG FIX: a stray trailing comma previously made nameMuscle the
        # tuple ("BFSH",) instead of the string "BFSH", unlike every sibling
        # muscle class.
        nameMuscle = "BFSH"
        angJoi = np.array((angKne,))
        super().__init__(nameMuscle, frcmax, vmax, lslack, lopt,
                         lce, r, phiref, phimaxref, rho, dirAng, phiScale,
                         offsetCorr, timestep, angJoi)
class VAS(MuscleTendonComplex):
    """
    Vasti (VAS).  A group of three thigh muscles responsible for knee
    extension; modelled here as a single mono-articular knee extensor in the
    sagittal plane.
    """

    def __init__(self, angKne, timestep):
        f_max = 6000     # maximum isometric force [N]
        l_opt = 0.08     # optimum fiber length CE [m]
        v_max = 12.0     # maximum contraction velocity [lopt/s]
        l_slack = 0.23   # tendon slack length [m]
        # Knee attachment geometry (angle-dependent lever arm).
        lever_max = 0.06                   # [m] maximum lever contribution
        lever_min = 0.04                   # [m] minimum lever contribution
        phi_lever_max = 165 * np.pi / 180  # [rad] angle of maximum lever contribution
        phi_lever_min = 45 * np.pi / 180   # [rad] angle of minimum lever contribution
        phi_ref = 120 * np.pi / 180        # [rad] reference angle (MTU length = lopt + lslack)
        attach_rho = 0.6
        phi_scale = np.arccos(lever_min / lever_max) / (phi_lever_min - phi_lever_max)
        super().__init__(
            nameMuscle="VAS",
            frcmax=f_max,
            vmax=v_max,
            lslack=l_slack,
            lopt=l_opt,
            lce=l_opt,
            r=np.array((lever_max,)),
            phiref=np.array((phi_ref,)),
            phimaxref=np.array((phi_lever_max,)),
            rho=np.array((attach_rho,)),
            dirAng=np.array((-1.0,)),   # extensor: larger knee angle -> shorter MTC
            phiScale=np.array((phi_scale,)),
            offsetCorr=np.array((1,)),  # angle-dependent lever arm
            timestep=timestep,
            angJoi=np.array((angKne,)),
        )
class REF(MuscleTendonComplex):
    """
    Rectus Femoris (RF).  One of the four quadriceps muscles, located in the
    middle of the front of the thigh; it both extends the knee and flexes the
    hip.  Modelled here as a bi-articular MTU spanning hip (flexion) and knee
    (extension) in the sagittal plane.
    """

    def __init__(self, angHip, angKne, timestep):
        f_max = 1200     # maximum isometric force [N]
        l_opt = 0.08     # optimum fiber length CE [m]
        v_max = 12.0     # maximum contraction velocity [lopt/s]
        l_slack = 0.35   # tendon slack length [m]
        # Hip attachment: constant lever arm (no offset correction).
        hip_lever = 0.08                # [m] constant lever contribution
        hip_phi_ref = 170 * np.pi / 180 # [rad] reference angle
        hip_rho = 0.3
        # Knee attachment: angle-dependent lever arm.
        kne_lever_max = 0.06                   # [m] maximum lever contribution
        kne_lever_min = 0.04                   # [m] minimum lever contribution
        kne_phi_lever_max = 165 * np.pi / 180  # [rad] angle of maximum lever contribution
        kne_phi_lever_min = 45 * np.pi / 180   # [rad] angle of minimum lever contribution
        kne_phi_ref = 125 * np.pi / 180        # [rad] reference angle
        kne_rho = 0.5
        kne_scale = np.arccos(kne_lever_min / kne_lever_max) / (kne_phi_lever_min - kne_phi_lever_max)
        super().__init__(
            nameMuscle="REF",
            frcmax=f_max,
            vmax=v_max,
            lslack=l_slack,
            lopt=l_opt,
            lce=l_opt,
            r=np.array((hip_lever, kne_lever_max)),
            phiref=np.array((hip_phi_ref, kne_phi_ref)),
            phimaxref=np.array((0.0, kne_phi_lever_max)),
            rho=np.array((hip_rho, kne_rho)),
            dirAng=np.array((1.0, -1.0)),  # hip flexion lengthens, knee extension shortens
            phiScale=np.array((0.0, kne_scale)),
            offsetCorr=np.array((0, 1)),   # constant hip lever, curved knee lever
            timestep=timestep,
            angJoi=np.array((angHip, angKne)),
        )
class HAM(MuscleTendonComplex):
    """
    Hamstrings (HAM).  A group of four muscles at the back of the thigh;
    bi-articular across hip and knee, assisting hip extension and knee
    flexion simultaneously.  Modelled here as a bi-articular MTU spanning
    hip (extension) and knee (flexion) in the sagittal plane.
    """

    def __init__(self, angHip, angKne, timestep):
        f_max = 3000     # maximum isometric force [N]
        l_opt = 0.10     # optimum fiber length CE [m]
        v_max = 12.0     # maximum contraction velocity [lopt/s]
        l_slack = 0.31   # tendon slack length [m]
        # Hip attachment: constant lever arm.
        hip_lever = 0.08                 # [m] constant lever contribution
        hip_phi_ref = 150 * np.pi / 180  # [rad] reference angle
        hip_rho = 0.5
        # Knee attachment: constant lever arm.
        kne_lever = 0.05                 # [m] constant lever contribution
        kne_phi_ref = 180 * np.pi / 180  # [rad] reference angle
        kne_rho = 0.5
        super().__init__(
            nameMuscle="HAM",
            frcmax=f_max,
            vmax=v_max,
            lslack=l_slack,
            lopt=l_opt,
            lce=l_opt,
            r=np.array((hip_lever, kne_lever)),
            phiref=np.array((hip_phi_ref, kne_phi_ref)),
            phimaxref=np.array((0.0, 0.0)),   # unused: both joints constant-lever
            rho=np.array((hip_rho, kne_rho)),
            dirAng=np.array((-1.0, 1.0)),   # hip extension shortens, knee flexion lengthens
            phiScale=np.array((0.0, 0.0)),
            offsetCorr=np.array((0, 0)),     # constant levers on both joints
            timestep=timestep,
            angJoi=np.array((angHip, angKne)),
        )
class HFL(MuscleTendonComplex):
    """
    Hip Flexor (HFL): The hip flexors are a group of muscles that help to bring the legs
    and trunk together in a flexion movement. HFL allow us to move our leg or knee up
    towards your torso, as well as to bend your torso forward at the hip. The HLF modelled
    here is one of the actuator for the hip flexion in the sagittal plane.
    """
    def __init__(self, angHip, timestep):
        frcmax = 2000  # maximum isometric force [N]
        lopt = 0.11  # optimum fiber length CE [m]
        vmax = 12.0  # maximum contraction velocity [lopt/s]
        lslack = 0.10  # tendon slack length [m]
        # level arm and reference angle (constant lever, no offset correction)
        r = np.array((0.08,))  # [m] constant lever contribution
        phiref = np.array((160 * np.pi / 180,))  # [rad] reference angle at which MTU length equals
        # FIX: phimaxref previously held two entries for this single-joint
        # muscle.  It is unused when offsetCorr == 0, but keep it length-1
        # for consistency with r/phiref/rho/angJoi.
        phimaxref = np.array((0.0,))
        rho = np.array((0.5,))  # sum of lopt and lslack
        dirAng = np.array((1.0,))  # angle increase leads to MTC length increase
        offsetCorr = np.array((0,))  # no level arm correction
        phiScale = np.array((0.0,))
        lce = lopt
        angJoi = np.array((angHip,))
        nameMuscle = "HFL"
        super().__init__(nameMuscle, frcmax, vmax, lslack, lopt,
                         lce, r, phiref, phimaxref, rho, dirAng, phiScale,
                         offsetCorr, timestep, angJoi)
class GLU(MuscleTendonComplex):
    """
    Glutei (GLU): The glutei muscles are a group muscles in the gluteal region, in real life
    locomotion their functions include extension, abduction, external rotation and internal
    rotation of the hip joint. But here in the model GLU is modelled antagonistic to HFL
    as hip extensor, acting as one of the hip joint actuator in the sagittal plane.
    """
    def __init__(self, angHip, timestep):
        frcmax = 1500.0  # maximum isometric force [N]
        lopt = 0.11  # optimum fiber length CE [m]
        vmax = 12.0  # maximum contraction velocity [lopt/s]
        lslack = 0.13  # tendon slack length [m]
        # level arm and reference angle (constant lever, no offset correction)
        r = np.array((0.08,))  # [m] constant lever contribution
        phiref = np.array((120 * np.pi / 180,))  # [rad] reference angle at which MTU length equals
        # FIX: phimaxref previously held two entries for this single-joint
        # muscle.  It is unused when offsetCorr == 0, but keep it length-1
        # for consistency with r/phiref/rho/angJoi.
        phimaxref = np.array((0.0,))
        rho = np.array((0.5,))  # sum of lopt and lslack
        dirAng = np.array((-1.0,))  # angle increase leads to MTC length decrease
        offsetCorr = np.array((0,))  # no level arm correction
        phiScale = np.array((0.0,))
        lce = lopt  # will be computed in the initialization
        nameMuscle = "GLU"
        angJoi = np.array((angHip,))
        super().__init__(nameMuscle, frcmax, vmax, lslack, lopt,
                         lce, r, phiref, phimaxref, rho, dirAng, phiScale,
                         offsetCorr, timestep, angJoi)
class HAD(MuscleTendonComplex):
    """
    Hip Adductor (HAD).  The adductors near the groin pull the leg toward the
    body midline in the coronal plane, antagonising the abductors and helping
    stabilise the hip.  Modelled here as the coronal-plane hip adduction
    actuator.
    """

    def __init__(self, angHipFront, timestep):
        f_max = 4500.0   # maximum isometric force [N]
        l_opt = 0.10     # optimum fiber length CE [m]
        v_max = 12.0     # maximum contraction velocity [lopt/s]
        l_slack = 0.18   # tendon slack length [m]
        # Frontal-hip attachment: constant lever arm.
        lever = 0.06 / 2                # [m] constant lever contribution (0.03)
        phi_ref = 15 * np.pi / 180      # [rad] reference angle (MTU length = lopt + lslack)
        attach_rho = 1.0
        super().__init__(
            nameMuscle="HAD",
            frcmax=f_max,
            vmax=v_max,
            lslack=l_slack,
            lopt=l_opt,
            lce=l_opt,
            r=np.array((lever,)),
            phiref=np.array((phi_ref,)),
            phimaxref=np.array((0.0,)),   # unused: constant lever
            rho=np.array((attach_rho,)),
            dirAng=np.array((1.0,)),      # adduction angle increase lengthens MTC
            phiScale=np.array((0.0,)),
            offsetCorr=np.array((0,)),    # constant lever arm
            timestep=timestep,
            angJoi=np.array((angHipFront,)),
        )
class HAB(MuscleTendonComplex):
    """
    Hip Abductor (HAB).  The abductors move the leg away from the body
    midline in the coronal plane, rotate the thigh in the hip socket and
    stabilise the hip joint.  Modelled here as the coronal-plane hip
    abduction actuator.
    """

    def __init__(self, angHipFront, timestep):
        f_max = 3000.0   # maximum isometric force [N]
        l_opt = 0.09     # optimum fiber length CE [m]
        v_max = 12.0     # maximum contraction velocity [lopt/s]
        l_slack = 0.07   # tendon slack length [m]
        # Frontal-hip attachment: constant lever arm.
        lever = 0.06                    # [m] constant lever contribution
        phi_ref = 10 * np.pi / 180      # [rad] reference angle (MTU length = lopt + lslack)
        attach_rho = 0.7
        super().__init__(
            nameMuscle="HAB",
            frcmax=f_max,
            vmax=v_max,
            lslack=l_slack,
            lopt=l_opt,
            lce=l_opt,
            r=np.array((lever,)),
            phiref=np.array((phi_ref,)),
            phimaxref=np.array((0.0,)),   # unused: constant lever
            rho=np.array((attach_rho,)),
            dirAng=np.array((-1.0,)),     # abduction angle increase shortens MTC
            phiScale=np.array((0.0,)),
            offsetCorr=np.array((0,)),    # constant lever arm
            timestep=timestep,
            angJoi=np.array((angHipFront,)),
        )
| [
"numpy.arccos",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.cos",
"numpy.sin"
] | [((1671, 1696), 'numpy.zeros', 'np.zeros', (['self.typeMuscle'], {}), '(self.typeMuscle)\n', (1679, 1696), True, 'import numpy as np\n'), ((6950, 6970), 'numpy.array', 'np.array', (['(rTIAmax,)'], {}), '((rTIAmax,))\n', (6958, 6970), True, 'import numpy as np\n'), ((6988, 7010), 'numpy.array', 'np.array', (['(phirefTIA,)'], {}), '((phirefTIA,))\n', (6996, 7010), True, 'import numpy as np\n'), ((7031, 7053), 'numpy.array', 'np.array', (['(phimaxTIA,)'], {}), '((phimaxTIA,))\n', (7039, 7053), True, 'import numpy as np\n'), ((7068, 7087), 'numpy.array', 'np.array', (['(rhoTIA,)'], {}), '((rhoTIA,))\n', (7076, 7087), True, 'import numpy as np\n'), ((7105, 7121), 'numpy.array', 'np.array', (['(1.0,)'], {}), '((1.0,))\n', (7113, 7121), True, 'import numpy as np\n'), ((7143, 7157), 'numpy.array', 'np.array', (['(1,)'], {}), '((1,))\n', (7151, 7157), True, 'import numpy as np\n'), ((7177, 7201), 'numpy.array', 'np.array', (['(phiScaleTIA,)'], {}), '((phiScaleTIA,))\n', (7185, 7201), True, 'import numpy as np\n'), ((7240, 7259), 'numpy.array', 'np.array', (['(angAnk,)'], {}), '((angAnk,))\n', (7248, 7259), True, 'import numpy as np\n'), ((8595, 8615), 'numpy.array', 'np.array', (['(rSOLmax,)'], {}), '((rSOLmax,))\n', (8603, 8615), True, 'import numpy as np\n'), ((8633, 8655), 'numpy.array', 'np.array', (['(phirefSOL,)'], {}), '((phirefSOL,))\n', (8641, 8655), True, 'import numpy as np\n'), ((8676, 8698), 'numpy.array', 'np.array', (['(phimaxSOL,)'], {}), '((phimaxSOL,))\n', (8684, 8698), True, 'import numpy as np\n'), ((8713, 8732), 'numpy.array', 'np.array', (['(rhoSOL,)'], {}), '((rhoSOL,))\n', (8721, 8732), True, 'import numpy as np\n'), ((8750, 8767), 'numpy.array', 'np.array', (['(-1.0,)'], {}), '((-1.0,))\n', (8758, 8767), True, 'import numpy as np\n'), ((8789, 8803), 'numpy.array', 'np.array', (['(1,)'], {}), '((1,))\n', (8797, 8803), True, 'import numpy as np\n'), ((8823, 8847), 'numpy.array', 'np.array', (['(phiScaleSOL,)'], {}), '((phiScaleSOL,))\n', (8831, 
8847), True, 'import numpy as np\n'), ((8886, 8905), 'numpy.array', 'np.array', (['(angAnk,)'], {}), '((angAnk,))\n', (8894, 8905), True, 'import numpy as np\n'), ((11143, 11173), 'numpy.array', 'np.array', (['(rGASkmax, rGASamax)'], {}), '((rGASkmax, rGASamax))\n', (11151, 11173), True, 'import numpy as np\n'), ((11191, 11225), 'numpy.array', 'np.array', (['(phirefGASk, phirefGASa)'], {}), '((phirefGASk, phirefGASa))\n', (11199, 11225), True, 'import numpy as np\n'), ((11246, 11280), 'numpy.array', 'np.array', (['(phimaxGASk, phimaxGASa)'], {}), '((phimaxGASk, phimaxGASa))\n', (11254, 11280), True, 'import numpy as np\n'), ((11295, 11323), 'numpy.array', 'np.array', (['(rhoGASk, rhoGASa)'], {}), '((rhoGASk, rhoGASa))\n', (11303, 11323), True, 'import numpy as np\n'), ((11341, 11362), 'numpy.array', 'np.array', (['(1.0, -1.0)'], {}), '((1.0, -1.0))\n', (11349, 11362), True, 'import numpy as np\n'), ((11384, 11400), 'numpy.array', 'np.array', (['(1, 1)'], {}), '((1, 1))\n', (11392, 11400), True, 'import numpy as np\n'), ((11420, 11458), 'numpy.array', 'np.array', (['(phiScaleGASk, phiScaleGASa)'], {}), '((phiScaleGASk, phiScaleGASa))\n', (11428, 11458), True, 'import numpy as np\n'), ((11524, 11550), 'numpy.array', 'np.array', (['(angKne, angAnk)'], {}), '((angKne, angAnk))\n', (11532, 11550), True, 'import numpy as np\n'), ((12544, 12562), 'numpy.array', 'np.array', (['(rBFSH,)'], {}), '((rBFSH,))\n', (12552, 12562), True, 'import numpy as np\n'), ((12580, 12603), 'numpy.array', 'np.array', (['(phirefBFSH,)'], {}), '((phirefBFSH,))\n', (12588, 12603), True, 'import numpy as np\n'), ((12624, 12640), 'numpy.array', 'np.array', (['(0.0,)'], {}), '((0.0,))\n', (12632, 12640), True, 'import numpy as np\n'), ((12655, 12675), 'numpy.array', 'np.array', (['(rhoBFSH,)'], {}), '((rhoBFSH,))\n', (12663, 12675), True, 'import numpy as np\n'), ((12693, 12709), 'numpy.array', 'np.array', (['(1.0,)'], {}), '((1.0,))\n', (12701, 12709), True, 'import numpy as np\n'), ((12731, 
12745), 'numpy.array', 'np.array', (['(0,)'], {}), '((0,))\n', (12739, 12745), True, 'import numpy as np\n'), ((12765, 12781), 'numpy.array', 'np.array', (['(0.0,)'], {}), '((0.0,))\n', (12773, 12781), True, 'import numpy as np\n'), ((12849, 12868), 'numpy.array', 'np.array', (['(angKne,)'], {}), '((angKne,))\n', (12857, 12868), True, 'import numpy as np\n'), ((14122, 14142), 'numpy.array', 'np.array', (['(rVASmax,)'], {}), '((rVASmax,))\n', (14130, 14142), True, 'import numpy as np\n'), ((14160, 14182), 'numpy.array', 'np.array', (['(phirefVAS,)'], {}), '((phirefVAS,))\n', (14168, 14182), True, 'import numpy as np\n'), ((14203, 14225), 'numpy.array', 'np.array', (['(phimaxVAS,)'], {}), '((phimaxVAS,))\n', (14211, 14225), True, 'import numpy as np\n'), ((14240, 14259), 'numpy.array', 'np.array', (['(rhoVAS,)'], {}), '((rhoVAS,))\n', (14248, 14259), True, 'import numpy as np\n'), ((14277, 14294), 'numpy.array', 'np.array', (['(-1.0,)'], {}), '((-1.0,))\n', (14285, 14294), True, 'import numpy as np\n'), ((14316, 14330), 'numpy.array', 'np.array', (['(1,)'], {}), '((1,))\n', (14324, 14330), True, 'import numpy as np\n'), ((14350, 14374), 'numpy.array', 'np.array', (['(phiScaleVAS,)'], {}), '((phiScaleVAS,))\n', (14358, 14374), True, 'import numpy as np\n'), ((14440, 14459), 'numpy.array', 'np.array', (['(angKne,)'], {}), '((angKne,))\n', (14448, 14459), True, 'import numpy as np\n'), ((16118, 16145), 'numpy.array', 'np.array', (['(rREFh, rREFkmax)'], {}), '((rREFh, rREFkmax))\n', (16126, 16145), True, 'import numpy as np\n'), ((16163, 16197), 'numpy.array', 'np.array', (['(phirefREFh, phirefREFk)'], {}), '((phirefREFh, phirefREFk))\n', (16171, 16197), True, 'import numpy as np\n'), ((16218, 16245), 'numpy.array', 'np.array', (['(0.0, phimaxREFk)'], {}), '((0.0, phimaxREFk))\n', (16226, 16245), True, 'import numpy as np\n'), ((16260, 16288), 'numpy.array', 'np.array', (['(rhoREFh, rhoREFk)'], {}), '((rhoREFh, rhoREFk))\n', (16268, 16288), True, 'import numpy as np\n'), 
((16306, 16327), 'numpy.array', 'np.array', (['(1.0, -1.0)'], {}), '((1.0, -1.0))\n', (16314, 16327), True, 'import numpy as np\n'), ((16349, 16365), 'numpy.array', 'np.array', (['(0, 1)'], {}), '((0, 1))\n', (16357, 16365), True, 'import numpy as np\n'), ((16385, 16414), 'numpy.array', 'np.array', (['(0.0, phiScaleREFk)'], {}), '((0.0, phiScaleREFk))\n', (16393, 16414), True, 'import numpy as np\n'), ((16480, 16506), 'numpy.array', 'np.array', (['(angHip, angKne)'], {}), '((angHip, angKne))\n', (16488, 16506), True, 'import numpy as np\n'), ((17937, 17961), 'numpy.array', 'np.array', (['(rHAMh, rHAMk)'], {}), '((rHAMh, rHAMk))\n', (17945, 17961), True, 'import numpy as np\n'), ((17979, 18013), 'numpy.array', 'np.array', (['(phirefHAMh, phirefHAMk)'], {}), '((phirefHAMh, phirefHAMk))\n', (17987, 18013), True, 'import numpy as np\n'), ((18034, 18054), 'numpy.array', 'np.array', (['(0.0, 0.0)'], {}), '((0.0, 0.0))\n', (18042, 18054), True, 'import numpy as np\n'), ((18069, 18097), 'numpy.array', 'np.array', (['(rhoHAMh, rhoHAMk)'], {}), '((rhoHAMh, rhoHAMk))\n', (18077, 18097), True, 'import numpy as np\n'), ((18115, 18136), 'numpy.array', 'np.array', (['(-1.0, 1.0)'], {}), '((-1.0, 1.0))\n', (18123, 18136), True, 'import numpy as np\n'), ((18158, 18174), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (18166, 18174), True, 'import numpy as np\n'), ((18194, 18214), 'numpy.array', 'np.array', (['(0.0, 0.0)'], {}), '((0.0, 0.0))\n', (18202, 18214), True, 'import numpy as np\n'), ((18280, 18306), 'numpy.array', 'np.array', (['(angHip, angKne)'], {}), '((angHip, angKne))\n', (18288, 18306), True, 'import numpy as np\n'), ((19215, 19232), 'numpy.array', 'np.array', (['(0.08,)'], {}), '((0.08,))\n', (19223, 19232), True, 'import numpy as np\n'), ((19287, 19317), 'numpy.array', 'np.array', (['(160 * np.pi / 180,)'], {}), '((160 * np.pi / 180,))\n', (19295, 19317), True, 'import numpy as np\n'), ((19390, 19410), 'numpy.array', 'np.array', (['(0.0, 0.0)'], {}), 
'((0.0, 0.0))\n', (19398, 19410), True, 'import numpy as np\n'), ((19425, 19441), 'numpy.array', 'np.array', (['(0.5,)'], {}), '((0.5,))\n', (19433, 19441), True, 'import numpy as np\n'), ((19485, 19501), 'numpy.array', 'np.array', (['(1.0,)'], {}), '((1.0,))\n', (19493, 19501), True, 'import numpy as np\n'), ((19570, 19584), 'numpy.array', 'np.array', (['(0,)'], {}), '((0,))\n', (19578, 19584), True, 'import numpy as np\n'), ((19631, 19647), 'numpy.array', 'np.array', (['(0.0,)'], {}), '((0.0,))\n', (19639, 19647), True, 'import numpy as np\n'), ((19686, 19705), 'numpy.array', 'np.array', (['(angHip,)'], {}), '((angHip,))\n', (19694, 19705), True, 'import numpy as np\n'), ((20661, 20678), 'numpy.array', 'np.array', (['(0.08,)'], {}), '((0.08,))\n', (20669, 20678), True, 'import numpy as np\n'), ((20733, 20763), 'numpy.array', 'np.array', (['(120 * np.pi / 180,)'], {}), '((120 * np.pi / 180,))\n', (20741, 20763), True, 'import numpy as np\n'), ((20836, 20856), 'numpy.array', 'np.array', (['(0.0, 0.0)'], {}), '((0.0, 0.0))\n', (20844, 20856), True, 'import numpy as np\n'), ((20871, 20887), 'numpy.array', 'np.array', (['(0.5,)'], {}), '((0.5,))\n', (20879, 20887), True, 'import numpy as np\n'), ((20931, 20948), 'numpy.array', 'np.array', (['(-1.0,)'], {}), '((-1.0,))\n', (20939, 20948), True, 'import numpy as np\n'), ((21017, 21031), 'numpy.array', 'np.array', (['(0,)'], {}), '((0,))\n', (21025, 21031), True, 'import numpy as np\n'), ((21078, 21094), 'numpy.array', 'np.array', (['(0.0,)'], {}), '((0.0,))\n', (21086, 21094), True, 'import numpy as np\n'), ((21202, 21221), 'numpy.array', 'np.array', (['(angHip,)'], {}), '((angHip,))\n', (21210, 21221), True, 'import numpy as np\n'), ((22377, 22394), 'numpy.array', 'np.array', (['(rHAD,)'], {}), '((rHAD,))\n', (22385, 22394), True, 'import numpy as np\n'), ((22412, 22434), 'numpy.array', 'np.array', (['(phirefHAD,)'], {}), '((phirefHAD,))\n', (22420, 22434), True, 'import numpy as np\n'), ((22455, 22471), 'numpy.array', 
'np.array', (['(0.0,)'], {}), '((0.0,))\n', (22463, 22471), True, 'import numpy as np\n'), ((22486, 22505), 'numpy.array', 'np.array', (['(rhoHAD,)'], {}), '((rhoHAD,))\n', (22494, 22505), True, 'import numpy as np\n'), ((22523, 22539), 'numpy.array', 'np.array', (['(1.0,)'], {}), '((1.0,))\n', (22531, 22539), True, 'import numpy as np\n'), ((22561, 22575), 'numpy.array', 'np.array', (['(0,)'], {}), '((0,))\n', (22569, 22575), True, 'import numpy as np\n'), ((22595, 22611), 'numpy.array', 'np.array', (['(0.0,)'], {}), '((0.0,))\n', (22603, 22611), True, 'import numpy as np\n'), ((22677, 22701), 'numpy.array', 'np.array', (['(angHipFront,)'], {}), '((angHipFront,))\n', (22685, 22701), True, 'import numpy as np\n'), ((23794, 23811), 'numpy.array', 'np.array', (['(rHAB,)'], {}), '((rHAB,))\n', (23802, 23811), True, 'import numpy as np\n'), ((23829, 23851), 'numpy.array', 'np.array', (['(phirefHAB,)'], {}), '((phirefHAB,))\n', (23837, 23851), True, 'import numpy as np\n'), ((23872, 23888), 'numpy.array', 'np.array', (['(0.0,)'], {}), '((0.0,))\n', (23880, 23888), True, 'import numpy as np\n'), ((23903, 23922), 'numpy.array', 'np.array', (['(rhoHAB,)'], {}), '((rhoHAB,))\n', (23911, 23922), True, 'import numpy as np\n'), ((23940, 23957), 'numpy.array', 'np.array', (['(-1.0,)'], {}), '((-1.0,))\n', (23948, 23957), True, 'import numpy as np\n'), ((23979, 23993), 'numpy.array', 'np.array', (['(0,)'], {}), '((0,))\n', (23987, 23993), True, 'import numpy as np\n'), ((24013, 24029), 'numpy.array', 'np.array', (['(0.0,)'], {}), '((0.0,))\n', (24021, 24029), True, 'import numpy as np\n'), ((24095, 24119), 'numpy.array', 'np.array', (['(angHipFront,)'], {}), '((angHipFront,))\n', (24103, 24119), True, 'import numpy as np\n'), ((2508, 2520), 'numpy.sum', 'np.sum', (['tmpL'], {}), '(tmpL)\n', (2514, 2520), True, 'import numpy as np\n'), ((6861, 6889), 'numpy.arccos', 'np.arccos', (['(rTIAmin / rTIAmax)'], {}), '(rTIAmin / rTIAmax)\n', (6870, 6889), True, 'import numpy as np\n'), 
((8527, 8555), 'numpy.arccos', 'np.arccos', (['(rSOLmin / rSOLmax)'], {}), '(rSOLmin / rSOLmax)\n', (8536, 8555), True, 'import numpy as np\n'), ((10443, 10473), 'numpy.arccos', 'np.arccos', (['(rGASkmin / rGASkmax)'], {}), '(rGASkmin / rGASkmax)\n', (10452, 10473), True, 'import numpy as np\n'), ((11071, 11101), 'numpy.arccos', 'np.arccos', (['(rGASamin / rGASamax)'], {}), '(rGASamin / rGASamax)\n', (11080, 11101), True, 'import numpy as np\n'), ((14054, 14082), 'numpy.arccos', 'np.arccos', (['(rVASmin / rVASmax)'], {}), '(rVASmin / rVASmax)\n', (14063, 14082), True, 'import numpy as np\n'), ((16046, 16076), 'numpy.arccos', 'np.arccos', (['(rREFkmin / rREFkmax)'], {}), '(rREFkmin / rREFkmax)\n', (16055, 16076), True, 'import numpy as np\n'), ((1613, 1638), 'numpy.zeros', 'np.zeros', (['self.typeMuscle'], {}), '(self.typeMuscle)\n', (1621, 1638), True, 'import numpy as np\n'), ((1994, 2057), 'numpy.sin', 'np.sin', (['((self.phiref[i] - self.phimaxref[i]) * self.phiScale[i])'], {}), '((self.phiref[i] - self.phimaxref[i]) * self.phiScale[i])\n', (2000, 2057), True, 'import numpy as np\n'), ((2081, 2144), 'numpy.sin', 'np.sin', (['((self.angJoi[i] - self.phimaxref[i]) * self.phiScale[i])'], {}), '((self.angJoi[i] - self.phimaxref[i]) * self.phiScale[i])\n', (2087, 2144), True, 'import numpy as np\n'), ((2282, 2345), 'numpy.cos', 'np.cos', (['((self.angJoi[i] - self.phimaxref[i]) * self.phiScale[i])'], {}), '((self.angJoi[i] - self.phimaxref[i]) * self.phiScale[i])\n', (2288, 2345), True, 'import numpy as np\n')] |
'''interface with serlib.cparser module
We'll use platform defined C-types int (np.intc) and long long int
(np.ulonglong) for maximal portability because python C-api does not
support fixed width C types: https://docs.python.org/3/c-api/long.html
For numpy reference see
https://numpy.org/devdocs/user/basics.types.html
For platform reference see
https://en.cppreference.com/w/cpp/language/types
On 32 bit data model ILP32 (Win32, 32-bit Linux, OSX) and 64 bit
datamodel LP64 (Linux, OSX) and LLP64 (Windows):
int is 32 bit
long long is 64 bit
Py_ssize_t is signed integer of the same bitwidth as ssize_t
Py_ssize_t seems to be equivalent to np.intp
"Integer used for indexing, typically the same as ssize_t"
per https://numpy.org/devdocs/user/basics.types.html
'''
from typing import List
import numpy as np
import serlib.cparser
# buffer limit to exchange with C code to parse S-expression
# TODO: need to find how to determine BUFFER_SIZE_LIMIT dynamically
BUFFER_SIZE_LIMIT = 100000000
def hash_bytestring(bytestring: bytes) -> int:
    '''Hash *bytestring* through the C extension (serlib.cparser.hash_string).'''
    hashed = serlib.cparser.hash_string(bytestring)
    return hashed
# TODO: REMOVE
# def list_of_string(input_s: str, parser=None) -> List[str]:
# ''' from s-expression string "(expr_1 expr_2 ... expr_k)"
# returns a list of strings ["expr_1", "expr_2", ..., "expr_k"]
# '''
# if parser is None:
# p = SExpParser()
# input_b = input_s.encode()
# res, loc = p.postfix_of_bytestring(input_b), p.last_location()
# n_args = -res[-1]
# if not n_args >= 0:
# raise ValueError("the input s-exprression {input_s} is not an s-expr list (may be an atom?)")
# else:
# args = []
# for i in range(n_args):
# _, loc = p.postfix_of_bytestring(input_b, [i]), p.last_location()
# args.append(input_b[loc].decode())
# return args
class SExpParser:
    ''' This class provides parsing functions for s-expressions.
    A typical usage is to parse s-expressions obtained as goals
    from coq-serapi.

    The parser keeps a persistent vocabulary: each atom seen so far gets a
    positive integer id (``self.dict``) and can be mapped back to its
    bytestring through ``self.inv_dict`` (index 0 is a dummy entry, so
    valid ids start at 1). Postfix encodings use positive values for atom
    ids, 0 for the empty list ``()``, and a negative value ``-k`` to close
    a list over the previous ``k`` items.
    '''
    def __init__(self):
        # Pre-allocated hash table shared with the C parser; slot i holds
        # the hash of the atom whose id is i+1.
        buffer_size = BUFFER_SIZE_LIMIT
        # Persistent dictionary
        self.hash_list = np.zeros(buffer_size, dtype=np.ulonglong)
        self.dict = {}          # atom bytestring -> positive integer id
        self.inv_dict = [b'']   # id -> atom bytestring (id 0 unused)

    def postfix_of_sexp(self, string, address=None):
        """
        return a postfix representation in np.array[int] of the input string
        containing the subtree s-expression at the address
        """
        return self.postfix_of_bytestring(string.encode('utf8'), address)

    def postfix_of_bytestring(self, bytestring, address=None):
        """
        return a postfix representation in np.array[int] of the input
        s-expression bytestring at the tree address ``address``
        (formerly parse_bytestring)
        """
        if address is None:
            address = []
        np_address = np.array(address, dtype=np.intc)
        self._start_pos, self._end_pos, post_fix, np_add_dict = serlib.cparser.parse(
            bytestring, np_address, self.hash_list, len(self.dict))
        # np_add_dict holds (start, end) byte offsets of atoms not already
        # in the vocabulary; register each new atom.
        for i in range(np_add_dict.shape[0] // 2):
            start = np_add_dict[2 * i]
            end = np_add_dict[2 * i + 1]
            word = bytestring[start:end]
            word_hash = hash_bytestring(word)
            self.hash_list[len(self.dict)] = word_hash
            self.dict[word] = len(self.dict) + 1
            self.inv_dict.append(word)
        return post_fix

    def parse_bytestring_new(self, bytestring, address=None):
        """Parse the whole bytestring, then slice out the postfix encoding
        of the subtree located at tree address ``address``.

        FIX: ``address`` previously defaulted to a mutable ``[]`` (shared
        across calls); it now defaults to ``None`` with the same observable
        behavior.
        """
        if address is None:
            address = []
        postfix = self.postfix_of_bytestring(bytestring, [])
        ann = serlib.cparser.annotate(postfix)
        start, end = serlib.cparser.subtree(postfix, ann, np.array(address, dtype=np.intc))
        return postfix[start:end]

    def last_location(self):
        """Return the byte slice of the most recently parsed s-expression."""
        return slice(self._start_pos, self._end_pos)

    def hash_dict(self):
        """Return a mapping from atom bytestring to its stored hash value."""
        return {key: self.hash_list[value - 1] for key, value in self.dict.items()}

    def to_sexp_legacy(self, encoding_list):
        """Decode a postfix encoding back into an s-expression string.

        Atoms are always joined with a single space. Returns ``None`` when
        the encoding is empty.
        """
        stack = []
        for value in encoding_list:
            if value > 0:
                new_element = self.inv_dict[value]
            elif value == 0:
                new_element = b'()'
            else:
                new_element = b'(' + b' '.join(stack[value:]) + b')'
                del stack[value:]
            stack.append(new_element)
        if stack:
            return stack[0].decode('utf8')
        else:
            return None

    def to_sexp(self, encoding_list):
        """Decode a postfix encoding back into an s-expression string.

        Same scheme as :meth:`to_sexp_legacy`, but omits the separating
        space when unnecessary (next to parentheses or quotes), so the
        output is closer to the original serialized form.
        """
        stack = []
        for value in encoding_list:
            if value > 0:
                new_element = self.inv_dict[value]
            elif value == 0:
                new_element = b'()'
            else:
                value_ = len(stack) + value
                new_element = b'(' + stack[value_]
                for element in stack[value_ + 1:]:
                    # Insert a space only between two tokens that would
                    # otherwise merge (neither side is a paren or quote).
                    if not (new_element[-1] in [ord(')'), ord('"')] or element[0] in [ord('('), ord('"')]):
                        new_element += b' '
                    new_element += element
                new_element += b')'
                del stack[value:]
            stack.append(new_element)
        if stack:
            return stack[0].decode('utf8')
        else:
            return None
def check_inverse(parser: SExpParser, bytestring: bytes) -> bool:
    '''Sanity check that decoding is the inverse of encoding.

    Encodes *bytestring*, decodes the encoding back to s-expression text,
    re-encodes that text, and verifies both encodings agree element-wise.
    '''
    first_pass = parser.postfix_of_bytestring(bytestring)
    round_trip = parser.to_sexp(first_pass)
    second_pass = parser.postfix_of_bytestring(round_trip)
    return (first_pass == second_pass).all()
def encode(string, address):
    """Flat-encode the s-expression *string*, keeping only content whose
    enclosing parenthesis path matches the tree ``address`` prefix
    (``address=[]`` keeps everything).

    :param string: s-expression text
    :param address: list of child indices selecting a subtree level
    :return: list mixing atom tokens (str, whitespace preserved) and
        negative ints, where ``-k`` closes a list that had ``k`` children

    FIXES: removed a leftover debug ``print``; guarded the child-count
    increment so a top-level atom (no enclosing parenthesis) no longer
    raises IndexError.
    """
    levels = len(address)
    open_pars = []   # child counter for each currently-open parenthesis
    output = []
    pos = 0
    while pos < len(string):
        if string[pos] == '(':
            open_pars.append(0)
            pos += 1
        elif string[pos] == ')':
            last_element = open_pars.pop()
            if open_pars[:levels] == address:
                output.append(-last_element)
            if open_pars:
                open_pars[-1] += 1
            pos += 1
        else:
            # Consume everything up to the next parenthesis as one token
            # (whitespace is kept inside the token).
            token = ''
            while pos < len(string) and string[pos] not in '()':
                token += string[pos]
                pos += 1
            if open_pars[:levels] == address:
                output.append(token)
            if open_pars:
                open_pars[-1] += 1
    return output
| [
"numpy.array",
"numpy.zeros"
] | [((2250, 2291), 'numpy.zeros', 'np.zeros', (['buffer_size'], {'dtype': 'np.ulonglong'}), '(buffer_size, dtype=np.ulonglong)\n', (2258, 2291), True, 'import numpy as np\n'), ((2964, 2996), 'numpy.array', 'np.array', (['address'], {'dtype': 'np.intc'}), '(address, dtype=np.intc)\n', (2972, 2996), True, 'import numpy as np\n'), ((3764, 3796), 'numpy.array', 'np.array', (['address'], {'dtype': 'np.intc'}), '(address, dtype=np.intc)\n', (3772, 3796), True, 'import numpy as np\n')] |
import os
import os.path as osp
import math
import numpy as np
# ---- load random results from 'process.txt' ----
random_res = []
for seed in range(10):
    seed_dir = osp.join('Experiment', 'optimal', 'seed%d' % seed)
    rewards = []
    for power in range(0, 35, 5):
        proc_file = osp.join(seed_dir, 'power%d' % power, 'process.txt')
        with open(proc_file, 'r') as f:
            f.readline()  # skip header line
            # second line, second column: the recorded reward
            rewards.append(float(f.readline().split()[1]))
    random_res.append(np.array(rewards))
np.save(osp.join('Resdata', 'random'), np.array(random_res))
# ---- generate fixed test channels ----
from comms import Environment
rhos = [x * 0.1 for x in range(11)]
for rho in rhos:
    fold = osp.join('Testdata', 'rho%.1f' % rho)
    os.makedirs(fold, exist_ok=True)
    env = Environment(30)
    env.rho = rho
    env.reset(2020)
    # Collect 500 CSI snapshots (the initial one plus 499 updates).
    channels = {
        'bs2user_csi': [env.bs2user_csi],
        'bs2ris_csi': [env.bs2ris_csi],
        'ris2user_csi': [env.ris2user_csi],
    }
    for _ in range(499):
        env._changeCSI()
        channels['bs2user_csi'].append(env.bs2user_csi)
        channels['bs2ris_csi'].append(env.bs2ris_csi)
        channels['ris2user_csi'].append(env.ris2user_csi)
    for name, csi_list in channels.items():
        np.save(osp.join(fold, name), np.array(csi_list))
# ---- merge result ----
from run import test
save_path = osp.join('Resdata', 'noise_compare')
os.makedirs(save_path, exist_ok=True)
test_fpath = osp.join('Testdata', 'rho_main')
config_path = osp.join('Experiment', 'compare_noise', 'seed%d', 'power%d', 'degree%d')
power = 30
res = []
# Evaluate the selected seeds over the six noise-exponent settings.
for i in [0, 1, 5, 6]:
    per_degree = []
    for degree in range(1, 7):
        rew = test(test_fpath, config_path % (i, power, degree))
        per_degree.append(rew)
        print(f'seed:{i}, power:{power}, exp:{-degree}, rew:{rew}')
    res.append(np.array(per_degree))
np.save(osp.join(save_path, 'power%d.npy' % power), np.array(res))
np.save(osp.join(save_path, 'power%d.npy'%power), np.array(res)) | [
"os.makedirs",
"comms.Environment",
"os.path.join",
"numpy.array",
"run.test"
] | [((1385, 1421), 'os.path.join', 'osp.join', (['"""Resdata"""', '"""noise_compare"""'], {}), "('Resdata', 'noise_compare')\n", (1393, 1421), True, 'import os.path as osp\n'), ((1422, 1459), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (1433, 1459), False, 'import os\n'), ((1473, 1505), 'os.path.join', 'osp.join', (['"""Testdata"""', '"""rho_main"""'], {}), "('Testdata', 'rho_main')\n", (1481, 1505), True, 'import os.path as osp\n'), ((1521, 1593), 'os.path.join', 'osp.join', (['"""Experiment"""', '"""compare_noise"""', '"""seed%d"""', '"""power%d"""', '"""degree%d"""'], {}), "('Experiment', 'compare_noise', 'seed%d', 'power%d', 'degree%d')\n", (1529, 1593), True, 'import os.path as osp\n'), ((171, 221), 'os.path.join', 'osp.join', (['"""Experiment"""', '"""optimal"""', "('seed%d' % seed)"], {}), "('Experiment', 'optimal', 'seed%d' % seed)\n", (179, 221), True, 'import os.path as osp\n'), ((514, 543), 'os.path.join', 'osp.join', (['"""Resdata"""', '"""random"""'], {}), "('Resdata', 'random')\n", (522, 543), True, 'import os.path as osp\n'), ((545, 565), 'numpy.array', 'np.array', (['random_res'], {}), '(random_res)\n', (553, 565), True, 'import numpy as np\n'), ((701, 738), 'os.path.join', 'osp.join', (['"""Testdata"""', "('rho%.1f' % rho)"], {}), "('Testdata', 'rho%.1f' % rho)\n", (709, 738), True, 'import os.path as osp\n'), ((741, 773), 'os.makedirs', 'os.makedirs', (['fold'], {'exist_ok': '(True)'}), '(fold, exist_ok=True)\n', (752, 773), False, 'import os\n'), ((784, 799), 'comms.Environment', 'Environment', (['(30)'], {}), '(30)\n', (795, 799), False, 'from comms import Environment\n'), ((1876, 1918), 'os.path.join', 'osp.join', (['save_path', "('power%d.npy' % power)"], {}), "(save_path, 'power%d.npy' % power)\n", (1884, 1918), True, 'import os.path as osp\n'), ((1918, 1931), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (1926, 1931), True, 'import numpy as np\n'), ((289, 342), 
'os.path.join', 'osp.join', (['main_path', "('power%d' % power)", '"""process.txt"""'], {}), "(main_path, 'power%d' % power, 'process.txt')\n", (297, 342), True, 'import os.path as osp\n'), ((485, 504), 'numpy.array', 'np.array', (['sub_p_res'], {}), '(sub_p_res)\n', (493, 504), True, 'import numpy as np\n'), ((1140, 1169), 'os.path.join', 'osp.join', (['fold', '"""bs2user_csi"""'], {}), "(fold, 'bs2user_csi')\n", (1148, 1169), True, 'import os.path as osp\n'), ((1171, 1192), 'numpy.array', 'np.array', (['bs2user_csi'], {}), '(bs2user_csi)\n', (1179, 1192), True, 'import numpy as np\n'), ((1206, 1234), 'os.path.join', 'osp.join', (['fold', '"""bs2ris_csi"""'], {}), "(fold, 'bs2ris_csi')\n", (1214, 1234), True, 'import os.path as osp\n'), ((1236, 1256), 'numpy.array', 'np.array', (['bs2ris_csi'], {}), '(bs2ris_csi)\n', (1244, 1256), True, 'import numpy as np\n'), ((1270, 1300), 'os.path.join', 'osp.join', (['fold', '"""ris2user_csi"""'], {}), "(fold, 'ris2user_csi')\n", (1278, 1300), True, 'import os.path as osp\n'), ((1302, 1324), 'numpy.array', 'np.array', (['ris2user_csi'], {}), '(ris2user_csi)\n', (1310, 1324), True, 'import numpy as np\n'), ((1849, 1866), 'numpy.array', 'np.array', (['sub_res'], {}), '(sub_res)\n', (1857, 1866), True, 'import numpy as np\n'), ((1708, 1758), 'run.test', 'test', (['test_fpath', '(config_path % (i, power, degree))'], {}), '(test_fpath, config_path % (i, power, degree))\n', (1712, 1758), False, 'from run import test\n')] |
import numpy as np
import sys
pp = "/Users/andres.perez/source/parametric_spatial_audio_processing"
sys.path.append(pp)
import parametric_spatial_audio_processing as psa
import matplotlib.pyplot as plt
import scipy.stats
from utils import *
from file_utils import build_result_dict_from_metadata_array, build_metadata_result_array_from_event_dict
from seld_dcase2019_master.metrics.evaluation_metrics import distance_between_spherical_coordinates_rad
def preprocess(data, sr, params):
    """
    Validate the ambisonic input and compute its STFT.

    Asserts first order ambisonics (4 channels) and dimensionality order.
    :param data: np.array (num_frames, num_channels)
    :param sr: sampling rate
    :param params: params dict
    :return: psa.Stft instance, band-limited to [fmin, fmax]
    """
    shape = np.shape(data)
    assert shape[1] == 4  # first order ambisonics -> exactly 4 channels
    # Optionally truncate the file for quick testing.
    if params['quick_test']:
        end_frame = int(np.ceil(sr * params['quick_test_file_duration']))
    else:
        end_frame = shape[0]
    signal = psa.Signal(data[0:end_frame].T, sr, 'acn', 'n3d')
    stft = psa.Stft.fromSignal(
        signal,
        window_size=params['window_size'],
        window_overlap=params['window_overlap'],
        nfft=params['nfft'],
    ).limit_bands(params['fmin'], params['fmax'])
    if params['plot']:
        psa.plot_magnitude_spectrogram(stft)
    return stft
def estimate_doa(data, sr, params):
    """
    Given an input audio, get the most significant tf bins per frame
    :param data: np.array (num_frames, num_channels)
    :param sr: sampling rate
    :param params: params dict
    :return: an array in the form :
    [frame, [class_id, azi, ele],[class_id, azi, ele]... ]
    without repeated frame instances, quantized at hop_size,
    containing all valid tf bins doas.
    """
    ### Preprocess data
    X = preprocess(data, sr, params)
    N = X.get_num_time_bins()
    K = X.get_num_frequency_bins()
    # NOTE(review): `r` (parameter for compute_ita_re below) is shadowed
    # later by the roll-offset loop variable; it is only read before that
    # point, so behavior is unaffected.
    r = params['r']
    ### Diffuseness mask
    doa = psa.compute_DOA(X)
    directivity = X.compute_ita_re(r=r)
    directivity_mask = directivity.compute_mask(th=params['directivity_th'])
    ### Energy density mask
    e = psa.compute_energy_density(X)
    block_size = params['energy_density_local_th_size']
    tl = e.compute_threshold_local(block_size=block_size)
    e_mask = e.compute_mask(tl)
    ### DOA Variance mask (computed on azimuth variance)
    vicinity_radius = params['doa_std_vicinity_radius']
    if np.size(vicinity_radius) == 1:
        # Square!
        r_k = vicinity_radius
        r_n = vicinity_radius
    elif np.size(vicinity_radius) == 2:
        # Rectangle! [k, n]
        r_k = vicinity_radius[0]
        r_n = vicinity_radius[1]
    else:
        # NOTE(review): Warning has no .warn() method (AttributeError at
        # runtime) and r_k/r_n would stay undefined — broken branch.
        Warning.warn()
    # TODO: optimize the for loop
    std = np.zeros((K, N))
    doa0_k_array = []
    # Stack shifted copies of the azimuth plane; NOTE(review): np.roll
    # without `axis` flattens first, so the shift wraps across rows —
    # presumably intended as a +-r_n time-bin shift, confirm.
    for r in range(-r_n,r_n+1):
        doa0_k_array.append(np.roll(doa.data[0,:,:],r))
    doa0_k = np.stack(doa0_k_array, axis=0)
    # Circular std of azimuth over the stacked shifts and the frequency
    # vicinity of each bin k (vectorized over time bins).
    for k in range(r_k, K - r_k):
        std[k, :] = scipy.stats.circstd(doa0_k[:, k - r_k:k + r_k + 1, :], high=np.pi, low=-np.pi, axis=(0, 1))
    # not optimized version...
    # for k in range(r_k, K-r_k):
    #     for n in range(r_n, N-r_n):
    #         # azi
    #         std[k, n] = scipy.stats.circstd(doa.data[0, k-r_k:k+r_k+1, n-r_n:n+r_n+1], high=np.pi, low=-np.pi)
    #         # ele
    #         # std[k, n] = np.std(doa.data[1, k-r_k:k+r_k+1, n-r_n:n+r_n+1])
    # Edges: largest value
    std_max = np.max(std)
    std[0:r_k, :] = std_max
    std[K-r_k:K, :] = std_max
    std[:, 0:r_n] = std_max
    std[:, N - r_n:N] = std_max
    # Scale values to min/max
    std_scaled = std / std_max
    # Invert values
    std_scaled_inv = 1 - std_scaled
    # Compute mask
    doa_std = psa.Stft(doa.t, doa.f, std_scaled_inv, doa.sample_rate)
    doa_std_mask = doa_std.compute_mask(th=params['doa_std_th'])
    # Combine masks: low azimuth variance AND high directivity AND
    # locally-high energy density.
    mask_all = doa_std_mask.apply_mask(directivity_mask).apply_mask(e_mask)
    doa_th = doa.apply_mask(mask_all)
    ## Median average
    median_averaged_doa = np.empty(doa.data.shape)
    median_averaged_doa.fill(np.nan)
    # NOTE(review): computed from the std radii above, before r_k/r_n are
    # re-assigned just below; also (2*r_k-1)+(2*r_n-1) is a perimeter-like
    # count, not the (2*r_k+1)*(2*r_n+1) window area — confirm intended.
    vicinity_size = (2*r_k-1) + (2*r_n-1)
    doa_median_average_nan_th = params['doa_median_average_nan_th']
    vicinity_radius = params['median_filter_vicinity_radius']
    if np.size(vicinity_radius) == 1:
        # Square!
        r_k = vicinity_radius
        r_n = vicinity_radius
    elif np.size(vicinity_radius) == 2:
        # Rectangle! [k, n]
        r_k = vicinity_radius[0]
        r_n = vicinity_radius[1]
    else:
        # NOTE(review): broken branch, see note above.
        Warning.warn()
    # TODO: optimize the for loop
    # Median-filter the masked DOA: keep a bin only when enough non-NaN
    # neighbors survived the masking.
    for k in range(r_k, K - r_k):
        for n in range(r_n, N - r_n):
            azis = discard_nans(doa_th.data[0, k - r_k:k + r_k + 1, n - r_n:n + r_n + 1].flatten())
            if azis.size > vicinity_size * doa_median_average_nan_th:
                median_averaged_doa[0, k, n] = circmedian(azis, 'rad')
            eles = discard_nans(doa_th.data[1, k - r_k:k + r_k + 1, n - r_n:n + r_n + 1].flatten())
            if eles.size > vicinity_size * doa_median_average_nan_th:
                median_averaged_doa[1, k, n] = np.median(eles)
    doa_th_median = psa.Stft(doa.t, doa.f, median_averaged_doa, doa.sample_rate)
    ## Plot stuff
    if params['plot']:
        psa.plot_doa(doa, title='doa')
        psa.plot_doa(doa.apply_mask(e_mask), title='e mask')
        psa.plot_doa(doa.apply_mask(directivity_mask), title='directivity mask')
        psa.plot_doa(doa.apply_mask(doa_std_mask), title='doa std mask')
        psa.plot_doa(doa_th, title='doa mask all')
        psa.plot_doa(doa_th_median, title='doa circmedian')
        plt.show()
    ## Fold values into a vector
    # Get a list of bins with the position estimation according to the selected doa_method
    # TODO: OPTIMIZE
    active_windows = []
    position = []
    for n in range(N):
        azi = discard_nans(doa_th_median.data[0, :, n])
        ele = discard_nans(doa_th_median.data[1, :, n])
        if np.size(azi) < params['num_min_valid_bins']:
            # Empty! not enough suitable doa values in this analysis window
            pass
        else:
            active_windows.append(n)
            position.append([rad2deg(azi), rad2deg(ele)])
    # result = [bin, class_id, azi, ele] with likely repeated bin instances
    result = []
    label = params['default_class_id']
    for window_idx, window in enumerate(active_windows):
        num_bins = np.shape(position[window_idx])[1]
        for b in range(num_bins):
            azi = position[window_idx][0][b]
            ele = position[window_idx][1][b]
            result.append([window, label, azi, ele])
    # Perform the window transformation by averaging within frame
    ## TODO: assert our bins are smaller than required ones
    current_window_hop = (params['window_size'] - params['window_overlap']) / float(sr)
    window_factor = params['required_window_hop'] / current_window_hop
    # Since frames are ordered (at least they should), we can optimise that a little bit
    last_frame = -1
    # result_quantized = [frame, [class_id, azi, ele],[class_id, azi, ele]... ] without repeated bin instances
    result_quantized = []
    for row in result:
        frame = row[0]
        new_frame = int(np.floor(frame / window_factor))
        if new_frame == last_frame:
            result_quantized[-1].append([row[1], row[2], row[3]])
        else:
            result_quantized.append([new_frame, [row[1], row[2], row[3]]])
        last_frame = new_frame
    return result_quantized
# Assumes overlapping, compute (1,2)-Kmeans on each segment
def group_events(result_quantized, params):
    """
    Segmentate an array of doas into events
    :param result_quantized: an array containing frames and doas
    in the form [frame, [class_id, azi, ele],[class_id, azi, ele]... ]
    without repeated frame instances, with ordered frames
    :param params: params dict
    :return: metadata_result_array, result_dict
    metadata_result_array: array with one event per row, in the form
    [sound_event_recording,start_time,end_time,ele,azi,dist]
    result_dict: dict with one frame per key, in the form:
    {frame: [class_id, azi, ele] or [[class_id1, azi1, ele1], [class_id2, azi2, ele2]]}

    NOTE(review): this function uses Python-2-only idioms
    (``dict.iteritems()`` and sorting the list returned by ``.keys()``);
    it will raise AttributeError/TypeError under Python 3.
    """
    ## Generate result_averaged_dict: grouping doas per frame into 1 or 2 clusters
    ## result_averaged_dict = {frame: [label, azi, ele] or [[label, azi1, ele1],label, azi2, ele2]]}
    result_averaged_dict = {}
    frames = []
    for row in result_quantized:
        frames.append(row[0])
    std_azis = []
    std_eles = []
    std_all = []
    std_th = params['min_std_overlapping']
    label = params['default_class_id']
    for r_idx, row in enumerate(result_quantized):
        # Get all doas
        frame = row[0]
        azis = []
        eles = []
        for v in row[1:]:
            azis.append(v[1])
            eles.append(v[2])
        # Compute std of doas
        std_azis.append(scipy.stats.circstd(azis, high=180, low=-180))
        std_eles.append(np.std(eles))
        # Combined spread score: half-weighted circular azimuth std plus
        # elevation std.
        std_all.append(std_azis[-1]/2 + std_eles[-1])
        # If big std, we assume 2-overlap
        if std_all[-1] >= std_th:
            # 2 clusters:
            x = deg2rad(np.asarray([azis, eles]).T)
            try:
                kmeans2 = HybridKMeans(n_init=params['num_init_kmeans']).fit(x)
            except RuntimeWarning:
                # All points in x are equal...
                result_averaged_dict[frame] = [label, rad2deg(x[0,0]), rad2deg(x[0,1])]
                continue
            # Keep the centroids of this frame
            result_averaged_dict[frame] = []
            for c in kmeans2.cluster_centers_:
                azi = rad2deg(c[0])
                ele = rad2deg(c[1])
                result_averaged_dict[frame].append([label, azi, ele])
        else:
            # 1 cluster: directly compute the median and keep it
            azi = circmedian(np.asarray(azis), unit='deg')
            ele = np.median(eles)
            result_averaged_dict[frame] = [label, azi, ele]
    if params['plot']:
        plt.figure()
        plt.suptitle('kmeans stds')
        plt.scatter(frames,std_all,label='all')
        plt.axhline(y=std_th)
        plt.legend()
        plt.grid()
        plt.show()
    ## Group doas by distance and time proximity
    # Generate event_dict = { event_id: [ [label, azi_frame, ele_frame] ...}
    # each individual event is a key, and values is a list of [frame, azi, ele]
    d_th = params['max_angular_distance_within_event']
    frame_th = params['max_frame_distance_within_event']
    event_idx = 0
    event_dict = {}
    # Ensure ascending order
    # NOTE(review): Python 2 only — .keys() returns a view in Python 3 and
    # has no .sort().
    frames = result_averaged_dict.keys()
    frames.sort()
    # TODO: write in a more modular way
    for frame in frames:
        value = result_averaged_dict[frame]
        if len(value) == 3:
            # One source (flat [label, azi, ele] entry)
            azi = value[1]
            ele = value[2]
            if not bool(event_dict):
                # Empty: append
                event_dict[event_idx] = [[frame, azi, ele]]
                event_idx += 1
            else:
                # Compute distance with all previous frames
                new_event = True # default
                for idx in range(event_idx):
                    # Compute distance with median of all previous
                    azis = np.asarray(event_dict[idx])[:, 1]
                    eles = np.asarray(event_dict[idx])[:, 2]
                    median_azi = circmedian(azis, unit='deg')
                    median_ele = np.median(eles)
                    d = distance_between_spherical_coordinates_rad(deg2rad(median_azi),
                                                                    deg2rad(median_ele),
                                                                    deg2rad(azi),
                                                                    deg2rad(ele))
                    last_frame, last_azi, last_ele = event_dict[idx][-1]
                    # Same event when close in angle AND close in time to
                    # the event's last observation.
                    if d < d_th and abs(frame - last_frame) < frame_th:
                        # Same event
                        new_event = False
                        event_dict[idx].append([frame, azi, ele])
                        break
                if new_event:
                    event_dict[event_idx] = [[frame, azi, ele]]
                    event_idx += 1
        elif len(value) == 2:
            # Two sources (nested [[label, azi, ele], [label, azi, ele]])
            for v in value:
                azi = v[1]
                ele = v[2]
                if not bool(event_dict):
                    # Empty: append
                    event_dict[event_idx] = [[frame, azi, ele]]
                    event_idx += 1
                    # print(event_dict)
                else:
                    # Compute distance with previous frame
                    new_event = True
                    for idx in range(event_idx):
                        # Compute distance with median of all previous frames
                        azis = np.asarray(event_dict[idx])[:, 1]
                        eles = np.asarray(event_dict[idx])[:, 2]
                        median_azi = circmedian(azis, unit='deg')
                        median_ele = np.median(eles)
                        d = distance_between_spherical_coordinates_rad(deg2rad(median_azi),
                                                                        deg2rad(median_ele),
                                                                        deg2rad(azi),
                                                                        deg2rad(ele))
                        last_frame, last_azi, last_ele = event_dict[idx][-1]
                        if d < d_th and abs(frame - last_frame) < frame_th:
                            # Same event
                            new_event = False
                            event_dict[idx].append([frame, azi, ele])
                            break
                    if new_event:
                        event_dict[event_idx] = [[frame, azi, ele]]
                        event_idx += 1
    ## Explicitly avoid overlapping > 2
    # Generate event_dict_no_overlap: pop doas (in ascending order) if more than 2 overlapping events
    # TODO: more sophisticated algorithm based on event confidence or similar
    # Get max frame (it might be over 3000)
    # NOTE(review): the loops below rebind `event_idx` (the counter used
    # above); harmless since grouping has finished, but fragile.
    max_frame = 0
    for event_idx, event_values in event_dict.iteritems():
        end_frame = event_values[-1][0]
        if end_frame >= max_frame:
            max_frame = end_frame
    # Compute the number of events per frame
    events_per_frame = []
    for i in range(max_frame+1):
        events_per_frame.append([])
    for event_idx, event_values in event_dict.iteritems():
        start_frame = event_values[0][0]
        end_frame = event_values[-1][0]
        for frame in range(start_frame, end_frame + 1):
            events_per_frame[frame].append(event_idx)
    # Pop exceeding events
    for i, e in enumerate(events_per_frame):
        while len(e) > 2:
            e.pop()
    # Build event_dict_no_overlap from events_per_frame
    event_dict_no_overlap = {}
    for event_idx, event_values in event_dict.iteritems():
        event_dict_no_overlap[event_idx] = []
        for e in event_values:
            frame = e[0]
            if event_idx in events_per_frame[frame]:
                event_dict_no_overlap[event_idx].append(e)
    ## Filter events to eliminate the spureous ones
    event_dict_filtered = {}
    filtered_event_idx = 0
    min_frames = params['min_num_frames_per_event']
    for frame, v in event_dict_no_overlap.iteritems():
        if len(v) >= min_frames:
            event_dict_filtered[filtered_event_idx] = event_dict_no_overlap[frame]
            filtered_event_idx += 1
    ## Build metadata result array
    offset = params['frame_offset']
    if np.size(offset) == 1:
        pre_offset = post_offset = offset
    elif np.size(offset) == 2:
        # Rectangle! [k, n]
        pre_offset = offset[0]
        post_offset= offset[1]
    else:
        # NOTE(review): Warning has no .warn() method (AttributeError at
        # runtime) and pre/post offsets would stay undefined — broken branch.
        Warning.warn()
    hop_size = params['required_window_hop'] # s
    metadata_result_array = build_metadata_result_array_from_event_dict(event_dict_filtered,
                                                                      label,
                                                                      hop_size,
                                                                      pre_offset,
                                                                      post_offset)
    ## Build result dictionary
    result_dict = build_result_dict_from_metadata_array(metadata_result_array,
                                                        hop_size)
    return metadata_result_array, result_dict
| [
"matplotlib.pyplot.grid",
"parametric_spatial_audio_processing.compute_energy_density",
"parametric_spatial_audio_processing.compute_DOA",
"sys.path.append",
"parametric_spatial_audio_processing.Stft.fromSignal",
"numpy.asarray",
"parametric_spatial_audio_processing.Stft",
"numpy.max",
"matplotlib.p... | [((100, 119), 'sys.path.append', 'sys.path.append', (['pp'], {}), '(pp)\n', (115, 119), False, 'import sys\n'), ((1116, 1175), 'parametric_spatial_audio_processing.Signal', 'psa.Signal', (['data[start_frame:end_frame].T', 'sr', '"""acn"""', '"""n3d"""'], {}), "(data[start_frame:end_frame].T, sr, 'acn', 'n3d')\n", (1126, 1175), True, 'import parametric_spatial_audio_processing as psa\n'), ((2139, 2157), 'parametric_spatial_audio_processing.compute_DOA', 'psa.compute_DOA', (['X'], {}), '(X)\n', (2154, 2157), True, 'import parametric_spatial_audio_processing as psa\n'), ((2313, 2342), 'parametric_spatial_audio_processing.compute_energy_density', 'psa.compute_energy_density', (['X'], {}), '(X)\n', (2339, 2342), True, 'import parametric_spatial_audio_processing as psa\n'), ((2932, 2948), 'numpy.zeros', 'np.zeros', (['(K, N)'], {}), '((K, N))\n', (2940, 2948), True, 'import numpy as np\n'), ((3072, 3102), 'numpy.stack', 'np.stack', (['doa0_k_array'], {'axis': '(0)'}), '(doa0_k_array, axis=0)\n', (3080, 3102), True, 'import numpy as np\n'), ((3627, 3638), 'numpy.max', 'np.max', (['std'], {}), '(std)\n', (3633, 3638), True, 'import numpy as np\n'), ((3908, 3963), 'parametric_spatial_audio_processing.Stft', 'psa.Stft', (['doa.t', 'doa.f', 'std_scaled_inv', 'doa.sample_rate'], {}), '(doa.t, doa.f, std_scaled_inv, doa.sample_rate)\n', (3916, 3963), True, 'import parametric_spatial_audio_processing as psa\n'), ((4193, 4217), 'numpy.empty', 'np.empty', (['doa.data.shape'], {}), '(doa.data.shape)\n', (4201, 4217), True, 'import numpy as np\n'), ((5312, 5372), 'parametric_spatial_audio_processing.Stft', 'psa.Stft', (['doa.t', 'doa.f', 'median_averaged_doa', 'doa.sample_rate'], {}), '(doa.t, doa.f, median_averaged_doa, doa.sample_rate)\n', (5320, 5372), True, 'import parametric_spatial_audio_processing as psa\n'), ((16318, 16428), 'file_utils.build_metadata_result_array_from_event_dict', 'build_metadata_result_array_from_event_dict', (['event_dict_filtered', 
'label', 'hop_size', 'pre_offset', 'post_offset'], {}), '(event_dict_filtered, label,\n hop_size, pre_offset, post_offset)\n', (16361, 16428), False, 'from file_utils import build_result_dict_from_metadata_array, build_metadata_result_array_from_event_dict\n'), ((16763, 16833), 'file_utils.build_result_dict_from_metadata_array', 'build_result_dict_from_metadata_array', (['metadata_result_array', 'hop_size'], {}), '(metadata_result_array, hop_size)\n', (16800, 16833), False, 'from file_utils import build_result_dict_from_metadata_array, build_metadata_result_array_from_event_dict\n'), ((745, 759), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (753, 759), True, 'import numpy as np\n'), ((782, 796), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (790, 796), True, 'import numpy as np\n'), ((1463, 1496), 'parametric_spatial_audio_processing.plot_magnitude_spectrogram', 'psa.plot_magnitude_spectrogram', (['X'], {}), '(X)\n', (1493, 1496), True, 'import parametric_spatial_audio_processing as psa\n'), ((2611, 2635), 'numpy.size', 'np.size', (['vicinity_radius'], {}), '(vicinity_radius)\n', (2618, 2635), True, 'import numpy as np\n'), ((4435, 4459), 'numpy.size', 'np.size', (['vicinity_radius'], {}), '(vicinity_radius)\n', (4442, 4459), True, 'import numpy as np\n'), ((5424, 5454), 'parametric_spatial_audio_processing.plot_doa', 'psa.plot_doa', (['doa'], {'title': '"""doa"""'}), "(doa, title='doa')\n", (5436, 5454), True, 'import parametric_spatial_audio_processing as psa\n'), ((5678, 5720), 'parametric_spatial_audio_processing.plot_doa', 'psa.plot_doa', (['doa_th'], {'title': '"""doa mask all"""'}), "(doa_th, title='doa mask all')\n", (5690, 5720), True, 'import parametric_spatial_audio_processing as psa\n'), ((5729, 5780), 'parametric_spatial_audio_processing.plot_doa', 'psa.plot_doa', (['doa_th_median'], {'title': '"""doa circmedian"""'}), "(doa_th_median, title='doa circmedian')\n", (5741, 5780), True, 'import parametric_spatial_audio_processing as 
psa\n'), ((5789, 5799), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5797, 5799), True, 'import matplotlib.pyplot as plt\n'), ((10292, 10304), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10302, 10304), True, 'import matplotlib.pyplot as plt\n'), ((10313, 10340), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""kmeans stds"""'], {}), "('kmeans stds')\n", (10325, 10340), True, 'import matplotlib.pyplot as plt\n'), ((10349, 10390), 'matplotlib.pyplot.scatter', 'plt.scatter', (['frames', 'std_all'], {'label': '"""all"""'}), "(frames, std_all, label='all')\n", (10360, 10390), True, 'import matplotlib.pyplot as plt\n'), ((10397, 10418), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'std_th'}), '(y=std_th)\n', (10408, 10418), True, 'import matplotlib.pyplot as plt\n'), ((10427, 10439), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10437, 10439), True, 'import matplotlib.pyplot as plt\n'), ((10448, 10458), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (10456, 10458), True, 'import matplotlib.pyplot as plt\n'), ((10467, 10477), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10475, 10477), True, 'import matplotlib.pyplot as plt\n'), ((16021, 16036), 'numpy.size', 'np.size', (['offset'], {}), '(offset)\n', (16028, 16036), True, 'import numpy as np\n'), ((903, 951), 'numpy.ceil', 'np.ceil', (["(sr * params['quick_test_file_duration'])"], {}), "(sr * params['quick_test_file_duration'])\n", (910, 951), True, 'import numpy as np\n'), ((1184, 1278), 'parametric_spatial_audio_processing.Stft.fromSignal', 'psa.Stft.fromSignal', (['x'], {'window_size': 'window_size', 'window_overlap': 'window_overlap', 'nfft': 'nfft'}), '(x, window_size=window_size, window_overlap=\n window_overlap, nfft=nfft)\n', (1203, 1278), True, 'import parametric_spatial_audio_processing as psa\n'), ((2729, 2753), 'numpy.size', 'np.size', (['vicinity_radius'], {}), '(vicinity_radius)\n', (2736, 2753), True, 'import numpy as np\n'), 
((3031, 3060), 'numpy.roll', 'np.roll', (['doa.data[0, :, :]', 'r'], {}), '(doa.data[0, :, :], r)\n', (3038, 3060), True, 'import numpy as np\n'), ((4553, 4577), 'numpy.size', 'np.size', (['vicinity_radius'], {}), '(vicinity_radius)\n', (4560, 4577), True, 'import numpy as np\n'), ((6136, 6148), 'numpy.size', 'np.size', (['azi'], {}), '(azi)\n', (6143, 6148), True, 'import numpy as np\n'), ((6591, 6621), 'numpy.shape', 'np.shape', (['position[window_idx]'], {}), '(position[window_idx])\n', (6599, 6621), True, 'import numpy as np\n'), ((7407, 7438), 'numpy.floor', 'np.floor', (['(frame / window_factor)'], {}), '(frame / window_factor)\n', (7415, 7438), True, 'import numpy as np\n'), ((9232, 9244), 'numpy.std', 'np.std', (['eles'], {}), '(eles)\n', (9238, 9244), True, 'import numpy as np\n'), ((10184, 10199), 'numpy.median', 'np.median', (['eles'], {}), '(eles)\n', (10193, 10199), True, 'import numpy as np\n'), ((16094, 16109), 'numpy.size', 'np.size', (['offset'], {}), '(offset)\n', (16101, 16109), True, 'import numpy as np\n'), ((5276, 5291), 'numpy.median', 'np.median', (['eles'], {}), '(eles)\n', (5285, 5291), True, 'import numpy as np\n'), ((10136, 10152), 'numpy.asarray', 'np.asarray', (['azis'], {}), '(azis)\n', (10146, 10152), True, 'import numpy as np\n'), ((9427, 9451), 'numpy.asarray', 'np.asarray', (['[azis, eles]'], {}), '([azis, eles])\n', (9437, 9451), True, 'import numpy as np\n'), ((11753, 11768), 'numpy.median', 'np.median', (['eles'], {}), '(eles)\n', (11762, 11768), True, 'import numpy as np\n'), ((11563, 11590), 'numpy.asarray', 'np.asarray', (['event_dict[idx]'], {}), '(event_dict[idx])\n', (11573, 11590), True, 'import numpy as np\n'), ((11624, 11651), 'numpy.asarray', 'np.asarray', (['event_dict[idx]'], {}), '(event_dict[idx])\n', (11634, 11651), True, 'import numpy as np\n'), ((13390, 13405), 'numpy.median', 'np.median', (['eles'], {}), '(eles)\n', (13399, 13405), True, 'import numpy as np\n'), ((13188, 13215), 'numpy.asarray', 'np.asarray', 
(['event_dict[idx]'], {}), '(event_dict[idx])\n', (13198, 13215), True, 'import numpy as np\n'), ((13253, 13280), 'numpy.asarray', 'np.asarray', (['event_dict[idx]'], {}), '(event_dict[idx])\n', (13263, 13280), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from esmvalcore.experimental import Recipe
from esmvalcore.experimental.recipe_output import DataFile
from ewatercycle.forcing import generate, load
from ewatercycle.forcing._lisflood import LisfloodForcing
def test_plot():
    """Plotting is not implemented for LisfloodForcing and must raise."""
    forcing = LisfloodForcing(
        directory=".",
        start_time="1989-01-02T00:00:00Z",
        end_time="1999-01-02T00:00:00Z",
    )
    with pytest.raises(NotImplementedError):
        forcing.plot()
def create_netcdf(var_name, filename):
    """Write a tiny random 2x2x3 dataset holding *var_name* to *filename*.

    Returns the written file wrapped in an esmvalcore ``DataFile``.
    """
    values = 15 + 8 * np.random.randn(2, 2, 3)
    lons = [[-99.83, -99.32], [-99.79, -99.23]]
    lats = [[42.25, 42.21], [42.63, 42.59]]
    dataset = xr.Dataset(
        {var_name: (["longitude", "latitude", "time"], values)},
        coords={
            "lon": (["longitude", "latitude"], lons),
            "lat": (["longitude", "latitude"], lats),
            "time": pd.date_range("2014-09-06", periods=3),
        },
    )
    dataset.to_netcdf(filename)
    return DataFile(filename)
@pytest.fixture
def mock_recipe_run(monkeypatch, tmp_path):
    """Overload the `run` method on esmvalcore Recipe's.

    Returns a dict that, after the patched ``Recipe.run`` has executed,
    exposes the recipe data under the key ``data_during_run``.
    """
    data = {}
    # TODO add lisvap input files once implemented, see issue #96
    class MockTaskOutput:
        # Fake ESMValTool task output: two small NetCDF files written to tmp_path.
        data_files = (
            create_netcdf("pr", tmp_path / "lisflood_pr.nc"),
            create_netcdf("tas", tmp_path / "lisflood_tas.nc"),
        )
    def mock_run(self):
        """Store recipe for inspection and return dummy output."""
        # Captured via closure so the test can inspect what the recipe contained.
        nonlocal data
        data["data_during_run"] = self.data
        return {"diagnostic_daily/script": MockTaskOutput()}
    monkeypatch.setattr(Recipe, "run", mock_run)
    return data
class TestGenerateRegionFromShapeFile:
    """Tests for generating LISFLOOD forcing for a region given by a shapefile."""
    @pytest.fixture
    def forcing(self, mock_recipe_run, sample_shape):
        """Run ``generate`` against the mocked recipe and return the forcing."""
        return generate(
            target_model="lisflood",
            dataset="ERA5",
            start_time="1989-01-02T00:00:00Z",
            end_time="1999-01-02T00:00:00Z",
            shape=sample_shape,
        )
    @pytest.fixture
    def reference_recipe(self):
        """Expected ESMValTool recipe data for the LISFLOOD daily diagnostic."""
        return {
            "datasets": [
                {
                    "dataset": "ERA5",
                    "project": "OBS6",
                    "tier": 3,
                    "type": "reanaly",
                    "version": 1,
                }
            ],
            "diagnostics": {
                "diagnostic_daily": {
                    "description": "LISFLOOD input "
                    "preprocessor for "
                    "ERA-Interim and ERA5 "
                    "data",
                    "scripts": {
                        "script": {
                            "catchment": "Rhine",
                            "script": "hydrology/lisflood.py",
                        }
                    },
                    "variables": {
                        "pr": {
                            "end_year": 1999,
                            "mip": "day",
                            "preprocessor": "daily_water",
                            "start_year": 1989,
                        },
                        "rsds": {
                            "end_year": 1999,
                            "mip": "day",
                            "preprocessor": "daily_radiation",
                            "start_year": 1989,
                        },
                        "tas": {
                            "end_year": 1999,
                            "mip": "day",
                            "preprocessor": "daily_temperature",
                            "start_year": 1989,
                        },
                        "tasmax": {
                            "end_year": 1999,
                            "mip": "day",
                            "preprocessor": "daily_temperature",
                            "start_year": 1989,
                        },
                        "tasmin": {
                            "end_year": 1999,
                            "mip": "day",
                            "preprocessor": "daily_temperature",
                            "start_year": 1989,
                        },
                        "tdps": {
                            "end_year": 1999,
                            "mip": "Eday",
                            "preprocessor": "daily_temperature",
                            "start_year": 1989,
                        },
                        "uas": {
                            "end_year": 1999,
                            "mip": "day",
                            "preprocessor": "daily_windspeed",
                            "start_year": 1989,
                        },
                        "vas": {
                            "end_year": 1999,
                            "mip": "day",
                            "preprocessor": "daily_windspeed",
                            "start_year": 1989,
                        },
                    },
                }
            },
            "documentation": {
                "authors": ["verhoeven_stefan", "kalverla_peter", "andela_bouwe"],
                "projects": ["ewatercycle"],
                "references": ["acknow_project"],
            },
            "preprocessors": {
                "daily_radiation": {
                    "convert_units": {"units": "J m-2 " "day-1"},
                    "custom_order": True,
                    "extract_region": {
                        "end_latitude": 52.2,
                        "end_longitude": 11.9,
                        "start_latitude": 46.3,
                        "start_longitude": 4.1,
                    },
                    "extract_shape": {"crop": True, "method": "contains"},
                    "regrid": {
                        "lat_offset": True,
                        "lon_offset": True,
                        "scheme": "linear",
                        "target_grid": "0.1x0.1",
                    },
                },
                "daily_temperature": {
                    "convert_units": {"units": "degC"},
                    "custom_order": True,
                    "extract_region": {
                        "end_latitude": 52.2,
                        "end_longitude": 11.9,
                        "start_latitude": 46.3,
                        "start_longitude": 4.1,
                    },
                    "extract_shape": {"crop": True, "method": "contains"},
                    "regrid": {
                        "lat_offset": True,
                        "lon_offset": True,
                        "scheme": "linear",
                        "target_grid": "0.1x0.1",
                    },
                },
                "daily_water": {
                    "convert_units": {"units": "kg m-2 d-1"},
                    "custom_order": True,
                    "extract_region": {
                        "end_latitude": 52.2,
                        "end_longitude": 11.9,
                        "start_latitude": 46.3,
                        "start_longitude": 4.1,
                    },
                    "extract_shape": {"crop": True, "method": "contains"},
                    "regrid": {
                        "lat_offset": True,
                        "lon_offset": True,
                        "scheme": "linear",
                        "target_grid": "0.1x0.1",
                    },
                },
                "daily_windspeed": {
                    "custom_order": True,
                    "extract_region": {
                        "end_latitude": 52.2,
                        "end_longitude": 11.9,
                        "start_latitude": 46.3,
                        "start_longitude": 4.1,
                    },
                    "extract_shape": {"crop": True, "method": "contains"},
                    "regrid": {
                        "lat_offset": True,
                        "lon_offset": True,
                        "scheme": "linear",
                        "target_grid": "0.1x0.1",
                    },
                },
                "general": {
                    "custom_order": True,
                    "extract_region": {
                        "end_latitude": 52.2,
                        "end_longitude": 11.9,
                        "start_latitude": 46.3,
                        "start_longitude": 4.1,
                    },
                    "extract_shape": {"crop": True, "method": "contains"},
                    "regrid": {
                        "lat_offset": True,
                        "lon_offset": True,
                        "scheme": "linear",
                        "target_grid": "0.1x0.1",
                    },
                },
            },
        }
    def test_result(self, forcing, tmp_path, sample_shape):
        """The generated forcing object matches the expected LisfloodForcing."""
        expected = LisfloodForcing(
            directory=str(tmp_path),
            start_time="1989-01-02T00:00:00Z",
            end_time="1999-01-02T00:00:00Z",
            shape=str(sample_shape),
            PrefixPrecipitation="lisflood_pr.nc",
            PrefixTavg="lisflood_tas.nc",
        )
        assert forcing == expected
    def test_recipe_configured(
        self, forcing, mock_recipe_run, reference_recipe, sample_shape
    ):
        """The recipe passed to ESMValTool matches the reference recipe."""
        actual = mock_recipe_run["data_during_run"]
        # Remove long description and absolute path so assert is easier
        actual_desc = actual["documentation"]["description"]
        del actual["documentation"]["description"]
        actual_shapefile = actual["preprocessors"]["general"]["extract_shape"][
            "shapefile"
        ]
        # Will also del other occurrences of shapefile due to extract shape object
        # being shared between preprocessors
        del actual["preprocessors"]["general"]["extract_shape"]["shapefile"]
        assert actual == reference_recipe
        assert actual_shapefile == sample_shape
        assert "LISFLOOD" in actual_desc
    def test_saved_yaml(self, forcing, tmp_path):
        """The forcing written to disk round-trips through load()."""
        saved_forcing = load(tmp_path)
        # shape is not saved in the yaml file, so clear it before comparing
        forcing.shape = None
        assert forcing == saved_forcing
| [
"ewatercycle.forcing.load",
"esmvalcore.experimental.recipe_output.DataFile",
"pytest.raises",
"ewatercycle.forcing.generate",
"ewatercycle.forcing._lisflood.LisfloodForcing",
"numpy.random.randn",
"pandas.date_range"
] | [((308, 411), 'ewatercycle.forcing._lisflood.LisfloodForcing', 'LisfloodForcing', ([], {'directory': '"""."""', 'start_time': '"""1989-01-02T00:00:00Z"""', 'end_time': '"""1999-01-02T00:00:00Z"""'}), "(directory='.', start_time='1989-01-02T00:00:00Z', end_time=\n '1999-01-02T00:00:00Z')\n", (323, 411), False, 'from ewatercycle.forcing._lisflood import LisfloodForcing\n'), ((996, 1014), 'esmvalcore.experimental.recipe_output.DataFile', 'DataFile', (['filename'], {}), '(filename)\n', (1004, 1014), False, 'from esmvalcore.experimental.recipe_output import DataFile\n'), ((447, 481), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (460, 481), False, 'import pytest\n'), ((1817, 1964), 'ewatercycle.forcing.generate', 'generate', ([], {'target_model': '"""lisflood"""', 'dataset': '"""ERA5"""', 'start_time': '"""1989-01-02T00:00:00Z"""', 'end_time': '"""1999-01-02T00:00:00Z"""', 'shape': 'sample_shape'}), "(target_model='lisflood', dataset='ERA5', start_time=\n '1989-01-02T00:00:00Z', end_time='1999-01-02T00:00:00Z', shape=sample_shape\n )\n", (1825, 1964), False, 'from ewatercycle.forcing import generate, load\n'), ((10030, 10044), 'ewatercycle.forcing.load', 'load', (['tmp_path'], {}), '(tmp_path)\n', (10034, 10044), False, 'from ewatercycle.forcing import generate, load\n'), ((560, 584), 'numpy.random.randn', 'np.random.randn', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (575, 584), True, 'import numpy as np\n'), ((901, 939), 'pandas.date_range', 'pd.date_range', (['"""2014-09-06"""'], {'periods': '(3)'}), "('2014-09-06', periods=3)\n", (914, 939), True, 'import pandas as pd\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Util functions
# @author <EMAIL>
from lxml import etree
from lxml.etree import tostring
from itertools import chain
from nltk.tokenize import wordpunct_tokenize
from random import shuffle
from sklearn import metrics
import numpy
import operator
import pandas
import re
import subprocess
import torch
debug = True
#def clean_file(filename):
# remove blank lines | remove extra spaces| remove leading and trailing spaces | fix utf-8 chars
#command = r"sed '/^\s*$/d' $file | sed -e 's/ */ /g' | sed -e 's/^ //g' | sed -e 's/ $//g' | sed -e 's/&/and/g' | sed -e 's/ / /g' | sed -e 's/’/\'/g' | sed -e 's/”/\"/g' | sed -e 's/“/\"/g' | sed -e 's/�//g' | sed -e 's/¯\7;//g'| sed -e 's/۪/\'/g'"
# TODO
def add_labels(df, labels, labelname):
    """Attach *labels* to *df* as a new column called *labelname*.

    Rows beyond the end of *labels* get the empty string and trigger a
    warning. The frame is modified in place and also returned.
    """
    print('add_labels:', len(labels))
    df[labelname] = ''
    num_labels = len(labels)
    for row_index, _row in df.iterrows():
        if row_index < num_labels:
            df.at[row_index, labelname] = labels[row_index]
        else:
            print('WARNING, add_labels i out of range:', row_index)
    return df
def add_time_ids(event_elem, tag_elem):
    """Annotate each EVENT element with the ids of the times it is linked to.

    TLINK children of *tag_elem* connect an event id to a time id in either
    direction (eventID/relatedToTime or timeID/relatedToEventID). Every EVENT
    in *event_elem* that has at least one linked time gets a comma-separated
    'relatedToTime' attribute.

    Fix: events with no TLINK used to crash with KeyError on
    ``time_map[eid]`` even though the code immediately checked for None;
    ``dict.get`` makes the lookup safe and leaves such events untouched.

    Returns *event_elem* (modified in place).
    """
    time_map = {}
    for tlink in tag_elem.findall('TLINK'):
        eventid = None
        timeid = None
        if 'eventID' in tlink.attrib and 'relatedToTime' in tlink.attrib:
            eventid = tlink.get('eventID')
            timeid = tlink.get('relatedToTime')
        elif 'timeID' in tlink.attrib and 'relatedToEventID' in tlink.attrib:
            eventid = tlink.get('relatedToEventID')
            timeid = tlink.get('timeID')
        if timeid is not None and eventid is not None:
            time_map.setdefault(eventid, []).append(timeid)
    for event in event_elem:
        eid = event.get('eid')
        # .get: an event with no linked time stays unchanged instead of KeyError
        time_ids = time_map.get(eid)
        if time_ids is not None:
            event.set('relatedToTime', ','.join(time_ids))
    return event_elem
''' (In progress)
'''
def add_thyme_labels(filename, outfile):
    """Add a <diagnosis> child to every record in *filename*, write to *outfile*.

    The numeric part of each record's ``record_id`` attribute (characters
    after the 2-char prefix, before the first '_') selects the label from the
    ``brain``/``colon`` id lists; unknown ids get 'none' with a warning.

    Fixes: ``child.get('record_id')`` returns a plain string, so the old
    ``.text`` access raised AttributeError; and the tree must be written via
    ``xmltree.write(outfile)`` — ``etree.write`` does not exist.
    """
    brain = []
    colon = []
    xmltree = etree.parse(filename)
    root = xmltree.getroot()
    for child in root:
        # record_id attribute looks like e.g. "ID1234_x"; keep the digits only
        idname = child.get('record_id').split('_')[0]
        id = idname[2:]
        if int(id) in brain:
            label = 'brain_cancer'
        elif int(id) in colon:
            label = 'colon_cancer'
        else:
            print('WARNING: id not found:', id)
            label = 'none'
        labelnode = etree.SubElement(child, 'diagnosis')
        labelnode.text = label
    xmltree.write(outfile)
def create_df(df):
    """Return a fresh, empty DataFrame with a single 'ID' column.

    The *df* argument is currently unused but kept for interface stability.
    """
    empty_frame = pandas.DataFrame(columns=['ID'])
    return empty_frame
def collapse_labels(labels):
    """Flatten a list of label lists into one flat list, preserving order."""
    return [item for group in labels for item in group]
def extract_ranks(events, event_list=None, allow_empty=False):
    """Return the float rank of every EVENT found in the xml-tagged *events*.

    If *event_list* is given, ranks are taken from those elements (keyed by
    their 'eid' attribute) instead of from the parsed text. An EVENT with no
    'rank' attribute aborts the whole process via exit(1) unless
    *allow_empty* is True, in which case rank 0 is substituted.
    """
    elem = load_xml_tags(events, decode=False)
    ranks = []
    event_map = {}
    if debug: print('extract_ranks: events:', type(events))# 'elem:', etree.tostring(elem))
    if debug: print('extract_ranks: event_list:', type(event_list))
    # Pass 1 (optional): build eid -> rank map from the externally supplied list
    if event_list is not None:
        for event in event_list:
            if event.tag == 'EVENT':
                #print(etree.tostring(event))
                id = event.get('eid')
                rank = event.get('rank')
                if rank is None:
                    print('ERROR: no rank attribute found:', etree.tostring(event))
                    rank = 0
                    if not allow_empty:
                        exit(1)
                event_map[id] = rank
    # Pass 2: walk the parsed text in document order and collect ranks
    event_count = 0
    for event in elem:
        if debug: print('child tag:', event.tag)
        if event.tag == 'EVENT':
            event_count += 1
            #print('elem event:', etree.tostring(event))
            if event_list is None:
                rank = event.get('rank')
            else:
                eventid = event.get('eid')
                #print('looking up eid', eventid)
                rank = event_map[eventid]
            if rank is None:
                print('ERROR: no rank attribute found:', etree.tostring(event))
                rank = 0
                if not allow_empty:
                    exit(1)
                #ranks.append(0)
            #if int(rank) == 0:
            #    print('WARNING: rank is 0:', etree.tostring(event))
            ranks.append(float(rank))
            #if int(rank) == 0:
            #    print('WARNING: rank is 0:', etree.tostring(event))
    if debug: print('events:', event_count, 'ranks:', len(ranks))
    # Sanity check: exactly one rank per EVENT element
    assert(len(ranks) == event_count)
    return ranks
''' Convert arrows in text to non-arrows (for xml processing)
filename: the file to fix (file will be overwritten)
'''
def fix_arrows(filename):
    """Replace arrow tokens in *filename* in place via sed.

    '-->' and '->' become 'to'; ' < ' and ' > ' become ' lt ' / ' gt ' so
    the file can later be parsed as XML.

    Fix: ``Popen.communicate()`` returns bytes, which the old code wrote to
    a text-mode file (TypeError on Python 3); the output is decoded first
    and the file is closed via a context manager.
    """
    sed_command = r"sed -e 's/-->/to/g' " + filename + r" | sed -e 's/->/to/g' | sed -e 's/ < / lt /g' | sed -e 's/ > / gt /g'"
    print("sed_command: ", sed_command)
    ps = subprocess.Popen(sed_command, shell=True, stdout=subprocess.PIPE)
    output = ps.communicate()[0].decode('utf8')
    with open(filename, 'w') as out:
        out.write(output)
def fix_escaped_chars(filename):
    """Rewrite *filename* in place with sed: unescape HTML angle-bracket
    entities and normalise typographic quote/dash characters to ASCII.

    NOTE(review): several of the patterns below contain non-ASCII (possibly
    mojibake) characters that render near-identically, and two pairs look
    like duplicates; verify the exact bytes against version control before
    editing these literals.
    """
    subprocess.call(["sed", "-i", "-e", 's/</ </g', filename])
    subprocess.call(["sed", "-i", "-e", 's/>/> /g', filename])
    subprocess.call(["sed", "-i", "-e", 's/ / /g', filename])
    subprocess.call(["sed", "-i", "-e", "s/‘/'/g", filename])
    subprocess.call(["sed", "-i", "-e", "s/’/'/g", filename])
    subprocess.call(["sed", "-i", "-e", "s/‘/'/g", filename])
    subprocess.call(["sed", "-i", "-e", "s/’/'/g", filename])
    subprocess.call(["sed", "-i", "-e", "s/–/,/g", filename])
''' Remove blank lines, convert \n to space, remove double spaces, insert a line break before each record
filename: the file to fix (file will be overwritten)
rec_type: the type of record: adult, child, or neonate
'''
def fix_line_breaks(filename, rec_type):
    """Normalise *filename* in place so each record starts on its own line.

    Blank lines are removed, all newlines are collapsed to spaces, and a
    newline is inserted before every record tag (chosen by *rec_type*:
    'adult' (default), 'child' or 'neonate') and before </root>.

    Fix: ``Popen.communicate()`` returns bytes, which the old code wrote to
    a text-mode file (TypeError on Python 3); the output is decoded first
    and the file is closed via a context manager.
    """
    tag = "<Adult_Anonymous>"
    if rec_type == "child":
        tag = "<Child_Anonymous>"
    elif rec_type == "neonate":
        tag = "<Neonate_Anonymous>"
    sed_command = "s/" + tag + r"/\n" + tag + "/g"
    sed_command2 = r"sed -e 's/<\/root>/\n<\/root>/g'"
    tr_command = "tr " + r"'\n' " + "' '"
    command = "sed -e '/^\s$/d' " + filename + " | " + tr_command + " | sed -e 's/ / /g' | sed -e '" + sed_command + "'" + " | " + sed_command2
    ps = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    output = ps.communicate()[0].decode('utf8')
    with open(filename, 'w') as out:
        out.write(output)
def fix_xml_tags(text):
    """Repair escaped EVENT/TIMEX3/SIGNAL/TLINK markup in *text* so it can be
    parsed as XML: bare '&' is escaped first, then the various escaped forms
    of the annotation tags are restored to literal tags, and any leftover '&'
    is escaped again at the end.

    NOTE(review): this rendering of the file appears to have been HTML-entity
    decoded, so the exact escaped-literal patterns below may not match the
    original bytes — verify against version control. The trailing ';' in the
    EVENT pattern on the third replace looks like a typo (the TIMEX3
    counterpart has no ';'); confirm before relying on that branch.
    """
    text = text.replace('&', '&amp;')
    text = text.replace('<EVENT>', '<EVENT>').replace('</EVENT>', '</EVENT>')
    text = text.replace('<EVENT', '<EVENT')
    text = text.replace('&lt;EVENT&gt;;', '<EVENT>').replace('&lt;/EVENT&gt;', '</EVENT>')
    text = text.replace('<TIMEX3>', '<TIMEX3>').replace('</TIMEX3>', '</TIMEX3>')
    text = text.replace('<TIMEX3', '<TIMEX3')
    text = text.replace('&lt;TIMEX3&gt;', '<TIMEX3>').replace('&lt;/TIMEX3&gt;', '</TIMEX3>')
    text = text.replace('<SIGNAL>', '<SIGNAL>').replace('</SIGNAL>', '</SIGNAL>')
    text = text.replace('<SIGNAL', '<SIGNAL')
    text = text.replace('<TLINK', '<TLINK').replace('/>', '/>')
    text = text.replace('" >', '">')
    text = text.replace('">', '">').replace(' >', '>')
    text = text.replace('&', '&amp;') # escape any leftover and signs
    return text
def shuffle_input(x, y):
    """Jointly shuffle each aligned (x[i], y[i]) pair of lists.

    Every x[i]/y[i] pair is shuffled with the same permutation so the
    alignment between items and labels is preserved. Returns new outer
    lists; the inputs are not modified.
    """
    shuffled_x = []
    shuffled_y = []
    for index in range(len(x)):
        paired = list(zip(x[index], y[index]))
        shuffle(paired)
        unzipped = [list(column) for column in zip(*paired)]
        shuffled_x.append(unzipped[0])
        shuffled_y.append(unzipped[1])
    return shuffled_x, shuffled_y
''' Shuffle events within the same rank value, produce one shuffled example for every in-order example
'''
def generate_permutations(ids, x, y):
    """Append one shuffled copy of every (id, x, y) example, then shuffle all.

    WARNING: ``new_ids``/``new_x``/``new_y`` are aliases of the input lists,
    so *ids*, *x* and *y* are mutated in place — each grows to twice its
    original length (original examples followed by within-example shuffles).
    The final lists returned are freshly built from the globally shuffled
    pairs, so example order in the return value is random.
    """
    new_ids = ids
    new_x = x
    new_y = y
    # len(x) is evaluated once, so only the original examples are copied
    for n in range(len(x)):
        #rank_map = {}
        doc_id = ids[n]
        x_list = x[n]
        y_list = y[n]
        new_x_list = []
        new_y_list = []
        # Shuffle items and labels together so they stay aligned
        temp_list = list(zip(x_list, y_list))
        shuffle(temp_list)
        new_lists = [list(t) for t in zip(*temp_list)]
        new_x_list = new_lists[0]
        new_y_list = new_lists[1]
        new_ids.append(doc_id)
        new_x.append(new_x_list)
        new_y.append(new_y_list)
        #print('Shuffle entry:', str(new_y_list))
    # Shuffle the final training list
    temp_pairs = list(zip(new_ids, new_x, new_y))
    shuffle(temp_pairs)
    #print('shuffle temp pairs[0]:', str(temp_pairs[0]))
    new_lists = [list(t) for t in zip(*temp_pairs)]
    new_ids = new_lists[0]
    new_x = new_lists[1]
    new_y = new_lists[2]
    #print('shuffle new_y[0]', str(new_y[0]))
    return new_ids, new_x, new_y
def load_time_pairs(filename):
    """Load tokenized (time1, time2) expression pairs and order labels from a
    header-less three-column CSV file."""
    print('load time pairs:', filename)
    frame = pandas.read_csv(filename, header=None, index_col=False)
    frame.columns = ['time1', 'time2', 'order']
    pairs = []
    labels = []
    for _, entry in frame.iterrows():
        tokenized = (split_words(entry['time1']), split_words(entry['time2']))
        pairs.append(tokenized)
        labels.append(entry['order'])
    return pairs, labels
def load_xml_tags(ann, unwrap=True, decode=False):
    """Parse annotation text into an Element wrapped in a synthetic <root>.

    Bytes input is decoded as utf8 (always when *decode* is set). When
    *unwrap* is True the input is first parsed and its inner content
    re-serialised, stripping the outermost element.
    """
    if debug: print('load_xml_tags:', ann)
    if decode or type(ann) is bytes:
        ann = ann.decode('utf8')
    if unwrap:
        inner = stringify_children(etree.fromstring(ann))
    else:
        inner = ann
    # Re-escape any & signs that might have become unescaped along the way
    inner = fix_xml_tags(inner)
    return etree.fromstring("<root>" + inner + "</root>")
def reorder_encodings(encodings, orderings):
    """Sort the rows of each encoding tensor by their rank in *orderings*.

    Each non-None encoding is flattened to (events, dim), its rows reordered
    by ascending rank (stable for ties), and restacked as (1, events, dim).
    None entries are silently skipped, so the result may be shorter than the
    input.
    """
    print('reorder encodings:', len(encodings), len(orderings))
    assert(len(encodings) == len(orderings))
    dim = 0  # filled in from the first usable tensor
    reordered = []
    for enc_tensor, order in zip(encodings, orderings):
        if enc_tensor is None:
            continue
        dim = enc_tensor.size(-1)
        flat = enc_tensor.view(-1, dim)
        print('timeline for reordering:', flat.size(), 'ranks:', order)
        # Stable sort of row indices by their rank value
        ranked_indices = sorted(range(len(order)), key=lambda idx: order[idx])
        rows = []
        for idx in ranked_indices:
            print('picking rank:', order[idx], 'at index:', idx)
            rows.append(flat[idx])
        stacked = torch.stack(rows).view(1, -1, dim)
        print('encodings size:', stacked.size())
        reordered.append(stacked)
    return reordered
def score_majority_class(true_labs):
    """Baseline scorer: predict the most frequent label for every instance.

    Returns weighted (precision, recall, f1) of the majority-class
    predictions against *true_labs*.

    Fixes: ``dict.iteritems()`` is Python 2 only and raised AttributeError
    under Python 3 (now ``items()``); and the prediction list was being
    rebound to a single label inside the loop instead of appended to, so
    the metrics received a scalar rather than one prediction per instance.
    """
    pred_labs = []
    count_map = {}
    for lab in true_labs:
        count_map[lab] = count_map.get(lab, 0) + 1
    # Most frequent label wins (ties broken arbitrarily, as before)
    majority_lab = max(count_map.items(), key=operator.itemgetter(1))[0]
    for _ in true_labs:
        pred_labs.append(majority_lab)
    # Score
    precision = metrics.precision_score(true_labs, pred_labs, average="weighted")
    recall = metrics.recall_score(true_labs, pred_labs, average="weighted")
    f1 = metrics.f1_score(true_labs, pred_labs, average="weighted")
    return precision, recall, f1
''' Scores vector labels with binary values
returns: avg precision, recall, f1 of 1 labels (not 0s)
'''
def score_vec_labels(true_labs, pred_labs):
    """Score binary label vectors over the positive (1) class.

    Returns (macro_p, macro_r, macro_f1, micro_p, micro_r, micro_f1):
    per-vector precision/recall/f1 averaged across vectors, plus the same
    measures pooled over all positions (micro).
    """
    assert(len(true_labs) == len(pred_labs))
    p_scores = []
    r_scores = []
    f1_scores = []
    micro_pos = 0
    micro_tp = 0
    micro_fp = 0
    for true_vec, pred_vec in zip(true_labs, pred_labs):
        pos = 0
        tp = 0
        fp = 0
        for true_val, pred_val in zip(true_vec, pred_vec):
            if true_val == 1:
                pos += 1
                micro_pos += 1
                if pred_val == 1:
                    tp += 1
                    micro_tp += 1
            elif pred_val == 1:
                fp += 1
                micro_fp += 1
        prec = float(tp) / float(tp + fp) if (tp + fp) > 0 else 0.0
        rec = float(tp) / float(pos) if pos > 0 else 0.0
        f1_val = 0.0 if prec == 0.0 and rec == 0.0 else 2 * (prec * rec) / (prec + rec)
        p_scores.append(prec)
        r_scores.append(rec)
        f1_scores.append(f1_val)
    precision = numpy.average(p_scores)
    recall = numpy.average(r_scores)
    f1 = numpy.average(f1_scores)
    micro_p = float(micro_tp) / float(micro_tp + micro_fp) if (micro_tp + micro_fp) > 0 else 0.0
    micro_r = float(micro_tp) / float(micro_pos) if micro_pos > 0 else 0.0
    micro_f1 = 0.0 if micro_p == 0.0 and micro_r == 0.0 else 2 * (micro_p * micro_r) / (micro_p + micro_r)
    return precision, recall, f1, micro_p, micro_r, micro_f1
''' A function for separating words and punctuation
Not using NLTK because it would split apart contractions and we don't want that
'''
def split_words(text):
    """Split *text* into word and punctuation tokens using NLTK's
    wordpunct tokenizer."""
    tokens = wordpunct_tokenize(text)
    return tokens
''' Get content of a tree node as a string
node: etree.Element
'''
def stringify_children(node):
    """Return node's leading text plus the serialisation of each child element,
    concatenated into one string.

    Note: ``tostring`` returns bytes, so children appear in ``str(bytes)``
    form, matching the previous behaviour; ``str(node.text)`` yields 'None'
    for elements with no text, also as before.

    Fix: iterates ``list(node)`` instead of ``node.getchildren()`` — that
    method was deprecated for years and removed from the standard-library
    ElementTree in Python 3.9.
    """
    parts = [str(node.text)] + [str(tostring(child)) for child in node]
    # filter removes possible Nones in texts and tails
    return ''.join(filter(None, parts))
''' Get contents of tags as a list of strings
text: the xml-tagged text to process
tags: a list of the tags to extract
atts: a list of attributes to extract as well
'''
def phrases_from_tags(text, tags, atts=[]):
    """Collect the text and selected attributes of top-level elements whose
    tag (case-insensitively) is in *tags*.

    Returns a list of dicts with a 'text' key (when the element has text)
    plus one key per requested attribute present on the element.
    NOTE: *tags* is lower-cased in place, as before.
    """
    for idx in range(len(tags)):
        tags[idx] = tags[idx].lower()
    wrapped = "<root>" + text + "</root>"
    root = etree.fromstring(wrapped)
    phrases = []
    for element in root:
        if element.tag.lower() in tags:
            print("found tag: ", element.tag)
            phrase = {}
            if element.text is not None:
                phrase['text'] = element.text
            for att in atts:
                if att in element.keys():
                    phrase[att] = element.get(att)
            phrases.append(phrase)
    return phrases
''' Get contents of tags as a list of strings
text: the xml-tagged text to process
tags: a list of the tags to extract
'''
def text_from_tags(text, tags):
    """Concatenate the text of top-level elements whose tag (case-insensitive)
    is in *tags*, each prefixed by a single space.

    NOTE: *tags* is lower-cased in place, as before.
    """
    for idx in range(len(tags)):
        tags[idx] = tags[idx].lower()
    wrapped = "<root>" + text + "</root>"
    collected = ""
    root = etree.fromstring(wrapped)
    print("text: ", wrapped)
    for element in root:
        print("--child")
        if element.tag.lower() in tags:
            print("found tag: ", element.tag)
            if element.text is not None:
                collected = collected + ' ' + element.text
    return collected
''' matrix: a list of dictionaries
dict_keys: a list of the dictionary keys
outfile: the file to write to
'''
def write_to_file(matrix, dict_keys, outfile):
    """Write each feature dict in *matrix* to *outfile*, one repr per line,
    and the key list to ``outfile + '.keys'``. Returns *dict_keys*."""
    print("writing ", str(len(matrix)), " feature vectors to file...")
    with open(outfile, 'w') as output:
        for feat in matrix:
            # collapse embedded newlines so each vector stays on one line
            output.write(str(feat).replace('\n', '') + "\n")
    with open(outfile + ".keys", "w") as key_output:
        key_output.write(str(dict_keys))
    return dict_keys
def xml_to_txt(filename):
    """Strip the first and last lines of *filename* (the root tags) via sed
    and write the result next to it with a .txt extension.

    Fix: the old ``filename.split(".")[0]`` truncated at the FIRST dot
    anywhere in the path (e.g. './data/file.xml' became ''), so the output
    landed in the wrong place; ``os.path.splitext`` strips only the final
    extension.
    """
    import os
    name = os.path.splitext(filename)[0]
    sed_command = r"sed '$d' < " + filename + r" | sed '1d' > " + name + ".txt"
    ps = subprocess.Popen(sed_command, shell=True, stdout=subprocess.PIPE)
    ps.communicate()
def zero_vec(dim):
    """Return a list of *dim* zeros."""
    return [0] * dim
| [
"sklearn.metrics.f1_score",
"random.shuffle",
"pandas.read_csv",
"numpy.average",
"lxml.etree.SubElement",
"subprocess.Popen",
"lxml.etree.parse",
"torch.stack",
"lxml.etree.write",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"subprocess.call",
"lxml.etree.fromstring",... | [((2212, 2233), 'lxml.etree.parse', 'etree.parse', (['filename'], {}), '(filename)\n', (2223, 2233), False, 'from lxml import etree\n'), ((2680, 2700), 'lxml.etree.write', 'etree.write', (['outfile'], {}), '(outfile)\n', (2691, 2700), False, 'from lxml import etree\n'), ((2733, 2765), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'columns': "['ID']"}), "(columns=['ID'])\n", (2749, 2765), False, 'import pandas\n'), ((5103, 5168), 'subprocess.Popen', 'subprocess.Popen', (['sed_command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(sed_command, shell=True, stdout=subprocess.PIPE)\n', (5119, 5168), False, 'import subprocess\n'), ((5309, 5370), 'subprocess.call', 'subprocess.call', (["['sed', '-i', '-e', 's/</ </g', filename]"], {}), "(['sed', '-i', '-e', 's/</ </g', filename])\n", (5324, 5370), False, 'import subprocess\n'), ((5375, 5436), 'subprocess.call', 'subprocess.call', (["['sed', '-i', '-e', 's/>/> /g', filename]"], {}), "(['sed', '-i', '-e', 's/>/> /g', filename])\n", (5390, 5436), False, 'import subprocess\n'), ((5441, 5499), 'subprocess.call', 'subprocess.call', (["['sed', '-i', '-e', 's/ / /g', filename]"], {}), "(['sed', '-i', '-e', 's/ / /g', filename])\n", (5456, 5499), False, 'import subprocess\n'), ((5504, 5561), 'subprocess.call', 'subprocess.call', (['[\'sed\', \'-i\', \'-e\', "s/‘/\'/g", filename]'], {}), '([\'sed\', \'-i\', \'-e\', "s/‘/\'/g", filename])\n', (5519, 5561), False, 'import subprocess\n'), ((5566, 5623), 'subprocess.call', 'subprocess.call', (['[\'sed\', \'-i\', \'-e\', "s/’/\'/g", filename]'], {}), '([\'sed\', \'-i\', \'-e\', "s/’/\'/g", filename])\n', (5581, 5623), False, 'import subprocess\n'), ((5628, 5691), 'subprocess.call', 'subprocess.call', (['[\'sed\', \'-i\', \'-e\', "s/‘/\'/g", filename]'], {}), '([\'sed\', \'-i\', \'-e\', "s/‘/\'/g", filename])\n', (5643, 5691), False, 'import subprocess\n'), ((5696, 5759), 'subprocess.call', 'subprocess.call', (['[\'sed\', \'-i\', \'-e\', "s/’/\'/g", 
filename]'], {}), '([\'sed\', \'-i\', \'-e\', "s/’/\'/g", filename])\n', (5711, 5759), False, 'import subprocess\n'), ((5764, 5827), 'subprocess.call', 'subprocess.call', (["['sed', '-i', '-e', 's/–/,/g', filename]"], {}), "(['sed', '-i', '-e', 's/–/,/g', filename])\n", (5779, 5827), False, 'import subprocess\n'), ((6667, 6728), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(command, shell=True, stdout=subprocess.PIPE)\n', (6683, 6728), False, 'import subprocess\n'), ((9120, 9139), 'random.shuffle', 'shuffle', (['temp_pairs'], {}), '(temp_pairs)\n', (9127, 9139), False, 'from random import shuffle\n'), ((9492, 9547), 'pandas.read_csv', 'pandas.read_csv', (['filename'], {'header': 'None', 'index_col': '(False)'}), '(filename, header=None, index_col=False)\n', (9507, 9547), False, 'import pandas\n'), ((10329, 10378), 'lxml.etree.fromstring', 'etree.fromstring', (["('<root>' + ann_text + '</root>')"], {}), "('<root>' + ann_text + '</root>')\n", (10345, 10378), False, 'from lxml import etree\n'), ((11959, 12024), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['true_labs', 'pred_labs'], {'average': '"""weighted"""'}), "(true_labs, pred_labs, average='weighted')\n", (11982, 12024), False, 'from sklearn import metrics\n'), ((12038, 12100), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['true_labs', 'pred_labs'], {'average': '"""weighted"""'}), "(true_labs, pred_labs, average='weighted')\n", (12058, 12100), False, 'from sklearn import metrics\n'), ((12110, 12168), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['true_labs', 'pred_labs'], {'average': '"""weighted"""'}), "(true_labs, pred_labs, average='weighted')\n", (12126, 12168), False, 'from sklearn import metrics\n'), ((13450, 13473), 'numpy.average', 'numpy.average', (['p_scores'], {}), '(p_scores)\n', (13463, 13473), False, 'import numpy\n'), ((13487, 13510), 'numpy.average', 'numpy.average', (['r_scores'], {}), '(r_scores)\n', 
(13500, 13510), False, 'import numpy\n'), ((13520, 13544), 'numpy.average', 'numpy.average', (['f1_scores'], {}), '(f1_scores)\n', (13533, 13544), False, 'import numpy\n'), ((14123, 14147), 'nltk.tokenize.wordpunct_tokenize', 'wordpunct_tokenize', (['text'], {}), '(text)\n', (14141, 14147), False, 'from nltk.tokenize import wordpunct_tokenize\n'), ((14964, 14986), 'lxml.etree.fromstring', 'etree.fromstring', (['text'], {}), '(text)\n', (14980, 14986), False, 'from lxml import etree\n'), ((15713, 15735), 'lxml.etree.fromstring', 'etree.fromstring', (['text'], {}), '(text)\n', (15729, 15735), False, 'from lxml import etree\n'), ((16764, 16829), 'subprocess.Popen', 'subprocess.Popen', (['sed_command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(sed_command, shell=True, stdout=subprocess.PIPE)\n', (16780, 16829), False, 'import subprocess\n'), ((2608, 2644), 'lxml.etree.SubElement', 'etree.SubElement', (['child', '"""diagnosis"""'], {}), "(child, 'diagnosis')\n", (2624, 2644), False, 'from lxml import etree\n'), ((8042, 8060), 'random.shuffle', 'shuffle', (['temp_list'], {}), '(temp_list)\n', (8049, 8060), False, 'from random import shuffle\n'), ((8738, 8756), 'random.shuffle', 'shuffle', (['temp_list'], {}), '(temp_list)\n', (8745, 8756), False, 'from random import shuffle\n'), ((10063, 10084), 'lxml.etree.fromstring', 'etree.fromstring', (['ann'], {}), '(ann)\n', (10079, 10084), False, 'from lxml import etree\n'), ((11845, 11867), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (11864, 11867), False, 'import operator\n'), ((4240, 4261), 'lxml.etree.tostring', 'etree.tostring', (['event'], {}), '(event)\n', (4254, 4261), False, 'from lxml import etree\n'), ((11395, 11416), 'torch.stack', 'torch.stack', (['enc_list'], {}), '(enc_list)\n', (11406, 11416), False, 'import torch\n'), ((3552, 3573), 'lxml.etree.tostring', 'etree.tostring', (['event'], {}), '(event)\n', (3566, 3573), False, 'from lxml import etree\n'), ((14362, 14373), 
'lxml.etree.tostring', 'tostring', (['c'], {}), '(c)\n', (14370, 14373), False, 'from lxml.etree import tostring\n')] |
import os
import argparse
import numpy as np
import processors as pe
from paz.backend.camera import VideoPlayer
from paz.backend.camera import Camera
from demo_pipeline import DetectEigenFaces
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Real-time face classifier')
parser.add_argument('-c', '--camera_id', type=int, default=0,
help='Camera device ID')
parser.add_argument('-o', '--offset', type=float, default=0.1,
help='Scaled offset to be added to bounding boxes')
parser.add_argument('-e', '--experiments_path', type=str,
default='experiments',
help='Directory for writing and loading experiments')
parser.add_argument('-d', '--database_path', type=str,
default='database',
help='Directory for the database')
args = parser.parse_args()
if not os.path.exists(args.experiments_path):
os.makedirs(args.experiments_path)
if not os.path.exists(args.database_path):
os.makedirs(args.database_path)
# check if eigenfaces and mean face are already computed
needed_files = ['eigenvalues.npy', 'eigenfaces.npy', 'mean_face.npy']
if set(os.listdir(args.experiments_path)) != set(needed_files):
raise FileNotFoundError('''Need necessary files to run the demo. Please
run eigenface.py first and then try running the
demo.''')
# check if database is available
needed_files = ['images', 'database.npy']
if set(os.listdir(args.database_path)) != set(needed_files):
raise FileNotFoundError('''Need database to run the demo. Please
update the database with database.py first
and then try running the demo.''')
eigenfaces = np.load(os.path.join(args.experiments_path, 'eigenfaces.npy'))
mean_face = np.load(os.path.join(args.experiments_path, 'mean_face.npy'))
database_path = os.path.join(args.database_path, 'database.npy')
weights = np.load(database_path, allow_pickle=True).item()
# user defined parameters
thresh = 1e4
norm_order = 2
# measure = pe.CalculateNorm(norm_order)
measure = pe.CalculateCosineSimilarity()
pipeline = DetectEigenFaces(weights, measure, thresh, eigenfaces,
mean_face, [args.offset, args.offset])
camera = Camera(args.camera_id)
player = VideoPlayer((640, 480), pipeline, camera)
player.run()
| [
"os.path.exists",
"os.listdir",
"demo_pipeline.DetectEigenFaces",
"argparse.ArgumentParser",
"os.makedirs",
"paz.backend.camera.VideoPlayer",
"os.path.join",
"processors.CalculateCosineSimilarity",
"numpy.load",
"paz.backend.camera.Camera"
] | [((235, 299), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Real-time face classifier"""'}), "(description='Real-time face classifier')\n", (258, 299), False, 'import argparse\n'), ((2071, 2119), 'os.path.join', 'os.path.join', (['args.database_path', '"""database.npy"""'], {}), "(args.database_path, 'database.npy')\n", (2083, 2119), False, 'import os\n'), ((2309, 2339), 'processors.CalculateCosineSimilarity', 'pe.CalculateCosineSimilarity', ([], {}), '()\n', (2337, 2339), True, 'import processors as pe\n'), ((2356, 2454), 'demo_pipeline.DetectEigenFaces', 'DetectEigenFaces', (['weights', 'measure', 'thresh', 'eigenfaces', 'mean_face', '[args.offset, args.offset]'], {}), '(weights, measure, thresh, eigenfaces, mean_face, [args.\n offset, args.offset])\n', (2372, 2454), False, 'from demo_pipeline import DetectEigenFaces\n'), ((2495, 2517), 'paz.backend.camera.Camera', 'Camera', (['args.camera_id'], {}), '(args.camera_id)\n', (2501, 2517), False, 'from paz.backend.camera import Camera\n'), ((2531, 2572), 'paz.backend.camera.VideoPlayer', 'VideoPlayer', (['(640, 480)', 'pipeline', 'camera'], {}), '((640, 480), pipeline, camera)\n', (2542, 2572), False, 'from paz.backend.camera import VideoPlayer\n'), ((950, 987), 'os.path.exists', 'os.path.exists', (['args.experiments_path'], {}), '(args.experiments_path)\n', (964, 987), False, 'import os\n'), ((997, 1031), 'os.makedirs', 'os.makedirs', (['args.experiments_path'], {}), '(args.experiments_path)\n', (1008, 1031), False, 'import os\n'), ((1043, 1077), 'os.path.exists', 'os.path.exists', (['args.database_path'], {}), '(args.database_path)\n', (1057, 1077), False, 'import os\n'), ((1087, 1118), 'os.makedirs', 'os.makedirs', (['args.database_path'], {}), '(args.database_path)\n', (1098, 1118), False, 'import os\n'), ((1917, 1970), 'os.path.join', 'os.path.join', (['args.experiments_path', '"""eigenfaces.npy"""'], {}), "(args.experiments_path, 'eigenfaces.npy')\n", (1929, 1970), False, 
'import os\n'), ((1996, 2048), 'os.path.join', 'os.path.join', (['args.experiments_path', '"""mean_face.npy"""'], {}), "(args.experiments_path, 'mean_face.npy')\n", (2008, 2048), False, 'import os\n'), ((1267, 1300), 'os.listdir', 'os.listdir', (['args.experiments_path'], {}), '(args.experiments_path)\n', (1277, 1300), False, 'import os\n'), ((1622, 1652), 'os.listdir', 'os.listdir', (['args.database_path'], {}), '(args.database_path)\n', (1632, 1652), False, 'import os\n'), ((2134, 2175), 'numpy.load', 'np.load', (['database_path'], {'allow_pickle': '(True)'}), '(database_path, allow_pickle=True)\n', (2141, 2175), True, 'import numpy as np\n')] |
import os
import glob
import copy
import random
import time
import numpy as np
import numpy.ma as ma
import cv2
from PIL import Image
import matplotlib.pyplot as plt
import scipy.io as scio
from scipy.spatial.transform import Rotation as R
from sklearn.neighbors import KDTree
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import torch.nn.functional as F
from torch.autograd import Variable
#######################################
#######################################
import sys
sys.path.append('../../../')
#######################################
#######################################
from lib.network import PoseNet, PoseRefineNet
from lib.transformations import euler_matrix, quaternion_matrix, quaternion_from_matrix
#######################################
#######################################
from affpose.YCB_Aff import cfg as config
from affpose.YCB_Aff.dataset import ycb_aff_dataset_utils
from affpose.YCB_Aff.dataset import dataloader as ycb_aff_dataloader
from affpose.YCB_Aff.utils.bbox.extract_bboxs_from_label import get_bbox, get_obj_part_bbox, get_posecnn_bbox
from affpose.YCB_Aff.eval import eval_utils
#######################################
#######################################
DELETE_OLD_RESULTS = True
SPLIT = 'test'
USE_PRED_MASKS = False
SELECT_RANDOM_IMAGES = False
NUM_IMAGES = 50
VISUALIZE_AND_GET_ERROR_METRICS = True
PROJECT_MESH_ON_IMAGE = True
def main():
##################################
# Remove old results.
##################################
if DELETE_OLD_RESULTS:
files = glob.glob(config.AFF_EVAL_FOLDER_GT + '/*')
for file in files:
os.remove(file)
files = glob.glob(config.AFF_EVAL_FOLDER_DF_WO_REFINE + '/*')
for file in files:
os.remove(file)
files = glob.glob(config.AFF_EVAL_FOLDER_DF_ITERATIVE + '/*')
for file in files:
os.remove(file)
##################################
# DenseFusion
##################################
estimator = PoseNet(num_points=config.NUM_PT, num_obj=config.NUM_OBJECTS)
estimator.cuda()
estimator.load_state_dict(torch.load(config.TRAINED_AFF_MODEL))
estimator.eval()
refiner = PoseRefineNet(num_points=config.NUM_PT, num_obj=config.NUM_OBJECTS)
refiner.cuda()
refiner.load_state_dict(torch.load(config.TRAINED_AFF_REFINE_MODEL))
refiner.eval()
img_norm = transforms.Normalize(mean=config.IMG_MEAN, std=config.IMG_STD)
###################################
# Load ARL AFFPose
###################################
# load real images.
dataloader = ycb_aff_dataloader.YCBAff(split=SPLIT, select_random_images=SELECT_RANDOM_IMAGES)
###################################
# Stats
###################################
stats_pred_class_ids = np.zeros(shape=(len(dataloader.img_files), 10))
stats_pred_choose = np.zeros(shape=(len(dataloader.img_files), 10))
stats_pred_c = np.zeros(shape=(len(dataloader.img_files), 10))
for image_idx, image_addr in enumerate(dataloader.img_files):
t0 = time.time()
#####################
# Load GT images.
#####################
data = dataloader.get_item(image_idx)
# data = dataloader.draw_gt_obj_pose(image_idx, project_mesh_on_image=False) # PROJECT_MESH_ON_IMAGE)
rgb = data["rgb"]
depth_16bit = data["depth_16bit"]
depth_8bit = data["depth_8bit"]
obj_part_label = data["obj_part_label"]
cv2_obj_part_pose_img = data["cv2_obj_part_pose_img"]
meta = data["meta"]
#####################
# Get Pred Masks from PoseCNN
#####################
# gt pose.
gt_poses = np.array(meta['poses']).flatten().reshape(3, 4, -1)
# posecnn
posecnn_meta_idx = str(1000000 + image_idx)[1:] # gt results and posecnn are offset by 1
posecnn_meta_addr = config.YCB_TOOLBOX_CONFIG + posecnn_meta_idx + config.POSECNN_EXT
posecnn_meta = scio.loadmat(posecnn_meta_addr)
posecnn_label = np.array(posecnn_meta['labels'])
posecnn_rois = np.array(posecnn_meta['rois'])
poses_icp = np.array(posecnn_meta['poses_icp'])
pred_obj_ids = np.array(posecnn_rois[:, 1], dtype=np.uint8)
gt_obj_ids = np.array(meta['cls_indexes'].flatten(), dtype=np.uint8)
gt_poses = np.array(meta['poses']).flatten().reshape(3, 4, -1)
gt_to_pred_idxs = []
for pred_obj_id in pred_obj_ids:
if pred_obj_id in gt_obj_ids.tolist():
gt_to_pred_idxs.append(gt_obj_ids.tolist().index(pred_obj_id))
print("\nPred [{}]: {}\nGT [{}]: {}".format(len(pred_obj_ids), pred_obj_ids, len(gt_obj_ids), gt_obj_ids))
#####################
#####################
# TODO: MATLAB EVAL
class_ids_list = []
pose_est_gt = []
pose_est_df_wo_refine = []
pose_est_df_iterative = []
choose_list = []
pred_c_list = []
gt_to_pred_idx = 0
for pred_idx, pred_obj_id in enumerate(gt_obj_ids):
if pred_obj_id in gt_obj_ids:
# TODO: MATLAB EVAL
class_ids_list.append(pred_obj_id)
obj_color = ycb_aff_dataset_utils.obj_color_map(pred_obj_id)
print("Object: ID:{}, Name:{}".format(pred_obj_id, dataloader.obj_classes[int(pred_obj_id) - 1]))
gt_idx = pred_idx # gt_to_pred_idxs[gt_to_pred_idx]
# gt_obj_id = gt_obj_ids[gt_idx]
# print("pred\t idx:{},\t class id:{}".format(pred_idx, pred_obj_id))
# print("gt \t idx:{},\t class id:{}".format(gt_idx, gt_obj_id))
gt_to_pred_idx += 1
obj_part_ids = ycb_aff_dataset_utils.map_obj_ids_to_obj_part_ids(pred_obj_id)
for pred_obj_part_id in obj_part_ids:
if pred_obj_part_id in dataloader.obj_part_ids:
#######################################
# gt
#######################################
obj_part_centered = dataloader.cld_obj_part_centered[pred_obj_part_id]
obj_part_id_idx = str(1000 + pred_obj_part_id)[1:]
gt_obj_part_r = meta['obj_part_rotation_' + np.str(obj_part_id_idx)]
gt_obj_part_t = meta['obj_part_translation_' + np.str(obj_part_id_idx)]
gt_obj_part_q = quaternion_from_matrix(gt_obj_part_r)
gt_list = np.append(np.array(gt_obj_part_q), np.array(gt_obj_part_t))
# TODO: MATLAB EVAL
pose_est_gt.append(gt_list.tolist())
try:
#######################################
# bbox
#######################################
# TODO: USE_PRED_MASKS
# if USE_PRED_MASKS:
# obj_part_label = posecnn_label
# mask_label = ma.getmaskarray(ma.masked_equal(obj_label, pred_obj_id)).astype(np.uint8)
# rmin, rmax, cmin, cmax = get_posecnn_bbox(posecnn_rois, pred_idx)
# else:
obj_part_label = obj_part_label
mask_label = ma.getmaskarray(ma.masked_equal(obj_part_label, pred_obj_part_id)).astype(np.uint8)
rmin, rmax, cmin, cmax = get_bbox(mask_label)
#######################################
# visualize label.
#######################################
# if VISUALIZE_AND_GET_ERROR_METRICS:
# # colour_obj_label = ycb_aff_dataset_utils.colorize_obj_mask(obj_label)
# # cv2.imshow('obj_label', cv2.cvtColor(colour_obj_label, cv2.COLOR_BGR2RGB))
# colour_obj_label = ycb_aff_dataset_utils.colorize_obj_mask(mask_label*pred_obj_id)
# colour_obj_label = cv2.addWeighted(rgb, 0.35, colour_obj_label, 0.65, 0)
# colour_obj_label = cv2.rectangle(colour_obj_label, (cmin, rmin), (cmax, rmax), obj_color, 2)
# cv2.imshow('colour_obj_label', cv2.cvtColor(colour_obj_label, cv2.COLOR_BGR2RGB))
# cv2.waitKey(0)
#######################################
# real cam for test frames
#######################################
cam_cx = config.CAM_CX_1
cam_cy = config.CAM_CY_1
cam_fx = config.CAM_FX_1
cam_fy = config.CAM_FY_1
#######################################
#######################################
mask_depth = ma.getmaskarray(ma.masked_not_equal(depth_16bit, 0))
mask = mask_label * mask_depth
choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
obj_choose = len(choose.copy())
if len(choose) == 0:
raise ZeroDivisionError
elif len(choose) > config.NUM_PT:
c_mask = np.zeros(len(choose), dtype=int)
c_mask[:config.NUM_PT] = 1
np.random.shuffle(c_mask)
choose = choose[c_mask.nonzero()]
else:
choose = np.pad(choose, (0, config.NUM_PT - len(choose)), 'wrap')
depth_masked = depth_16bit[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
xmap_masked = config.XMAP[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
ymap_masked = config.YMAP[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
choose = np.array([choose])
pt2 = depth_masked / config.CAM_SCALE
pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx
pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy
cloud = np.concatenate((pt0, pt1, pt2), axis=1)
img_masked = np.array(rgb)[:, :, :3]
img_masked = np.transpose(img_masked, (2, 0, 1))
img_masked = img_masked[:, rmin:rmax, cmin:cmax]
cloud = torch.from_numpy(cloud.astype(np.float32))
choose = torch.LongTensor(choose.astype(np.int32))
img_masked = img_norm(torch.from_numpy(img_masked.astype(np.float32)))
index = torch.LongTensor([pred_obj_id - 1])
cloud = Variable(cloud).cuda()
choose = Variable(choose).cuda()
img_masked = Variable(img_masked).cuda()
index = Variable(index).cuda()
cloud = cloud.view(1, config.NUM_PT, 3)
img_masked = img_masked.view(1, 3, img_masked.size()[1], img_masked.size()[2])
#######################################
#######################################
pred_r, pred_t, pred_c, emb = estimator(img_masked, cloud, choose, index)
pred_r = pred_r / torch.norm(pred_r, dim=2).view(1, config.NUM_PT, 1)
pred_c = pred_c.view(config.BATCH_SIZE, config.NUM_PT)
how_max, which_max = torch.max(pred_c, 1)
pred_t = pred_t.view(config.BATCH_SIZE * config.NUM_PT, 1, 3)
points = cloud.view(config.BATCH_SIZE * config.NUM_PT, 1, 3)
how_max = how_max.detach().clone().cpu().numpy()[0]
my_r = pred_r[0][which_max[0]].view(-1).cpu().data.numpy()
my_t = (points + pred_t)[which_max[0]].view(-1).cpu().data.numpy()
my_pred = np.append(my_r, my_t)
# TODO: MATLAB EVAL
pose_est_df_wo_refine.append(my_pred.tolist())
for ite in range(0, config.REFINE_ITERATIONS):
T = Variable(torch.from_numpy(my_t.astype(np.float32))).cuda().view(1, 3).repeat(config.NUM_PT,1).contiguous().view(1, config.NUM_PT, 3)
my_mat = quaternion_matrix(my_r)
R = Variable(torch.from_numpy(my_mat[:3, :3].astype(np.float32))).cuda().view(1, 3, 3)
my_mat[0:3, 3] = my_t
new_cloud = torch.bmm((cloud - T), R).contiguous()
pred_r, pred_t = refiner(new_cloud, emb, index)
pred_r = pred_r.view(1, 1, -1)
pred_r = pred_r / (torch.norm(pred_r, dim=2).view(1, 1, 1))
my_r_2 = pred_r.view(-1).cpu().data.numpy()
my_t_2 = pred_t.view(-1).cpu().data.numpy()
my_mat_2 = quaternion_matrix(my_r_2)
my_mat_2[0:3, 3] = my_t_2
my_mat_final = np.dot(my_mat, my_mat_2)
my_r_final = copy.deepcopy(my_mat_final)
my_r_final[0:3, 3] = 0
my_r_final = quaternion_from_matrix(my_r_final, True)
my_t_final = np.array([my_mat_final[0][3], my_mat_final[1][3], my_mat_final[2][3]])
my_pred = np.append(my_r_final, my_t_final)
my_r = my_r_final
my_t = my_t_final
# TODO: MATLAB EVAL
pose_est_df_iterative.append(my_pred.tolist())
# choose_list.append(obj_choose)
# pred_c_list.append(how_max)
############################
# Stats
############################
stats_pred_class_ids[image_idx, pred_idx] = pred_obj_id
stats_pred_choose[image_idx, pred_idx] = obj_choose
stats_pred_c[image_idx, pred_idx] = how_max
#######################################
# Error Metrics.
#######################################
# if VISUALIZE_AND_GET_ERROR_METRICS:
# pred
pred_obj_part_t, pred_obj_part_q = my_t, my_r
pred_obj_part_r = quaternion_matrix(pred_obj_part_q)[0:3, 0:3]
# eval pose.
eval_utils.get_error_metrics(gt_obj_t=gt_obj_part_t, gt_obj_r=gt_obj_part_r,
pred_obj_t=pred_obj_part_t, pred_obj_r=pred_obj_part_r,
refinement_idx=ite+1,
choose=obj_choose, pred_c=how_max,
verbose=True)
#######################################
# plotting pred pose.
#######################################
if PROJECT_MESH_ON_IMAGE:
obj_cld = dataloader.cld_obj_part_centered[pred_obj_part_id]
# projecting 3D model to 2D image
imgpts, jac = cv2.projectPoints(obj_cld * 1e3, pred_obj_part_r, pred_obj_part_t * 1e3, dataloader.cam_mat, dataloader.cam_dist)
cv2_obj_part_pose_img = cv2.polylines(cv2_obj_part_pose_img, np.int32([np.squeeze(imgpts)]), True, obj_color)
# draw pose
rotV, _ = cv2.Rodrigues(pred_obj_part_r)
points = np.float32([[100, 0, 0], [0, 100, 0], [0, 0, 100], [0, 0, 0]]).reshape(-1, 3)
axisPoints, _ = cv2.projectPoints(points, rotV, pred_obj_part_t * 1e3, dataloader.cam_mat, dataloader.cam_dist)
axis_color = (255, 255, 255)
cv2_obj_part_pose_img = cv2.line(cv2_obj_part_pose_img, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (255, 0, 0), 3)
cv2_obj_part_pose_img = cv2.line(cv2_obj_part_pose_img, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), 3)
cv2_obj_part_pose_img = cv2.line(cv2_obj_part_pose_img, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (0, 0, 255), 3)
except ZeroDivisionError:
print("DenseFusion Detector Lost keyframe ..")
# TODO: MATLAB EVAL
pose_est_df_wo_refine.append([0.0 for i in range(7)])
pose_est_df_iterative.append([0.0 for i in range(7)])
print('Average Time for Pred: {:.3f} [s]'.format((time.time()-t0)/len(gt_obj_ids)))
#####################
# PLOTTING
#####################
if VISUALIZE_AND_GET_ERROR_METRICS:
# SAVE_FOLDER = '/home/akeaveny/Desktop/DenseFusion_YCB/'
# pred_name = SAVE_FOLDER + str(image_idx) + "_aff.png"
# cv2.imwrite(pred_name, cv2.cvtColor(cv2_obj_part_pose_img, cv2.COLOR_BGR2RGB))
cv2.imshow('depth', depth_8bit)
cv2.imshow('cv2_obj_part_pose_img', cv2.cvtColor(cv2_obj_part_pose_img, cv2.COLOR_BGR2RGB))
cv2.waitKey(1)
time.sleep(0.35)
############################
# TODO: MATLAB EVAL
############################
scio.savemat('{0}/{1}.mat'.format(config.AFF_EVAL_FOLDER_GT, '%04d' % image_idx),
{"class_ids": class_ids_list, 'poses': pose_est_gt})
scio.savemat('{0}/{1}.mat'.format(config.AFF_EVAL_FOLDER_DF_WO_REFINE, '%04d' % image_idx),
{"class_ids": class_ids_list, 'poses': pose_est_df_wo_refine})
scio.savemat('{0}/{1}.mat'.format(config.AFF_EVAL_FOLDER_DF_ITERATIVE, '%04d' % image_idx),
{"class_ids": class_ids_list, 'poses': pose_est_df_iterative})
############################
# Stats
############################
print('\n\n\nPrinting stats ..')
eval_utils.get_obj_stats(stats_pred_class_ids, stats_pred_choose, stats_pred_c)
if __name__ == '__main__':
main() | [
"numpy.ma.masked_equal",
"cv2.projectPoints",
"torch.LongTensor",
"affpose.YCB_Aff.eval.eval_utils.get_error_metrics",
"scipy.io.loadmat",
"torch.max",
"time.sleep",
"cv2.imshow",
"affpose.YCB_Aff.utils.bbox.extract_bboxs_from_label.get_bbox",
"affpose.YCB_Aff.eval.eval_utils.get_obj_stats",
"li... | [((708, 736), 'sys.path.append', 'sys.path.append', (['"""../../../"""'], {}), "('../../../')\n", (723, 736), False, 'import sys\n'), ((2249, 2310), 'lib.network.PoseNet', 'PoseNet', ([], {'num_points': 'config.NUM_PT', 'num_obj': 'config.NUM_OBJECTS'}), '(num_points=config.NUM_PT, num_obj=config.NUM_OBJECTS)\n', (2256, 2310), False, 'from lib.network import PoseNet, PoseRefineNet\n'), ((2436, 2503), 'lib.network.PoseRefineNet', 'PoseRefineNet', ([], {'num_points': 'config.NUM_PT', 'num_obj': 'config.NUM_OBJECTS'}), '(num_points=config.NUM_PT, num_obj=config.NUM_OBJECTS)\n', (2449, 2503), False, 'from lib.network import PoseNet, PoseRefineNet\n'), ((2631, 2693), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'config.IMG_MEAN', 'std': 'config.IMG_STD'}), '(mean=config.IMG_MEAN, std=config.IMG_STD)\n', (2651, 2693), True, 'import torchvision.transforms as transforms\n'), ((2840, 2926), 'affpose.YCB_Aff.dataset.dataloader.YCBAff', 'ycb_aff_dataloader.YCBAff', ([], {'split': 'SPLIT', 'select_random_images': 'SELECT_RANDOM_IMAGES'}), '(split=SPLIT, select_random_images=\n SELECT_RANDOM_IMAGES)\n', (2865, 2926), True, 'from affpose.YCB_Aff.dataset import dataloader as ycb_aff_dataloader\n'), ((19707, 19786), 'affpose.YCB_Aff.eval.eval_utils.get_obj_stats', 'eval_utils.get_obj_stats', (['stats_pred_class_ids', 'stats_pred_choose', 'stats_pred_c'], {}), '(stats_pred_class_ids, stats_pred_choose, stats_pred_c)\n', (19731, 19786), False, 'from affpose.YCB_Aff.eval import eval_utils\n'), ((1784, 1827), 'glob.glob', 'glob.glob', (["(config.AFF_EVAL_FOLDER_GT + '/*')"], {}), "(config.AFF_EVAL_FOLDER_GT + '/*')\n", (1793, 1827), False, 'import glob\n'), ((1900, 1953), 'glob.glob', 'glob.glob', (["(config.AFF_EVAL_FOLDER_DF_WO_REFINE + '/*')"], {}), "(config.AFF_EVAL_FOLDER_DF_WO_REFINE + '/*')\n", (1909, 1953), False, 'import glob\n'), ((2026, 2079), 'glob.glob', 'glob.glob', (["(config.AFF_EVAL_FOLDER_DF_ITERATIVE + '/*')"], {}), 
"(config.AFF_EVAL_FOLDER_DF_ITERATIVE + '/*')\n", (2035, 2079), False, 'import glob\n'), ((2362, 2398), 'torch.load', 'torch.load', (['config.TRAINED_AFF_MODEL'], {}), '(config.TRAINED_AFF_MODEL)\n', (2372, 2398), False, 'import torch\n'), ((2551, 2594), 'torch.load', 'torch.load', (['config.TRAINED_AFF_REFINE_MODEL'], {}), '(config.TRAINED_AFF_REFINE_MODEL)\n', (2561, 2594), False, 'import torch\n'), ((3310, 3321), 'time.time', 'time.time', ([], {}), '()\n', (3319, 3321), False, 'import time\n'), ((4238, 4269), 'scipy.io.loadmat', 'scio.loadmat', (['posecnn_meta_addr'], {}), '(posecnn_meta_addr)\n', (4250, 4269), True, 'import scipy.io as scio\n'), ((4295, 4327), 'numpy.array', 'np.array', (["posecnn_meta['labels']"], {}), "(posecnn_meta['labels'])\n", (4303, 4327), True, 'import numpy as np\n'), ((4351, 4381), 'numpy.array', 'np.array', (["posecnn_meta['rois']"], {}), "(posecnn_meta['rois'])\n", (4359, 4381), True, 'import numpy as np\n'), ((4402, 4437), 'numpy.array', 'np.array', (["posecnn_meta['poses_icp']"], {}), "(posecnn_meta['poses_icp'])\n", (4410, 4437), True, 'import numpy as np\n'), ((4462, 4506), 'numpy.array', 'np.array', (['posecnn_rois[:, 1]'], {'dtype': 'np.uint8'}), '(posecnn_rois[:, 1], dtype=np.uint8)\n', (4470, 4506), True, 'import numpy as np\n'), ((1867, 1882), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (1876, 1882), False, 'import os\n'), ((1993, 2008), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (2002, 2008), False, 'import os\n'), ((2119, 2134), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (2128, 2134), False, 'import os\n'), ((18758, 18789), 'cv2.imshow', 'cv2.imshow', (['"""depth"""', 'depth_8bit'], {}), "('depth', depth_8bit)\n", (18768, 18789), False, 'import cv2\n'), ((18906, 18920), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (18917, 18920), False, 'import cv2\n'), ((18933, 18949), 'time.sleep', 'time.sleep', (['(0.35)'], {}), '(0.35)\n', (18943, 18949), False, 'import time\n'), ((5483, 5531), 
'affpose.YCB_Aff.dataset.ycb_aff_dataset_utils.obj_color_map', 'ycb_aff_dataset_utils.obj_color_map', (['pred_obj_id'], {}), '(pred_obj_id)\n', (5518, 5531), False, 'from affpose.YCB_Aff.dataset import ycb_aff_dataset_utils\n'), ((6001, 6063), 'affpose.YCB_Aff.dataset.ycb_aff_dataset_utils.map_obj_ids_to_obj_part_ids', 'ycb_aff_dataset_utils.map_obj_ids_to_obj_part_ids', (['pred_obj_id'], {}), '(pred_obj_id)\n', (6050, 6063), False, 'from affpose.YCB_Aff.dataset import ycb_aff_dataset_utils\n'), ((18838, 18892), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_obj_part_pose_img', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_obj_part_pose_img, cv2.COLOR_BGR2RGB)\n', (18850, 18892), False, 'import cv2\n'), ((3952, 3975), 'numpy.array', 'np.array', (["meta['poses']"], {}), "(meta['poses'])\n", (3960, 3975), True, 'import numpy as np\n'), ((4604, 4627), 'numpy.array', 'np.array', (["meta['poses']"], {}), "(meta['poses'])\n", (4612, 4627), True, 'import numpy as np\n'), ((6746, 6783), 'lib.transformations.quaternion_from_matrix', 'quaternion_from_matrix', (['gt_obj_part_r'], {}), '(gt_obj_part_r)\n', (6768, 6783), False, 'from lib.transformations import euler_matrix, quaternion_matrix, quaternion_from_matrix\n'), ((18355, 18366), 'time.time', 'time.time', ([], {}), '()\n', (18364, 18366), False, 'import time\n'), ((6828, 6851), 'numpy.array', 'np.array', (['gt_obj_part_q'], {}), '(gt_obj_part_q)\n', (6836, 6851), True, 'import numpy as np\n'), ((6853, 6876), 'numpy.array', 'np.array', (['gt_obj_part_t'], {}), '(gt_obj_part_t)\n', (6861, 6876), True, 'import numpy as np\n'), ((7847, 7867), 'affpose.YCB_Aff.utils.bbox.extract_bboxs_from_label.get_bbox', 'get_bbox', (['mask_label'], {}), '(mask_label)\n', (7855, 7867), False, 'from affpose.YCB_Aff.utils.bbox.extract_bboxs_from_label import get_bbox, get_obj_part_bbox, get_posecnn_bbox\n'), ((10676, 10694), 'numpy.array', 'np.array', (['[choose]'], {}), '([choose])\n', (10684, 10694), True, 'import numpy as np\n'), ((10942, 10981), 
'numpy.concatenate', 'np.concatenate', (['(pt0, pt1, pt2)'], {'axis': '(1)'}), '((pt0, pt1, pt2), axis=1)\n', (10956, 10981), True, 'import numpy as np\n'), ((11089, 11124), 'numpy.transpose', 'np.transpose', (['img_masked', '(2, 0, 1)'], {}), '(img_masked, (2, 0, 1))\n', (11101, 11124), True, 'import numpy as np\n'), ((11496, 11531), 'torch.LongTensor', 'torch.LongTensor', (['[pred_obj_id - 1]'], {}), '([pred_obj_id - 1])\n', (11512, 11531), False, 'import torch\n'), ((12428, 12448), 'torch.max', 'torch.max', (['pred_c', '(1)'], {}), '(pred_c, 1)\n', (12437, 12448), False, 'import torch\n'), ((12930, 12951), 'numpy.append', 'np.append', (['my_r', 'my_t'], {}), '(my_r, my_t)\n', (12939, 12951), True, 'import numpy as np\n'), ((15886, 16100), 'affpose.YCB_Aff.eval.eval_utils.get_error_metrics', 'eval_utils.get_error_metrics', ([], {'gt_obj_t': 'gt_obj_part_t', 'gt_obj_r': 'gt_obj_part_r', 'pred_obj_t': 'pred_obj_part_t', 'pred_obj_r': 'pred_obj_part_r', 'refinement_idx': '(ite + 1)', 'choose': 'obj_choose', 'pred_c': 'how_max', 'verbose': '(True)'}), '(gt_obj_t=gt_obj_part_t, gt_obj_r=gt_obj_part_r,\n pred_obj_t=pred_obj_part_t, pred_obj_r=pred_obj_part_r, refinement_idx=\n ite + 1, choose=obj_choose, pred_c=how_max, verbose=True)\n', (15914, 16100), False, 'from affpose.YCB_Aff.eval import eval_utils\n'), ((6584, 6607), 'numpy.str', 'np.str', (['obj_part_id_idx'], {}), '(obj_part_id_idx)\n', (6590, 6607), True, 'import numpy as np\n'), ((6680, 6703), 'numpy.str', 'np.str', (['obj_part_id_idx'], {}), '(obj_part_id_idx)\n', (6686, 6703), True, 'import numpy as np\n'), ((9452, 9487), 'numpy.ma.masked_not_equal', 'ma.masked_not_equal', (['depth_16bit', '(0)'], {}), '(depth_16bit, 0)\n', (9471, 9487), True, 'import numpy.ma as ma\n'), ((11024, 11037), 'numpy.array', 'np.array', (['rgb'], {}), '(rgb)\n', (11032, 11037), True, 'import numpy as np\n'), ((13361, 13384), 'lib.transformations.quaternion_matrix', 'quaternion_matrix', (['my_r'], {}), '(my_r)\n', (13378, 13384), 
False, 'from lib.transformations import euler_matrix, quaternion_matrix, quaternion_from_matrix\n'), ((14072, 14097), 'lib.transformations.quaternion_matrix', 'quaternion_matrix', (['my_r_2'], {}), '(my_r_2)\n', (14089, 14097), False, 'from lib.transformations import euler_matrix, quaternion_matrix, quaternion_from_matrix\n'), ((14205, 14229), 'numpy.dot', 'np.dot', (['my_mat', 'my_mat_2'], {}), '(my_mat, my_mat_2)\n', (14211, 14229), True, 'import numpy as np\n'), ((14275, 14302), 'copy.deepcopy', 'copy.deepcopy', (['my_mat_final'], {}), '(my_mat_final)\n', (14288, 14302), False, 'import copy\n'), ((14403, 14443), 'lib.transformations.quaternion_from_matrix', 'quaternion_from_matrix', (['my_r_final', '(True)'], {}), '(my_r_final, True)\n', (14425, 14443), False, 'from lib.transformations import euler_matrix, quaternion_matrix, quaternion_from_matrix\n'), ((14489, 14559), 'numpy.array', 'np.array', (['[my_mat_final[0][3], my_mat_final[1][3], my_mat_final[2][3]]'], {}), '([my_mat_final[0][3], my_mat_final[1][3], my_mat_final[2][3]])\n', (14497, 14559), True, 'import numpy as np\n'), ((14603, 14636), 'numpy.append', 'np.append', (['my_r_final', 'my_t_final'], {}), '(my_r_final, my_t_final)\n', (14612, 14636), True, 'import numpy as np\n'), ((15772, 15806), 'lib.transformations.quaternion_matrix', 'quaternion_matrix', (['pred_obj_part_q'], {}), '(pred_obj_part_q)\n', (15789, 15806), False, 'from lib.transformations import euler_matrix, quaternion_matrix, quaternion_from_matrix\n'), ((16767, 16891), 'cv2.projectPoints', 'cv2.projectPoints', (['(obj_cld * 1000.0)', 'pred_obj_part_r', '(pred_obj_part_t * 1000.0)', 'dataloader.cam_mat', 'dataloader.cam_dist'], {}), '(obj_cld * 1000.0, pred_obj_part_r, pred_obj_part_t * \n 1000.0, dataloader.cam_mat, dataloader.cam_dist)\n', (16784, 16891), False, 'import cv2\n'), ((17110, 17140), 'cv2.Rodrigues', 'cv2.Rodrigues', (['pred_obj_part_r'], {}), '(pred_obj_part_r)\n', (17123, 17140), False, 'import cv2\n'), ((17308, 17411), 
'cv2.projectPoints', 'cv2.projectPoints', (['points', 'rotV', '(pred_obj_part_t * 1000.0)', 'dataloader.cam_mat', 'dataloader.cam_dist'], {}), '(points, rotV, pred_obj_part_t * 1000.0, dataloader.\n cam_mat, dataloader.cam_dist)\n', (17325, 17411), False, 'import cv2\n'), ((10029, 10054), 'numpy.random.shuffle', 'np.random.shuffle', (['c_mask'], {}), '(c_mask)\n', (10046, 10054), True, 'import numpy as np\n'), ((11569, 11584), 'torch.autograd.Variable', 'Variable', (['cloud'], {}), '(cloud)\n', (11577, 11584), False, 'from torch.autograd import Variable\n'), ((11629, 11645), 'torch.autograd.Variable', 'Variable', (['choose'], {}), '(choose)\n', (11637, 11645), False, 'from torch.autograd import Variable\n'), ((11694, 11714), 'torch.autograd.Variable', 'Variable', (['img_masked'], {}), '(img_masked)\n', (11702, 11714), False, 'from torch.autograd import Variable\n'), ((11758, 11773), 'torch.autograd.Variable', 'Variable', (['index'], {}), '(index)\n', (11766, 11773), False, 'from torch.autograd import Variable\n'), ((7726, 7775), 'numpy.ma.masked_equal', 'ma.masked_equal', (['obj_part_label', 'pred_obj_part_id'], {}), '(obj_part_label, pred_obj_part_id)\n', (7741, 7775), True, 'import numpy.ma as ma\n'), ((12243, 12268), 'torch.norm', 'torch.norm', (['pred_r'], {'dim': '(2)'}), '(pred_r, dim=2)\n', (12253, 12268), False, 'import torch\n'), ((13603, 13626), 'torch.bmm', 'torch.bmm', (['(cloud - T)', 'R'], {}), '(cloud - T, R)\n', (13612, 13626), False, 'import torch\n'), ((17182, 17244), 'numpy.float32', 'np.float32', (['[[100, 0, 0], [0, 100, 0], [0, 0, 100], [0, 0, 0]]'], {}), '([[100, 0, 0], [0, 100, 0], [0, 0, 100], [0, 0, 0]])\n', (17192, 17244), True, 'import numpy as np\n'), ((13836, 13861), 'torch.norm', 'torch.norm', (['pred_r'], {'dim': '(2)'}), '(pred_r, dim=2)\n', (13846, 13861), False, 'import torch\n'), ((16984, 17002), 'numpy.squeeze', 'np.squeeze', (['imgpts'], {}), '(imgpts)\n', (16994, 17002), True, 'import numpy as np\n')] |
from .output_base import OutputBase
import numpy as np
class InMemoryOutput(OutputBase):
    """Output backend that keeps every sampler product in Python memory.

    Rows, per-key metadata, final metadata, and free-form comments are
    held in plain lists/dicts instead of being written to disk.
    """
    _aliases = ["memory"]

    def __init__(self):
        super(InMemoryOutput, self).__init__()
        self.rows = []
        self.meta = {}
        self.final_meta = {}
        self.comments = []

    def _write_parameters(self, params):
        # One call per sampler step: append the parameter vector as a row.
        self.rows.append(params)

    def _write_metadata(self, key, value, comment):
        self.meta[key] = (value, comment)

    def _write_comment(self, comment):
        self.comments.append(comment)

    def _write_final(self, key, value, comment):
        self.final_meta[key] = (value, comment)

    def __getitem__(self, key_or_index):
        # Integer -> one whole row; column name -> that column across all rows.
        if isinstance(key_or_index, int):
            return self.rows[key_or_index]
        # self.columns is provided by OutputBase; each entry's first element
        # is the column name.
        names = [col[0] for col in self.columns]
        position = names.index(key_or_index)
        return np.array([row[position] for row in self.rows])

    @classmethod
    def from_options(cls, options, resume=False):
        if resume:
            raise ValueError("Cannot resume from in-memory output")
        return cls()

    @classmethod
    def load_from_options(cls, options):
        raise ValueError("No output was saved from this run")
| [
"numpy.array"
] | [((868, 918), 'numpy.array', 'np.array', (['[row[column_index] for row in self.rows]'], {}), '([row[column_index] for row in self.rows])\n', (876, 918), True, 'import numpy as np\n')] |
import numpy
def entropy2(mtx, binsize=None):
    ''' E = ENTROPY2(MTX,BINSIZE)
    Compute the first-order sample entropy of MTX.  Samples of the input
    are first discretized into histogram bins.  The optional BINSIZE
    controls the bin width; when omitted, 256 bins are used.
    NOTE: This is a heavily biased estimate of entropy when you
    don't have much data.
    Ported from Matlab.  The signature was generalized from *args to
    explicit parameters; existing positional callers are unaffected.
    Relies on the module-level helpers range2() and histo(). '''
    vec = numpy.array(mtx)
    # Flatten anything that is not already a vector (row/column vectors
    # of shape (1, n) / (n, 1) are left alone, matching the original port).
    if vec.ndim != 1 and (vec.shape[0] != 1 or vec.shape[1] != 1):
        vec = vec.flatten()
    (mn, mx) = range2(vec)
    if binsize is not None:
        # bin count derived from the requested bin width
        nbins = float(mx - mn) / float(binsize)
    else:
        nbins = 256
    [bincount, bins] = histo(vec, nbins)
    # Keep only non-empty bins: empty bins contribute 0 to the entropy
    # and would produce log2(0) otherwise.
    H = bincount[numpy.where(bincount > 0)]
    H = H / float(sum(H))
    return -sum(H * numpy.log2(H))
| [
"numpy.where",
"numpy.array",
"numpy.log2"
] | [((469, 489), 'numpy.array', 'numpy.array', (['args[0]'], {}), '(args[0])\n', (480, 489), False, 'import numpy\n'), ((1062, 1087), 'numpy.where', 'numpy.where', (['(bincount > 0)'], {}), '(bincount > 0)\n', (1073, 1087), False, 'import numpy\n'), ((1137, 1150), 'numpy.log2', 'numpy.log2', (['H'], {}), '(H)\n', (1147, 1150), False, 'import numpy\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# https://yq.aliyun.com/articles/118726
# Build a toy monthly time series (cumulative sum of uniform noise),
# plot it, and cut it into fixed-length training windows.
# NOTE: this script uses the TF1.x graph API throughout.
np.random.seed(111)
rng = pd.date_range(start='2000', periods=209, freq='M')
ts = pd.Series(np.random.uniform(-10, 10, size=len(rng)), rng).cumsum()
ts.plot(c='b', title='Example Time Series')
# plt.ion()
# plt.show()
plt.savefig("./test.png")
print(ts.head(10))
TS = np.array(ts)
# Inputs are non-overlapping windows of num_periods steps; targets are the
# same windows shifted forward by f_horizon (one-step-ahead prediction).
num_periods = 20
f_horizon = 1
# trim the series so its length is a multiple of num_periods
x_data = TS[:(len(TS)-(len(TS) % num_periods))]
print(x_data.shape)
x_batches = x_data.reshape(-1, 20, 1)
y_data = TS[1:(len(TS)-(len(TS) % num_periods))+f_horizon]
y_batches = y_data.reshape(-1, 20, 1)
print(len(x_batches))
print(x_batches.shape)
print(x_batches[0:2])
print(y_batches[0:1])
print(y_batches.shape)
def test_data(series, forcast, num_periods):
test_x_setup = TS[-(num_periods+forcast):]
testX = test_x_setup[:num_periods].reshape(-1, 20, 1)
testY = TS[-(num_periods):].reshape(-1, 20, 1)
return testX, testY
X_test, Y_test = test_data(TS, f_horizon, num_periods)
print(X_test.shape)
print(X_test)
tf.reset_default_graph()
# Simple RNN regressor (TF1 graph mode): one BasicRNNCell with `hidden`
# units, followed by a dense projection back to one output per time step.
inputs = 1
hidden = 100
output = 1
X = tf.placeholder(tf.float32, [None, num_periods, inputs])
y = tf.placeholder(tf.float32, [None, num_periods, output])
basic_cell = tf.contrib.rnn.BasicRNNCell(
    num_units=hidden, activation=tf.nn.relu)
rnn_output, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)
lr = 0.001
# Share one dense layer across every time step by flattening the RNN
# output to (batch * time, hidden) before the projection.
stacked_rnn_output = tf.reshape(rnn_output, [-1, hidden])
stacked_outputs = tf.layers.dense(stacked_rnn_output, output)
outputs = tf.reshape(stacked_outputs, [-1, num_periods, output])
# sum-of-squares loss over all steps, minimized with Adam
loss = tf.reduce_sum(tf.square(outputs-y))
optimizer = tf.train.AdamOptimizer(lr)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
epochs = 1000
with tf.Session() as s:
    s.run(init)
    for ep in range(epochs):
        s.run(training_op, feed_dict={X: x_batches, y: y_batches})
        if ep % 100 == 0:
            # report training MSE (actually total squared error) periodically
            mse = loss.eval(feed_dict={X: x_batches, y: y_batches})
            print(ep, "\tMSE:", mse)
    y_pred = s.run(outputs, feed_dict={X: X_test})
    print("-------------yyyyyyyyyyyyyyyyyy----------")
    print(y_pred)
| [
"matplotlib.pyplot.savefig",
"tensorflow.reset_default_graph",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.contrib.rnn.BasicRNNCell",
"tensorflow.nn.dynamic_rnn",
"tensorflow.global_variables_initializer",
"numpy.array",
"numpy.random.seed",
"tensorflow.reshape",
"tensorflow.laye... | [((138, 157), 'numpy.random.seed', 'np.random.seed', (['(111)'], {}), '(111)\n', (152, 157), True, 'import numpy as np\n'), ((165, 215), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2000"""', 'periods': '(209)', 'freq': '"""M"""'}), "(start='2000', periods=209, freq='M')\n", (178, 215), True, 'import pandas as pd\n'), ((358, 383), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./test.png"""'], {}), "('./test.png')\n", (369, 383), True, 'import matplotlib.pyplot as plt\n'), ((410, 422), 'numpy.array', 'np.array', (['ts'], {}), '(ts)\n', (418, 422), True, 'import numpy as np\n'), ((1092, 1116), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1114, 1116), True, 'import tensorflow as tf\n'), ((1157, 1212), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, num_periods, inputs]'], {}), '(tf.float32, [None, num_periods, inputs])\n', (1171, 1212), True, 'import tensorflow as tf\n'), ((1217, 1272), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, num_periods, output]'], {}), '(tf.float32, [None, num_periods, output])\n', (1231, 1272), True, 'import tensorflow as tf\n'), ((1287, 1355), 'tensorflow.contrib.rnn.BasicRNNCell', 'tf.contrib.rnn.BasicRNNCell', ([], {'num_units': 'hidden', 'activation': 'tf.nn.relu'}), '(num_units=hidden, activation=tf.nn.relu)\n', (1314, 1355), True, 'import tensorflow as tf\n'), ((1382, 1432), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['basic_cell', 'X'], {'dtype': 'tf.float32'}), '(basic_cell, X, dtype=tf.float32)\n', (1399, 1432), True, 'import tensorflow as tf\n'), ((1467, 1503), 'tensorflow.reshape', 'tf.reshape', (['rnn_output', '[-1, hidden]'], {}), '(rnn_output, [-1, hidden])\n', (1477, 1503), True, 'import tensorflow as tf\n'), ((1522, 1565), 'tensorflow.layers.dense', 'tf.layers.dense', (['stacked_rnn_output', 'output'], {}), '(stacked_rnn_output, output)\n', (1537, 1565), True, 'import tensorflow as tf\n'), ((1576, 1630), 
'tensorflow.reshape', 'tf.reshape', (['stacked_outputs', '[-1, num_periods, output]'], {}), '(stacked_outputs, [-1, num_periods, output])\n', (1586, 1630), True, 'import tensorflow as tf\n'), ((1687, 1713), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr'], {}), '(lr)\n', (1709, 1713), True, 'import tensorflow as tf\n'), ((1761, 1794), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1792, 1794), True, 'import tensorflow as tf\n'), ((1653, 1675), 'tensorflow.square', 'tf.square', (['(outputs - y)'], {}), '(outputs - y)\n', (1662, 1675), True, 'import tensorflow as tf\n'), ((1816, 1828), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1826, 1828), True, 'import tensorflow as tf\n')] |
import os
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import numpy as np
import torch
import torchvision.datasets as datasets
class CIFAR10NoisyLabels(datasets.CIFAR10):
    """CIFAR10 Dataset with noisy labels.
    Args:
        noise_type (string): Noise type (default: 'symmetric').
            The value is either 'symmetric' or 'asymmetric'.
        noise_rate (float): Probability of label corruption (default: 0.0).
        seed (int): Random seed (default: 12345).
    This is a subclass of the `CIFAR10` Dataset.
    """
    def __init__(self,
                 noise_type='symmetric',
                 noise_rate=0.0,
                 seed=12345,
                 **kwargs):
        super(CIFAR10NoisyLabels, self).__init__(**kwargs)
        self.seed = seed
        self.num_classes = 10
        # [source, target] class-index pairs used for asymmetric flips:
        # truck->automobile, bird->airplane, deer->horse, cat->dog, dog->cat.
        self.flip_pairs = np.asarray([[9, 1], [2, 0], [4, 7], [3, 5], [5, 3]])
        if noise_rate > 0:
            if noise_type == 'symmetric':
                self.symmetric_noise(noise_rate)
            elif noise_type == 'asymmetric':
                self.asymmetric_noise(noise_rate)
            else:
                raise ValueError(
                    'expected noise_type is either symmetric or asymmetric '
                    '(got {})'.format(noise_type))
    def symmetric_noise(self, noise_rate):
        """Insert symmetric noise.
        For all classes, ground truth labels are replaced with uniform random
        classes.
        """
        np.random.seed(self.seed)
        targets = np.array(self.targets)
        # each sample is independently corrupted with probability noise_rate;
        # a corrupted sample may randomly receive its original label again
        mask = np.random.rand(len(targets)) <= noise_rate
        rnd_targets = np.random.choice(self.num_classes, mask.sum())
        targets[mask] = rnd_targets
        targets = [int(target) for target in targets]
        self.targets = targets
    def asymmetric_noise(self, noise_rate):
        """Insert asymmetric noise.
        Ground truth labels are flipped by mimicking real mistakes between
        similar classes. Following `Making Deep Neural Networks Robust to Label Noise: a Loss Correction Approach`_,
        ground truth labels are replaced with
        * truck -> automobile,
        * bird -> airplane,
        * deer -> horse
        * cat -> dog
        * dog -> cat
        .. _Making Deep Neural Networks Robust to Label Noise: a Loss Correction Approach
            https://arxiv.org/abs/1609.03683
        """
        np.random.seed(self.seed)
        targets = np.array(self.targets)
        for i, target in enumerate(targets):
            if target in self.flip_pairs[:, 0]:
                # flip this label to its paired class with prob. noise_rate
                if np.random.uniform(0, 1) <= noise_rate:
                    idx = int(np.where(self.flip_pairs[:, 0] == target)[0])
                    targets[i] = self.flip_pairs[idx, 1]
        targets = [int(x) for x in targets]
        self.targets = targets
    def T(self, noise_type, noise_rate):
        # Return the label-transition matrix for the given noise model
        # (entry [i, j] = probability a clean label i becomes noisy label j).
        if noise_type == 'symmetric':
            T = (torch.eye(self.num_classes) * (1 - noise_rate) +
                 (torch.ones([self.num_classes, self.num_classes]) /
                  self.num_classes * noise_rate))
        elif noise_type == 'asymmetric':
            T = torch.eye(self.num_classes)
            for i, j in self.flip_pairs:
                T[i, i] = 1 - noise_rate
                T[i, j] = noise_rate
        return T
class CIFAR100NoisyLabels(datasets.CIFAR100):
    """CIFAR100 Dataset with noisy labels.
    Args:
        noise_type (string): Noise type (default: 'symmetric').
            The value is either 'symmetric' or 'asymmetric'.
        noise_rate (float): Probability of label corruption (default: 0.0).
        seed (int): Random seed (default: 12345).
    This is a subclass of the `CIFAR100` Dataset.
    """
    def __init__(self,
                 noise_type='symmetric',  # bug fix: default was misspelled 'synmetric'
                 noise_rate=0.0,
                 seed=12345,
                 **kwargs):
        super(CIFAR100NoisyLabels, self).__init__(**kwargs)
        self.seed = seed
        self.num_classes = 100
        self.num_superclasses = 20
        if noise_rate > 0:
            if noise_type == 'symmetric':
                self.symmetric_noise(noise_rate)
            elif noise_type == 'asymmetric':
                self.asymmetric_noise(noise_rate)
            else:
                raise ValueError(
                    'expected noise_type is either symmetric or asymmetric '
                    '(got {})'.format(noise_type))
    def symmetric_noise(self, noise_rate):
        """Symmetric noise in CIFAR100.
        For all classes, ground truth labels are replaced with uniform random
        classes.
        """
        np.random.seed(self.seed)
        targets = np.array(self.targets)
        # each sample is independently corrupted with probability noise_rate
        mask = np.random.rand(len(targets)) <= noise_rate
        rnd_targets = np.random.choice(self.num_classes, mask.sum())
        targets[mask] = rnd_targets
        targets = [int(x) for x in targets]
        self.targets = targets
    def asymmetric_noise(self, noise_rate):
        """Insert asymmetric noise.
        Ground truth labels are flipped by mimicking real mistakes between
        similar classes. Following `Making Deep Neural Networks Robust to Label Noise: a Loss Correction Approach`_,
        ground truth labels are flipped into the next class circularly within
        the same superclasses
        .. _Making Deep Neural Networks Robust to Label Noise: a Loss Correction Approach
            https://arxiv.org/abs/1609.03683
        """
        np.random.seed(self.seed)
        targets = np.array(self.targets)
        # Sample each noisy label from the row of the transition matrix that
        # corresponds to its clean label.
        Tdata = self.T('asymmetric', noise_rate).numpy().astype(np.float64)
        Tdata = Tdata / np.sum(Tdata, axis=1)[:, None]  # normalize rows
        for i, target in enumerate(targets):
            one_hot = np.random.multinomial(1, Tdata[target, :], 1)[0]
            targets[i] = np.where(one_hot == 1)[0]
        targets = [int(x) for x in targets]
        self.targets = targets
    def _load_coarse_targets(self):
        # Read the 20 superclass ('coarse') labels from the raw CIFAR100
        # pickle files, since torchvision only exposes the fine labels.
        if self.train:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list
        coarse_targets = []
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, 'rb') as f:
                if sys.version_info[0] == 2:
                    entry = pickle.load(f)
                else:
                    entry = pickle.load(f, encoding='latin1')
                coarse_targets.extend(entry['coarse_labels'])
        return coarse_targets
    def T(self, noise_type, noise_rate):
        # Return the label-transition matrix for the given noise model
        # (entry [i, j] = probability a clean label i becomes noisy label j).
        if noise_type == 'symmetric':
            T = (torch.eye(self.num_classes) * (1 - noise_rate) +
                 (torch.ones([self.num_classes, self.num_classes]) /
                  self.num_classes * noise_rate))
        elif noise_type == 'asymmetric':
            num_classes = self.num_classes
            num_superclasses = self.num_superclasses
            num_subclasses = num_classes // num_superclasses
            targets = np.array(self.targets)
            coarse_targets = np.asarray(self._load_coarse_targets())
            T = torch.eye(num_classes) * (1 - noise_rate)
            # within each superclass, flip every fine class to the next one
            # circularly with probability noise_rate
            for i in range(num_superclasses):
                subclass_targets = np.unique(targets[coarse_targets == i])
                clean = subclass_targets
                noisy = np.concatenate([clean[1:], clean[:1]])
                for j in range(num_subclasses):
                    T[clean[j], noisy[j]] = noise_rate
        return T
| [
"numpy.unique",
"numpy.where",
"torch.eye",
"os.path.join",
"numpy.asarray",
"pickle.load",
"numpy.random.multinomial",
"numpy.array",
"numpy.sum",
"numpy.random.seed",
"numpy.concatenate",
"numpy.random.uniform",
"torch.ones"
] | [((888, 940), 'numpy.asarray', 'np.asarray', (['[[9, 1], [2, 0], [4, 7], [3, 5], [5, 3]]'], {}), '([[9, 1], [2, 0], [4, 7], [3, 5], [5, 3]])\n', (898, 940), True, 'import numpy as np\n'), ((1530, 1555), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (1544, 1555), True, 'import numpy as np\n'), ((1574, 1596), 'numpy.array', 'np.array', (['self.targets'], {}), '(self.targets)\n', (1582, 1596), True, 'import numpy as np\n'), ((2456, 2481), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (2470, 2481), True, 'import numpy as np\n'), ((2500, 2522), 'numpy.array', 'np.array', (['self.targets'], {}), '(self.targets)\n', (2508, 2522), True, 'import numpy as np\n'), ((4680, 4705), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (4694, 4705), True, 'import numpy as np\n'), ((4724, 4746), 'numpy.array', 'np.array', (['self.targets'], {}), '(self.targets)\n', (4732, 4746), True, 'import numpy as np\n'), ((5524, 5549), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (5538, 5549), True, 'import numpy as np\n'), ((5568, 5590), 'numpy.array', 'np.array', (['self.targets'], {}), '(self.targets)\n', (5576, 5590), True, 'import numpy as np\n'), ((6234, 6286), 'os.path.join', 'os.path.join', (['self.root', 'self.base_folder', 'file_name'], {}), '(self.root, self.base_folder, file_name)\n', (6246, 6286), False, 'import os\n'), ((3204, 3231), 'torch.eye', 'torch.eye', (['self.num_classes'], {}), '(self.num_classes)\n', (3213, 3231), False, 'import torch\n'), ((5691, 5712), 'numpy.sum', 'np.sum', (['Tdata'], {'axis': '(1)'}), '(Tdata, axis=1)\n', (5697, 5712), True, 'import numpy as np\n'), ((5789, 5834), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'Tdata[target, :]', '(1)'], {}), '(1, Tdata[target, :], 1)\n', (5810, 5834), True, 'import numpy as np\n'), ((5863, 5885), 'numpy.where', 'np.where', (['(one_hot == 1)'], {}), '(one_hot == 1)\n', (5871, 
5885), True, 'import numpy as np\n'), ((7083, 7105), 'numpy.array', 'np.array', (['self.targets'], {}), '(self.targets)\n', (7091, 7105), True, 'import numpy as np\n'), ((2635, 2658), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2652, 2658), True, 'import numpy as np\n'), ((2979, 3006), 'torch.eye', 'torch.eye', (['self.num_classes'], {}), '(self.num_classes)\n', (2988, 3006), False, 'import torch\n'), ((6405, 6419), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6416, 6419), False, 'import pickle\n'), ((6470, 6503), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (6481, 6503), False, 'import pickle\n'), ((6694, 6721), 'torch.eye', 'torch.eye', (['self.num_classes'], {}), '(self.num_classes)\n', (6703, 6721), False, 'import torch\n'), ((7192, 7214), 'torch.eye', 'torch.eye', (['num_classes'], {}), '(num_classes)\n', (7201, 7214), False, 'import torch\n'), ((7315, 7354), 'numpy.unique', 'np.unique', (['targets[coarse_targets == i]'], {}), '(targets[coarse_targets == i])\n', (7324, 7354), True, 'import numpy as np\n'), ((7420, 7458), 'numpy.concatenate', 'np.concatenate', (['[clean[1:], clean[:1]]'], {}), '([clean[1:], clean[:1]])\n', (7434, 7458), True, 'import numpy as np\n'), ((3046, 3094), 'torch.ones', 'torch.ones', (['[self.num_classes, self.num_classes]'], {}), '([self.num_classes, self.num_classes])\n', (3056, 3094), False, 'import torch\n'), ((6761, 6809), 'torch.ones', 'torch.ones', (['[self.num_classes, self.num_classes]'], {}), '([self.num_classes, self.num_classes])\n', (6771, 6809), False, 'import torch\n'), ((2704, 2745), 'numpy.where', 'np.where', (['(self.flip_pairs[:, 0] == target)'], {}), '(self.flip_pairs[:, 0] == target)\n', (2712, 2745), True, 'import numpy as np\n')] |
# This file implements spiking neural networks as described
# in the work:
# <NAME>, Coarse scale representation of spiking neural networks:
# backpropagation through spikes and applications to neuromorphic hardware,
# International Conference on Neuromorphic Systems (ICONS), 2020
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import math as m
from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes
class SpikingShallowNetwork(nn.Module):
    """Single-layer spiking classifier.

    Pixels are stochastically encoded as spikes each time step, pushed
    through one linear layer into a spiking layer, and the per-class spike
    counts accumulated over Nsp steps are returned as the logits.
    """

    def __init__(self, Nin, Nout, Nsp, t1, beta=5, scale=1):
        super(SpikingShallowNetwork, self).__init__()
        self.scale = scale
        self.Nsp = Nsp
        self.Nout = Nout
        self.Nin = Nin
        self.l1 = nn.Linear(self.Nin, self.Nout, bias=None)
        self.sl = SpikingLayer(t1, beta=beta)

    def forward(self, x, device):
        flat = x.view(-1, 28 * 28)
        batch = flat.shape[0]
        # spiking-layer state (spikes, membrane potential) and running counts
        spikes = torch.zeros(batch, self.Nout).to(device)
        volt = torch.zeros(batch, self.Nout).to(device)
        counts = torch.zeros(batch, self.Nout).to(device)
        for _ in range(self.Nsp):
            # fresh stochastic spike encoding of the input each step
            drive = self.l1(poisson_spikes(flat, self.scale).to(device))
            spikes, volt = self.sl(drive, spikes, volt)
            counts = counts + spikes
        return counts
class SpikingHiddenNetwork(nn.Module):
    """One-hidden-layer spiking network.

    Stochastic input spikes -> linear -> spiking hidden layer -> linear ->
    spiking output layer.  Returns the per-class output spike counts
    accumulated over Nsp time steps.
    """

    def __init__(self, Nin, Nhid, Nout, Nsp, t1, t2, beta=5, scale=1):
        super(SpikingHiddenNetwork, self).__init__()
        self.scale = scale
        self.Nsp = Nsp
        self.Nhid = Nhid
        self.Nout = Nout
        self.l1 = nn.Linear(Nin, self.Nhid)
        self.l2 = nn.Linear(self.Nhid, self.Nout, bias=None)
        self.sl1 = SpikingLayer(t1, beta=beta)
        self.sl2 = SpikingLayer(t2, beta=beta)

    def forward(self, x, device):
        flat = x.view(-1, 28 * 28)
        batch = flat.shape[0]
        # per-layer spike/potential state, all starting at zero
        s_hid = torch.zeros(batch, self.Nhid).to(device)
        v_hid = torch.zeros(batch, self.Nhid).to(device)
        s_out = torch.zeros(batch, self.Nout).to(device)
        v_out = torch.zeros(batch, self.Nout).to(device)
        counts = torch.zeros(batch, self.Nout).to(device)
        for _ in range(self.Nsp):
            spikes_in = poisson_spikes(flat, self.scale).to(device)
            s_hid, v_hid = self.sl1(self.l1(spikes_in), s_hid, v_hid)
            s_out, v_out = self.sl2(self.l2(s_hid), s_out, v_out)
            counts = counts + s_out
        return counts
class SpikingConvNetwork(nn.Module):
    # Spiking network with one conv stage (1->4 channels, stride 2) followed
    # by a fully connected spiking readout.  Inputs are re-sampled as spikes
    # on every time step; Nhid = 784 = 4*14*14 implies 28x28 inputs.
    def __init__(self, Nin, Nout, Nsp, t1, t2, beta=5, scale=1):
        super(SpikingConvNetwork, self).__init__()
        self.Nsp = Nsp
        self.Nout = Nout
        self.Nin = Nin
        self.Nhid = 784
        self.conv1 = nn.Conv2d(1, 4, (5,5), stride=2, padding=2)
        self.l1 = nn.Linear(self.Nhid, self.Nout, bias=None)
        self.sl1 = SpikingLayer(t1, beta=beta)
        self.sl2 = SpikingLayer(t2, beta=beta)
        self.scale = scale
    def forward(self, x, device):
        # spike/membrane-potential state for the hidden (conv) stage
        s1 = torch.zeros(x.shape[0], self.Nhid).to(device)
        v1 = torch.zeros(x.shape[0], self.Nhid).to(device)
        # state for the output layer
        s2 = torch.zeros(x.shape[0], self.Nout).to(device)
        v2 = torch.zeros(x.shape[0], self.Nout).to(device)
        # accumulated output spike counts over the Nsp time steps
        nsp = torch.zeros(x.shape[0], self.Nout).to(device)
        for i in range(self.Nsp):
            # new stochastic spike sample of the input on every step
            xi = poisson_spikes(x, self.scale).to(device)
            xi = self.conv1(xi)
            xi = xi.view(xi.shape[0],-1)
            s1, v1 = self.sl1(xi, s1, v1)
            xi2 = self.l1(s1)
            s2, v2 = self.sl2(xi2, s2, v2)
            nsp += s2
        return nsp
class SpikingConvNetwork2(nn.Module):
    # Two conv spiking stages (1->4 stride 2, then 4->6) followed by a fully
    # connected spiking readout; Nhid2 = 600 = 6*10*10 matches the flattened
    # second-stage feature maps.
    def __init__(self, Nin, Nout, Nsp, t1, t2, beta=5, scale=1):
        super(SpikingConvNetwork2, self).__init__()
        self.Nsp = Nsp
        self.Nout = Nout
        self.Nin = Nin
        self.Nhid1 = Nin
        self.Nhid2 = 600
        self.scale = scale
        self.conv1 = nn.Conv2d(1, 4, (5,5), stride=2, padding=2)
        self.l1 = nn.Linear(self.Nhid2, self.Nout, bias=None)
        self.conv2 = nn.Conv2d(4, 6, (5,5), stride=1, padding=0)
        self.sl1 = SpikingLayer(t1, beta=beta)
        self.sl2 = SpikingLayer(t1, beta=beta)
        self.sl3 = SpikingLayer(t2, beta=beta)
    def forward(self, x, device):
        # per-stage spike/potential state; shapes match each stage's output
        s1 = torch.zeros(x.shape[0], 4, 14, 14).to(device)
        v1 = torch.zeros(x.shape[0], 4, 14, 14).to(device)
        s2 = torch.zeros(x.shape[0], 6, 10, 10).to(device)
        v2 = torch.zeros(x.shape[0], 6, 10, 10).to(device)
        s3 = torch.zeros(x.shape[0], self.Nout).to(device)
        v3 = torch.zeros(x.shape[0], self.Nout).to(device)
        # accumulated output spike counts over the Nsp simulation steps
        nsp = torch.zeros(x.shape[0], self.Nout).to(device)
        for i in range(self.Nsp):
            # new stochastic spike encoding of the input each step
            xi = poisson_spikes(x,self.scale).to(device)
            xi = self.conv1(xi)
            s1, v1 = self.sl1(xi, s1, v1)
            xi = self.conv2(s1)
            s2, v2 = self.sl2(xi, s2, v2)
            xi = s2.view(s2.shape[0],-1)
            xi2 = self.l1(xi)
            s3, v3 = self.sl3(xi2, s3, v3)
            nsp += s3
        return nsp
class SpikingLeNet5(nn.Module):
    """Spiking LeNet-5: two conv+max-pool stages and three fully connected
    layers, each stage followed by a spiking layer.  The input image is
    re-sampled into binary spikes on every one of the Nsp time steps and
    the per-class output spike counts are accumulated and returned.

    Args:
        Nsp (int): number of simulation time steps.
        t1: leakage parameter of the convolutional spiking layers.
        t2: leakage parameter of the fully connected spiking layers.
        beta (float): steepness of the spiking nonlinearity (default: 5).
        scale (float): spike-probability scaling of pixel values (default: 1).
    """
    def __init__(self, Nsp, t1, t2, beta=5, scale=1):
        self.Nsp = Nsp
        super(SpikingLeNet5, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6,
                               kernel_size=5, stride=1, padding=2, bias=True)
        self.max_pool_1 = nn.MaxPool2d(kernel_size=2)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16,
                               kernel_size=5, stride=1, padding=0, bias=True)
        self.max_pool_2 = nn.MaxPool2d(kernel_size=2)
        self.fc1 = nn.Linear(16*5*5, 120, bias=False)
        self.fc2 = nn.Linear(120, 84, bias=False)
        self.fc3 = nn.Linear(84, 10, bias=False)
        self.sl1 = SpikingLayer(t1, beta=beta)
        self.sl2 = SpikingLayer(t1, beta=beta)
        self.sl3 = SpikingLayer(t2, beta=beta)
        self.sl4 = SpikingLayer(t2, beta=beta)
        self.sl5 = SpikingLayer(t2, beta=beta)
        self.scale = scale
    def build_x(self, x):
        """Sample a binary spike image: each pixel fires with probability
        of roughly scale * intensity on this time step."""
        # Bug fix: removed the unused `xi = torch.zeros_like(x)` local.
        # Draw uniform noise, zero entries above the firing probability,
        # then binarize the survivors.
        xout = torch.rand_like(x)
        xout[xout > self.scale*x] = 0.0
        xout[xout > 0] = 1.0
        return xout
    def forward(self, x, device):
        # spike/membrane-potential state per spiking stage
        s1 = torch.zeros(x.shape[0], 6, 28, 28).to(device)
        v1 = torch.zeros(x.shape[0], 6, 28, 28).to(device)
        s2 = torch.zeros(x.shape[0], 16, 10, 10).to(device)
        v2 = torch.zeros(x.shape[0], 16, 10, 10).to(device)
        s3 = torch.zeros(x.shape[0], 120).to(device)
        v3 = torch.zeros(x.shape[0], 120).to(device)
        s4 = torch.zeros(x.shape[0], 84).to(device)
        v4 = torch.zeros(x.shape[0], 84).to(device)
        s5 = torch.zeros(x.shape[0], 10).to(device)
        v5 = torch.zeros(x.shape[0], 10).to(device)
        # accumulated output spike counts over the Nsp steps
        nsp = torch.zeros(x.shape[0], 10).to(device)
        for i in range(self.Nsp):
            # re-sample the input as binary spikes on every step
            xi = self.build_x(x).to(device)
            xi = self.conv1(xi)
            s1, v1 = self.sl1(xi, s1, v1)
            xi = self.max_pool_1(s1)
            xi = self.conv2(xi)
            s2, v2 = self.sl2(xi, s2, v2)
            xi = self.max_pool_2(s2)
            xi = xi.view(xi.shape[0],-1)
            xi = self.fc1(xi)
            s3, v3 = self.sl3(xi, s3, v3)
            xi = self.fc2(s3)
            s4, v4 = self.sl4(xi, s4, v4)
            xi = self.fc3(s4)
            s5, v5 = self.sl5(xi, s5, v5)
            nsp += s5
        return nsp
class SpikingLeNet5const(nn.Module):
    """Spiking LeNet-5 variant with a constant-input front end.

    Instead of re-sampling Bernoulli spikes from the image, the raw image
    is fed every step into a SpikingVextLayer (sl0) which converts the
    constant drive into spikes; the rest of the network is identical to
    SpikingLeNet5.

    Args:
        Nsp (int): number of simulation time steps.
        t0: leakage parameter of the input conversion layer.
        t1: leakage parameter of the convolutional spiking layers.
        t2: leakage parameter of the fully connected spiking layers.
        beta (float): steepness of the spiking nonlinearity (default: 5).
        scale (float): spike-probability scaling used by build_x (default: 1).
    """
    def __init__(self, Nsp, t0, t1, t2, beta=5, scale=1):
        self.Nsp = Nsp
        super(SpikingLeNet5const, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6,
                               kernel_size=5, stride=1, padding=2, bias=True)
        self.max_pool_1 = nn.MaxPool2d(kernel_size=2)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16,
                               kernel_size=5, stride=1, padding=0, bias=True)
        self.max_pool_2 = nn.MaxPool2d(kernel_size=2)
        self.fc1 = nn.Linear(16*5*5, 120, bias=False)
        self.fc2 = nn.Linear(120, 84, bias=False)
        self.fc3 = nn.Linear(84, 10, bias=False)
        self.sl0 = SpikingVextLayer(t0, beta=beta)
        self.sl1 = SpikingLayer(t1, beta=beta)
        self.sl2 = SpikingLayer(t1, beta=beta)
        self.sl3 = SpikingLayer(t2, beta=beta)
        self.sl4 = SpikingLayer(t2, beta=beta)
        self.sl5 = SpikingLayer(t2, beta=beta)
        self.scale = scale
    def build_x(self, x):
        """Sample a binary spike image (unused by forward in this variant;
        kept for interface parity with SpikingLeNet5)."""
        # Bug fix: removed the unused `xi = torch.zeros_like(x)` local.
        xout = torch.rand_like(x)
        xout[xout > self.scale*x] = 0.0
        xout[xout > 0] = 1.0
        return xout
    def forward(self, x, device):
        # spike/membrane-potential state per spiking stage
        s0 = torch.zeros(x.shape[0], 1, 28, 28).to(device)
        v0 = torch.zeros(x.shape[0], 1, 28, 28).to(device)
        s1 = torch.zeros(x.shape[0], 6, 28, 28).to(device)
        v1 = torch.zeros(x.shape[0], 6, 28, 28).to(device)
        s2 = torch.zeros(x.shape[0], 16, 10, 10).to(device)
        v2 = torch.zeros(x.shape[0], 16, 10, 10).to(device)
        s3 = torch.zeros(x.shape[0], 120).to(device)
        v3 = torch.zeros(x.shape[0], 120).to(device)
        s4 = torch.zeros(x.shape[0], 84).to(device)
        v4 = torch.zeros(x.shape[0], 84).to(device)
        s5 = torch.zeros(x.shape[0], 10).to(device)
        v5 = torch.zeros(x.shape[0], 10).to(device)
        # accumulated output spike counts over the Nsp steps
        nsp = torch.zeros(x.shape[0], 10).to(device)
        for i in range(self.Nsp):
            # constant external drive converted to spikes by sl0 each step
            s0, v0 = self.sl0(x, s0, v0)
            xi = self.conv1(s0)
            s1, v1 = self.sl1(xi, s1, v1)
            xi = self.max_pool_1(s1)
            xi = self.conv2(xi)
            s2, v2 = self.sl2(xi, s2, v2)
            xi = self.max_pool_2(s2)
            xi = xi.view(xi.shape[0],-1)
            xi = self.fc1(xi)
            s3, v3 = self.sl3(xi, s3, v3)
            xi = self.fc2(s3)
            s4, v4 = self.sl4(xi, s4, v4)
            xi = self.fc3(s4)
            s5, v5 = self.sl5(xi, s5, v5)
            nsp += s5
        return nsp
def train(args, model, device, train_loader, optimizer, epoch, scale=4):
    """Train the model for one epoch with the cross-entropy objective.

    The network outputs spike counts in [0, model.Nsp]; they are centered
    around zero (subtract Nsp/2) and scaled before the softmax so the
    logits have a useful dynamic range.

    Cleanup: removed the unused locals `Nsp` and `bsize` and the redundant
    `mtarget`/`mdata` aliases of the original.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        data, target = data.to(device), target.to(device)
        # center and scale the accumulated spike counts into logits
        output = scale*(model(data, device)-0.5*model.Nsp)
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 5 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test(args, model, device, test_loader, scale=4):
    """Evaluate the model under the cross-entropy objective.

    Returns the accuracy on the test set as a percentage; also prints the
    average loss and accuracy.
    """
    model.eval()
    total_loss = 0.0
    n_correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            # center and scale spike counts into logits, as in train()
            logits = scale * (model(data, device) - 0.5 * model.Nsp)
            total_loss += F.cross_entropy(logits, target).item()
            pred = logits.max(1, keepdim=True)[1]
            n_correct += pred.eq(target.view_as(pred)).sum().item()
    total_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        total_loss, n_correct, len(test_loader.dataset),
        100. * n_correct / len(test_loader.dataset)))
    return 100. * n_correct / len(test_loader.dataset)
def train_mse(args, model, device, train_loader, optimizer, epoch):
    """Train one epoch against a spike-count regression (MSE) objective.

    The integer class labels are expanded to one-hot targets whose hot
    entry equals args.spikes, i.e. the desired number of output spikes for
    the correct class; the network's accumulated spike counts are then
    regressed onto those targets.

    Cleanup: removed the unused local `bsize` of the original.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # build the scaled one-hot spike-count target
        mtarget = torch.zeros(target.shape[0], 10)
        for i in range(target.shape[0]):
            mtarget[i, target[i]] = args.spikes
        data, target = data.to(device), mtarget.to(device)
        output = model(data, device)
        loss = F.mse_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 5 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test_mse(args, model, device, test_loader):
    """Evaluate the model under the MSE spike-count objective.

    Labels are expanded to one-hot targets with value args.spikes at the
    true class; accuracy uses the argmax of the raw spike counts.  Returns
    the accuracy as a percentage.

    Cleanup: removed the unused local `Nst` of the original.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            # build the scaled one-hot spike-count target
            mtarget = torch.zeros(target.shape[0], 10)
            for i in range(target.shape[0]):
                mtarget[i, target[i]] = args.spikes
            data, mtarget = data.to(device), mtarget.to(device)
            output = model(data, device)
            test_loss += F.mse_loss(output, mtarget, reduction='sum').item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # class with the most output spikes
            correct += pred.eq(target.view_as(pred).to(device)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    return 100. * correct / len(test_loader.dataset)
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='SpikingNet example')
    parser.add_argument('name', metavar='N', type=str, nargs=1,
                        help='filename')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--dataset', type=int, default=0, metavar='N',
                        help='dataset: mnist-0 fashionmnist-1')
    # --length is the number of simulation time steps (Nsp)
    parser.add_argument('--length', type=int, default=8, metavar='N',
                        help='length: (default: 8)')
    # NOTE(review): the help strings of --leakage1 and --leakage2 appear
    # swapped ('leakage2' / 'leakage1'); the option names are what matter.
    parser.add_argument('--leakage1', type=int, default=4, metavar='N',
                        help='leakage2: (default: 4)')
    parser.add_argument('--leakage2', type=int, default=4, metavar='N',
                        help='leakage1: (default: 4)')
    parser.add_argument('--leakage0', type=int, default=4, metavar='N',
                        help='leakage0: (default: 4)')
    parser.add_argument('--beta', type=float, default=5.0, metavar='N',
                        help='beta: (default: 5.0)')
    parser.add_argument('--scale', type=float, default=1.0, metavar='N',
                        help='scale: (default: 1.0)')
    parser.add_argument('--cost', type=int, default=1, metavar='N',
                        help='cost function 0 - xent, 1 - mse: (default: 1)')
    parser.add_argument('--spikes', type=int, default=4, metavar='N',
                        help='# output spikes in mse: (default: 4)')
    parser.add_argument('--model', type=int, default=0, metavar='N',
                        help='model: shallow-0 hidden-1 conv1-2 conv2-3 \
                        Lenet5-4 Lenet5const-5')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    # dataset selection: 1 -> FashionMNIST, anything else -> MNIST
    if args.dataset == 1:
        train_loader = torch.utils.data.DataLoader(
            datasets.FashionMNIST('fashionMNIST', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor()
                       ])),
            batch_size=args.batch_size, shuffle=True, **kwargs)
        test_loader = torch.utils.data.DataLoader(
            datasets.FashionMNIST('fashionMNIST', train=False, transform=transforms.Compose([
                transforms.ToTensor()])),
            batch_size=args.test_batch_size, shuffle=True, **kwargs)
    else:
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST('MNIST', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor()
                       ])),
            batch_size=args.batch_size, shuffle=True, **kwargs)
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('MNIST', train=False, transform=transforms.Compose([
                transforms.ToTensor()
            ])),
            batch_size=args.test_batch_size, shuffle=True, **kwargs)
    # model selection per the --model flag (see its help string)
    if args.model == 0:
        model = SpikingShallowNetwork(784, 10, args.length, args.leakage1,
                beta=args.beta, scale=args.scale).to(device)
    elif args.model == 1:
        model = SpikingHiddenNetwork(784, 10, 30, args.length, args.leakage1,
                args.leakage2, beta=args.beta, scale=args.scale).to(device)
    elif args.model == 2:
        model = SpikingConvNetwork(784, 10, args.length, args.leakage1,
                args.leakage2, beta=args.beta, scale=args.scale).to(device)
    elif args.model == 3:
        model = SpikingConvNetwork2(784, 10, args.length, args.leakage1,
                args.leakage2, beta=args.beta, scale=args.scale).to(device)
    elif args.model == 4:
        model = SpikingLeNet5(args.length, args.leakage1, args.leakage2,
                beta=args.beta, scale=args.scale).to(device)
    elif args.model == 5:
        model = SpikingLeNet5const(args.length, args.leakage0, args.leakage1,
                args.leakage2, beta=args.beta, scale=args.scale).to(device)
    # cost selection: 0 -> cross-entropy, otherwise MSE on spike counts
    if args.cost == 0:
        trainf = train
        testf = test
    else:
        trainf = train_mse
        testf = test_mse
    # learning rate is scaled inversely with the number of time steps
    optimizer = optim.Adam(model.parameters(), lr=args.lr*8/args.length)
    data = []
    for epoch in range(1, args.epochs + 1):
        trainf(args, model, device, train_loader, optimizer, epoch)
        result = testf(args, model, device, test_loader)
        data.append([epoch, result])
    data = np.array(data)
    # condstring = "%d_%d_%d_%d_%d" % (args.length, args.leakage0, args.leakage1, args.leakage2, args.dataset)
    # NOTE(review): args.beta is a float but is formatted with %d here.
    condstring = "%d_%d_%d_%d" % (args.length, args.leakage1, args.beta, args.dataset)
    # save per-epoch accuracies (.npy) and the final weights (.pt)
    filename = args.name[0] + "_" + condstring + ".npy"
    filemode = args.name[0] + "_" + condstring + ".pt"
    np.save(filename, data)
    torch.save(model.state_dict(), filemode)
# Standard Python entry-point guard: run main() only when executed as a script,
# not when this module is imported.
if __name__ == '__main__':
    main()
| [
"torch.nn.functional.mse_loss",
"torch.rand_like",
"argparse.ArgumentParser",
"spikingnet.poisson_spikes",
"torch.nn.Conv2d",
"numpy.array",
"spikingnet.SpikingVextLayer",
"torch.nn.MaxPool2d",
"spikingnet.SpikingLayer",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.nn.functional.cross_... | [((13796, 13853), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""SpikingNet example"""'}), "(description='SpikingNet example')\n", (13819, 13853), False, 'import argparse\n'), ((16194, 16237), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (16206, 16237), False, 'import torch\n'), ((19073, 19087), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (19081, 19087), True, 'import numpy as np\n'), ((19407, 19430), 'numpy.save', 'np.save', (['filename', 'data'], {}), '(filename, data)\n', (19414, 19430), True, 'import numpy as np\n'), ((826, 867), 'torch.nn.Linear', 'nn.Linear', (['self.Nin', 'self.Nout'], {'bias': 'None'}), '(self.Nin, self.Nout, bias=None)\n', (835, 867), True, 'import torch.nn as nn\n'), ((945, 972), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t1'], {'beta': 'beta'}), '(t1, beta=beta)\n', (957, 972), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((1719, 1744), 'torch.nn.Linear', 'nn.Linear', (['Nin', 'self.Nhid'], {}), '(Nin, self.Nhid)\n', (1728, 1744), True, 'import torch.nn as nn\n'), ((1764, 1806), 'torch.nn.Linear', 'nn.Linear', (['self.Nhid', 'self.Nout'], {'bias': 'None'}), '(self.Nhid, self.Nout, bias=None)\n', (1773, 1806), True, 'import torch.nn as nn\n'), ((1827, 1854), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t1'], {'beta': 'beta'}), '(t1, beta=beta)\n', (1839, 1854), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((1875, 1902), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t2'], {'beta': 'beta'}), '(t2, beta=beta)\n', (1887, 1902), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((2847, 2891), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(4)', '(5, 5)'], {'stride': '(2)', 'padding': '(2)'}), '(1, 4, (5, 5), stride=2, padding=2)\n', (2856, 2891), True, 'import torch.nn as nn\n'), ((2910, 2952), 
'torch.nn.Linear', 'nn.Linear', (['self.Nhid', 'self.Nout'], {'bias': 'None'}), '(self.Nhid, self.Nout, bias=None)\n', (2919, 2952), True, 'import torch.nn as nn\n'), ((2973, 3000), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t1'], {'beta': 'beta'}), '(t1, beta=beta)\n', (2985, 3000), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((3021, 3048), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t2'], {'beta': 'beta'}), '(t2, beta=beta)\n', (3033, 3048), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((4085, 4129), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(4)', '(5, 5)'], {'stride': '(2)', 'padding': '(2)'}), '(1, 4, (5, 5), stride=2, padding=2)\n', (4094, 4129), True, 'import torch.nn as nn\n'), ((4148, 4191), 'torch.nn.Linear', 'nn.Linear', (['self.Nhid2', 'self.Nout'], {'bias': 'None'}), '(self.Nhid2, self.Nout, bias=None)\n', (4157, 4191), True, 'import torch.nn as nn\n'), ((4214, 4258), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4)', '(6)', '(5, 5)'], {'stride': '(1)', 'padding': '(0)'}), '(4, 6, (5, 5), stride=1, padding=0)\n', (4223, 4258), True, 'import torch.nn as nn\n'), ((4278, 4305), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t1'], {'beta': 'beta'}), '(t1, beta=beta)\n', (4290, 4305), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((4326, 4353), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t1'], {'beta': 'beta'}), '(t1, beta=beta)\n', (4338, 4353), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((4374, 4401), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t2'], {'beta': 'beta'}), '(t2, beta=beta)\n', (4386, 4401), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((5450, 5541), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(6)', 'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)', 'bias': '(True)'}), '(in_channels=1, out_channels=6, kernel_size=5, 
stride=1, padding=2,\n bias=True)\n', (5459, 5541), True, 'import torch.nn as nn\n'), ((5578, 5605), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (5590, 5605), True, 'import torch.nn as nn\n'), ((5628, 5721), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(6)', 'out_channels': '(16)', 'kernel_size': '(5)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=\n 0, bias=True)\n', (5637, 5721), True, 'import torch.nn as nn\n'), ((5757, 5784), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (5769, 5784), True, 'import torch.nn as nn\n'), ((5807, 5845), 'torch.nn.Linear', 'nn.Linear', (['(16 * 5 * 5)', '(120)'], {'bias': '(False)'}), '(16 * 5 * 5, 120, bias=False)\n', (5816, 5845), True, 'import torch.nn as nn\n'), ((5862, 5892), 'torch.nn.Linear', 'nn.Linear', (['(120)', '(84)'], {'bias': '(False)'}), '(120, 84, bias=False)\n', (5871, 5892), True, 'import torch.nn as nn\n'), ((5912, 5941), 'torch.nn.Linear', 'nn.Linear', (['(84)', '(10)'], {'bias': '(False)'}), '(84, 10, bias=False)\n', (5921, 5941), True, 'import torch.nn as nn\n'), ((5961, 5988), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t1'], {'beta': 'beta'}), '(t1, beta=beta)\n', (5973, 5988), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((6009, 6036), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t1'], {'beta': 'beta'}), '(t1, beta=beta)\n', (6021, 6036), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((6057, 6084), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t2'], {'beta': 'beta'}), '(t2, beta=beta)\n', (6069, 6084), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((6105, 6132), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t2'], {'beta': 'beta'}), '(t2, beta=beta)\n', (6117, 6132), False, 'from spikingnet import SpikingLayer, 
SpikingVextLayer, poisson_spikes\n'), ((6153, 6180), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t2'], {'beta': 'beta'}), '(t2, beta=beta)\n', (6165, 6180), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((6254, 6273), 'torch.zeros_like', 'torch.zeros_like', (['x'], {}), '(x)\n', (6270, 6273), False, 'import torch\n'), ((6290, 6308), 'torch.rand_like', 'torch.rand_like', (['x'], {}), '(x)\n', (6305, 6308), False, 'import torch\n'), ((7864, 7955), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(6)', 'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)', 'bias': '(True)'}), '(in_channels=1, out_channels=6, kernel_size=5, stride=1, padding=2,\n bias=True)\n', (7873, 7955), True, 'import torch.nn as nn\n'), ((7992, 8019), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (8004, 8019), True, 'import torch.nn as nn\n'), ((8042, 8135), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(6)', 'out_channels': '(16)', 'kernel_size': '(5)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=\n 0, bias=True)\n', (8051, 8135), True, 'import torch.nn as nn\n'), ((8171, 8198), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (8183, 8198), True, 'import torch.nn as nn\n'), ((8221, 8259), 'torch.nn.Linear', 'nn.Linear', (['(16 * 5 * 5)', '(120)'], {'bias': '(False)'}), '(16 * 5 * 5, 120, bias=False)\n', (8230, 8259), True, 'import torch.nn as nn\n'), ((8276, 8306), 'torch.nn.Linear', 'nn.Linear', (['(120)', '(84)'], {'bias': '(False)'}), '(120, 84, bias=False)\n', (8285, 8306), True, 'import torch.nn as nn\n'), ((8326, 8355), 'torch.nn.Linear', 'nn.Linear', (['(84)', '(10)'], {'bias': '(False)'}), '(84, 10, bias=False)\n', (8335, 8355), True, 'import torch.nn as nn\n'), ((8375, 8406), 'spikingnet.SpikingVextLayer', 'SpikingVextLayer', (['t0'], {'beta': 'beta'}), 
'(t0, beta=beta)\n', (8391, 8406), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((8427, 8454), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t1'], {'beta': 'beta'}), '(t1, beta=beta)\n', (8439, 8454), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((8475, 8502), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t1'], {'beta': 'beta'}), '(t1, beta=beta)\n', (8487, 8502), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((8523, 8550), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t2'], {'beta': 'beta'}), '(t2, beta=beta)\n', (8535, 8550), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((8571, 8598), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t2'], {'beta': 'beta'}), '(t2, beta=beta)\n', (8583, 8598), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((8619, 8646), 'spikingnet.SpikingLayer', 'SpikingLayer', (['t2'], {'beta': 'beta'}), '(t2, beta=beta)\n', (8631, 8646), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((8720, 8739), 'torch.zeros_like', 'torch.zeros_like', (['x'], {}), '(x)\n', (8736, 8739), False, 'import torch\n'), ((8756, 8774), 'torch.rand_like', 'torch.rand_like', (['x'], {}), '(x)\n', (8771, 8774), False, 'import torch\n'), ((10679, 10711), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'mtarget'], {}), '(output, mtarget)\n', (10694, 10711), True, 'import torch.nn.functional as F\n'), ((11163, 11178), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11176, 11178), False, 'import torch\n'), ((12131, 12163), 'torch.zeros', 'torch.zeros', (['target.shape[0]', '(10)'], {}), '(target.shape[0], 10)\n', (12142, 12163), False, 'import torch\n'), ((12366, 12392), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['output', 'target'], {}), '(output, target)\n', (12376, 12392), True, 'import torch.nn.functional as F\n'), ((12862, 
12877), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12875, 12877), False, 'import torch\n'), ((16154, 16179), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16177, 16179), False, 'import torch\n'), ((12944, 12976), 'torch.zeros', 'torch.zeros', (['target.shape[0]', '(10)'], {}), '(target.shape[0], 10)\n', (12955, 12976), False, 'import torch\n'), ((1082, 1116), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.Nout'], {}), '(x.shape[0], self.Nout)\n', (1093, 1116), False, 'import torch\n'), ((1141, 1175), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.Nout'], {}), '(x.shape[0], self.Nout)\n', (1152, 1175), False, 'import torch\n'), ((1202, 1236), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.Nout'], {}), '(x.shape[0], self.Nout)\n', (1213, 1236), False, 'import torch\n'), ((2015, 2049), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.Nhid'], {}), '(x.shape[0], self.Nhid)\n', (2026, 2049), False, 'import torch\n'), ((2075, 2109), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.Nhid'], {}), '(x.shape[0], self.Nhid)\n', (2086, 2109), False, 'import torch\n'), ((2135, 2169), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.Nout'], {}), '(x.shape[0], self.Nout)\n', (2146, 2169), False, 'import torch\n'), ((2195, 2229), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.Nout'], {}), '(x.shape[0], self.Nout)\n', (2206, 2229), False, 'import torch\n'), ((2256, 2290), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.Nout'], {}), '(x.shape[0], self.Nout)\n', (2267, 2290), False, 'import torch\n'), ((3128, 3162), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.Nhid'], {}), '(x.shape[0], self.Nhid)\n', (3139, 3162), False, 'import torch\n'), ((3188, 3222), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.Nhid'], {}), '(x.shape[0], self.Nhid)\n', (3199, 3222), False, 'import torch\n'), ((3248, 3282), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.Nout'], {}), '(x.shape[0], self.Nout)\n', (3259, 3282), 
False, 'import torch\n'), ((3308, 3342), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.Nout'], {}), '(x.shape[0], self.Nout)\n', (3319, 3342), False, 'import torch\n'), ((3369, 3403), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.Nout'], {}), '(x.shape[0], self.Nout)\n', (3380, 3403), False, 'import torch\n'), ((4453, 4487), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(4)', '(14)', '(14)'], {}), '(x.shape[0], 4, 14, 14)\n', (4464, 4487), False, 'import torch\n'), ((4513, 4547), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(4)', '(14)', '(14)'], {}), '(x.shape[0], 4, 14, 14)\n', (4524, 4547), False, 'import torch\n'), ((4573, 4607), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(6)', '(10)', '(10)'], {}), '(x.shape[0], 6, 10, 10)\n', (4584, 4607), False, 'import torch\n'), ((4633, 4667), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(6)', '(10)', '(10)'], {}), '(x.shape[0], 6, 10, 10)\n', (4644, 4667), False, 'import torch\n'), ((4693, 4727), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.Nout'], {}), '(x.shape[0], self.Nout)\n', (4704, 4727), False, 'import torch\n'), ((4753, 4787), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.Nout'], {}), '(x.shape[0], self.Nout)\n', (4764, 4787), False, 'import torch\n'), ((4814, 4848), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.Nout'], {}), '(x.shape[0], self.Nout)\n', (4825, 4848), False, 'import torch\n'), ((6448, 6482), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(6)', '(28)', '(28)'], {}), '(x.shape[0], 6, 28, 28)\n', (6459, 6482), False, 'import torch\n'), ((6508, 6542), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(6)', '(28)', '(28)'], {}), '(x.shape[0], 6, 28, 28)\n', (6519, 6542), False, 'import torch\n'), ((6568, 6603), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(16)', '(10)', '(10)'], {}), '(x.shape[0], 16, 10, 10)\n', (6579, 6603), False, 'import torch\n'), ((6629, 6664), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(16)', '(10)', '(10)'], {}), '(x.shape[0], 16, 10, 
10)\n', (6640, 6664), False, 'import torch\n'), ((6690, 6718), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(120)'], {}), '(x.shape[0], 120)\n', (6701, 6718), False, 'import torch\n'), ((6744, 6772), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(120)'], {}), '(x.shape[0], 120)\n', (6755, 6772), False, 'import torch\n'), ((6798, 6825), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(84)'], {}), '(x.shape[0], 84)\n', (6809, 6825), False, 'import torch\n'), ((6851, 6878), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(84)'], {}), '(x.shape[0], 84)\n', (6862, 6878), False, 'import torch\n'), ((6904, 6931), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(10)'], {}), '(x.shape[0], 10)\n', (6915, 6931), False, 'import torch\n'), ((6957, 6984), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(10)'], {}), '(x.shape[0], 10)\n', (6968, 6984), False, 'import torch\n'), ((7011, 7038), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(10)'], {}), '(x.shape[0], 10)\n', (7022, 7038), False, 'import torch\n'), ((8914, 8948), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(1)', '(28)', '(28)'], {}), '(x.shape[0], 1, 28, 28)\n', (8925, 8948), False, 'import torch\n'), ((8974, 9008), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(1)', '(28)', '(28)'], {}), '(x.shape[0], 1, 28, 28)\n', (8985, 9008), False, 'import torch\n'), ((9034, 9068), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(6)', '(28)', '(28)'], {}), '(x.shape[0], 6, 28, 28)\n', (9045, 9068), False, 'import torch\n'), ((9094, 9128), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(6)', '(28)', '(28)'], {}), '(x.shape[0], 6, 28, 28)\n', (9105, 9128), False, 'import torch\n'), ((9154, 9189), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(16)', '(10)', '(10)'], {}), '(x.shape[0], 16, 10, 10)\n', (9165, 9189), False, 'import torch\n'), ((9215, 9250), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(16)', '(10)', '(10)'], {}), '(x.shape[0], 16, 10, 10)\n', (9226, 9250), False, 'import torch\n'), ((9276, 9304), 'torch.zeros', 
'torch.zeros', (['x.shape[0]', '(120)'], {}), '(x.shape[0], 120)\n', (9287, 9304), False, 'import torch\n'), ((9330, 9358), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(120)'], {}), '(x.shape[0], 120)\n', (9341, 9358), False, 'import torch\n'), ((9384, 9411), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(84)'], {}), '(x.shape[0], 84)\n', (9395, 9411), False, 'import torch\n'), ((9437, 9464), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(84)'], {}), '(x.shape[0], 84)\n', (9448, 9464), False, 'import torch\n'), ((9490, 9517), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(10)'], {}), '(x.shape[0], 10)\n', (9501, 9517), False, 'import torch\n'), ((9543, 9570), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(10)'], {}), '(x.shape[0], 10)\n', (9554, 9570), False, 'import torch\n'), ((9597, 9624), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(10)'], {}), '(x.shape[0], 10)\n', (9608, 9624), False, 'import torch\n'), ((1301, 1330), 'spikingnet.poisson_spikes', 'poisson_spikes', (['x', 'self.scale'], {}), '(x, self.scale)\n', (1315, 1330), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((2355, 2384), 'spikingnet.poisson_spikes', 'poisson_spikes', (['x', 'self.scale'], {}), '(x, self.scale)\n', (2369, 2384), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((3468, 3497), 'spikingnet.poisson_spikes', 'poisson_spikes', (['x', 'self.scale'], {}), '(x, self.scale)\n', (3482, 3497), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((4913, 4942), 'spikingnet.poisson_spikes', 'poisson_spikes', (['x', 'self.scale'], {}), '(x, self.scale)\n', (4927, 4942), False, 'from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes\n'), ((11434, 11466), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'mtarget'], {}), '(output, mtarget)\n', (11449, 11466), True, 'import torch.nn.functional as F\n'), ((13206, 13250), 'torch.nn.functional.mse_loss', 
'F.mse_loss', (['output', 'mtarget'], {'reduction': '"""sum"""'}), "(output, mtarget, reduction='sum')\n", (13216, 13250), True, 'import torch.nn.functional as F\n'), ((16562, 16583), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (16581, 16583), False, 'from torchvision import datasets, transforms\n'), ((16861, 16882), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (16880, 16882), False, 'from torchvision import datasets, transforms\n'), ((17178, 17199), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (17197, 17199), False, 'from torchvision import datasets, transforms\n'), ((17463, 17484), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (17482, 17484), False, 'from torchvision import datasets, transforms\n')] |
# encoding=utf-8
from scipy.io import loadmat
import numpy as np
import pickle
"""
Split the LDA interaction matrix into positive and negative (row, col, label)
samples and persist them for later training.

matrix shape: (577,272)
positive samples: 1583
negative samples: 155361
"""
m = loadmat("lda_interMatrix.mat")
interMatrix = m['interMatrix']
print('matrix shape:', interMatrix.shape)

# Vectorized split: np.argwhere walks the matrix in row-major (C) order, so the
# sample ordering matches the original nested-loop scan. int() casts keep the
# tuples as plain Python ints, exactly as range()-based indices were.
pos_set = [(int(i), int(j), 1) for i, j in np.argwhere(interMatrix != 0)]
neg_set = [(int(i), int(j), 0) for i, j in np.argwhere(interMatrix == 0)]

print('positive samples:', len(pos_set))
print('negative samples:', len(neg_set))

# Persist the labelled index sets and the raw matrix for downstream use.
with open('data.pkl', 'wb') as file:
    pickle.dump((pos_set, neg_set), file)
np.save('matrix.npy', interMatrix)
| [
"scipy.io.loadmat",
"pickle.dump",
"numpy.save"
] | [((176, 206), 'scipy.io.loadmat', 'loadmat', (['"""lda_interMatrix.mat"""'], {}), "('lda_interMatrix.mat')\n", (183, 206), False, 'from scipy.io import loadmat\n'), ((695, 729), 'numpy.save', 'np.save', (['"""matrix.npy"""', 'interMatrix'], {}), "('matrix.npy', interMatrix)\n", (702, 729), True, 'import numpy as np\n'), ((654, 691), 'pickle.dump', 'pickle.dump', (['(pos_set, neg_set)', 'file'], {}), '((pos_set, neg_set), file)\n', (665, 691), False, 'import pickle\n')] |
#!/usr/bin/env python
"""
@author: <NAME>
"""
from spatialmath import SE3
from spatialmath.base.argcheck import getvector
from spatialmath.base import r2q
import numpy as np
import copy
# Optional dependency: matplotlib is only needed for named-color lookups in the
# Shape.color setter; _mpl records whether it is importable.
_mpl = False
try:
    from matplotlib import colors as mpc
    _mpl = True
except ImportError:  # pragma nocover
    pass
# Homogeneous transform (as a 4x4 ndarray) for a +90 degree rotation about x;
# used to re-orient cylinder shapes in Shape.to_dict()/fk_dict().
CONST_RX = SE3.Rx(np.pi / 2).A
class Shape:
    """Base class for the geometric primitives used by the simulator.

    A shape carries three 4x4 homogeneous-transform buffers that are
    allocated once and only ever written in place (``buf[:] = ...``) so
    that views held elsewhere remain valid:

    * ``_base`` -- the shape's offset in its parent frame (set via ``base``)
    * ``_wT``   -- the parent/world transform (set via ``wT``)
    * ``_sT``   -- the resulting shape transform ``_wT @ _base``

    :param base: local reference frame of the shape
    :type base: SE3
    :param color: (r, g, b) or (r, g, b, a) color, see the ``color`` setter
    :param stype: shape type identifier, e.g. ``"cylinder"`` or ``"axes"``
    """

    def __init__(self, base=None, color=None, stype=None):
        # These three are static attributes which can never be rebound.
        # If they are directly accessed and re-written, segmentation faults
        # will follow very soon after.
        # wT and sT cannot be accessed and set by users, but base can be
        # modified through its setter.
        self._wT = np.eye(4)
        self._sT = np.eye(4)
        self._sq = np.zeros(4)
        self._base = np.eye(4)
        self.base = base
        self.stype = stype
        self.v = np.zeros(6)
        self.color = color
        self._collision = False

    def copy(self):
        """
        Copy of Shape object

        :return: Shallow copy of Shape object
        :rtype: Shape
        """
        new = copy.copy(self)
        # Private ndarray buffers must not be shared between copies,
        # otherwise mutating one shape's pose would silently move the other.
        for k, v in self.__dict__.items():
            if k.startswith("_") and isinstance(v, np.ndarray):
                setattr(new, k, np.copy(v))
        return new

    def _to_hex(self, rgb):
        """Convert an (r, g, b) triple in [0, 1] to a 0xRRGGBB integer."""
        rgb = (np.array(rgb) * 255).astype(int)
        return int("0x%02x%02x%02x" % (rgb[0], rgb[1], rgb[2]), 16)

    def _fk_pose(self):
        """Return the shape's world pose as ``(translation, quaternion)`` lists.

        Cylinders are re-oriented by a +90 degree rotation about x
        (``CONST_RX``) to match the visualiser's convention. The quaternion
        is converted from spatialmath's scalar-first (w, x, y, z) order to
        the scalar-last (x, y, z, w) order expected by consumers.
        """
        if self.stype == "cylinder":
            fk = self._sT @ CONST_RX
        else:
            fk = self._sT
        q = r2q(fk[:3, :3]).tolist()
        return fk[:3, 3].tolist(), [q[1], q[2], q[3], q[0]]

    def to_dict(self):
        """
        to_dict() returns the shapes information in dictionary form

        :returns: All information about the shape
        :rtype: dict
        """
        t, q = self._fk_pose()
        return {
            "stype": self.stype,
            "t": t,
            "q": q,
            "v": self.v.tolist(),
            "color": self._to_hex(self.color[0:3]),
            "opacity": self.color[3],
        }

    def fk_dict(self):
        """
        fk_dict() outputs shapes pose in dictionary form

        :returns: The shape pose in translation and quaternion form
        :rtype: dict
        """
        t, q = self._fk_pose()
        return {"t": t, "q": q}

    def __repr__(self):  # pragma nocover
        return f"{self.stype},\n{self.base}"

    @property
    def collision(self):
        # Whether this shape participates in collision checking.
        return self._collision

    @property
    def v(self):
        # Spatial velocity (6-vector) of the shape.
        return self._v

    @v.setter
    def v(self, value):
        self._v = getvector(value, 6)

    @property
    def color(self):
        """
        shape.color returns a four length tuple representing (red, green, blue, alpha)
        where alpha represents transparency. Values returned are in the range [0-1].
        """
        return self._color

    @color.setter
    def color(self, value):
        """
        shape.color(new_color) sets the color of a shape.

        The color format is (red, green, blue, alpha).

        Color can be set with a three length list, tuple or array which
        will only set the (r, g, b) values and alpha will be set to maximum.

        Color can be set with a four length list, tuple or array which
        will set the (r, g, b, a) values.

        Note: the color is auto-normalising. If any value passed is greater than
        1.0 then all values will be normalised to the [0-1] range assuming the
        previous range was [0-255].
        """
        default_color = (0.95, 0.5, 0.25, 1.0)

        if isinstance(value, str):
            if _mpl:
                try:
                    value = mpc.to_rgba(value)
                except ValueError:
                    print(f"{value} is an invalid color name, using default color")
                    value = default_color
            else:  # pragma nocover
                value = default_color
                print(
                    "Color only supported when matplotlib is installed\n"
                    "Install using: pip install matplotlib"
                )
        elif value is None:
            value = default_color
        else:
            value = np.array(value)
            # Heuristic: any component above 1.0 means the caller used the
            # [0-255] range, so normalise the whole vector.
            if np.any(value > 1.0):
                value = value / 255.0
            if value.shape[0] == 3:
                # No alpha supplied: default to fully opaque.
                value = np.r_[value, 1.0]

        self._color = tuple(value)

    def set_alpha(self, alpha):
        """
        Convenience method to set the opacity/alpha value of the robots color.

        Values above 1.0 are interpreted as being in the [0-255] range.
        """
        if alpha > 1.0:
            alpha /= 255

        new_color = np.r_[self._color[:3], alpha]
        self._color = tuple(new_color)

    @property
    def wT(self):
        # NOTE(review): the getter returns the full shape transform _sT
        # (= _wT @ _base), not the raw _wT the setter stores -- presumably
        # intentional so callers read the effective world pose; confirm.
        return self._sT

    @wT.setter
    def wT(self, T):
        # In-place writes keep the buffer identity stable for external views.
        self._wT[:] = T
        self._sT[:] = self._wT @ self._base
        self._sq[:] = r2q(self._sT[:3, :3], order="xyzs")

    @property
    def base(self):
        return SE3(np.copy(self._base), check=False)

    @base.setter
    def base(self, T):
        if not isinstance(T, SE3):
            T = SE3(T)
        self._base[:] = T.A
        self._sT[:] = self._wT @ self._base
        self._sq[:] = r2q(self._sT[:3, :3], order="xyzs")
class Axes(Shape):
    """A set of coordinate axes centred on the local origin.

    :param length: the length of each axis
    :type length: float
    :param base: local reference frame of the shape
    :type base: SE3
    """

    def __init__(self, length, **kwargs):
        super().__init__(stype="axes", **kwargs)
        self.length = length

    @property
    def length(self):
        return self._length

    @length.setter
    def length(self, value):
        # Coerce to float so ints and numeric strings are accepted alike.
        self._length = float(value)

    def to_dict(self):
        """Return all information about the shape as a dictionary.

        Extends the base-class description with the axis length.

        :rtype: dict
        """
        data = super().to_dict()
        data["length"] = self.length
        return data
| [
"numpy.copy",
"numpy.eye",
"spatialmath.SE3",
"matplotlib.colors.to_rgba",
"spatialmath.base.argcheck.getvector",
"numpy.any",
"numpy.array",
"numpy.zeros",
"spatialmath.base.r2q",
"spatialmath.SE3.Rx",
"copy.copy"
] | [((325, 342), 'spatialmath.SE3.Rx', 'SE3.Rx', (['(np.pi / 2)'], {}), '(np.pi / 2)\n', (331, 342), False, 'from spatialmath import SE3\n'), ((734, 743), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (740, 743), True, 'import numpy as np\n'), ((763, 772), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (769, 772), True, 'import numpy as np\n'), ((792, 803), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (800, 803), True, 'import numpy as np\n'), ((825, 834), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (831, 834), True, 'import numpy as np\n'), ((905, 916), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (913, 916), True, 'import numpy as np\n'), ((1160, 1175), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (1169, 1175), False, 'import copy\n'), ((2989, 3008), 'spatialmath.base.argcheck.getvector', 'getvector', (['value', '(6)'], {}), '(value, 6)\n', (2998, 3008), False, 'from spatialmath.base.argcheck import getvector\n'), ((5287, 5322), 'spatialmath.base.r2q', 'r2q', (['self._sT[:3, :3]'], {'order': '"""xyzs"""'}), "(self._sT[:3, :3], order='xyzs')\n", (5290, 5322), False, 'from spatialmath.base import r2q\n'), ((5604, 5639), 'spatialmath.base.r2q', 'r2q', (['self._sT[:3, :3]'], {'order': '"""xyzs"""'}), "(self._sT[:3, :3], order='xyzs')\n", (5607, 5639), False, 'from spatialmath.base import r2q\n'), ((5377, 5396), 'numpy.copy', 'np.copy', (['self._base'], {}), '(self._base)\n', (5384, 5396), True, 'import numpy as np\n'), ((5503, 5509), 'spatialmath.SE3', 'SE3', (['T'], {}), '(T)\n', (5506, 5509), False, 'from spatialmath import SE3\n'), ((1934, 1949), 'spatialmath.base.r2q', 'r2q', (['fk[:3, :3]'], {}), '(fk[:3, :3])\n', (1937, 1949), False, 'from spatialmath.base import r2q\n'), ((2583, 2598), 'spatialmath.base.r2q', 'r2q', (['fk[:3, :3]'], {}), '(fk[:3, :3])\n', (2586, 2598), False, 'from spatialmath.base import r2q\n'), ((4594, 4609), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (4602, 4609), True, 'import numpy as np\n'), 
((4626, 4645), 'numpy.any', 'np.any', (['(value > 1.0)'], {}), '(value > 1.0)\n', (4632, 4645), True, 'import numpy as np\n'), ((1403, 1413), 'numpy.copy', 'np.copy', (['v'], {}), '(v)\n', (1410, 1413), True, 'import numpy as np\n'), ((1479, 1492), 'numpy.array', 'np.array', (['rgb'], {}), '(rgb)\n', (1487, 1492), True, 'import numpy as np\n'), ((4068, 4086), 'matplotlib.colors.to_rgba', 'mpc.to_rgba', (['value'], {}), '(value)\n', (4079, 4086), True, 'from matplotlib import colors as mpc\n')] |
from __future__ import print_function
import sys,inspect
import numpy as np
from flask import json
from collections import OrderedDict
class Evaluator:
    """Executes user-submitted Python code in a substituted namespace and
    records the UI widgets (text, labels, buttons, plots) the code creates.

    The evaluated code sees the caller-supplied ``functionList`` plus the
    widget helpers (``print``, ``print_``, ``label``, ``button``, ``plot``)
    as globals; every helper appends a JSON-serialisable description to
    ``generatedApp`` and registers a unique widget id in ``itemList``.
    """

    def __init__(self, functionList):
        """
        :param functionList: mapping of names to callables exposed as globals
            to the evaluated code (typically bound methods of a domain object)
        """
        self.generatedApp = []   # widget descriptions from the last runCode()
        self.hasPlot = False
        self.itemList = []       # widget ids issued so far (for uniqueness)
        self.functionList = functionList
        # Globals visible to evaluated code: caller functions + UI helpers.
        self.evalGlobals = functionList.copy()
        self.evalGlobals['print_'] = self.printer
        self.evalGlobals['print'] = self.print
        self.evalGlobals['button'] = self.button
        self.evalGlobals['label'] = self.label
        self.evalGlobals['plot'] = self.plot

    def toUnique(self, identifier, suffix=0):
        """Make an ID string unique among the issued widget ids.

        Recursively appends an increasing numeric suffix until unused.

        :returns: the (possibly suffixed) unique id string
        """
        newId = identifier + str(suffix) if suffix else identifier
        if newId in self.itemList:
            return self.toUnique(identifier, suffix + 1)
        return newId

    def print(self, *args):
        """
        Replacement for the builtin ``print`` inside evaluated code, used to
        capture console output as a JSON widget instead of writing to stdout.
        Each line of execution is stored as a JSON object, which increases
        flexibility: outputs other than text (images, widgets) can be added.
        """
        name = self.toUnique("print")
        self.generatedApp.append(
            {"type": "text", "name": name, "value": [str(a) for a in args]})
        self.itemList.append(name)
        return name

    def printer(self, txt, name="print"):
        """Add a styled text-span widget showing ``txt``; return its id."""
        name = self.toUnique(name)
        self.generatedApp.append(
            {"type": "span", "name": name, "class": "row well", "value": str(txt)})
        self.itemList.append(name)
        return name

    def label(self, txt, name="print", html_class=""):
        """Add a label widget showing ``txt``; return its id."""
        name = self.toUnique(name)
        self.generatedApp.append(
            {"type": "label", "name": name, "class": html_class, "value": str(txt)})
        self.itemList.append(name)
        return name

    def button(self, label, endpoint, displayType="display_number", **kwargs):
        """Add a button that POSTs to ``endpoint`` and displays the response.

        Keyword options: ``name`` (widget id), ``target`` (id of the widget
        that shows the response), ``stacking`` (for ``update-plot``).

        :returns: the unique id of the button widget
        """
        name = self.toUnique(kwargs.get("name", "button-id"))
        self.itemList.append(name)
        targetName = kwargs.get('target', name + '-label')
        if 'target' not in kwargs:  # If a target was not specified, make up a name
            targetName = self.toUnique(name + '-label')
        successOpts = {"datapoint": 'result', "type": displayType, "target": targetName}
        if displayType == 'update-plot':  # specify the stacking of data
            successOpts['stacking'] = kwargs.get('stacking', 'xy')
        self.generatedApp.append(
            {"type": "button", "name": name, "label": label, "fetched_value": "",
             "action": {"type": "POST", "endpoint": endpoint, "success": successOpts}})
        if 'target' not in kwargs:  # If a target was not specified, make a label.
            if displayType in ["display_number", "display"]:
                self.label('', targetName)
        return name

    # Plots
    def plot(self, x, y, **kwargs):
        """Add a plot widget for the data series ``x``/``y``; return its id."""
        name = kwargs.get('name', self.toUnique('myPlot'))
        # jqplot requires [x, y] pairs, not separate datasets.
        self.generatedApp.append(
            {"type": "plot", "name": name, "data": [np.array([x, y]).T.tolist()]})
        self.itemList.append(name)
        return name

    def runCode(self, code):
        """Compile and execute ``code``; return the generated widget list.

        SECURITY NOTE: this calls ``exec`` on caller-supplied source with no
        sandboxing beyond the substituted globals -- never expose it to
        untrusted input.
        """
        self.generatedApp = []
        self.itemList = []
        submitted = compile(code.encode(), '<string>', mode='exec')
        self.exec_scope = self.evalGlobals.copy()
        try:
            exec(submitted, self.exec_scope)
        except Exception as e:
            print(str(e))
        return self.getApp()

    def getApp(self):
        """Return the widget list generated by the last runCode()."""
        return self.generatedApp

    #### Extract Doc Strings ####
    def getDocs(self):
        """Return docstring and argument metadata for the exposed functions.

        Dunder-prefixed names are skipped; callables that ``inspect`` cannot
        introspect are reported to stdout and omitted from the result.

        :returns: list of ``{'doc_string', 'name', 'args'}`` dicts
        """
        flist = []
        for fname in self.functionList:
            if fname.startswith('__'):
                continue
            func = self.functionList[fname]
            try:
                # getdoc returns None for undocumented callables; report ''.
                doc = inspect.getdoc(func) or ''
                # inspect.getargspec was removed in Python 3.11;
                # getfullargspec is the supported replacement.
                arglist = inspect.getfullargspec(func).args
            except Exception as e:
                print(fname, e)
                continue
            # Bound methods still list 'self' first; drop it, but tolerate
            # plain functions that have no 'self' parameter.
            if 'self' in arglist:
                arglist.remove('self')
            flist.append({'doc_string': str(doc), 'name': fname, 'args': arglist})
        return flist
| [
"numpy.array",
"inspect.getdoc",
"inspect.getargspec"
] | [((3332, 3368), 'inspect.getdoc', 'inspect.getdoc', (['self.functionList[a]'], {}), '(self.functionList[a])\n', (3346, 3368), False, 'import sys, inspect\n'), ((3383, 3423), 'inspect.getargspec', 'inspect.getargspec', (['self.functionList[a]'], {}), '(self.functionList[a])\n', (3401, 3423), False, 'import sys, inspect\n'), ((2709, 2725), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (2717, 2725), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import face_recognition
import datetime
from multiprocessing import Process
import Data.DataProcessing as dp
from os import path, mkdir
import MotionDetection
from datetime import datetime
import Database.Commands as db_commands
class FaceAnalyzer():
    """Consumes motion frames from the shared MotionDetection.motions queue in a
    background process, detects faces in them, matches them against the known
    encodings loaded via Data.DataProcessing, saves unknown faces to disk and
    logs every recognition to the database."""

    def __init__(self, tolerance=0.6, model='haar'):
        """
        Args:
            tolerance (float): maximum face-encoding distance accepted as a match
                (passed to face_recognition.compare_faces).
            model (str): detector backend — 'haar' (OpenCV cascade) or 'hog'
                (face_recognition's HOG detector).
        """
        self.__worker = None
        self.model = model
        self.tolerance = tolerance
        self.__unknown_faces_dir = r'Faces/Unknown'
        self.__encodings = dp.load_encodings()
        self.current_face = None
        self.current_encoding = None
        self.prepare_workspace()

    def start(self):
        """Spawn the worker process that consumes the shared motion queue."""
        self.__worker = Process(target=self.process_motions, args=(MotionDetection.motions,))
        self.__worker.start()

    def prepare_workspace(self):
        """Create the 'Faces' and 'Faces/Unknown' directories if they are missing."""
        # __unknown_faces_dir[:-8] strips the trailing '/Unknown' to get the parent dir.
        if not path.exists(self.__unknown_faces_dir[:-8]):
            mkdir(self.__unknown_faces_dir[:-8])
            mkdir(self.__unknown_faces_dir)
        else:
            if not path.exists(self.__unknown_faces_dir):
                mkdir(self.__unknown_faces_dir)

    def process_motions(self, motions):
        """Worker loop: poll the motion queue forever; for each motion frame detect
        faces, classify each as known/unknown, then persist unknown crops and log
        all recognitions with a timestamp."""
        while True:
            if not motions.empty():
                motion = motions.get()
                found_faces, faces = self.find_faces(motion)
                if found_faces:
                    unknown_faces = []
                    log_faces_recognition = []
                    for face in faces:
                        face_encoding = face_recognition.face_encodings(face)
                        face_encoding = np.asarray(face_encoding).flatten()
                        if len(face_encoding) == 0:
                            continue  # no usable encoding for this crop
                        date_time_now = datetime.now()
                        date_now_string = date_time_now.strftime("%d/%m/%Y")
                        time_now_string = date_time_now.strftime("%H:%M:%S")
                        known_encodings_list = np.asarray(list(self.__encodings.values()))
                        results = face_recognition.compare_faces(known_encodings_list, face_encoding, self.tolerance)
                        is_known = np.any(results)
                        if not is_known:
                            unknown_faces.append(face)
                            is_success, image_buf_arr = cv2.imencode(".jpg", cv2.resize(face,(200, 200)))
                            log_faces_recognition.append((date_now_string, time_now_string, "Unknown", image_buf_arr))
                        else:
                            # Here sometimes exception occurs
                            try:
                                known_name = list(self.__encodings.keys())[np.where(results)[0][0]]
                                print(f'Face known [{known_name}] (Remove this message after release)')
                                is_success, image_buf_arr = cv2.imencode(".jpg", cv2.resize(face,(200, 200)))
                                log_faces_recognition.append((date_now_string, time_now_string, known_name, image_buf_arr))
                            except Exception:
                                # BUG FIX: narrowed from a bare `except:` so that
                                # KeyboardInterrupt/SystemExit can still stop the worker.
                                pass
                    if len(unknown_faces) > 0:
                        self.save_unknown_faces(unknown_faces)
                    if len(log_faces_recognition) > 0:
                        self.save_log_faces_to_database(log_faces_recognition)

    def find_faces(self, motion):
        """Detect face crops in a motion frame.

        The detector is not rotation invariant, so the cropped frame is tried in
        20-degree rotation steps; the first rotation yielding detections wins.

        Returns:
            (True, [face_crops]) on success, (False, None) when nothing is found.
        """
        img = motion.crop_box(5)

        def rotate_image(image, angle):
            # Rotate around the image centre, keeping the original frame size.
            image_center = tuple(np.array(image.shape[1::-1]) / 2)
            rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
            result = None
            try:
                result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
            except Exception:
                # BUG FIX: narrowed from a bare `except:`; a failed warp simply yields None.
                pass
            return result

        # Due to the fact that haardetector is not rotation invariant, we try to rotate images on 360 degrees to find possibly rotated faces
        for angle in range(0, 360, 20):
            rotated = rotate_image(img, angle)
            # Check if frame is big enough to analyze
            if rotated is None or rotated.shape[0] * rotated.shape[1] < 800 * 10:
                continue
            faces = []
            if self.model == 'haar':
                face_cascade = cv2.CascadeClassifier(r'./Cascades/haarcascade_frontalface_default.xml')
                face_boxes = face_cascade.detectMultiScale(rotated, 1.1, minNeighbors=4)
                if len(face_boxes) == 0:
                    continue
                faces = [rotated[y:y + h, x:x + w] for (x, y, w, h) in face_boxes]
            elif self.model == 'hog':
                face_boxes = face_recognition.face_locations(rotated)
                if len(face_boxes) == 0:
                    continue
                faces = [rotated[top:bottom, left:right] for (top, right, bottom, left) in face_boxes]
            return (True, faces)
        return (False, None)

    def save_unknown_faces(self, faces):
        """Write each unrecognised face crop to the Unknown directory, named by a
        colon-free current timestamp."""
        print('Saving unknown faces')
        for f in faces:
            filename = rf'{self.__unknown_faces_dir}/{str(datetime.now())[11:-7].replace(":", ".")}.jpg'
            cv2.imwrite(filename, f)

    def save_log_faces_to_database(self, log_faces_recognition):
        """Persist the (date, time, name, jpeg-buffer) recognition tuples."""
        db_commands.insert_into_log(log_faces_recognition)
| [
"os.path.exists",
"cv2.imwrite",
"cv2.warpAffine",
"cv2.getRotationMatrix2D",
"face_recognition.face_locations",
"cv2.resize",
"numpy.where",
"multiprocessing.Process",
"numpy.asarray",
"numpy.any",
"numpy.array",
"datetime.datetime.now",
"os.mkdir",
"face_recognition.compare_faces",
"fa... | [((524, 543), 'Data.DataProcessing.load_encodings', 'dp.load_encodings', ([], {}), '()\n', (541, 543), True, 'import Data.DataProcessing as dp\n'), ((699, 768), 'multiprocessing.Process', 'Process', ([], {'target': 'self.process_motions', 'args': '(MotionDetection.motions,)'}), '(target=self.process_motions, args=(MotionDetection.motions,))\n', (706, 768), False, 'from multiprocessing import Process\n'), ((5418, 5468), 'Database.Commands.insert_into_log', 'db_commands.insert_into_log', (['log_faces_recognition'], {}), '(log_faces_recognition)\n', (5445, 5468), True, 'import Database.Commands as db_commands\n'), ((852, 894), 'os.path.exists', 'path.exists', (['self.__unknown_faces_dir[:-8]'], {}), '(self.__unknown_faces_dir[:-8])\n', (863, 894), False, 'from os import path, mkdir\n'), ((909, 945), 'os.mkdir', 'mkdir', (['self.__unknown_faces_dir[:-8]'], {}), '(self.__unknown_faces_dir[:-8])\n', (914, 945), False, 'from os import path, mkdir\n'), ((959, 990), 'os.mkdir', 'mkdir', (['self.__unknown_faces_dir'], {}), '(self.__unknown_faces_dir)\n', (964, 990), False, 'from os import path, mkdir\n'), ((3645, 3694), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['image_center', 'angle', '(1.0)'], {}), '(image_center, angle, 1.0)\n', (3668, 3694), False, 'import cv2\n'), ((5314, 5338), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'f'], {}), '(filename, f)\n', (5325, 5338), False, 'import cv2\n'), ((1026, 1063), 'os.path.exists', 'path.exists', (['self.__unknown_faces_dir'], {}), '(self.__unknown_faces_dir)\n', (1037, 1063), False, 'from os import path, mkdir\n'), ((1082, 1113), 'os.mkdir', 'mkdir', (['self.__unknown_faces_dir'], {}), '(self.__unknown_faces_dir)\n', (1087, 1113), False, 'from os import path, mkdir\n'), ((3766, 3840), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'rot_mat', 'image.shape[1::-1]'], {'flags': 'cv2.INTER_LINEAR'}), '(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)\n', (3780, 3840), False, 'import cv2\n'), ((4408, 
4479), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""./Cascades/haarcascade_frontalface_default.xml"""'], {}), "('./Cascades/haarcascade_frontalface_default.xml')\n", (4429, 4479), False, 'import cv2\n'), ((3588, 3616), 'numpy.array', 'np.array', (['image.shape[1::-1]'], {}), '(image.shape[1::-1])\n', (3596, 3616), True, 'import numpy as np\n'), ((4800, 4840), 'face_recognition.face_locations', 'face_recognition.face_locations', (['rotated'], {}), '(rotated)\n', (4831, 4840), False, 'import face_recognition\n'), ((1521, 1558), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['face'], {}), '(face)\n', (1552, 1558), False, 'import face_recognition\n'), ((1772, 1786), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1784, 1786), False, 'from datetime import datetime\n'), ((2074, 2162), 'face_recognition.compare_faces', 'face_recognition.compare_faces', (['known_encodings_list', 'face_encoding', 'self.tolerance'], {}), '(known_encodings_list, face_encoding, self.\n tolerance)\n', (2104, 2162), False, 'import face_recognition\n'), ((2194, 2209), 'numpy.any', 'np.any', (['results'], {}), '(results)\n', (2200, 2209), True, 'import numpy as np\n'), ((1600, 1625), 'numpy.asarray', 'np.asarray', (['face_encoding'], {}), '(face_encoding)\n', (1610, 1625), True, 'import numpy as np\n'), ((2388, 2416), 'cv2.resize', 'cv2.resize', (['face', '(200, 200)'], {}), '(face, (200, 200))\n', (2398, 2416), False, 'import cv2\n'), ((2955, 2983), 'cv2.resize', 'cv2.resize', (['face', '(200, 200)'], {}), '(face, (200, 200))\n', (2965, 2983), False, 'import cv2\n'), ((5254, 5268), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5266, 5268), False, 'from datetime import datetime\n'), ((2743, 2760), 'numpy.where', 'np.where', (['results'], {}), '(results)\n', (2751, 2760), True, 'import numpy as np\n')] |
import os
import numpy as np
import cv2 as cv
m = {}
# Index every image by the sum of its grayscale pixels; any later file with the
# same sum is treated as a duplicate and an `rm` command for it is printed.
# NOTE(review): a pixel-sum is a weak checksum — unrelated images can collide.
for directory in ["rotgen/ok", "rotgen/ko"]:  # renamed from `dir`, which shadowed the builtin
    for entry in os.listdir(directory):
        file = "%s/%s" %(directory,entry)
        img0 = cv.cvtColor(cv.imread(file), cv.COLOR_BGR2GRAY)
        n = np.sum(img0)
        if n in m:
            m[n].append(file)
            print("rm -v %s" % file)
        else:
            m[n] = [file]

#for k in m.keys():
#    if len(m[k]) > 1:
#        print("cd rotgen/ko ; open", " ".join(m[k]), "; cd -")
| [
"numpy.sum",
"os.listdir",
"cv2.imread"
] | [((106, 121), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (116, 121), False, 'import os\n'), ((230, 242), 'numpy.sum', 'np.sum', (['img0'], {}), '(img0)\n', (236, 242), True, 'import numpy as np\n'), ((182, 197), 'cv2.imread', 'cv.imread', (['file'], {}), '(file)\n', (191, 197), True, 'import cv2 as cv\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 12 11:00:26 2022
@author: jac
"""
from numpy import array
from pandas import read_csv, unique
from ProxyUQ import ProxyUQ
from pyplotfuncmc import PlotSolnsMC
cat = read_csv("AtmosphericRivers/AR_Cat.csv")

# Aggregate one intensity value per year (sum of 'IWV max' over that year's events).
ARivers = []
for y in unique(cat['Year']):
    tmp = cat.loc[cat['Year'] == y]
    intensity = 0
    for index, row in tmp.iterrows():
        #intensity += row['Hours']*row['Category']
        intensity += row['IWV max']
    #print("%d - %f" % (y,intensity))
    ARivers += [[y,intensity]]
ARivers = array(ARivers)

# BUG FIX: `rt` is consumed below, but the call producing it had been commented
# out, so this script crashed with NameError. The call is restored.
rt = ProxyUQ( "AtmosphericRivers/LL14", p_list=[1,2,3,4,5,6], y_by=1)
ax, quan = PlotSolnsMC( rt[0], rt[4])
ax.plot( 1950-ARivers[:,0], ARivers[:,1]/5000+0.1, '-')
ax.set_ylabel("Al/Si")
ax.set_xlabel("y BP")
| [
"pyplotfuncmc.PlotSolnsMC",
"numpy.array",
"pandas.unique",
"pandas.read_csv"
] | [((241, 281), 'pandas.read_csv', 'read_csv', (['"""AtmosphericRivers/AR_Cat.csv"""'], {}), "('AtmosphericRivers/AR_Cat.csv')\n", (249, 281), False, 'from pandas import read_csv, unique\n'), ((305, 324), 'pandas.unique', 'unique', (["cat['Year']"], {}), "(cat['Year'])\n", (311, 324), False, 'from pandas import read_csv, unique\n'), ((584, 598), 'numpy.array', 'array', (['ARivers'], {}), '(ARivers)\n', (589, 598), False, 'from numpy import array\n'), ((685, 710), 'pyplotfuncmc.PlotSolnsMC', 'PlotSolnsMC', (['rt[0]', 'rt[4]'], {}), '(rt[0], rt[4])\n', (696, 710), False, 'from pyplotfuncmc import PlotSolnsMC\n')] |
import random
import csv
from typing import Iterator, Union, List
from torch.utils.data.sampler import Sampler
import numpy as np
from ..datapaths import DATAPATHS_MAPPING
class LengthTrainSampler(Sampler):
    def __init__(
        self,
        source: str,
        field: str,
        max_len: float,  # 16K * 320
        max_pool_difference: float,  # 16K * 0.3
        min_pool_size: int = 512,
        num_batches: Union[int, None] = None,
    ):
        """
        This batch_sampler groups the source into sample pools of examples with similar length meeting criterias defined
        by 'max_pool_difference' and 'min_pool_size'. Batches of close to, but never more than, 'max_len', are
        constructed by first sampling a pool and then sampling each batch from within that pool.
        Args:
            source (object): Dataset for which the sampler will be used.
            field (str): The field containing the relevant length information.
            max_len (float): The maximum size of the batch in seconds.
            max_pool_difference (float): The maximum length difference between shortest and longest sample in a pool.
            min_pool_size (float): The minimum number of examples in a pool. Overwrites max_pool_difference.
            num_batches (int or None): Samples num_batches (with replacement if necessary) instead of running a standard epoch.
        """
        self.source = source
        self.field = field
        self.max_len = max_len
        self.max_pool_difference = max_pool_difference
        self.min_pool_size = min_pool_size
        self.num_batches = num_batches
        self.buffer = []  # only used when num_batches is not None
        self.source_filepath = DATAPATHS_MAPPING[source] if source in DATAPATHS_MAPPING else source
        self.lengths = self.load_lengths(self.source_filepath)
        self.pools = self.create_sample_pools(max_pool_difference, min_pool_size)
        self.batches = self.sample_batches()
        # BUG FIX: was a strict `<`, which rejected an example exactly max_len
        # long even though it can legally form a batch on its own
        # (the packing condition below is `batch_len + l <= max_len`).
        assert self.lengths.max() <= self.max_len, "One or more examples are longer than the maximum length."

    def load_lengths(self, source_filepath):
        """
        Loads the example lengths into an array with same order as the examples of the source dataset.
        """
        with open(source_filepath, newline='') as source_file_buffer:
            reader = csv.DictReader(source_file_buffer)
            lengths = [int(row[self.field]) for row in reader]
        return np.array(lengths)

    def create_sample_pools(self, max_diff, min_size):
        """Creates the sample pools. Can be used to change to the sampling criteria without creating a new sampler."""
        start, end = 0, 0
        sorted_idxs = np.argsort(self.lengths)
        sorted_lens = self.lengths[sorted_idxs]
        pools = []
        while end != len(self.lengths):
            base_len = sorted_lens[start]
            deltas = sorted_lens - base_len
            pool_size = np.logical_and(0 <= deltas, deltas < max_diff).sum()
            # min_size wins over max_diff; a too-small remainder is merged into this pool
            end = min(max(start + min_size, start + pool_size), len(self.lengths))
            if (len(self.lengths) - end) < min_size:
                end = len(self.lengths)
            pools.append(sorted_idxs[start:end].tolist())
            start = end
        return pools

    def sample_batches(self):
        """Sample batches from the pools."""
        if self.num_batches is not None:
            if len(self.buffer) >= self.num_batches:
                batches = self.buffer[:self.num_batches]
                self.buffer = self.buffer[self.num_batches:]
                return batches
        ordered_idxs = np.concatenate([random.sample(p, k=len(p)) for p in self.pools])  # shuffle each pool internally
        batch, batches, batch_len = [], [], 0
        for idx in ordered_idxs:
            l = self.lengths[idx]
            if batch_len + l <= self.max_len:
                batch_len += l
                batch.append(idx)
            else:
                batches.append(batch)
                batch = [idx]
                batch_len = l
        if batch:
            # BUG FIX: the trailing batch used to be silently dropped each epoch,
            # so some examples were never trained on.
            batches.append(batch)
        random.shuffle(batches)  # shuffle the order of batches
        if self.num_batches is not None:
            self.buffer += batches
            return self.sample_batches()
        return batches

    def __iter__(self) -> Iterator[List[int]]:
        try:
            for batch in self.batches:
                yield batch
        finally:
            self.batches = self.sample_batches()  # to ensure batches are resampled if interrupted

    def __len__(self):
        return len(self.batches)
class LengthEvalSampler(Sampler):
    def __init__(
        self,
        source: str,
        field: str,
        max_len: float
    ):
        """
        Deterministic length-bucketed batch sampler for evaluation: examples are
        sorted by length and greedily packed into batches whose total length
        never exceeds 'max_len'.
        Args:
            source (object): Dataset for which the sampler will be used.
            field (str): The field containing the relevant length information.
            max_len (float): The maximum size of the batch in seconds.
        """
        self.source = source
        self.field = field
        self.max_len = max_len
        self.source_filepath = DATAPATHS_MAPPING[source] if source in DATAPATHS_MAPPING else source
        self.lengths = self.load_lengths(self.source_filepath)
        self.batches = self.sample_batches()

    def load_lengths(self, source_filepath):
        """Loads the example lengths into an array with same order as the examples of the source dataset."""
        with open(source_filepath, newline='') as source_file_buffer:
            reader = csv.DictReader(source_file_buffer)
            lengths = [int(row[self.field]) for row in reader]
        return np.array(lengths)

    def sample_batches(self):
        """Greedily pack the length-sorted example indices into batches."""
        sorted_idxs = np.argsort(self.lengths)
        batch, batches, batch_len = [], [], 0
        for idx in sorted_idxs:
            l = self.lengths[idx]
            if batch_len + l <= self.max_len:
                batch_len += l
                batch.append(idx)
            else:
                batches.append(batch)
                batch = [idx]
                batch_len = l
        if batch:
            # BUG FIX: the trailing batch was previously dropped, so the longest
            # examples were silently never evaluated.
            batches.append(batch)
        return batches

    def __iter__(self) -> Iterator[List[int]]:
        for batch in self.batches:
            yield batch

    def __len__(self):
        return len(self.batches)
"csv.DictReader",
"random.shuffle",
"numpy.logical_and",
"numpy.argsort",
"numpy.array"
] | [((2475, 2492), 'numpy.array', 'np.array', (['lengths'], {}), '(lengths)\n', (2483, 2492), True, 'import numpy as np\n'), ((2716, 2740), 'numpy.argsort', 'np.argsort', (['self.lengths'], {}), '(self.lengths)\n', (2726, 2740), True, 'import numpy as np\n'), ((4413, 4436), 'random.shuffle', 'random.shuffle', (['batches'], {}), '(batches)\n', (4427, 4436), False, 'import random\n'), ((6156, 6173), 'numpy.array', 'np.array', (['lengths'], {}), '(lengths)\n', (6164, 6173), True, 'import numpy as np\n'), ((6272, 6296), 'numpy.argsort', 'np.argsort', (['self.lengths'], {}), '(self.lengths)\n', (6282, 6296), True, 'import numpy as np\n'), ((2361, 2395), 'csv.DictReader', 'csv.DictReader', (['source_file_buffer'], {}), '(source_file_buffer)\n', (2375, 2395), False, 'import csv\n'), ((6042, 6076), 'csv.DictReader', 'csv.DictReader', (['source_file_buffer'], {}), '(source_file_buffer)\n', (6056, 6076), False, 'import csv\n'), ((2959, 3005), 'numpy.logical_and', 'np.logical_and', (['(0 <= deltas)', '(deltas < max_diff)'], {}), '(0 <= deltas, deltas < max_diff)\n', (2973, 3005), True, 'import numpy as np\n')] |
from collections import namedtuple
from itertools import islice
import numpy as np
import pandas as pd
from dataclasses import dataclass
@dataclass
class BinningInfo(object):
    """Describes how a 1D variable was binned: the (min, max) extents spanned,
    the step size between bins, the number of bins, and the array of bin
    indicies (0..num_bins-1)."""
    variable_extents: tuple
    step: float
    num_bins: int
    bin_indicies: np.ndarray
def build_spanning_grid_matrix(x_values, y_values, debug_print=False):
    """Build a 2D grid of (x, y) pairs spanning x_values along axis 0 and y_values along axis 1.
    For example, used to build a grid of position points from xbins and ybins.

    Returns:
        all_entries_matrix: (len(x_values), len(y_values), 2) array of (x, y) pairs.
        flat_all_entries_matrix: list of (x, y) tuples, x-major order.
        original_data_shape: (len(x_values), len(y_values)).
    """
    num_rows = len(y_values)
    num_cols = len(x_values)
    original_data_shape = (num_cols, num_rows) # original_position_data_shape: (64, 29)
    if debug_print:
        print(f'original_position_data_shape: {original_data_shape}')
    # [i, j] == x_values[i]: tile the x column across all rows.
    x_only_matrix = np.tile(np.reshape(x_values, (-1, 1)), (1, num_rows))
    flat_x_only_matrix = np.reshape(x_only_matrix, (-1, 1))
    if debug_print:
        print(f'np.shape(x_only_matrix): {np.shape(x_only_matrix)}, np.shape(flat_x_only_matrix): {np.shape(flat_x_only_matrix)}')
    # [i, j] == y_values[j]: tile the y row down all columns.
    y_only_matrix = np.tile(np.reshape(y_values, (1, -1)), (num_cols, 1))
    flat_y_only_matrix = np.reshape(y_only_matrix, (-1, 1))
    # pair the flattened x and y components into (x, y) tuples
    flat_all_entries_matrix = [tuple(np.concatenate((an_x, a_y))) for (an_x, a_y) in zip(flat_x_only_matrix, flat_y_only_matrix)]
    # reconstitute the grid shape with a trailing (x, y) axis:
    all_entries_matrix = np.reshape(flat_all_entries_matrix, (original_data_shape[0], original_data_shape[1], 2))
    if debug_print:
        print(f'np.shape(all_positions_matrix): {np.shape(all_entries_matrix)}')
        print(f'flat_all_positions_matrix[0]: {flat_all_entries_matrix[0]}\nall_positions_matrix[0,0,:]: {all_entries_matrix[0,0,:]}')
    return all_entries_matrix, flat_all_entries_matrix, original_data_shape
def safe_get(list, index, fallback_value):
    """List analogue of dict.get(key, fallback): return list[index], or
    fallback_value when the index is out of range.

    Args:
        list: a list-like object.
        index: an index into the list (negative indices work as usual).
        fallback_value: returned when indexing raises IndexError.
    """
    result = fallback_value
    try:
        result = list[index]
    except IndexError:
        pass  # out of range — keep the fallback
    return result
def safe_pandas_get_group(dataframe_group, key):
    """Like GroupBy.get_group(key), but returns an empty dataframe (with the
    source's columns) instead of raising when the key has no group."""
    if key not in dataframe_group.groups.keys():
        # empty frame preserving the grouped dataframe's columns
        source_df = dataframe_group.obj
        return source_df.drop(source_df.index)
    return dataframe_group.get_group(key)
# class MatrixFlattenTransformer(object):
# """ Supposed to allow easy transformation of data from a flattened representation to the original.
# Usage:
# trans = MatrixFlattenTransformer(original_data_shape)
# test_all_positions_matrix = trans.unflatten(flat_all_positions_matrix)
# print(f'np.shape(test_all_positions_matrix): {np.shape(test_all_positions_matrix)}')
# """
# """ TODO: does not yet work. for MatrixFlattenTransformer."""
# def __init__(self, original_data_shape):
# super(MatrixFlattenTransformer, self).__init__()
# self.original_data_shape = original_data_shape
# def flatten(self, data):
# data_shape = np.shape(data)
# original_flat_shape = np.prod(self.original_data_shape)
# # assert np.shape(data) == self.original_data_shape, f"data passed in to flatten (with shape {np.shape(data)}) is not equal to the original data shape: {self.original_data_shape}"
# assert data_shape == original_flat_shape, f"data passed in to flatten (with shape {data_shape}) is not equal to the original shape's number of items (shape: {self.original_data_shape}, original_flat_shape: {original_flat_shape}"
# return np.reshape(data, (-1, 1))
# def unflatten(self, flat_data):
# flat_data_shape = np.shape(flat_data)
# original_data_shape_ndim = len(self.original_data_shape)
# # assert (flat_data_shape[:original_data_shape_ndim] == self.original_data_shape), f"data passed in to unflatten (with shape {flat_data_shape}) must match the original data shape ({self.original_data_shape}), at least up to the number of dimensions in the original"
# additional_dimensions = flat_data_shape[original_data_shape_ndim:]
# return np.reshape(flat_data, (self.original_data_shape[0], self.original_data_shape[1], *additional_dimensions))
def build_spanning_bins(variable_values, max_bin_size:float, debug_print=False):
    """ DEPRICATED! Build bin edges spanning variable_values with bins no wider
    than max_bin_size; out_digitized_variable_bins include both endpoints (bin edges).

    NOTE: this function unconditionally raises DeprecationWarning on entry —
    everything after the `raise` below is dead code kept only for reference.
    Use compute_spanning_bins(...) instead.

    Args:
        variable_values ([type]): values whose extents define the binned range.
        max_bin_size (float): maximum allowed bin width.
        debug_print (bool, optional): [description]. Defaults to False.
    Returns:
        out_digitized_variable_bins [type]: bin edges, including both endpoints.
        out_binning_info [BinningInfo]: contains info about how the binning was conducted
    """
    raise DeprecationWarning
    # --- UNREACHABLE from here on (see the raise above) ---
    # compute extents:
    curr_variable_extents = (np.nanmin(variable_values), np.nanmax(variable_values))
    num_subdivisions = int(np.ceil((curr_variable_extents[1] - curr_variable_extents[0])/max_bin_size)) # get the next integer size above float_bin_size
    actual_subdivision_step_size = (curr_variable_extents[1] - curr_variable_extents[0]) / float(num_subdivisions) # the actual exact size of the bin
    if debug_print:
        print(f'for max_bin_size: {max_bin_size} -> num_subdivisions: {num_subdivisions}, actual_subdivision_step_size: {actual_subdivision_step_size}')
    # out_bin_indicies = np.arange(num_subdivisions)
    out_binning_info = BinningInfo(curr_variable_extents, actual_subdivision_step_size, num_subdivisions, np.arange(num_subdivisions))
    out_digitized_variable_bins = np.linspace(curr_variable_extents[0], curr_variable_extents[1], num_subdivisions, dtype=float)#.astype(float)
    assert out_digitized_variable_bins[-1] == out_binning_info.variable_extents[1], "out_digitized_variable_bins[-1] should be the maximum variable extent!"
    assert out_digitized_variable_bins[0] == out_binning_info.variable_extents[0], "out_digitized_variable_bins[0] should be the minimum variable extent!"
    # All above arge the bin_edges
    return out_digitized_variable_bins, out_binning_info
def compute_spanning_bins(variable_values, num_bins:int=None, bin_size:float=None):
    """Build bin edges spanning the extents of variable_values.

    Exactly one of num_bins / bin_size must be given:
      * num_bins: produce that many edges between min and max (inclusive).
      * bin_size: step from min upward in fixed increments of bin_size.

    Returns:
        (bin_edges, BinningInfo)

    Usage:
        xbin, bin_info = compute_spanning_bins(pos_df.x.to_numpy(), bin_size=active_config.computation_config.grid_bin[0]) # bin_size mode
        xbin, bin_info = compute_spanning_bins(pos_df.x.to_numpy(), num_bins=num_bins) # num_bins mode
    """
    assert (num_bins is None) or (bin_size is None), 'You cannot constrain both num_bins AND bin_size. Specify only one or the other.'
    assert (num_bins is not None) or (bin_size is not None), 'You must specify either the num_bins XOR the bin_size.'
    extents = (np.nanmin(variable_values), np.nanmax(variable_values))
    if num_bins is not None:
        ## Binning with Fixed Number of Bins:
        bin_count = num_bins
        edges, step = np.linspace(extents[0], extents[1], num=num_bins, retstep=True) # binning of x position
    elif bin_size is not None:
        ## Binning with Fixed Bin Sizes:
        step = bin_size
        # NOTE: np.arange excludes the stop value except when floating point
        # round-off extends the result by one element.
        edges = np.arange(extents[0], (extents[1] + step), step, ) # binning of x position
        bin_count = len(edges)
    else:
        raise ValueError
    return edges, BinningInfo(extents, step, bin_count, np.arange(bin_count))
def compute_position_grid_size(*any_1d_series, num_bins:tuple):
    """ Computes the required bin_sizes from the required num_bins (for each dimension independently)
    Usage:
        out_grid_bin_size, out_bins, out_bins_infos = compute_position_grid_size(curr_kdiba_pipeline.sess.position.x, curr_kdiba_pipeline.sess.position.y, num_bins=(64, 64))
        active_grid_bin = tuple(out_grid_bin_size)
        print(f'active_grid_bin: {active_grid_bin}') # (3.776841861770752, 1.043326930905373)
    """
    assert (len(any_1d_series)) == len(num_bins), f'(len(other_1d_series)) must be the same length as the num_bins tuple! But (len(other_1d_series)): {(len(any_1d_series))} and len(num_bins): {len(num_bins)}!'
    out_bins = []
    out_bins_info = []
    out_bin_grid_step_size = np.zeros((len(num_bins),))
    # bin each series independently with its requested bin count
    for series_idx, (series, series_num_bins) in enumerate(zip(any_1d_series, num_bins)):
        edges, info = compute_spanning_bins(series, num_bins=series_num_bins)
        out_bins.append(edges)
        out_bins_info.append(info)
        out_bin_grid_step_size[series_idx] = info.step
    return out_bin_grid_step_size, out_bins, out_bins_info
def get_bin_centers(bin_edges):
    """Given a series of 1D bin edges, return the midpoint of each bin.
    The output has one fewer element than bin_edges."""
    half_widths = np.diff(bin_edges) / 2.0
    return bin_edges[:-1] + half_widths
def get_bin_edges(bin_centers):
    """ TODO: CHECK
    Given a series of 1D bin centers, reconstruct bin edges by interleaving each
    center's left and right boundary. Reciprocal of get_bin_centers(bin_edges).
    """
    # NOTE(review): assumes a uniform bin width taken from the first pair — confirm
    half_bin_width = float((bin_centers[1] - bin_centers[0])) / 2.0
    return interleave_elements(bin_centers - half_bin_width, bin_centers + half_bin_width)
def build_pairwise_indicies(target_indicies, debug_print=False):
    """Build consecutive (current, next) index pairs from a flat index sequence,
    for use in pairwise computations.

    Example:
        target_indicies = np.arange(5) # [0, 1, 2, 3, 4]
        build_pairwise_indicies(target_indicies) # [(0, 1), (1, 2), (2, 3), (3, 4)]

    Args:
        target_indicies: flat sequence of indicies.
        debug_print (bool, optional): print the intermediate slices. Defaults to False.

    Returns:
        list of (first, second) tuples.
    """
    firsts = target_indicies[0:-1]   # all but the last index
    seconds = target_indicies[1:]    # from the second to the last index
    if debug_print:
        print(f'target_indicies: {target_indicies}\nstart_pairs: {firsts}\nend_pairs: {seconds}')
    # wrap in list so the result isn't a one-shot iterator
    return list(zip(firsts, seconds))
def interleave_elements(start_points, end_points):
    """Interleave two equal-shaped numeric arrays row-wise: the result's even
    rows come from start_points, the odd rows from end_points, so it has twice
    as many rows. Inputs are promoted to 2D with np.atleast_2d; the output is a
    float array (np.zeros-backed)."""
    assert np.shape(start_points) == np.shape(end_points), f"start_points and end_points must be the same shape. np.shape(start_points): {np.shape(start_points)}, np.shape(end_points): {np.shape(end_points)}"
    start_points = np.atleast_2d(start_points)
    end_points = np.atleast_2d(end_points)
    interleaved_shape = (np.shape(start_points)[0] * 2, np.shape(start_points)[1])
    all_points = np.zeros(interleaved_shape)
    all_points[0::2, :] = start_points  # even rows
    all_points[1::2, :] = end_points    # odd rows
    assert np.shape(all_points)[0] == (np.shape(start_points)[0] * 2), f"newly created all_points is not of corrrect size! np.shape(all_points): {np.shape(all_points)}"
    return all_points
def get_dict_subset(a_dict, included_keys=None, require_all_keys=False):
    """Return the subset of `a_dict` restricted to `included_keys`.

    Args:
        a_dict (dict): source dictionary.
        included_keys (list, optional): keys to keep; None keeps everything.
        require_all_keys (bool): if True, every key in included_keys must be
            present in a_dict (a missing key raises KeyError).

    Returns:
        dict: the filtered dictionary (or a_dict itself when included_keys is None).
    """
    if included_keys is None:
        return a_dict
    if require_all_keys:
        # direct indexing raises KeyError on any missing key
        return {key: a_dict[key] for key in included_keys}
    return {key: a_dict[key] for key in included_keys if key in a_dict}
# def extract_windows_vectorized(array, clearing_time_index, max_time, sub_window_size):
# start = clearing_time_index + 1 - sub_window_size + 1
# sub_windows = (
# start +
# # expand_dims are used to convert a 1D array to 2D array.
# np.expand_dims(np.arange(sub_window_size), 0) +
# np.expand_dims(np.arange(max_time + 1), 0).T
# )
# return array[sub_windows]
# def vectorized_stride_v2(array, clearing_time_index, max_time, sub_window_size, stride_size):
# start = clearing_time_index + 1 - sub_window_size + 1
# sub_windows = (
# start +
# np.expand_dims(np.arange(sub_window_size), 0) +
# # Create a rightmost vector as [0, V, 2V, ...].
# np.expand_dims(np.arange(max_time + 1, step=stride_size), 0).T
# )
# return array[sub_windows]
def sorted_slice(a,l,r):
    """Return the positions in sorted array `a` whose values fall in [l, r]
    (inclusive on both ends), as an index range."""
    left_edge = np.searchsorted(a, l, 'left')
    right_edge = np.searchsorted(a, r, 'right')
    return np.arange(left_edge, right_edge)
## Pandas DataFrame helpers:
def partition(df: pd.DataFrame, partitionColumn: str):
    """Split `df` on the unique values of `partitionColumn`.

    Returns:
        (unique_values, object array of one sub-DataFrame per unique value).
    """
    unique_values = np.unique(df[partitionColumn])
    value_groups = df.groupby([partitionColumn])  # groups on the specified column
    split_frames = np.array([value_groups.get_group(aValue) for aValue in unique_values], dtype=object)
    return unique_values, split_frames
def find_neighbours(value, df, colname):
    """Find the row(s) of `df` closest to `value` in column `colname`. Claims to be O(N).
    From https://stackoverflow.com/questions/30112202/how-do-i-find-the-closest-values-in-a-pandas-series-to-an-input-number

    Returns:
        The Index of exact matches when any exist; otherwise a 2-element list
        [index_of_largest_smaller_value, index_of_smallest_larger_value].
    """
    matches = df[df[colname] == value]
    if matches.empty:
        below_idx = df[df[colname] < value][colname].idxmax()
        above_idx = df[df[colname] > value][colname].idxmin()
        return [below_idx, above_idx]
    return matches.index
#If the series is already sorted, an efficient method of finding the indexes is by using bisect functions.
# def get_closests(df, col, val):
# """ Requires already sorted lists. """
# lower_idx = pd.bisect_left(df[col].values, val)
# higher_idx = pd.bisect_right(df[col].values, val)
# if higher_idx == lower_idx: #val is not in the list
# return lower_idx - 1, lower_idx
# else: #val is in the list
# return lower_idx
# def find_closest_values(target, source, k_matches=1):
# """[summary]
# Usage:
# find_closest_values(target, source, k_matches=1)
# Args:
# target ([type]): [description]
# source ([type]): [description]
# k_matches (int, optional): [description]. Defaults to 1.
# Returns:
# [type]: [description]
# """
# k_above = source[source >= target].nsmallest(k_matches)
# k_below = source[source < target].nlargest(k_matches)
# k_all = pd.concat([k_below, k_above]).sort_values()
# return k_all
def chunks(iterable, size=10):
    """Lazily split *iterable* into successive chunks of at most *size* items.

    Args:
        iterable: any iterable to be chunked.
        size (int, optional): maximum items per chunk. Defaults to 10.

    Yields:
        generator: one generator per chunk; the final chunk may be shorter.

    Usage:
        laps_pages = [list(chunk) for chunk in chunks(sess.laps.lap_id, curr_num_subplots)]

    NOTE: all chunks share one underlying iterator, so each yielded chunk
    should be consumed before advancing to the next.
    """
    iterator = iter(iterable)
    for head in iterator:  # terminates once the shared iterator is exhausted
        def _chunk():
            # First element came from the outer loop; the rest are pulled
            # directly from the shared iterator.
            yield head
            yield from islice(iterator, size - 1)
        yield _chunk()
# (num_rows, num_columns) grid shape for a single page of subplots.
RowColTuple = namedtuple('RowColTuple', 'num_rows num_columns')
# Maps one subplot's flat index to its (row, col) position in the grid and
# to the index of the data item it displays.
PaginatedGridIndexSpecifierTuple = namedtuple('PaginatedGridIndexSpecifierTuple', 'linear_idx row_idx col_idx data_idx')
# Overall layout result: total subplot count, grid dimensions, and the list
# of per-subplot PaginatedGridIndexSpecifierTuple entries.
RequiredSubplotsTuple = namedtuple('RequiredSubplotsTuple', 'num_required_subplots num_columns num_rows combined_indicies')
def compute_paginated_grid_config(num_required_subplots, max_num_columns, max_subplots_per_page=None, data_indicies=None, last_figure_subplots_same_layout=True, debug_print=False):
    """ Fills row-wise first, and constrains the subplots values to just those that you need
    Args:
        num_required_subplots (int): total number of subplots to lay out.
        max_num_columns (int): maximum number of columns allowed per page.
        max_subplots_per_page ([type]): If None, pagination is effectively disabled and all subplots will be on a single page.
        data_indicies ([type], optional): your indicies into your original data that will also be accessible in the main loop. Defaults to None.
        last_figure_subplots_same_layout (bool): if True, the last page has the same number of items (same # columns and # rows) as the previous (full/complete) pages.
        debug_print (bool): if True, print the computed page grid sizes.
    Returns:
        (RequiredSubplotsTuple, list, list): the overall layout, the per-page lists of PaginatedGridIndexSpecifierTuple, and the per-page RowColTuple grid sizes.
    Example:
        subplot_no_pagination_configuration, included_combined_indicies_pages, page_grid_sizes = compute_paginated_grid_config(nMapsToShow, max_num_columns=subplots.num_columns, max_subplots_per_page=max_subplots_per_page, data_indicies=included_unit_indicies, last_figure_subplots_same_layout=last_figure_subplots_same_layout)
        num_pages = len(included_combined_indicies_pages)
    """
    def _compute_subplots_grid_layout(num_page_required_subplots: int, page_max_num_columns: int):
        """ For a single page: returns the RowColTuple grid shape. """
        fixed_columns = min(page_max_num_columns, num_page_required_subplots) # if there aren't enough plots to even fill up a whole row, reduce the number of columns
        needed_rows = int(np.ceil(num_page_required_subplots / fixed_columns))
        return RowColTuple(needed_rows, fixed_columns)
    def _compute_num_subplots(num_required_subplots: int, max_num_columns: int, data_indicies=None):
        """Computes the RequiredSubplotsTuple from the required number of subplots and the max_num_columns. We start in row[0] and begin filling to the right until we exceed max_num_columns. To avoid going over, we add a new row and continue from there.
        """
        linear_indicies = np.arange(num_required_subplots)
        if data_indicies is None:
            data_indicies = np.arange(num_required_subplots) # the data_indicies are just the same as the linear indicies unless otherwise specified
        (total_needed_rows, fixed_columns) = _compute_subplots_grid_layout(num_required_subplots, max_num_columns) # get the result for a single page before moving on
        all_row_column_indicies = np.unravel_index(linear_indicies, (total_needed_rows, fixed_columns)) # inverse is: np.ravel_multi_index(row_column_indicies, (needed_rows, fixed_columns))
        # Zip each linear index with its (row, col) grid position and its data index.
        all_combined_indicies = [PaginatedGridIndexSpecifierTuple(linear_indicies[i], all_row_column_indicies[0][i], all_row_column_indicies[1][i], data_indicies[i]) for i in np.arange(len(linear_indicies))]
        return RequiredSubplotsTuple(num_required_subplots, fixed_columns, total_needed_rows, all_combined_indicies)
    subplot_no_pagination_configuration = _compute_num_subplots(num_required_subplots, max_num_columns=max_num_columns, data_indicies=data_indicies)
    # once we have the result for a single page, we paginate it using the chunks function to easily separate it into pages.
    if max_subplots_per_page is None:
        max_subplots_per_page = num_required_subplots # all subplots must fit on a single page.
    included_combined_indicies_pages = [list(chunk) for chunk in chunks(subplot_no_pagination_configuration.combined_indicies, max_subplots_per_page)]
    if last_figure_subplots_same_layout:
        # Reuse the full-page grid shape for every page, including the final (possibly partial) one.
        page_grid_sizes = [RowColTuple(subplot_no_pagination_configuration.num_rows, subplot_no_pagination_configuration.num_columns) for a_page in included_combined_indicies_pages]
    else:
        # If it isn't required to have the same layout as the previous (full) pages, recompute the correct number of columns for just this page. This deals with the case when not even a full row is filled.
        page_grid_sizes = [_compute_subplots_grid_layout(len(a_page), subplot_no_pagination_configuration.num_columns) for a_page in included_combined_indicies_pages]
    if debug_print:
        print(f'page_grid_sizes: {page_grid_sizes}')
    return subplot_no_pagination_configuration, included_combined_indicies_pages, page_grid_sizes
def is_consecutive_no_gaps(arr, enable_debug_print=False):
    """Check that *arr* contains every index in [0, len(arr)-1] with no gaps.

    Note: the check is implemented as a set difference, so any permutation
    of 0..len(arr)-1 passes — ordering itself is not verified.

    Args:
        arr: list-like; checks if the series covers [0, ..., len(arr)-1].
        enable_debug_print (bool): if True, print diagnostics.

    Returns:
        bool: True when no element of range(len(arr)) is missing from arr.

    Usage:
        neuron_IDXs = extracted_neuron_IDXs
        is_consecutive_no_gaps(cell_ids, neuron_IDXs)
    """
    if enable_debug_print:
        print(f'is_consecutive_no_gaps(arr: {arr})')
    expected_sequence = np.arange(len(arr))  # the ideal gapless series [0, ..., N-1]
    differing_elements = np.setdiff1d(expected_sequence, arr)
    if len(differing_elements) == 0:
        return True
    if enable_debug_print:
        print(f'\t differing_elements: {differing_elements}')
    return False
def validate_reverse_index_map(value_to_original_index_reverse_map, neuron_IDXs, cell_ids, debug_print=True):
    """Validate a reverse-index map from cell IDs to linear neuron indices.

    Used to be called `validate_cell_IDs_to_CellIDXs_map`.

    Checks that neuron_IDXs form a gapless index range, that every map key
    occurs in cell_ids, and that every map value occurs in neuron_IDXs.

    Example:
        # Allows reverse indexing into the linear imported array using the original cell ID indicies:
        id_arr = [ 2 3 4 5 7 8 9 10 ... 64]
        linear_flitered_ids = np.arange(len(id_arr))
        value_to_original_index_reverse_map = dict(zip(id_arr, linear_flitered_ids))
    Usage:
        cell_ids = extracted_cell_ids
        neuron_IDXs = extracted_neuron_IDXs
        reverse_cellID_index_map = ipcDataExplorer.active_session.neurons.reverse_cellID_index_map
        validate_reverse_index_map(reverse_cellID_index_map, cell_ids, neuron_IDXs)

    Returns:
        bool: True when the map is consistent with cell_ids and neuron_IDXs.
    """
    if debug_print:
        print(f'\t cell_ids: {cell_ids}')
        print(f'\t neuron_IDXs: {neuron_IDXs}')
    # The target index space must be gapless before the map can be validated.
    if not is_consecutive_no_gaps(neuron_IDXs, enable_debug_print=debug_print):
        if debug_print:
            print('neuron_IDXs has gaps!')
        return False
    # The cellIDs that can be mapped from must all occur in cell_ids.
    map_start_ids = list(value_to_original_index_reverse_map.keys())
    differing_elements_ids = np.setdiff1d(map_start_ids, cell_ids)
    # The cellIDXs that can be mapped to must all occur in neuron_IDXs.
    map_destination_IDXs = list(value_to_original_index_reverse_map.values())
    differing_elements_IDXs = np.setdiff1d(map_destination_IDXs, neuron_IDXs)
    if len(differing_elements_IDXs) == 0 and len(differing_elements_ids) == 0:
        return True
    if debug_print:
        print(f'\t differing_elements_IDXs: {differing_elements_IDXs}')
        print(f'\t differing_elements_ids: {differing_elements_ids}')
    return False
| [
"numpy.atleast_2d",
"numpy.ceil",
"collections.namedtuple",
"numpy.reshape",
"numpy.unique",
"itertools.islice",
"numpy.searchsorted",
"numpy.diff",
"numpy.append",
"numpy.linspace",
"numpy.zeros",
"numpy.setdiff1d",
"numpy.nanmax",
"numpy.unravel_index",
"numpy.expand_dims",
"numpy.na... | [((18753, 18802), 'collections.namedtuple', 'namedtuple', (['"""RowColTuple"""', '"""num_rows num_columns"""'], {}), "('RowColTuple', 'num_rows num_columns')\n", (18763, 18802), False, 'from collections import namedtuple\n'), ((18838, 18927), 'collections.namedtuple', 'namedtuple', (['"""PaginatedGridIndexSpecifierTuple"""', '"""linear_idx row_idx col_idx data_idx"""'], {}), "('PaginatedGridIndexSpecifierTuple',\n 'linear_idx row_idx col_idx data_idx')\n", (18848, 18927), False, 'from collections import namedtuple\n'), ((18948, 19051), 'collections.namedtuple', 'namedtuple', (['"""RequiredSubplotsTuple"""', '"""num_required_subplots num_columns num_rows combined_indicies"""'], {}), "('RequiredSubplotsTuple',\n 'num_required_subplots num_columns num_rows combined_indicies')\n", (18958, 19051), False, 'from collections import namedtuple\n'), ((1169, 1203), 'numpy.reshape', 'np.reshape', (['x_only_matrix', '(-1, 1)'], {}), '(x_only_matrix, (-1, 1))\n', (1179, 1203), True, 'import numpy as np\n'), ((1577, 1611), 'numpy.reshape', 'np.reshape', (['y_only_matrix', '(-1, 1)'], {}), '(y_only_matrix, (-1, 1))\n', (1587, 1611), True, 'import numpy as np\n'), ((1974, 2066), 'numpy.reshape', 'np.reshape', (['flat_all_entries_matrix', '(original_data_shape[0], original_data_shape[1], 2)'], {}), '(flat_all_entries_matrix, (original_data_shape[0],\n original_data_shape[1], 2))\n', (1984, 2066), True, 'import numpy as np\n'), ((6644, 6742), 'numpy.linspace', 'np.linspace', (['curr_variable_extents[0]', 'curr_variable_extents[1]', 'num_subdivisions'], {'dtype': 'float'}), '(curr_variable_extents[0], curr_variable_extents[1],\n num_subdivisions, dtype=float)\n', (6655, 6742), True, 'import numpy as np\n'), ((10069, 10092), 'numpy.zeros', 'np.zeros', (['(num_series,)'], {}), '((num_series,))\n', (10077, 10092), True, 'import numpy as np\n'), ((10107, 10128), 'numpy.arange', 'np.arange', (['num_series'], {}), '(num_series)\n', (10116, 10128), True, 'import numpy as 
np\n'), ((13159, 13186), 'numpy.atleast_2d', 'np.atleast_2d', (['start_points'], {}), '(start_points)\n', (13172, 13186), True, 'import numpy as np\n'), ((13204, 13229), 'numpy.atleast_2d', 'np.atleast_2d', (['end_points'], {}), '(end_points)\n', (13217, 13229), True, 'import numpy as np\n'), ((13374, 13400), 'numpy.zeros', 'np.zeros', (['all_points_shape'], {}), '(all_points_shape)\n', (13382, 13400), True, 'import numpy as np\n'), ((15584, 15613), 'numpy.searchsorted', 'np.searchsorted', (['a', 'l', '"""left"""'], {}), "(a, l, 'left')\n", (15599, 15613), True, 'import numpy as np\n'), ((15624, 15654), 'numpy.searchsorted', 'np.searchsorted', (['a', 'r', '"""right"""'], {}), "(a, r, 'right')\n", (15639, 15654), True, 'import numpy as np\n'), ((15666, 15687), 'numpy.arange', 'np.arange', (['start', 'end'], {}), '(start, end)\n', (15675, 15687), True, 'import numpy as np\n'), ((15951, 15981), 'numpy.unique', 'np.unique', (['df[partitionColumn]'], {}), '(df[partitionColumn])\n', (15960, 15981), True, 'import numpy as np\n'), ((23933, 23979), 'numpy.setdiff1d', 'np.setdiff1d', (['comparison_correct_sequence', 'arr'], {}), '(comparison_correct_sequence, arr)\n', (23945, 23979), True, 'import numpy as np\n'), ((5890, 5916), 'numpy.nanmin', 'np.nanmin', (['variable_values'], {}), '(variable_values)\n', (5899, 5916), True, 'import numpy as np\n'), ((5918, 5944), 'numpy.nanmax', 'np.nanmax', (['variable_values'], {}), '(variable_values)\n', (5927, 5944), True, 'import numpy as np\n'), ((5973, 6050), 'numpy.ceil', 'np.ceil', (['((curr_variable_extents[1] - curr_variable_extents[0]) / max_bin_size)'], {}), '((curr_variable_extents[1] - curr_variable_extents[0]) / max_bin_size)\n', (5980, 6050), True, 'import numpy as np\n'), ((6581, 6608), 'numpy.arange', 'np.arange', (['num_subdivisions'], {}), '(num_subdivisions)\n', (6590, 6608), True, 'import numpy as np\n'), ((8339, 8365), 'numpy.nanmin', 'np.nanmin', (['variable_values'], {}), '(variable_values)\n', (8348, 8365), True, 
'import numpy as np\n'), ((8367, 8393), 'numpy.nanmax', 'np.nanmax', (['variable_values'], {}), '(variable_values)\n', (8376, 8393), True, 'import numpy as np\n'), ((8552, 8648), 'numpy.linspace', 'np.linspace', (['curr_variable_extents[0]', 'curr_variable_extents[1]'], {'num': 'num_bins', 'retstep': '(True)'}), '(curr_variable_extents[0], curr_variable_extents[1], num=\n num_bins, retstep=True)\n', (8563, 8648), True, 'import numpy as np\n'), ((12942, 12964), 'numpy.shape', 'np.shape', (['start_points'], {}), '(start_points)\n', (12950, 12964), True, 'import numpy as np\n'), ((12968, 12988), 'numpy.shape', 'np.shape', (['end_points'], {}), '(end_points)\n', (12976, 12988), True, 'import numpy as np\n'), ((21144, 21176), 'numpy.arange', 'np.arange', (['num_required_subplots'], {}), '(num_required_subplots)\n', (21153, 21176), True, 'import numpy as np\n'), ((21561, 21630), 'numpy.unravel_index', 'np.unravel_index', (['linear_indicies', '(total_needed_rows, fixed_columns)'], {}), '(linear_indicies, (total_needed_rows, fixed_columns))\n', (21577, 21630), True, 'import numpy as np\n'), ((25746, 25783), 'numpy.setdiff1d', 'np.setdiff1d', (['map_start_ids', 'cell_ids'], {}), '(map_start_ids, cell_ids)\n', (25758, 25783), True, 'import numpy as np\n'), ((25994, 26041), 'numpy.setdiff1d', 'np.setdiff1d', (['map_destination_IDXs', 'neuron_IDXs'], {}), '(map_destination_IDXs, neuron_IDXs)\n', (26006, 26041), True, 'import numpy as np\n'), ((1462, 1489), 'numpy.expand_dims', 'np.expand_dims', (['y_values', '(1)'], {}), '(y_values, 1)\n', (1476, 1489), True, 'import numpy as np\n'), ((1780, 1800), 'numpy.append', 'np.append', (['an_x', 'a_y'], {}), '(an_x, a_y)\n', (1789, 1800), True, 'import numpy as np\n'), ((8816, 8892), 'numpy.arange', 'np.arange', (['curr_variable_extents[0]', '(curr_variable_extents[1] + xstep)', 'xstep'], {}), '(curr_variable_extents[0], curr_variable_extents[1] + xstep, xstep)\n', (8825, 8892), True, 'import numpy as np\n'), ((9229, 9249), 
'numpy.arange', 'np.arange', (['xnum_bins'], {}), '(xnum_bins)\n', (9238, 9249), True, 'import numpy as np\n'), ((10609, 10627), 'numpy.diff', 'np.diff', (['bin_edges'], {}), '(bin_edges)\n', (10616, 10627), True, 'import numpy as np\n'), ((13069, 13091), 'numpy.shape', 'np.shape', (['start_points'], {}), '(start_points)\n', (13077, 13091), True, 'import numpy as np\n'), ((13117, 13137), 'numpy.shape', 'np.shape', (['end_points'], {}), '(end_points)\n', (13125, 13137), True, 'import numpy as np\n'), ((13285, 13307), 'numpy.shape', 'np.shape', (['start_points'], {}), '(start_points)\n', (13293, 13307), True, 'import numpy as np\n'), ((13416, 13452), 'numpy.arange', 'np.arange', (['(0)', 'all_points_shape[0]', '(2)'], {}), '(0, all_points_shape[0], 2)\n', (13425, 13452), True, 'import numpy as np\n'), ((13512, 13548), 'numpy.arange', 'np.arange', (['(1)', 'all_points_shape[0]', '(2)'], {}), '(1, all_points_shape[0], 2)\n', (13521, 13548), True, 'import numpy as np\n'), ((13601, 13621), 'numpy.shape', 'np.shape', (['all_points'], {}), '(all_points)\n', (13609, 13621), True, 'import numpy as np\n'), ((13736, 13756), 'numpy.shape', 'np.shape', (['all_points'], {}), '(all_points)\n', (13744, 13756), True, 'import numpy as np\n'), ((18570, 18596), 'itertools.islice', 'islice', (['iterator', '(size - 1)'], {}), '(iterator, size - 1)\n', (18576, 18596), False, 'from itertools import islice\n'), ((20638, 20689), 'numpy.ceil', 'np.ceil', (['(num_page_required_subplots / fixed_columns)'], {}), '(num_page_required_subplots / fixed_columns)\n', (20645, 20689), True, 'import numpy as np\n'), ((21239, 21271), 'numpy.arange', 'np.arange', (['num_required_subplots'], {}), '(num_required_subplots)\n', (21248, 21271), True, 'import numpy as np\n'), ((1052, 1079), 'numpy.expand_dims', 'np.expand_dims', (['x_values', '(1)'], {}), '(x_values, 1)\n', (1066, 1079), True, 'import numpy as np\n'), ((13254, 13276), 'numpy.shape', 'np.shape', (['start_points'], {}), '(start_points)\n', (13262, 
13276), True, 'import numpy as np\n'), ((13629, 13651), 'numpy.shape', 'np.shape', (['start_points'], {}), '(start_points)\n', (13637, 13651), True, 'import numpy as np\n'), ((1266, 1289), 'numpy.shape', 'np.shape', (['x_only_matrix'], {}), '(x_only_matrix)\n', (1274, 1289), True, 'import numpy as np\n'), ((1323, 1351), 'numpy.shape', 'np.shape', (['flat_x_only_matrix'], {}), '(flat_x_only_matrix)\n', (1331, 1351), True, 'import numpy as np\n'), ((2132, 2160), 'numpy.shape', 'np.shape', (['all_entries_matrix'], {}), '(all_entries_matrix)\n', (2140, 2160), True, 'import numpy as np\n')] |
import numpy as np
from util.math import abs_diff
# =============================================
# Metric Principle Component Analysis
# =============================================
# Generate vector between scaled by metric difference. Give the metric
# the indices of "vectors" in the provided matrix.
def gen_random_metric_diff(matrix, index_metric, power=2, count=None):
    """Yield between-vectors of random row pairs, scaled by metric difference.

    The metric receives the *indices* of the two rows in *matrix*; pairs
    with nonpositive metric difference or a zero-length between-vector are
    skipped.

    Args:
        matrix: (n, d) array of row vectors.
        index_metric: callable (i, j) -> metric difference between rows i, j.
        power: exponent applied to the vector norm in the denominator.
        count: number of random pairs to draw (semantics from util.random.pairs).
    """
    from util.random import pairs
    for (i, j) in pairs(len(matrix), count):
        metric_diff = index_metric(i, j)
        if metric_diff <= 0:
            continue  # identical under the metric — contributes nothing
        between = matrix[i] - matrix[j]
        denom = np.linalg.norm(between) ** power
        if denom > 0:
            yield metric_diff * between / denom
# Given a set of row-vectors, compute a convex weighting that is
# proportional to the inverse total variation of metric distance
# between adjacent points along each vector (component).
def normalize_error(points, values, metric, display=False):
    """Compute a per-axis weighting inversely proportional to the total
    variation of metric distance between adjacent points along each axis.

    Args:
        points: (n, d) array of row-vector samples.
        values: length-n array of values paired with the points.
        metric: callable comparing two values.
        display: if True, print progress to stdout.

    Returns:
        np.ndarray: length-d weight vector, scaled toward unit determinant.
    """
    if display: print(" estimating error slope per axis.. ", end="\r", flush=True)
    num_axes = points.shape[1]
    # Accumulate the average metric slope along each coordinate axis.
    avg_slope = np.zeros(num_axes)
    update = end = ""
    for axis in range(num_axes):
        update = "\b"*len(end)
        end = f"{axis+1} of {points.shape[1]}"
        if display: print(update, end=end, flush=True)
        # Walk the points in ascending order of this coordinate.
        order = np.argsort(points[:, axis])
        for p1, p2 in zip(order[:-1], order[1:]):
            gap = points[p2, axis] - points[p1, axis]
            if gap > 0:
                avg_slope[axis] += metric(values[p1], values[p2]) / (gap * num_axes)
    if display: print("\r ", end="\r", flush=True)
    # If there are dimensions with no slope, then they are the only ones needed.
    if min(avg_slope) <= 0.0:
        avg_slope = np.where(avg_slope == 0, 1., float('inf'))
    # We want to minimize the expected error associated with the 2-norm.
    #
    #   E[f(x) - f(y)] = sum( E[f(x)1 - f(y)1]^2 + ... + E[f(x)d - f(y)d]^2 )^(1/2)
    #     -> { E[f(x)1-f(y)1]^(-2), ..., E[f(x)d-f(y)d]^(-2) }
    #
    # This is the normalizing vector we want, scaled to unit determinant.
    error_fix = avg_slope**(-2)
    error_fix /= np.median(error_fix)  # make the product of all values stable
    # (dividing by prod(...)**(1/d) directly was numerically unstable for small values)
    return error_fix / np.prod(error_fix)**(1/points.shape[1])
# Compute the metric PCA (pca of the between vectors scaled by
# metric difference slope).
def mpca(points, values, metric=abs_diff, num_components=None,
         num_vecs=None, display=False):
    """Metric PCA: principal components of the between-point vectors scaled
    by the metric difference slope.

    Args:
        points: (n, d) array of row-vector samples.
        values: length-n array of values paired with the points.
        metric: callable comparing two values; defaults to abs_diff.
        num_components: components to keep (default: min(points.shape)).
        num_vecs: random between-vectors to sample (default: n*(n-1)/2).
        display: if True, print progress to stdout.

    Returns:
        (components, weights): principal components of the metric-scaled
        between-vectors and their error-normalizing weights.
    """
    if display: print(" normalizing axes by expected error..", end="\r", flush=True)
    # Resolve defaults, then clamp both settings to what the data allows.
    max_pairs = (len(points)**2 - len(points)) // 2
    if type(num_components) == type(None): num_components = min(points.shape)
    if (type(num_vecs) == type(None)): num_vecs = max_pairs
    num_components = min(num_components, *points.shape)
    num_vecs = min(num_vecs, max_pairs)
    if display: print(" allocating memory for metric between vectors..", end="\r", flush=True)
    m_vecs = np.zeros((num_vecs, points.shape[1]))
    if display: print(" generating metric vectors..", end="\r", flush=True)
    # Metric difference between two samples, addressed by row index.
    index_metric = lambda i1, i2: metric(values[i1], values[i2])
    # Fill the buffer with random metric-scaled "between vectors".
    for row, vec in enumerate(gen_random_metric_diff(points, index_metric, count=num_vecs)):
        m_vecs[row, :] = vec
    if display: print(" computing principle components..", end="\r", flush=True)
    components, _ = pca(m_vecs, num_components=num_components)
    if display: print(" normalizing components by metric slope..", end="\r", flush=True)
    weights = normalize_error(np.matmul(points, components.T), values, metric, display)
    if display: print(" ", end="\r", flush=True)
    # Sign convention: make the first component start positive (consistency).
    if (components[0, 0] < 0): components *= -1
    # Return the principle components of the metric slope vectors.
    return components, weights
# Compute the principle components using sklearn.
def pca(points, num_components=None, display=False):
    """Principal component analysis of *points* via sklearn.

    Args:
        points: (n, d) array of row-vector samples.
        num_components: components to keep (default: min of the two shape dims).
        display: if True, print progress to stdout.

    Returns:
        (principle_components, magnitudes): the component row-vectors and
        their singular values normalized to sum to 1.
    """
    from sklearn.decomposition import PCA
    model = PCA(n_components=num_components)
    if (num_components is None):
        num_components = min(*points.shape)
    else:
        num_components = min(num_components, *points.shape)
    if display: print(f"Computing {num_components} principle components..",end="\r", flush=True)
    model.fit(points)
    if display: print( " ",end="\r", flush=True)
    principle_components = model.components_
    magnitudes = model.singular_values_
    # Normalize the component magnitudes to have sum 1.
    magnitudes /= np.sum(magnitudes)
    return principle_components, magnitudes
| [
"numpy.prod",
"numpy.median",
"sklearn.decomposition.PCA",
"numpy.argsort",
"numpy.sum",
"numpy.zeros",
"numpy.matmul",
"numpy.linalg.norm"
] | [((1154, 1179), 'numpy.zeros', 'np.zeros', (['points.shape[1]'], {}), '(points.shape[1])\n', (1162, 1179), True, 'import numpy as np\n'), ((2333, 2353), 'numpy.median', 'np.median', (['error_fix'], {}), '(error_fix)\n', (2342, 2353), True, 'import numpy as np\n'), ((3235, 3272), 'numpy.zeros', 'np.zeros', (['(num_vecs, points.shape[1])'], {}), '((num_vecs, points.shape[1]))\n', (3243, 3272), True, 'import numpy as np\n'), ((4530, 4562), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'num_components'}), '(n_components=num_components)\n', (4533, 4562), False, 'from sklearn.decomposition import PCA\n'), ((5072, 5090), 'numpy.sum', 'np.sum', (['magnitudes'], {}), '(magnitudes)\n', (5078, 5090), True, 'import numpy as np\n'), ((1440, 1467), 'numpy.argsort', 'np.argsort', (['points[:, axis]'], {}), '(points[:, axis])\n', (1450, 1467), True, 'import numpy as np\n'), ((3988, 4019), 'numpy.matmul', 'np.matmul', (['points', 'components.T'], {}), '(points, components.T)\n', (3997, 4019), True, 'import numpy as np\n'), ((672, 691), 'numpy.linalg.norm', 'np.linalg.norm', (['vec'], {}), '(vec)\n', (686, 691), True, 'import numpy as np\n'), ((2414, 2432), 'numpy.prod', 'np.prod', (['error_fix'], {}), '(error_fix)\n', (2421, 2432), True, 'import numpy as np\n')] |
from functools import partial, update_wrapper
from math import exp
import numpy as np
from scipy.sparse import lil_matrix
from scipy.stats import rankdata
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics.pairwise import pairwise_distances, euclidean_distances
from sklearn.neighbors import NearestNeighbors
from ...utils.information_theory import conditional_entropy
from ...utils.information_theory import entropy
from ...utils.qpfs_body import qpfs_body
from ...utils.functions import knn_from_class
def _wrapped_partial(func, *args, **kwargs):
partial_func = partial(func, *args, **kwargs)
update_wrapper(partial_func, func)
return partial_func
def fit_criterion_measure(x, y):
    """Calculate the FitCriterion score for features. Bigger values mean more
    important features.

    For each feature, every sample is assigned to the class whose
    (variance-normalized) feature center is nearest; the score is the
    fraction of samples assigned to their true class.

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        The training input samples.
    y : array-like, shape (n_samples,)
        The target values.

    Returns
    -------
    array-like, shape (n_features,) : feature scores

    See Also
    --------
    https://core.ac.uk/download/pdf/191234514.pdf

    Examples
    --------
    >>> from ITMO_FS.filters.univariate import fit_criterion_measure
    >>> import numpy as np
    >>> x = np.array([[1, 2, 4, 1, 1], [2, 2, 2, 1, 2], [3, 5, 1, 1, 4],
    ... [1, 1, 1, 1, 4], [2, 2, 2, 1, 5]])
    >>> y = np.array([1, 2, 3, 1, 2])
    >>> fit_criterion_measure(x, y)
    array([1. , 0.8, 0.8, 0.4, 0.6])
    """
    classes = np.unique(y)

    def _hit_count(feature):
        # Per-class (mean, variance) of this feature's values.
        stats = {cl: (np.mean(feature[y == cl]), np.var(feature[y == cl]))
                 for cl in classes}
        hits = 0
        for value, true_cl in zip(feature, y):
            # Predict the class with the nearest variance-normalized center;
            # 1e-10 guards against a zero-variance class.
            predicted = min(
                classes,
                key=lambda cl: abs(value - stats[cl][0]) / (stats[cl][1] + 1e-10))
            hits += predicted == true_cl
        return hits

    return np.apply_along_axis(_hit_count, 0, x) / x.shape[0]
def f_ratio_measure(x, y):
    """Calculate Fisher score for features. Bigger values mean more important
    features.

    The score is the ratio of between-class scatter to within-class scatter
    of each feature, with class terms weighted by class size.

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        The training input samples.
    y : array-like, shape (n_samples,)
        The target values.

    Returns
    -------
    array-like, shape (n_features,) : feature scores

    See Also
    --------
    https://papers.nips.cc/paper/2909-laplacian-score-for-feature-selection.pdf

    Examples
    --------
    >>> from ITMO_FS.filters.univariate import f_ratio_measure
    >>> import numpy as np
    >>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
    ... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
    >>> y = np.array([1, 3, 2, 1, 2])
    >>> f_ratio_measure(x, y)
    array([0.6 , 0.2 , 1.  , 0.12, 5.4 ])
    """
    classes, counts = np.unique(y, return_counts=True)

    def _fisher(feature):
        overall_mean = np.mean(feature)
        inter_class = 0.0   # between-class scatter
        intra_class = 0.0   # within-class scatter
        for cl, cnt in zip(classes, counts):
            class_values = feature[y == cl]
            inter_class += cnt * np.power(overall_mean - np.mean(class_values), 2)
            intra_class += cnt * np.var(class_values)
        # 1e-10 guards against zero within-class variance.
        return inter_class / (intra_class + 1e-10)

    return np.apply_along_axis(_fisher, 0, x)
def gini_index(x, y):
    """Calculate Gini index for features. Bigger values mean more important
    features. This measure works best with discrete features due to being
    based on information theory.

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        The training input samples.
    y : array-like, shape (n_samples,)
        The target values.

    Returns
    -------
    array-like, shape (n_features,) : feature scores

    See Also
    --------
    http://lkm.fri.uni-lj.si/xaigor/slo/clanki/ijcai95z.pdf

    Examples
    --------
    >>> from ITMO_FS.filters.univariate import gini_index
    >>> from sklearn.preprocessing import KBinsDiscretizer
    >>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
    ... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
    >>> y = np.array([1, 3, 2, 1, 2])
    >>> est = KBinsDiscretizer(n_bins=10, encode='ordinal')
    >>> x = est.fit_transform(x)
    >>> gini_index(x, y)
    array([0.14      , 0.04      , 0.64      , 0.24      , 0.37333333])
    """
    classes, counts = np.unique(y, return_counts=True)
    # Sum of squared class priors; subtracted so a non-informative feature
    # scores near zero.
    prior_prob_squared_sum = np.sum(np.square(counts / x.shape[0]))

    def _gini(feature):
        total_sum = 0.0
        values, value_counts = np.unique(feature, return_counts=True)
        for val, cnt in zip(values, value_counts):
            # Class frequencies among the samples taking this feature value.
            _, class_counts = np.unique(y[feature == val], return_counts=True)
            total_sum += np.sum(np.square(class_counts)) / cnt
        return total_sum / x.shape[0] - prior_prob_squared_sum

    return np.apply_along_axis(_gini, 0, x)
def su_measure(x, y):
    """SU is a correlation measure between the features and the class
    calculated via formula SU(X,Y) = 2 * I(X|Y) / (H(X) + H(Y)). Bigger
    values mean more important features. This measure works best with
    discrete features due to being based on information theory.

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        The training input samples.
    y : array-like, shape (n_samples,)
        The target values.

    Returns
    -------
    array-like, shape (n_features,) : feature scores

    See Also
    --------
    https://pdfs.semanticscholar.org/9964/c7b42e6ab311f88e493b3fc552515e0c764a.pdf

    Examples
    --------
    >>> from ITMO_FS.filters.univariate import su_measure
    >>> from sklearn.preprocessing import KBinsDiscretizer
    >>> import numpy as np
    >>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
    ... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
    >>> y = np.array([1, 3, 2, 1, 2])
    >>> est = KBinsDiscretizer(n_bins=10, encode='ordinal')
    >>> x = est.fit_transform(x)
    >>> su_measure(x, y)
    array([0.28694182, 0.13715115, 0.79187567, 0.47435099, 0.67126949])
    """
    entropy_y = entropy(y)  # H(Y) is the same for every feature; hoist it

    def _symmetric_uncertainty(feature):
        entropy_x = entropy(feature)
        # I(X;Y) = H(X) - H(X|Y)
        mutual_information = entropy_x - conditional_entropy(y, feature)
        return 2 * mutual_information / (entropy_x + entropy_y)

    return np.apply_along_axis(_symmetric_uncertainty, 0, x)
# TODO CONCORDATION COEF
def kendall_corr(x, y):
    """Calculate Sample sign correlation (Kendall correlation) for each
    feature. Bigger absolute values mean more important features.

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        The training input samples.
    y : array-like, shape (n_samples,)
        The target values.

    Returns
    -------
    array-like, shape (n_features,) : feature scores

    See Also
    --------
    https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient

    Examples
    --------
    >>> from ITMO_FS.filters.univariate import kendall_corr
    >>> import numpy as np
    >>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
    ... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
    >>> y = np.array([1, 3, 2, 1, 2])
    >>> kendall_corr(x, y)
    array([-0.1,  0.2, -0.4, -0.2,  0.2])
    """
    def _tau(feature):
        n = feature.shape[0]
        concordance = 0.0
        # Accumulate sign agreement over all pairs (i, j) with j > i.
        for i in range(n):
            feature_signs = np.sign(feature[i] - feature[i + 1:])
            y_signs = np.sign(y[i] - y[i + 1:])
            concordance += np.sum(feature_signs * y_signs)
        # Normalize by the number of pairs, n*(n-1)/2.
        return 2 * concordance / (n * (n - 1))

    return np.apply_along_axis(_tau, 0, x)
def fechner_corr(x, y):
    """Calculate Sample sign correlation (Fechner correlation) for each
    feature. Bigger absolute values mean more important features.

    For every feature, counts how often the feature and the target deviate
    from their respective means in the same direction, minus how often they
    deviate in opposite directions, normalized by the sample count.

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        The training input samples.
    y : array-like, shape (n_samples,)
        The target values.

    Returns
    -------
    array-like, shape (n_features,) : feature scores

    Examples
    --------
    >>> from ITMO_FS.filters.univariate import fechner_corr
    >>> import numpy as np
    >>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
    ... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
    >>> y = np.array([1, 3, 2, 1, 2])
    >>> fechner_corr(x, y)
    array([-0.2,  0.2, -0.4, -0.2, -0.2])
    """
    centered_y = y - np.mean(y)
    centered_x = x - np.mean(x, axis=0)
    # Sign of the product is +1 when both deviations agree, -1 otherwise.
    sign_agreement = np.sign(centered_x.T * centered_y)
    return np.sum(sign_agreement, axis=1) / x.shape[0]
def reliefF_measure(x, y, k_neighbors=1):
    """Calculate ReliefF measure for each feature. Bigger values mean more
    important features.

    Note:
    Only for complete x.
    Rather than repeating the randomized algorithm m times, this
    implementation runs it exhaustively (i.e. n times, once for each
    instance), which is practical for relatively small n (up to one
    thousand).

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        The input samples.
    y : array-like, shape (n_samples,)
        The classes for the samples.
    k_neighbors : int, optional
        The number of neighbors to consider when assigning feature importance
        scores. More neighbors results in more accurate scores but takes
        longer. Selection of k hits and misses is the basic difference to
        Relief and ensures greater robustness of the algorithm concerning noise.

    Returns
    -------
    array-like, shape (n_features,) : feature scores

    See Also
    --------
    Urbanowicz et al. Relief-based feature selection: Introduction and
    review. Journal of Biomedical Informatics 85 (2018) 189-203

    Examples
    --------
    >>> from ITMO_FS.filters.univariate import reliefF_measure
    >>> import numpy as np
    >>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
    ... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1], [1, 2, 1, 4, 2], [4, 3, 2, 3, 1]])
    >>> y = np.array([1, 2, 2, 1, 2, 1, 2])
    >>> reliefF_measure(x, y)
    array([-0.14285714, -0.57142857,  0.10714286, -0.14285714,  0.07142857])
    >>> reliefF_measure(x, y, k_neighbors=2)
    array([-0.07142857, -0.17857143, -0.07142857, -0.0952381 , -0.17857143])
    """
    def __calc_misses(index):
        # Per-feature absolute differences between the sample and its
        # k nearest neighbors from every *other* class, each class weighted
        # by its prior probability.
        misses_diffs_classes = np.abs(
            np.vectorize(
                lambda cl: (
                    x[index]
                    - x[knn_from_class(dm, y, index, k_neighbors, cl)])
                * prior_prob[cl],
                signature='()->(n,m)')(classes[classes != y[index]]))
        # Normalize by 1 - P(class of this sample), per the ReliefF update.
        return (np.sum(np.sum(misses_diffs_classes, axis=1), axis=0)
                / (1 - prior_prob[y[index]]))
    classes, counts = np.unique(y, return_counts=True)
    if np.any(counts <= k_neighbors):
        # Every class needs at least k_neighbors + 1 members so that k hits
        # (excluding the sample itself) exist for each instance.
        raise ValueError(
            "Cannot calculate relieff measure because one of theclasses has "
            "less than %d samples" % (k_neighbors + 1))
    # Empirical class priors P(c).
    prior_prob = dict(zip(classes, np.array(counts) / len(y)))
    n_samples = x.shape[0]
    n_features = x.shape[1]  # NOTE(review): unused local
    # use manhattan distance instead of euclidean
    dm = pairwise_distances(x, x, 'manhattan')
    indices = np.arange(n_samples)
    # use abs instead of square because of manhattan distance
    hits_diffs = np.abs(
        np.vectorize(
            lambda index: (
                x[index]
                - x[knn_from_class(dm, y, index, k_neighbors, y[index])]),
            signature='()->(n,m)')(indices))
    H = np.sum(hits_diffs, axis=(0,1))
    misses_sum_diffs = np.vectorize(
        lambda index: __calc_misses(index),
        signature='()->(n)')(indices)
    M = np.sum(misses_sum_diffs, axis=0)
    weights = M - H
    # dividing by m * k guarantees that all final weights
    # will be normalized within the interval [ − 1, 1].
    weights /= n_samples * k_neighbors
    # The maximum and minimum values of A are determined over the entire
    # set of instances.
    # This normalization ensures that weight updates fall
    # between 0 and 1 for both discrete and continuous features.
    with np.errstate(divide='ignore', invalid="ignore"):  # todo
        return weights / (np.amax(x, axis=0) - np.amin(x, axis=0))
def relief_measure(x, y, m=None, random_state=42):
    """Calculate Relief measure for each feature. This measure is supposed to
    work only with binary classification datasets; for multi-class problems use
    the ReliefF measure. Bigger values mean more important features.

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        The input samples.
    y : array-like, shape (n_samples,)
        The classes for the samples.
    m : int, optional
        Amount of iterations to do. If not specified, n_samples iterations
        would be performed.
    random_state : int, optional
        Random state for numpy random.

    Returns
    -------
    array-like, shape (n_features,) : feature scores

    See Also
    --------
    Urbanowicz et al. Relief-based feature selection: Introduction and
    review. Journal of Biomedical Informatics 85 (2018) 189-203

    Examples
    --------
    >>> from ITMO_FS.filters.univariate import relief_measure
    >>> import numpy as np
    >>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
    ... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
    >>> y = np.array([1, 2, 2, 1, 2])
    >>> relief_measure(x, y)
    array([ 0.    , -0.6   , -0.1875, -0.15  , -0.4   ])
    """
    weights = np.zeros(x.shape[1])
    classes, counts = np.unique(y, return_counts=True)
    if len(classes) == 1:
        raise ValueError("Cannot calculate relief measure with 1 class")
    if 1 in counts:
        # A same-class (hit) neighbor must exist for every sampled instance.
        raise ValueError(
            "Cannot calculate relief measure because one of the classes has "
            "only 1 sample")
    n_samples = x.shape[0]
    n_features = x.shape[1]  # NOTE(review): unused local
    if m is None:
        m = n_samples
    # Distances are computed on min-max scaled data so that every feature
    # contributes on the same [0, 1] scale.
    x_normalized = MinMaxScaler().fit_transform(x)
    dm = euclidean_distances(x_normalized, x_normalized)
    # m instance indices sampled uniformly (with replacement).
    indices = np.random.default_rng(random_state).integers(
        low=0, high=n_samples, size=m)
    objects = x_normalized[indices]  # NOTE(review): unused local
    # Squared per-feature difference to the nearest same-class neighbor.
    hits_diffs = np.square(
        np.vectorize(
            lambda index: (
                x_normalized[index]
                - x_normalized[knn_from_class(dm, y, index, 1, y[index])]),
            signature='()->(n,m)')(indices))
    # Squared per-feature difference to the nearest other-class neighbor.
    misses_diffs = np.square(
        np.vectorize(
            lambda index: (
                x_normalized[index]
                - x_normalized[knn_from_class(
                    dm, y, index, 1, y[index], anyOtherClass=True)]),
            signature='()->(n,m)')(indices))
    H = np.sum(hits_diffs, axis=(0,1))
    M = np.sum(misses_diffs, axis=(0,1))
    weights = M - H
    # Average the accumulated miss-minus-hit differences over m samples.
    return weights / m
def chi2_measure(x, y):
    """Calculate the Chi-squared measure for each feature.

    Bigger values mean more important features. This measure works best with
    discrete features due to being based on statistics.

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        The training input samples.
    y : array-like, shape (n_samples,)
        The target values.

    Returns
    -------
    array-like, shape (n_features,) : feature scores

    See Also
    --------
    http://lkm.fri.uni-lj.si/xaigor/slo/clanki/ijcai95z.pdf

    Example
    -------
    >>> from ITMO_FS.filters.univariate import chi2_measure
    >>> from sklearn.preprocessing import KBinsDiscretizer
    >>> import numpy as np
    >>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
    ... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
    >>> y = np.array([1, 3, 2, 1, 2])
    >>> est = KBinsDiscretizer(n_bins=10, encode='ordinal')
    >>> x = est.fit_transform(x)
    >>> chi2_measure(x, y)
    array([ 1.875     ,  0.83333333, 10.        ,  3.75      ,  6.66666667])
    """
    class_labels, class_counts = np.unique(y, return_counts=True)
    # Prior probability of each class, estimated from the sample.
    class_probs = class_counts / x.shape[0]

    def __chi2(feature):
        feat_values, feat_counts = np.unique(feature, return_counts=True)
        stat = 0.0
        for label, prob in zip(class_labels, class_probs):
            # Expected count of each feature value inside this class,
            # assuming feature and class are independent.
            expected = prob * feat_counts
            # Observed count of each feature value inside this class.
            in_class = feature[y == label]
            observed = np.array(
                [np.count_nonzero(in_class == value) for value in feat_values])
            stat += np.sum(np.square(expected - observed) / expected)
        return stat

    return np.apply_along_axis(__chi2, 0, x)
#
# def __contingency_matrix(labels_true, labels_pred):
# """Build a contingency matrix describing the relationship between labels.
# Parameters
# ----------
# labels_true : int array, shape = [n_samples]
# Ground truth class labels to be used as a reference
# labels_pred : array, shape = [n_samples]
# Cluster labels to evaluate
# Returns
# -------
# contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
# Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
# true class :math:`i` and in predicted class :math:`j`. If
# ``eps is None``, the dtype of this array will be integer. If ``eps`` is
# given, the dtype will be float.
# """
# classes, class_idx = np.unique(labels_true, return_inverse=True)
# clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
# n_classes = classes.shape[0]
# n_clusters = clusters.shape[0]
# # Using coo_matrix to accelerate simple histogram calculation,
# # i.e. bins are consecutive integers
# # Currently, coo_matrix is faster than histogram2d for simple cases
# # TODO redo it with numpy
# contingency = sp.coo_matrix((np.ones(class_idx.shape[0]),
# (class_idx, cluster_idx)),
# shape=(n_classes, n_clusters),
# dtype=np.int)
# contingency = contingency.tocsr()
# contingency.sum_duplicates()
# return contingency
#
#
# def __mi(U, V):
# contingency = __contingency_matrix(U, V)
# nzx, nzy, nz_val = sp.find(contingency)
# contingency_sum = contingency.sum()
# pi = np.ravel(contingency.sum(axis=1))
# pj = np.ravel(contingency.sum(axis=0))
# log_contingency_nm = np.log(nz_val)
# contingency_nm = nz_val / contingency_sum
# # Don't need to calculate the full outer product, just for non-zeroes
# outer = (pi.take(nzx).astype(np.int64, copy=False)
# * pj.take(nzy).astype(np.int64, copy=False))
# log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
# mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
# contingency_nm * log_outer)
# return mi.sum()
#
def spearman_corr(x, y):
    """Calculate Spearman's rank correlation for each feature.

    Bigger absolute values mean more important features. This measure works
    best with discrete features due to being based on statistics.

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        The training input samples.
    y : array-like, shape (n_samples,)
        The target values.

    Returns
    -------
    array-like, shape (n_features,) : feature scores

    Raises
    ------
    ValueError
        If fewer than 2 samples are provided.

    See Also
    --------
    https://en.wikipedia.org/wiki/Spearman's_rank_correlation_coefficient

    Examples
    --------
    >>> from ITMO_FS.filters.univariate import spearman_corr
    >>> from sklearn.preprocessing import KBinsDiscretizer
    >>> import numpy as np
    >>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
    ... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
    >>> y = np.array([1, 3, 2, 1, 2])
    >>> est = KBinsDiscretizer(n_bins=10, encode='ordinal')
    >>> x = est.fit_transform(x)
    >>> spearman_corr(x, y)
    array([-0.186339  ,  0.30429031, -0.52704628, -0.30555556,  0.35355339])
    """
    sample_count = x.shape[0]
    if sample_count < 2:
        raise ValueError("The input should contain more than 1 sample")
    # Spearman's rho is Pearson's correlation computed on ranks.
    return pearson_corr(np.apply_along_axis(rankdata, 0, x), rankdata(y))
def pearson_corr(x, y):
    """Calculate Pearson's correlation for each feature.

    Bigger absolute values mean more important features. This measure works
    best with discrete features due to being based on statistics. Features
    with zero variance receive a score of 0.

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        The training input samples.
    y : array-like, shape (n_samples,)
        The target values.

    Returns
    -------
    array-like, shape (n_features,) : feature scores

    See Also
    --------
    https://en.wikipedia.org/wiki/Pearson_correlation_coefficient

    Examples
    --------
    >>> from ITMO_FS.filters.univariate import pearson_corr
    >>> from sklearn.preprocessing import KBinsDiscretizer
    >>> import numpy as np
    >>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
    ... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
    >>> y = np.array([1, 3, 2, 1, 2])
    >>> est = KBinsDiscretizer(n_bins=10, encode='ordinal')
    >>> x = est.fit_transform(x)
    >>> pearson_corr(x, y)
    array([-0.13363062,  0.32732684, -0.60631301, -0.26244533,  0.53452248])
    """
    centered_x = x - np.mean(x, axis=0)
    centered_y = y - np.mean(y)
    # Covariance numerator per feature.
    covariances = centered_y.T.dot(centered_x).reshape((x.shape[1],))
    # Product of the standard-deviation terms per feature.
    norms = np.sqrt(
        np.sum(centered_y * centered_y) * np.sum(centered_x * centered_x, axis=0))
    scores = []
    for cov, norm in zip(covariances, norms):
        # Guard against zero-variance features (division by zero).
        scores.append(cov / norm if norm > 0.0 else 0)
    return np.array(scores)
# TODO need to implement unsupervised way
def laplacian_score(x, y, k_neighbors=5, t=1, metric='euclidean', **kwargs):
    """Calculate Laplacian Score for each feature. Smaller values mean more
    important features.

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        The input samples.
    y : array-like, shape (n_samples,)
        The classes for the samples.
        NOTE(review): y is not used in the body; the neighborhood graph is
        built from x only (see the unsupervised TODO above this function).
    k_neighbors : int, optional
        The number of neighbors to construct a nearest neighbor graph.
    t : float, optional
        Suitable constant for weight matrix S
        where Sij = exp(-(|xi - xj| ^ 2) / t).
    metric : str or callable, optional
        Norm function to compute distance between two points or one of the
        commonly used strings ('euclidean', 'manhattan' etc.) The default
        metric is euclidean.
    weights : array-like, shape (n_samples, n_samples)
        The weight matrix of the graph that models the local structure of
        the data space. By default it is constructed using KNN algorithm.

    Returns
    -------
    array-like, shape (n_features,) : feature scores

    See Also
    --------
    https://papers.nips.cc/paper/2909-laplacian-score-for-feature-selection.pdf

    Examples
    --------
    >>> from ITMO_FS.filters.univariate import laplacian_score
    >>> import numpy as np
    >>> x = np.array([[1, 2, 3, 3, 1], [2, 2, 3, 3, 2], [1, 3, 3, 1, 3],
    ... [3, 1, 3, 1, 4], [4, 4, 3, 1, 5]])
    >>> y = np.array([1, 2, 3, 4, 5])
    >>> laplacian_score(x, y)
    array([1.98983619, 1.22248371,        nan, 0.79710221, 1.90648048])
    """
    n, m = x.shape
    # A sample cannot be its own neighbor, so at most n - 1 neighbors exist.
    k_neighbors = min(k_neighbors, n - 1)
    if 'weights' in kwargs.keys():
        # A caller-supplied affinity matrix takes precedence over KNN.
        S = kwargs['weights']
    else:
        # Sparse storage for very large graphs, dense otherwise.
        if n > 100000:
            S = lil_matrix((n, n))
        else:
            S = np.zeros((n, n))
        graph = NearestNeighbors(n_neighbors=k_neighbors, metric=metric)
        graph.fit(x)
        distances, neighbors = graph.kneighbors()
        for i in range(n):
            for j in range(k_neighbors):
                # Symmetric heat-kernel weight exp(-d^2 / t) for each edge.
                S[i, neighbors[i][j]] = S[neighbors[i][j], i] = exp(
                    -distances[i][j] * distances[i][j] / t)
    ONE = np.ones((n,))
    # Degree matrix D and graph Laplacian L = D - S.
    D = np.diag(S.dot(ONE))
    L = D - S
    # NOTE(review): t is re-bound here from the kernel width to the degree
    # vector D·1 — confusing shadowing, though the kernel width is no longer
    # needed at this point.
    t = D.dot(ONE)
    # Center each feature by its degree-weighted mean.
    F = x - x.T.dot(t) / ONE.dot(t)
    # Score per feature: (f^T L f) / (f^T D f), read off the diagonal.
    F = F.T.dot(L.dot(F)) / F.T.dot(D.dot(F))
    return np.diag(F)
def information_gain(x, y):
    """Calculate mutual information for each feature via
    I(X, Y) = H(Y) - H(Y|X).

    Bigger values mean more important features. This measure works best with
    discrete features due to being based on information theory.

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        The training input samples.
    y : array-like, shape (n_samples,)
        The target values.

    Returns
    -------
    array-like, shape (n_features,) : feature scores

    Examples
    --------
    >>> from ITMO_FS.filters.univariate import information_gain
    >>> import numpy as np
    >>> from sklearn.preprocessing import KBinsDiscretizer
    >>> x = np.array([[1, 2, 3, 3, 1], [2, 2, 3, 3, 2], [1, 3, 3, 1, 3],
    ... [3, 1, 3, 1, 4], [4, 4, 3, 1, 5]])
    >>> y = np.array([1, 2, 3, 4, 5])
    >>> est = KBinsDiscretizer(n_bins=10, encode='ordinal')
    >>> x = est.fit_transform(x)
    >>> information_gain(x, y)
    array([1.33217904, 1.33217904, 0.        , 0.67301167, 1.60943791])
    """
    # H(Y): entropy of the target on its own.
    target_entropy = entropy(y)
    # H(Y|X_i): conditional entropy of the target given each feature column.
    conditional_entropies = np.apply_along_axis(conditional_entropy, 0, x, y)
    return target_entropy - conditional_entropies
def anova(x, y):
    """Calculate the one-way ANOVA F-score for each feature.

    Bigger values mean more important features.

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        The training input samples.
    y : array-like, shape (n_samples,)
        The target values.

    Returns
    -------
    array-like, shape (n_features,) : feature scores

    See Also
    --------
    Lowry. "Concepts and Applications of Inferential Statistics".
    Chapter 14. http://vassarstats.net/textbook/

    Note:
    The Anova score is counted for checking hypothesis if variances of two
    samples are similar, this measure only returns you counted F-score.
    For understanding whether samples' variances are similar you should
    compare recieved result with value of F-distribution function, for
    example use:
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.fdtrc.html#scipy.special.fdtrc

    Examples
    --------
    >>> from ITMO_FS.filters.univariate import anova
    >>> import numpy as np
    >>> x = np.array([[1, 2, 3, 3, 1], [2, 2, 3, 3, 2], [1, 3, 3, 1, 3],
    ... [3, 1, 3, 1, 4], [4, 4, 3, 1, 5]])
    >>> y = np.array([1, 2, 1, 3, 3])
    >>> anova(x, y)
    array([12.6 ,  0.04,   nan,  1.4 ,  3.  ])
    """
    labels = np.unique(y)
    groups = [x[y == label] for label in labels]
    n_groups = len(labels)
    n_total = x.shape[0]
    group_sizes = [g.shape[0] for g in groups]
    # Raw sum of squares over all samples, per feature.
    total_sq = sum((g ** 2).sum(axis=0) for g in groups)
    group_sums = [np.asarray(g.sum(axis=0)) for g in groups]
    grand_sum_sq = sum(group_sums) ** 2
    # Total sum of squares: sum(x^2) - (sum(x))^2 / n.
    ss_total = total_sq - grand_sum_sq / float(n_total)
    # Within-group sum of squares, accumulated group by group.
    ss_within = sum(
        np.asarray((g ** 2).sum(axis=0)) - s ** 2 / size
        for g, s, size in zip(groups, group_sums, group_sizes))
    ss_between = ss_total - ss_within
    df_between = n_groups - 1
    df_within = n_total - n_groups
    # F = mean square between / mean square within.
    return np.array(
        (ss_between / float(df_between)) / (ss_within / float(df_within)))
def modified_t_score(x, y):
    """Calculate the Modified T-score for each feature.

    Bigger values mean more important features. The plain t-score is scaled
    by the feature's correlation with the target and penalized by its mean
    correlation with the other features.

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
        The input samples.
    y : array-like, shape (n_samples,)
        The classes for the samples. There can be only 2 classes.

    Returns
    -------
    array-like, shape (n_features,) : feature scores

    See Also
    --------
    For more details see paper <https://dergipark.org.tr/en/download/article-file/261247>.

    Examples
    --------
    >>> from ITMO_FS.filters.univariate import modified_t_score
    >>> import numpy as np
    >>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
    ... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
    >>> y = np.array([1, 1, 2, 1, 2])
    >>> modified_t_score(x, y)
    array([1.68968099, 0.12148022, 0.39653932, 0.17682997, 2.04387142])
    """
    labels = np.unique(y)
    mask0 = y == labels[0]
    mask1 = y == labels[1]
    n0 = y[mask0].size
    n1 = y[mask1].size
    # Per-class means and standard deviations; nan_to_num guards against
    # empty-class slices producing NaN.
    mu0 = np.nan_to_num(np.mean(x[mask0], axis=0))
    mu1 = np.nan_to_num(np.mean(x[mask1], axis=0))
    sigma0 = np.nan_to_num(np.std(x[mask0], axis=0))
    sigma1 = np.nan_to_num(np.std(x[mask1], axis=0))
    # |correlation| of every feature with the target.
    corr_target = np.nan_to_num(np.apply_along_axis(
        lambda col: abs(np.corrcoef(col, y)[0][1]), 0, x))
    # Mean |correlation| of every feature with all other features
    # (diagonal self-correlations excluded).
    corr_features = np.nan_to_num(abs(np.corrcoef(x, rowvar=False)))
    avg_corr_others = (
        corr_features.sum(axis=1)
        - corr_features.diagonal()) / (len(corr_features) - 1)
    numerator = abs(mu0 - mu1)
    denominator = np.sqrt(
        (n0 * np.square(sigma0) + n1 * np.square(sigma1)) / (n0 + n1))
    modificator = corr_target / avg_corr_others
    return np.nan_to_num(numerator / denominator * modificator)
# Registry of univariate scoring measures: maps the public string name to
# the scoring callable (some values, e.g. fit_criterion_measure, are
# defined earlier in this module).
MEASURE_NAMES = {"FitCriterion": fit_criterion_measure,
                 "FRatio": f_ratio_measure,
                 "GiniIndex": gini_index,
                 "SymmetricUncertainty": su_measure,
                 "SpearmanCorr": spearman_corr,
                 "PearsonCorr": pearson_corr,
                 "FechnerCorr": fechner_corr,
                 "KendallCorr": kendall_corr,
                 "ReliefF": reliefF_measure,
                 "Chi2": chi2_measure,
                 "Anova": anova,
                 "LaplacianScore": laplacian_score,
                 "InformationGain": information_gain,
                 "ModifiedTScore": modified_t_score,
                 "Relief": relief_measure}
def select_best_by_value(value):
    """Build a cutting rule keeping features scoring at least ``value``."""
    return _wrapped_partial(__select_by_value, more=True, value=value)
def select_worst_by_value(value):
    """Build a cutting rule keeping features scoring at most ``value``."""
    return _wrapped_partial(__select_by_value, more=False, value=value)
def __select_by_value(scores, value, more=True):
    """Return indices of scores above (more=True) or below the threshold."""
    if more:
        mask = scores >= value
    else:
        mask = scores <= value
    return np.flatnonzero(mask)
def select_k_best(k):
    """Build a cutting rule keeping the k highest-scoring features."""
    return _wrapped_partial(__select_k, reverse=True, k=k)
def select_k_worst(k):
    """Build a cutting rule keeping the k lowest-scoring features."""
    rule = _wrapped_partial(__select_k, k=k)
    return rule
def __select_k(scores, k, reverse=False):
    """Return indices of the k lowest (or, with reverse=True, highest) scores.

    Raises TypeError for a non-integer k and ValueError when k exceeds the
    number of available features.
    """
    if not isinstance(k, int):
        raise TypeError("Number of features should be integer")
    if k > scores.shape[0]:
        raise ValueError(
            "Cannot select %d features with n_features = %d" % (k, len(scores)))
    ranking = np.argsort(scores)
    return ranking[::-1][:k] if reverse else ranking[:k]
def __select_percentage_best(scores, percent):
    """Return indices of the top ``percent`` share of scores."""
    keep = int(scores.shape[0] * percent)
    return __select_k(scores, k=keep, reverse=True)
def select_best_percentage(percent):
    """Build a cutting rule keeping the best ``percent`` share of features."""
    rule = _wrapped_partial(__select_percentage_best, percent=percent)
    return rule
def __select_percentage_worst(scores, percent):
    """Return indices of the bottom ``percent`` share of scores."""
    keep = int(scores.shape[0] * percent)
    return __select_k(scores, k=keep, reverse=False)
def select_worst_percentage(percent):
    """Build a cutting rule keeping the worst ``percent`` share of features."""
    rule = _wrapped_partial(__select_percentage_worst, percent=percent)
    return rule
# Registry of cutting rules: maps the public rule name to the factory
# defined above; each factory returns a callable that maps a score array
# to the indices of the selected features.
CR_NAMES = {"Best by value": select_best_by_value,
            "Worst by value": select_worst_by_value,
            "K best": select_k_best,
            "K worst": select_k_worst,
            "Worst by percentage": select_worst_percentage,
            "Best by percentage": select_best_percentage}
def qpfs_filter(X, y, r=None, sigma=None, solv='quadprog', fn=pearson_corr):
    """Performs Quadratic Programming Feature Selection algorithm.

    Note: this realization requires labels to start from 1 and be numerical.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The input samples.
    y : array-like, shape (n_samples,)
        The classes for the samples.
    r : int
        The number of samples to be used in Nystrom optimization.
    sigma : double
        The threshold for eigenvalues to be used in solving QP optimization.
    solv : string, default
        The name of qp solver according to
        qpsolvers(https://pypi.org/project/qpsolvers/) naming. Note quadprog
        is used by default.
    fn : function(array, array), default
        The function to count correlation, for example Pearson correlation
        or mutual information. Note Pearson correlation is used by default.

    Returns
    -------
    array-like, shape (n_features,) : the ranks of features in dataset, with
    rank increase, feature relevance increases and redundancy decreases.

    See Also
    --------
    http://www.jmlr.org/papers/volume11/rodriguez-lujan10a/rodriguez-lujan10a.pdf

    Examples
    --------
    >>> from ITMO_FS.filters.univariate import qpfs_filter
    >>> from sklearn.datasets import make_classification
    >>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
    ... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
    >>> y = np.array([1, 3, 2, 1, 2])
    >>> ranks = qpfs_filter(x, y)
    >>> print(ranks)
    """
    # Delegates the whole computation to the shared QPFS implementation.
    return qpfs_body(X, y, fn, r=r, sigma=sigma, solv=solv)
| [
"numpy.random.default_rng",
"sklearn.metrics.pairwise.pairwise_distances",
"numpy.argsort",
"numpy.array",
"numpy.var",
"math.exp",
"numpy.arange",
"numpy.mean",
"scipy.sparse.lil_matrix",
"numpy.flatnonzero",
"sklearn.neighbors.NearestNeighbors",
"sklearn.preprocessing.MinMaxScaler",
"numpy... | [((589, 619), 'functools.partial', 'partial', (['func', '*args'], {}), '(func, *args, **kwargs)\n', (596, 619), False, 'from functools import partial, update_wrapper\n'), ((624, 658), 'functools.update_wrapper', 'update_wrapper', (['partial_func', 'func'], {}), '(partial_func, func)\n', (638, 658), False, 'from functools import partial, update_wrapper\n'), ((1990, 2002), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1999, 2002), True, 'import numpy as np\n'), ((3426, 3458), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (3435, 3458), True, 'import numpy as np\n'), ((3537, 3573), 'numpy.apply_along_axis', 'np.apply_along_axis', (['__F_ratio', '(0)', 'x'], {}), '(__F_ratio, 0, x)\n', (3556, 3573), True, 'import numpy as np\n'), ((5181, 5213), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (5190, 5213), True, 'import numpy as np\n'), ((5294, 5327), 'numpy.apply_along_axis', 'np.apply_along_axis', (['__gini', '(0)', 'x'], {}), '(__gini, 0, x)\n', (5313, 5327), True, 'import numpy as np\n'), ((6732, 6763), 'numpy.apply_along_axis', 'np.apply_along_axis', (['__SU', '(0)', 'x'], {}), '(__SU, 0, x)\n', (6751, 6763), True, 'import numpy as np\n'), ((7958, 7999), 'numpy.apply_along_axis', 'np.apply_along_axis', (['__kendall_corr', '(0)', 'x'], {}), '(__kendall_corr, 0, x)\n', (7977, 7999), True, 'import numpy as np\n'), ((11394, 11426), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (11403, 11426), True, 'import numpy as np\n'), ((11434, 11463), 'numpy.any', 'np.any', (['(counts <= k_neighbors)'], {}), '(counts <= k_neighbors)\n', (11440, 11463), True, 'import numpy as np\n'), ((11802, 11839), 'sklearn.metrics.pairwise.pairwise_distances', 'pairwise_distances', (['x', 'x', '"""manhattan"""'], {}), "(x, x, 'manhattan')\n", (11820, 11839), False, 'from sklearn.metrics.pairwise import pairwise_distances, 
euclidean_distances\n'), ((11855, 11875), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (11864, 11875), True, 'import numpy as np\n'), ((12166, 12197), 'numpy.sum', 'np.sum', (['hits_diffs'], {'axis': '(0, 1)'}), '(hits_diffs, axis=(0, 1))\n', (12172, 12197), True, 'import numpy as np\n'), ((12325, 12357), 'numpy.sum', 'np.sum', (['misses_sum_diffs'], {'axis': '(0)'}), '(misses_sum_diffs, axis=0)\n', (12331, 12357), True, 'import numpy as np\n'), ((14156, 14176), 'numpy.zeros', 'np.zeros', (['x.shape[1]'], {}), '(x.shape[1])\n', (14164, 14176), True, 'import numpy as np\n'), ((14199, 14231), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (14208, 14231), True, 'import numpy as np\n'), ((14641, 14688), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['x_normalized', 'x_normalized'], {}), '(x_normalized, x_normalized)\n', (14660, 14688), False, 'from sklearn.metrics.pairwise import pairwise_distances, euclidean_distances\n'), ((15346, 15377), 'numpy.sum', 'np.sum', (['hits_diffs'], {'axis': '(0, 1)'}), '(hits_diffs, axis=(0, 1))\n', (15352, 15377), True, 'import numpy as np\n'), ((15385, 15418), 'numpy.sum', 'np.sum', (['misses_diffs'], {'axis': '(0, 1)'}), '(misses_diffs, axis=(0, 1))\n', (15391, 15418), True, 'import numpy as np\n'), ((17151, 17183), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (17160, 17183), True, 'import numpy as np\n'), ((17291, 17324), 'numpy.apply_along_axis', 'np.apply_along_axis', (['__chi2', '(0)', 'x'], {}), '(__chi2, 0, x)\n', (17310, 17324), True, 'import numpy as np\n'), ((20893, 20928), 'numpy.apply_along_axis', 'np.apply_along_axis', (['rankdata', '(0)', 'x'], {}), '(rankdata, 0, x)\n', (20912, 20928), True, 'import numpy as np\n'), ((20943, 20954), 'scipy.stats.rankdata', 'rankdata', (['y'], {}), '(y)\n', (20951, 20954), False, 'from scipy.stats import rankdata\n'), ((24721, 24734), 
'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (24728, 24734), True, 'import numpy as np\n'), ((24889, 24899), 'numpy.diag', 'np.diag', (['F'], {}), '(F)\n', (24896, 24899), True, 'import numpy as np\n'), ((26032, 26081), 'numpy.apply_along_axis', 'np.apply_along_axis', (['conditional_entropy', '(0)', 'x', 'y'], {}), '(conditional_entropy, 0, x, y)\n', (26051, 26081), True, 'import numpy as np\n'), ((28375, 28386), 'numpy.array', 'np.array', (['f'], {}), '(f)\n', (28383, 28386), True, 'import numpy as np\n'), ((29327, 29339), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (29336, 29339), True, 'import numpy as np\n'), ((29444, 29479), 'numpy.mean', 'np.mean', (['x[y == classes[0]]'], {'axis': '(0)'}), '(x[y == classes[0]], axis=0)\n', (29451, 29479), True, 'import numpy as np\n'), ((29498, 29524), 'numpy.nan_to_num', 'np.nan_to_num', (['mean_class0'], {}), '(mean_class0)\n', (29511, 29524), True, 'import numpy as np\n'), ((29543, 29578), 'numpy.mean', 'np.mean', (['x[y == classes[1]]'], {'axis': '(0)'}), '(x[y == classes[1]], axis=0)\n', (29550, 29578), True, 'import numpy as np\n'), ((29597, 29623), 'numpy.nan_to_num', 'np.nan_to_num', (['mean_class1'], {}), '(mean_class1)\n', (29610, 29623), True, 'import numpy as np\n'), ((29642, 29676), 'numpy.std', 'np.std', (['x[y == classes[0]]'], {'axis': '(0)'}), '(x[y == classes[0]], axis=0)\n', (29648, 29676), True, 'import numpy as np\n'), ((29694, 29719), 'numpy.nan_to_num', 'np.nan_to_num', (['std_class0'], {}), '(std_class0)\n', (29707, 29719), True, 'import numpy as np\n'), ((29737, 29771), 'numpy.std', 'np.std', (['x[y == classes[1]]'], {'axis': '(0)'}), '(x[y == classes[1]], axis=0)\n', (29743, 29771), True, 'import numpy as np\n'), ((29789, 29814), 'numpy.nan_to_num', 'np.nan_to_num', (['std_class1'], {}), '(std_class1)\n', (29802, 29814), True, 'import numpy as np\n'), ((29939, 29965), 'numpy.nan_to_num', 'np.nan_to_num', (['corr_with_y'], {}), '(corr_with_y)\n', (29952, 29965), True, 'import numpy as 
np\n'), ((30047, 30078), 'numpy.nan_to_num', 'np.nan_to_num', (['corr_with_others'], {}), '(corr_with_others)\n', (30060, 30078), True, 'import numpy as np\n'), ((30596, 30627), 'numpy.nan_to_num', 'np.nan_to_num', (['modified_t_score'], {}), '(modified_t_score)\n', (30609, 30627), True, 'import numpy as np\n'), ((32180, 32198), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (32190, 32198), True, 'import numpy as np\n'), ((2014, 2051), 'numpy.apply_along_axis', 'np.apply_along_axis', (['count_hits', '(0)', 'x'], {}), '(count_hits, 0, x)\n', (2033, 2051), True, 'import numpy as np\n'), ((3014, 3030), 'numpy.mean', 'np.mean', (['feature'], {}), '(feature)\n', (3021, 3030), True, 'import numpy as np\n'), ((4675, 4713), 'numpy.unique', 'np.unique', (['feature'], {'return_counts': '(True)'}), '(feature, return_counts=True)\n', (4684, 4713), True, 'import numpy as np\n'), ((5250, 5280), 'numpy.square', 'np.square', (['(counts / x.shape[0])'], {}), '(counts / x.shape[0])\n', (5259, 5280), True, 'import numpy as np\n'), ((8813, 8823), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (8820, 8823), True, 'import numpy as np\n'), ((8840, 8858), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (8847, 8858), True, 'import numpy as np\n'), ((12761, 12807), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (12772, 12807), True, 'import numpy as np\n'), ((16609, 16647), 'numpy.unique', 'np.unique', (['feature'], {'return_counts': '(True)'}), '(feature, return_counts=True)\n', (16618, 16647), True, 'import numpy as np\n'), ((22125, 22143), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (22132, 22143), True, 'import numpy as np\n'), ((22160, 22170), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (22167, 22170), True, 'import numpy as np\n'), ((24386, 24442), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 
'k_neighbors', 'metric': 'metric'}), '(n_neighbors=k_neighbors, metric=metric)\n', (24402, 24442), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((27472, 27484), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (27481, 27484), True, 'import numpy as np\n'), ((29994, 30022), 'numpy.corrcoef', 'np.corrcoef', (['x'], {'rowvar': '(False)'}), '(x, rowvar=False)\n', (30005, 30022), True, 'import numpy as np\n'), ((31652, 31683), 'numpy.flatnonzero', 'np.flatnonzero', (['(scores >= value)'], {}), '(scores >= value)\n', (31666, 31683), True, 'import numpy as np\n'), ((31709, 31740), 'numpy.flatnonzero', 'np.flatnonzero', (['(scores <= value)'], {}), '(scores <= value)\n', (31723, 31740), True, 'import numpy as np\n'), ((1624, 1643), 'numpy.mean', 'np.mean', (['splits[cl]'], {}), '(splits[cl])\n', (1631, 1643), True, 'import numpy as np\n'), ((1683, 1701), 'numpy.var', 'np.var', (['splits[cl]'], {}), '(splits[cl])\n', (1689, 1701), True, 'import numpy as np\n'), ((8877, 8901), 'numpy.sign', 'np.sign', (['(x_dev.T * y_dev)'], {}), '(x_dev.T * y_dev)\n', (8884, 8901), True, 'import numpy as np\n'), ((14600, 14614), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (14612, 14614), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((14703, 14738), 'numpy.random.default_rng', 'np.random.default_rng', (['random_state'], {}), '(random_state)\n', (14724, 14738), True, 'import numpy as np\n'), ((16736, 16791), 'numpy.array', 'np.array', (['[values_map[val] for val in feature[y == cl]]'], {}), '([values_map[val] for val in feature[y == cl]])\n', (16744, 16791), True, 'import numpy as np\n'), ((16836, 16906), 'numpy.vectorize', 'np.vectorize', (['(lambda cl: prior_probs[cl] * counts)'], {'signature': '"""()->(1)"""'}), "(lambda cl: prior_probs[cl] * counts, signature='()->(1)')\n", (16848, 16906), True, 'import numpy as np\n'), ((22312, 22328), 'numpy.sum', 'np.sum', (['sq_dev_y'], {}), '(sq_dev_y)\n', (22318, 22328), True, 'import numpy 
as np\n'), ((22331, 22355), 'numpy.sum', 'np.sum', (['sq_dev_x'], {'axis': '(0)'}), '(sq_dev_x, axis=0)\n', (22337, 22355), True, 'import numpy as np\n'), ((24304, 24322), 'scipy.sparse.lil_matrix', 'lil_matrix', (['(n, n)'], {}), '((n, n))\n', (24314, 24322), False, 'from scipy.sparse import lil_matrix\n'), ((24353, 24369), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (24361, 24369), True, 'import numpy as np\n'), ((27436, 27448), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (27445, 27448), True, 'import numpy as np\n'), ((11283, 11319), 'numpy.sum', 'np.sum', (['misses_diffs_classes'], {'axis': '(1)'}), '(misses_diffs_classes, axis=1)\n', (11289, 11319), True, 'import numpy as np\n'), ((11660, 11676), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (11668, 11676), True, 'import numpy as np\n'), ((12843, 12861), 'numpy.amax', 'np.amax', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (12850, 12861), True, 'import numpy as np\n'), ((12864, 12882), 'numpy.amin', 'np.amin', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (12871, 12882), True, 'import numpy as np\n'), ((17106, 17122), 'numpy.square', 'np.square', (['(e - n)'], {}), '(e - n)\n', (17115, 17122), True, 'import numpy as np\n'), ((24646, 24689), 'math.exp', 'exp', (['(-distances[i][j] * distances[i][j] / t)'], {}), '(-distances[i][j] * distances[i][j] / t)\n', (24649, 24689), False, 'from math import exp\n'), ((7778, 7815), 'numpy.sign', 'np.sign', (['(feature[i] - feature[i + 1:])'], {}), '(feature[i] - feature[i + 1:])\n', (7785, 7815), True, 'import numpy as np\n'), ((7847, 7872), 'numpy.sign', 'np.sign', (['(y[i] - y[i + 1:])'], {}), '(y[i] - y[i + 1:])\n', (7854, 7872), True, 'import numpy as np\n'), ((16990, 17040), 'numpy.bincount', 'np.bincount', (['splits[cl]'], {'minlength': 'values.shape[0]'}), '(splits[cl], minlength=values.shape[0])\n', (17001, 17040), True, 'import numpy as np\n'), ((30333, 30354), 'numpy.square', 'np.square', (['std_class0'], {}), '(std_class0)\n', 
(30342, 30354), True, 'import numpy as np\n'), ((30371, 30392), 'numpy.square', 'np.square', (['std_class1'], {}), '(std_class1)\n', (30380, 30392), True, 'import numpy as np\n'), ((29883, 29906), 'numpy.corrcoef', 'np.corrcoef', (['feature', 'y'], {}), '(feature, y)\n', (29894, 29906), True, 'import numpy as np\n'), ((3321, 3339), 'numpy.var', 'np.var', (['splits[cl]'], {}), '(splits[cl])\n', (3327, 3339), True, 'import numpy as np\n'), ((3170, 3189), 'numpy.mean', 'np.mean', (['splits[cl]'], {}), '(splits[cl])\n', (3177, 3189), True, 'import numpy as np\n'), ((4961, 5009), 'numpy.unique', 'np.unique', (['y[feature == val]'], {'return_counts': '(True)'}), '(y[feature == val], return_counts=True)\n', (4970, 5009), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn import BasicLSTMCell, LSTMStateTuple
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
# Print numpy arrays in plain decimal notation (no 1e-05 scientific form),
# which matters because remove_weights() embeds str(ndarray) into the dump.
np.set_printoptions(suppress=True)
class MyLSTMCell(BasicLSTMCell):
    """BasicLSTMCell whose kernel and bias are created from externally
    supplied numpy arrays (weights exported from another framework) rather
    than being randomly initialized.

    The gate split in call() is (i, f, j, o) -- see the inline note there
    about swapping i and f when porting PyTorch weights.
    """
    def __init__(self, num_units, forget_bias=0,
                 state_is_tuple=True, activation=None,
                 reuse=None, name=None, weight_initializer=None, bias_initializer=None):
        # forget_bias defaults to 0 here (TF's BasicLSTMCell default is 1.0)
        # -- presumably because the imported bias already includes the
        # trained forget-gate bias; confirm against the weight exporter.
        super(MyLSTMCell, self).__init__(num_units, forget_bias, state_is_tuple, activation, reuse, name)
        self.weight_initializer = weight_initializer  # ndarray used verbatim as the kernel
        self.bias_initializer = bias_initializer  # ndarray used verbatim as the bias
        self._kernel = None
        self._bias = None
    def compute_output_shape(self, input_shape):
        # Delegates to the parent; note the super() result is discarded, so
        # this method effectively returns None.
        super(MyLSTMCell, self).compute_output_shape(input_shape)
    def build(self, inputs_shape):
        """Create the kernel/bias variables from the stored initializers.

        The variable shapes come from the imported arrays themselves, not
        from inputs_shape; inputs_shape is only validated for a known depth.
        """
        if inputs_shape[1].value is None:
            raise ValueError('Expected inputs.shape[-1] to be known, saw shape: %s'
                             % inputs_shape)
        self._kernel = self.add_variable('kernel',
                                         shape=self.weight_initializer.shape,
                                         initializer=tf.constant_initializer(self.weight_initializer))
        self._bias = self.add_variable('bias',
                                       shape=self.bias_initializer.shape,
                                       initializer=tf.constant_initializer(self.bias_initializer))
        self.built = True
    def call(self, inputs, state):
        """Run one LSTM step; returns (new_h, new_state)."""
        sigmoid = math_ops.sigmoid
        # Parameters of gates are concatenated into one multiply for efficiency.
        if self._state_is_tuple:
            c, h = state
        else:
            c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)
        gate_inputs = math_ops.matmul(
            array_ops.concat([inputs, h], 1), self._kernel)
        gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
        # TensorFlow implementation
        # i, j, f, o = array_ops.split(
        #     value=gate_inputs, num_or_size_splits=4, axis=one)
        # PyTorch version
        # in_gate, forget_gate, cell_gate, out_gate = gates.chunk(4, 1)
        # need to adjust PyTorch weights, switch i and f
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        i, f, j, o = array_ops.split(
            value=gate_inputs, num_or_size_splits=4, axis=1)
        # Note that using `add` and `multiply` instead of `+` and `*` gives a
        # performance improvement. So using those at the cost of readability.
        add = math_ops.add
        multiply = math_ops.multiply
        new_c = add(multiply(c, sigmoid(f)),
                    multiply(sigmoid(i), self._activation(j)))
        new_h = multiply(self._activation(new_c), sigmoid(o))
        if self._state_is_tuple:
            new_state = LSTMStateTuple(new_c, new_h)
        else:
            new_state = array_ops.concat([new_c, new_h], 1)
        return new_h, new_state
def length(sequence):
    """Count, per row, how many entries of *sequence* equal zero.

    NOTE(review): callers pass the padding mask here, so counting zeros
    presumably yields the number of valid (non-pad) tokens -- confirm the
    mask convention (mask == 1 at pad positions) against the data pipeline.
    """
    with tf.variable_scope('pad'):
        zero_flags = tf.cast(tf.equal(sequence, 0), tf.int32)
        counts = tf.reduce_sum(zero_flags, 1)
    return counts
def ztg(data, k):
    """Zero out entries of the square matrix *data* outside an upper band.

    Entry (i, j) is kept only when i <= j and j - i < k; everything else is
    set to zero.  In decode() this suppresses span scores with end before
    start ("negative length") and spans wider than the band.
    """
    n = data.get_shape().as_list()[0]
    # mask_u[i][j] == (i >= j + 1), i.e. True strictly below the diagonal.
    mask_u = tf.greater_equal(tf.tile(tf.reshape(tf.range(n), [n, 1]), [1, n]),
                              tf.tile(tf.reshape(tf.range(1, n + 1), [1, n]), [n, 1]))
    # mask_l[i][j] == (i >= j - k + 1), i.e. True on/inside the k-wide band.
    mask_l = tf.greater_equal(tf.tile(tf.reshape(tf.range(n), [n, 1]), [1, n]),
                              tf.tile(tf.reshape(tf.range(-k + 1, n - k + 1), [1, n]), [n, 1]))
    # Zero the strictly-lower triangle (keeps upper triangle incl. diagonal).
    tri = tf.where(mask_u, tf.zeros(data.get_shape(), dtype=data.dtype), data)  # tri_u
    # Within the remaining upper triangle, zero everything past the band.
    tri2 = tf.where(mask_l, tri, tf.zeros(data.get_shape(), dtype=data.dtype))  # tri_l
    return tri2
def decode(answer_scores):
    """Turn per-example start/end score vectors into best answer spans.

    Each element of *answer_scores* is split into start and end halves; the
    outer product gives a span-score matrix, which ztg() restricts to valid
    spans of bounded width before taking the argmax.

    Returns a list of [start_index, end_index, score] triples (float tensors).
    """
    decoded = []
    for pair in answer_scores:
        start_half, end_half = tf.split(pair, 2)
        span_scores = tf.matmul(tf.expand_dims(start_half, 1), tf.expand_dims(end_half, 0))
        # Suppress spans with end < start or wider than the allowed band.
        span_scores = ztg(span_scores, 14)
        flat_argmax = tf.argmax(tf.reshape(span_scores, [-1]))
        side = span_scores.get_shape().as_list()[0]
        # Recover the 2-D coordinates of the flattened argmax.
        start_idx = tf.cast(tf.div(flat_argmax, side), dtype=tf.float32)
        end_idx = tf.cast(tf.mod(flat_argmax, side), dtype=tf.float32)
        best_score = tf.reduce_max(span_scores)
        decoded.append([start_idx, end_idx, best_score])
    return decoded
def _lstm_params(weights, scope, layer, suffix=''):
    """Collapse one LSTM direction's four PyTorch weight blobs into the
    single (kernel, bias) pair expected by MyLSTMCell.

    PyTorch stores input->hidden and hidden->hidden weights and biases
    separately; TensorFlow's cell wants one concatenated, transposed kernel
    and one summed bias.  *suffix* is '' for the forward direction and
    '_reverse' for the backward one.
    """
    w_input = weights['{}.rnns.{}.weight_ih_l0{}'.format(scope, layer, suffix)]
    w_hidden = weights['{}.rnns.{}.weight_hh_l0{}'.format(scope, layer, suffix)]
    kernel = np.concatenate((w_input, w_hidden), axis=1).transpose()
    b_input = weights['{}.rnns.{}.bias_ih_l0{}'.format(scope, layer, suffix)]
    b_hidden = weights['{}.rnns.{}.bias_hh_l0{}'.format(scope, layer, suffix)]
    return kernel, np.add(b_input, b_hidden)


def stack_bi_rnn(input_data, hidden_size, num_layers, weights, scope, mask=None):
    """Run *num_layers* stacked bidirectional LSTM layers over *input_data*.

    Args:
        input_data: batch of sequences (rank-3 tensor; presumably
            (batch, time, features) -- confirm with callers).
        hidden_size: per-direction LSTM width.
        num_layers: number of stacked bidirectional layers.
        weights: dict of numpy arrays keyed in PyTorch naming style
            ('{scope}.rnns.{layer}.weight_ih_l0' etc.).
        scope: weight-key prefix; also selects the TF variable scope
            ('q_rnn' for question, 'p_rnn' otherwise).
        mask: optional padding mask; when given, length(mask) supplies
            per-row sequence lengths to the RNN.

    Returns:
        The concatenation (along the feature axis) of every layer's
        bidirectional output.
    """
    rnn_scope = 'q_rnn' if scope.startswith('q') else 'p_rnn'
    with tf.variable_scope(rnn_scope):
        if mask is not None:
            seq_len = length(mask)
        else:
            # No mask: treat every row as full width.
            seq_len = tf.ones(tf.shape(input_data)[0], dtype=tf.int32) * tf.shape(input_data)[1]
        outputs = []
        last_output = input_data
        for k in range(num_layers):
            # Forward/backward weight assembly deduplicated via _lstm_params.
            fw_weights, fw_bias = _lstm_params(weights, scope, k)
            bw_weights, bw_bias = _lstm_params(weights, scope, k, suffix='_reverse')
            with tf.variable_scope('layer_{}'.format(str(k))):
                fw_cell = MyLSTMCell(num_units=hidden_size, name='basic_lstm_cell',
                                     weight_initializer=fw_weights, bias_initializer=fw_bias)
                bw_cell = MyLSTMCell(num_units=hidden_size, name='basic_lstm_cell',
                                     weight_initializer=bw_weights, bias_initializer=bw_bias)
                output, _ = tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, last_output,
                                                            dtype=tf.float32,
                                                            sequence_length=seq_len,
                                                            scope='bi_rnn')
                last_output = tf.concat(output, 2)
            outputs.append(last_output)
        out = tf.concat(outputs, axis=2)
    return out
def bi_linear_seq_attn(bi_w, bi_bias, x, y, x_mask):
    """Score every position of sequence *x* against vector *y* through a
    bilinear form (x . (W y + b)), exponentiate the logits, and zero out
    positions where x_mask != 0.

    bi_w / bi_bias are numpy initializers for the bilinear weight and bias.
    Returns the masked, exponentiated scores (unnormalized).
    """
    with tf.variable_scope('bi_attn'):
        weight_var = tf.get_variable('weights', initializer=bi_w)
        bias_var = tf.get_variable('bias', initializer=bi_bias)
    with tf.variable_scope('weighted'):
        projected = tf.add(tf.matmul(y, weight_var), bias_var)
    with tf.variable_scope('linear'):
        logits = tf.matmul(x, tf.expand_dims(projected, 2))
        logits = tf.squeeze(logits, 2, name='alpha')
    with tf.variable_scope('score'):
        exp_scores = tf.exp(logits)
        # 1.0 where the mask is zero (valid position), 0.0 elsewhere.
        keep = tf.cast(tf.equal(x_mask, 0), dtype=tf.float32)
        masked_scores = tf.multiply(exp_scores, keep)
    return masked_scores
class RnnReader(object):
    """DrQA-style document reader rebuilt in TensorFlow from weights that
    were exported (as .npz/.npy files) from another framework."""
    def __init__(self, arg, weights_file):
        """Load hyper-parameters from *arg* (.npy dict) and model weights
        from *weights_file* (.npz archive), and open a TF session."""
        # noinspection PyTypeChecker
        opt = dict(np.load(arg).item())
        self.args = opt
        self.emb_shape = opt['embedding_shape']
        self.hidden_size = opt['hidden_size']
        self.doc_layers = opt['doc_layers']
        self.question_layers = opt['question_layers']
        self.weights = np.load(weights_file)  # must end with .npz
        # Pre-fetch the attention weights used outside the RNN stacks.
        self.qemb_match_weights = self.weights['qemb_match.linear.weight']
        self.qemb_match_bias = self.weights['qemb_match.linear.bias']
        self.self_attn_weights = self.weights['self_attn.linear.weight']
        self.self_attn_bias = self.weights['self_attn.linear.bias']
        self.start_attn_weights = self.weights['start_attn.linear.weight']
        self.start_attn_bias = self.weights['start_attn.linear.bias']
        self.end_attn_weights = self.weights['end_attn.linear.weight']
        self.end_attn_bias = self.weights['end_attn.linear.bias']
        self.sess = tf.Session()
    def seq_attn_match(self, x, y, input_size):
        """Attend each position of *x* over sequence *y*.

        Both sequences are projected through the shared qemb_match linear
        layer + ReLU, scored against each other, softmax-normalized over y,
        and used to take a weighted average of y for every x position.
        Returns a tensor shaped like x (same length, y's feature mix).
        """
        seq_weights = tf.get_variable('weights', initializer=self.qemb_match_weights)
        b = tf.get_variable('bias', initializer=self.qemb_match_bias)
        # Project vectors
        with tf.variable_scope('project_x'):
            x_re = tf.reshape(x, [-1, input_size])
            x_pj = tf.matmul(x_re, seq_weights, transpose_b=True) + b
            x_pj = tf.nn.relu(x_pj)
            x_pj = tf.reshape(x_pj, [-1, tf.shape(x)[1], input_size])
        with tf.variable_scope('project_y'):
            y_re = tf.reshape(y, [-1, input_size])
            y_pj = tf.matmul(y_re, seq_weights, transpose_b=True) + b
            y_pj = tf.nn.relu(y_pj)
            y_pj = tf.reshape(y_pj, [-1, tf.shape(y)[1], input_size])
        with tf.variable_scope('compute_scores'):
            # Compute scores
            scores = tf.matmul(x_pj, y_pj, transpose_b=True)
        with tf.variable_scope('normalize'):
            # Normalize with softmax
            alpha_flat = tf.reshape(scores, [-1, tf.shape(y)[1]])
            alpha_flat = tf.nn.softmax(alpha_flat)
        with tf.variable_scope('weighted'):
            # Take weighted average
            alpha = tf.reshape(alpha_flat, [-1, tf.shape(x)[1], tf.shape(y)[1]])
            weighted_average = tf.matmul(alpha, y)
        return weighted_average
    def linear_seq_attn(self, x):
        """Self-attention pooling: score each position of *x* with the
        self_attn linear layer, softmax over time (computed manually as
        exp / sum-of-exp), and return the weighted sum -- one vector per
        batch row."""
        x_weight = tf.get_variable('weights', initializer=self.self_attn_weights)
        x_bias = tf.get_variable('bias', initializer=self.self_attn_bias)
        with tf.variable_scope('matmul'):
            x_flat = tf.reshape(x, [-1, tf.shape(x)[2]])
            scores = tf.reshape(tf.matmul(x_flat, x_weight, transpose_b=True) + x_bias, [-1, tf.shape(x)[1]])
        with tf.variable_scope('score'):
            scores = tf.exp(scores)
            x_sum = tf.expand_dims(tf.reduce_sum(scores, axis=1), axis=1)
        with tf.variable_scope('weighted'):
            scores = tf.expand_dims(tf.divide(scores, x_sum), axis=1)
            out = tf.squeeze(tf.matmul(scores, x), axis=1)
        return out
    def network(self, x1_emb, x1_mask, x2_emb):
        """Build the full reader graph.

        Args (assumed shapes -- confirm with __main__'s placeholders):
            x1_emb: document token embeddings, presumably (batch, doc_len, emb).
            x1_mask: document padding mask (nonzero at pad positions,
                per length()/bi_linear_seq_attn usage).
            x2_emb: question token embeddings.

        Returns the concatenated [start_scores, end_scores] tensor named
        'answer/scores'; span decoding happens outside the graph (decode()).
        """
        with tf.variable_scope('q_seq_attn'):
            x2_weighted_emb = self.seq_attn_match(x1_emb, x2_emb, self.emb_shape[1])
        with tf.variable_scope('p_rnn_input'):
            doc_rnn_input_list = [x1_emb, x2_weighted_emb]
            doc_rnn_input = tf.concat(doc_rnn_input_list, axis=2)
        # self.np_rnn(x2_emb.numpy())
        # Encode the question with its own stacked bi-RNN, then pool it
        # into a single vector via self-attention.
        q_rnn_scope = 'question_rnn'
        q_rnn_weights = {k: v for k, v in self.weights.items() if k.startswith(q_rnn_scope)}
        question_hidden = stack_bi_rnn(input_data=x2_emb,
                                      hidden_size=self.hidden_size,
                                      num_layers=self.question_layers,
                                      weights=q_rnn_weights,
                                      scope=q_rnn_scope)
        with tf.variable_scope('q_self_attn'):
            q_weighted_hidden = self.linear_seq_attn(question_hidden)
        # Encode the document (original embeddings + question-aligned
        # embeddings) with the document bi-RNN stack.
        doc_rnn_scope = 'doc_rnn'
        doc_rnn_weights = {k: v for k, v in self.weights.items() if k.startswith(doc_rnn_scope)}
        doc_hidden = stack_bi_rnn(input_data=doc_rnn_input,
                                 hidden_size=self.hidden_size,
                                 num_layers=self.doc_layers,
                                 weights=doc_rnn_weights,
                                 scope=doc_rnn_scope,
                                 mask=x1_mask)
        # Bilinear attention of document states against the pooled question
        # vector gives independent start and end scores per position.
        with tf.variable_scope('start'):
            start_scores = bi_linear_seq_attn(self.start_attn_weights.transpose(), self.start_attn_bias,
                                               doc_hidden, q_weighted_hidden, x1_mask)
        with tf.variable_scope('end'):
            end_scores = bi_linear_seq_attn(self.end_attn_weights.transpose(), self.end_attn_bias,
                                           doc_hidden, q_weighted_hidden, x1_mask)
        with tf.variable_scope('answer'):
            final_answer = tf.concat([start_scores, end_scores], 1, name='scores')
        # batches = start_scores.get_shape().as_list()[0]
        # idx = tf.constant(0)
        #
        # def cond(_s, _e, idx_, _a):
        #     return idx_ < (batches or 1)
        #
        # answers = tf.constant([-1, -1, -1.0], dtype=tf.float32, shape=[1, 3])
        # final_results = tf.while_loop(cond, decode_one, [start_scores, end_scores, idx, answers],
        #                               shape_invariants=[start_scores.get_shape(), end_scores.get_shape(),
        #                                                 idx.get_shape(), tf.TensorShape([None, 3])])
        # final_answer = tf.identity(final_results[-1], name='answer')
        return final_answer
def remove_weights(inference_graph):
    """Return a copy of *inference_graph* where every node carrying packed
    tensor content has that content replaced by the (numpy) string repr of
    its values -- producing a human-readable, weight-stripped graph dump.
    All other nodes are copied verbatim.
    """
    stripped_def = graph_pb2.GraphDef()
    replaced_count = 0
    for node in inference_graph.node:
        new_node = node_def_pb2.NodeDef()
        tensor_proto = node.attr['value'].tensor
        if not tensor_proto.tensor_content:
            # No packed constant payload: keep the node untouched.
            new_node.CopyFrom(node)
        else:
            new_node.op = node.op
            new_node.name = node.name
            new_node.attr['dtype'].CopyFrom(node.attr['dtype'])
            as_array = tensor_util.MakeNdarray(tensor_proto)
            new_node.attr['value'].CopyFrom(attr_value_pb2.AttrValue(s=str(as_array).encode()))
            replaced_count += 1
        stripped_def.node.extend([new_node])
    stripped_def.library.CopyFrom(inference_graph.library)
    print('set %d weights to 0.' % replaced_count)
    return stripped_def
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-w', '--weights_file', type=str, default='data/rnn_reader.npz')
    parser.add_argument('-e', '--embedding_file', type=str, default='data/emb.npz')
    parser.add_argument('-a', '--args', type=str, default='data/args.npy')
    parser.add_argument('-t', '--test_ex', type=str, default='data/ex.npz')
    parser.add_argument('-egr', '--eager', action='store_true')
    args = parser.parse_args()
    # Load one saved example; the last key is skipped.
    # NOTE(review): ex_input.keys()[:-1] only works where keys() returns a
    # list (Python 2 / old numpy NpzFile) -- confirm the target environment.
    ex_input = np.load(args.test_ex)
    ex_inputs = [ex_input[k] for k in ex_input.keys()[:-1]]
    emb = np.load(args.embedding_file)['emb']
    # Replace token-id arrays (document at index 0, question at index 3)
    # with their embedding vectors.
    ex_inputs[0] = np.array([emb[i] for i in ex_inputs[0]])
    ex_inputs[3] = np.array([emb[i] for i in ex_inputs[3]])
    # (doc embeddings, doc mask, question embeddings) -- the order
    # RnnReader.network(x1_emb, x1_mask, x2_emb) expects.
    inputs = [ex_inputs[0], ex_inputs[2], ex_inputs[3]]
    if args.eager:
        # Eager path: run the network directly and decode spans in Python.
        tf.enable_eager_execution()
        reader = RnnReader(args.args, args.weights_file)
        results = reader.network(*inputs)
        print(results)
        answer = decode(results)
        print(answer)
    else:
        # Graph path: build with placeholders, run once, then freeze the
        # graph with constants and export it to data/drqa.pb.
        reader = RnnReader(args.args, args.weights_file)
        input_names = ['para/emb', 'para/mask', 'q_emb']
        placeholders = [tf.placeholder(tf.as_dtype(k.dtype), shape=[None, None, *k.shape[2:]], name=n)
                        for n, k in zip(input_names, inputs)]
        final_answers = reader.network(*placeholders)
        sess = reader.sess
        # Initialize every variable individually and dump the resulting
        # numpy values to data/drqa_w.npz for later inspection.
        np_weights = dict()
        for var in tf.global_variables():
            sess.run(var.initializer)
            # print(sess.run(var))
            var_name = var.name[:-2]  # strip the trailing ':0'
            print(var_name, var.shape)
            np_weights[var_name] = sess.run(var)
        np.savez_compressed('data/drqa_w', **{k: v for k, v in np_weights.items()})
        sess.run(tf.global_variables_initializer())
        # print(sess.run(tf.report_uninitialized_variables()))
        results = sess.run(final_answers, feed_dict={k: v for k, v in zip(placeholders, inputs)})
        print(results)
        graph = tf.get_default_graph()
        graph_def = graph.as_graph_def()
        output_graph_def = graph_util.convert_variables_to_constants(
            sess,  # The session is used to retrieve the weights
            graph_def,  # The graph_def is used to retrieve the nodes
            output_node_names=['answer/scores']  # The output node names are used to select the useful nodes
        )
        with tf.gfile.GFile('data/drqa.pb', 'wb') as gf:
            gf.write(output_graph_def.SerializeToString())
        # Also write a human-readable text dump with weights stripped out.
        with tf.gfile.FastGFile('data/drqa.pb.txt', 'w') as gf:
            gf.write(str(remove_weights(graph_def)))
        # saver = tf.train.Saver()
        # saver.save(sess, 'data/tf_reader.ckpt')
| [
"tensorflow.div",
"tensorflow.equal",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.get_variable",
"tensorflow.contrib.rnn.LSTMStateTuple",
"tensorflow.shape",
"tensorflow.reduce_sum",
"tensorflow.split",
"tensorflow.multiply",
"tensorflow.enable_eager_execution",
"tensorflow.gfile.Fast... | [((509, 543), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (528, 543), True, 'import numpy as np\n'), ((13733, 13753), 'tensorflow.core.framework.graph_pb2.GraphDef', 'graph_pb2.GraphDef', ([], {}), '()\n', (13751, 13753), False, 'from tensorflow.core.framework import graph_pb2\n'), ((14641, 14666), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14664, 14666), False, 'import argparse\n'), ((15103, 15124), 'numpy.load', 'np.load', (['args.test_ex'], {}), '(args.test_ex)\n', (15110, 15124), True, 'import numpy as np\n'), ((15250, 15290), 'numpy.array', 'np.array', (['[emb[i] for i in ex_inputs[0]]'], {}), '([emb[i] for i in ex_inputs[0]])\n', (15258, 15290), True, 'import numpy as np\n'), ((15310, 15350), 'numpy.array', 'np.array', (['[emb[i] for i in ex_inputs[3]]'], {}), '([emb[i] for i in ex_inputs[3]])\n', (15318, 15350), True, 'import numpy as np\n'), ((2261, 2301), 'tensorflow.python.ops.nn_ops.bias_add', 'nn_ops.bias_add', (['gate_inputs', 'self._bias'], {}), '(gate_inputs, self._bias)\n', (2276, 2301), False, 'from tensorflow.python.ops import nn_ops\n'), ((2692, 2756), 'tensorflow.python.ops.array_ops.split', 'array_ops.split', ([], {'value': 'gate_inputs', 'num_or_size_splits': '(4)', 'axis': '(1)'}), '(value=gate_inputs, num_or_size_splits=4, axis=1)\n', (2707, 2756), False, 'from tensorflow.python.ops import array_ops\n'), ((3387, 3411), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""pad"""'], {}), "('pad')\n", (3404, 3411), True, 'import tensorflow as tf\n'), ((3426, 3447), 'tensorflow.equal', 'tf.equal', (['sequence', '(0)'], {}), '(sequence, 0)\n', (3434, 3447), True, 'import tensorflow as tf\n'), ((3466, 3487), 'tensorflow.cast', 'tf.cast', (['eq', 'tf.int32'], {}), '(eq, tf.int32)\n', (3473, 3487), True, 'import tensorflow as tf\n'), ((3506, 3531), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['length_', '(1)'], {}), '(length_, 1)\n', 
(3519, 3531), True, 'import tensorflow as tf\n'), ((4267, 4286), 'tensorflow.split', 'tf.split', (['scores', '(2)'], {}), '(scores, 2)\n', (4275, 4286), True, 'import tensorflow as tf\n'), ((4724, 4745), 'tensorflow.reduce_max', 'tf.reduce_max', (['scores'], {}), '(scores)\n', (4737, 4745), True, 'import tensorflow as tf\n'), ((4962, 4990), 'tensorflow.variable_scope', 'tf.variable_scope', (['rnn_scope'], {}), '(rnn_scope)\n', (4979, 4990), True, 'import tensorflow as tf\n'), ((7154, 7180), 'tensorflow.concat', 'tf.concat', (['outputs'], {'axis': '(2)'}), '(outputs, axis=2)\n', (7163, 7180), True, 'import tensorflow as tf\n'), ((7260, 7288), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""bi_attn"""'], {}), "('bi_attn')\n", (7277, 7288), True, 'import tensorflow as tf\n'), ((7302, 7346), 'tensorflow.get_variable', 'tf.get_variable', (['"""weights"""'], {'initializer': 'bi_w'}), "('weights', initializer=bi_w)\n", (7317, 7346), True, 'import tensorflow as tf\n'), ((7359, 7403), 'tensorflow.get_variable', 'tf.get_variable', (['"""bias"""'], {'initializer': 'bi_bias'}), "('bias', initializer=bi_bias)\n", (7374, 7403), True, 'import tensorflow as tf\n'), ((8217, 8238), 'numpy.load', 'np.load', (['weights_file'], {}), '(weights_file)\n', (8224, 8238), True, 'import numpy as np\n'), ((8850, 8862), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8860, 8862), True, 'import tensorflow as tf\n'), ((8934, 8997), 'tensorflow.get_variable', 'tf.get_variable', (['"""weights"""'], {'initializer': 'self.qemb_match_weights'}), "('weights', initializer=self.qemb_match_weights)\n", (8949, 8997), True, 'import tensorflow as tf\n'), ((9010, 9067), 'tensorflow.get_variable', 'tf.get_variable', (['"""bias"""'], {'initializer': 'self.qemb_match_bias'}), "('bias', initializer=self.qemb_match_bias)\n", (9025, 9067), True, 'import tensorflow as tf\n'), ((10279, 10341), 'tensorflow.get_variable', 'tf.get_variable', (['"""weights"""'], {'initializer': 'self.self_attn_weights'}), 
"('weights', initializer=self.self_attn_weights)\n", (10294, 10341), True, 'import tensorflow as tf\n'), ((10359, 10415), 'tensorflow.get_variable', 'tf.get_variable', (['"""bias"""'], {'initializer': 'self.self_attn_bias'}), "('bias', initializer=self.self_attn_bias)\n", (10374, 10415), True, 'import tensorflow as tf\n'), ((13847, 13869), 'tensorflow.core.framework.node_def_pb2.NodeDef', 'node_def_pb2.NodeDef', ([], {}), '()\n', (13867, 13869), False, 'from tensorflow.core.framework import node_def_pb2\n'), ((15195, 15223), 'numpy.load', 'np.load', (['args.embedding_file'], {}), '(args.embedding_file)\n', (15202, 15223), True, 'import numpy as np\n'), ((15435, 15462), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (15460, 15462), True, 'import tensorflow as tf\n'), ((16058, 16079), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (16077, 16079), True, 'import tensorflow as tf\n'), ((16616, 16638), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (16636, 16638), True, 'import tensorflow as tf\n'), ((16707, 16806), 'tensorflow.python.framework.graph_util.convert_variables_to_constants', 'graph_util.convert_variables_to_constants', (['sess', 'graph_def'], {'output_node_names': "['answer/scores']"}), "(sess, graph_def,\n output_node_names=['answer/scores'])\n", (16748, 16806), False, 'from tensorflow.python.framework import graph_util\n'), ((2080, 2138), 'tensorflow.python.ops.array_ops.split', 'array_ops.split', ([], {'value': 'state', 'num_or_size_splits': '(2)', 'axis': '(1)'}), '(value=state, num_or_size_splits=2, axis=1)\n', (2095, 2138), False, 'from tensorflow.python.ops import array_ops\n'), ((2191, 2223), 'tensorflow.python.ops.array_ops.concat', 'array_ops.concat', (['[inputs, h]', '(1)'], {}), '([inputs, h], 1)\n', (2207, 2223), False, 'from tensorflow.python.ops import array_ops\n'), ((3219, 3247), 'tensorflow.contrib.rnn.LSTMStateTuple', 'LSTMStateTuple', (['new_c', 
'new_h'], {}), '(new_c, new_h)\n', (3233, 3247), False, 'from tensorflow.contrib.rnn import BasicLSTMCell, LSTMStateTuple\n'), ((3286, 3321), 'tensorflow.python.ops.array_ops.concat', 'array_ops.concat', (['[new_c, new_h]', '(1)'], {}), '([new_c, new_h], 1)\n', (3302, 3321), False, 'from tensorflow.python.ops import array_ops\n'), ((4314, 4340), 'tensorflow.expand_dims', 'tf.expand_dims', (['score_s', '(1)'], {}), '(score_s, 1)\n', (4328, 4340), True, 'import tensorflow as tf\n'), ((4342, 4368), 'tensorflow.expand_dims', 'tf.expand_dims', (['score_e', '(0)'], {}), '(score_e, 0)\n', (4356, 4368), True, 'import tensorflow as tf\n'), ((4500, 4524), 'tensorflow.reshape', 'tf.reshape', (['scores', '[-1]'], {}), '(scores, [-1])\n', (4510, 4524), True, 'import tensorflow as tf\n'), ((4596, 4622), 'tensorflow.div', 'tf.div', (['max_score_idx', 'dim'], {}), '(max_score_idx, dim)\n', (4602, 4622), True, 'import tensorflow as tf\n'), ((4666, 4692), 'tensorflow.mod', 'tf.mod', (['max_score_idx', 'dim'], {}), '(max_score_idx, dim)\n', (4672, 4692), True, 'import tensorflow as tf\n'), ((5723, 5760), 'numpy.add', 'np.add', (['fw_bias_input', 'fw_bias_hidden'], {}), '(fw_bias_input, fw_bias_hidden)\n', (5729, 5760), True, 'import numpy as np\n'), ((6260, 6297), 'numpy.add', 'np.add', (['bw_bias_input', 'bw_bias_hidden'], {}), '(bw_bias_input, bw_bias_hidden)\n', (6266, 6297), True, 'import numpy as np\n'), ((7418, 7447), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""weighted"""'], {}), "('weighted')\n", (7435, 7447), True, 'import tensorflow as tf\n'), ((7506, 7533), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""linear"""'], {}), "('linear')\n", (7523, 7533), True, 'import tensorflow as tf\n'), ((7607, 7639), 'tensorflow.squeeze', 'tf.squeeze', (['xwy', '(2)'], {'name': '"""alpha"""'}), "(xwy, 2, name='alpha')\n", (7617, 7639), True, 'import tensorflow as tf\n'), ((7653, 7679), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""score"""'], {}), 
"('score')\n", (7670, 7679), True, 'import tensorflow as tf\n'), ((7701, 7712), 'tensorflow.exp', 'tf.exp', (['xwy'], {}), '(xwy)\n', (7707, 7712), True, 'import tensorflow as tf\n'), ((7794, 7815), 'tensorflow.multiply', 'tf.multiply', (['alpha', 'z'], {}), '(alpha, z)\n', (7805, 7815), True, 'import tensorflow as tf\n'), ((9107, 9137), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""project_x"""'], {}), "('project_x')\n", (9124, 9137), True, 'import tensorflow as tf\n'), ((9158, 9189), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, input_size]'], {}), '(x, [-1, input_size])\n', (9168, 9189), True, 'import tensorflow as tf\n'), ((9279, 9295), 'tensorflow.nn.relu', 'tf.nn.relu', (['x_pj'], {}), '(x_pj)\n', (9289, 9295), True, 'import tensorflow as tf\n'), ((9380, 9410), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""project_y"""'], {}), "('project_y')\n", (9397, 9410), True, 'import tensorflow as tf\n'), ((9431, 9462), 'tensorflow.reshape', 'tf.reshape', (['y', '[-1, input_size]'], {}), '(y, [-1, input_size])\n', (9441, 9462), True, 'import tensorflow as tf\n'), ((9552, 9568), 'tensorflow.nn.relu', 'tf.nn.relu', (['y_pj'], {}), '(y_pj)\n', (9562, 9568), True, 'import tensorflow as tf\n'), ((9653, 9688), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""compute_scores"""'], {}), "('compute_scores')\n", (9670, 9688), True, 'import tensorflow as tf\n'), ((9740, 9779), 'tensorflow.matmul', 'tf.matmul', (['x_pj', 'y_pj'], {'transpose_b': '(True)'}), '(x_pj, y_pj, transpose_b=True)\n', (9749, 9779), True, 'import tensorflow as tf\n'), ((9794, 9824), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""normalize"""'], {}), "('normalize')\n", (9811, 9824), True, 'import tensorflow as tf\n'), ((9954, 9979), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['alpha_flat'], {}), '(alpha_flat)\n', (9967, 9979), True, 'import tensorflow as tf\n'), ((9994, 10023), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""weighted"""'], {}), "('weighted')\n", 
(10011, 10023), True, 'import tensorflow as tf\n'), ((10173, 10192), 'tensorflow.matmul', 'tf.matmul', (['alpha', 'y'], {}), '(alpha, y)\n', (10182, 10192), True, 'import tensorflow as tf\n'), ((10430, 10457), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""matmul"""'], {}), "('matmul')\n", (10447, 10457), True, 'import tensorflow as tf\n'), ((10640, 10666), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""score"""'], {}), "('score')\n", (10657, 10666), True, 'import tensorflow as tf\n'), ((10689, 10703), 'tensorflow.exp', 'tf.exp', (['scores'], {}), '(scores)\n', (10695, 10703), True, 'import tensorflow as tf\n'), ((10792, 10821), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""weighted"""'], {}), "('weighted')\n", (10809, 10821), True, 'import tensorflow as tf\n'), ((11034, 11065), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""q_seq_attn"""'], {}), "('q_seq_attn')\n", (11051, 11065), True, 'import tensorflow as tf\n'), ((11166, 11198), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""p_rnn_input"""'], {}), "('p_rnn_input')\n", (11183, 11198), True, 'import tensorflow as tf\n'), ((11287, 11324), 'tensorflow.concat', 'tf.concat', (['doc_rnn_input_list'], {'axis': '(2)'}), '(doc_rnn_input_list, axis=2)\n', (11296, 11324), True, 'import tensorflow as tf\n'), ((11827, 11859), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""q_self_attn"""'], {}), "('q_self_attn')\n", (11844, 11859), True, 'import tensorflow as tf\n'), ((12425, 12451), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""start"""'], {}), "('start')\n", (12442, 12451), True, 'import tensorflow as tf\n'), ((12658, 12682), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""end"""'], {}), "('end')\n", (12675, 12682), True, 'import tensorflow as tf\n'), ((12881, 12908), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""answer"""'], {}), "('answer')\n", (12898, 12908), True, 'import tensorflow as tf\n'), ((12937, 12992), 'tensorflow.concat', 
'tf.concat', (['[start_scores, end_scores]', '(1)'], {'name': '"""scores"""'}), "([start_scores, end_scores], 1, name='scores')\n", (12946, 12992), True, 'import tensorflow as tf\n'), ((14177, 14214), 'tensorflow.python.framework.tensor_util.MakeNdarray', 'tensor_util.MakeNdarray', (['tensor_proto'], {}), '(tensor_proto)\n', (14200, 14214), False, 'from tensorflow.python.framework import tensor_util\n'), ((16380, 16413), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (16411, 16413), True, 'import tensorflow as tf\n'), ((17017, 17053), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['"""data/drqa.pb"""', '"""wb"""'], {}), "('data/drqa.pb', 'wb')\n", (17031, 17053), True, 'import tensorflow as tf\n'), ((17133, 17176), 'tensorflow.gfile.FastGFile', 'tf.gfile.FastGFile', (['"""data/drqa.pb.txt"""', '"""w"""'], {}), "('data/drqa.pb.txt', 'w')\n", (17151, 17176), True, 'import tensorflow as tf\n'), ((1540, 1588), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.weight_initializer'], {}), '(self.weight_initializer)\n', (1563, 1588), True, 'import tensorflow as tf\n'), ((1762, 1808), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.bias_initializer'], {}), '(self.bias_initializer)\n', (1785, 1808), True, 'import tensorflow as tf\n'), ((3658, 3669), 'tensorflow.range', 'tf.range', (['n'], {}), '(n)\n', (3666, 3669), True, 'import tensorflow as tf\n'), ((3738, 3756), 'tensorflow.range', 'tf.range', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (3746, 3756), True, 'import tensorflow as tf\n'), ((3825, 3836), 'tensorflow.range', 'tf.range', (['n'], {}), '(n)\n', (3833, 3836), True, 'import tensorflow as tf\n'), ((3905, 3932), 'tensorflow.range', 'tf.range', (['(-k + 1)', '(n - k + 1)'], {}), '(-k + 1, n - k + 1)\n', (3913, 3932), True, 'import tensorflow as tf\n'), ((6747, 6873), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', (['fw_cell', 'bw_cell', 
'last_output'], {'dtype': 'tf.float32', 'sequence_length': 'seq_len', 'scope': '"""bi_rnn"""'}), "(fw_cell, bw_cell, last_output, dtype=tf.\n float32, sequence_length=seq_len, scope='bi_rnn')\n", (6778, 6873), True, 'import tensorflow as tf\n'), ((7079, 7099), 'tensorflow.concat', 'tf.concat', (['output', '(2)'], {}), '(output, 2)\n', (7088, 7099), True, 'import tensorflow as tf\n'), ((7473, 7488), 'tensorflow.matmul', 'tf.matmul', (['y', 'w'], {}), '(y, w)\n', (7482, 7488), True, 'import tensorflow as tf\n'), ((7566, 7587), 'tensorflow.expand_dims', 'tf.expand_dims', (['wy', '(2)'], {}), '(wy, 2)\n', (7580, 7587), True, 'import tensorflow as tf\n'), ((7737, 7756), 'tensorflow.equal', 'tf.equal', (['x_mask', '(0)'], {}), '(x_mask, 0)\n', (7745, 7756), True, 'import tensorflow as tf\n'), ((9209, 9255), 'tensorflow.matmul', 'tf.matmul', (['x_re', 'seq_weights'], {'transpose_b': '(True)'}), '(x_re, seq_weights, transpose_b=True)\n', (9218, 9255), True, 'import tensorflow as tf\n'), ((9482, 9528), 'tensorflow.matmul', 'tf.matmul', (['y_re', 'seq_weights'], {'transpose_b': '(True)'}), '(y_re, seq_weights, transpose_b=True)\n', (9491, 9528), True, 'import tensorflow as tf\n'), ((10739, 10768), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (10752, 10768), True, 'import tensorflow as tf\n'), ((10859, 10883), 'tensorflow.divide', 'tf.divide', (['scores', 'x_sum'], {}), '(scores, x_sum)\n', (10868, 10883), True, 'import tensorflow as tf\n'), ((10922, 10942), 'tensorflow.matmul', 'tf.matmul', (['scores', 'x'], {}), '(scores, x)\n', (10931, 10942), True, 'import tensorflow as tf\n'), ((15804, 15824), 'tensorflow.as_dtype', 'tf.as_dtype', (['k.dtype'], {}), '(k.dtype)\n', (15815, 15824), True, 'import tensorflow as tf\n'), ((5143, 5163), 'tensorflow.shape', 'tf.shape', (['input_data'], {}), '(input_data)\n', (5151, 5163), True, 'import tensorflow as tf\n'), ((5459, 5520), 'numpy.concatenate', 'np.concatenate', 
(['(fw_weights_input, fw_weights_hidden)'], {'axis': '(1)'}), '((fw_weights_input, fw_weights_hidden), axis=1)\n', (5473, 5520), True, 'import numpy as np\n'), ((5980, 6041), 'numpy.concatenate', 'np.concatenate', (['(bw_weights_input, bw_weights_hidden)'], {'axis': '(1)'}), '((bw_weights_input, bw_weights_hidden), axis=1)\n', (5994, 6041), True, 'import numpy as np\n'), ((7957, 7969), 'numpy.load', 'np.load', (['arg'], {}), '(arg)\n', (7964, 7969), True, 'import numpy as np\n'), ((10548, 10593), 'tensorflow.matmul', 'tf.matmul', (['x_flat', 'x_weight'], {'transpose_b': '(True)'}), '(x_flat, x_weight, transpose_b=True)\n', (10557, 10593), True, 'import tensorflow as tf\n'), ((5100, 5120), 'tensorflow.shape', 'tf.shape', (['input_data'], {}), '(input_data)\n', (5108, 5120), True, 'import tensorflow as tf\n'), ((9337, 9348), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (9345, 9348), True, 'import tensorflow as tf\n'), ((9610, 9621), 'tensorflow.shape', 'tf.shape', (['y'], {}), '(y)\n', (9618, 9621), True, 'import tensorflow as tf\n'), ((9912, 9923), 'tensorflow.shape', 'tf.shape', (['y'], {}), '(y)\n', (9920, 9923), True, 'import tensorflow as tf\n'), ((10109, 10120), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (10117, 10120), True, 'import tensorflow as tf\n'), ((10125, 10136), 'tensorflow.shape', 'tf.shape', (['y'], {}), '(y)\n', (10133, 10136), True, 'import tensorflow as tf\n'), ((10499, 10510), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (10507, 10510), True, 'import tensorflow as tf\n'), ((10609, 10620), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (10617, 10620), True, 'import tensorflow as tf\n')] |
import angr ,claripy
import numpy as np
class SimExtractParams(angr.SimProcedure):
    """angr SimProcedure that concretizes selected call arguments.

    *pointers* maps a 1-based argument position to a type tag (e.g.
    'intPointer', 'charPointer', 'float', 'double'); each selected argument
    is resolved from the symbolic state into a plain Python value and
    appended, in iteration order, to ``state.globals['args']``.
    """
    def run(self, *args, pointers=None):
        self.state.globals['args'] = []
        for numb, typ in pointers.items():
            arg = args[numb - 1]
            argRes = None
            if typ == 'intPointer':
                # Dereference and reinterpret as a signed 32-bit integer.
                addr = arg.ast.args[0]
                argRes = int(np.int32(self.state.mem[addr].long.concrete))
            elif typ == 'charPointer':
                # NOTE(review): this branch resolves the address via
                # to_claripy() while the others use .ast.args[0] -- confirm
                # this asymmetry is intentional.
                addr = arg.to_claripy()
                value = self.state.mem[addr].string.concrete
                if isinstance(value, str):
                    argRes = value
                else:
                    argRes = value.decode('ascii', 'replace')
                argRes = argRes + '\x00'
            elif typ == 'floatPointer':
                # Reinterpret the dereferenced bits as an IEEE-754 float32.
                addr = arg.ast.args[0]
                value = self.state.mem[addr].long.concrete
                argRes = claripy.BVV(value, 32).raw_to_fp().args[0]
            elif typ == 'doublePointer':
                # Reinterpret the dereferenced bits as an IEEE-754 float64.
                addr = arg.ast.args[0]
                value = self.state.mem[addr].long.concrete
                argRes = claripy.BVV(value, 64).raw_to_fp().args[0]
            elif typ == 'char':
                # BUG FIX: was `typ in 'char'` -- substring membership, which
                # also matched '' , 'c', 'ha', ... ; same for 'int'/'float'.
                argRes = chr(arg.ast.args[0])
            elif typ == 'int':
                argRes = int(np.int32(self.state.solver.eval(arg.ast)))
            elif typ == 'float':
                argRes = claripy.BVV(arg.args[0], 32).raw_to_fp().args[0]
            elif typ == 'double':
                argRes = claripy.BVV(arg.args[0], 64).raw_to_fp().args[0]
            else:
                # Unknown tag: keep the raw (symbolic) argument.
                argRes = arg
            self.state.globals['args'].append(argRes)
        self.exit(0)
        return 0
| [
"numpy.int32",
"claripy.BVV"
] | [((341, 385), 'numpy.int32', 'np.int32', (['self.state.mem[addr].long.concrete'], {}), '(self.state.mem[addr].long.concrete)\n', (349, 385), True, 'import numpy as np\n'), ((896, 918), 'claripy.BVV', 'claripy.BVV', (['value', '(32)'], {}), '(value, 32)\n', (907, 918), False, 'import angr, claripy\n'), ((1139, 1161), 'claripy.BVV', 'claripy.BVV', (['value', '(64)'], {}), '(value, 64)\n', (1150, 1161), False, 'import angr, claripy\n'), ((1527, 1566), 'claripy.BVV', 'claripy.BVV', (['args[numb - 1].args[0]', '(32)'], {}), '(args[numb - 1].args[0], 32)\n', (1538, 1566), False, 'import angr, claripy\n'), ((1673, 1712), 'claripy.BVV', 'claripy.BVV', (['args[numb - 1].args[0]', '(64)'], {}), '(args[numb - 1].args[0], 64)\n', (1684, 1712), False, 'import angr, claripy\n')] |
"""Create carbon regression scenarios."""
import argparse
import logging
import multiprocessing
import os
import subprocess
import sys
from osgeo import gdal
import pygeoprocessing
import numpy
import taskgraph
# Raise GDAL's raster block cache to 128 MiB for faster warps.
gdal.SetCacheMax(2**27)
logging.basicConfig(
    level=logging.DEBUG,
    format=(
        '%(asctime)s (%(relativeCreated)d) %(levelname)s %(name)s'
        ' [%(funcName)s:%(lineno)d] %(message)s'),
    stream=sys.stdout)
LOGGER = logging.getLogger(__name__)
# taskgraph is chatty at DEBUG; keep it at INFO.
logging.getLogger('taskgraph').setLevel(logging.INFO)
# Directory layout: everything lives under the workspace.
WORKSPACE_DIR = 'becky_ipcc_for_you'
ECOSHARD_DIR = os.path.join(WORKSPACE_DIR, 'ecoshard')
CHURN_DIR = os.path.join(WORKSPACE_DIR, 'churn')
DATA_DIR = os.path.join(WORKSPACE_DIR, 'data')
# ESA CCI LULC code groups.
CROPLAND_LULC_CODES = range(10, 41)
URBAN_LULC_CODES = (190,)
FOREST_CODES = (50, 60, 61, 62, 70, 71, 72, 80, 81, 82, 90, 160, 170)
MASK_NODATA = 2
MULT_BY_COLUMNS_NODATA = -1
CARBON_ZONES_VECTOR_URI = 'gs://ecoshard-root/global_carbon_regression/carbon_zones_md5_aa16830f64d1ef66ebdf2552fb8a9c0d.gpkg'
CARBON_ZONES_VECTOR_PATH = os.path.join(ECOSHARD_DIR, 'carbon_zones.gpkg')
BASE_DATA_BUCKET_ROOT = 'gs://ecoshard-root/global_carbon_regression/inputs/'
# Scenario id -> source LULC raster uri.
LULC_SCENARIO_URI_MAP = {
    'esa2014': 'gs://ecoshard-root/global_carbon_regression/ESACCI-LC-L4-LCCS-Map-300m-P1Y-2014-v2.0.7_smooth_compressed.tif',
}
# 10 arc-second pixels in degrees (negative y: north-up raster).
TARGET_PIXEL_SIZE = (10./3600., -10./3600.)
IPCC_CARBON_TABLE_URI = 'gs://ecoshard-root/global_carbon_regression/IPCC_carbon_table_md5_a91f7ade46871575861005764d85cfa7.csv'
IPCC_CARBON_TABLE_PATH = os.path.join(
    ECOSHARD_DIR, os.path.basename(IPCC_CARBON_TABLE_URI))
BACCINI_10s_2014_BIOMASS_URI = (
    'gs://ecoshard-root/global_carbon_regression/baccini_10s_2014'
    '_md5_5956a9d06d4dffc89517cefb0f6bb008.tif')
# The following is the base in the pattern found in the lasso table
# [base]_[mask_type]_gs[kernel_size]
BASE_LASSO_CONVOLUTION_RASTER_NAME = 'lulc_esa_smoothed_2014_10sec'
# Filled in by fetch_data(): scenario id -> clipped local raster path.
LULC_SCENARIO_RASTER_PATH_MAP = {}
def ipcc_carbon_op(
        lulc_array, zones_array, zone_lulc_to_carbon_map, conversion_factor):
    """Return per-pixel carbon scaled by ``conversion_factor``.

    Each pixel's carbon is looked up from its (zone, lulc code) pair via
    ``zone_lulc_to_carbon_map`` (zone id -> array indexed by lulc code).
    Pixels whose zone is absent from the map are left at 0.
    """
    carbon = numpy.zeros(lulc_array.shape)
    for zone in numpy.unique(zones_array):
        carbon_per_lucode = zone_lulc_to_carbon_map.get(zone)
        if carbon_per_lucode is None:
            continue
        in_zone = zones_array == zone
        carbon[in_zone] = (
            conversion_factor * carbon_per_lucode[lulc_array[in_zone]])
    return carbon
def parse_carbon_lulc_table(ipcc_carbon_table_path):
    """Parse the IPCC carbon table into a per-zone lookup array.

    Args:
        ipcc_carbon_table_path (str): path to a CSV whose header row is
            ``<label>,<lucode>,<lucode>,...`` and whose remaining rows are
            ``<zone_id>,<carbon>,<carbon>,...``.

    Returns:
        dict: maps ``zone_id`` (int) to a numpy array indexed by lucode
            holding that zone's carbon density (0.0 for lucodes not in the
            table).
    """
    # Bug fix: the path argument was previously ignored and the global
    # IPCC_CARBON_TABLE_PATH was opened instead.
    with open(ipcc_carbon_table_path, 'r') as carbon_table_file:
        header_line = carbon_table_file.readline()
        lulc_code_list = [int(lucode) for lucode in header_line.split(',')[1:]]
        max_code = max(lulc_code_list)
        zone_lucode_to_carbon_map = {}
        for line in carbon_table_file:
            split_line = line.split(',')
            # Skip empty/blank rows (trailing newline at end of file, etc).
            if not split_line[0].strip():
                continue
            zone_id = int(split_line[0])
            zone_lucode_to_carbon_map[zone_id] = numpy.zeros(max_code + 1)
            for lucode, carbon_value in zip(lulc_code_list, split_line[1:]):
                zone_lucode_to_carbon_map[zone_id][lucode] = float(
                    carbon_value)
    return zone_lucode_to_carbon_map
def rasterize_carbon_zones(
        base_raster_path, carbon_vector_path, rasterized_zones_path):
    """Burn carbon-zone ids into a new raster.

    A new int32 raster (nodata -1) aligned with ``base_raster_path`` is
    created at ``rasterized_zones_path``, then the ``CODE`` field of
    ``carbon_vector_path`` is rasterized into it.
    """
    zone_nodata = -1
    pygeoprocessing.new_raster_from_base(
        base_raster_path, rasterized_zones_path, gdal.GDT_Int32,
        [zone_nodata])
    pygeoprocessing.rasterize(
        carbon_vector_path, rasterized_zones_path,
        option_list=['ATTRIBUTE=CODE'])
def download_and_clip(file_uri, download_dir, bounding_box, target_file_path):
    """Download ``file_uri`` with gsutil, then clip it to ``bounding_box``.

    The pristine download is kept in ``download_dir`` so later calls can
    reuse it (gsutil's ``-n`` flag skips files that already exist). If the
    downloaded raster already matches the requested bounding box it is
    hardlinked to the target instead of warped.

    Args:
        file_uri (str): gs:// uri of the raster to fetch.
        download_dir (str): directory holding the unclipped downloads.
        bounding_box (list): [minx, miny, maxx, maxy] to clip to.
        target_file_path (str): desired target of clipped file.

    Returns:
        None.
    """
    # exist_ok replaces the old try/except OSError, which also hid
    # unrelated failures such as permission errors.
    os.makedirs(download_dir, exist_ok=True)
    base_filename = os.path.basename(file_uri)
    base_file_path = os.path.join(download_dir, base_filename)
    LOGGER.debug(f'download {file_uri} to {base_file_path}')
    subprocess.run(
        f'/usr/local/gcloud-sdk/google-cloud-sdk/bin/gsutil cp -nr '
        f'{file_uri} {download_dir}/', shell=True, check=True)
    raster_info = pygeoprocessing.get_raster_info(base_file_path)
    if bounding_box != raster_info['bounding_box']:
        LOGGER.debug(
            f'bounding box and desired target differ '
            f"{bounding_box} {raster_info['bounding_box']}")
        pygeoprocessing.warp_raster(
            base_file_path, raster_info['pixel_size'], target_file_path,
            'near', target_bb=bounding_box)
    else:
        # Already the requested extent; hardlink to avoid a copy.
        LOGGER.debug('already the same size, so no need to warp')
        os.link(base_file_path, target_file_path)
def fetch_data(bounding_box, clipped_data_dir, task_graph):
    """Download all the global data needed to run this analysis.

    Schedules a download-and-clip task for every ``.tif`` listed in the
    inputs bucket, fetches the carbon zones vector and IPCC table, and
    clips each LULC scenario raster. Side effects: sets the module
    globals ``BACCINI_10s_2014_BIOMASS_RASTER_PATH`` and
    ``LULC_SCENARIO_RASTER_PATH_MAP``.

    Args:
        bounding_box (list): minx, miny, maxx, maxy list to clip to
        clipped_data_dir (str): path to directory to copy clipped rasters
            to
        task_graph (TaskGraph): taskgraph object to schedule work.

    Returns:
        None.
    """
    # gsutil ls prints one uri per line; append the Baccini biomass
    # raster which lives outside the inputs prefix.
    files_to_download = subprocess.check_output([
        '/usr/local/gcloud-sdk/google-cloud-sdk/bin/gsutil ls '
        'gs://ecoshard-root/global_carbon_regression/inputs'],
        shell=True).decode('utf-8').splitlines() + [
            BACCINI_10s_2014_BIOMASS_URI]
    LOGGER.debug(f'here are the files to download: {files_to_download}')
    try:
        os.makedirs(clipped_data_dir)
    except OSError:
        pass
    # One clip task per raster; non-tif listings (e.g. directory lines)
    # are skipped.
    for file_uri in files_to_download:
        if not file_uri.endswith('tif'):
            continue
        clipped_file_path = os.path.join(
            clipped_data_dir, os.path.basename(file_uri))
        _ = task_graph.add_task(
            func=download_and_clip,
            args=(
                file_uri, DATA_DIR, bounding_box, clipped_file_path),
            target_path_list=[clipped_file_path],
            task_name=(
                f'download and clip contents of {file_uri} to '
                f'{clipped_data_dir}'))
    task_graph.join()
    global BACCINI_10s_2014_BIOMASS_RASTER_PATH
    BACCINI_10s_2014_BIOMASS_RASTER_PATH = os.path.join(
        clipped_data_dir, os.path.basename(BACCINI_10s_2014_BIOMASS_URI))
    # Vector and table are small; plain copies, no clipping.
    for data_uri, data_path in [
            (CARBON_ZONES_VECTOR_URI, CARBON_ZONES_VECTOR_PATH),
            (IPCC_CARBON_TABLE_URI, IPCC_CARBON_TABLE_PATH)]:
        _ = task_graph.add_task(
            func=subprocess.run,
            args=(
                f'/usr/local/gcloud-sdk/google-cloud-sdk/bin/gsutil cp -n '
                f'{data_uri} {data_path}',),
            kwargs={'shell': True, 'check': True},
            target_path_list=[data_path],
            task_name=f'download {data_uri}')
    global LULC_SCENARIO_RASTER_PATH_MAP
    for scenario_id, lulc_uri in LULC_SCENARIO_URI_MAP.items():
        LOGGER.debug(f'download {lulc_uri}')
        lulc_raster_path = os.path.join(
            ECOSHARD_DIR, os.path.basename(lulc_uri))
        clipped_file_path = os.path.join(
            clipped_data_dir, os.path.basename(lulc_raster_path))
        _ = task_graph.add_task(
            func=download_and_clip,
            args=(
                lulc_uri, ECOSHARD_DIR, bounding_box, clipped_file_path),
            target_path_list=[clipped_file_path],
            task_name=(
                f'download and clip contents of {lulc_uri} to '
                f'{clipped_data_dir}'))
        LULC_SCENARIO_RASTER_PATH_MAP[scenario_id] = clipped_file_path
    task_graph.join()
def main():
    """Entry point: download inputs and build IPCC carbon stock rasters.

    Parses command-line options, optionally authenticates gcloud, fetches
    and clips all input data, then schedules one raster_calculator task
    per LULC scenario that maps (lulc, zone) pairs to carbon densities.
    """
    parser = argparse.ArgumentParser(
        description='Becky\'s IPCC maker')
    parser.add_argument(
        '--bounding_box', type=float, nargs=4, default=[-180, -90, 180, 90],
        help=(
            "manual bounding box in the form of four consecutive floats: "
            "min_lng, min_lat, max_lng, max_lat, ex: "
            "-180.0, -58.3, 180.0, 81.5"))
    parser.add_argument(
        '--keyfile', help='path to keyfile that authorizes bucket access')
    parser.add_argument(
        '--n_workers', type=int, default=multiprocessing.cpu_count(),
        help='how many workers to allocate to taskgraph')
    args = parser.parse_args()
    if args.keyfile:
        # Authenticate the service account so gsutil calls below succeed.
        subprocess.run(
            f'/usr/local/gcloud-sdk/google-cloud-sdk/bin/gcloud auth '
            f'activate-service-account --key-file={args.keyfile}',
            shell=True, check=True)
    for dir_path in [WORKSPACE_DIR, ECOSHARD_DIR, CHURN_DIR, DATA_DIR]:
        try:
            os.makedirs(dir_path)
        except OSError:
            pass
    # Clipped data is keyed by bounding box so different runs don't clash.
    bounding_box_str = ','.join([str(x) for x in args.bounding_box])
    clipped_data_dir = os.path.join(DATA_DIR, bounding_box_str)
    # Step 0: Download data
    task_graph = taskgraph.TaskGraph(CHURN_DIR, args.n_workers, 5.0)
    LOGGER.info("Step 0: Download data")
    fetch_data(args.bounding_box, clipped_data_dir, task_graph)
    # IPCC Approach
    # Create carbon stocks for ESA 2014 and restoration scenario
    rasterize_carbon_zone_task = None
    ipcc_carbon_scenario_raster_map = {}
    IPCC_CARBON_DIR = os.path.join(WORKSPACE_DIR, 'ipcc_carbon')
    try:
        os.makedirs(IPCC_CARBON_DIR)
    except OSError:
        pass
    for scenario_id, lulc_raster_path in LULC_SCENARIO_RASTER_PATH_MAP.items():
        # Zones only need rasterizing once; reuse the task across scenarios.
        if rasterize_carbon_zone_task is None:
            rasterized_zones_raster_path = os.path.join(
                clipped_data_dir, 'carbon_zones.tif')
            rasterize_carbon_zone_task = task_graph.add_task(
                func=rasterize_carbon_zones,
                args=(
                    lulc_raster_path, CARBON_ZONES_VECTOR_PATH,
                    rasterized_zones_raster_path),
                target_path_list=[rasterized_zones_raster_path],
                task_name='rasterize carbon zones')
        zone_lucode_to_carbon_map = parse_carbon_lulc_table(
            IPCC_CARBON_TABLE_PATH)
        ipcc_carbon_scenario_raster_map[scenario_id] = os.path.join(
            IPCC_CARBON_DIR,
            f'ipcc_carbon_{scenario_id}_{bounding_box_str}.tif')
        # Units are in Mg/Ha
        conversion_factor = 1.0
        task_graph.add_task(
            func=pygeoprocessing.raster_calculator,
            args=(
                [(lulc_raster_path, 1), (rasterized_zones_raster_path, 1),
                (zone_lucode_to_carbon_map, 'raw'),
                (conversion_factor, 'raw')],
                ipcc_carbon_op, ipcc_carbon_scenario_raster_map[scenario_id],
                gdal.GDT_Float32, MULT_BY_COLUMNS_NODATA),
            dependent_task_list=[rasterize_carbon_zone_task],
            target_path_list=[ipcc_carbon_scenario_raster_map[scenario_id]],
            task_name=f'''create carbon for {
                ipcc_carbon_scenario_raster_map[scenario_id]}''')
    task_graph.close()
    task_graph.join()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"logging.basicConfig",
"logging.getLogger",
"subprocess.check_output",
"pygeoprocessing.warp_raster",
"numpy.unique",
"pygeoprocessing.new_raster_from_base",
"argparse.ArgumentParser",
"os.makedirs",
"subprocess.run",
"os.path.join",
"multiprocessing.cpu_count",
"numpy.zeros",
"pygeoprocessi... | [((213, 238), 'osgeo.gdal.SetCacheMax', 'gdal.SetCacheMax', (['(2 ** 27)'], {}), '(2 ** 27)\n', (229, 238), False, 'from osgeo import gdal\n'), ((238, 412), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s (%(relativeCreated)d) %(levelname)s %(name)s [%(funcName)s:%(lineno)d] %(message)s"""', 'stream': 'sys.stdout'}), "(level=logging.DEBUG, format=\n '%(asctime)s (%(relativeCreated)d) %(levelname)s %(name)s [%(funcName)s:%(lineno)d] %(message)s'\n , stream=sys.stdout)\n", (257, 412), False, 'import logging\n'), ((448, 475), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (465, 475), False, 'import logging\n'), ((583, 622), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""ecoshard"""'], {}), "(WORKSPACE_DIR, 'ecoshard')\n", (595, 622), False, 'import os\n'), ((635, 671), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""churn"""'], {}), "(WORKSPACE_DIR, 'churn')\n", (647, 671), False, 'import os\n'), ((683, 718), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""data"""'], {}), "(WORKSPACE_DIR, 'data')\n", (695, 718), False, 'import os\n'), ((1052, 1099), 'os.path.join', 'os.path.join', (['ECOSHARD_DIR', '"""carbon_zones.gpkg"""'], {}), "(ECOSHARD_DIR, 'carbon_zones.gpkg')\n", (1064, 1099), False, 'import os\n'), ((1565, 1604), 'os.path.basename', 'os.path.basename', (['IPCC_CARBON_TABLE_URI'], {}), '(IPCC_CARBON_TABLE_URI)\n', (1581, 1604), False, 'import os\n'), ((2150, 2179), 'numpy.zeros', 'numpy.zeros', (['lulc_array.shape'], {}), '(lulc_array.shape)\n', (2161, 2179), False, 'import numpy\n'), ((2199, 2224), 'numpy.unique', 'numpy.unique', (['zones_array'], {}), '(zones_array)\n', (2211, 2224), False, 'import numpy\n'), ((3541, 3644), 'pygeoprocessing.new_raster_from_base', 'pygeoprocessing.new_raster_from_base', (['base_raster_path', 'rasterized_zones_path', 'gdal.GDT_Int32', '[-1]'], {}), '(base_raster_path,\n rasterized_zones_path, 
gdal.GDT_Int32, [-1])\n', (3577, 3644), False, 'import pygeoprocessing\n'), ((3662, 3766), 'pygeoprocessing.rasterize', 'pygeoprocessing.rasterize', (['carbon_vector_path', 'rasterized_zones_path'], {'option_list': "['ATTRIBUTE=CODE']"}), "(carbon_vector_path, rasterized_zones_path,\n option_list=['ATTRIBUTE=CODE'])\n", (3687, 3766), False, 'import pygeoprocessing\n'), ((4544, 4570), 'os.path.basename', 'os.path.basename', (['file_uri'], {}), '(file_uri)\n', (4560, 4570), False, 'import os\n'), ((4592, 4633), 'os.path.join', 'os.path.join', (['download_dir', 'base_filename'], {}), '(download_dir, base_filename)\n', (4604, 4633), False, 'import os\n'), ((4823, 4959), 'subprocess.run', 'subprocess.run', (['f"""/usr/local/gcloud-sdk/google-cloud-sdk/bin/gsutil cp -nr {file_uri} {download_dir}/"""'], {'shell': '(True)', 'check': '(True)'}), "(\n f'/usr/local/gcloud-sdk/google-cloud-sdk/bin/gsutil cp -nr {file_uri} {download_dir}/'\n , shell=True, check=True)\n", (4837, 4959), False, 'import subprocess\n'), ((4990, 5037), 'pygeoprocessing.get_raster_info', 'pygeoprocessing.get_raster_info', (['base_file_path'], {}), '(base_file_path)\n', (5021, 5037), False, 'import pygeoprocessing\n'), ((8476, 8533), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Becky\'s IPCC maker"""'}), '(description="Becky\'s IPCC maker")\n', (8499, 8533), False, 'import argparse\n'), ((9592, 9632), 'os.path.join', 'os.path.join', (['DATA_DIR', 'bounding_box_str'], {}), '(DATA_DIR, bounding_box_str)\n', (9604, 9632), False, 'import os\n'), ((9678, 9729), 'taskgraph.TaskGraph', 'taskgraph.TaskGraph', (['CHURN_DIR', 'args.n_workers', '(5.0)'], {}), '(CHURN_DIR, args.n_workers, 5.0)\n', (9697, 9729), False, 'import taskgraph\n'), ((10022, 10064), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""ipcc_carbon"""'], {}), "(WORKSPACE_DIR, 'ipcc_carbon')\n", (10034, 10064), False, 'import os\n'), ((476, 506), 'logging.getLogger', 'logging.getLogger', 
(['"""taskgraph"""'], {}), "('taskgraph')\n", (493, 506), False, 'import logging\n'), ((4464, 4489), 'os.makedirs', 'os.makedirs', (['download_dir'], {}), '(download_dir)\n', (4475, 4489), False, 'import os\n'), ((5236, 5360), 'pygeoprocessing.warp_raster', 'pygeoprocessing.warp_raster', (['base_file_path', "raster_info['pixel_size']", 'target_file_path', '"""near"""'], {'target_bb': 'bounding_box'}), "(base_file_path, raster_info['pixel_size'],\n target_file_path, 'near', target_bb=bounding_box)\n", (5263, 5360), False, 'import pygeoprocessing\n'), ((5525, 5566), 'os.link', 'os.link', (['base_file_path', 'target_file_path'], {}), '(base_file_path, target_file_path)\n', (5532, 5566), False, 'import os\n'), ((6330, 6359), 'os.makedirs', 'os.makedirs', (['clipped_data_dir'], {}), '(clipped_data_dir)\n', (6341, 6359), False, 'import os\n'), ((7084, 7130), 'os.path.basename', 'os.path.basename', (['BACCINI_10s_2014_BIOMASS_URI'], {}), '(BACCINI_10s_2014_BIOMASS_URI)\n', (7100, 7130), False, 'import os\n'), ((9148, 9306), 'subprocess.run', 'subprocess.run', (['f"""/usr/local/gcloud-sdk/google-cloud-sdk/bin/gcloud auth activate-service-account --key-file={args.keyfile}"""'], {'shell': '(True)', 'check': '(True)'}), "(\n f'/usr/local/gcloud-sdk/google-cloud-sdk/bin/gcloud auth activate-service-account --key-file={args.keyfile}'\n , shell=True, check=True)\n", (9162, 9306), False, 'import subprocess\n'), ((10082, 10110), 'os.makedirs', 'os.makedirs', (['IPCC_CARBON_DIR'], {}), '(IPCC_CARBON_DIR)\n', (10093, 10110), False, 'import os\n'), ((10906, 10992), 'os.path.join', 'os.path.join', (['IPCC_CARBON_DIR', 'f"""ipcc_carbon_{scenario_id}_{bounding_box_str}.tif"""'], {}), "(IPCC_CARBON_DIR,\n f'ipcc_carbon_{scenario_id}_{bounding_box_str}.tif')\n", (10918, 10992), False, 'import os\n'), ((3117, 3142), 'numpy.zeros', 'numpy.zeros', (['(max_code + 1)'], {}), '(max_code + 1)\n', (3128, 3142), False, 'import numpy\n'), ((6567, 6593), 'os.path.basename', 'os.path.basename', 
(['file_uri'], {}), '(file_uri)\n', (6583, 6593), False, 'import os\n'), ((7856, 7882), 'os.path.basename', 'os.path.basename', (['lulc_uri'], {}), '(lulc_uri)\n', (7872, 7882), False, 'import os\n'), ((7956, 7990), 'os.path.basename', 'os.path.basename', (['lulc_raster_path'], {}), '(lulc_raster_path)\n', (7972, 7990), False, 'import os\n'), ((9000, 9027), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (9025, 9027), False, 'import multiprocessing\n'), ((9436, 9457), 'os.makedirs', 'os.makedirs', (['dir_path'], {}), '(dir_path)\n', (9447, 9457), False, 'import os\n'), ((10315, 10365), 'os.path.join', 'os.path.join', (['clipped_data_dir', '"""carbon_zones.tif"""'], {}), "(clipped_data_dir, 'carbon_zones.tif')\n", (10327, 10365), False, 'import os\n'), ((5990, 6144), 'subprocess.check_output', 'subprocess.check_output', (["['/usr/local/gcloud-sdk/google-cloud-sdk/bin/gsutil ls gs://ecoshard-root/global_carbon_regression/inputs'\n ]"], {'shell': '(True)'}), "([\n '/usr/local/gcloud-sdk/google-cloud-sdk/bin/gsutil ls gs://ecoshard-root/global_carbon_regression/inputs'\n ], shell=True)\n", (6013, 6144), False, 'import subprocess\n')] |
"""Utilities for the normal CHIME Bayes module
"""
from typing import TypeVar, Union
from numpy import exp
FloatLike = TypeVar("FloatLike")  # Floats or integers
FloatLikeArray = TypeVar("FloatLikeArray")  # Arrays of floats or integers
NormalDistVar = TypeVar("NormalDistVar")  # Normally distributed random var
NormalDistArray = TypeVar("NormalDistArray")  # Array of Normally dist random var
# Scalar that is either deterministic or a normal random variable.
FloatOrDistVar = Union[FloatLike, NormalDistVar]
# Array counterpart of FloatOrDistVar.
FloatOrDistArray = Union[FloatLikeArray, NormalDistArray]
def logistic_fcn(  # pylint: disable=C0103
    x: FloatOrDistArray, L: FloatOrDistVar, k: FloatOrDistVar, x0: FloatOrDistVar,
) -> FloatOrDistArray:
    """Evaluate the logistic curve `L / (1 + exp(-k(x-x0)))` at `x`."""
    decay = exp(-k * (x - x0))
    return L / (1 + decay)
def one_minus_logistic_fcn(  # pylint: disable=C0103
    x: FloatOrDistArray, L: FloatOrDistVar, k: FloatOrDistVar, x0: FloatOrDistVar,
) -> FloatOrDistArray:
    """Evaluate the logistic complement `1 - L / (1 + exp(-k(x-x0)))` at `x`."""
    # Logistic expression inlined so this function stands on its own.
    return 1 - L / (1 + exp(-k * (x - x0)))
| [
"numpy.exp",
"typing.TypeVar"
] | [((121, 141), 'typing.TypeVar', 'TypeVar', (['"""FloatLike"""'], {}), "('FloatLike')\n", (128, 141), False, 'from typing import TypeVar, Union\n'), ((181, 206), 'typing.TypeVar', 'TypeVar', (['"""FloatLikeArray"""'], {}), "('FloatLikeArray')\n", (188, 206), False, 'from typing import TypeVar, Union\n'), ((256, 280), 'typing.TypeVar', 'TypeVar', (['"""NormalDistVar"""'], {}), "('NormalDistVar')\n", (263, 280), False, 'from typing import TypeVar, Union\n'), ((334, 360), 'typing.TypeVar', 'TypeVar', (['"""NormalDistArray"""'], {}), "('NormalDistArray')\n", (341, 360), False, 'from typing import TypeVar, Union\n'), ((728, 746), 'numpy.exp', 'exp', (['(-k * (x - x0))'], {}), '(-k * (x - x0))\n', (731, 746), False, 'from numpy import exp\n')] |
from torch.utils.tensorboard import SummaryWriter
import os, glob
from utils import dict_send_to
from tqdm import tqdm
import time, datetime
import argparse
import json
import traceback
from hyperparams import hparams as hp
import torch
from concurrent.futures import ProcessPoolExecutor
from utils.transcribe import transcribe, transcribe_available
from transformer import tacotron
import logging
from utils import infolog, checkpoint
from utils.text import language_vec_to_id
from dataloader import FeederEval
from functools import partial
from synthesize import eval_batch, save_eval_results
import numpy as np
import sys
import faulthandler, signal
# Register SIGUSR1 to dump all thread tracebacks on demand (debug aid;
# faulthandler.register is missing on some platforms, e.g. Windows).
if hasattr(faulthandler, 'register'):
    faulthandler.register(signal.SIGUSR1)
# Process pool shared by result saving and transcription.
executor = ProcessPoolExecutor(max_workers=4)
def run_transcription(eval_path, names, existent_samples, meta_index, cer_window, step):
    """Transcribe synthesized samples and accumulate CER statistics.

    Reuses successful transcriptions already recorded in
    ``transcriptions.jsonl`` under ``eval_path``, submits the remaining
    samples to the process pool, rewrites the jsonl file with the merged
    results, and feeds per-locale CER values into ``cer_window``.

    Args:
        eval_path: directory with the ``*_trim.wav`` samples.
        names: sample names synthesized in this run.
        existent_samples: names found on disk from a previous run.
        meta_index: maps ``<name>.npy`` to the sample's metadata.
        cer_window: LookupWindow collecting (locale, cer) pairs.
        step: global step, used for logging only.
    """
    trans_path = os.path.join(eval_path, 'transcriptions.jsonl')
    if os.path.exists(trans_path):
        # Reuse previous successful transcriptions; everything else
        # (new names, or previously failed) is re-transcribed.
        with open(trans_path, encoding='utf-8') as fr:
            lines = [json.loads(l) for l in fr.read().splitlines()]
        found_names = [t['name'] for t in lines if t['RecognitionStatus'] == 'Success']
        transcribe_names = set(names + [n for n in existent_samples if n not in found_names])
        logging.info("Exist transcriptions skipped: " + str(set(found_names).difference(transcribe_names)))
        prev_trans = [t for t in lines if t['name'] not in transcribe_names and t['RecognitionStatus'] == 'Success']
    else:
        transcribe_names = names + existent_samples
        prev_trans = []
    trans_fn = []
    for n in transcribe_names:
        if n + '.npy' in meta_index:
            fn = partial(transcribe, wav_path=os.path.join(eval_path, n + '_trim.wav'),
                         meta=meta_index[n + '.npy'])
            trans_fn.append(executor.submit(fn))
    trans = prev_trans + [f.result() for f in trans_fn]
    trans.sort(key=lambda x: x['name'])
    with open(trans_path, 'w', encoding='utf-8') as fw:
        for t in trans:
            fw.write(json.dumps(t, ensure_ascii=False) + '\n')
    logging.info('[Step %d] Raw CER=%.3f' % (step, np.mean([t['cer'] for t in trans]).item()))
    keys = []
    values = []
    for t in trans:
        if 'fail' not in t:
            keys.append(t['locale'])
            values.append(t['cer'])
        else:
            # logging.warn is deprecated; logging.warning is the
            # supported spelling.
            logging.warning("Failed sample: " + t['name'])
    cer_window.update(keys, values)
def main(args):
    """Continuously evaluate checkpoints as they appear in ``model_dir``.

    Loads each new checkpoint, synthesizes the eval set, saves results,
    optionally transcribes them for CER, and logs MSE/CER summaries to
    TensorBoard. Runs forever unless ``args.no_wait`` is set.
    """
    logdir = args.log_dir
    model_dir = args.model_dir
    data_dir = args.data_dir
    os.makedirs(logdir, exist_ok=True)
    hp.parse(args.hparams)
    # Persist the effective configuration for reproducibility.
    open(os.path.join(logdir, 'hparams.json'), 'w').write(hp.to_json(indent=1))
    open(os.path.join(logdir, 'args.json'), 'w').write(json.dumps(vars(args), indent=1))
    time_id = datetime.datetime.now().strftime('%m%d_%H%M')
    logging.basicConfig(format="[%(levelname)s %(asctime)s]" + " %(message)s",
                        stream=sys.stdout, level=logging.INFO)
    torch.manual_seed(0)
    infolog.set_logger(os.path.join(logdir, 'outputs_%s.log' % (time_id)))
    writer = SummaryWriter(log_dir=logdir)
    map_location = {}
    if not torch.cuda.is_available():
        # Remap GPU-saved checkpoints onto CPU.
        map_location = {'cuda:0': 'cpu'}
    values = hp.values()
    logging.info('Hyperparameters:\n' + '\n'.join([' %s: %s' % (name, values[name]) for name in sorted(values)]))
    logging.info(' '.join(sys.argv))
    if args.eval_steps is not None:
        eval_steps = [int(s) for s in args.eval_steps.split(':')]
    else:
        eval_steps = None
    lang_to_id = json.load(open(os.path.join(data_dir, 'lang_id.json'))) if hp.multi_lingual else None
    spk_to_id = json.load(open(os.path.join(data_dir, 'spk_id.json'))) if hp.multi_speaker else None
    if os.path.exists('filter_keys.json'):
        filter_keys = json.load(open('filter_keys.json'))
    else:
        filter_keys = {}
    eval_languages = args.eval_languages.split(':') if args.eval_languages else None
    eval_speakers = args.eval_speakers.split(':') if args.eval_speakers else None
    # --exclude_speakers may name a preset in filter_keys.json instead of
    # a literal colon-separated list.
    if args.exclude_speakers in filter_keys:
        exclude_speakers = filter_keys[args.exclude_speakers]
    else:
        exclude_speakers = args.exclude_speakers.split(':') if args.exclude_speakers else None
    zipfilepath = args.zipfilepath if args.zipfilepath else os.path.join(data_dir, 'mels.zip')
    if not os.path.exists(zipfilepath):
        # No reference mels: MSE-DTW metric is disabled below.
        zipfilepath = None
    eval_meta = args.eval_meta if args.eval_meta else os.path.join(data_dir, 'metadata.eval.txt')
    vocab = json.load(open(args.vocab, encoding='utf-8')) if args.vocab else None
    feeder_eval = FeederEval(zipfilepath, eval_meta, hp, spk_to_id=spk_to_id, lang_to_id=lang_to_id,
                            eval_lang=eval_languages, eval_spk=eval_speakers, exclude_spk=exclude_speakers,
                            shuffle=True, keep_order=True, pick_partial=False, single=False,
                            vocab=vocab, embed=args.external_embed)
    meta_index = dict([(m['n'], m) for m in feeder_eval._metadata])
    m = tacotron.Tacotron(hp)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    m.to(device)
    m.eval()
    # Decoder stays in train mode (keeps dropout on during synthesis).
    m.decoder.train()
    state_dict = m.state_dict()
    for var in state_dict:
        logging.info("%s %s" % (var, state_dict[var].shape))
    ckpt = []
    finished_ckpt = []
    if hp.multi_lingual:
        id_to_lang = dict([(v, k) for k, v in lang_to_id.items()])
    # Poll model_dir forever, evaluating each qualifying checkpoint once.
    while True:
        if len(ckpt) == 0:
            logging.info('Scanning: %s\n' % model_dir + '\n'.join(os.listdir(model_dir)))
        for l in glob.iglob(os.path.join(model_dir, 'model.ckpt-*')):
            step = l.split('-')[-1]
            if l not in finished_ckpt and step.isnumeric():
                # Explicit --eval_steps always qualify; otherwise apply
                # the start_step / eval_interval filters.
                if eval_steps and int(step) in eval_steps:
                    pass
                elif int(step) < args.start_step or (eval_steps and int(step) not in eval_steps) or \
                        int(step) % args.eval_interval != 0:
                    continue
                ckpt.append((l, int(step)))
        ckpt.sort(key=lambda x: x[-1])
        if len(ckpt) == 0:
            if args.no_wait:
                logging.info('No more ckpt, exit')
                return
            logging.info('No ckpt found, sleeping...')
            time.sleep(600)
            continue
        tic = time.time()
        ckpt_path, step = ckpt[0]
        ckpt = ckpt[1:]
        eval_path = os.path.join(logdir, 'eval_%d' % (step))
        logging.info('Evaluating %s' % ckpt_path)
        os.makedirs(eval_path, exist_ok=True)
        # Samples already trimmed on disk (from an interrupted run).
        existent_samples = []
        for f in glob.iglob(os.path.join(eval_path, '*_trim.wav')):
            name = os.path.split(f)[-1][:-9]
            existent_samples.append(name)
        if len(existent_samples) == 0 or not args.recover_eval:
            batches = feeder_eval.fetch_data()
        else:
            logging.info("%d samples found and skipped" % len(existent_samples))
            batches = feeder_eval.fetch_data(exclude=existent_samples)
        summary_windows = []
        if zipfilepath:
            mse = infolog.LookupWindow('mse_dtw', reduction='avg')
            summary_windows.append(mse)
        cer = infolog.LookupWindow('cer', reduction='avg')
        summary_windows.append(cer)
        checkpoint.load_model(ckpt_path, m, map_location=map_location, strict=True)
        logging.info('Running %d batches, to %s' % (len(batches), eval_path))
        batches = batches[:hp.max_eval_batches]
        eval_futures = []
        names = []
        evaltime = 0
        for i, batch in enumerate(batches):
            logging.info("[Batch %d] Generating " % i + str(batch['names']))
            batch = dict_send_to(batch, device)
            eval_tic = time.time()
            results = eval_batch(m, batch, use_bar=False, bar_interval=500)
            evaltime += time.time() - eval_tic
            # Drop bulky intermediates before shipping to the save worker.
            results['mel_pre'] = results['alignments']['self'] = None
            results = dict_send_to(results, 'cpu', as_numpy=True)
            batch = dict_send_to(batch, 'cpu', as_numpy=True)
            fn = partial(save_eval_results, **results, inputs=batch, output_dir=eval_path, save_trimmed_wave=True)
            logging.info('[Batch %d] Submit thread: ' % (i) + str(batch['names']))
            eval_futures.append(executor.submit(fn))
            names.extend(batch['names'])
            if 'input_language_vecs' in batch:
                lvs = batch['input_language_vecs']
                lang_ids = [language_vec_to_id(lv) for lv in lvs]
                langs = [id_to_lang[id] for id in lang_ids]
            else:
                langs = ['' for _ in batch['names']]
            if zipfilepath:
                mse.update(langs, infolog.calculate_mse_dtw(
                    results['mel_aft'], results['generated_lengths'],
                    batch['mel_targets'], batch['target_lengths']))
        eval_futures = [f.result() for f in eval_futures]
        if transcribe_available:
            run_transcription(eval_path, names, existent_samples, meta_index, cer, step)
        for window in summary_windows:
            stats = window.summary()
            for k, v in stats:
                if args.eval_name != 'eval_logs':
                    k += args.eval_name + '/'
                writer.add_scalar(k, v, global_step=step)
            window.clear()
        logging.info('Finished eval in %.3f sec (sample generation %.3f)' % (time.time() - tic, evaltime))
        finished_ckpt.append(ckpt_path)
        # Mirror eval outputs next to the checkpoints.
        os.system("rsync -au %s %s" % (os.path.join(logdir, '*'), os.path.join(model_dir, args.eval_name)))
# Command-line entry point: parse options and start the eval loop.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model-dir', required=True, help="Directory of checkpoints")
    parser.add_argument('--log-dir', required=True, help="Directory to save log and tfevents")
    parser.add_argument('--data-dir', required=True, help="Directory with data and metadata")
    parser.add_argument('--eval_name', default="eval_logs", help="Directory to save eval results")
    parser.add_argument('--no_wait', default=None, help='Wait when there is no ckpt available')
    parser.add_argument('--zipfilepath', type=str, default=None,
                        help="Zip file of mels, use mels.zip under data-dir when not given")
    parser.add_argument('--eval_meta', type=str, default=None,
                        help="Metadata file for eval, use metadata.eval.txt under data-dir when not given")
    parser.add_argument('--vocab', type=str, default=None)
    parser.add_argument('--external_embed', type=str, default=None)
    parser.add_argument('--eval_languages', type=str, default=None,
                        help="Languages for eval, separated by colons; use all when not given")
    parser.add_argument('--eval_speakers', type=str, default=None,
                        help="Speakers for eval under the eval_languages, separated by colon; use all when not given")
    parser.add_argument('--exclude_speakers', type=str, default=None,
                        help="Speakers to be excluded from eval, separated by colon")
    parser.add_argument('--recover_eval', type=bool, default=None,
                        help="Whether skip the samples that are found already synthesized; "
                             "enabling this may break the MSE metrics")
    parser.add_argument('--start_step', type=int, default=50000,
                        help="Mininum step of checkpoint to run eval on")
    parser.add_argument('--eval_steps', type=str, default=None,
                        help="Steps of checkpoints to run eval on; consider all checkpoints when not specified")
    parser.add_argument('--eval_interval', type=int, default=10000,
                        help="Interval of steps to run eval on; "
                             "if step % eval_interval is not zero, the checkpoint will be skipped")
    parser.add_argument('--hparams', default='', help='Alternative hparams')
    args, unparsed = parser.parse_known_args()
    print('unparsed:', unparsed)
    main(args)
| [
"hyperparams.hparams.to_json",
"time.sleep",
"torch.cuda.is_available",
"logging.info",
"torch.utils.tensorboard.SummaryWriter",
"os.path.exists",
"synthesize.eval_batch",
"logging.warn",
"numpy.mean",
"argparse.ArgumentParser",
"os.listdir",
"json.dumps",
"os.path.split",
"utils.checkpoin... | [((744, 778), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': '(4)'}), '(max_workers=4)\n', (763, 778), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((695, 732), 'faulthandler.register', 'faulthandler.register', (['signal.SIGUSR1'], {}), '(signal.SIGUSR1)\n', (716, 732), False, 'import faulthandler, signal\n'), ((2605, 2639), 'os.makedirs', 'os.makedirs', (['logdir'], {'exist_ok': '(True)'}), '(logdir, exist_ok=True)\n', (2616, 2639), False, 'import os, glob\n'), ((2644, 2666), 'hyperparams.hparams.parse', 'hp.parse', (['args.hparams'], {}), '(args.hparams)\n', (2652, 2666), True, 'from hyperparams import hparams as hp\n'), ((2900, 3017), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': "('[%(levelname)s %(asctime)s]' + ' %(message)s')", 'stream': 'sys.stdout', 'level': 'logging.INFO'}), "(format='[%(levelname)s %(asctime)s]' + ' %(message)s',\n stream=sys.stdout, level=logging.INFO)\n", (2919, 3017), False, 'import logging\n'), ((3043, 3063), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (3060, 3063), False, 'import torch\n'), ((3152, 3181), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'logdir'}), '(log_dir=logdir)\n', (3165, 3181), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((3297, 3308), 'hyperparams.hparams.values', 'hp.values', ([], {}), '()\n', (3306, 3308), True, 'from hyperparams import hparams as hp\n'), ((3812, 3846), 'os.path.exists', 'os.path.exists', (['"""filter_keys.json"""'], {}), "('filter_keys.json')\n", (3826, 3846), False, 'import os, glob\n'), ((4683, 4963), 'dataloader.FeederEval', 'FeederEval', (['zipfilepath', 'eval_meta', 'hp'], {'spk_to_id': 'spk_to_id', 'lang_to_id': 'lang_to_id', 'eval_lang': 'eval_languages', 'eval_spk': 'eval_speakers', 'exclude_spk': 'exclude_speakers', 'shuffle': '(True)', 'keep_order': '(True)', 'pick_partial': '(False)', 'single': '(False)', 'vocab': 
'vocab', 'embed': 'args.external_embed'}), '(zipfilepath, eval_meta, hp, spk_to_id=spk_to_id, lang_to_id=\n lang_to_id, eval_lang=eval_languages, eval_spk=eval_speakers,\n exclude_spk=exclude_speakers, shuffle=True, keep_order=True,\n pick_partial=False, single=False, vocab=vocab, embed=args.external_embed)\n', (4693, 4963), False, 'from dataloader import FeederEval\n'), ((5126, 5147), 'transformer.tacotron.Tacotron', 'tacotron.Tacotron', (['hp'], {}), '(hp)\n', (5143, 5147), False, 'from transformer import tacotron\n'), ((9810, 9835), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9833, 9835), False, 'import argparse\n'), ((891, 938), 'os.path.join', 'os.path.join', (['eval_path', '"""transcriptions.jsonl"""'], {}), "(eval_path, 'transcriptions.jsonl')\n", (903, 938), False, 'import os, glob\n'), ((2725, 2745), 'hyperparams.hparams.to_json', 'hp.to_json', ([], {'indent': '(1)'}), '(indent=1)\n', (2735, 2745), True, 'from hyperparams import hparams as hp\n'), ((3087, 3135), 'os.path.join', 'os.path.join', (['logdir', "('outputs_%s.log' % time_id)"], {}), "(logdir, 'outputs_%s.log' % time_id)\n", (3099, 3135), False, 'import os, glob\n'), ((3215, 3240), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3238, 3240), False, 'import torch\n'), ((4382, 4416), 'os.path.join', 'os.path.join', (['data_dir', '"""mels.zip"""'], {}), "(data_dir, 'mels.zip')\n", (4394, 4416), False, 'import os, glob\n'), ((4428, 4455), 'os.path.exists', 'os.path.exists', (['zipfilepath'], {}), '(zipfilepath)\n', (4442, 4455), False, 'import os, glob\n'), ((4538, 4581), 'os.path.join', 'os.path.join', (['data_dir', '"""metadata.eval.txt"""'], {}), "(data_dir, 'metadata.eval.txt')\n", (4550, 4581), False, 'import os, glob\n'), ((5171, 5196), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5194, 5196), False, 'import torch\n'), ((5327, 5379), 'logging.info', 'logging.info', (["('%s %s' % (var, state_dict[var].shape))"], 
{}), "('%s %s' % (var, state_dict[var].shape))\n", (5339, 5379), False, 'import logging\n'), ((6459, 6470), 'time.time', 'time.time', ([], {}), '()\n', (6468, 6470), False, 'import time, datetime\n'), ((6550, 6588), 'os.path.join', 'os.path.join', (['logdir', "('eval_%d' % step)"], {}), "(logdir, 'eval_%d' % step)\n", (6562, 6588), False, 'import os, glob\n'), ((6599, 6640), 'logging.info', 'logging.info', (["('Evaluating %s' % ckpt_path)"], {}), "('Evaluating %s' % ckpt_path)\n", (6611, 6640), False, 'import logging\n'), ((6649, 6686), 'os.makedirs', 'os.makedirs', (['eval_path'], {'exist_ok': '(True)'}), '(eval_path, exist_ok=True)\n', (6660, 6686), False, 'import os, glob\n'), ((7324, 7368), 'utils.infolog.LookupWindow', 'infolog.LookupWindow', (['"""cer"""'], {'reduction': '"""avg"""'}), "('cer', reduction='avg')\n", (7344, 7368), False, 'from utils import infolog, checkpoint\n'), ((7414, 7489), 'utils.checkpoint.load_model', 'checkpoint.load_model', (['ckpt_path', 'm'], {'map_location': 'map_location', 'strict': '(True)'}), '(ckpt_path, m, map_location=map_location, strict=True)\n', (7435, 7489), False, 'from utils import infolog, checkpoint\n'), ((1066, 1079), 'json.loads', 'json.loads', (['l'], {}), '(l)\n', (1076, 1079), False, 'import json\n'), ((1977, 2024), 'os.path.join', 'os.path.join', (['eval_path', '"""transcriptions.jsonl"""'], {}), "(eval_path, 'transcriptions.jsonl')\n", (1989, 2024), False, 'import os, glob\n'), ((2416, 2459), 'logging.warn', 'logging.warn', (["('Failed sample: ' + t['name'])"], {}), "('Failed sample: ' + t['name'])\n", (2428, 2459), False, 'import logging\n'), ((2850, 2873), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2871, 2873), False, 'import time, datetime\n'), ((6352, 6394), 'logging.info', 'logging.info', (['"""No ckpt found, sleeping..."""'], {}), "('No ckpt found, sleeping...')\n", (6364, 6394), False, 'import logging\n'), ((6407, 6422), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', 
(6417, 6422), False, 'import time, datetime\n'), ((6746, 6783), 'os.path.join', 'os.path.join', (['eval_path', '"""*_trim.wav"""'], {}), "(eval_path, '*_trim.wav')\n", (6758, 6783), False, 'import os, glob\n'), ((7221, 7269), 'utils.infolog.LookupWindow', 'infolog.LookupWindow', (['"""mse_dtw"""'], {'reduction': '"""avg"""'}), "('mse_dtw', reduction='avg')\n", (7241, 7269), False, 'from utils import infolog, checkpoint\n'), ((7824, 7851), 'utils.dict_send_to', 'dict_send_to', (['batch', 'device'], {}), '(batch, device)\n', (7836, 7851), False, 'from utils import dict_send_to\n'), ((7875, 7886), 'time.time', 'time.time', ([], {}), '()\n', (7884, 7886), False, 'import time, datetime\n'), ((7909, 7962), 'synthesize.eval_batch', 'eval_batch', (['m', 'batch'], {'use_bar': '(False)', 'bar_interval': '(500)'}), '(m, batch, use_bar=False, bar_interval=500)\n', (7919, 7962), False, 'from synthesize import eval_batch, save_eval_results\n'), ((8103, 8146), 'utils.dict_send_to', 'dict_send_to', (['results', '"""cpu"""'], {'as_numpy': '(True)'}), "(results, 'cpu', as_numpy=True)\n", (8115, 8146), False, 'from utils import dict_send_to\n'), ((8167, 8208), 'utils.dict_send_to', 'dict_send_to', (['batch', '"""cpu"""'], {'as_numpy': '(True)'}), "(batch, 'cpu', as_numpy=True)\n", (8179, 8208), False, 'from utils import dict_send_to\n'), ((8227, 8328), 'functools.partial', 'partial', (['save_eval_results'], {'inputs': 'batch', 'output_dir': 'eval_path', 'save_trimmed_wave': '(True)'}), '(save_eval_results, **results, inputs=batch, output_dir=eval_path,\n save_trimmed_wave=True)\n', (8234, 8328), False, 'from functools import partial\n'), ((2676, 2712), 'os.path.join', 'os.path.join', (['logdir', '"""hparams.json"""'], {}), "(logdir, 'hparams.json')\n", (2688, 2712), False, 'import os, glob\n'), ((2756, 2789), 'os.path.join', 'os.path.join', (['logdir', '"""args.json"""'], {}), "(logdir, 'args.json')\n", (2768, 2789), False, 'import os, glob\n'), ((3633, 3671), 'os.path.join', 
'os.path.join', (['data_dir', '"""lang_id.json"""'], {}), "(data_dir, 'lang_id.json')\n", (3645, 3671), False, 'import os, glob\n'), ((3735, 3772), 'os.path.join', 'os.path.join', (['data_dir', '"""spk_id.json"""'], {}), "(data_dir, 'spk_id.json')\n", (3747, 3772), False, 'import os, glob\n'), ((5677, 5716), 'os.path.join', 'os.path.join', (['model_dir', '"""model.ckpt-*"""'], {}), "(model_dir, 'model.ckpt-*')\n", (5689, 5716), False, 'import os, glob\n'), ((6282, 6316), 'logging.info', 'logging.info', (['"""No more ckpt, exit"""'], {}), "('No more ckpt, exit')\n", (6294, 6316), False, 'import logging\n'), ((7987, 7998), 'time.time', 'time.time', ([], {}), '()\n', (7996, 7998), False, 'import time, datetime\n'), ((1721, 1761), 'os.path.join', 'os.path.join', (['eval_path', "(n + '_trim.wav')"], {}), "(eval_path, n + '_trim.wav')\n", (1733, 1761), False, 'import os, glob\n'), ((2101, 2134), 'json.dumps', 'json.dumps', (['t'], {'ensure_ascii': '(False)'}), '(t, ensure_ascii=False)\n', (2111, 2134), False, 'import json\n'), ((6805, 6821), 'os.path.split', 'os.path.split', (['f'], {}), '(f)\n', (6818, 6821), False, 'import os, glob\n'), ((8629, 8651), 'utils.text.language_vec_to_id', 'language_vec_to_id', (['lv'], {}), '(lv)\n', (8647, 8651), False, 'from utils.text import language_vec_to_id\n'), ((8860, 8986), 'utils.infolog.calculate_mse_dtw', 'infolog.calculate_mse_dtw', (["results['mel_aft']", "results['generated_lengths']", "batch['mel_targets']", "batch['target_lengths']"], {}), "(results['mel_aft'], results['generated_lengths'],\n batch['mel_targets'], batch['target_lengths'])\n", (8885, 8986), False, 'from utils import infolog, checkpoint\n'), ((9699, 9724), 'os.path.join', 'os.path.join', (['logdir', '"""*"""'], {}), "(logdir, '*')\n", (9711, 9724), False, 'import os, glob\n'), ((9726, 9765), 'os.path.join', 'os.path.join', (['model_dir', 'args.eval_name'], {}), '(model_dir, args.eval_name)\n', (9738, 9765), False, 'import os, glob\n'), ((2194, 2228), 
'numpy.mean', 'np.mean', (["[t['cer'] for t in trans]"], {}), "([t['cer'] for t in trans])\n", (2201, 2228), True, 'import numpy as np\n'), ((5621, 5642), 'os.listdir', 'os.listdir', (['model_dir'], {}), '(model_dir)\n', (5631, 5642), False, 'import os, glob\n'), ((9589, 9600), 'time.time', 'time.time', ([], {}), '()\n', (9598, 9600), False, 'import time, datetime\n'), ((962, 1009), 'os.path.join', 'os.path.join', (['eval_path', '"""transcriptions.jsonl"""'], {}), "(eval_path, 'transcriptions.jsonl')\n", (974, 1009), False, 'import os, glob\n')] |
"""This would provide postprocessing of results.
Libraries/Modules:
Would use: numpy\n
Would use: pandas\n
"""
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits import mplot3d
from bin.NavierStokes import NavierStokes
class flo103_PostProcessor:
    """Post-processing for flow solutions.

    ``print_state`` writes filled-contour plots of the conserved state
    (and optionally the pressure) to PNG files; ``print_grid`` plots the
    padded cell-center grid and a 3D surface of the cell-volume
    difference between the padded and unpadded fields, then asserts that
    both fields agree on the interior.
    """

    def __init__(self, input):
        """Construct the post-processor; ``input`` is currently unused."""
        pass

    def print_state(self, workspace, state, pressure=None):
        """Save contour plots of the four conserved variables.

        Parameters
        ----------
        workspace : object
            Provides ``get_field('xc')`` returning cell-center
            coordinates (``xc[:, :, 0]`` = x, ``xc[:, :, 1]`` = y).
        state : numpy.ndarray
            Conserved state; ``state[:, :, i]`` holds density,
            x-momentum, y-momentum and energy for i = 0..3.
        pressure : numpy.ndarray, optional
            If given, an additional "pressure.png" contour plot is saved.
        """
        xc = workspace.get_field('xc')
        x = xc[:,:,0]
        y = xc[:,:,1]
        names = ["density", "x-mom", "y-mom", "energy"]
        for i in range(4):
            plt.figure()
            plt.contourf(x,y,state[:,:,i])
            plt.title(names[i])
            plt.colorbar()
            plt.axis([-0.75,1.50,-0.8,0.8])
            plt.savefig(names[i]+".png")
        # Pressure contours
        if pressure is not None:
            plt.figure()
            plt.contourf(x,y,pressure)
            plt.title("Pressure contours")
            plt.colorbar()
            plt.axis([-0.75,1.50,-0.8,0.8])
            plt.savefig("pressure.png")

    def print_grid(self, model, workspace):
        """Plot the padded grid and the padded-vs-unpadded volume difference.

        Shows the padded cell-center grid interactively, saves a 3D
        surface of ``volpad - vol`` to "volume.png", and asserts that the
        padded fields match the unpadded ones on the interior.
        """
        xcpad = workspace.get_field('xc', model.className)
        x = xcpad[:,:,0]
        y = xcpad[:,:,1]
        plt.plot(x[1:-1,1:-1], y[1:-1,1:-1])
        plt.title("xc")
        plt.axis([-0.75,1.50,-0.8,0.8])
        plt.show()

        # interior of the padded volume field vs. the unpadded one
        volpad = workspace.get_field('vol', model.className)
        volpad = volpad[2:-2,2:-2]
        vol = workspace.get_field('vol')
        diff = volpad-vol

        # (two dead assignments removed here: x and y were sliced and then
        # immediately overwritten from the unpadded coordinate field below)
        xc = workspace.get_field('xc')
        x = xc[:,:,0]
        y = xc[:,:,1]

        plt.figure()
        ax = plt.axes(projection='3d')
        # ax.contour3D(x, y, vol, 50, cmap='binary')
        ax.plot_surface(x, y, diff, rstride=1, cstride=1,
                        cmap='viridis', edgecolor='none')
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('vol')
        ax.view_init(60, 35)
        plt.savefig("volume.png")
        print(np.min(vol))
        # interior values must match the padded fields exactly
        assert np.max(np.abs(vol-volpad))<1e-15
        assert np.max(np.abs(xcpad[2:-2,2:-2,:]-xc))<1e-15
| [
"matplotlib.pyplot.contourf",
"numpy.abs",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"numpy.min",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((1622, 1660), 'matplotlib.pyplot.plot', 'plt.plot', (['x[1:-1, 1:-1]', 'y[1:-1, 1:-1]'], {}), '(x[1:-1, 1:-1], y[1:-1, 1:-1])\n', (1630, 1660), True, 'from matplotlib import pyplot as plt\n'), ((1667, 1682), 'matplotlib.pyplot.title', 'plt.title', (['"""xc"""'], {}), "('xc')\n", (1676, 1682), True, 'from matplotlib import pyplot as plt\n'), ((1691, 1724), 'matplotlib.pyplot.axis', 'plt.axis', (['[-0.75, 1.5, -0.8, 0.8]'], {}), '([-0.75, 1.5, -0.8, 0.8])\n', (1699, 1724), True, 'from matplotlib import pyplot as plt\n'), ((1731, 1741), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1739, 1741), True, 'from matplotlib import pyplot as plt\n'), ((2061, 2073), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2071, 2073), True, 'from matplotlib import pyplot as plt\n'), ((2087, 2112), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (2095, 2112), True, 'from matplotlib import pyplot as plt\n'), ((2394, 2419), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""volume.png"""'], {}), "('volume.png')\n", (2405, 2419), True, 'from matplotlib import pyplot as plt\n'), ((959, 971), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (969, 971), True, 'from matplotlib import pyplot as plt\n'), ((984, 1018), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', 'state[:, :, i]'], {}), '(x, y, state[:, :, i])\n', (996, 1018), True, 'from matplotlib import pyplot as plt\n'), ((1027, 1046), 'matplotlib.pyplot.title', 'plt.title', (['names[i]'], {}), '(names[i])\n', (1036, 1046), True, 'from matplotlib import pyplot as plt\n'), ((1059, 1073), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1071, 1073), True, 'from matplotlib import pyplot as plt\n'), ((1086, 1119), 'matplotlib.pyplot.axis', 'plt.axis', (['[-0.75, 1.5, -0.8, 0.8]'], {}), '([-0.75, 1.5, -0.8, 0.8])\n', (1094, 1119), True, 'from matplotlib import pyplot as plt\n'), ((1130, 1160), 'matplotlib.pyplot.savefig', 
'plt.savefig', (["(names[i] + '.png')"], {}), "(names[i] + '.png')\n", (1141, 1160), True, 'from matplotlib import pyplot as plt\n'), ((1241, 1253), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1251, 1253), True, 'from matplotlib import pyplot as plt\n'), ((1266, 1294), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', 'pressure'], {}), '(x, y, pressure)\n', (1278, 1294), True, 'from matplotlib import pyplot as plt\n'), ((1305, 1335), 'matplotlib.pyplot.title', 'plt.title', (['"""Pressure contours"""'], {}), "('Pressure contours')\n", (1314, 1335), True, 'from matplotlib import pyplot as plt\n'), ((1348, 1362), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1360, 1362), True, 'from matplotlib import pyplot as plt\n'), ((1375, 1408), 'matplotlib.pyplot.axis', 'plt.axis', (['[-0.75, 1.5, -0.8, 0.8]'], {}), '([-0.75, 1.5, -0.8, 0.8])\n', (1383, 1408), True, 'from matplotlib import pyplot as plt\n'), ((1419, 1446), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pressure.png"""'], {}), "('pressure.png')\n", (1430, 1446), True, 'from matplotlib import pyplot as plt\n'), ((2434, 2445), 'numpy.min', 'np.min', (['vol'], {}), '(vol)\n', (2440, 2445), True, 'import numpy as np\n'), ((2469, 2489), 'numpy.abs', 'np.abs', (['(vol - volpad)'], {}), '(vol - volpad)\n', (2475, 2489), True, 'import numpy as np\n'), ((2517, 2550), 'numpy.abs', 'np.abs', (['(xcpad[2:-2, 2:-2, :] - xc)'], {}), '(xcpad[2:-2, 2:-2, :] - xc)\n', (2523, 2550), True, 'import numpy as np\n')] |
'''
Library for 2-component Flory-Huggins theory.
Author: <NAME>
Date created: 23 March 2022
'''
import numpy as np
def help():
    """Print a short summary of the functions exported by this module.

    Note: this intentionally shadows the builtin ``help`` inside FH's
    namespace; call it as ``FH.help()``.
    """
    print('Here are the list of functions included in FH.py:\n')
    print('    critical(n = 1): returns the critical concentration and critical interaction [phi_c, chi_c]\n')
    print('    spinodal(chi, n = 1): returns spinodal concentrations [p1, p2, chi] in the valid chi range\n')
    print('    GL_binodal(chi, n = 1): Ginzburg-Landau binodal [p1, p2, chi]\n')
    # typo fixed below: "speficied" -> "specified"
    print('    binodal(chi, n = 1, iteration = 5, UseImprovedMap = True): self-consistent solution with specified number of iterations [p1, p2, chi]. You can also use the simple map to see what it does\n')
    print('    analytic_binodal(x, n = 1): analytic forms')
def critical(n=1):
    """Return the critical point ``[phi_c, chi_c]`` for polymer length ``n``.

    For the symmetric mixture (``n == 1``) this yields ``phi_c = 0.5`` and
    ``chi_c = 2``.
    """
    root_n = np.sqrt(n)
    chi_c = 0.5 * (1.0 + 1.0 / root_n) ** 2
    phi_c = 1.0 / (1.0 + root_n)
    return np.array([phi_c, chi_c])
def spinodal(x, n=1):
    """Spinodal concentrations for interaction strength(s) ``x``.

    Parameters
    ----------
    x : float or array-like
        Flory interaction parameter(s) chi.
    n : int or float
        Polymer length.

    Returns
    -------
    numpy.ndarray
        ``[phi_plus, phi_minus]`` for scalar input, or
        ``[phi_plus, phi_minus, chi]`` for array input (chi restricted to
        values at or above the critical point).

    Raises
    ------
    ValueError
        If the interaction strength is below the critical value.
    """
    x_c = critical(n)[1]
    gamma = 1.0 - 1.0 / n
    if not np.array(x).shape:  # scalar chi
        if not x > x_c:
            raise ValueError('interaction strength too small - no LLPS!')
        mid = 0.5 - gamma / (4.0 * x)
        half_width = np.sqrt(mid * mid - 1.0 / (2.0 * x * n))
        return np.array([mid + half_width, mid - half_width])
    chi = np.array(x)
    if max(chi) < x_c:
        raise ValueError('interaction strength too small - no LLPS!')
    chi = chi[chi >= x_c]
    mid = 0.5 - gamma / (4.0 * chi)
    half_width = np.sqrt(mid * mid - 1.0 / (2.0 * chi * n))
    return np.array([mid + half_width, mid - half_width, chi])
def GL_binodal(x, n=1):
    """Ginzburg-Landau (expansion) approximation of the binodal.

    Parameters
    ----------
    x : float or array-like
        Flory interaction parameter(s) chi.
    n : int or float
        Polymer length.

    Returns
    -------
    numpy.ndarray
        ``[phi_plus, phi_minus, chi]``; for array input, chi is
        restricted to values at or above the critical point.

    Raises
    ------
    ValueError
        If the interaction strength is below the critical value.
    """
    phi_c, x_c = critical(n)
    if not np.array(x).shape:  # scalar chi
        if not x > x_c:
            raise ValueError('interaction strength too small - no LLPS!')
        spread = np.sqrt(3.0 * (x - x_c) / (2.0 * x_c ** 2 * np.sqrt(n)))
        return np.array([phi_c + spread, phi_c - spread, x])
    chi = np.array(x)
    if max(chi) < x_c:
        raise ValueError('interaction strength too small - no LLPS!')
    chi = chi[chi >= x_c]
    spread = np.sqrt(3.0 * (chi - x_c) / (2.0 * x_c ** 2 * np.sqrt(n)))
    return np.array([phi_c + spread, phi_c - spread, chi])
def binodal(x, n = 1, iteration = 5, UseImprovedMap = True):
    """Self-consistent binodal, iterated from the Ginzburg-Landau guess.

    Parameters
    ----------
    x : float or array-like
        Flory interaction parameter(s) chi (must allow LLPS, see GL_binodal).
    n : int or float
        Polymer length.  NOTE(review): only ``n == 1`` and ``n > 1`` are
        handled; for ``n < 1`` no branch is taken and the function
        implicitly returns ``None``.
    iteration : int
        Number of fixed-point / improved-map iterations (must be >= 0).
    UseImprovedMap : bool
        If True, use the accelerated update (appears to be a Newton-type
        step on the self-consistency equation -- TODO confirm); otherwise
        use the plain fixed-point map.

    Returns
    -------
    numpy.ndarray
        ``[phi_dense, phi_dilute, chi]`` on the chi grid produced by
        ``GL_binodal``.
    """
    assert iteration >= 0
    crit = critical(n)
    # NOTE(review): x_c and phi_c are computed but never used below.
    x_c = crit[1]
    phi_c = crit[0]
    gamma = 1. - 1./n
    if n == 1:
        # symmetric case: the two branches are mirror images, so only the
        # dense branch pp is iterated and the dilute one is 1 - pp.
        guess = GL_binodal(x)
        pp = guess[0]
        xx = guess[2]
        if UseImprovedMap:
            for _ in range(iteration):
                ee = np.exp(- 2 * xx * pp + xx)
                # accelerated update of the fixed point pp = 1/(1 + ee)
                pp = (2. * xx * pp * ee - 1. - ee)/(2. * xx * ee - (1. + ee)**2)
        else:
            for _ in range(iteration):
                ee = np.exp(- 2 * xx * pp + xx)
                # plain fixed-point map
                pp = 1/(1 + ee)
        return np.array([pp, 1 - pp, xx])
    if n > 1:
        # asymmetric case: iterate both branches (p1 dense, p2 dilute).
        guess = GL_binodal(x, n = n)
        p1 = guess[0]
        p2 = guess[1]
        xx = guess[2]
        if UseImprovedMap:
            for _ in range(iteration):
                a = np.exp( - 2. * xx * (p1 - p2))
                b = np.exp( - gamma * (p1 - p2) - xx * (np.power(p1,2) - np.power(p2,2)))
                c = np.power(a/b, n)
                # g1, g2: the fixed-point maps for the two branches
                g1 = (1. - b)/(1. - np.power(a/b, n) * b)
                g2 = (1. - b)/(np.power(b/a, n) - b)
                # partial derivatives of log(a) and log(b) w.r.t. p1, p2
                d1lna = - 2. * xx
                d1lnb = - gamma - xx * 2. * p1
                d2lna = 2. * xx
                d2lnb = gamma + xx * 2. * p2
                # 2x2 Jacobian of the residual (g1 - p1, g2 - p2)
                j11 = g1**2 * (- d1lnb * b*(1-c)/(1-b)**2 + n * (d1lna - d1lnb) * c * b /(1-b)) - 1
                j21 = g1**2 * (- d2lnb * b*(1-c)/(1-b)**2 + n * (d2lna - d2lnb) * c * b /(1-b))
                j12 = (j11 + 1) * c + g1 * n * c * (d1lna - d1lnb)
                j22 = j21 * c + g1 * n * c * (d2lna - d2lnb) - 1
                detj = j11 * j22 - j12 * j21
                # Newton-style simultaneous update of both branches
                p1_new = np.copy(p1 + (- (g1 - p1) * j22 + (g2 - p2) * j21)/detj)
                p2_new = np.copy(p2 + (- (g2 - p2) * j11 + (g1 - p1) * j12)/detj)
                p1 = p1_new
                p2 = p2_new
        else:
            for _ in range(iteration):
                a = np.exp( - 2. * xx * (p1 - p2))
                b = np.exp( - gamma * (p1 - p2) - xx * (np.power(p1,2) - np.power(p2,2)))
                c = np.power(a/b, n)
                g1 = (1. - b)/(1. - np.power(a/b, n) * b)
                g2 = (1. - b)/(np.power(b/a, n) - b)
                # plain fixed-point updates (same expressions as g1, g2)
                p1_new = np.copy((1. - b)/(1. - np.power(a/b, n) * b))
                p2_new = np.copy((1. - b)/(np.power(b/a, n) - b))
                p1 = p1_new
                p2 = p2_new
        return np.array([p1, p2, xx])
def _analytic_binodal_branches(x, n, x_c):
    """Evaluate the two binodal branches ``(pp, pm)`` at chi value(s) ``x``.

    ``x`` may be a scalar or a numpy array of values above ``x_c``; all
    expressions broadcast, so scalar and array callers share this code.
    """
    if n == 1:
        # symmetric case; the critical point chi_c = 2 is hard-coded in
        # the 3*(x-2)/8 factor.
        arg = x * np.sqrt(3 * (x - 2) / 8)
        pp = 1 / (1 + np.exp(-x * np.tanh(arg)))
        pm = 1 / (1 + np.exp(-x * np.tanh(-arg)))
        return pp, pm
    a = n ** 0.25
    D = (x - x_c) / x_c          # reduced distance from the critical point
    c = (a + 1 / a) / 2
    s = (a - 1 / a) / 2
    cothA = 1 / np.tanh((1 + D / a ** 2) * np.sqrt(3 * D) / a)
    cothB = 1 / np.tanh((1 + D * a ** 2) * np.sqrt(3 * D) * a)
    prefactor = c / (cothA + cothB)
    numerator_exp = 8 * prefactor * (s / a ** 2 + (1 + D) * prefactor * cothB / a ** 2)
    denominator_exp = 8 * prefactor * (s * (1 / a ** 2 - a ** 2) + (1 + D) * prefactor * (cothB / a ** 2 + a ** 2 * cothA))
    pp = (1 - np.exp(-numerator_exp)) / (1 - np.exp(-denominator_exp))
    pm = (1 - np.exp(+numerator_exp)) / (1 - np.exp(+denominator_exp))
    return pp, pm


def analytic_binodal(x, n = 1):
    """Closed-form approximation of the binodal.

    Parameters
    ----------
    x : float or array-like
        Flory interaction parameter(s) chi.
    n : int or float
        Polymer length.

    Returns
    -------
    numpy.ndarray
        ``[pp, pm]`` for scalar input; ``[pp, pm, chi]`` for array input
        (chi restricted to values at or above the critical point).

    Raises
    ------
    ValueError
        If the interaction strength is below the critical value.

    Notes
    -----
    The scalar and array paths previously duplicated ~20 lines of
    formulas; both now share ``_analytic_binodal_branches``.
    """
    # critical interaction strength chi_c = 0.5 * (1 + 1/sqrt(n))**2
    x_c = 0.5 * np.power(1. + 1. / np.sqrt(n), 2)
    if not np.array(x).shape:  # scalar chi
        if not x > x_c:
            raise ValueError('interaction strength too small - no LLPS!')
        pp, pm = _analytic_binodal_branches(x, n, x_c)
        return np.array([pp, pm])
    x = np.array(x)
    if max(x) < x_c:
        raise ValueError('interaction strength too small - no LLPS!')
    x = x[x >= x_c]
    pp, pm = _analytic_binodal_branches(x, n, x_c)
    return np.array([pp, pm, x])
"numpy.copy",
"numpy.sqrt",
"numpy.power",
"numpy.exp",
"numpy.array"
] | [((907, 929), 'numpy.array', 'np.array', (['[phi_c, x_c]'], {}), '([phi_c, x_c])\n', (915, 929), True, 'import numpy as np\n'), ((2589, 2615), 'numpy.array', 'np.array', (['[pp, 1 - pp, xx]'], {}), '([pp, 1 - pp, xx])\n', (2597, 2615), True, 'import numpy as np\n'), ((4035, 4057), 'numpy.array', 'np.array', (['[p1, p2, xx]'], {}), '([p1, p2, xx])\n', (4043, 4057), True, 'import numpy as np\n'), ((887, 897), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (894, 897), True, 'import numpy as np\n'), ((1019, 1030), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1027, 1030), True, 'import numpy as np\n'), ((1146, 1174), 'numpy.array', 'np.array', (['[t1 + t2, t1 - t2]'], {}), '([t1 + t2, t1 - t2])\n', (1154, 1174), True, 'import numpy as np\n'), ((1352, 1363), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1360, 1363), True, 'import numpy as np\n'), ((1477, 1508), 'numpy.array', 'np.array', (['[t1 + t2, t1 - t2, x]'], {}), '([t1 + t2, t1 - t2, x])\n', (1485, 1508), True, 'import numpy as np\n'), ((1598, 1609), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1606, 1609), True, 'import numpy as np\n'), ((1726, 1757), 'numpy.array', 'np.array', (['[t1 + t2, t1 - t2, x]'], {}), '([t1 + t2, t1 - t2, x])\n', (1734, 1757), True, 'import numpy as np\n'), ((1935, 1946), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1943, 1946), True, 'import numpy as np\n'), ((2061, 2092), 'numpy.array', 'np.array', (['[t1 + t2, t1 - t2, x]'], {}), '([t1 + t2, t1 - t2, x])\n', (2069, 2092), True, 'import numpy as np\n'), ((4136, 4147), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (4144, 4147), True, 'import numpy as np\n'), ((4851, 4869), 'numpy.array', 'np.array', (['[pp, pm]'], {}), '([pp, pm])\n', (4859, 4869), True, 'import numpy as np\n'), ((5048, 5059), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5056, 5059), True, 'import numpy as np\n'), ((5762, 5783), 'numpy.array', 'np.array', (['[pp, pm, x]'], {}), '([pp, pm, x])\n', (5770, 5783), True, 'import numpy as 
np\n'), ((2387, 2412), 'numpy.exp', 'np.exp', (['(-2 * xx * pp + xx)'], {}), '(-2 * xx * pp + xx)\n', (2393, 2412), True, 'import numpy as np\n'), ((2532, 2557), 'numpy.exp', 'np.exp', (['(-2 * xx * pp + xx)'], {}), '(-2 * xx * pp + xx)\n', (2538, 2557), True, 'import numpy as np\n'), ((2774, 2803), 'numpy.exp', 'np.exp', (['(-2.0 * xx * (p1 - p2))'], {}), '(-2.0 * xx * (p1 - p2))\n', (2780, 2803), True, 'import numpy as np\n'), ((2891, 2909), 'numpy.power', 'np.power', (['(a / b)', 'n'], {}), '(a / b, n)\n', (2899, 2909), True, 'import numpy as np\n'), ((3443, 3500), 'numpy.copy', 'np.copy', (['(p1 + (-(g1 - p1) * j22 + (g2 - p2) * j21) / detj)'], {}), '(p1 + (-(g1 - p1) * j22 + (g2 - p2) * j21) / detj)\n', (3450, 3500), True, 'import numpy as np\n'), ((3513, 3570), 'numpy.copy', 'np.copy', (['(p2 + (-(g2 - p2) * j11 + (g1 - p1) * j12) / detj)'], {}), '(p2 + (-(g2 - p2) * j11 + (g1 - p1) * j12) / detj)\n', (3520, 3570), True, 'import numpy as np\n'), ((3654, 3683), 'numpy.exp', 'np.exp', (['(-2.0 * xx * (p1 - p2))'], {}), '(-2.0 * xx * (p1 - p2))\n', (3660, 3683), True, 'import numpy as np\n'), ((3771, 3789), 'numpy.power', 'np.power', (['(a / b)', 'n'], {}), '(a / b, n)\n', (3779, 3789), True, 'import numpy as np\n'), ((854, 864), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (861, 864), True, 'import numpy as np\n'), ((1101, 1116), 'numpy.power', 'np.power', (['t1', '(2)'], {}), '(t1, 2)\n', (1109, 1116), True, 'import numpy as np\n'), ((1432, 1447), 'numpy.power', 'np.power', (['t1', '(2)'], {}), '(t1, 2)\n', (1440, 1447), True, 'import numpy as np\n'), ((1703, 1713), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (1710, 1713), True, 'import numpy as np\n'), ((2038, 2048), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2045, 2048), True, 'import numpy as np\n'), ((2975, 2993), 'numpy.power', 'np.power', (['(b / a)', 'n'], {}), '(b / a, n)\n', (2983, 2993), True, 'import numpy as np\n'), ((3855, 3873), 'numpy.power', 'np.power', (['(b / a)', 'n'], {}), '(b / 
a, n)\n', (3863, 3873), True, 'import numpy as np\n'), ((4722, 4744), 'numpy.exp', 'np.exp', (['(-numerator_exp)'], {}), '(-numerator_exp)\n', (4728, 4744), True, 'import numpy as np\n'), ((4749, 4773), 'numpy.exp', 'np.exp', (['(-denominator_exp)'], {}), '(-denominator_exp)\n', (4755, 4773), True, 'import numpy as np\n'), ((4787, 4809), 'numpy.exp', 'np.exp', (['(+numerator_exp)'], {}), '(+numerator_exp)\n', (4793, 4809), True, 'import numpy as np\n'), ((4814, 4838), 'numpy.exp', 'np.exp', (['(+denominator_exp)'], {}), '(+denominator_exp)\n', (4820, 4838), True, 'import numpy as np\n'), ((5633, 5655), 'numpy.exp', 'np.exp', (['(-numerator_exp)'], {}), '(-numerator_exp)\n', (5639, 5655), True, 'import numpy as np\n'), ((5660, 5684), 'numpy.exp', 'np.exp', (['(-denominator_exp)'], {}), '(-denominator_exp)\n', (5666, 5684), True, 'import numpy as np\n'), ((5698, 5720), 'numpy.exp', 'np.exp', (['(+numerator_exp)'], {}), '(+numerator_exp)\n', (5704, 5720), True, 'import numpy as np\n'), ((5725, 5749), 'numpy.exp', 'np.exp', (['(+denominator_exp)'], {}), '(+denominator_exp)\n', (5731, 5749), True, 'import numpy as np\n'), ((1684, 1700), 'numpy.power', 'np.power', (['x_c', '(2)'], {}), '(x_c, 2)\n', (1692, 1700), True, 'import numpy as np\n'), ((2019, 2035), 'numpy.power', 'np.power', (['x_c', '(2)'], {}), '(x_c, 2)\n', (2027, 2035), True, 'import numpy as np\n'), ((2934, 2952), 'numpy.power', 'np.power', (['(a / b)', 'n'], {}), '(a / b, n)\n', (2942, 2952), True, 'import numpy as np\n'), ((3814, 3832), 'numpy.power', 'np.power', (['(a / b)', 'n'], {}), '(a / b, n)\n', (3822, 3832), True, 'import numpy as np\n'), ((3968, 3986), 'numpy.power', 'np.power', (['(b / a)', 'n'], {}), '(b / a, n)\n', (3976, 3986), True, 'import numpy as np\n'), ((2849, 2864), 'numpy.power', 'np.power', (['p1', '(2)'], {}), '(p1, 2)\n', (2857, 2864), True, 'import numpy as np\n'), ((2866, 2881), 'numpy.power', 'np.power', (['p2', '(2)'], {}), '(p2, 2)\n', (2874, 2881), True, 'import numpy as 
np\n'), ((3729, 3744), 'numpy.power', 'np.power', (['p1', '(2)'], {}), '(p1, 2)\n', (3737, 3744), True, 'import numpy as np\n'), ((3746, 3761), 'numpy.power', 'np.power', (['p2', '(2)'], {}), '(p2, 2)\n', (3754, 3761), True, 'import numpy as np\n'), ((3914, 3932), 'numpy.power', 'np.power', (['(a / b)', 'n'], {}), '(a / b, n)\n', (3922, 3932), True, 'import numpy as np\n'), ((4432, 4446), 'numpy.sqrt', 'np.sqrt', (['(3 * D)'], {}), '(3 * D)\n', (4439, 4446), True, 'import numpy as np\n'), ((4481, 4495), 'numpy.sqrt', 'np.sqrt', (['(3 * D)'], {}), '(3 * D)\n', (4488, 4495), True, 'import numpy as np\n'), ((5343, 5357), 'numpy.sqrt', 'np.sqrt', (['(3 * D)'], {}), '(3 * D)\n', (5350, 5357), True, 'import numpy as np\n'), ((5392, 5406), 'numpy.sqrt', 'np.sqrt', (['(3 * D)'], {}), '(3 * D)\n', (5399, 5406), True, 'import numpy as np\n'), ((4219, 4243), 'numpy.sqrt', 'np.sqrt', (['(3 * (x - 2) / 8)'], {}), '(3 * (x - 2) / 8)\n', (4226, 4243), True, 'import numpy as np\n'), ((4278, 4302), 'numpy.sqrt', 'np.sqrt', (['(3 * (x - 2) / 8)'], {}), '(3 * (x - 2) / 8)\n', (4285, 4302), True, 'import numpy as np\n'), ((5130, 5154), 'numpy.sqrt', 'np.sqrt', (['(3 * (x - 2) / 8)'], {}), '(3 * (x - 2) / 8)\n', (5137, 5154), True, 'import numpy as np\n'), ((5189, 5213), 'numpy.sqrt', 'np.sqrt', (['(3 * (x - 2) / 8)'], {}), '(3 * (x - 2) / 8)\n', (5196, 5213), True, 'import numpy as np\n')] |
import enpix
import numpy as np

# Round-trip sanity check: encrypt a random image-shaped array and verify
# that decrypting it reproduces the original data exactly.
source = np.random.rand(341, 765, 3)
# print(source)

key = "firstname.lastname@<EMAIL>.com-nameofuser-mobilenumber"
timestamp = 1000000

encrypted = enpix.encrypt(source, key, timestamp)
# print(encrypted)

decrypted = enpix.decrypt(encrypted, key, timestamp)
# print(decrypted)

print((source == decrypted).all())
"enpix.decrypt",
"numpy.random.rand",
"enpix.encrypt"
] | [((42, 69), 'numpy.random.rand', 'np.random.rand', (['(341)', '(765)', '(3)'], {}), '(341, 765, 3)\n', (56, 69), True, 'import numpy as np\n'), ((166, 198), 'enpix.encrypt', 'enpix.encrypt', (['matrix', 'key', 'time'], {}), '(matrix, key, time)\n', (179, 198), False, 'import enpix\n'), ((218, 247), 'enpix.decrypt', 'enpix.decrypt', (['pic', 'key', 'time'], {}), '(pic, key, time)\n', (231, 247), False, 'import enpix\n')] |
# sphinx_gallery_thumbnail_number = 4
from __future__ import absolute_import
from . import _graph as __graph
from ._graph import *
from .. import Configuration
from . import opt
from . opt import multicut
from . opt import lifted_multicut
from . opt import mincut
from . opt import minstcut
import numpy
from functools import partial
import types
import sys
# Re-export every name from the compiled _graph extension and make the
# exported objects report 'nifty.graph' as their home module.
__all__ = []
for key in __graph.__dict__:
    try:
        __graph.__dict__[key].__module__ = 'nifty.graph'
    except Exception:
        # narrowed from a bare `except:` -- some entries (plain data
        # attributes, builtins) do not allow setting __module__; skip them
        # without also masking SystemExit / KeyboardInterrupt.
        pass
    __all__.append(key)

UndirectedGraph.__module__ = "nifty.graph"

ilpSettings = multicut.ilpSettings

# multicut objective
UndirectedGraph.MulticutObjective = multicut.MulticutObjectiveUndirectedGraph
UndirectedGraph.EdgeContractionGraph = EdgeContractionGraphUndirectedGraph
EdgeContractionGraphUndirectedGraph.MulticutObjective = multicut.MulticutObjectiveEdgeContractionGraphUndirectedGraph

# mincut objective
UndirectedGraph.MincutObjective = mincut.MincutObjectiveUndirectedGraph
UndirectedGraph.EdgeContractionGraph = EdgeContractionGraphUndirectedGraph
EdgeContractionGraphUndirectedGraph.MincutObjective = mincut.MincutObjectiveEdgeContractionGraphUndirectedGraph

# #minstcut objective
# UndirectedGraph.MinstcutObjective = minstcut.MinstcutObjectiveUndirectedGraph
# UndirectedGraph.EdgeContractionGraph = EdgeContractionGraphUndirectedGraph
# EdgeContractionGraphUndirectedGraph.MinstcutObjective = minstcut.MinstcutObjectiveEdgeContractionGraphUndirectedGraph

# lifted multicut objective
UndirectedGraph.LiftedMulticutObjective = lifted_multicut.LiftedMulticutObjectiveUndirectedGraph
def randomGraph(numberOfNodes, numberOfEdges):
    """Create a random simple undirected graph.

    Parameters
    ----------
    numberOfNodes : int
        Number of nodes; node ids are ``0 .. numberOfNodes - 1``.
    numberOfEdges : int
        Number of distinct-endpoint edges to insert.  Must be achievable
        for the given node count, otherwise the fill-up loop below can
        never terminate.

    Returns
    -------
    UndirectedGraph
    """
    g = UndirectedGraph(numberOfNodes)
    # bugfix: numpy.random.randint's `high` bound is exclusive, so it must
    # be numberOfNodes (not numberOfNodes - 1) for the last node id to be
    # reachable.
    uv = numpy.random.randint(low=0, high=numberOfNodes, size=numberOfEdges*2)
    uv = uv.reshape([-1,2])
    # drop self-loops before the bulk insert
    where = numpy.where(uv[:,0]!=uv[:,1])[0]
    uv = uv[where,:]
    g.insertEdges(uv)
    # top up one edge at a time until the requested edge count is reached.
    # NOTE(review): assumes duplicate edges are not double-counted by
    # insertEdges/insertEdge -- confirm against the C++ implementation.
    while( g.numberOfEdges < numberOfEdges):
        u,v = numpy.random.randint(low=0, high=numberOfNodes, size=2)
        if u != v:
            g.insertEdge(int(u),int(v))
    return g
class EdgeContractionGraphCallback(EdgeContractionGraphCallbackImpl):
    """Adapter base class for edge-contraction callbacks.

    A subclass may implement any of ``contractEdge``, ``mergeEdges``,
    ``mergeNodes`` and ``contractEdgeDone``; each hook that exists is
    registered on the corresponding ``...Callback`` slot of the
    implementation base class.  Missing hooks are simply skipped.
    """

    # (implementation slot, optional user hook) pairs
    _HOOKS = (
        ("contractEdgeCallback", "contractEdge"),
        ("mergeEdgesCallback", "mergeEdges"),
        ("mergeNodesCallback", "mergeNodes"),
        ("contractEdgeDoneCallback", "contractEdgeDone"),
    )

    def __init__(self):
        super(EdgeContractionGraphCallback, self).__init__()
        # sentinel distinguishes "attribute absent" from a None-valued hook
        missing = object()
        for slot, hook_name in self._HOOKS:
            hook = getattr(self, hook_name, missing)
            if hook is not missing:
                setattr(self, slot, hook)
def edgeContractionGraph(g, callback):
    """Return an edge-contraction view of ``g`` driven by ``callback``.

    The concrete contraction-graph type is looked up on the class of the
    input graph (``g.__class__.EdgeContractionGraph``).
    """
    return g.__class__.EdgeContractionGraph(g, callback)
def undirectedGraph(numberOfNodes):
    """Factory: create an ``UndirectedGraph`` with ``numberOfNodes`` nodes."""
    return UndirectedGraph(numberOfNodes)
def undirectedGridGraph(shape, simpleNh=True):
    """Create a 2D or 3D grid graph with the simple (direct-neighbour) stencil.

    Parameters
    ----------
    shape : sequence of int
        Grid shape; must have length 2 or 3.
    simpleNh : bool
        Only ``True`` is supported.

    Raises
    ------
    RuntimeError
        If ``simpleNh`` is False or the shape is not 2D/3D.
    """
    if not simpleNh:
        raise RuntimeError("currently only simpleNh is implemented")
    dims = [int(d) for d in shape]
    if len(dims) == 2:
        return UndirectedGridGraph2DSimpleNh(dims)
    if len(dims) == 3:
        return UndirectedGridGraph3DSimpleNh(dims)
    raise RuntimeError("currently only 2D and 3D grid graph is exposed to python")

gridGraph = undirectedGridGraph
def undirectedLongRangeGridGraph(shape, offsets, edge_mask=None,
                offsets_probabilities=None):
    """Build a 2D/3D grid graph with extra long-range edges along ``offsets``.

    :param shape: grid shape (length 2 or 3).
    :param offsets: integer offset vectors, one per connection type
        (converted to an int64 array below).
    :param edge_mask: Boolean array (4D) indicating which edge connections should be introduced in the graph.
    :param offsets_probabilities: Probability that a type of neighboring connection is introduced as edge in the graph.
        Cannot be used at the same time with edge_mask
    """
    offsets = numpy.require(offsets, dtype='int64')
    shape = list(shape)
    if len(shape) == 2:
        G = UndirectedLongRangeGridGraph2D
    elif len(shape) == 3:
        G = UndirectedLongRangeGridGraph3D
    else:
        raise RuntimeError("wrong dimension: undirectedLongRangeGridGraph is only implemented for 2D and 3D")
    if edge_mask is not None:
        assert offsets_probabilities is None, "Edge mask and offsets probabilities cannot be used at the same time."
        assert edge_mask.ndim == len(shape) + 1
        assert edge_mask.dtype == numpy.dtype('bool')
        assert edge_mask.shape[0] == offsets.shape[0]
        # move the offset axis last, i.e. (spatial dims..., nOffsets) --
        # matching the layout of the mask stacked with axis=-1 below
        edge_mask = numpy.rollaxis(edge_mask, axis=0, start=len(shape) + 1)
        useEdgeMask = True
    elif offsets_probabilities is not None:
        offsets_probabilities = numpy.require(offsets_probabilities, dtype='float32')
        assert offsets_probabilities.shape[0] == offsets.shape[0]
        assert (offsets_probabilities.min() >= 0.0) and (offsets_probabilities.max() <= 1.0)
        # Randomly sample some edges to add to the graph:
        edge_mask = []
        for off_prob in offsets_probabilities:
            edge_mask.append(numpy.random.random(shape) <= off_prob)
        edge_mask = numpy.stack(edge_mask, axis=-1)
        useEdgeMask = True
    else:
        # Create an empty edge_mask (anyway it won't be used):
        edge_mask = numpy.empty(tuple([1 for _ in range(len(shape) + 1)]), dtype='bool')
        useEdgeMask = False
    return G(shape=shape, offsets=offsets, edgeMask=edge_mask,
        useEdgeMask=useEdgeMask)


longRangeGridGraph = undirectedLongRangeGridGraph
def drawGraph(graph, method='spring'):
    """
    Draw a graph with networkx, labeling every node with its index.

    :param graph: graph exposing ``nodes()``, ``edges()`` and ``uv(edge)``.
    :param method: layout selector; ``'spring'`` uses a force-directed layout,
        any other value falls back to networkx's default drawing.
    """
    import networkx
    G = networkx.Graph()
    for node in graph.nodes():
        G.add_node(node)
    for edge in graph.edges():
        u, v = graph.uv(edge)
        G.add_edge(u, v)
    nodeLabels = {node: str(node) for node in graph.nodes()}
    if method == 'spring':
        networkx.draw_spring(G, labels=nodeLabels)
    else:
        # BUG FIX: the keyword was misspelled 'lables', so node labels were
        # never applied (and recent networkx versions reject the unknown kwarg).
        networkx.draw(G, labels=nodeLabels)
def run_label_propagation(graph, edge_values, nb_iter=1, node_labels=None, local_edges=None, size_constr=-1,
                          nb_threads=-1):
    """
    Run label-propagation clustering on a graph via the native implementation.

    :param graph: graph exposing ``numberOfNodes``.
    :param edge_values: per-edge weights driving the propagation.
    :param nb_iter: number of propagation iterations.
    :param node_labels: initial labels; only ``None`` (one singleton cluster
        per node) is currently supported.
    :param local_edges: optional boolean mask over edges; defaults to all True.
    :param size_constr: maximum cluster size (-1 means unconstrained).
    :param nb_threads: number of threads (-1 means implementation default).
    :return: the final per-node label array (dtype uint64), modified in place
        by the native call.
    """
    print("Start")
    if local_edges is not None:
        assert edge_values.shape == local_edges.shape
        local_edges = numpy.require(local_edges, dtype='bool')
    else:
        # No mask supplied: treat every edge as local.
        local_edges = numpy.ones_like(edge_values).astype('bool')
    nb_nodes = graph.numberOfNodes
    if node_labels is None:
        # Start from singleton clusters: node i gets label i.
        node_labels = numpy.arange(0, nb_nodes)
    else:
        raise NotImplementedError("Deduce size of initial clusters!")
    # NOTE(review): this compares a per-edge array against a per-node array,
    # so it only holds when the graph has exactly as many edges as nodes —
    # verify the intended invariant.
    assert edge_values.shape == node_labels.shape
    node_labels = numpy.require(node_labels, dtype='uint64')
    # NOTE(review): 'sizes' is allocated but never passed to the native call —
    # confirm whether it was meant to be an argument.
    sizes = numpy.ones((nb_nodes,))
    runLabelPropagation_impl(graph, node_labels, edge_values, local_edges, nb_iter, size_constr, nb_threads)
    return node_labels
"numpy.ones_like",
"numpy.ones",
"numpy.arange",
"numpy.where",
"numpy.require",
"numpy.random.random",
"networkx.draw_spring",
"networkx.Graph",
"numpy.stack",
"numpy.random.randint",
"numpy.dtype",
"networkx.draw"
] | [((1793, 1868), 'numpy.random.randint', 'numpy.random.randint', ([], {'low': '(0)', 'high': '(numberOfNodes - 1)', 'size': '(numberOfEdges * 2)'}), '(low=0, high=numberOfNodes - 1, size=numberOfEdges * 2)\n', (1813, 1868), False, 'import numpy\n'), ((3908, 3945), 'numpy.require', 'numpy.require', (['offsets'], {'dtype': '"""int64"""'}), "(offsets, dtype='int64')\n", (3921, 3945), False, 'import numpy\n'), ((5615, 5631), 'networkx.Graph', 'networkx.Graph', ([], {}), '()\n', (5629, 5631), False, 'import networkx\n'), ((6631, 6673), 'numpy.require', 'numpy.require', (['node_labels'], {'dtype': '"""uint64"""'}), "(node_labels, dtype='uint64')\n", (6644, 6673), False, 'import numpy\n'), ((6686, 6709), 'numpy.ones', 'numpy.ones', (['(nb_nodes,)'], {}), '((nb_nodes,))\n', (6696, 6709), False, 'import numpy\n'), ((1906, 1939), 'numpy.where', 'numpy.where', (['(uv[:, 0] != uv[:, 1])'], {}), '(uv[:, 0] != uv[:, 1])\n', (1917, 1939), False, 'import numpy\n'), ((2042, 2101), 'numpy.random.randint', 'numpy.random.randint', ([], {'low': '(0)', 'high': '(numberOfNodes - 1)', 'size': '(2)'}), '(low=0, high=numberOfNodes - 1, size=2)\n', (2062, 2101), False, 'import numpy\n'), ((5873, 5915), 'networkx.draw_spring', 'networkx.draw_spring', (['G'], {'labels': 'nodeLabels'}), '(G, labels=nodeLabels)\n', (5893, 5915), False, 'import networkx\n'), ((5934, 5969), 'networkx.draw', 'networkx.draw', (['G'], {'lables': 'nodeLabels'}), '(G, lables=nodeLabels)\n', (5947, 5969), False, 'import networkx\n'), ((6250, 6290), 'numpy.require', 'numpy.require', (['local_edges'], {'dtype': '"""bool"""'}), "(local_edges, dtype='bool')\n", (6263, 6290), False, 'import numpy\n'), ((6453, 6478), 'numpy.arange', 'numpy.arange', (['(0)', 'nb_nodes'], {}), '(0, nb_nodes)\n', (6465, 6478), False, 'import numpy\n'), ((4456, 4475), 'numpy.dtype', 'numpy.dtype', (['"""bool"""'], {}), "('bool')\n", (4467, 4475), False, 'import numpy\n'), ((4710, 4763), 'numpy.require', 'numpy.require', 
(['offsets_probabilities'], {'dtype': '"""float32"""'}), "(offsets_probabilities, dtype='float32')\n", (4723, 4763), False, 'import numpy\n'), ((5141, 5172), 'numpy.stack', 'numpy.stack', (['edge_mask'], {'axis': '(-1)'}), '(edge_mask, axis=-1)\n', (5152, 5172), False, 'import numpy\n'), ((6323, 6351), 'numpy.ones_like', 'numpy.ones_like', (['edge_values'], {}), '(edge_values)\n', (6338, 6351), False, 'import numpy\n'), ((5081, 5107), 'numpy.random.random', 'numpy.random.random', (['shape'], {}), '(shape)\n', (5100, 5107), False, 'import numpy\n')] |
import numpy as np
from experiments.target_lnpdfs.Lnpdf import LNPDF
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
class PlanarRobot(LNPDF):
    """
    Unnormalized log-density of a planar robot arm reaching goal positions.

    The arm has ``num_links`` unit-length links; a configuration is the vector
    of joint angles.  The density combines a diagonal Gaussian prior over the
    angles with a Gaussian likelihood on the end-effector position (the
    maximum over the goals when there are four of them).
    """
    def __init__(self, num_links, num_goals, prior_std=2e-1, likelihood_std=1e-2):
        self._num_dimensions = num_links
        prior_stds = prior_std * np.ones(num_links)
        prior_stds[0] = 1.  # the base joint is left almost unconstrained
        self.prior = tfd.MultivariateNormalDiag(loc=tf.zeros(num_links), scale_diag=prior_stds.astype(np.float32))
        self.link_lengths = np.ones(self._num_dimensions)
        # Goals sit at 70% of the arm's maximal reach.
        reach = 0.7 * self._num_dimensions
        if num_goals == 1:
            self.goal_Gaussian = tfd.MultivariateNormalDiag(loc=[reach, 0],
                                                       scale_identity_multiplier=likelihood_std)
            self.likelihood = self.goal_Gaussian.log_prob
        elif num_goals == 4:
            # One goal on each half-axis.
            self.goal_Gaussian1 = tfd.MultivariateNormalDiag(loc=[reach, 0],
                                                        scale_identity_multiplier=likelihood_std)
            self.goal_Gaussian2 = tfd.MultivariateNormalDiag(loc=[-reach, 0],
                                                        scale_identity_multiplier=likelihood_std)
            self.goal_Gaussian3 = tfd.MultivariateNormalDiag(loc=[0, reach],
                                                        scale_identity_multiplier=likelihood_std)
            self.goal_Gaussian4 = tfd.MultivariateNormalDiag(loc=[0, -reach],
                                                        scale_identity_multiplier=likelihood_std)
            def _max_goal_log_prob(pos):
                stacked = tf.stack((self.goal_Gaussian1.log_prob(pos),
                                    self.goal_Gaussian2.log_prob(pos),
                                    self.goal_Gaussian3.log_prob(pos),
                                    self.goal_Gaussian4.log_prob(pos)))
                return tf.reduce_max(stacked, axis=0)
            self.likelihood = _max_goal_log_prob
        else:
            raise ValueError
    def get_num_dimensions(self):
        """Number of joint angles (= number of links)."""
        return self._num_dimensions
    def log_density(self, theta):
        """Unnormalized log-density for a batch of joint-angle vectors."""
        y = tf.zeros(len(theta))
        x = tf.zeros(len(theta))
        # Forward kinematics: the absolute angle of link i is the sum of the
        # first i+1 joint angles; accumulate the link end points.
        for link in range(0, self._num_dimensions):
            abs_angle = tf.reduce_sum(theta[:, :link + 1], axis=1)
            y += self.link_lengths[link] * tf.math.sin(abs_angle)
            x += self.link_lengths[link] * tf.math.cos(abs_angle)
        return self.prior.log_prob(theta) + self.likelihood(tf.stack((x, y), axis=1))
def make_single_goal():
    """Factory: 10-link planar robot with a single goal."""
    robot = PlanarRobot(10, 1)
    return robot
def make_four_goal():
    """Factory: 10-link planar robot with four goals."""
    robot = PlanarRobot(10, 4)
    return robot
"tensorflow.stack",
"tensorflow.reduce_sum",
"numpy.ones",
"tensorflow.zeros"
] | [((549, 578), 'numpy.ones', 'np.ones', (['self._num_dimensions'], {}), '(self._num_dimensions)\n', (556, 578), True, 'import numpy as np\n'), ((359, 377), 'numpy.ones', 'np.ones', (['num_links'], {}), '(num_links)\n', (366, 377), True, 'import numpy as np\n'), ((457, 476), 'tensorflow.zeros', 'tf.zeros', (['num_links'], {}), '(num_links)\n', (465, 476), True, 'import tensorflow as tf\n'), ((2485, 2509), 'tensorflow.stack', 'tf.stack', (['(x, y)'], {'axis': '(1)'}), '((x, y), axis=1)\n', (2493, 2509), True, 'import tensorflow as tf\n'), ((2295, 2334), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['theta[:, :i + 1]'], {'axis': '(1)'}), '(theta[:, :i + 1], axis=1)\n', (2308, 2334), True, 'import tensorflow as tf\n'), ((2386, 2425), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['theta[:, :i + 1]'], {'axis': '(1)'}), '(theta[:, :i + 1], axis=1)\n', (2399, 2425), True, 'import tensorflow as tf\n')] |
import numpy as np
from typing import List, Literal
from .constraint import Constraint
from .parameter import Parameter
class LinearConstraint(Constraint):
    """
    Represents a linear constraint. Either an equality constraint :math:`Ax = b`, or an inequality constraint
    :math:`Ax \\geq b`, where :math:`A \\in \\mathbb{R}^{c \\times n}`.
    """
    def __init__(self, parameters: List[Parameter], a: np.ndarray, b: np.ndarray, ctype: Literal["eq", "ineq"]):
        """
        :param parameters: The parameters the constraint acts on.  With more than one parameter, the
            constraint applies to the concatenated parameter vector.
        :param a: The constraint matrix, of shape (c, n); `n` must equal the total dimension of the
            concatenated parameter vector.
        :param b: The constraint right-hand side, of shape (c,).
        :param ctype: Whether the constraint is an equality ("eq") or inequality ("ineq") constraint.
        """
        self._check_input_linear(parameters, a, b, ctype)
        self._cdim = a.shape[0]
        self._a = a
        self._b = b
        # The generic Constraint machinery expects a residual function and its
        # Jacobian; both close over the constraint matrix.
        def linfun(*args):
            stacked = np.concatenate(args)
            return self._a @ stacked - b
        def linjac(*args):
            return self._a
        Constraint.__init__(self, parameters=parameters, fun=linfun, jac=linjac, ctype=ctype)
    @property
    def a(self) -> np.ndarray:
        """The constraint matrix :math:`A`."""
        return self._a
    @property
    def b(self) -> np.ndarray:
        """The constraint vector :math:`b`."""
        return self._b
    @property
    def cdim(self) -> int:
        """The constraint dimension :math:`c`."""
        return self._cdim
    @staticmethod
    def _check_input_linear(parameters: List[Parameter], a: np.ndarray, b: np.ndarray,
                            ctype: Literal["eq", "ineq"]):
        """Validate the constructor arguments, raising on any inconsistency."""
        if ctype not in ["eq", "ineq"]:
            raise Exception("'ctype' must either be 'eq' or 'ineq'.")
        n = sum(param.dim for param in parameters)
        if a.shape[1] != n:
            raise Exception(f"'a' must have shape[1] = {n}")
        m = a.shape[0]
        if b.shape != (m,):
            raise Exception(f"'b' must have shape ({m},).")
| [
"numpy.concatenate"
] | [((1375, 1395), 'numpy.concatenate', 'np.concatenate', (['args'], {}), '(args)\n', (1389, 1395), True, 'import numpy as np\n')] |
import torch
from torch import nn
from torch.distributions import Categorical
from torch.optim import Adam, SGD, ASGD
import torch.multiprocessing as mp
import os
from multiprocessing import Process, Queue
import queue
import numpy
import argparse
import glob
#from matplotlib import pyplot as plot
import copy
from time import sleep
import time
import gym
from utils import Buffer, collect_one_episode, copy_params, avg_params
import ff
import conv
def simulator(idx, player_queue, episode_queue, args, valid=False):
    """
    Worker-process loop: repeatedly sync the policy from `player_queue` and
    roll out episodes in a private gym environment, pushing results onto
    `episode_queue`.

    :param idx: worker index; also used to decorrelate the torch RNG seed.
    :param player_queue: receives flat lists of parameter/buffer numpy arrays
        for the latest policy, or the string "END" to shut the worker down.
    :param episode_queue: receives rollout results — only the return for a
        validation worker, otherwise (obs, rew, act, prob, return) tuples.
    :param args: parsed command-line namespace (see the __main__ block).
    :param valid: if True, run deterministic evaluation episodes only.
    """
    # Derive a per-worker seed by drawing `idx` times from numpy's global RNG
    # (worker 0 keeps seed 0).
    seed = 0
    for ii in range(idx):
        seed = numpy.random.randint(0, numpy.iinfo(int).max)
    torch.manual_seed(seed)
    if valid:
        idx = "valid"
    print('Starting the simulator {}'.format(idx))
    device = 'cpu'
    # NOTE(review): this device object is created and immediately discarded —
    # presumably a default-device call was intended; confirm.
    torch.device('cpu')
    torch.set_num_threads(args.n_cores)
    env = gym.make(args.env)
    max_len = args.max_len
    discount_factor = args.discount_factor
    n_frames = args.n_frames
    if args.nn == "ff":
        player = ff.Player(n_in=128 * n_frames, n_hid=args.n_hid, n_out=6).to(device)
    elif args.nn == "conv":
        # create a policy
        player = conv.Player(n_frames=n_frames, n_hid=args.n_hid).to(device)
    else:
        raise Exception('Unknown type')
    # Number of parameter tensors; used to split the flat state list received
    # from the trainer into parameters vs. buffers.
    n_params = len(list(player.parameters()))
    if args.resize is None:
        resize = None
    else:
        resize = [int(v) for v in args.resize.split(',')]
    while True:
        # first sync the player if possible
        try:
            player_state = player_queue.get_nowait()
            if type(player_state) == str and player_state == "END":
                break
            for p, c in zip(player.parameters(), player_state[:n_params]):
                p.data.copy_(torch.from_numpy(c))
            for p, c in zip(player.buffers(), player_state[n_params:]):
                p.data.copy_(torch.from_numpy(c))
            if player_queue.qsize() > 0:
                print('Simulator {} queue overflowing'.format(idx))
        except queue.Empty:
            pass
        # run one episode
        # Freeze batch-norm running statistics during rollouts.
        for m in player.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
            if isinstance(m, nn.BatchNorm1d):
                m.eval()
        if valid:
            _, _, _, _, ret_ = collect_one_episode(env,
                    player, max_len=max_len, discount_factor=discount_factor,
                    n_frames=n_frames,
                    deterministic=True,
                    queue=None, interval=-1,
                    resize=resize)
            episode_queue.put(ret_)
        else:
            # Mix deterministic and stochastic rollouts according to
            # args.deterministic_ratio.
            o_, r_, a_, ap_, ret_ = collect_one_episode(env,
                    player, max_len=max_len, discount_factor=discount_factor,
                    n_frames=n_frames,
                    deterministic=numpy.random.rand() <= args.deterministic_ratio,
                    queue=episode_queue, interval=args.collect_interval,
                    resize=resize)
            episode_queue.put((o_, r_, a_, ap_, ret_))
def main(args):
    """
    Train an actor-critic agent with asynchronous episode collection.

    Spawns ``args.n_simulators`` rollout workers plus one validation worker,
    maintains a replay buffer fed by their episodes, and alternates between
    fitting the value network (TD(0)) and the policy (advantage actor-critic
    with optional importance weighting and optional critic-aware weighting).
    Checkpoints are written to ``'{args.saveto}_<iter>.th'``.
    """
    torch.manual_seed(args.seed)
    # start simulators
    mp.set_start_method('spawn')
    episode_q = Queue()
    player_qs = []
    simulators = []
    for si in range(args.n_simulators):
        player_qs.append(Queue())
        simulators.append(mp.Process(target=simulator, args=(si, player_qs[-1], episode_q, args, False,)))
        simulators[-1].start()
    return_q = Queue()
    valid_q = Queue()
    valid_simulator = mp.Process(target=simulator, args=(args.n_simulators, valid_q, return_q, args, True,))
    valid_simulator.start()
    env = gym.make(args.env)
    n_frames = args.n_frames
    # initialize replay buffer
    replay_buffer = Buffer(max_items=args.buffer_size,
                          n_frames=n_frames,
                          priority_ratio=args.priority_ratio,
                          store_ratio=args.store_ratio)
    n_iter = args.n_iter
    n_collect = args.n_collect
    n_value = args.n_value
    n_policy = args.n_policy
    n_hid = args.n_hid
    critic_aware = args.critic_aware
    update_every = args.update_every
    disp_iter = args.disp_iter
    save_iter = args.save_iter
    max_len = args.max_len
    batch_size = args.batch_size
    max_collected_frames = args.max_collected_frames
    clip_coeff = args.grad_clip
    ent_coeff = args.ent_coeff
    discount_factor = args.discount_factor
    # Running averages, initialized to -inf so the first observation seeds them.
    value_loss = -numpy.Inf
    entropy = -numpy.Inf
    valid_ret = -numpy.Inf
    ess = -numpy.Inf
    n_collected_frames = 0
    offset = 0
    return_history = []
    if args.nn == "ff":
        # create a policy
        player = ff.Player(n_in=128 * n_frames, n_hid=args.n_hid, n_out=6).to(args.device)
        if args.player_coeff > 0.:
            player_old = ff.Player(n_in=128 * n_frames, n_hid=args.n_hid, n_out=6).to(args.device)
        player_copy = ff.Player(n_in=128 * n_frames, n_hid=args.n_hid, n_out=6).to('cpu')
        # create a value estimator
        value = ff.Value(n_in=128 * n_frames, n_hid=args.n_hid).to(args.device)
        value_old = ff.Value(n_in=128 * n_frames, n_hid=args.n_hid).to(args.device)
        for m in player.parameters():
            m.data.normal_(0., 0.01)
        for m in value.parameters():
            m.data.normal_(0., 0.01)
    elif args.nn == "conv":
        # create a policy
        player = conv.Player(n_frames=n_frames, n_hid=args.n_hid).to(args.device)
        if args.player_coeff > 0.:
            player_old = conv.Player(n_frames=n_frames, n_hid=args.n_hid).to(args.device)
        player_copy = conv.Player(n_frames=n_frames, n_hid=args.n_hid).to('cpu')
        # create a value estimator
        value = conv.Value(n_frames, n_hid=args.n_hid).to(args.device)
        value_old = conv.Value(n_frames, n_hid=args.n_hid).to(args.device)
    else:
        raise Exception('Unknown type')
    if args.cont:
        # Resume from the newest checkpoint matching the save prefix.
        files = glob.glob("{}*th".format(args.saveto))
        iterations = [int(".".join(f.split('.')[:-1]).split('_')[-1].strip()) for f in files]
        last_iter = numpy.max(iterations)
        offset = last_iter-1
        print('Reloading from {}_{}.th'.format(args.saveto, last_iter))
        checkpoint = torch.load("{}_{}.th".format(args.saveto, last_iter))
        player.load_state_dict(checkpoint['player'])
        value.load_state_dict(checkpoint['value'])
        return_history = checkpoint['return_history']
        n_collected_frames = checkpoint['n_collected_frames']
    copy_params(value, value_old)
    if args.player_coeff > 0.:
        copy_params(player, player_old)
    # Push the initial policy to every simulator (parameters then buffers).
    player.to('cpu')
    copy_params(player, player_copy)
    for si in range(args.n_simulators):
        player_qs[si].put([copy.deepcopy(p.data.numpy()) for p in player_copy.parameters()]+
                          [copy.deepcopy(p.data.numpy()) for p in player_copy.buffers()])
    valid_q.put([copy.deepcopy(p.data.numpy()) for p in player_copy.parameters()]+
                [copy.deepcopy(p.data.numpy()) for p in player_copy.buffers()])
    player.to(args.device)
    if args.device == 'cuda':
        torch.set_num_threads(1)
    initial = True
    pre_filled = 0
    for ni in range(n_iter):
        # re-initialize optimizers
        opt_player = eval(args.optimizer_player)(player.parameters(),
                                                  lr=args.lr, weight_decay=args.l2)
        opt_value = eval(args.optimizer_value)(value.parameters(),
                                               lr=args.lr, weight_decay=args.l2)
        try:
            if not initial:
                # Decay learning rate and entropy bonus over effective iterations.
                lr = args.lr / (1 + (ni-pre_filled+1) * args.lr_factor)
                ent_coeff = args.ent_coeff / (1 + (ni-pre_filled+1) * args.ent_factor)
                print('lr', lr, 'ent_coeff', ent_coeff)
                for param_group in opt_player.param_groups:
                    param_group['lr'] = lr
                for param_group in opt_value.param_groups:
                    param_group['lr'] = lr
            if numpy.mod((ni-pre_filled+1), save_iter) == 0:
                torch.save({
                    'n_iter': n_iter,
                    'n_collect': n_collect,
                    'n_value': n_value,
                    'n_policy': n_policy,
                    'max_len': max_len,
                    'n_hid': n_hid,
                    'batch_size': batch_size,
                    'player': player.state_dict(),
                    'value': value.state_dict(),
                    'return_history': return_history,
                    'n_collected_frames': n_collected_frames,
                }, '{}_{}.th'.format(args.saveto,(ni-pre_filled+1)+offset+1))
            player.eval()
            # Drain validation returns; keep only the most recent one.
            ret_ = -numpy.Inf
            while True:
                try:
                    ret_ = return_q.get_nowait()
                except queue.Empty:
                    break
            if ret_ != -numpy.Inf:
                return_history.append(ret_)
                if valid_ret == -numpy.Inf:
                    valid_ret = ret_
                else:
                    valid_ret = 0.9 * valid_ret + 0.1 * ret_
                print('Valid run', ret_, valid_ret)
            # Broadcast the current policy, flushing any stale state first.
            player.to('cpu')
            copy_params(player, player_copy)
            for si in range(args.n_simulators):
                while True:
                    try:
                        # empty the queue, as the new one has arrived
                        player_qs[si].get_nowait()
                    except queue.Empty:
                        break
                player_qs[si].put([copy.deepcopy(p.data.numpy()) for p in player_copy.parameters()]+
                                  [copy.deepcopy(p.data.numpy()) for p in player_copy.buffers()])
            while True:
                try:
                    # empty the queue, as the new one has arrived
                    valid_q.get_nowait()
                except queue.Empty:
                    break
            valid_q.put([copy.deepcopy(p.data.numpy()) for p in player_copy.parameters()]+
                        [copy.deepcopy(p.data.numpy()) for p in player_copy.buffers()])
            player.to(args.device)
            # Collect finished episodes into the replay buffer.
            n_collected_frames_ = 0
            while True:
                try:
                    epi = episode_q.get_nowait()
                    replay_buffer.add(epi[0], epi[1], epi[2], epi[3])
                    n_collected_frames_ = n_collected_frames_ + len(epi[0])
                except queue.Empty:
                    break
                if n_collected_frames_ >= max_collected_frames \
                        and (len(replay_buffer.buffer) + len(replay_buffer.priority_buffer)) > 0:
                    break
            n_collected_frames = n_collected_frames + n_collected_frames_
            if len(replay_buffer.buffer) + len(replay_buffer.priority_buffer) < 1:
                continue
            if len(replay_buffer.buffer) + len(replay_buffer.priority_buffer) < args.initial_buffer:
                if initial:
                    print('Pre-filling the buffer...',
                          len(replay_buffer.buffer) + len(replay_buffer.priority_buffer))
                continue
            else:
                if initial:
                    # Remember how many iterations were spent pre-filling so the
                    # schedules above start from zero.
                    pre_filled = ni
                    initial = False
            # fit a value function: TD(0)
            value.train()
            for vi in range(n_value):
                if numpy.mod(vi, update_every) == 0:
                    opt_player.zero_grad()
                    opt_value.zero_grad()
                batch = replay_buffer.sample(batch_size)
                batch_x = torch.from_numpy(numpy.stack([ex.current_['obs'] for ex in batch]).astype('float32')).to(args.device)
                batch_r = torch.from_numpy(numpy.stack([ex.current_['rew'] for ex in batch]).astype('float32')).to(args.device)
                batch_xn = torch.from_numpy(numpy.stack([ex.next_['obs'] for ex in batch]).astype('float32')).to(args.device)
                pred_y = value(batch_x)
                pred_next = value_old(batch_xn).clone().detach()
                batch_pi = player(batch_x)
                loss_ = ((batch_r + discount_factor * pred_next.squeeze() - pred_y.squeeze()) ** 2)
                batch_a = torch.from_numpy(numpy.stack([ex.current_['act'] for ex in batch]).astype('float32')[:,None]).to(args.device)
                batch_q = torch.from_numpy(numpy.stack([ex.current_['prob'] for ex in batch]).astype('float32')).to(args.device)
                logp = torch.log(batch_pi.gather(1, batch_a.long())+1e-8)
                # (clipped) importance weight:
                # because the policy may have changed since the tuple was collected.
                log_iw = logp.squeeze().clone().detach() - torch.log(batch_q.squeeze()+1e-8)
                ess_ = torch.exp(-torch.logsumexp(2 * log_iw, dim=0)).item()
                iw = torch.exp(log_iw.clamp(max=0.))
                if args.iw:
                    loss = iw * loss_
                else:
                    loss = loss_
                loss = loss.mean()
                loss.backward()
                # Gradients are accumulated for 'update_every' batches per step.
                if numpy.mod(vi, update_every) == (update_every-1):
                    if clip_coeff > 0.:
                        nn.utils.clip_grad_norm_(value.parameters(), clip_coeff)
                    opt_value.step()
                    copy_params(value, value_old)
                if value_loss < 0.:
                    value_loss = loss_.mean().item()
                else:
                    value_loss = 0.9 * value_loss + 0.1 * loss_.mean().item()
            if numpy.mod((ni-pre_filled+1), disp_iter) == 0:
                print('# frames', n_collected_frames,
                      'value_loss', value_loss,
                      'entropy', -entropy,
                      'ess', ess)
            # fit a policy
            value.eval()
            player.train()
            if args.player_coeff > 0.:
                player_old.eval()
            for pi in range(n_policy):
                if numpy.mod(pi, update_every) == 0:
                    opt_player.zero_grad()
                    opt_value.zero_grad()
                batch = replay_buffer.sample(batch_size)
                # Preallocate and fill the batch arrays in one pass.
                batch_x = numpy.zeros(tuple([len(batch)] + list(batch[0].current_['obs'].shape)), dtype='float32')
                batch_xn = numpy.zeros(tuple([len(batch)] + list(batch[0].current_['obs'].shape)), dtype='float32')
                batch_r = numpy.zeros((len(batch)), dtype='float32')[:, None]
                for ei, ex in enumerate(batch):
                    batch_x[ei,:] = ex.current_['obs']
                    batch_xn[ei,:] = ex.next_['obs']
                    batch_r[ei,0] = ex.current_['rew']
                batch_x = torch.from_numpy(batch_x).to(args.device)
                batch_xn = torch.from_numpy(batch_xn).to(args.device)
                batch_r = torch.from_numpy(batch_r).to(args.device)
                batch_v = value(batch_x).clone().detach()
                batch_vn = value(batch_xn).clone().detach()
                batch_a = torch.from_numpy(numpy.stack([ex.current_['act'] for ex in batch]).astype('float32')[:,None]).to(args.device)
                batch_q = torch.from_numpy(numpy.stack([ex.current_['prob'] for ex in batch]).astype('float32')).to(args.device)
                batch_pi = player(batch_x)
                logp = torch.log(batch_pi.gather(1, batch_a.long())+1e-8)
                if args.player_coeff > 0.:
                    batch_pi_old = player_old(batch_x).clone().detach()
                # entropy regularization
                ent = -(batch_pi * torch.log(batch_pi+1e-8)).sum(1)
                if entropy == -numpy.Inf:
                    entropy = ent.mean().item()
                else:
                    entropy = 0.9 * entropy + 0.1 * ent.mean().item()
                # advantage: r(s,a) + \gamma * V(s') - V(s)
                adv = batch_r + discount_factor * batch_vn - batch_v
                loss = -(adv * logp).squeeze()
                loss = loss - ent_coeff * ent
                # (clipped) importance weight:
                log_iw = logp.squeeze().clone().detach() - torch.log(batch_q+1e-8)
                iw = torch.exp(log_iw.clamp(max=0.))
                ess_ = torch.exp(-torch.logsumexp(2 * log_iw, dim=0)).item()
                if ess == -numpy.Inf:
                    ess = ess_
                else:
                    ess = 0.9 * ess + 0.1 * ess_
                if args.iw:
                    loss = iw * loss
                else:
                    loss = loss
                if critic_aware:
                    # Down-weight samples where the critic is inaccurate.
                    pred_y = value(batch_x).squeeze()
                    pred_next = value(batch_xn).squeeze()
                    critic_loss_ = -((batch_r.squeeze() + discount_factor * pred_next - pred_y) ** 2).clone().detach()
                    critic_loss_ = torch.exp(critic_loss_)
                    loss = loss * critic_loss_
                loss = loss.mean()
                if args.player_coeff > 0.:
                    # Interpolate towards the previous policy (trust-region-like).
                    loss_old = -(batch_pi_old * torch.log(batch_pi + 1e-8)).sum(1).mean()
                    loss = (1.-args.player_coeff) * loss + args.player_coeff * loss_old
                loss.backward()
                if numpy.mod(pi, update_every) == (update_every-1):
                    if clip_coeff > 0.:
                        nn.utils.clip_grad_norm_(player.parameters(), clip_coeff)
                    opt_player.step()
            if args.player_coeff > 0.:
                copy_params(player, player_old)
        except KeyboardInterrupt:
            print('Terminating...')
            break
    # Shut down every worker, including the validation simulator.
    for si in range(args.n_simulators):
        player_qs[si].put("END")
    # BUG FIX: the validation simulator was never sent "END" (and never
    # joined), leaving a child process running after training ended.
    valid_q.put("END")
    print('Waiting for the simulators...')
    for si in range(args.n_simulators):
        # BUG FIX: this loop used simulators[-1].join(), which joined the last
        # worker repeatedly and never reaped the others.
        simulators[si].join()
    valid_simulator.join()
    print('Done')
if __name__ == '__main__':
    # Command-line entry point: build the argument parser and hand the parsed
    # namespace to main().
    parser = argparse.ArgumentParser()
    # Training schedule and reproducibility.
    parser.add_argument('-seed', type=int, default=1234)
    parser.add_argument('-n-iter', type=int, default=1000)
    parser.add_argument('-n-collect', type=int, default=1)
    parser.add_argument('-init-collect', type=int, default=100)
    parser.add_argument('-n-value', type=int, default=150)
    parser.add_argument('-n-policy', type=int, default=150)
    parser.add_argument('-update-every', type=int, default=1)
    parser.add_argument('-disp-iter', type=int, default=1)
    parser.add_argument('-val-iter', type=int, default=1)
    parser.add_argument('-save-iter', type=int, default=10)
    parser.add_argument('-max-len', type=int, default=1000)
    parser.add_argument('-batch-size', type=int, default=1000)
    # Loss coefficients and regularization.
    parser.add_argument('-ent-coeff', type=float, default=0.)
    parser.add_argument('-ent-factor', type=float, default=0.)
    parser.add_argument('-discount-factor', type=float, default=0.95)
    parser.add_argument('-grad-clip', type=float, default=1.)
    # Model and environment configuration.
    parser.add_argument('-n-hid', type=int, default=256)
    parser.add_argument('-buffer-size', type=int, default=50000)
    parser.add_argument('-n-frames', type=int, default=1)
    parser.add_argument('-max-collected-frames', type=int, default=1000)
    parser.add_argument('-env', type=str, default='Pong-ram-v0')
    parser.add_argument('-nn', type=str, default='ff')
    parser.add_argument('-device', type=str, default='cuda')
    # Optimizer names are resolved via eval() in main().
    parser.add_argument('-optimizer-player', type=str, default='ASGD')
    parser.add_argument('-optimizer-value', type=str, default='Adam')
    parser.add_argument('-lr', type=float, default=1e-4)
    parser.add_argument('-lr-factor', type=float, default=0.)
    parser.add_argument('-l2', type=float, default=0.)
    # Replay buffer and worker behavior.
    parser.add_argument('-priority-ratio', type=float, default=0.)
    parser.add_argument('-store-ratio', type=float, default=1.)
    parser.add_argument('-cont', action="store_true", default=False)
    parser.add_argument('-critic-aware', action="store_true", default=False)
    parser.add_argument('-iw', action="store_true", default=False)
    parser.add_argument('-n-simulators', type=int, default=2)
    parser.add_argument('-n-cores', type=int, default=1)
    parser.add_argument('-deterministic-ratio', type=float, default=0.)
    parser.add_argument('-player-coeff', type=float, default=0.)
    parser.add_argument('-initial-buffer', type=int, default=0)
    parser.add_argument('-collect-interval', type=int, default=10)
    parser.add_argument('-resize', type=str, default=None)
    parser.add_argument('saveto', type=str)
    args = parser.parse_args()
    main(args)
main(args)
| [
"numpy.random.rand",
"utils.copy_params",
"numpy.iinfo",
"torch.from_numpy",
"torch.exp",
"numpy.mod",
"gym.make",
"argparse.ArgumentParser",
"conv.Player",
"torch.set_num_threads",
"numpy.max",
"numpy.stack",
"ff.Value",
"torch.multiprocessing.set_start_method",
"conv.Value",
"torch.m... | [((638, 661), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (655, 661), False, 'import torch\n'), ((775, 794), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (787, 794), False, 'import torch\n'), ((799, 834), 'torch.set_num_threads', 'torch.set_num_threads', (['args.n_cores'], {}), '(args.n_cores)\n', (820, 834), False, 'import torch\n'), ((846, 864), 'gym.make', 'gym.make', (['args.env'], {}), '(args.env)\n', (854, 864), False, 'import gym\n'), ((3059, 3087), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3076, 3087), False, 'import torch\n'), ((3116, 3144), 'torch.multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (3135, 3144), True, 'import torch.multiprocessing as mp\n'), ((3162, 3169), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (3167, 3169), False, 'from multiprocessing import Process, Queue\n'), ((3437, 3444), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (3442, 3444), False, 'from multiprocessing import Process, Queue\n'), ((3459, 3466), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (3464, 3466), False, 'from multiprocessing import Process, Queue\n'), ((3489, 3578), 'torch.multiprocessing.Process', 'mp.Process', ([], {'target': 'simulator', 'args': '(args.n_simulators, valid_q, return_q, args, True)'}), '(target=simulator, args=(args.n_simulators, valid_q, return_q,\n args, True))\n', (3499, 3578), True, 'import torch.multiprocessing as mp\n'), ((3615, 3633), 'gym.make', 'gym.make', (['args.env'], {}), '(args.env)\n', (3623, 3633), False, 'import gym\n'), ((3755, 3879), 'utils.Buffer', 'Buffer', ([], {'max_items': 'args.buffer_size', 'n_frames': 'n_frames', 'priority_ratio': 'args.priority_ratio', 'store_ratio': 'args.store_ratio'}), '(max_items=args.buffer_size, n_frames=n_frames, priority_ratio=args.\n priority_ratio, store_ratio=args.store_ratio)\n', (3761, 3879), False, 'from utils import Buffer, 
collect_one_episode, copy_params, avg_params\n'), ((6590, 6619), 'utils.copy_params', 'copy_params', (['value', 'value_old'], {}), '(value, value_old)\n', (6601, 6619), False, 'from utils import Buffer, collect_one_episode, copy_params, avg_params\n'), ((6740, 6772), 'utils.copy_params', 'copy_params', (['player', 'player_copy'], {}), '(player, player_copy)\n', (6751, 6772), False, 'from utils import Buffer, collect_one_episode, copy_params, avg_params\n'), ((20583, 20608), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (20606, 20608), False, 'import argparse\n'), ((6167, 6188), 'numpy.max', 'numpy.max', (['iterations'], {}), '(iterations)\n', (6176, 6188), False, 'import numpy\n'), ((6659, 6690), 'utils.copy_params', 'copy_params', (['player', 'player_old'], {}), '(player, player_old)\n', (6670, 6690), False, 'from utils import Buffer, collect_one_episode, copy_params, avg_params\n'), ((7225, 7249), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (7246, 7249), False, 'import torch\n'), ((2295, 2465), 'utils.collect_one_episode', 'collect_one_episode', (['env', 'player'], {'max_len': 'max_len', 'discount_factor': 'discount_factor', 'n_frames': 'n_frames', 'deterministic': '(True)', 'queue': 'None', 'interval': '(-1)', 'resize': 'resize'}), '(env, player, max_len=max_len, discount_factor=\n discount_factor, n_frames=n_frames, deterministic=True, queue=None,\n interval=-1, resize=resize)\n', (2314, 2465), False, 'from utils import Buffer, collect_one_episode, copy_params, avg_params\n'), ((3274, 3281), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (3279, 3281), False, 'from multiprocessing import Process, Queue\n'), ((3309, 3387), 'torch.multiprocessing.Process', 'mp.Process', ([], {'target': 'simulator', 'args': '(si, player_qs[-1], episode_q, args, False)'}), '(target=simulator, args=(si, player_qs[-1], episode_q, args, False))\n', (3319, 3387), True, 'import torch.multiprocessing as mp\n'), ((9387, 9419), 
'utils.copy_params', 'copy_params', (['player', 'player_copy'], {}), '(player, player_copy)\n', (9398, 9419), False, 'from utils import Buffer, collect_one_episode, copy_params, avg_params\n'), ((14003, 14032), 'utils.copy_params', 'copy_params', (['value', 'value_old'], {}), '(value, value_old)\n', (14014, 14032), False, 'from utils import Buffer, collect_one_episode, copy_params, avg_params\n'), ((611, 627), 'numpy.iinfo', 'numpy.iinfo', (['int'], {}), '(int)\n', (622, 627), False, 'import numpy\n'), ((1006, 1063), 'ff.Player', 'ff.Player', ([], {'n_in': '(128 * n_frames)', 'n_hid': 'args.n_hid', 'n_out': '(6)'}), '(n_in=128 * n_frames, n_hid=args.n_hid, n_out=6)\n', (1015, 1063), False, 'import ff\n'), ((4757, 4814), 'ff.Player', 'ff.Player', ([], {'n_in': '(128 * n_frames)', 'n_hid': 'args.n_hid', 'n_out': '(6)'}), '(n_in=128 * n_frames, n_hid=args.n_hid, n_out=6)\n', (4766, 4814), False, 'import ff\n'), ((4987, 5044), 'ff.Player', 'ff.Player', ([], {'n_in': '(128 * n_frames)', 'n_hid': 'args.n_hid', 'n_out': '(6)'}), '(n_in=128 * n_frames, n_hid=args.n_hid, n_out=6)\n', (4996, 5044), False, 'import ff\n'), ((5107, 5154), 'ff.Value', 'ff.Value', ([], {'n_in': '(128 * n_frames)', 'n_hid': 'args.n_hid'}), '(n_in=128 * n_frames, n_hid=args.n_hid)\n', (5115, 5154), False, 'import ff\n'), ((5191, 5238), 'ff.Value', 'ff.Value', ([], {'n_in': '(128 * n_frames)', 'n_hid': 'args.n_hid'}), '(n_in=128 * n_frames, n_hid=args.n_hid)\n', (5199, 5238), False, 'import ff\n'), ((8144, 8185), 'numpy.mod', 'numpy.mod', (['(ni - pre_filled + 1)', 'save_iter'], {}), '(ni - pre_filled + 1, save_iter)\n', (8153, 8185), False, 'import numpy\n'), ((14251, 14292), 'numpy.mod', 'numpy.mod', (['(ni - pre_filled + 1)', 'disp_iter'], {}), '(ni - pre_filled + 1, disp_iter)\n', (14260, 14292), False, 'import numpy\n'), ((20152, 20183), 'utils.copy_params', 'copy_params', (['player', 'player_old'], {}), '(player, player_old)\n', (20163, 20183), False, 'from utils import Buffer, 
collect_one_episode, copy_params, avg_params\n'), ((1146, 1194), 'conv.Player', 'conv.Player', ([], {'n_frames': 'n_frames', 'n_hid': 'args.n_hid'}), '(n_frames=n_frames, n_hid=args.n_hid)\n', (1157, 1194), False, 'import conv\n'), ((1744, 1763), 'torch.from_numpy', 'torch.from_numpy', (['c'], {}), '(c)\n', (1760, 1763), False, 'import torch\n'), ((1866, 1885), 'torch.from_numpy', 'torch.from_numpy', (['c'], {}), '(c)\n', (1882, 1885), False, 'import torch\n'), ((4891, 4948), 'ff.Player', 'ff.Player', ([], {'n_in': '(128 * n_frames)', 'n_hid': 'args.n_hid', 'n_out': '(6)'}), '(n_in=128 * n_frames, n_hid=args.n_hid, n_out=6)\n', (4900, 4948), False, 'import ff\n'), ((5476, 5524), 'conv.Player', 'conv.Player', ([], {'n_frames': 'n_frames', 'n_hid': 'args.n_hid'}), '(n_frames=n_frames, n_hid=args.n_hid)\n', (5487, 5524), False, 'import conv\n'), ((5688, 5736), 'conv.Player', 'conv.Player', ([], {'n_frames': 'n_frames', 'n_hid': 'args.n_hid'}), '(n_frames=n_frames, n_hid=args.n_hid)\n', (5699, 5736), False, 'import conv\n'), ((5799, 5837), 'conv.Value', 'conv.Value', (['n_frames'], {'n_hid': 'args.n_hid'}), '(n_frames, n_hid=args.n_hid)\n', (5809, 5837), False, 'import conv\n'), ((5874, 5912), 'conv.Value', 'conv.Value', (['n_frames'], {'n_hid': 'args.n_hid'}), '(n_frames, n_hid=args.n_hid)\n', (5884, 5912), False, 'import conv\n'), ((11904, 11931), 'numpy.mod', 'numpy.mod', (['vi', 'update_every'], {}), '(vi, update_every)\n', (11913, 11931), False, 'import numpy\n'), ((13720, 13747), 'numpy.mod', 'numpy.mod', (['vi', 'update_every'], {}), '(vi, update_every)\n', (13729, 13747), False, 'import numpy\n'), ((14791, 14818), 'numpy.mod', 'numpy.mod', (['pi', 'update_every'], {}), '(pi, update_every)\n', (14800, 14818), False, 'import numpy\n'), ((18354, 18380), 'torch.log', 'torch.log', (['(batch_q + 1e-08)'], {}), '(batch_q + 1e-08)\n', (18363, 18380), False, 'import torch\n'), ((19206, 19229), 'torch.exp', 'torch.exp', (['critic_loss_'], {}), '(critic_loss_)\n', (19215, 
19229), False, 'import torch\n'), ((19808, 19835), 'numpy.mod', 'numpy.mod', (['pi', 'update_every'], {}), '(pi, update_every)\n', (19817, 19835), False, 'import numpy\n'), ((2825, 2844), 'numpy.random.rand', 'numpy.random.rand', ([], {}), '()\n', (2842, 2844), False, 'import numpy\n'), ((5601, 5649), 'conv.Player', 'conv.Player', ([], {'n_frames': 'n_frames', 'n_hid': 'args.n_hid'}), '(n_frames=n_frames, n_hid=args.n_hid)\n', (5612, 5649), False, 'import conv\n'), ((16290, 16315), 'torch.from_numpy', 'torch.from_numpy', (['batch_x'], {}), '(batch_x)\n', (16306, 16315), False, 'import torch\n'), ((16359, 16385), 'torch.from_numpy', 'torch.from_numpy', (['batch_xn'], {}), '(batch_xn)\n', (16375, 16385), False, 'import torch\n'), ((16428, 16453), 'torch.from_numpy', 'torch.from_numpy', (['batch_r'], {}), '(batch_r)\n', (16444, 16453), False, 'import torch\n'), ((13397, 13431), 'torch.logsumexp', 'torch.logsumexp', (['(2 * log_iw)'], {'dim': '(0)'}), '(2 * log_iw, dim=0)\n', (13412, 13431), False, 'import torch\n'), ((17506, 17533), 'torch.log', 'torch.log', (['(batch_pi + 1e-08)'], {}), '(batch_pi + 1e-08)\n', (17515, 17533), False, 'import torch\n'), ((18466, 18500), 'torch.logsumexp', 'torch.logsumexp', (['(2 * log_iw)'], {'dim': '(0)'}), '(2 * log_iw, dim=0)\n', (18481, 18500), False, 'import torch\n'), ((12192, 12241), 'numpy.stack', 'numpy.stack', (["[ex.current_['obs'] for ex in batch]"], {}), "([ex.current_['obs'] for ex in batch])\n", (12203, 12241), False, 'import numpy\n'), ((12320, 12369), 'numpy.stack', 'numpy.stack', (["[ex.current_['rew'] for ex in batch]"], {}), "([ex.current_['rew'] for ex in batch])\n", (12331, 12369), False, 'import numpy\n'), ((12449, 12495), 'numpy.stack', 'numpy.stack', (["[ex.next_['obs'] for ex in batch]"], {}), "([ex.next_['obs'] for ex in batch])\n", (12460, 12495), False, 'import numpy\n'), ((12976, 13026), 'numpy.stack', 'numpy.stack', (["[ex.current_['prob'] for ex in batch]"], {}), "([ex.current_['prob'] for ex in 
batch])\n", (12987, 13026), False, 'import numpy\n'), ((16991, 17041), 'numpy.stack', 'numpy.stack', (["[ex.current_['prob'] for ex in batch]"], {}), "([ex.current_['prob'] for ex in batch])\n", (17002, 17041), False, 'import numpy\n'), ((12840, 12889), 'numpy.stack', 'numpy.stack', (["[ex.current_['act'] for ex in batch]"], {}), "([ex.current_['act'] for ex in batch])\n", (12851, 12889), False, 'import numpy\n'), ((16855, 16904), 'numpy.stack', 'numpy.stack', (["[ex.current_['act'] for ex in batch]"], {}), "([ex.current_['act'] for ex in batch])\n", (16866, 16904), False, 'import numpy\n'), ((19519, 19546), 'torch.log', 'torch.log', (['(batch_pi + 1e-08)'], {}), '(batch_pi + 1e-08)\n', (19528, 19546), False, 'import torch\n')] |
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from scipy.stats import norm
import time
import os
from demoire.epll.epll import EPLLhalfQuadraticSplit
from demoire.epll.utils import get_gs_matrix
def process(noiseI, GS, matpath, DC):
    """Denoise a single image channel with EPLL half-quadratic splitting.

    :param noiseI: 2-D float array in [0, 1], the noisy channel.
    :param GS: Gaussian-mixture prior matrix (from get_gs_matrix).
    :param matpath: path to the .mat file holding the GMM prior.
    :param DC: flag selecting the background (True) / moire (False) prior.
    :return: the denoised channel as a float array.
    """
    patch_size = 8
    noise_sd = 25 / 255  # assumed noise standard deviation on the 0..1 pixel scale
    # Kept for reference: MATLAB-compatible synthetic-noise generation.
    # np.random.seed(1)
    # rand = np.array(norm.ppf(np.random.rand(I.shape[1], I.shape[0]))).T
    # noiseI = I + noiseSD * rand
    log_l_func = []  # filled in place by EPLLhalfQuadraticSplit with per-step log-likelihoods
    # Note: the original also built an unused `excludeList = []` local while
    # passing excludeList=None below; the dead local has been removed.
    cleanI, _psnr, _cost = EPLLhalfQuadraticSplit(
        noiseI=noiseI,
        rambda=patch_size ** 2 / noise_sd ** 2,
        patchSize=patch_size,
        # Classic EPLL half-quadratic penalty schedule, scaled by 1/sigma^2.
        betas=(1 / noise_sd ** 2) * np.array([1, 4, 8, 16, 32]),
        T=1,
        I=None,
        LogLFunc=log_l_func,
        GS=GS,
        excludeList=None,
        SigmaNoise=None,
        matpath=matpath,
        DC=DC
    )
    return cleanI
def denoise(target,
            matpath,
            DC,
            convert_type='RGB'
            ):
    """Denoise an image file with the EPLL prior.

    :param target: path to the input image file.
    :param matpath: path to the .mat file holding the GMM prior.
    :param DC: flag selecting the background (True) / moire (False) prior.
    :param convert_type: 'L' for grayscale or 'RGB' (case-insensitive).
    :return: denoised image as a float array in [0, 1].
    :raises ValueError: if convert_type is neither 'L' nor 'RGB'.
    """
    convert_type = convert_type.upper()
    GS = get_gs_matrix(path=matpath, DC=DC)
    if convert_type == 'L':
        targetI = np.array(Image.open(target).convert(convert_type)) / 255
        print('grayscale')
        cleanI = process(targetI, GS, matpath, DC)
    elif convert_type == 'RGB':
        targetI = np.array(Image.open(target).convert(convert_type)) / 255
        cleanI = np.empty(targetI.shape)
        # Denoise each color channel independently with the same prior.
        for i in range(3):
            print()
            if i == 0:
                print('R channel')
            elif i == 1:
                print('G channel')
            else:
                print('B channel')
            cleanI[:, :, i] = process(targetI[:, :, i], GS, matpath, DC)
    else:
        # Raise instead of print() + exit(-1): a library function should not
        # terminate the interpreter, and callers can now handle the error.
        raise ValueError('convert type should be grayscale(L) or RGB')
    return cleanI
def save_result(cleanI, resultpath):
    """Save a denoised image to disk.

    :param cleanI: 2-D (grayscale) or 3-D (color) image array.
    :param resultpath: destination file path; its directory must already exist.
    :raises FileNotFoundError: if the destination directory does not exist.
    :raises ValueError: if cleanI is neither 2- nor 3-dimensional.
    """
    # Explicit raise instead of `assert`: asserts are stripped under
    # `python -O`, and the original failure message — a print() call —
    # evaluated to None, so the assertion message was always empty.
    if not os.path.exists(os.path.dirname(resultpath)):
        raise FileNotFoundError('result directory not exists')
    if cleanI.ndim == 2:
        cmap = 'gray'  # single channel -> grayscale colormap
    elif cleanI.ndim == 3:
        cmap = None    # RGB data carries its own colors
    else:
        # Raise instead of print() + exit(-1) so callers can recover.
        raise ValueError('image dimension should be 2 or 3')
    plt.imsave(resultpath, cleanI, cmap=cmap)
def main(target, matfile, DC, resultdir):
    """Denoise `target` with the prior stored in `matfile` and save the result.

    The output filename is `<stem>_background.<ext>` or `<stem>_moire.<ext>`
    depending on DC, written inside `resultdir`.
    """
    if DC:
        print('background')
    else:
        print('moire')
    # The .mat prior file is expected next to this script, under ./data/.
    matdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
    matpath = os.path.join(matdir, matfile)
    # cleanI = np.array(Image.open(target).convert('RGB'))/255
    cleanI = denoise(target=target, matpath=matpath, DC=DC, convert_type='L')
    # Rebuild "<stem>_<suffix>.<ext>" from the input filename; note that a
    # multi-dot stem like "a.b.png" loses its inner dots ("ab_moire.png").
    img_type = os.path.basename(target).split('.')[-1]
    filename = ''.join(os.path.basename(target).split('.')[:-1]) + '_' + ('background' if DC else 'moire') + '.' + img_type
    resultpath = os.path.join(resultdir, filename)
save_result(cleanI, resultpath) | [
"PIL.Image.open",
"matplotlib.pyplot.imsave",
"os.path.join",
"os.path.realpath",
"os.path.dirname",
"numpy.array",
"numpy.empty",
"os.path.basename",
"demoire.epll.utils.get_gs_matrix"
] | [((1508, 1542), 'demoire.epll.utils.get_gs_matrix', 'get_gs_matrix', ([], {'path': 'matpath', 'DC': 'DC'}), '(path=matpath, DC=DC)\n', (1521, 1542), False, 'from demoire.epll.utils import get_gs_matrix\n'), ((2604, 2645), 'matplotlib.pyplot.imsave', 'plt.imsave', (['resultpath', 'cleanI'], {'cmap': 'cmap'}), '(resultpath, cleanI, cmap=cmap)\n', (2614, 2645), True, 'import matplotlib.pyplot as plt\n'), ((2864, 2893), 'os.path.join', 'os.path.join', (['matdir', 'matfile'], {}), '(matdir, matfile)\n', (2876, 2893), False, 'import os\n'), ((3234, 3267), 'os.path.join', 'os.path.join', (['resultdir', 'filename'], {}), '(resultdir, filename)\n', (3246, 3267), False, 'import os\n'), ((2365, 2392), 'os.path.dirname', 'os.path.dirname', (['resultpath'], {}), '(resultpath)\n', (2380, 2392), False, 'import os\n'), ((1856, 1879), 'numpy.empty', 'np.empty', (['targetI.shape'], {}), '(targetI.shape)\n', (1864, 1879), True, 'import numpy as np\n'), ((2813, 2839), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2829, 2839), False, 'import os\n'), ((818, 845), 'numpy.array', 'np.array', (['[1, 4, 8, 16, 32]'], {}), '([1, 4, 8, 16, 32])\n', (826, 845), True, 'import numpy as np\n'), ((3052, 3076), 'os.path.basename', 'os.path.basename', (['target'], {}), '(target)\n', (3068, 3076), False, 'import os\n'), ((1600, 1618), 'PIL.Image.open', 'Image.open', (['target'], {}), '(target)\n', (1610, 1618), False, 'from PIL import Image\n'), ((1793, 1811), 'PIL.Image.open', 'Image.open', (['target'], {}), '(target)\n', (1803, 1811), False, 'from PIL import Image\n'), ((3115, 3139), 'os.path.basename', 'os.path.basename', (['target'], {}), '(target)\n', (3131, 3139), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 20:21:34 2020
@author: nickcostanzino
"""
def NN_structure(layers, perceptrons):
    """Return a uniform hidden-layer spec: `layers` layers of `perceptrons` units each."""
    return tuple(perceptrons for _ in range(layers))
def NN_structures(layers, perceptrons):
    """Enumerate uniform hidden-layer configurations for a grid search.

    Produces every architecture with 1..layers-1 hidden layers of
    1..perceptrons-1 units each (both upper bounds exclusive, matching the
    original loops).

    :param layers: exclusive upper bound on the number of hidden layers.
    :param perceptrons: exclusive upper bound on units per layer.
    :return: list of hidden_layer_sizes tuples suitable for MLPRegressor.
    """
    # Bug fix: the original passed the list through an undefined name
    # `array(...)` (NameError at runtime); the round-trip was a no-op
    # anyway, so we simply build and return the list of tuples.
    return [(j,) * i for i in range(1, layers) for j in range(1, perceptrons)]
def MSE(prediction, true):
    """Mean squared error between `prediction` and `true` (symmetric in its args)."""
    from sklearn.metrics import mean_squared_error
    return mean_squared_error(prediction, true)
def process_simulator(f, sigma_X, sigma_e, N):
    """Simulate N samples of Y = f(X) + e with Gaussian X and Gaussian noise.

    :param f: numpy expression over `X` as a string, e.g. 'power(X,3)';
              it is evaluated as 'np.' + f.
    :param sigma_X: standard deviation of X ~ N(0, sigma_X).
    :param sigma_e: standard deviation of the noise e ~ N(0, sigma_e).
    :param N: number of samples to draw.
    :return: DataFrame with columns 'X', 'Y' and 'e'.
    """
    import pandas as pd
    import numpy as np
    # (Unused imports of fsolve / LinearRegression / MLPRegressor removed.)
    # Draw order (e first, then X) is preserved so seeded runs reproduce
    # the original simulator exactly.
    e = np.random.normal(0, sigma_e, N)
    X = np.random.normal(0, sigma_X, N)
    # SECURITY: eval() executes arbitrary code — only pass trusted,
    # hard-coded formulas (as main() does), never user input.
    Y = eval('np.' + f) + e
    df = pd.DataFrame()
    df['X'] = X
    df['Y'] = Y
    df['e'] = e
    return df
def performance_analyzer(func, sigma_X, sigma_e, N, number_of_partitions, number_of_simulations, output_folder):
    """Compare Linear Regression against a grid-searched MLP on simulated data.

    For each of `number_of_simulations` simulated datasets of size N
    (Y = func(X) + e), fits an LR model and a grid-searched MLPRegressor on
    growing training windows and records in/out-of-sample R2 scores, writing
    one CSV of all results into `output_folder`.

    :param func: numpy expression string passed to process_simulator.
    :param sigma_X: std of the simulated X.
    :param sigma_e: std of the simulated noise e.
    :param N: samples per simulated dataset.
    :param number_of_partitions: data is split into this many equal slices;
        the last slice is the test set, the training window grows by slices.
    :param number_of_simulations: number of independent simulated datasets.
    :param output_folder: directory the results CSV is written to.
    """
    import pandas as pd
    import numpy as np
    from sklearn.metrics import r2_score, mean_squared_error
    from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, TimeSeriesSplit
    from sklearn.ensemble import RandomForestRegressor
    from sklearn.linear_model import LinearRegression
    from sklearn.neural_network import MLPRegressor
    COLUMNS = ['training_data_points', 'testing_data_points', 'LR_intercept', 'LR_slope', 'NN_layers', 'NN_perceptrons',
           'NN_activation', 'NN_alpha', 'LR-In-Sample-R2', 'NN-In-Sample-R2', 'Best-Possible-In-Sample-R2',
           'LR-Out-Sample-R2', 'NN-Out-Sample-R2', 'Best-Possible-Out-Sample-R2']
    full_results = pd.DataFrame(columns = COLUMNS)
    for k in range(number_of_simulations):
        S = process_simulator(func, sigma_X, sigma_e, N)
        X = pd.DataFrame(S.X)
        Y = pd.DataFrame(S.Y)
        e = pd.DataFrame(S.e)
        L = len(X)
        # l indexes the partition count: the training window is the last
        # l slices before the test slice at the end of the series.
        for l in range(1, number_of_partitions):
            results = pd.DataFrame()
            print(l)
            test_L = int(L/number_of_partitions)
            train_L = int(L/number_of_partitions*l)
            train_start = L- test_L-train_L
            train_end = L- test_L
            max_layers = 2
            max_perceptrons = 8
            structures = NN_structures(max_layers, max_perceptrons)
            print("NN_Structures:")
            print(structures)
            X_train = pd.DataFrame(X[train_start:train_end])
            Y_train = pd.DataFrame(Y[train_start:train_end])
            e_train = pd.DataFrame(e[train_start:train_end])
            X_test = pd.DataFrame(X[train_end +1: L])
            Y_test = pd.DataFrame(Y[train_end +1: L])
            e_test = pd.DataFrame(e[train_end +1: L])
            LR_regressor = LinearRegression(fit_intercept=True)
            LR_regressor.fit(X_train, Y_train)
            print("Fitted LR")
            NN_regressor = MLPRegressor()
            # default max_iter = 10000
            param_grid = {'hidden_layer_sizes': structures,
                          'activation': ['identity', 'relu'],
                          'alpha': [0.01, 0.001, 0.0001, 0.00001],
                          'learning_rate': ['adaptive'],
                          'solver': ['adam'],
                          'random_state': [0],
                          'early_stopping': [True],
                          'max_iter': [10000],
                          'warm_start': [True]}
            # TimeSeriesSplit keeps validation folds after their training
            # folds, matching the time-ordered window construction above.
            tscv = TimeSeriesSplit(n_splits=4)
            NN_gridsearch = GridSearchCV(estimator=NN_regressor, param_grid=param_grid, n_jobs=-1, verbose=False, cv= tscv)
            print("Performing grid search for NN")
            NN_gridsearch.fit(X_train, Y_train)
            print("Finished grid search for NN")
            NN_params = NN_gridsearch.best_params_
            NN_model = NN_gridsearch.best_estimator_
            LR_params = np.append([LR_regressor.intercept_], LR_regressor.coef_)
            # NOTE(review): the four prediction arrays below are computed but
            # never used; the R2 scores rely on .score() instead.
            pred_LR = LR_regressor.predict(X_test)
            pred_NN = NN_model.predict(X_test)
            insample_LR = LR_regressor.predict(X_train)
            insample_NN = NN_model.predict(X_train)
            results.loc[l, COLUMNS[0]] = train_L
            results.loc[l, COLUMNS[1]] = test_L
            results.loc[l, COLUMNS[2]] = LR_params[0]
            results.loc[l, COLUMNS[3]] = LR_params[1]
            results.loc[l, COLUMNS[4]] = len(NN_params['hidden_layer_sizes'])
            results.loc[l, COLUMNS[5]] = sum(NN_params['hidden_layer_sizes'])
            results.loc[l, COLUMNS[6]] = str(NN_params['activation'])
            results.loc[l, COLUMNS[7]] = str(NN_params['alpha'])
            results.loc[l, COLUMNS[8]] = LR_regressor.score(X_train,Y_train)
            results.loc[l, COLUMNS[9]] = NN_model.score(X_train,Y_train)
            # "Best possible" R2: variance explained if only the injected
            # noise e were left unexplained.
            results.loc[l, COLUMNS[10]] = 1 - (e_train*e_train).sum().values[0]/(((Y_train-Y_train.mean())*(Y_train-Y_train.mean())).sum().values[0])
            results.loc[l, COLUMNS[11]] = LR_regressor.score(X_test,Y_test)
            results.loc[l, COLUMNS[12]] = NN_model.score(X_test,Y_test)
            results.loc[l, COLUMNS[13]] = 1 - (e_test*e_test).sum().values[0]/(((Y_test-Y_test.mean())*(Y_test-Y_test.mean())).sum().values[0])
            full_results = full_results.append(results, ignore_index=False)
    full_results.to_csv(output_folder + '/' + str(func) + '_' + str(N) + '_' + str(number_of_partitions) + '_.csv')
def main():
    """Run a single performance_analyzer sweep with the demo-sized settings.

    (The original full-scale configuration — kept here for reference — was
    N=10000, 20 partitions, 1000 simulations.)
    """
    cfg = {
        'func': 'power(X,3)',
        'sigma_X': 2,
        'N': 1000,
        'number_of_partitions': 2,
        'number_of_simulations': 1,
        'output_folder': 'results',
    }
    # One noise level per step: sigma_e = 0.5 * step; range(1) means only 0.0.
    for step in range(1):
        performance_analyzer(cfg['func'], cfg['sigma_X'], 0.5 * step, cfg['N'],
                             cfg['number_of_partitions'], cfg['number_of_simulations'],
                             cfg['output_folder'])
if __name__ == "__main__":
main()
| [
"numpy.random.normal",
"sklearn.model_selection.GridSearchCV",
"sklearn.neural_network.MLPRegressor",
"sklearn.model_selection.TimeSeriesSplit",
"sklearn.metrics.mean_squared_error",
"numpy.append",
"pandas.DataFrame",
"sklearn.linear_model.LinearRegression"
] | [((560, 596), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['prediction', 'true'], {}), '(prediction, true)\n', (578, 596), False, 'from sklearn.metrics import r2_score, mean_squared_error\n'), ((889, 920), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma_e', 'N'], {}), '(0, sigma_e, N)\n', (905, 920), True, 'import numpy as np\n'), ((927, 958), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma_X', 'N'], {}), '(0, sigma_X, N)\n', (943, 958), True, 'import numpy as np\n'), ((991, 1005), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1003, 1005), True, 'import pandas as pd\n'), ((1908, 1937), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'COLUMNS'}), '(columns=COLUMNS)\n', (1920, 1937), True, 'import pandas as pd\n'), ((2067, 2084), 'pandas.DataFrame', 'pd.DataFrame', (['S.X'], {}), '(S.X)\n', (2079, 2084), True, 'import pandas as pd\n'), ((2097, 2114), 'pandas.DataFrame', 'pd.DataFrame', (['S.Y'], {}), '(S.Y)\n', (2109, 2114), True, 'import pandas as pd\n'), ((2127, 2144), 'pandas.DataFrame', 'pd.DataFrame', (['S.e'], {}), '(S.e)\n', (2139, 2144), True, 'import pandas as pd\n'), ((2246, 2260), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2258, 2260), True, 'import pandas as pd\n'), ((2726, 2764), 'pandas.DataFrame', 'pd.DataFrame', (['X[train_start:train_end]'], {}), '(X[train_start:train_end])\n', (2738, 2764), True, 'import pandas as pd\n'), ((2787, 2825), 'pandas.DataFrame', 'pd.DataFrame', (['Y[train_start:train_end]'], {}), '(Y[train_start:train_end])\n', (2799, 2825), True, 'import pandas as pd\n'), ((2848, 2886), 'pandas.DataFrame', 'pd.DataFrame', (['e[train_start:train_end]'], {}), '(e[train_start:train_end])\n', (2860, 2886), True, 'import pandas as pd\n'), ((2917, 2949), 'pandas.DataFrame', 'pd.DataFrame', (['X[train_end + 1:L]'], {}), '(X[train_end + 1:L])\n', (2929, 2949), True, 'import pandas as pd\n'), ((2971, 3003), 'pandas.DataFrame', 'pd.DataFrame', (['Y[train_end + 1:L]'], {}), 
'(Y[train_end + 1:L])\n', (2983, 3003), True, 'import pandas as pd\n'), ((3025, 3057), 'pandas.DataFrame', 'pd.DataFrame', (['e[train_end + 1:L]'], {}), '(e[train_end + 1:L])\n', (3037, 3057), True, 'import pandas as pd\n'), ((3098, 3134), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(True)'}), '(fit_intercept=True)\n', (3114, 3134), False, 'from sklearn.linear_model import LinearRegression\n'), ((3242, 3256), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {}), '()\n', (3254, 3256), False, 'from sklearn.neural_network import MLPRegressor\n'), ((3855, 3882), 'sklearn.model_selection.TimeSeriesSplit', 'TimeSeriesSplit', ([], {'n_splits': '(4)'}), '(n_splits=4)\n', (3870, 3882), False, 'from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, TimeSeriesSplit\n'), ((3911, 4009), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'NN_regressor', 'param_grid': 'param_grid', 'n_jobs': '(-1)', 'verbose': '(False)', 'cv': 'tscv'}), '(estimator=NN_regressor, param_grid=param_grid, n_jobs=-1,\n verbose=False, cv=tscv)\n', (3923, 4009), False, 'from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, TimeSeriesSplit\n'), ((4298, 4354), 'numpy.append', 'np.append', (['[LR_regressor.intercept_]', 'LR_regressor.coef_'], {}), '([LR_regressor.intercept_], LR_regressor.coef_)\n', (4307, 4354), True, 'import numpy as np\n')] |
import sys
import os
import shutil
import time
import json
from typing import List, Union
import torch
from torch import nn
from torch.autograd import Variable
from torch.optim import SGD
import numpy as np
from torchmetrics import Accuracy, Precision, Recall
from Metrics import AreaUnderPrecisionCurve, TprFpr, Time1000Samples, ModifiedF1, SkAucRoc
from torchvision.transforms.transforms import ToPILImage, ToTensor
from DataAugement import RandAugment
from DataLoaders import load_gen_data_dir
from LossFunctions import softmax_mse_loss, symmetric_mse_loss
from Models import cifar_shakeshake26, mt_shake_shake_params
import warnings
warnings.filterwarnings("ignore")
DATASETS_DIR = "Datasets"
OUT_DIR = "outputs"
NO_LABEL = -1
def eval_model(model, model_name, val_data_loaders, classes, fold_num, dataset_name, model_2=None, model2_name=None,
               out_dir=OUT_DIR):
    """
    Evaluate the performance of up to 2 models on the given validation loaders.

    :param model: first model to evaluate
    :param model_name: name used in the result keys for the first model
    :param val_data_loaders: iterable of data loaders to evaluate on
    :param classes: class labels forwarded to the metric objects
    :param fold_num: cross-validation fold index (used as the top-level result key)
    :param dataset_name: dataset name (used by the PR-curve metric for its output files)
    :param model_2: optional second model evaluated with an independent metric set
    :param model2_name: name used in the result keys for the second model
    :param out_dir: directory for metric artifacts
    :return: {fold_num: {"<metric>_<model name>": value, ...}}
    """
    loss_func = nn.CrossEntropyLoss(size_average=False, ignore_index=NO_LABEL)
    loss_model_1 = torch.Tensor([0])
    loss_model_2 = torch.Tensor([0])
    infer_time = (Time1000Samples(model), Time1000Samples(model_2))
    # One metric pair per measure: index 0 tracks `model`, index 1 tracks `model_2`.
    # NOTE(review): both PR-curve metrics are constructed with model_name; if
    # model_2's curve artifact should be named after model2_name, pass it here.
    all_metrics = {"accuracy": (Accuracy(), Accuracy()),
                   "F1": (ModifiedF1(classes=classes), ModifiedF1(classes=classes)),
                   "stats": (TprFpr(classes), TprFpr(classes)),
                   "recall": (Recall(), Recall()),
                   "precision": (Precision(), Precision()),
                   "auroc": (SkAucRoc(classes=classes), SkAucRoc(classes=classes)),
                   "area_under_precision_recall": (AreaUnderPrecisionCurve(classes, out_dir, fold_num,
                                                                           model_name, dataset_name),
                                                   AreaUnderPrecisionCurve(classes, out_dir, fold_num,
                                                                           model_name, dataset_name)),
                   }
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.eval()
    if model_2 is not None:
        model_2.eval()
    for val_data_loader in val_data_loaders:
        for x, y_true in val_data_loader:
            x = torch.autograd.Variable(x).to(device)
            y_true = torch.autograd.Variable(y_true).to(device)
            preds = model(x)
            # Models may return (logits, aux); keep only the logits.
            preds = preds[0] if type(preds) is tuple else preds
            batch_size = len(y_true)
            for metric in all_metrics.values():
                metric[0].update(preds=preds.detach().to('cpu'), target=y_true.detach().to('cpu'))
            loss_model_1 += (loss_func(preds.detach().to('cpu'), y_true.detach().to('cpu')) / batch_size)
            infer_time[0].update(x.detach().to('cpu'))
            if model_2 is not None:
                preds = model_2(x)
                preds = preds[0]
                for metric in all_metrics.values():
                    metric[1].update(preds=preds.detach().to('cpu'), target=y_true.detach().to('cpu'))
                loss_model_2 += (loss_func(preds.detach().to('cpu'), y_true.detach().to('cpu')) / batch_size)
                infer_time[1].update(x.detach().to('cpu'))
    ans = {
        f"loss_{model_name}": loss_model_1.numpy(),
        f"1000_samples_run_time_{model_name}": infer_time[0].compute()
    }
    if model_2 is not None:
        ans.update({
            f"loss_{model2_name}": loss_model_2.numpy(),
            # Bug fix: this previously reported infer_time[0] (the first
            # model's timer) as the second model's runtime.
            f"1000_samples_run_time_{model2_name}": infer_time[1].compute()
        })
    for key, metric in all_metrics.items():
        try:
            ans[f"{key}_{model_name}"] = metric[0].compute().numpy()
        except AttributeError:
            # compute() already returned a plain (non-tensor) value.
            ans[f"{key}_{model_name}"] = metric[0].compute()
        if model_2 is not None:
            try:
                ans[f"{key}_{model2_name}"] = metric[1].compute().numpy()
            except AttributeError:
                # Bug fix: this fallback previously overwrote the first
                # model's entry with metric[0] instead of recording the
                # second model's metric under model2_name.
                ans[f"{key}_{model2_name}"] = metric[1].compute()
    return {fold_num: ans}
def dump_to_log(data, log=None):
    """Append `data` as one JSON line to `log`, or print it when `log` is None.

    When `data` is a dict of dicts (the usual {fold: results} shape), numpy
    arrays inside the first fold's results are converted to lists in place so
    the payload is JSON-serializable; non-conforming payloads are logged as-is.
    """
    if log is None:
        print(data)
        return
    try:
        if type(data) is dict:
            fold_key = list(data)[0]
            for name, value in data[fold_key].items():
                if type(value) is np.ndarray:
                    data[fold_key][name] = value.tolist()
    except (IndexError, AttributeError):
        # Empty dicts / non-dict inner values: skip conversion, log unchanged.
        pass
    with open(log, "a") as out:
        json.dump(data, out)
        out.write("\n")
# ------------ Ramp up function taken from: https://github.com/benathi/fastswa-semi-sup.
def linear_rampup(current, rampup_length):
    """Ramp linearly from 0 to 1 over `rampup_length` steps, then hold at 1."""
    assert current >= 0 and rampup_length >= 0
    return 1.0 if current >= rampup_length else current / rampup_length
def cosine_rampdown(current, rampdown_length):
    """Cosine rampdown from 1 to 0 (SGDR, https://arxiv.org/abs/1608.03983)."""
    assert 0 <= current <= rampdown_length
    value = .5 * (np.cos(np.pi * current / rampdown_length) + 1)
    return max(0., float(value))
def sigmoid_rampup(current, rampup_length):
    """Sigmoid-shaped rampup exp(-5 * (1 - t)^2) from https://arxiv.org/abs/1610.02242"""
    if rampup_length == 0:
        return 1.0
    # Clamp progress into [0, rampup_length] before normalizing.
    phase = 1.0 - np.clip(current, 0.0, rampup_length) / rampup_length
    return float(np.exp(-5.0 * phase * phase))
# -------------- End of Ramp up functions
def select_best_hyper_parameters(dataset_name, epochs):
    """
    Grid-search the Fast-SWA / Mean-Teacher hyper parameter overrides below
    using 3-fold cross validation and return the best-performing dict.

    :param dataset_name: dataset to search on
    :param epochs: number of epochs for pre-training the model
    :return: dictionary with best hyper parameter changes
    """
    print("------ Start hyper parameters search ----------")
    candidates = [
        { # Original parameters
        },
        {
            "optimizer_args": {
                'lr': 0.1,
                "weight_decay": 2e-3,
            },
            'ema_decay': 0.93,
        },
        {
            "logit_distance_cost": 0.015,
            'ema_decay': 0.93,
            "consistency_rampup": 4,
        },
        {
            'ema_decay': 0.93,
            "consistency_rampup": 7,
            "consistency": 95.0,
            'fastswa_freq': '10',
        }
    ]
    scores = []
    for idx, overrides in enumerate(candidates):
        print(f"hyper parameters test - {idx}")
        # Each candidate overrides the defaults; each run gets its own log.
        trial_params = mt_shake_shake_params()
        trial_params.update(overrides)
        trial_log = f"{OUT_DIR}/fast_swa_{dataset_name}_hyper_{idx}_log.log"
        scores.append(train_helper(dataset_name, 3, trial_log, trial_params, epochs))
    winner = np.argmax(scores)
    dump_to_log(candidates[winner], f"{OUT_DIR}/fast_swa_{dataset_name}_hyper_log.log")
    print(f"best hyper-parameters idx: {winner},\nvalues: {candidates[winner]}")
    print("------ END hyper parameters search ----------")
    return candidates[winner]
###### Best hyperparameters index is 0 !!!!!!!!!
def main_original(idx_to_run):
    """
    Run hyper parameter search for Fast-SWA & Mean-Teacher from the original
    paper on the "shapes" dataset, then use the best hyper parameters to run
    10-fold cross validation on every dataset under DATASETS_DIR.

    :param idx_to_run: unused in the visible code; kept for interface
        compatibility with callers.
    """
    # rmtree(ignore_errors=True) already swallows a missing directory, so the
    # original try/except FileNotFoundError around it was dead code.
    shutil.rmtree(OUT_DIR, ignore_errors=True)
    os.makedirs(OUT_DIR, exist_ok=True)
    epochs = 60
    hp_dataset = "shapes"
    best_params = select_best_hyper_parameters(hp_dataset, epochs)
    params_to_use = mt_shake_shake_params()
    params_to_use.update(best_params)
    for dataset_name in os.listdir(DATASETS_DIR):
        print(f"Training on dataset - {dataset_name}")
        train_helper(dataset_name, 10, f"{OUT_DIR}/fast_swa_{dataset_name}_log.log", params_to_use, epochs)
    print("--------- END Train Fast-SWA -------------------")
def train_helper(dataset_name, k_fold, log_name, params, epochs) -> float:
    """
    Train the original Fast-SWA with k-fold cross validation.

    :param dataset_name: dataset directory name under DATASETS_DIR
    :param k_fold: number of cross-validation folds
    :param log_name: log file name passed to FastSWA
    :param params: hyper parameter dict unpacked into MeanTeacher and FastSWA
    :param epochs: pre-train epochs for the base model
    :return: mean "accuracy_fast-swa" over all folds
    """
    ans = list()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    unlabeled_gen, labeled_gen, classes = load_gen_data_dir(os.path.join(DATASETS_DIR, dataset_name), k_fold)
    # Each fold yields paired (train, val) splits for the unlabeled and
    # labeled portions of the data.
    for fold_num, ((unlabeled_train, unlabeled_val), (labeled_train, labeled_val)) in enumerate(zip(unlabeled_gen, labeled_gen)):
        # Fresh student / teacher / SWA networks per fold.
        base_model = cifar_shakeshake26(pretrained=False, num_classes=len(classes)).to(device)
        teacher_model = cifar_shakeshake26(pretrained=False, num_classes=len(classes)).to(device)
        swa_model = cifar_shakeshake26(pretrained=False, num_classes=len(classes)).to(device)
        mt = MeanTeacher(base_model, teacher_model, None, dataset_name, **params)
        # NOTE(review): FastSWA is not among the visible imports — presumably
        # defined elsewhere in this file; confirm.
        fast_swa = FastSWA(mt, swa_model, log_name, dataset_name, **params)
        fast_swa.train_model_and_swa(unlabeled_train, labeled_train, epochs, fold_num)
        curr_ans = fast_swa.eval_swa([unlabeled_val, labeled_val], classes, fold_num)
        ans.append(curr_ans[fold_num]["accuracy_fast-swa"])
    return np.mean(ans)
def train_helper_augmented(dataset_name, k_fold, log_name, params, epochs) -> float:
    """
    Train the improved (multi-student, augmented) Fast-SWA model with k-fold
    cross validation.

    :param dataset_name: dataset directory name under DATASETS_DIR
    :param k_fold: number of cross-validation folds
    :param log_name: log file name passed to FastSWA
    :param params: hyper parameter dict unpacked into the models
    :param epochs: pre-train epochs for the base models
    :return: mean "accuracy_fast-swa" over all folds
    """
    ans = list()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Three independent loader sets: one per student, so the students see
    # independently shuffled/augmented streams of the same dataset.
    temp_loaders = [load_gen_data_dir(os.path.join(DATASETS_DIR, dataset_name), k_fold),
                    load_gen_data_dir(os.path.join(DATASETS_DIR, dataset_name), k_fold),
                    load_gen_data_dir(os.path.join(DATASETS_DIR, dataset_name), k_fold)]
    classes = temp_loaders[0][-1]
    unlabeled_loaders = [curr_temp[0] for curr_temp in temp_loaders]
    labeled_loaders = [curr_temp[1] for curr_temp in temp_loaders]
    for fold_num, curr_data_loaders in enumerate(zip(*unlabeled_loaders, *labeled_loaders)):
        # zip yields the 3 unlabeled splits first, then the 3 labeled splits.
        curr_unlabeled_loaders = curr_data_loaders[:len(curr_data_loaders)//2]
        curr_labeled_loaders = curr_data_loaders[len(curr_data_loaders)//2:]
        base_models = [cifar_shakeshake26(pretrained=False, num_classes=len(classes)).to(device)
                       for _ in range(0, 3)]
        # Per-student teachers used only to satisfy the MeanTeacher interface;
        # the shared `teacher_model` below is the one MultipleStudents updates.
        fake_teachers = [cifar_shakeshake26(pretrained=False, num_classes=len(classes)).to(device)
                         for _ in range(0, 3)]
        teacher_model = cifar_shakeshake26(pretrained=False, num_classes=len(classes)).to(device)
        swa_model = cifar_shakeshake26(pretrained=False, num_classes=len(classes)).to(device)
        students = [MeanTeacher(base, teacher, None, dataset_name, augment_values=(2, 7), **params)
                    for base, teacher in zip(base_models, fake_teachers)]
        # NOTE(review): MultipleStudents / FastSWA are not among the visible
        # imports — presumably defined elsewhere in this file; confirm.
        ms = MultipleStudents(students, teacher_model, **params)
        fast_swa = FastSWA(ms, swa_model, log_name, dataset_name, **params)
        fast_swa.train_model_and_swa(curr_unlabeled_loaders, curr_labeled_loaders, epochs, fold_num, use_augment=True)
        # Evaluate on the first loader set's validation splits only.
        curr_ans = fast_swa.eval_swa([curr_unlabeled_loaders[0][1], curr_labeled_loaders[0][1]], classes, fold_num,
                                     "outputs_aug")
        ans.append(curr_ans[fold_num]["accuracy_fast-swa"])
    return np.mean(ans)
def select_best_hyper_parameters_augmented(dataset_name, epochs):
    """
    Grid-search hyper parameter overrides for the improved (augmented)
    Fast-SWA using 3-fold cross validation and return the best dict.

    :param dataset_name: dataset to search on
    :param epochs: number of epochs for pre-training the model
    :return: dictionary with best hyper parameter changes
    """
    print("------ Start hyper parameters search ----------")
    scores = []
    candidates = [
        { # Original parameters
        },
        {
            "optimizer_args": {
                'lr': 0.1,
                "weight_decay": 2e-3,
            },
            'ema_decay': 0.93,
        },
        {
            "logit_distance_cost": 0.015,
            'ema_decay': 0.93,
            "consistency_rampup": 4,
        },
        {
            'ema_decay': 0.93,
            "consistency_rampup": 7,
            "consistency": 95.0,
            'fastswa_freq': '10',
        }
    ]
    for idx, overrides in enumerate(candidates):
        print(f"hyper parameters test augmented - {idx}")
        trial_params = mt_shake_shake_params()
        trial_params.update(overrides)
        trial_log = f"{OUT_DIR}/fast_swa_{dataset_name}_hyper_{idx}_log.log"
        scores.append(train_helper_augmented(dataset_name, 3, trial_log, trial_params, epochs))
    winner = np.argmax(scores)
    dump_to_log(candidates[winner], f"{OUT_DIR}/fast_swa_{dataset_name}_hyper_log.log")
    print(f"Best hyper-parameters accuracy result: {scores[winner]}"
          f"\nbest hyper-parameters idx: {winner},\nvalues: {candidates[winner]}"
          "------ END hyper parameters search ----------")
    return candidates[winner]
def main_augmented(idx_to_run):
    """
    Run hyper parameter search for the augmented (multi-student) Fast-SWA on
    the "shapes" dataset, then use the best hyper parameters to run 10-fold
    cross validation on every dataset under DATASETS_DIR.

    :param idx_to_run: unused in the visible code; kept for interface
        compatibility with callers.
    """
    print("Running Augmented")
    # rmtree(ignore_errors=True) already swallows a missing directory, so the
    # original try/except FileNotFoundError around it was dead code.
    shutil.rmtree(OUT_DIR, ignore_errors=True)
    os.makedirs(OUT_DIR, exist_ok=True)
    epochs = 60
    hp_dataset = "shapes"
    best_params = select_best_hyper_parameters_augmented(hp_dataset, epochs)
    params = mt_shake_shake_params()
    params.update(best_params)
    # Consistency: use DATASETS_DIR ("Datasets") rather than the equivalent
    # hard-coded "./Datasets" literal the original used here.
    for dataset_name in os.listdir(DATASETS_DIR):
        print(f"current dataset being used {dataset_name}")
        k_fold = 10
        log_name = f"{OUT_DIR}/fast_swa_{dataset_name}_log.log"
        train_helper_augmented(dataset_name, k_fold, log_name, params, epochs)
    print("--------- END Train Fast-SWA -------------------")
class MeanTeacher:
"""
Mean-Teache class. For trainign and evaluationg the Mean-Teacher model
"""
    def __init__(self, base_model: nn.Module, teacher_model: nn.Module, log_file_path: str, dataset_name: str,
                 lr_rampdown_epochs, epoch_args, cycle_interval, logit_distance_cost, ema_decay,
                 num_cycles, optimizer_args, consistency_rampup, consistency, optimizer=SGD, augment_values=(4, 10),
                 **kwargs):
        """Set up the student/teacher pair, optimizer and schedule parameters.

        :param base_model: student network, trained by backprop.
        :param teacher_model: EMA teacher; its parameters are detached below
            so it never receives gradients from its own loss.
        :param log_file_path: file the training log is appended to
            (None means print to stdout, per dump_to_log).
        :param dataset_name: dataset label used in log messages.
        :param lr_rampdown_epochs: LR rampdown schedule length (stored as-is).
        :param epoch_args: per-epoch schedule arguments (stored as-is).
        :param cycle_interval: epochs per SWA cycle.
        :param logit_distance_cost: weight of the residual-logit loss term.
        :param ema_decay: exponential-moving-average decay for teacher updates.
        :param num_cycles: number of SWA cycles after pre-training.
        :param optimizer_args: kwargs for the optimizer; must include 'lr'.
        :param consistency_rampup: rampup length for the consistency weight.
        :param consistency: maximum consistency-loss weight.
        :param optimizer: optimizer class, defaults to SGD.
        :param augment_values: (n, m) arguments for RandAugment.
        :param kwargs: ignored; lets callers pass a superset parameter dict.
        """
        # Make teacher untrainable from its own loss
        for param in teacher_model.parameters():
            param.detach_()
        self.dataset_name = dataset_name
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self._base_model = base_model
        self._teacher_model = teacher_model
        self.log_file = log_file_path
        # NOTE(review): reads self.base_model while only _base_model is
        # assigned above — presumably a @property defined later in the class;
        # confirm it exists.
        self._optimizer = optimizer(self.base_model.parameters(), **optimizer_args)
        self.epoch = 0
        self.global_step = 0
        self.base_lr = optimizer_args['lr']
        self.lr_rampdown_epochs = lr_rampdown_epochs
        self.epoch_args = epoch_args
        self.cycle_interval = cycle_interval
        self.logit_distance_cost = logit_distance_cost
        self.ema_decay = ema_decay
        self.num_cycles = num_cycles
        self.consistency_rampup = consistency_rampup
        self.consistency = consistency
        self.img_augment = RandAugment(*augment_values)
    def train(self, data_loader, epochs, val_data_loader, classes, fold_num):
        """
        Train the Mean-Teacher for `epochs` pre-training epochs plus
        cycle_interval * num_cycles SWA-cycle epochs, evaluating and logging
        after every epoch.

        :param data_loader: training data loader
        :param epochs: number of pre-training epochs
        :param val_data_loader: validation loader passed to eval_mt_model
        :param classes: class labels forwarded to the evaluation metrics
        :param fold_num: cross-validation fold index, used in logging
        """
        total_epochs = epochs + self.cycle_interval*self.num_cycles
        print(f"will run Mean Teacher for {total_epochs} epochs")
        self.dump_to_log(f"will run Mean Teacher for {total_epochs} epochs\nRunning On Dataset: {self.dataset_name}")
        start_time = time.time()
        for _ in range(0, total_epochs):
            # NOTE(review): train_single_epoch (defined later in this class)
            # takes both an unlabeled and a labeled loader, but only one
            # loader is passed here — confirm the intended call signature.
            loss, lr = self.train_single_epoch(data_loader)
            # dump_to_log / eval_mt_model are presumably methods defined later
            # in this class (not visible in this chunk).
            eval_ans = self.eval_mt_model(val_data_loader, classes=classes, fold_num=fold_num,
                                          dataset_name=self.dataset_name)
            eval_ans.update({"loss": loss, "lr": lr})
            self.dump_to_log({self.epoch: eval_ans})
            self.epoch += 1
        end_time = time.time()
        self.dump_to_log(f"Train time for: {self.dataset_name} is: {end_time - start_time}\n"
                         f"----------------------------------\n----------------------------------")
    def _single_train_helper(self, inp, labels, use_augment=False):
        """
        Run one optimisation step of the student model on a single batch.

        :param inp: batch of input images (tensor)
        :param labels: class targets, or None for an unlabeled batch
        :param use_augment: when True, feed two independent RandAugment views
            to student and teacher instead of the same raw batch
        :return: the combined loss (classification + consistency + residual)
        """
        batch_size = len(inp)
        # NOTE(review): size_average=False is deprecated in modern PyTorch;
        # the equivalent is reduction='sum'.
        class_criterion = nn.CrossEntropyLoss(size_average=False, ignore_index=NO_LABEL).to(self.device)
        consistency_criterion = softmax_mse_loss
        residual_logit_criterion = symmetric_mse_loss
        if not use_augment:
            # Student and teacher see the identical batch.
            # (torch.autograd.Variable is a deprecated no-op wrapper nowadays.)
            input_var_st = torch.autograd.Variable(inp).to(self.device)
            input_var_teacher = torch.autograd.Variable(inp).to(self.device)
        else:
            # Dataloaders must return a tensor, therefore we need to convert back to PIL Image here to allow the
            # RandAugment transformations
            imgs = [ToPILImage()(curr) for curr in inp]
            # Two independent augmentations: one view per network.
            aug_1 = self.img_augment.transform_list(imgs)
            aug_2 = self.img_augment.transform_list(imgs)
            inp_1 = torch.cat([torch.unsqueeze(ToTensor()(curr), dim=0) for curr in aug_1])
            inp_2 = torch.cat([torch.unsqueeze(ToTensor()(curr), dim=0) for curr in aug_2])
            input_var_st = torch.autograd.Variable(inp_1).to(self.device)
            input_var_teacher = torch.autograd.Variable(inp_2).to(self.device)
        if labels is not None:
            labels = torch.autograd.Variable(labels).to(self.device)
        # The student returns two output heads; the second is compared
        # against the teacher via the consistency loss.
        model_out_1, model_out_2 = self._base_model(input_var_st)
        teacher_out, _ = self._teacher_model(input_var_teacher)
        # Block gradients from flowing into the teacher through the consistency term.
        teacher_out = Variable(teacher_out.detach().data, requires_grad=False)
        # Supervised term only applies when the batch is labeled.
        class_loss = class_criterion(model_out_1, labels) / batch_size if labels is not None else 0
        res_loss = self.logit_distance_cost * residual_logit_criterion(model_out_1, model_out_2) / batch_size
        # Ramped-up weight for the student-vs-teacher consistency term.
        consistency_weight = self.get_current_consistency_weight()
        consistency_loss = consistency_weight * consistency_criterion(model_out_2, teacher_out) / batch_size
        loss = class_loss + consistency_loss + res_loss
        self._optimizer.zero_grad()
        loss.backward()
        self._optimizer.step()
        return loss
def train_single_epoch(self, unlabeled_data_loader, labeled_data_loader, use_augment=False):
"""
Train the studen model for a single epoch and update the teacher at the end of the epoch
:param use_augment:
:param unlabeled_data_loader:
:param labeled_data_loader:
:return:
"""
full_loss = 0
self._base_model.train()
self._teacher_model.train()
steps_per_epoch = len(unlabeled_data_loader) + len(labeled_data_loader)
for curr_step_in_unlabeled, (inp, _) in enumerate(unlabeled_data_loader):
self.adjust_learning_rate(curr_step_in_unlabeled, steps_per_epoch)
full_loss += self._single_train_helper(inp, None, use_augment)
for curr_step_in_labeled, (inp, label) in enumerate(labeled_data_loader):
self.adjust_learning_rate(curr_step_in_labeled + len(unlabeled_data_loader), steps_per_epoch)
full_loss += self._single_train_helper(inp, label, use_augment)
self.global_step += 1
self.update_teacher_variables()
print(f"loss {full_loss}")
return full_loss
def update_teacher_variables(self):
"""
Updat the mean teacher - based on code from: https://github.com/benathi/fastswa-semi-sup.
:return:
"""
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (self.global_step + 1), self.ema_decay)
for ema_param, param in zip(self._teacher_model.parameters(), self._base_model.parameters()):
ema_param.data.mul_(alpha).add_(1 - alpha, param.data)
    def eval_mt_model(self, val_data_loader, classes, fold_num, dataset_name):
        """Evaluate student and teacher together through the shared eval_model helper."""
        return eval_model(self._base_model, "base_model", val_data_loader, classes=classes,
                          fold_num=fold_num, dataset_name=dataset_name,
                          model_2=self._teacher_model, model2_name="teacher")
    def get_current_consistency_weight(self):
        # Consistency ramp-up from https://arxiv.org/abs/1610.02242:
        # self.consistency scaled by a sigmoid ramp over consistency_rampup epochs.
        return self.consistency * sigmoid_rampup(self.epoch, self.consistency_rampup)
    def dump_to_log(self, data):
        # Delegates to the module-level dump_to_log with this run's log file.
        dump_to_log(data, self.log_file)
    def adjust_learning_rate(self, curr_step, steps_per_epoch):
        """
        Calculate the learning rate at each step; based on code from
        https://github.com/benathi/fastswa-semi-sup.

        Before ``epoch_args`` fractional epochs the LR follows a single cosine
        ramp-down over ``lr_rampdown_epochs``; afterwards it repeats a short
        cosine segment of length ``cycle_interval`` (the cyclic phase used by
        fast-SWA).
        :param curr_step: step index within the current epoch
        :param steps_per_epoch: total number of steps in one epoch
        :return: the learning rate that was written into the optimizer
        """
        lr = self.base_lr
        # Fractional epoch, e.g. 3.5 == halfway through epoch 3.
        part_epoch = self.epoch + curr_step/steps_per_epoch
        if self.lr_rampdown_epochs:
            if part_epoch < self.epoch_args:
                lr *= cosine_rampdown(part_epoch, self.lr_rampdown_epochs)
            else:
                lr_rampdown_epochs = self.lr_rampdown_epochs
                # The first operand algebraically reduces to
                # (epoch_args - cycle_interval) + (position inside the current
                # cycle), i.e. the ramp restarts every cycle_interval epochs.
                lr *= cosine_rampdown(
                    (lr_rampdown_epochs - (self.lr_rampdown_epochs - self.epoch_args) - self.cycle_interval) + (
                            (part_epoch - self.epoch_args) % self.cycle_interval),
                    lr_rampdown_epochs)
        for param_group in self._optimizer.param_groups:
            param_group['lr'] = lr
        return lr
    @property
    def base_model(self):
        # The trainable student network.
        return self._base_model
    @property
    def teacher_model(self):
        # The EMA teacher; never trained directly.
        return self._teacher_model
    @property
    def optimizer(self):
        # Optimizer over the student's parameters only.
        return self._optimizer
    def get_learn_model(self):
        # Uniform accessor shared with the MultipleStudents/FastSWA wrappers:
        # the model whose weights get snapshotted into the SWA average.
        return self._base_model
class MultipleStudents:
    """
    Mean-Teacher upgrade with multiple students sharing a single teacher.

    Each student is an independent MeanTeacher learner; after every joint
    training round the shared teacher is moved towards the (average of the)
    students' weights with an EMA update.
    """
    # Annotation quoted so the class definition does not depend on
    # List/MeanTeacher being importable at definition time.
    def __init__(self, students: "List[MeanTeacher]", teacher, ema_decay, **kwargs):
        """
        :param students: one MeanTeacher wrapper per student model
        :param teacher: the shared teacher network
        :param ema_decay: EMA decay used when updating the teacher
        """
        self.students = students
        self.teacher = teacher
        self.global_step = 0
        self.ema_decay = ema_decay
    def train_single_epoch(self, unlabeled_data_loaders: "List", labeled_data_loaders: "List", use_augment: bool):
        """
        Train each student for a single epoch and finally update the teacher.

        Loaders are matched to students positionally; each entry is indexed
        with [0] (loaders appear to be packed in (loader, ...) tuples -
        NOTE(review): confirm against the call sites).
        :param unlabeled_data_loaders: one unlabeled-loader entry per student
        :param labeled_data_loaders: one labeled-loader entry per student
        :param use_augment: forwarded to each student's epoch
        """
        for student, unlabeled_data_loader, labeled_data_loader in zip(self.students, unlabeled_data_loaders,
                                                                       labeled_data_loaders):
            student.train_single_epoch(unlabeled_data_loader[0], labeled_data_loader[0], use_augment=use_augment)
        self.global_step += 1
        self.update_teacher()
    def update_teacher(self):
        """
        EMA-update the shared teacher towards the mean of all students.

        NOTE(review): mul_(alpha) runs once per student, so with k students
        the old teacher weights end up scaled by alpha**k rather than alpha -
        confirm this matches the intended update rule before changing it.
        """
        alpha = min(1 - 1 / (self.global_step + 1), self.ema_decay)
        for student in self.students:
            for ema_param, param in zip(self.teacher.parameters(), student.base_model.parameters()):
                # Keyword `alpha=` form: the positional-scalar overload of
                # Tensor.add_ is deprecated in modern PyTorch.
                ema_param.data.mul_(alpha).add_(param.data / len(self.students), alpha=1 - alpha)
    @property
    def teacher_model(self):
        # Mirrors MeanTeacher's accessor so FastSWA can wrap either class.
        return self.teacher
    def get_learn_model(self):
        return self.teacher
class FastSWA:
    """
    Fast-SWA model as described in the original paper: keeps a running average
    of the weights the Mean-Teacher visits across learning-rate cycles.
    """
    # Annotation quoted so the class does not require Union/MeanTeacher/
    # MultipleStudents to be resolvable at definition time.
    def __init__(self, mt: "Union[MeanTeacher, MultipleStudents]", swa_model: nn.Module, log_path, dataset_name: str,
                 num_cycles, cycle_interval, fastswa_freq, epoch_args, **kwargs):
        """
        :param mt: the Mean-Teacher (or multi-student) trainer being averaged
        :param swa_model: network that holds the running weight average
        :param log_path: file that metrics/timings are appended to
        :param dataset_name: dataset label used in log messages
        :param num_cycles: number of extra LR cycles after the base epochs
        :param cycle_interval: length in epochs of one LR cycle
        :param fastswa_freq: epoch stride (within the cyclic phase) between
            SWA snapshots
        :param epoch_args: epoch at which the cyclic LR phase starts
        """
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        # The SWA model is never trained directly - only averaged into.
        for param in swa_model.parameters():
            param.detach_()
        self.dataset_name = dataset_name
        self.mt = mt
        self.teacher_model = mt.teacher_model
        self.swa_model = swa_model
        self.num_params = 0  # how many weight snapshots have been averaged in
        self.cycle_interval = cycle_interval
        self.num_cycles = num_cycles
        self.log_file = log_path
        self.fastswa_freq = int(fastswa_freq)
        self.epoch = 0
        self.epoch_args = epoch_args
        self.updated_swa = 0  # counts SWA updates, reported in the log
    def update_fast_swa(self):
        """
        Fold the current teacher weights into the running SWA average.
        """
        self.num_params += 1
        if self.num_params == 1:
            # First snapshot: start the average from the learner's weights.
            self.swa_model.load_state_dict(self.mt.get_learn_model().state_dict())
        else:
            # Incremental mean: swa += (teacher - swa) / num_params
            inv = 1./float(self.num_params)
            for swa_p, src_p in zip(self.swa_model.parameters(), self.teacher_model.parameters()):
                swa_p.data.add_(-inv*swa_p.data)
                swa_p.data.add_(inv*src_p.data)
    def reset(self):
        """Forget all averaged snapshots."""
        self.num_params = 0
    def train_model_and_swa(self, unlabeled_data_loader, labeled_data_loader, epochs, fold_num,
                            use_augment=False):
        """
        Pre-train the Mean-Teacher's student, then keep training through the
        cyclic phase and update the Fast-SWA average at each snapshot point.
        :param unlabeled_data_loader: forwarded to the Mean-Teacher's epoch
        :param labeled_data_loader: forwarded to the Mean-Teacher's epoch
        :param epochs: base number of epochs before the cyclic phase
        :param fold_num: cross-validation fold id (for logging)
        :return: None; progress is appended to the log file
        """
        total_epochs = epochs + self.cycle_interval*self.num_cycles
        print(f"will run Fast SWA for {total_epochs} epochs")
        self.dump_to_log(f"will run Fast SWA for {total_epochs} epochs\n"
                         f"Running on Dataset: {self.dataset_name}\nRunning on fold: {fold_num}")
        start_time = time.time()
        for epoch in range(0, total_epochs):
            self.mt.train_single_epoch(unlabeled_data_loader, labeled_data_loader, use_augment=use_augment)
            # Snapshot only during the cyclic phase, every fastswa_freq epochs.
            if epoch >= epochs - self.cycle_interval and\
                    (epoch - self.epoch_args + self.cycle_interval) % self.fastswa_freq == 0:
                print("update swa")
                self.updated_swa += 1
                self.update_fast_swa()
                # Refresh BatchNorm statistics after changing the weights.
                if type(unlabeled_data_loader) is list or type(unlabeled_data_loader) is tuple:
                    self.update_batchnorm(unlabeled_data_loader[0][0])
                else:
                    self.update_batchnorm(unlabeled_data_loader)
            self.epoch += 1
            print(f"Finished Epoch {self.epoch}")
        end_time = time.time()
        self.dump_to_log({fold_num: {
            "updated_swa": self.updated_swa,
            "train_time": end_time - start_time}})
    def eval_swa(self, val_data_loaders, classes, fold_num, out_dir=None):
        """
        Evaluate the SWA model and log the result.

        ``out_dir`` defaults to the *current* module-level OUT_DIR. The old
        ``out_dir=OUT_DIR`` default was captured once at class-definition time
        and therefore ignored later rebindings of OUT_DIR (e.g. the augmented
        run's "outputs_aug"); the None sentinel restores late binding.
        """
        if out_dir is None:
            out_dir = OUT_DIR
        ans = eval_model(self.swa_model, "fast-swa", val_data_loaders, classes=classes,
                         fold_num=fold_num, dataset_name=self.dataset_name, out_dir=out_dir)
        self.dump_to_log(ans)
        return ans
    def dump_to_log(self, data):
        # Delegates to the module-level helper with this run's log file.
        dump_to_log(data, self.log_file)
    def update_batchnorm(self, data_loader, steps_to_run=100):
        """
        Re-estimate the SWA model's BatchNorm running statistics by forwarding
        up to ``steps_to_run`` batches in train mode.
        :param data_loader: loader yielding (inputs, targets) pairs
        :param steps_to_run: maximum number of batches to forward
        """
        self.swa_model.train()
        # Gradients are not needed here; torch.no_grad() replaces the removed
        # Variable(..., volatile=True) idiom from pre-0.4 PyTorch.
        with torch.no_grad():
            for idx, (x, _) in enumerate(data_loader):
                if idx > steps_to_run:
                    return
                # Forward pass is run purely for its side effect of updating
                # the BatchNorm running mean/var; targets are unused.
                self.swa_model(x.to(self.device))
if __name__ == '__main__':
    # CLI switch: argv[1] == "0" runs the original pipeline; any other value
    # runs the augmented pipeline with int(argv[1]) forwarded to it.
    augment = 1 if sys.argv[1] != "0" else 0
    if augment == 0:
        main_original()
    else:
        # NOTE(review): this rebinding happens after the classes above were
        # defined, so any default argument that captured OUT_DIR at
        # definition time still holds the old value - confirm intended.
        OUT_DIR = "outputs_aug"
        main_augmented(int(sys.argv[1]))
| [
"numpy.clip",
"DataAugement.RandAugment",
"torchvision.transforms.transforms.ToPILImage",
"torch.nn.CrossEntropyLoss",
"torch.cuda.is_available",
"Metrics.ModifiedF1",
"Metrics.TprFpr",
"torchmetrics.Recall",
"Metrics.Time1000Samples",
"numpy.mean",
"os.listdir",
"numpy.exp",
"Metrics.SkAucR... | [((663, 696), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (686, 696), False, 'import warnings\n'), ((1254, 1316), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'size_average': '(False)', 'ignore_index': 'NO_LABEL'}), '(size_average=False, ignore_index=NO_LABEL)\n', (1273, 1316), False, 'from torch import nn\n'), ((1337, 1354), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (1349, 1354), False, 'import torch\n'), ((1375, 1392), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (1387, 1392), False, 'import torch\n'), ((7291, 7312), 'numpy.argmax', 'np.argmax', (['accuracies'], {}), '(accuracies)\n', (7300, 7312), True, 'import numpy as np\n'), ((8034, 8069), 'os.makedirs', 'os.makedirs', (['OUT_DIR'], {'exist_ok': '(True)'}), '(OUT_DIR, exist_ok=True)\n', (8045, 8069), False, 'import os\n'), ((8205, 8228), 'Models.mt_shake_shake_params', 'mt_shake_shake_params', ([], {}), '()\n', (8226, 8228), False, 'from Models import cifar_shakeshake26, mt_shake_shake_params\n'), ((8293, 8317), 'os.listdir', 'os.listdir', (['DATASETS_DIR'], {}), '(DATASETS_DIR)\n', (8303, 8317), False, 'import os\n'), ((9890, 9902), 'numpy.mean', 'np.mean', (['ans'], {}), '(ans)\n', (9897, 9902), True, 'import numpy as np\n'), ((12191, 12203), 'numpy.mean', 'np.mean', (['ans'], {}), '(ans)\n', (12198, 12203), True, 'import numpy as np\n'), ((13640, 13661), 'numpy.argmax', 'np.argmax', (['accuracies'], {}), '(accuracies)\n', (13649, 13661), True, 'import numpy as np\n'), ((14438, 14473), 'os.makedirs', 'os.makedirs', (['OUT_DIR'], {'exist_ok': '(True)'}), '(OUT_DIR, exist_ok=True)\n', (14449, 14473), False, 'import os\n'), ((14612, 14635), 'Models.mt_shake_shake_params', 'mt_shake_shake_params', ([], {}), '()\n', (14633, 14635), False, 'from Models import cifar_shakeshake26, mt_shake_shake_params\n'), ((14693, 14717), 'os.listdir', 'os.listdir', (['"""./Datasets"""'], {}), "('./Datasets')\n", 
(14703, 14717), False, 'import os\n'), ((1412, 1434), 'Metrics.Time1000Samples', 'Time1000Samples', (['model'], {}), '(model)\n', (1427, 1434), False, 'from Metrics import AreaUnderPrecisionCurve, TprFpr, Time1000Samples, ModifiedF1, SkAucRoc\n'), ((1436, 1460), 'Metrics.Time1000Samples', 'Time1000Samples', (['model_2'], {}), '(model_2)\n', (1451, 1460), False, 'from Metrics import AreaUnderPrecisionCurve, TprFpr, Time1000Samples, ModifiedF1, SkAucRoc\n'), ((2330, 2355), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2353, 2355), False, 'import torch\n'), ((5677, 5713), 'numpy.clip', 'np.clip', (['current', '(0.0)', 'rampup_length'], {}), '(current, 0.0, rampup_length)\n', (5684, 5713), True, 'import numpy as np\n'), ((7023, 7046), 'Models.mt_shake_shake_params', 'mt_shake_shake_params', ([], {}), '()\n', (7044, 7046), False, 'from Models import cifar_shakeshake26, mt_shake_shake_params\n'), ((7941, 7983), 'shutil.rmtree', 'shutil.rmtree', (['OUT_DIR'], {'ignore_errors': '(True)'}), '(OUT_DIR, ignore_errors=True)\n', (7954, 7983), False, 'import shutil\n'), ((8911, 8936), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8934, 8936), False, 'import torch\n'), ((9009, 9049), 'os.path.join', 'os.path.join', (['DATASETS_DIR', 'dataset_name'], {}), '(DATASETS_DIR, dataset_name)\n', (9021, 9049), False, 'import os\n'), ((10287, 10312), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10310, 10312), False, 'import torch\n'), ((13364, 13387), 'Models.mt_shake_shake_params', 'mt_shake_shake_params', ([], {}), '()\n', (13385, 13387), False, 'from Models import cifar_shakeshake26, mt_shake_shake_params\n'), ((14345, 14387), 'shutil.rmtree', 'shutil.rmtree', (['OUT_DIR'], {'ignore_errors': '(True)'}), '(OUT_DIR, ignore_errors=True)\n', (14358, 14387), False, 'import shutil\n'), ((16434, 16462), 'DataAugement.RandAugment', 'RandAugment', (['*augment_values'], {}), '(*augment_values)\n', (16445, 16462), 
False, 'from DataAugement import RandAugment\n'), ((17097, 17108), 'time.time', 'time.time', ([], {}), '()\n', (17106, 17108), False, 'import time\n'), ((17541, 17552), 'time.time', 'time.time', ([], {}), '()\n', (17550, 17552), False, 'import time\n'), ((27307, 27318), 'time.time', 'time.time', ([], {}), '()\n', (27316, 27318), False, 'import time\n'), ((28102, 28113), 'time.time', 'time.time', ([], {}), '()\n', (28111, 28113), False, 'import time\n'), ((1495, 1505), 'torchmetrics.Accuracy', 'Accuracy', ([], {}), '()\n', (1503, 1505), False, 'from torchmetrics import Accuracy, Precision, Recall\n'), ((1507, 1517), 'torchmetrics.Accuracy', 'Accuracy', ([], {}), '()\n', (1515, 1517), False, 'from torchmetrics import Accuracy, Precision, Recall\n'), ((1547, 1574), 'Metrics.ModifiedF1', 'ModifiedF1', ([], {'classes': 'classes'}), '(classes=classes)\n', (1557, 1574), False, 'from Metrics import AreaUnderPrecisionCurve, TprFpr, Time1000Samples, ModifiedF1, SkAucRoc\n'), ((1576, 1603), 'Metrics.ModifiedF1', 'ModifiedF1', ([], {'classes': 'classes'}), '(classes=classes)\n', (1586, 1603), False, 'from Metrics import AreaUnderPrecisionCurve, TprFpr, Time1000Samples, ModifiedF1, SkAucRoc\n'), ((1636, 1651), 'Metrics.TprFpr', 'TprFpr', (['classes'], {}), '(classes)\n', (1642, 1651), False, 'from Metrics import AreaUnderPrecisionCurve, TprFpr, Time1000Samples, ModifiedF1, SkAucRoc\n'), ((1653, 1668), 'Metrics.TprFpr', 'TprFpr', (['classes'], {}), '(classes)\n', (1659, 1668), False, 'from Metrics import AreaUnderPrecisionCurve, TprFpr, Time1000Samples, ModifiedF1, SkAucRoc\n'), ((1702, 1710), 'torchmetrics.Recall', 'Recall', ([], {}), '()\n', (1708, 1710), False, 'from torchmetrics import Accuracy, Precision, Recall\n'), ((1712, 1720), 'torchmetrics.Recall', 'Recall', ([], {}), '()\n', (1718, 1720), False, 'from torchmetrics import Accuracy, Precision, Recall\n'), ((1757, 1768), 'torchmetrics.Precision', 'Precision', ([], {}), '()\n', (1766, 1768), False, 'from torchmetrics 
import Accuracy, Precision, Recall\n'), ((1770, 1781), 'torchmetrics.Precision', 'Precision', ([], {}), '()\n', (1779, 1781), False, 'from torchmetrics import Accuracy, Precision, Recall\n'), ((1814, 1839), 'Metrics.SkAucRoc', 'SkAucRoc', ([], {'classes': 'classes'}), '(classes=classes)\n', (1822, 1839), False, 'from Metrics import AreaUnderPrecisionCurve, TprFpr, Time1000Samples, ModifiedF1, SkAucRoc\n'), ((1841, 1866), 'Metrics.SkAucRoc', 'SkAucRoc', ([], {'classes': 'classes'}), '(classes=classes)\n', (1849, 1866), False, 'from Metrics import AreaUnderPrecisionCurve, TprFpr, Time1000Samples, ModifiedF1, SkAucRoc\n'), ((1921, 1998), 'Metrics.AreaUnderPrecisionCurve', 'AreaUnderPrecisionCurve', (['classes', 'out_dir', 'fold_num', 'model_name', 'dataset_name'], {}), '(classes, out_dir, fold_num, model_name, dataset_name)\n', (1944, 1998), False, 'from Metrics import AreaUnderPrecisionCurve, TprFpr, Time1000Samples, ModifiedF1, SkAucRoc\n'), ((2128, 2205), 'Metrics.AreaUnderPrecisionCurve', 'AreaUnderPrecisionCurve', (['classes', 'out_dir', 'fold_num', 'model_name', 'dataset_name'], {}), '(classes, out_dir, fold_num, model_name, dataset_name)\n', (2151, 2205), False, 'from Metrics import AreaUnderPrecisionCurve, TprFpr, Time1000Samples, ModifiedF1, SkAucRoc\n'), ((4870, 4891), 'json.dump', 'json.dump', (['data', 'file'], {}), '(data, file)\n', (4879, 4891), False, 'import json\n'), ((5783, 5811), 'numpy.exp', 'np.exp', (['(-5.0 * phase * phase)'], {}), '(-5.0 * phase * phase)\n', (5789, 5811), True, 'import numpy as np\n'), ((10363, 10403), 'os.path.join', 'os.path.join', (['DATASETS_DIR', 'dataset_name'], {}), '(DATASETS_DIR, dataset_name)\n', (10375, 10403), False, 'import os\n'), ((10453, 10493), 'os.path.join', 'os.path.join', (['DATASETS_DIR', 'dataset_name'], {}), '(DATASETS_DIR, dataset_name)\n', (10465, 10493), False, 'import os\n'), ((10543, 10583), 'os.path.join', 'os.path.join', (['DATASETS_DIR', 'dataset_name'], {}), '(DATASETS_DIR, dataset_name)\n', 
(10555, 10583), False, 'import os\n'), ((15700, 15725), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15723, 15725), False, 'import torch\n'), ((25398, 25423), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (25421, 25423), False, 'import torch\n'), ((18046, 18108), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'size_average': '(False)', 'ignore_index': 'NO_LABEL'}), '(size_average=False, ignore_index=NO_LABEL)\n', (18065, 18108), False, 'from torch import nn\n'), ((2546, 2572), 'torch.autograd.Variable', 'torch.autograd.Variable', (['x'], {}), '(x)\n', (2569, 2572), False, 'import torch\n'), ((2606, 2637), 'torch.autograd.Variable', 'torch.autograd.Variable', (['y_true'], {}), '(y_true)\n', (2629, 2637), False, 'import torch\n'), ((5433, 5474), 'numpy.cos', 'np.cos', (['(np.pi * current / rampdown_length)'], {}), '(np.pi * current / rampdown_length)\n', (5439, 5474), True, 'import numpy as np\n'), ((18289, 18317), 'torch.autograd.Variable', 'torch.autograd.Variable', (['inp'], {}), '(inp)\n', (18312, 18317), False, 'import torch\n'), ((18367, 18395), 'torch.autograd.Variable', 'torch.autograd.Variable', (['inp'], {}), '(inp)\n', (18390, 18395), False, 'import torch\n'), ((18604, 18616), 'torchvision.transforms.transforms.ToPILImage', 'ToPILImage', ([], {}), '()\n', (18614, 18616), False, 'from torchvision.transforms.transforms import ToPILImage, ToTensor\n'), ((18972, 19002), 'torch.autograd.Variable', 'torch.autograd.Variable', (['inp_1'], {}), '(inp_1)\n', (18995, 19002), False, 'import torch\n'), ((19052, 19082), 'torch.autograd.Variable', 'torch.autograd.Variable', (['inp_2'], {}), '(inp_2)\n', (19075, 19082), False, 'import torch\n'), ((19155, 19186), 'torch.autograd.Variable', 'torch.autograd.Variable', (['labels'], {}), '(labels)\n', (19178, 19186), False, 'import torch\n'), ((29035, 29076), 'torch.autograd.Variable', 'torch.autograd.Variable', (['x'], {'volatile': '(True)'}), '(x, 
volatile=True)\n', (29058, 29076), False, 'import torch\n'), ((29119, 29160), 'torch.autograd.Variable', 'torch.autograd.Variable', (['y'], {'volatile': '(True)'}), '(y, volatile=True)\n', (29142, 29160), False, 'import torch\n'), ((18806, 18816), 'torchvision.transforms.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (18814, 18816), False, 'from torchvision.transforms.transforms import ToPILImage, ToTensor\n'), ((18899, 18909), 'torchvision.transforms.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (18907, 18909), False, 'from torchvision.transforms.transforms import ToPILImage, ToTensor\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 14:36:02 2021
@author: endocv202<EMAIL>
"""
# import network
import os
import os.path as osp
import argparse
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
import skimage
from skimage import io
from skimage.transform import resize as rsz_sk
from tifffile import imsave
from models.get_model import get_arch
def create_predFolder(task_type):
    """Create (if needed) and return the prediction output folder
    ``EndoCV2021/<task_type>`` relative to the current working directory.

    :param task_type: sub-task name, e.g. 'segmentation'
    :return: path to the (now existing) task-specific prediction directory
    """
    directoryName = 'EndoCV2021'
    task_dir = os.path.join(directoryName, task_type)
    # makedirs creates the parent too; exist_ok avoids the race between the
    # original exists() checks and the mkdir() calls.
    os.makedirs(task_dir, exist_ok=True)
    return task_dir
def detect_imgs(infolder, ext='.tif'):
    """Return a sorted array of paths in *infolder* whose names end with
    *ext* or its upper-case form.

    :param infolder: directory to scan (non-recursive)
    :param ext: extension filter including the leading dot, e.g. '.jpg'
    :return: numpy array of matching full paths, lexicographically sorted
    """
    # endswith accepts a tuple, covering both '.jpg' and '.JPG' in one test.
    # (The redundant function-local `import os` was dropped: os is imported
    # at module level.)
    matches = [os.path.join(infolder, name)
               for name in os.listdir(infolder)
               if name.endswith((ext, ext.upper()))]
    # np.sort keeps the original return type (ndarray) for callers that
    # slice it like `imgfiles[:]`.
    return np.sort(matches)
def get_argparser():
    """Build the command-line parser for the inference script."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--n_classes", type=int, default=1,
                    help="num classes (default: None)")
    # Model / checkpoint selection
    ap.add_argument("--model_name", type=str, default='fpnet_mobilenet_W',
                    help='model name')
    ap.add_argument("--ckpt_path", type=str,
                    default='/home/aggcmab/code/checkpoints/F1/fpnet_mobilenet_W/',
                    help='checkpoint path')
    # Input geometry and runtime knobs
    ap.add_argument("--im_size", type=str, default='512,640',
                    help='delimited list input, could be 512, or 480,600')
    ap.add_argument("--gpu_id", type=str, default='1', help="GPU ID")
    ap.add_argument("--random_seed", type=int, default=1,
                    help="random seed (default: 1)")
    return ap
def mymodel():
    """Build the segmentation model described by the CLI arguments and load
    its checkpoint.

    Returns
    -------
    model : torch.nn.Module
        The network in eval mode, moved to the selected device.
    mean, std : sequence
        Normalisation statistics returned by get_arch for this architecture.
    tg_size : tuple of int
        (height, width) the input images are resized to.
    device : torch.device
        'cuda' when available (restricted by --gpu_id), else 'cpu'.

    Raises
    ------
    ValueError
        If --im_size is not one or two comma-separated integers (previously
        this fell through and raised a NameError on tg_size).
    """
    opts = get_argparser().parse_args()
    # '--im_size' is either a single 'H' (square) or 'H,W'.
    im_size = tuple(int(item) for item in opts.im_size.split(','))
    if len(im_size) == 1:
        tg_size = (im_size[0], im_size[0])
    elif len(im_size) == 2:
        tg_size = (im_size[0], im_size[1])
    else:
        raise ValueError(f"--im_size expects 1 or 2 comma-separated ints, got {opts.im_size!r}")
    # Restrict visible GPUs before querying availability.
    os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("Device: %s" % device)
    print(opts.model_name)
    model, mean, std = get_arch(opts.model_name, n_classes=opts.n_classes)
    # Checkpoint is a dict; only the weights are needed for inference.
    checkpoint = torch.load(osp.join(opts.ckpt_path, 'model_checkpoint.pth'), map_location='cpu')
    model.load_state_dict(checkpoint['model_state_dict'])
    model.to(device)
    model.mode = 'eval'
    model.eval()
    return model, mean, std, tg_size, device
if __name__ == '__main__':
    '''
    You are not allowed to print the images or visualizing the test data according to the rule.
    We expect all the users to abide by this rule and help us have a fair challenge "EndoCV2021-Generalizability challenge"

    FAQs:
    1) Most of my predictions do not have polyp.
    --> This can be the case as this is a generalisation challenge. The dataset is very different and can produce such results. In general, not all samples
    have polyp.
    2) What format should I save the predictions.
    --> you can save it in the tif or jpg format.
    3) Can I visualize the data or copy them in my local computer to see?
    --> No, you are not allowed to do this. This is against challenge rules. No test data can be copied or visualised to get insight. Please treat this as unseen image.!!!
    4) Can I use my own test code?
    --> Yes, but please make sure that you follow the rules. Any visulization or copy of test data is against the challenge rules. We make sure that the
    competition is fair and results are replicative.
    '''
    # Build the model once; tg_size is the (H, W) the transforms resize to.
    model, mean, std, tg_size, device = mymodel()
    task_type = 'segmentation'
    # set image folder here!
    directoryName = create_predFolder(task_type)

    # ----> three test folders [https://github.com/sharibox/EndoCV2021-polyp_det_seg_gen/wiki/EndoCV2021-Leaderboard-guide]
    subDirs = ['EndoCV_DATA1', 'EndoCV_DATA2', 'EndoCV_DATA3']
    print(subDirs)
    for j in range(0, len(subDirs)):

        # ---> Folder for test data location!!! (Warning!!! do not copy/visulise!!!)
        imgfolder='/project/def-sponsor00/endocv2021-test-noCopyAllowed-v1/' + subDirs[j]

        # set folder to save your checkpoints here!
        saveDir = os.path.join(directoryName , subDirs[j]+'_pred')
        if not os.path.exists(saveDir):
            os.mkdir(saveDir)

        imgfiles = detect_imgs(imgfolder, ext='.jpg')

        # NOTE(review): re-imported on every folder iteration; harmless
        # (module cache) but belongs at the top of the file.
        from torchvision import transforms
        data_transforms = transforms.Compose([
            transforms.Resize(tg_size),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
            ])

        # CUDA events time the forward pass per image.
        # NOTE(review): torch.cuda.Event fails on a CPU-only machine - confirm
        # this script only ever runs with a GPU available.
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)

        # NOTE(review): this handle is never closed; wrap it in `with` if revised.
        file = open(saveDir + '/'+"timeElaspsed" + subDirs[j] +'.txt', mode='w')
        timeappend = []

        for imagePath in imgfiles[:]:
            """plt.imshow(img1[:,:,(2,1,0)])
            Grab the name of the file.
            """
            filename = (imagePath.split('/')[-1]).split('.jpg')[0]
            print('filename is printing::=====>>', filename)

            # NOTE(review): the image is resized twice - first to 256x256 here
            # (nearest-neighbour, resample=0), then to tg_size by the
            # transforms above. Confirm the intermediate 256x256 step is wanted.
            img1 = Image.open(imagePath).convert('RGB').resize((256,256), resample=0)
            image = data_transforms(img1)
            # perform inference here:
            images = image.to(device, dtype=torch.float32)

            # The original image is read back only for its spatial size, so
            # the predictions can be resized to the source resolution.
            img = skimage.io.imread(imagePath)
            size=img.shape
            start.record()

            outputs = model(images.unsqueeze(0))

            end.record()
            torch.cuda.synchronize()
            print(start.elapsed_time(end))
            timeappend.append(start.elapsed_time(end))

            # Binary mask: threshold the sigmoid probabilities at 0.5.
            probs = outputs.squeeze().sigmoid().detach().cpu()
            preds = (probs > 0.5).numpy()
            probs = probs.numpy()

            pred = (preds * 255.0).astype(np.uint8)
            prob = (probs * 255.0).astype(np.uint8)

            # skimage.resize returns floats scaled to [0, 1] for uint8 input
            # (preserve_range defaults to False), hence the *255 rescale below.
            img_mask = rsz_sk(pred, (size[0], size[1]), anti_aliasing=True)
            img_prob = rsz_sk(prob, (size[0], size[1]), anti_aliasing=True)

            io.imsave(saveDir + '/' + filename + '_mask.jpg', (img_mask * 255.0).astype('uint8'))
            io.imsave(saveDir + '/' + filename + '_prob.jpg', (img_prob * 255.0).astype('uint8'))

            file.write('%s -----> %s \n' %
                       (filename, start.elapsed_time(end)))

        # TODO: write time in a text file
        file.write('%s -----> %s \n' %
                   ('average_t', np.mean(timeappend)))
| [
"models.get_model.get_arch",
"os.path.exists",
"torch.cuda.Event",
"os.listdir",
"numpy.mean",
"PIL.Image.open",
"argparse.ArgumentParser",
"numpy.sort",
"os.path.join",
"torch.cuda.synchronize",
"skimage.io.imread",
"torch.cuda.is_available",
"os.mkdir",
"torchvision.transforms.Normalize"... | [((708, 746), 'os.path.join', 'os.path.join', (['directoryName', 'task_type'], {}), '(directoryName, task_type)\n', (720, 746), False, 'import os\n'), ((814, 834), 'os.listdir', 'os.listdir', (['infolder'], {}), '(infolder)\n', (824, 834), False, 'import os\n'), ((1006, 1020), 'numpy.sort', 'np.sort', (['flist'], {}), '(flist)\n', (1013, 1020), True, 'import numpy as np\n'), ((1057, 1082), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1080, 1082), False, 'import argparse\n'), ((2410, 2461), 'models.get_model.get_arch', 'get_arch', (['opts.model_name'], {'n_classes': 'opts.n_classes'}), '(opts.model_name, n_classes=opts.n_classes)\n', (2418, 2461), False, 'from models.get_model import get_arch\n'), ((492, 521), 'os.path.exists', 'os.path.exists', (['directoryName'], {}), '(directoryName)\n', (506, 521), False, 'import os\n'), ((531, 554), 'os.mkdir', 'os.mkdir', (['directoryName'], {}), '(directoryName)\n', (539, 554), False, 'import os\n'), ((2490, 2538), 'os.path.join', 'osp.join', (['opts.ckpt_path', '"""model_checkpoint.pth"""'], {}), "(opts.ckpt_path, 'model_checkpoint.pth')\n", (2498, 2538), True, 'import os.path as osp\n'), ((4604, 4653), 'os.path.join', 'os.path.join', (['directoryName', "(subDirs[j] + '_pred')"], {}), "(directoryName, subDirs[j] + '_pred')\n", (4616, 4653), False, 'import os\n'), ((5030, 5066), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (5046, 5066), False, 'import torch\n'), ((5081, 5117), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (5097, 5117), False, 'import torch\n'), ((590, 628), 'os.path.join', 'os.path.join', (['directoryName', 'task_type'], {}), '(directoryName, task_type)\n', (602, 628), False, 'import os\n'), ((648, 686), 'os.path.join', 'os.path.join', (['directoryName', 'task_type'], {}), '(directoryName, task_type)\n', (660, 686), False, 'import 
os\n'), ((2287, 2312), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2310, 2312), False, 'import torch\n'), ((4673, 4696), 'os.path.exists', 'os.path.exists', (['saveDir'], {}), '(saveDir)\n', (4687, 4696), False, 'import os\n'), ((4710, 4727), 'os.mkdir', 'os.mkdir', (['saveDir'], {}), '(saveDir)\n', (4718, 4727), False, 'import os\n'), ((5790, 5818), 'skimage.io.imread', 'skimage.io.imread', (['imagePath'], {}), '(imagePath)\n', (5807, 5818), False, 'import skimage\n'), ((5987, 6011), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (6009, 6011), False, 'import torch\n'), ((6393, 6445), 'skimage.transform.resize', 'rsz_sk', (['pred', '(size[0], size[1])'], {'anti_aliasing': '(True)'}), '(pred, (size[0], size[1]), anti_aliasing=True)\n', (6399, 6445), True, 'from skimage.transform import resize as rsz_sk\n'), ((6469, 6521), 'skimage.transform.resize', 'rsz_sk', (['prob', '(size[0], size[1])'], {'anti_aliasing': '(True)'}), '(prob, (size[0], size[1]), anti_aliasing=True)\n', (6475, 6521), True, 'from skimage.transform import resize as rsz_sk\n'), ((963, 992), 'os.path.join', 'os.path.join', (['infolder', 'names'], {}), '(infolder, names)\n', (975, 992), False, 'import os\n'), ((4891, 4917), 'torchvision.transforms.Resize', 'transforms.Resize', (['tg_size'], {}), '(tg_size)\n', (4908, 4917), False, 'from torchvision import transforms\n'), ((4931, 4952), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4950, 4952), False, 'from torchvision import transforms\n'), ((4966, 4997), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mean', 'std'], {}), '(mean, std)\n', (4986, 4997), False, 'from torchvision import transforms\n'), ((6979, 6998), 'numpy.mean', 'np.mean', (['timeappend'], {}), '(timeappend)\n', (6986, 6998), True, 'import numpy as np\n'), ((5527, 5548), 'PIL.Image.open', 'Image.open', (['imagePath'], {}), '(imagePath)\n', (5537, 5548), False, 'from PIL import Image\n')] |
from operator import mul
import sys
import matplotlib.pyplot as plt
import numpy as np
from holoviews import opts
from scipy.signal.ltisys import dfreqresp
from scipy.spatial import Voronoi
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
import pandas as pd
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual, Text, interactive_output
from ipywidgets import Button, HBox, VBox,Layout,Label
import panel as pn
import seaborn as sns
from kneed import KneeLocator
from PatientGraphPheno import *
from patientKG.config.bedrock_connection import *
#from patientKG import utils_pickle
import patientKG.utils_pickle
from holoviews.operation.datashader import datashade, bundle_graph
import holoviews as hv
from holoviews import opts
from datetime import datetime
import re
import plotly.graph_objects as go
from pivottablejs import pivot_ui
from IPython.display import display, HTML
from sklearn.feature_selection import VarianceThreshold
from sklearn import preprocessing
import urllib, json
sns.set(style="ticks")
hv.extension('bokeh')
defaults = dict(width=1000, height=1000, padding=0.1)
from patientKG.tests.test_graphs import *
from ipywidgets import TwoByTwoLayout
import itertools
import time
from IPython.display import IFrame
import json, io
from patientKG.priorKnowledge.Hb1AC import *
from patientKG.priorKnowledge.Albumin import *
from patientKG.priorKnowledge.FBC import *
from patientKG.priorKnowledge.Creactive import *
from scipy.stats import chi2_contingency
import scipy.stats as stats
def show_SpellHRG_HRG_Table(HRG, Degree, Readmit):
    """Filter the pre-computed degree/readmission table down to one HRG code
    and the requested ranges (both bounds inclusive).

    :param HRG: SpellHRG code to select
    :param Degree: (low, high) bounds on 'Sum_Degree'
    :param Readmit: (low, high) bounds on 'ReAdmitted in DAYS'
    :return: the filtered DataFrame
    """
    table = patientKG.utils_pickle.read("Degree_ReAdmitted_HRG")
    hrg_mask = table['SpellHRG'] == HRG
    degree_mask = (table['Sum_Degree'] >= Degree[0]) & (table['Sum_Degree'] <= Degree[1])
    readmit_mask = (table['ReAdmitted in DAYS'] >= Readmit[0]) & (table['ReAdmitted in DAYS'] <= Readmit[1])
    return table.loc[hrg_mask & degree_mask & readmit_mask]
#This below block is for Jupyter-Notebook
"""stats = interact(PatientGraphVisuExplore.show_SpellHRG_HRG_Table,
HRG=widgets.Dropdown(options=list(Degree_HRG['SpellHRG'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,100],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_Income_Scatter(HRG,Degree,Readmit):
    """Scatter-plot INCOME against Sum_Degree for one HRG code.

    Rows are taken from the cached Degree_ReAdmitted_HRG table and limited
    to the inclusive Degree and Readmit ranges.
    """
    table = utils_pickle.read("Degree_ReAdmitted_HRG")
    selection = table.loc[
        (table['SpellHRG'] == HRG)
        & (table['Sum_Degree'] >= Degree[0]) & (table['Sum_Degree'] <= Degree[1])
        & (table['ReAdmitted in DAYS'] >= Readmit[0]) & (table['ReAdmitted in DAYS'] <= Readmit[1])
    ]
    plt.scatter(selection['Sum_Degree'], selection['INCOME'], edgecolors='r')
"""
stats = interact(PatientGraphVisuExplore.plot_SpellHRG_Income_Scatter,
HRG=widgets.Dropdown(options=list(Degree_HRG['SpellHRG'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,100],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_LOS_Scatter(HRG,Degree,Readmit):
    """Overlay one Sum_Degree vs Total_LOS scatter per HRG code in *HRG*.

    Each code's rows come from the cached Degree_ReAdmitted_HRG table,
    filtered to the inclusive Degree and Readmit ranges.
    """
    table = utils_pickle.read("Degree_ReAdmitted_HRG")
    in_degree = (table['Sum_Degree'] >= Degree[0]) & (table['Sum_Degree'] <= Degree[1])
    in_readmit = (table['ReAdmitted in DAYS'] >= Readmit[0]) & (table['ReAdmitted in DAYS'] <= Readmit[1])
    for code in HRG:
        selection = table.loc[(table['SpellHRG'] == code) & in_degree & in_readmit]
        plt.scatter(selection['Sum_Degree'], selection['Total_LOS'], edgecolors='r')
"""
stats = interact(PatientGraphVisuExplore.plot_SpellHRG_LOS_Scatter,
HRG=widgets.SelectMultiple(
options=list(Degree_HRG['SpellHRG'].dropna().unique()),
value=['WJ06E'],
#rows=10,
description='HRG',
disabled=False
)
#widgets.Dropdown(options=list(Degree_HRG['SpellHRG'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,100],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_Pairplot(HRG,Degree,Readmit):
    """Seaborn pairplot of the selected HRG codes, coloured by SpellHRG.

    Rows come from the cached Degree_ReAdmitted_HRG table filtered to the
    inclusive Degree and Readmit ranges; identifier-like columns are
    excluded from the plotted variables.
    """
    table = utils_pickle.read("Degree_ReAdmitted_HRG")
    in_degree = (table['Sum_Degree'] >= Degree[0]) & (table['Sum_Degree'] <= Degree[1])
    in_readmit = (table['ReAdmitted in DAYS'] >= Readmit[0]) & (table['ReAdmitted in DAYS'] <= Readmit[1])
    df = pd.DataFrame()
    for code in HRG:
        df = pd.concat([df, table.loc[(table['SpellHRG'] == code) & in_degree & in_readmit]])
    plotted = df.columns.difference(['ACTIVITY_IDENTIFIER','POD_CODE','ReAdmitted in DAYS'])
    sns.pairplot(df[plotted], hue="SpellHRG")
"""
stats = interact(PatientGraphVisuExplore.plot_SpellHRG_Pairplot,
HRG=widgets.SelectMultiple(
options=list(Degree_HRG['SpellHRG'].dropna().unique()),
value=['WJ06E'],
#rows=10,
description='HRG',
disabled=False
)
#widgets.Dropdown(options=list(Degree_HRG['SpellHRG'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,100],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_HRG_ICD(HRG,ICD,Degree,Readmit,POD):
    """Cluster spells for the given HRG codes (optionally restricted to one
    primary ICD diagnosis) and plot them in 2-D PCA space with a Voronoi
    overlay coloured by KMeans cluster.

    Parameters: HRG - iterable of 'SpellHRG' codes; ICD - single
    'SpellPDiag' code or None to skip the diagnosis filter; Degree /
    Readmit - inclusive (low, high) ranges on 'Sum_Degree' and
    'ReAdmitted in DAYS'; POD - 'POD_CODE' used for both filtering and
    the returned subset.
    Returns the POD-matching rows with identity/demographic/feature columns.
    Side effects: draws a matplotlib figure and prints cluster summaries.
    """
    df = pd.DataFrame()
    # Cached result table, pickled elsewhere in the project.
    Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
    for item in HRG:
        #print(item)
        if ICD == None:
            data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
                    &(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
                    &(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
        else:
            data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellPDiag'] == ICD)&(Degree_ReAdmitted_HRG['SpellHRG'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
                    &(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
                    &(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
        df = pd.concat([df,data])
    features = ['Sum_Degree','Global_Central', 'Total_LOS', 'INCOME']#,'Turnaround_Degree','DIAG_COUNT']
    # Separating out the features
    x = df.loc[:, features].values
    # Separating out the target
    #y = test.loc[:,['target']].values
    # Standardizing the features
    x = StandardScaler().fit_transform(x)
    # Project standardized features onto the first two principal components.
    pca = PCA(n_components=2)
    principalComponents = pca.fit_transform(x)
    principalDf = pd.DataFrame(data = principalComponents
             , columns = ['principal component 1', 'principal component 2'])
    #Voronoi at least four points, though clusters not less than 4
    kmeans = KMeans(n_clusters=2)
    #pair = ['INCOME','Total_LOS']
    kmeans.fit(principalDf)
    labels = kmeans.predict(principalDf)
    centroids = kmeans.cluster_centers_
    #print(centroids)
    # NOTE(review): `v` is computed but never used below.
    v = np.vstack([centroids,[0,0]])
    #print(v)
    # Voronoi cells are built over the PCA points themselves (not centroids).
    vor = Voronoi(principalComponents)
    # plot
    # voronoi_finite_polygons_2d: project helper (star-imported) that
    # presumably clips infinite Voronoi regions — TODO confirm its module.
    regions, vertices = voronoi_finite_polygons_2d(vor)
    fig = plt.figure(figsize=(10, 10))
    # Cluster label k (0-based) is styled via the k+1 keys below.
    colmap = {1: 'g', 2: 'r', 3: 'b', 4:'y'}
    marker = {1:'circle', 2:'diamond', 3:'dot', 4:'triangle'}
    size = {1:2,2:2,3:2,4:2}
    colors = list(map(lambda x: colmap[x+1], labels))
    markers = list(map(lambda x: marker[x+1], labels))
    sizes = list(map(lambda x: size[x+1], labels))
    #print(principalComponents)
    df['principal component 1'] = principalComponents[:,0]
    df['principal component 2'] = principalComponents[:,1]
    df['color'] = colors
    df['marker'] = markers
    df['sizes'] = sizes
    opts.defaults(opts.Points(padding=0.1, size=8, line_color='black'))
    # NOTE(review): `data` feeds the commented-out hv.Points call only.
    data ={'x':list(df['principal component 1'])
       ,'y':list(df['principal component 2'])
       ,'color':list(df['color'])
       ,'marker':list(df['marker'])
       ,'sizes':list(df['sizes'])}
    #hv.Points(data, vdims=['color', 'marker', 'sizes']).opts(color='color', marker='marker', size='sizes')
    plt.scatter(df['principal component 1'], df['principal component 2'], color=colors, alpha=0.5, edgecolor='k')
    #for idx, centroid in enumerate(centroids):
        #plt.scatter(*centroid, color=colmap[idx+1])
    df['labels'] = labels
    #print(list(df['labels'].unique()))
    # shape_: cluster label -> [row count, mean Sum_Degree]; sorting on that
    # pair ranks clusters by size, then mean degree.
    shape_ = {}
    for item in list(df['labels'].unique()):
        shape_.update({item:[(df[df['labels'] ==item].shape[0]),df[df['labels'] == item]['Sum_Degree'].mean()]})
        print('Complex Degree:',df[df['labels'] == item]['Sum_Degree'].mean())
    #print(shape_)
    #print(sorted(shape_.items(), key=lambda x: x[1]))
    # Assumes exactly two clusters exist (n_clusters=2 above); indexing [1]
    # would raise if KMeans ever produced a single label.
    minor_=sorted(shape_.items(), key=lambda x: x[1])[0][0]
    major_=sorted(shape_.items(), key=lambda x: x[1])[1][0]
    #sns.pairplot(df[df['labels'] ==1][df.columns.difference(['ACTIVITY_IDENTIFIER','POD_CODE'])], hue="SpellHRG")
    #for label,x,y in zip(df[df['labels'] == minor_]['ACTIVITY_IDENTIFIER'],df[df['labels'] == minor_]['principal component 1'],df[df['labels'] == minor_]['principal component 2']):
    # Annotate every point with its spell identifier.
    for label,x,y in zip(df['ACTIVITY_IDENTIFIER'],df['principal component 1'],df['principal component 2']):
        label = label
        plt.annotate(label, (x,y),textcoords="offset points",xytext=(0,10),ha='center', size =20)
    # Fill each finite Voronoi region with its point's cluster colour.
    test=zip(regions, df['color'])
    for item in test:
        polygon = vertices[item[0]]
        #print(region,polygon)
        #print(*zip(*polygon))
        plt.fill(*zip(*polygon), alpha=0.4
        ,color=item[1]
        )
    plt.xlim(vor.min_bound[0]-0.1, vor.max_bound[0]+0.1)
    plt.ylim(vor.min_bound[1]-0.1, vor.max_bound[1]+0.1)
    print('Minor Complex Degree:',df[df['labels'] == minor_]['Sum_Degree'].mean())
    print('Major Complex Degree:',df[df['labels'] == major_]['Sum_Degree'].mean())
    #df.loc[(df['POD_CODE'] == POD)]
    return df[(df['POD_CODE'] == POD)][['ACTIVITY_IDENTIFIER','age','sex','SpellHRG']+features+ ['ReAdmitted in DAYS','POD_CODE','SpellPDiag','SpellSDiag']]#,'ALL_DIAG']]
"""
codes =list(Degree_ReAdmitted_HRG['SpellHRG'].unique())
cardi=['DZ31Z',
'EC21Z',
'EC22Z',
'EY50Z',
'EY51Z',
'EY01A',
'EY01B',
'EY02A',
'EY02B',
'EY11Z',
'EY12A',
'EY12B',
'EY13Z',
'EY16A',
'EY16B',
'EY17A',
'EY17B']
init_code = list(set(codes).intersection(cardi))
stats = interact(plot_SpellHRG_HRG_ICD,
HRG=widgets.SelectMultiple(
options=
init_code,
#list(Degree_HRG['SpellHRG'].dropna().unique()),
value=init_code,
#rows=10,
description='HRG',
disabled=False
),
ICD=widgets.Dropdown(
options=
#init_code,
sorted(list(Degree_HRG['SpellPDiag'].dropna().unique())),value=None
)
,POD=widgets.Dropdown(options=list(Degree_HRG['POD_CODE'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,500],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_ICD(ICD,Degree,Age,POD):
    """Cluster spells for the given primary ICD codes and plot them in 2-D
    PCA space with a Voronoi overlay coloured by KMeans cluster.

    Parameters: ICD - iterable of 'SpellPDiag' codes; Degree - inclusive
    (low, high) range on 'Sum_Degree'; Age - inclusive (low, high) range
    on 'age' (cast to int); POD - 'POD_CODE' used for both filtering and
    the returned subset.
    Returns the POD-matching rows with demographic and feature columns.
    Side effects: draws a matplotlib figure and prints cluster summaries.
    """
    df = pd.DataFrame()
    # Cached result table, pickled elsewhere in the project.
    Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
    for item in ICD:
        #print(item)
        data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellPDiag'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
                    &(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
                    &(((Degree_ReAdmitted_HRG['age'].astype(int)>=Age[0])&(Degree_ReAdmitted_HRG['age'].astype(int) <=Age[1])))]
        df = pd.concat([df,data])
    features = ['Sum_Degree','Global_Central', 'Total_LOS', 'INCOME']#,'Turnaround_Degree','DIAG_COUNT']
    # Separating out the features
    x = df.loc[:, features].values
    # Separating out the target
    #y = test.loc[:,['target']].values
    # Standardizing the features
    x = StandardScaler().fit_transform(x)
    # Project standardized features onto the first two principal components.
    pca = PCA(n_components=2)
    principalComponents = pca.fit_transform(x)
    principalDf = pd.DataFrame(data = principalComponents
             , columns = ['principal component 1', 'principal component 2'])
    #Voronoi at least four points, though clusters not less than 4
    kmeans = KMeans(n_clusters=2)
    #pair = ['INCOME','Total_LOS']
    kmeans.fit(principalDf)
    labels = kmeans.predict(principalDf)
    centroids = kmeans.cluster_centers_
    #print(centroids)
    # NOTE(review): `v` is computed but never used below.
    v = np.vstack([centroids,[0,0]])
    #print(v)
    # Voronoi cells are built over the PCA points themselves (not centroids).
    vor = Voronoi(principalComponents)
    # plot
    # voronoi_finite_polygons_2d: project helper (star-imported) that
    # presumably clips infinite Voronoi regions — TODO confirm its module.
    regions, vertices = voronoi_finite_polygons_2d(vor)
    fig = plt.figure(figsize=(10, 10))
    # Cluster label k (0-based) is styled via the k+1 keys below.
    colmap = {1: 'g', 2: 'r', 3: 'b', 4:'y'}
    marker = {1:'circle', 2:'diamond', 3:'dot', 4:'triangle'}
    size = {1:2,2:2,3:2,4:2}
    colors = list(map(lambda x: colmap[x+1], labels))
    markers = list(map(lambda x: marker[x+1], labels))
    sizes = list(map(lambda x: size[x+1], labels))
    #print(principalComponents)
    df['principal component 1'] = principalComponents[:,0]
    df['principal component 2'] = principalComponents[:,1]
    df['color'] = colors
    df['marker'] = markers
    df['sizes'] = sizes
    opts.defaults(opts.Points(padding=0.1, size=8, line_color='black'))
    # NOTE(review): `data` feeds the commented-out hv.Points call only.
    data ={'x':list(df['principal component 1'])
       ,'y':list(df['principal component 2'])
       ,'color':list(df['color'])
       ,'marker':list(df['marker'])
       ,'sizes':list(df['sizes'])}
    #hv.Points(data, vdims=['color', 'marker', 'sizes']).opts(color='color', marker='marker', size='sizes')
    plt.scatter(df['principal component 1'], df['principal component 2'], color=colors, alpha=0.5, edgecolor='k')
    #for idx, centroid in enumerate(centroids):
        #plt.scatter(*centroid, color=colmap[idx+1])
    df['labels'] = labels
    #print(list(df['labels'].unique()))
    # shape_: cluster label -> [row count, mean Sum_Degree]; sorting on that
    # pair ranks clusters by size, then mean degree.
    shape_ = {}
    for item in list(df['labels'].unique()):
        shape_.update({item:[(df[df['labels'] ==item].shape[0]),df[df['labels'] == item]['Sum_Degree'].mean()]})
        print('Complex Degree:',df[df['labels'] == item]['Sum_Degree'].mean())
    #print(shape_)
    #print(sorted(shape_.items(), key=lambda x: x[1]))
    # Assumes exactly two clusters exist (n_clusters=2 above); indexing [1]
    # would raise if KMeans ever produced a single label.
    minor_=sorted(shape_.items(), key=lambda x: x[1])[0][0]
    major_=sorted(shape_.items(), key=lambda x: x[1])[1][0]
    #sns.pairplot(df[df['labels'] ==1][df.columns.difference(['ACTIVITY_IDENTIFIER','POD_CODE'])], hue="SpellHRG")
    #for label,x,y in zip(df[df['labels'] == minor_]['ACTIVITY_IDENTIFIER'],df[df['labels'] == minor_]['principal component 1'],df[df['labels'] == minor_]['principal component 2']):
    # Annotate every point with its spell identifier.
    for label,x,y in zip(df['ACTIVITY_IDENTIFIER'],df['principal component 1'],df['principal component 2']):
        label = label
        plt.annotate(label, (x,y),textcoords="offset points",xytext=(0,10),ha='center', size =20)
    # Fill each finite Voronoi region with its point's cluster colour.
    test=zip(regions, df['color'])
    for item in test:
        polygon = vertices[item[0]]
        #print(region,polygon)
        #print(*zip(*polygon))
        plt.fill(*zip(*polygon), alpha=0.4
        ,color=item[1]
        )
    plt.xlim(vor.min_bound[0]-0.1, vor.max_bound[0]+0.1)
    plt.ylim(vor.min_bound[1]-0.1, vor.max_bound[1]+0.1)
    print('Minor Complex Degree:',df[df['labels'] == minor_]['Sum_Degree'].mean())
    print('Major Complex Degree:',df[df['labels'] == major_]['Sum_Degree'].mean())
    #df.loc[(df['POD_CODE'] == POD)]
    return df[(df['POD_CODE'] == POD)][['age','sex','SpellHRG']+features+ ['POD_CODE','SpellPDiag','SpellSDiag']]#,'ALL_DIAG']]
#This block is for Jupyter-Notebook script
"""
codes =list(Degree_ReAdmitted_HRG['SpellHRG'].unique())
cardi=['DZ31Z',
'EC21Z',
'EC22Z',
'EY50Z',
'EY51Z',
'EY01A',
'EY01B',
'EY02A',
'EY02B',
'EY11Z',
'EY12A',
'EY12B',
'EY13Z',
'EY16A',
'EY16B',
'EY17A',
'EY17B']
init_code = list(set(codes).intersection(cardi))
stats = interact(plot_SpellHRG_ICD,
ICD=widgets.SelectMultiple(
options=
#init_code,
list(Degree_HRG['SpellPDiag'].dropna().unique()),
value=['A415'],
#rows=10,
description='ICD',
disabled=False
)
,POD=widgets.Dropdown(options=list(Degree_HRG['POD_CODE'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,500],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Age=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=100,
step=1,
description='Age:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')) """
def plot_SpellHRG_HRG(HRG,Degree,Readmit,POD):
    """Cluster spells for the given HRG codes and plot them in 2-D PCA
    space with a Voronoi overlay coloured by KMeans cluster.

    Parameters: HRG - iterable of 'SpellHRG' codes; Degree / Readmit -
    inclusive (low, high) ranges on 'Sum_Degree' and 'ReAdmitted in DAYS';
    POD - 'POD_CODE' used for both filtering and the returned subset.
    Returns the POD-matching rows with identifier and feature columns.
    Side effects: draws a matplotlib figure and prints cluster summaries.
    """
    df = pd.DataFrame()
    # Cached result table, pickled elsewhere in the project.
    Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
    for item in HRG:
        #print(item)
        data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
                    &(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
                    &(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
        df = pd.concat([df,data])
    # Wider feature set than the ICD variants: adds turnaround and diagnosis count.
    features = ['Sum_Degree','Global_Central', 'Total_LOS', 'INCOME','Turnaround_Degree','DIAG_COUNT']
    # Separating out the features
    x = df.loc[:, features].values
    # Separating out the target
    #y = test.loc[:,['target']].values
    # Standardizing the features
    x = StandardScaler().fit_transform(x)
    # Project standardized features onto the first two principal components.
    pca = PCA(n_components=2)
    principalComponents = pca.fit_transform(x)
    principalDf = pd.DataFrame(data = principalComponents
             , columns = ['principal component 1', 'principal component 2'])
    #Voronoi at least four points, though clusters not less than 4
    kmeans = KMeans(n_clusters=2)
    #pair = ['INCOME','Total_LOS']
    kmeans.fit(principalDf)
    labels = kmeans.predict(principalDf)
    centroids = kmeans.cluster_centers_
    #print(centroids)
    # NOTE(review): `v` is computed but never used below.
    v = np.vstack([centroids,[0,0]])
    #print(v)
    # Voronoi cells are built over the PCA points themselves (not centroids).
    vor = Voronoi(principalComponents)
    # plot
    # voronoi_finite_polygons_2d: project helper (star-imported) that
    # presumably clips infinite Voronoi regions — TODO confirm its module.
    regions, vertices = voronoi_finite_polygons_2d(vor)
    fig = plt.figure(figsize=(10, 10))
    # Cluster label k (0-based) is styled via the k+1 keys below.
    colmap = {1: 'g', 2: 'r', 3: 'b', 4:'y'}
    marker = {1:'circle', 2:'diamond', 3:'dot', 4:'triangle'}
    size = {1:2,2:2,3:2,4:2}
    colors = list(map(lambda x: colmap[x+1], labels))
    markers = list(map(lambda x: marker[x+1], labels))
    sizes = list(map(lambda x: size[x+1], labels))
    #print(principalComponents)
    df['principal component 1'] = principalComponents[:,0]
    df['principal component 2'] = principalComponents[:,1]
    df['color'] = colors
    df['marker'] = markers
    df['sizes'] = sizes
    opts.defaults(opts.Points(padding=0.1, size=8, line_color='black'))
    # NOTE(review): `data` feeds the commented-out hv.Points call only.
    data ={'x':list(df['principal component 1'])
       ,'y':list(df['principal component 2'])
       ,'color':list(df['color'])
       ,'marker':list(df['marker'])
       ,'sizes':list(df['sizes'])}
    #hv.Points(data, vdims=['color', 'marker', 'sizes']).opts(color='color', marker='marker', size='sizes')
    plt.scatter(df['principal component 1'], df['principal component 2'], color=colors, alpha=0.5, edgecolor='k')
    #for idx, centroid in enumerate(centroids):
        #plt.scatter(*centroid, color=colmap[idx+1])
    df['labels'] = labels
    #print(list(df['labels'].unique()))
    # shape_: cluster label -> [row count, mean Sum_Degree]; sorting on that
    # pair ranks clusters by size, then mean degree.
    shape_ = {}
    for item in list(df['labels'].unique()):
        shape_.update({item:[(df[df['labels'] ==item].shape[0]),df[df['labels'] == item]['Sum_Degree'].mean()]})
        print('Complex Degree:',df[df['labels'] == item]['Sum_Degree'].mean())
    #print(shape_)
    #print(sorted(shape_.items(), key=lambda x: x[1]))
    # Assumes exactly two clusters exist (n_clusters=2 above); indexing [1]
    # would raise if KMeans ever produced a single label.
    minor_=sorted(shape_.items(), key=lambda x: x[1])[0][0]
    major_=sorted(shape_.items(), key=lambda x: x[1])[1][0]
    #sns.pairplot(df[df['labels'] ==1][df.columns.difference(['ACTIVITY_IDENTIFIER','POD_CODE'])], hue="SpellHRG")
    #for label,x,y in zip(df[df['labels'] == minor_]['ACTIVITY_IDENTIFIER'],df[df['labels'] == minor_]['principal component 1'],df[df['labels'] == minor_]['principal component 2']):
    # Annotate every point with its spell identifier.
    for label,x,y in zip(df['ACTIVITY_IDENTIFIER'],df['principal component 1'],df['principal component 2']):
        label = label
        plt.annotate(label, (x,y),textcoords="offset points",xytext=(0,10),ha='center', size =20)
    # Fill each finite Voronoi region with its point's cluster colour.
    test=zip(regions, df['color'])
    for item in test:
        polygon = vertices[item[0]]
        #print(region,polygon)
        #print(*zip(*polygon))
        plt.fill(*zip(*polygon), alpha=0.4
        ,color=item[1]
        )
    plt.xlim(vor.min_bound[0]-0.1, vor.max_bound[0]+0.1)
    plt.ylim(vor.min_bound[1]-0.1, vor.max_bound[1]+0.1)
    print('Minor Complex Degree:',df[df['labels'] == minor_]['Sum_Degree'].mean())
    print('Major Complex Degree:',df[df['labels'] == major_]['Sum_Degree'].mean())
    #df.loc[(df['POD_CODE'] == POD)]
    return df[(df['POD_CODE'] == POD)][['ACTIVITY_IDENTIFIER','SpellHRG']+features+ ['ReAdmitted in DAYS','POD_CODE','ALL_DIAG']]
#The below block is for Jupyter-Notebook
"""
codes =list(Degree_ReAdmitted_HRG['SpellHRG'].unique())
cardi=['DZ31Z',
'EC21Z',
'EC22Z',
'EY50Z',
'EY51Z',
'EY01A',
'EY01B',
'EY02A',
'EY02B',
'EY11Z',
'EY12A',
'EY12B',
'EY13Z',
'EY16A',
'EY16B',
'EY17A',
'EY17B']
init_code = list(set(codes).intersection(cardi))
stats = interact(plot_SpellHRG_HRG,
HRG=widgets.SelectMultiple(
options=
#init_code,
list(Degree_HRG['SpellHRG'].dropna().unique()),
value=init_code,
#rows=10,
description='HRG',
disabled=False
)
,POD=widgets.Dropdown(options=list(Degree_HRG['POD_CODE'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,500],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_HRG_Degree(HRG,Degree,Readmit,POD):
    """Filter spells by HRG/POD/degree/readmission, project the feature
    columns to 2-D, choose a cluster count via the elbow method, draw the
    Voronoi-annotated scatter via plot_vor, and return the POD rows.
    """
    table = utils_pickle.read("Degree_ReAdmitted_HRG")
    # Drop spells without an HRG and treat missing readmission as -1
    # (so the default slider range [-1, 30] keeps them).
    table = table[table['SpellHRG'].notna()]
    table['ReAdmitted in DAYS'] = table['ReAdmitted in DAYS'].fillna(-1)
    df = pd.DataFrame()
    for code in HRG:
        selected = (
            (table['SpellHRG'] == code)
            & (table['POD_CODE'] == POD)
            & (table['Sum_Degree'] >= Degree[0]) & (table['Sum_Degree'] <= Degree[1])
            & (table['ReAdmitted in DAYS'] >= Readmit[0]) & (table['ReAdmitted in DAYS'] <= Readmit[1])
        )
        df = pd.concat([df, table.loc[selected]])
    features = ['Sum_Degree','Global_Central', 'Total_LOS', 'INCOME','Turnaround_Degree','DIAG_COUNT']
    # Project the feature columns to two principal components (project helper).
    principalComponents = sliced_principle_components(df, features, 2)
    principalDf = pd.DataFrame(data=principalComponents,
                               columns=['principal component 1', 'principal component 2'])
    kmax = 10
    # Elbow heuristic picks the cluster count (project helper).
    best_n = best_eblow_k(principalDf.values.tolist(), kmax=10)
    df = plot_vor(df, principalComponents, best_n)
    keep = ['ACTIVITY_IDENTIFIER','SpellHRG'] + features + ['ReAdmitted in DAYS','POD_CODE','ALL_DIAG','labels']
    return df[(df['POD_CODE'] == POD)][keep]
"""
codes =list(Degree_ReAdmitted_HRG['SpellHRG'].unique())
cardi=['DZ31Z',
'EC21Z',
'EC22Z',
'EY50Z',
'EY51Z',
'EY01A',
'EY01B',
'EY02A',
'EY02B',
'EY11Z',
'EY12A',
'EY12B',
'EY13Z',
'EY16A',
'EY16B',
'EY17A',
'EY17B']
init_code = list(set(codes).intersection(cardi))
stats = interact(plot_SpellHRG_HRG_Degree,
HRG=widgets.SelectMultiple(
options=
#init_code,
list(Degree_HRG['SpellHRG'].dropna().unique()),
value=init_code,
#rows=10,
description='HRG',
disabled=False
)
,POD=widgets.Dropdown(options=list(Degree_HRG['POD_CODE'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,500],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_HRG_Degree_PairCompare(HRG,Degree,Readmit,POD):
    """Same selection/clustering as plot_SpellHRG_HRG_Degree (reading the
    parent-directory pickle), plus a seaborn pairplot of the features
    coloured by cluster label. Returns the POD rows.
    """
    table = utils_pickle.read("../Degree_ReAdmitted_HRG")
    # Drop spells without an HRG and treat missing readmission as -1.
    table = table[table['SpellHRG'].notna()]
    table['ReAdmitted in DAYS'] = table['ReAdmitted in DAYS'].fillna(-1)
    df = pd.DataFrame()
    for code in HRG:
        selected = (
            (table['SpellHRG'] == code)
            & (table['POD_CODE'] == POD)
            & (table['Sum_Degree'] >= Degree[0]) & (table['Sum_Degree'] <= Degree[1])
            & (table['ReAdmitted in DAYS'] >= Readmit[0]) & (table['ReAdmitted in DAYS'] <= Readmit[1])
        )
        df = pd.concat([df, table.loc[selected]])
    features = ['Sum_Degree','Global_Central', 'Total_LOS', 'INCOME','Turnaround_Degree','DIAG_COUNT']
    principalComponents = sliced_principle_components(df, features, 2)
    principalDf = pd.DataFrame(data=principalComponents,
                               columns=['principal component 1', 'principal component 2'])
    kmax = 10
    best_n = best_eblow_k(principalDf.values.tolist(), kmax=10)
    df = plot_vor(df, principalComponents, best_n)
    # 'labels' joins the plotted variables so the pairplot is hued by cluster
    # (and, as in the original, appears twice in the returned selection).
    features.append('labels')
    sns.pairplot(df[features], hue="labels", diag_kws={'bw':'1.0'})
    #df[(df['POD_CODE'] == POD)][['ACTIVITY_IDENTIFIER','SpellHRG']+features+ ['ReAdmitted in DAYS','POD_CODE','ALL_DIAG','labels']]
    return df[(df['POD_CODE'] == POD)][['ACTIVITY_IDENTIFIER','SpellHRG']+features+ ['ReAdmitted in DAYS','POD_CODE','ALL_DIAG','labels']]
def multi_table(df, title, addAll=True):
    """Render *df* as a plotly Table with an update-menu of label buttons.

    Parameters: df - DataFrame expected to contain a 'labels' column;
    title - currently unused (kept for interface compatibility);
    addAll - when truthy, prepend an 'All' button.
    Returns None; the figure is shown as a side effect.
    """
    fig = go.Figure()
    # Single table trace: df columns as the header, rows as cell columns.
    fig.add_trace(go.Table(
        header=dict(values=list(df.columns)),
        cells=dict(values=df.transpose().values.tolist()
        )
    ))
    # Button that keeps every label visible.
    button_all = dict(label='All',
                    method='update',
                    args=[{'visible':df['labels'].isin(list(df['labels'].unique())),'title':'All','showlegend':True}])
    def create_layout_button(column):
        # Per-label button: only rows whose label matches stay visible.
        return dict(label=column, method='update', args=[{'visible':df['labels'].isin([column]),'title':column,'showlegend':True}])
    # ([button_all]*addAll) relies on bool*list: the 'All' button is included
    # only when addAll is truthy. NOTE(review): buttons are generated per ROW
    # of df['labels'], not per unique label — confirm this is intended.
    fig.update_layout(updatemenus=[go.layout.Updatemenu(active=0, buttons=([button_all]*addAll)+list(df['labels'].map(lambda column:create_layout_button(column))))],yaxis_type="log")
    fig.show()
    return
def sankey(df):
    """Build a plotly Sankey figure: root 'Total' -> cluster ('labels') ->
    outcome ('Label'), with link widths given by distinct
    'activity_identifier' counts. Returns the configured Figure.
    """
    labels = ['Total']               # node display names; list index == node id
    source=[]                        # link source node ids
    target=[]                        # link target node ids
    value=[]                         # link widths (spell counts)
    color = ['#57c19c']              # node colours; root node first
    color_map={'g':'green','r':'red','y':'yellow','b':'blue'}
    total = df['activity_identifier'].nunique()
    # Distinct spell count per (cluster, outcome, colour) triple.
    grouped = pd.DataFrame(df.groupby(['labels','Label','color'])['activity_identifier'].nunique()).reset_index()
    # Tier 1: one node + one link per cluster, fed from root node 0.
    for item in sorted(df['labels'].unique()):
        labels.append(str(item))
        source.append(0)
        target.append(labels.index(str(item)))
        value.append(grouped[grouped['labels']==item]['activity_identifier'].sum())
        color.append(str(color_map[grouped[grouped['labels']==item]['color'].unique()[0]]))
    # Tier 2: one node per (cluster, outcome) pair that actually has rows;
    # int() on an empty selection raises, so empty pairs are skipped.
    for item in sorted(df['labels'].unique()):
        for item2 in sorted(df['Label'].unique()):
            try:
                num = int(grouped[(grouped['labels']==item)&(grouped['Label']==item2)]['activity_identifier'])
                labels.append(str(item+"_"+item2))
            except:
                continue
    # NOTE(review): only two extra node colours are appended here regardless
    # of how many tier-2 nodes were created above — confirm intended.
    color.append('black')
    color.append('pink')
    # Links cluster -> (cluster, outcome).
    for index,row in grouped.iterrows():
        source_label, target_label,value_ = row['labels'], row['Label'],row['activity_identifier']
        source.append(labels.index(str(source_label)))
        target.append(labels.index(str(source_label+"_"+target_label)))
        value.append(value_)
    # Hover percentages per node: tier-1 relative to the grand total, tier-2
    # relative to its parent cluster's inbound value.
    percentage_node = ["{:.2f}".format(total/total*100)+"%"]
    diff = list(set(source)-set([0]))    # NOTE(review): unused
    i=0
    cn =0
    while i < len(source):
        if source[i] == 0:
            percentage_node.append("{:.2f}".format(value[i]/total*100)+"%")
            cn+=1
        i+=1
    while cn < len(source):
        # target.index(source[cn]) locates the link whose target is this
        # link's source node, i.e. the parent cluster's inbound value.
        percentage_node.append("{:.2f}".format(value[cn]/value[target.index(source[cn])]*100)+"%")
        cn+=1
    # NOTE(review): hard-coded and unused (the link customdata below is commented out).
    percentage_link = ["{:.2f}".format(total/total*100)+"%", "60%", "70%", "60%", "100%"]
    fig = go.Figure(data=[go.Sankey(
    node = dict(
      pad = 15,
      thickness = 20,
      line = dict(color = "black", width = 0.5),
      label = labels,
      customdata = percentage_node,
      hovertemplate='%{label}: %{value}(%{customdata})<extra></extra>',
      color = color
    ),
    link = dict(
      source = source, # indices correspond to labels, eg A1, A2, A1, B1, ...
      target = target,
      value = value,
      #customdata = percentage_link,
      #hovertemplate='Link from %{source.label}<br />'+
      #'to %{target.label}<br /> %{value}(%{customdata})'+
      #'<extra></extra>',
  ))])
    #
    return fig.update_layout(title_text="Cluster via Outcome", font_size=10)#labels, source, target, value
def plot_Spell_PU_Degree_PairCompare(Label, Expected_LOS,selected_list,Age,Waterlow_Standard,features = ['Complex_Degree','Global_Central', 'Total_LOS', 'Turnaround_Degree'] ):
    """Cluster pressure-ulcer spells and compare the clusters visually.

    Filters the cached PU_RESULT table by outcome Label, expected length of
    stay, modelled-event combination, admission age and Waterlow compliance;
    projects *features* to 2-D, clusters via the elbow method, then shows a
    sankey of cluster->outcome, a phenotype summary table (mean +/- pop. std
    per cluster), and a pairplot of the top-variance features.
    Returns the selected columns, or an explanatory string / None when the
    filters yield nothing usable.

    NOTE(review): `features` has a mutable default list — it is not mutated
    here (only copied into feature_list), but this is fragile.
    """
    def pop_std(x):
        # Population (ddof=0) standard deviation for the phenotype table.
        return x.std(ddof=0)
    # Every event-combination string present in the modelled data; the
    # caller's selection is matched set-wise against these.
    all_modelled_events = ['WardStay,LabTest', 'WardStay',
       'WardStay,Waterlow,LabTest,PatientPosition',
       'WardStay,Waterlow,LabTest,Skinasses,TV,PatientPosition',
       'WardStay,Waterlow,LabTest',
       'WardStay,Waterlow,LabTest,Skinasses,PatientPosition',
       'WardStay,Waterlow,LabTest,TV,PatientPosition',
       'WardStay,Waterlow,Skinasses,PatientPosition',
       'WardStay,Waterlow,PatientPosition', 'WardStay,PatientPosition',
       'WardStay,LabTest,PatientPosition', 'WardStay,Waterlow',
       'WardStay,Skinasses', 'WardStay,Skinasses,PatientPosition',
       'WardStay,Waterlow,Skinasses', 'WardStay,LabTest,Skinasses',
       'WardStay,LabTest,Skinasses,PatientPosition',
       'WardStay,Waterlow,Skinasses,TV,PatientPosition',
       'WardStay,LabTest,Skinasses,TV',
       'WardStay,Waterlow,TV,PatientPosition',
       'WardStay,Waterlow,LabTest,Skinasses',
       'WardStay,LabTest,Skinasses,TV,PatientPosition',
       'WardStay,LabTest,TV', 'WardStay,LabTest,TV,PatientPosition',
       'WardStay,Waterlow,LabTest,TV', 'WardStay,TV,PatientPosition',
       'WardStay,Waterlow,TV', 'WardStay,TV', 'WardStay,Skinasses,TV',
       'WardStay,Waterlow,LabTest,Skinasses,TV']
    # WardStay is always part of the modelled combination.
    selected_list=list(selected_list)
    selected_list.append('WardStay')
    modelled_events =[]
    for item in all_modelled_events:
        #print(item.split(','))
        #print(selected_list)
        if set(item.split(','))==set(selected_list):
            modelled_events.append(item)
    if len(modelled_events)==0:
        print("No Events!")
        return
    Waterlow_Compliance = list(Waterlow_Standard)
    if len(Waterlow_Compliance)==1 and Waterlow_Compliance[0]!='Rule 1: Use Waterlow' and Waterlow_Compliance[0]!='No Waterlow':
        return "In RBH we only use Waterlow!"
    # Map each compliance rule name to its 'Pass' marker; selected rules pass,
    # unselected ones are recorded as 'Fail' in rule_group.
    diction={'Rule 1: Use Waterlow':{'rule 1': 'Pass'}, 'Rule 2: 4 Hours Admission':{'rule 2': 'Pass'}, 'Rule 3: AE 4hours':{'rule 3': 'Pass'}, 'Rule 4: Ward Transfer 4hours':{'rule 4': 'Pass'},'No Waterlow':'No Waterlow'}
    waterlow_group=[]
    rule_group={}
    for index, key in enumerate(diction):
        rule_number = index+1
        if key != 'No Waterlow':
            if key in Waterlow_Compliance:
                rule_group.update(diction[key])
            else:
                rule_group.update({'rule {}'.format(rule_number):'Fail'})
        else:
            waterlow_group.append(diction[key])
    # The table stores the rule dict as its string repr, so compare on str().
    waterlow_group.append(str(rule_group))
    df = pd.DataFrame()
    # Cached pressure-ulcer result table, pickled elsewhere in the project.
    Degree_ReAdmitted_HRG = patientKG.utils_pickle.read("PU_RESULT")
    #Degree_ReAdmitted_HRG = Degree_ReAdmitted_HRG[Degree_ReAdmitted_HRG['SpellHRG'].notna()]
    #Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] = Degree_ReAdmitted_HRG['ReAdmitted in DAYS'].fillna(-1)
    # for item in Label:
        #print(item)
    # Translate the UI's LOS wording to the table's Expected_LOS values.
    los_dic= {"Expected Long for HRG":"Normal", "Unexpected Long for HRG":"Abnormal","Unexpected short - live discharge":"Not yet", 'Unknown': 'Unknown'}
    LOS_LIST =[]
    for item in Expected_LOS:
        LOS_LIST.append(los_dic[item])
    try:
        df=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['Label'].isin(Label))&(Degree_ReAdmitted_HRG['Expected_LOS'].isin(LOS_LIST))& (Degree_ReAdmitted_HRG['modelled_events'].isin(modelled_events))
                    &(((Degree_ReAdmitted_HRG['HPS_AGE_AT_ADMISSION_DATE']>=Age[0])
                    &(Degree_ReAdmitted_HRG['HPS_AGE_AT_ADMISSION_DATE'] <=Age[1])))
                    &(Degree_ReAdmitted_HRG['Waterlow_Standard'].isin(waterlow_group))
                    #&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))
                    ]
    # df = pd.concat([df,data])
    except:
        # NOTE(review): bare except hides the real failure; assumed to mean
        # the selection produced no usable sample — confirm.
        return "No Sample!"
    #features = ['Sum_Degree','Global_Central', 'Total_LOS', 'Turnaround_Degree']
    # Project features to 2-D; helper also returns explained variance ratios
    # and component loadings (project helper — star-imported).
    principalComponents,pca_explained,pca_components = sliced_principle_components(df,features,2)
    principalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2'])
    kmax = 10
    best_n = best_eblow_k(principalDf.values.tolist(),kmax = 10)
    try:
        df = plot_vor(df,principalComponents, best_n)
    except:
        # Fall back to the plain (non-Voronoi) plot when plot_vor fails,
        # e.g. presumably when there are too few points for Voronoi.
        df = plot(df,principalComponents, best_n)
    #print(list(features))
    #Section Outcomes to Estimated groups
    total = df['activity_identifier'].nunique()
    outcomes = df.groupby(['labels','Label'])['activity_identifier'].nunique()
    fig = sankey(df)
    fig.show()
    #Section phenotype table with variables
    # Variance of min-max scaled features ranks variables for the table.
    selector = VarianceThreshold()
    x = df[list(features)].values #returns a numpy array
    min_max_scaler = preprocessing.MinMaxScaler()
    x_scaled = min_max_scaler.fit_transform(x)
    df_x = pd.DataFrame(x_scaled)
    selector.fit_transform(df_x)
    #print(selector.variances_)
    feature_list = []
    for item in features:
        feature_list.append(item)
    # NOTE(review): this loop only rebinds result_i and discards it — dead code.
    for i in range(len(pca_components)):
        for item in pca_components[i]:
            result_i=[item*pca_explained[i]]
    features_score = selector.variances_
    feature_rank = pd.DataFrame(list(zip(features, features_score)), columns=['variable_name','score'])
    #print(feature_rank)
    test3 =pd.DataFrame()
    #print(test3)
    # One row per feature: "mean (±pop-std)" per cluster, zeros excluded.
    for item in df[feature_list].columns:
        sub_df = df[df[item]!=0]
        test1=sub_df.groupby(['labels'])[item].agg({item:'mean'}).T
        test2=sub_df.groupby(['labels'])[item].agg({item:pop_std}).T
        test4 =pd.DataFrame()
        for item in sub_df['labels'].unique():
            test4[item] = test1[item].round(2).astype(str)+" (\u00B1"+test2[item].round(2).astype(str)+")"
        # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0).
        test3=test3.append(test4)
    test3 = test3.reindex(sorted(test3.columns),axis=1)
    test3['variable_name'] = test3.index
    #print(test3)
    test3 =test3.merge(feature_rank, how='left', on='variable_name')
    #test3 = test3.set_index('variable_name')
    # Distinct spell count per cluster, prepended as the table's first row.
    test5=pd.DataFrame(df.groupby(['labels'])['activity_identifier'].agg({'activity_identifier':lambda x: x.nunique()}).T)
    test3 = test3.sort_values(by='score',ascending=False)
    test3=pd.concat([test5,test3]).set_index('variable_name')
    display(test3)
    all_features = feature_list.copy()
    # Pairplot at most the five highest-variance features, hued by cluster.
    if len(feature_list)>5:
        feature_list = list(test3.sort_values(by='score',ascending=False).index[:5].values)
    feature_list.append('labels')
    # Reuse the cluster colours assigned by plot_vor/plot as the palette.
    lis_ = df[['labels','color']].drop_duplicates()
    palette={y['labels']:str(y['color']) for x,y in lis_.iterrows()}
    sns.pairplot(df[feature_list], hue="labels", diag_kws={'bw':'1.0'},palette=palette)
    #df[(df['POD_CODE'] == POD)][['ACTIVITY_IDENTIFIER','SpellHRG']+features+ ['ReAdmitted in DAYS','POD_CODE','ALL_DIAG','labels']]
    #df[(df['Waterlow_Standard'] == Waterlow_Standard)][['ACTIVITY_IDENTIFIER']+features+ ['labels']]
    return df[['activity_identifier']+all_features+ ['Waterlow_Standard','Careplan','Label','labels','Expected_LOS']]
def transform(PU_RESULT):
    """Collapse event-level PU_RESULT rows into one row per spell.

    Aggregates every column within each ``activity_identifier`` group:
    free-text assessment/wound columns are concatenated, blood-result
    columns are averaged, and the remaining spell-level columns take the
    group maximum (falling back to a string maximum for mixed dtypes).
    Duplicate rows are dropped after each aggregation pass.

    Parameters
    ----------
    PU_RESULT : pandas.DataFrame
        Must contain 'activity_identifier' plus every column listed in the
        three aggregation lists below.

    Returns
    -------
    pandas.DataFrame
        The de-duplicated, spell-level frame.
    """
    # Treat zeros as missing so they do not drag down the group means below.
    PU_RESULT = PU_RESULT.replace(0,np.NaN)
    # Blood results / scores: aggregated by group mean.
    avg_list = ['WL - Waterlow Score','Mean cell volume', 'Haematocrit', 'Red blood cell count',
       'Basophil count', 'White blood cell count', 'Mean cell haemoglobin',
       'Neutrophil count', 'Eosinophil count', 'Haemoglobin',
       'Lymphocyte count', 'Platelet count', 'Mean cell haemoglobin conc',
       'Monocyte count', 'Haemoglobin A1c IFCC', 'C-reactive protein',
       'Glucose fasting', 'Glucose random', 'Glucose, CSF',
       'Glucose, dialysis fluid', 'Glucose, fluid', 'Albumin']
    # Assessment / wound free-text fields: concatenated within each spell.
    concate_list = [
    'WL - Age',
    'WL - Broken Type',
    'WL - Build/Weight for Height',
    'WL - Continence',
    'WL - Gender',
    'WL - Lack of Appetite',
    'WL - Major Surgery / Trauma',
    'WL - Medication',
    'WL - Mobility',
    'WL - Neurological Deficit',
    'WL - Recent Weight Loss',
    'WL - Skin Type',
    'WL - Tissue Malnutrition',
    'WL - Weight Lost',
    'PATIENTPOSITION',
    'Referral Status Tissue Viability',
    'Wound Status',
    'Photograph Wound',
    'Wound Width',
    'Wound Depth',
    'Wound Exudate Odour',
    'Dressing Type:',
    'Wound Surrounding Tissue Colour',
    'Wound Cleansing',
    'Dressing Assessment:',
    'Wound Undermining Location',
    'Wound Tunneling Location',
    'Wound Odour',
    'Already Being Cared for in the Community',
    'Wound Exudate Colour',
    'Equipment Induced Pressure Ulcer',
    'Wound Edge',
    'Wound Percent Epithelialised:',
    'Equipment Type',
    'Wound Dressing Activity',
    'Wound Colour',
    'Next Dressing Change',
    'Wound Length',
    'Wound Percent Tissue Eschar',
    'Pressure Ulcer Datix Number',
    'Pressure Ulcer Datix completed',
    'Consent to Photograph',
    'Wound Percent Granulated',
    'Wound Percent Tissue Slough',
    'Wound Type - Wound Assessment',
    'Wound Tunneling Depth',
    'Wound Exudate Volume',
    'Wound Undermining Depth',
    'Wound Exudate Type',
    'Wound Surrounding Tissue',
    'Pressure Ulcer/Blister Category'
    ]
    # Spell-level attributes: aggregated by group max.
    max_list = ['modelled_events',
    'local_patient_identifier',
    'all_codes',
    'all_hrg',
    'HPS_ACTIVITY_DATE_TIME',
    'HPS_DISCHARGE_DATE_TIME_HOSPITAL_PROVIDER_SPELL',
    'Complex_Degree',
    'Global_Central',
    'Total_LOS',
    'Turnaround_Degree',
    'Waterlow_Standard',
    'Careplan',
    'HPS_ADMISSION_METHOD_CODE_HOSPITAL_PROVIDER_SPELL',
    'HPS_AGE_AT_ADMISSION_DATE',
    'PERSON_MARITAL_STATUS_CODE_DESC','weight',
    'height','Pressure Ulcer Present On Admission',
    'Label','DT_ATRISK','ward_move','careplan_ontime','numberof_repositioning','carplan_numberof_repositioning','careplan_compliance_degree']
    for item in concate_list:
        # NOTE(review): ' '.join(str(x)) joins the *characters* of the
        # Series' string repr, not the row values — looks unintended;
        # confirm before changing (downstream code may depend on the
        # current output format).
        PU_RESULT[item] = PU_RESULT.groupby(['activity_identifier'])[item].transform(lambda x: ' '.join(str(x)))
    PU_RESULT = PU_RESULT.drop_duplicates()
    print("Concate Finished")
    for item in avg_list:
        PU_RESULT[item] = PU_RESULT.groupby(['activity_identifier'])[item].transform(np.mean)
    PU_RESULT = PU_RESULT.drop_duplicates()
    print("Avg Finished")
    for item in max_list:
        try:
            PU_RESULT[item] = PU_RESULT.groupby(['activity_identifier'])[item].transform(np.max)
        except:
            # Mixed/object dtypes make the numeric max fail; retry on the
            # string form of the column.
            PU_RESULT[item] = PU_RESULT[item].astype(str)
            PU_RESULT[item] = PU_RESULT.groupby(['activity_identifier'])[item].transform(np.max)
        # De-duplicates after *every* column pass; the final call below is
        # then redundant but harmless (drop_duplicates is idempotent).
        PU_RESULT = PU_RESULT.drop_duplicates()
    PU_RESULT = PU_RESULT.drop_duplicates()
    return PU_RESULT
def check_blood_normal(Reference_Range,input_node_fields,PU_RESULT):
    """Flag each blood-test field as inside (1) / outside (0) its
    gender-specific reference range, or -1 when the reading is missing.

    Adds a ``<field>_normal`` column to ``PU_RESULT`` for every field in
    ``input_node_fields`` and returns the mutated frame.
    """
    # The reference table is keyed 'Male'/'Female'/'Unknown', while rows
    # carry the longer PERSON_GENDER_CODE_DESC strings — remap the keys.
    gender_aliases = {'Male': 'Sex is male',
                      'Female': 'Sex is female',
                      'Unknown': 'Sex is unknown'}
    for field in input_node_fields:
        print(field)
        ref_inuse = {gender_aliases.get(key, key): bounds
                     for key, bounds in Reference_Range[field].items()}

        def _flag(row, field=field, ref_inuse=ref_inuse):
            # Missing reading -> -1; otherwise compare against the range
            # for this row's gender.
            if pd.isnull(row[field]):
                return -1
            bounds = ref_inuse[row['PERSON_GENDER_CODE_DESC']]
            return 1 if float(bounds['min']) <= row[field] <= float(bounds['max']) else 0

        PU_RESULT[field + '_normal'] = PU_RESULT.apply(_flag, axis=1)
    return PU_RESULT
def apply_tag(PU_RESULT):
    """Derive cohort-grouping, compliance and blood-normality columns.

    Mutates and returns ``PU_RESULT`` with: demographic/risk group labels,
    a combined ``Cohort_Group`` string, Waterlow/care-plan compliance flags,
    and ``<field>_normal`` range flags for several blood panels.
    """
    # --- cohort grouping labels -------------------------------------------
    PU_RESULT['Age_Group'] = PU_RESULT.apply(lambda row: 'Over 60' if row['HPS_AGE_AT_ADMISSION_DATE'] >=60 else 'Under 60',axis=1)
    PU_RESULT['Gender_Group'] = PU_RESULT.apply(lambda row: 'Male' if row['PERSON_GENDER_CODE_DESC'] =='Sex is male' else ('Female' if row['PERSON_GENDER_CODE_DESC'] =='Sex is female' else 'Other'),axis=1)
    PU_RESULT['Risk_Group'] = PU_RESULT.apply(lambda row: 'PU High Risk' if row['WL - Waterlow Score'] >10 else 'PU Low Risk',axis=1)
    PU_RESULT['PU_Group'] = PU_RESULT.apply(lambda row: 'PU Patient' if row['Label'] =='Diagnosed_PU' else 'No PU',axis=1)
    PU_RESULT['Surgery_Group'] = PU_RESULT.apply(lambda row: 'Surgical Patient' if row['Surgery'] =='1' else 'No Surgical',axis=1)
    # NOTE(review): (weight/height)*100 is not the standard BMI (kg/m^2);
    # confirm the units of 'weight'/'height' before relying on these bands.
    PU_RESULT['BMI_Group'] = PU_RESULT.apply(lambda row: 'Unknown BMI - Missing value' if (row['height']==0 or row['weight'] ==0)else ('Obese' if (row['weight']/row['height'])*100 >=30 else ('Under Weight' if (row['weight']/row['height'])*100 <18.5 else ('Healthy' if 18.5<=(row['weight']/row['height'])*100<25 else 'Over Weight' ))),axis=1)
    # Comma-joined summary consumed by generate_cohort_pattern().
    PU_RESULT['Cohort_Group'] = PU_RESULT[['Age_Group', 'Gender_Group', 'Risk_Group','PU_Group','BMI_Group','Surgery_Group']].agg(','.join, axis=1)
    # --- Waterlow assessment flags ----------------------------------------
    # NOTE(review): the pass/fail and on-time/not-on-time pairs below use
    # identical formulas — confirm the on-time flags were meant to test a
    # different rule subset.
    PU_RESULT['Waterloo Assessment pass'] = PU_RESULT.apply(lambda row: 1 if row['Waterlow_Standard'] == "{'rule 1': 'Pass', 'rule 2': 'Pass', 'rule 3': 'Pass', 'rule 4': 'Pass'}" else 0,axis=1)
    PU_RESULT['Waterloo Assessment fail'] = PU_RESULT.apply(lambda row: 1 if row['Waterlow_Standard'] != "{'rule 1': 'Pass', 'rule 2': 'Pass', 'rule 3': 'Pass', 'rule 4': 'Pass'}" else 0,axis=1)
    PU_RESULT['Waterloo Assessment on time'] = PU_RESULT.apply(lambda row: 1 if row['Waterlow_Standard'] == "{'rule 1': 'Pass', 'rule 2': 'Pass', 'rule 3': 'Pass', 'rule 4': 'Pass'}" else 0,axis=1)
    PU_RESULT['Waterloo Assessment not on time'] = PU_RESULT.apply(lambda row: 1 if row['Waterlow_Standard'] != "{'rule 1': 'Pass', 'rule 2': 'Pass', 'rule 3': 'Pass', 'rule 4': 'Pass'}" else 0,axis=1)
    # --- care-plan / repositioning flags ----------------------------------
    PU_RESULT['PU plan on time'] = PU_RESULT.apply(lambda row: 1 if (row['careplan_ontime'] in ([1]) )else 0,axis=1)
    PU_RESULT['PU plan not on time'] = PU_RESULT.apply(lambda row: 1 if (row['careplan_ontime'] not in ([1]) )else 0,axis=1)
    PU_RESULT['Re-positioning on time'] = PU_RESULT.apply(lambda row: 1 if (row['Careplan'] in (['No careplan', 'No risk',"0,0"]) )else 0,axis=1)
    PU_RESULT['Re-positioning not on time'] = PU_RESULT.apply(lambda row: 1 if (row['Careplan'] not in (['No careplan', 'No risk',"0,0"]) )else 0,axis=1)
    # Compliance bands: 0 = none, 1 = <50%, 2 = 50-80%, 3 = >=80%.
    # NOTE(review): both compliance columns share the same source field and
    # formula — confirm 'Repositioning Compliance' should not use a
    # repositioning-specific degree.
    PU_RESULT['Careplan Compliance'] = PU_RESULT.apply(lambda row: 0 if (float(row['careplan_compliance_degree']) ==0) else (1 if float(row['careplan_compliance_degree'])<0.5 else (2 if 0.5<float(row['careplan_compliance_degree'])<0.8 else 3 )),axis=1)
    PU_RESULT['Repositioning Compliance'] = PU_RESULT.apply(lambda row: 0 if (float(row['careplan_compliance_degree']) ==0) else (1 if float(row['careplan_compliance_degree'])<0.5 else (2 if 0.5<float(row['careplan_compliance_degree'])<0.8 else 3 )),axis=1)
    # --- blood-panel normality flags (reference helpers defined elsewhere
    # in this module) ------------------------------------------------------
    Reference_Range,input_node_fields = CR_inputs_reference()
    PU_RESULT = check_blood_normal(Reference_Range,input_node_fields,PU_RESULT)
    Reference_Range,input_node_fields = Hb1AC_inputs_reference()
    PU_RESULT = check_blood_normal(Reference_Range,input_node_fields,PU_RESULT)
    Reference_Range,input_node_fields = Albumin_inputs_reference()
    PU_RESULT = check_blood_normal(Reference_Range,input_node_fields,PU_RESULT)
    Reference_Range,input_node_fields = FBC_inputs_reference()
    PU_RESULT = check_blood_normal(Reference_Range,input_node_fields,PU_RESULT)
    PU_RESULT=PU_RESULT.fillna(0)
    return PU_RESULT
def data_load_clean():
    """End-to-end ETL: load, clean, aggregate and pickle the PU dataset.

    Pulls the Variance_Analysis extract and the 2020/21 national tariff
    trimpoints from SQL, coerces the non-categorical columns to float,
    derives ``Expected_LOS`` per spell, collapses to spell level via
    :func:`transform`, tags cohorts via :func:`apply_tag`, one-hot encodes
    Sex/Ethnicity, and writes the results with ``utils_pickle``.

    Returns
    -------
    None — results are persisted as "PU_RESULT" and
    "PU_RESULT_DUMMY_COLUMNS" pickles.
    """
    Red004_Conn = Red004()
    PU_RESULT = pd.read_sql_query('SELECT * from [AdvancedAnalytics].[dbo].[Variance_Analysis]',Red004_Conn)
    HRG_stat = pd.read_sql_query('SELECT [FY] ,[HRG_CODE], [HRG_NAME] ,[ORDINARY_ELECTIVE_LONG_STAY_TRIMPOINT_DAYS] ,[NON_ELECTIVE_LONG_STAY_TRIMPOINT_DAYS] FROM [LOCAL_REFERENCE_DB].[ref].[NATIONAL_TARIFF_APC_OPROC] \
    where FY = \'2020/2021\'',Red004_Conn)
    Red004_Conn.close()
    # Drop spells whose outcome label is 'Empty'.
    PU_RESULT = PU_RESULT[~PU_RESULT['Label'].str.contains('Empty')]
    PU_RESULT=PU_RESULT.fillna(0)
    # Columns to EXCLUDE from the numeric coercion below (categorical /
    # free-text fields).
    encode_list=[#'Chief Complaint SNOMED Code'
    #,'PRESENTING_COMPLAINT'
    'modelled_events'
    ,'all_codes'
    ,'all_hrg'
    ,'WARD STAY LOCATION'
    ,'ETHNIC_CATEGORY_CODE'
    ,'PERSON_MARITAL_STATUS_CODE'
    ,'PERSON_GENDER_CODE_DESC'
    ,'ETHNIC_CATEGORY_CODE_DESC'
    ,'RELIGIOUS_OR_OTHER_BELIEF_SYSTEM_AFFILIATION'
    ,'PERSON_MARITAL_STATUS_CODE_DESC'
    ,'Waterlow_Standard'
    ,'Careplan'
    ,'WL - Age'
    ,'WL - Broken Type'
    ,'WL - Build/Weight for Height'
    ,'WL - Continence'
    ,'WL - Gender'
    ,'WL - Lack of Appetite'
    ,'WL - Major Surgery / Trauma'
    ,'WL - Medication'
    ,'WL - Mobility'
    ,'WL - Neurological Deficit'
    ,'WL - Recent Weight Loss'
    ,'WL - Skin Type'
    ,'WL - Tissue Malnutrition'
    #,'WL - Waterlow Score'
    ,'WL - Weight Lost'
    ,'Wound Status',
    'Photograph Wound',
    'Wound Width',
    'Wound Depth',
    'Wound Exudate Odour',
    'Dressing Type:',
    'Wound Surrounding Tissue Colour',
    'Wound Cleansing',
    'Dressing Assessment:',
    'Wound Undermining Location',
    'Wound Tunneling Location',
    'Wound Odour',
    'Already Being Cared for in the Community',
    'Wound Exudate Colour',
    'Equipment Induced Pressure Ulcer',
    'Wound Edge',
    'Wound Percent Epithelialised:',
    'Equipment Type',
    'Wound Dressing Activity',
    'Wound Colour',
    'Next Dressing Change',
    'Pressure Ulcer Present On Admission',
    'Wound Length',
    'Wound Percent Tissue Eschar',
    'Pressure Ulcer Datix Number',
    'Pressure Ulcer Datix completed',
    'Consent to Photograph',
    'Wound Percent Granulated',
    'Wound Percent Tissue Slough',
    'Wound Type - Wound Assessment',
    'Wound Tunneling Depth',
    'Wound Exudate Volume',
    'Wound Undermining Depth',
    'Wound Exudate Type',
    'Wound Surrounding Tissue',
    'Pressure Ulcer/Blister Category'
    ,'Referral Status Tissue Viability'
    ,'Referral - Tissue Viability','PATIENTPOSITION','Label']
    # Coerce every remaining column to float, scrubbing lab-report
    # placeholders; a few known columns get bespoke '<x' handling.
    for column in PU_RESULT[PU_RESULT.columns.difference(encode_list)]:
        try:
            PU_RESULT[column] = PU_RESULT[column].replace(' ', np.NaN).replace(['/'], np.NaN).replace('----',np.NaN).replace('See Lab Comment:',np.NaN).replace('----',np.NaN, regex=True).replace('[a-zA-Z]',np.NaN,regex=True).astype(float)
        except Exception as e:
            if column == 'C-reactive protein':
                # Below-detection readings '<1', '<0.2', ... become 0.5.
                PU_RESULT[column] = PU_RESULT[column].replace('<1', 0.5).replace(['/'], np.NaN).replace('<0.2', 0.5).replace('<0.3', 0.5).replace('<0.6', 0.5).replace(' ', np.NaN).replace('[a-zA-Z]',np.NaN,regex=True).astype(float)
            elif column =='Glucose, CSF':
                PU_RESULT[column] = PU_RESULT[column].replace('<0.1', 0.1).replace('<0.2', 0.5).replace('<0.3', 0.5).replace(' ', np.NaN).replace('[a-zA-Z]',np.NaN,regex=True).astype(float)
            elif e == 'cannot astype a datetimelike from [datetime64[ns]] to [float64]':
                # NOTE(review): this compares an Exception *object* to a
                # str, so it is always False; since the body is `pass` and
                # there is no final else, the net effect is the same
                # (unmatched columns are silently left unconverted).
                pass
    # try:
    #     PU_RESULT['all_hrg'] = PU_RESULT.apply(lambda row: list(set(row['all_hrg'].split(","))) if row['all_hrg'] != 0 else row['all_hrg'],axis=1)
    #     PU_RESULT['all_hrg']=PU_RESULT['all_hrg'].apply(str)
    # except:
    #     pass
    PU_RESULT=PU_RESULT.fillna(0)
    # Label each spell's length of stay against the national tariff
    # trimpoint for its HRG (days -> seconds); anything unmatched or
    # unparseable becomes "Unknown".
    for index,row in PU_RESULT.iterrows():
        #print(row['all_hrg'].strip("[']"))
        try:
            upper_boundary = int(HRG_stat[HRG_stat['HRG_CODE'] == row['all_hrg'].strip("[']")]['NON_ELECTIVE_LONG_STAY_TRIMPOINT_DAYS'])*3600*24
            lower_boundary = 2
            condition = 'Abnormal'
            if 2< row['Total_LOS'] <= upper_boundary:
                condition = 'Normal'
            PU_RESULT.at[index,'Expected_LOS'] = condition
        except:
            PU_RESULT.at[index,'Expected_LOS'] = "Unknown"
    print(len(PU_RESULT))
    # Collapse to one row per spell, then derive cohort/compliance tags.
    PU_RESULT = transform(PU_RESULT)
    print("Transform finished.")
    print(len(PU_RESULT))
    utils_pickle.write(PU_RESULT,"PU_RESULT")
    PU_RESULT= apply_tag(PU_RESULT)
    utils_pickle.write(PU_RESULT,"PU_RESULT")
    # One-hot encode Sex and Ethnicity, remembering the generated columns.
    column_map = {"Sex":'PERSON_GENDER_CODE_DESC', "Ethcity":'ETHNIC_CATEGORY_CODE_DESC'}
    list_dummy_column_map={}
    for item in column_map:
        dummy_column_map, PU_RESULT = get_dummy_list(column_map, PU_RESULT, item)
        list_dummy_column_map.update(dummy_column_map)
    #HRG_TLOS_AVG = pd.DataFrame(PU_RESULT.groupby(['activity_identifier','all_hrg'])['Total_LOS'].mean()).reset_index().groupby(['all_hrg'])['Total_LOS'].mean()
    #HRG_TLOS_STD = pd.DataFrame(PU_RESULT.groupby(['activity_identifier','all_hrg'])['Total_LOS'].mean()).reset_index().groupby(['all_hrg'])['Total_LOS'].std()
    #avg_=pd.DataFrame(pd.DataFrame(PU_RESULT.groupby(['activity_identifier','all_hrg'])['Total_LOS'].mean()).reset_index().groupby(['all_hrg'])['Total_LOS'].mean()).reset_index()
    #std_=pd.DataFrame(pd.DataFrame(PU_RESULT.groupby(['activity_identifier','all_hrg'])['Total_LOS'].mean()).reset_index().groupby(['all_hrg'])['Total_LOS'].std()).reset_index()
    #count_=pd.DataFrame(pd.DataFrame(PU_RESULT.groupby(['activity_identifier','all_hrg'])['Total_LOS'].mean()).reset_index().groupby(['all_hrg'])['Total_LOS'].count()).reset_index()
    #hrg_stat = avg_.merge(std_, on='all_hrg',how='left').merge(count_, on='all_hrg',how='left')
    utils_pickle.write(list_dummy_column_map, "PU_RESULT_DUMMY_COLUMNS")
    utils_pickle.write(PU_RESULT,"PU_RESULT")
    return
def PU_Demo():
    """Interactive notebook demo for the pressure-ulcer clustering view.

    Builds the option lists and wires ``plot_Spell_PU_Degree_PairCompare``
    (defined elsewhere in this module) to ipywidgets multi-selects and an
    age slider via ``interact``.

    Returns
    -------
    The ``interact`` handle.
    """
    # Outcome labels and candidate clustering variables.
    Label=['No-diagnose', 'Diagnosed_PU']
    Variable_selection = ['WL - Waterlow Score','Complex_Degree','Global_Central', 'Total_LOS', 'Turnaround_Degree','Mean cell volume', 'Haematocrit', 'Red blood cell count',
       'Basophil count', 'White blood cell count', 'Mean cell haemoglobin',
       'Neutrophil count', 'Eosinophil count', 'Haemoglobin',
       'Lymphocyte count', 'Platelet count', 'Mean cell haemoglobin conc',
       'Monocyte count', 'Haemoglobin A1c IFCC', 'C-reactive protein',
       'Glucose fasting', 'Glucose random', 'Glucose, CSF',
       'Glucose, dialysis fluid', 'Glucose, fluid', 'Albumin']
    # NOTE(review): several of the lists below (Demographic_variable_selection,
    # Assessment, Prevention, Patient_Cohort, Waterlow_Standard) are defined
    # but not passed to any widget — presumably kept for a fuller demo.
    Demographic_variable_selection = ['Weight','Sex', 'Age']
    Assessment = ['Waterloo Assessment pass', 'Waterloo Assessment fail', 'Waterloo Assessment on time', 'Waterloo Assessment not on time']
    Prevention = ['PU plan on time','PU plan not on time', 'Re-positioning on time','Re-positioning not on time']
    Patient_Cohort = ['Surgical Patient', 'Medical Patient', 'Ward Outliers','Over 60', 'Over Weight', 'Male', 'Female','PU High Risk', 'PU Patient','NO PU']
    Waterlow_Standard = [
    'No waterlow',
    "{'rule 1': 'Pass', 'rule 2': 'Pass', 'rule 3': 'Pass', 'rule 4': 'Pass'}",
    "{'rule 1': 'Pass', 'rule 2': 'Pass', 'rule 3': 'Pass', 'rule 4': 'Fail'}",
    "{'rule 1': 'Pass', 'rule 2': 'Fail', 'rule 3': 'Fail', 'rule 4': 'Fail'}",
    "{'rule 1': 'Pass', 'rule 2': 'Fail', 'rule 3': 'Fail', 'rule 4': 'Pass'}"]
    Waterlow_Compliance = ['Rule 1: Use Waterlow', 'Rule 2: 4 Hours Admission', 'Rule 3: AE 4hours', 'Rule 4: Ward Transfer 4hours','No Waterlow']
    LOS = ["Expected Long for HRG", "Unexpected Long for HRG","Unexpected short - live discharge","Unknown"]
    events_list =['Waterlow','LabTest','Skinasses','TV','PatientPosition']
    # Wire up the widget controls; each SelectMultiple feeds the
    # corresponding keyword of plot_Spell_PU_Degree_PairCompare.
    stats = interact(plot_Spell_PU_Degree_PairCompare,
            Label=widgets.SelectMultiple(
            options=
            Label,
            value= Label,
            #rows=10,
            description='Pressure Ulcer',
            disabled=False
        ),Expected_LOS=widgets.SelectMultiple(
            options=
            LOS,
            value= LOS,
            #rows=10,
            description='Expected LOS',
            disabled=False
        ),selected_list=widgets.SelectMultiple(
            options=
            events_list,
            value= ['Waterlow','LabTest'],
            #rows=10,
            description='Events',
            disabled=False
        )
        ,Age=widgets.IntRangeSlider(value=[0,120],
            min=0,
            max=120,
            step=1,
            description='Age:',
            disabled=False,
            continuous_update=False,
            orientation='horizontal',
            readout=True,
            readout_format='d')
        ,Waterlow_Standard=widgets.SelectMultiple(
            options=
            Waterlow_Compliance,
            value= ['Rule 1: Use Waterlow'] ,
            #rows=10,
            description='WaterlowStandard',
            disabled=False)
        ,features=widgets.SelectMultiple(
            options=
            Variable_selection,
            value= ['Complex_Degree','Global_Central', 'Total_LOS', 'Turnaround_Degree','WL - Waterlow Score'] ,
            #rows=10,
            description='Variables',
            disabled=False
        )
        )
    return stats
def sub_timeline_layout(tt, labels=None):
    """Compute an x/y layout for a patient-event graph ordered by time.

    Nodes are sorted by their activity timestamp; x advances with the
    elapsed time between consecutive events (seconds / 18).  When *labels*
    is given, each distinct event type (label with digits stripped) is
    lifted onto its own horizontal band so event streams do not overlap.

    Parameters
    ----------
    tt : networkx-style graph exposing ``nodes(data=True)``
        Node features must carry 'name' and 'activity_start_time'
        ('activity_end_time' for the Spell_End node), formatted
        '%Y.%m.%d %H:%M:%S'.
    labels : dict, optional
        Node index -> display label; '<n>._start' / '<n>._end' markers are
        skipped when collecting event types.

    Returns
    -------
    dict
        Node index -> ``np.array([x, y])`` position.
    """
    node_times = {}
    for node_index, node_feature in tt.nodes(data=True):
        # Spell_End is the only node whose timestamp lives in
        # 'activity_end_time'; everything else (including Spell_Start)
        # uses 'activity_start_time'.
        if node_feature['name'] == 'Spell_End':
            node_times[node_index] = node_feature['activity_end_time']
        else:
            node_times[node_index] = node_feature['activity_start_time']
    # Zero-padded '%Y.%m.%d %H:%M:%S' strings sort chronologically.
    sorted_dic = sorted(node_times.items(), key=lambda kv: kv[1])
    pos = {}
    x = 0
    y = 0
    event_types = []
    for i in range(len(sorted_dic)):
        if i > 0:
            diff = datetime.strptime(sorted_dic[i][1],'%Y.%m.%d %H:%M:%S')-datetime.strptime(sorted_dic[i-1][1],'%Y.%m.%d %H:%M:%S')
            # 18 seconds per x-unit.  NOTE(review): diff.seconds ignores
            # whole days — confirm spells longer than 24h render as
            # intended (diff.total_seconds() may be what was meant).
            x = x + (diff.seconds)/18
            pos.update({sorted_dic[i][0]:np.array([x,y])})
        else:
            pos.update({sorted_dic[0][0]:np.array([0,0])})
        if labels is not None:
            # Strip digits to recover the event type; skip the synthetic
            # spell start/end markers.
            result = ''.join([ch for ch in labels[sorted_dic[i][0]] if not ch.isdigit()])
            if result == '._start':
                continue
            elif result == '._end':
                continue
            else:
                event_types.append(result)
    unique_events = set(event_types)
    pos_y = 20
    for item in unique_events:
        for i in range(len(sorted_dic)):
            # NOTE(review): the event type is used as a regex pattern
            # unescaped, so metacharacters like '.' match any character —
            # re.escape may be intended; confirm before tightening.
            event_match = re.match( r'{}'.format(item), labels[sorted_dic[i][0]], re.M|re.I)
            if event_match:
                x= pos[sorted_dic[i][0]][0]
                y = pos_y
                pos.update({sorted_dic[i][0]:np.array([x,y])})
        pos_y = pos_y + 30
    return pos
def PU_Path_Vis_Demo_fromRecords(item = 1234567):
    """Render a stored patient knowledge-graph spell as a HoloViews graph.

    Loads the pre-computed pickled graph for the spell, abbreviates node
    names for on-graph labels, lays the nodes out on a timeline and styles
    the result.

    Parameters
    ----------
    item : int or str
        Spell number; keys the pickle under GraphCalculationResults/Ward_Stay/.

    Returns
    -------
    panel.Row containing the styled graph with its labels overlaid.
    """
    # Load the pre-computed knowledge graph for this spell.
    graph = patientKG.utils_pickle.read("GraphCalculationResults/Ward_Stay/KG_{}".format(item))
    # Reverse the node dictionary: node index -> human-readable name.
    label = dict((int(v),k) for k,v in graph.node_dic.items())
    combined = graph.graph
    # Abbreviate node names (first letter of each word); the two synthetic
    # spell start/end nodes (indices 0 and 1) keep their full names.
    attr={}
    for index, node in combined.nodes(data=True):
        if index==0 or index == 1:
            attr.update({index:{'abv': node['name']}})
        else:
            attr.update({index:{'abv':"".join(e[0] for e in node['name'].split())}})
    nx.set_node_attributes(combined, attr)
    # Fix: 'defaults' was previously only defined on a commented-out line,
    # so hv.opts.defaults() raised NameError at runtime.  Restore the
    # plot-size defaults locally.
    defaults = dict(width=1000, height=1000, padding=0.1)
    hv.opts.defaults(
        opts.EdgePaths(**defaults), opts.Graph(**defaults), opts.Nodes(**defaults))
    pos = graph.timeline_layout(label)
    # Node colour encodes status (see PU_Path_DEMO legend).
    optss = dict(node_size='size', edge_line_width=0.5 ,node_color='color', cmap=['#30a2da','yellow','red','green','black'])
    simple_graph=hv.Graph.from_networkx(combined, pos).options(**optss)
    labels = hv.Labels(simple_graph.nodes, ['x', 'y'], 'abv')
    return pn.Row(simple_graph* labels.opts(text_font_size='16pt', text_color='white', bgcolor='gray'))
# days_nodes = {}
# for i in range(0, 16, 1):
# nodes_list = [0,1]
# for k,v in graph.graph.nodes(data=True):
# if k > 1:
# diff = (datetime.strptime(v['activity_start_time'],'%Y.%m.%d %H:%M:%S') -datetime.strptime(graph.graph.nodes[0]['activity_start_time'],'%Y.%m.%d %H:%M:%S')).total_seconds()
# if diff <= i*3600*24:
# nodes_list.append(k)
# days_nodes.update({i:nodes_list})
# debug_ = {i: hv.Graph.from_networkx(graph.graph.subgraph(days_nodes[i]), sub_timeline_layout(graph.graph.subgraph(days_nodes[i]),dict((int(v),k) for k,v in graph.node_dic.items())), iterations=i, seed=10) for i in range(0, 16, 1)}
# return hv.HoloMap({i: hv.Graph.from_networkx(graph.graph.subgraph(days_nodes[i]), sub_timeline_layout(graph.graph.subgraph(days_nodes[i]),dict((int(v),k) for k,v in graph.node_dic.items())), iterations=i, seed=10) for i in range(0, 16, 1)},
# kdims='Iterations')
def PU_Path_Vis_Demo_live(item = 1234567):
    """Build the spell visualisation from a live database query.

    NOTE: while the modelling process itself works, running Jupyter as a
    Windows service logs in as 'RBBH_MSDOMAIN1\\\\RBHDBSRED008$', which has
    no database access, so the live query currently fails.
    """
    spell_number = str(item)
    return test_compose(spell_number)
def PU_Path_DEMO():
    """Interactive demo: look up a spell number and render its pathway.

    Prints a legend for the node abbreviations/colours, then wires
    PU_Path_Vis_Demo_fromRecords to an ipywidgets text box via interact.

    Returns
    -------
    The interact handle, or the string "No Such Spell!" on any failure.
    """
    try:
        print("\
        WA: Waterlow Assessemnt \n \
        Node in Red: Not implement. \n \
        Cplb: C-reactive protein level, blood\n \
        Node in Red: test result is out of normal range;\n \
        PP: Patient Position\n \
        Node in red: breach 6 hours repositioning requirement\n \
        Node in yellow: breach 4 hours repositioning requirement\n \
        WS: Ward Stay\n \
        Node in Red: Waterlow assessment not performed within hours after ward transfer\n \
        Fbcb: Full blood count, blood\n \
        Node in Red: test result is out of normal range;")
        stats = interact(PU_Path_Vis_Demo_fromRecords,
                    item=widgets.Text(value='1234567',
                            placeholder='Type in Spell Number',
                            description='Spell:',
                            disabled=False))
        return stats
    except Exception:
        # Any failure (bad spell number, missing pickle, widget backend)
        # falls back to a friendly message.  Narrowed from a bare `except:`
        # which also swallowed KeyboardInterrupt/SystemExit.
        return "No Such Spell!"
def generate_cohort_pattern(Patient_Cohort):
    """Build a regex of lookaheads that selects the requested cohort.

    Plain criteria become mandatory lookaheads ``(?=.*X)``.  When both
    halves of a mutually-exclusive pair (e.g. Male/Female) are selected
    consecutively, they are OR-ed together; any chosen BMI bands are OR-ed
    into a single trailing alternation group.
    """
    exclusive_pairs = [('Male', 'Female'), ('PU Patient', 'NO PU'),
                       ('No Surgical', 'Surgical Patient'),
                       ('PU Low Risk', 'PU High Risk')]
    bmi_bands = ['Healthy', 'Under Weight', 'Over Weight', 'Obese',
                 'Unknown BMI - Missing value']
    pattern = ''
    pair_hits = []
    bmi_hits = []
    for criterion in Patient_Cohort:
        matched = [(x, y) for x, y in exclusive_pairs if criterion in (x, y)]
        if not matched and criterion not in bmi_bands:
            # Stand-alone criterion: required outright.
            pattern += '(?=.*{})'.format(criterion)
        elif criterion in bmi_bands:
            bmi_hits.append(criterion)
        else:
            pair_hits.append(matched)
    # groupby is consecutive: two hits on the same pair are merged only
    # when they are adjacent in the selection order.
    grouped = [[grp, sum(1 for _ in members)]
               for grp, members in itertools.groupby(pair_hits)]
    for grp, count in grouped:
        if count > 1:
            # Both halves of the pair were chosen: allow either.
            pattern += '((?=.*{})|(?=.*{}))'.format(grp[0][0], grp[0][1])
        elif count == 1:
            # Only one half chosen: find which criterion it was and
            # require it.
            for criterion in Patient_Cohort:
                matched = [(x, y) for x, y in exclusive_pairs
                           if criterion in (x, y)]
                if len(matched) == 1 and matched == grp:
                    pattern += '(?=.*{})'.format(criterion)
    if bmi_hits:
        pattern += '(' + '|'.join('(?=.*{})'.format(band)
                                  for band in bmi_hits) + ')'
    return pattern
def get_dummy_list(column_map, df, column):
    """One-hot encode the source column behind *column* and report the
    generated dummy column names.

    Parameters
    ----------
    column_map : dict mapping friendly name -> raw column name.
    df : pandas.DataFrame (mutated: gains a copy of the raw column under
        the friendly name).
    column : friendly name to encode, e.g. "Sex".

    Returns
    -------
    (dict, DataFrame)
        ``{column: [new dummy column names]}`` and the encoded frame.
    """
    source_columns = [column_map[column]]
    # Keep the raw values under the friendly name so they survive encoding
    # (get_dummies drops the source column).
    df[column] = df[source_columns]
    encoded_df = pd.get_dummies(df, columns=source_columns, prefix=["Type_is"])
    added_columns = list(set(encoded_df.columns) - set(df.columns))
    return {column: added_columns}, encoded_df
def predefined_cohort(Patient_Cohort):
    """Return the PU_RESULT rows matching the selected cohort criteria.

    Parameters
    ----------
    Patient_Cohort : iterable of str
        Cohort criteria passed to generate_cohort_pattern().

    Returns
    -------
    pandas.DataFrame
        Subset of the pickled PU_RESULT frame whose 'Cohort_Group' string
        satisfies every lookahead in the generated pattern.
    """
    # (Removed an unused `df = pd.DataFrame()` local.)
    PU_RESULT = patientKG.utils_pickle.read("PU_RESULT")
    pattern = generate_cohort_pattern(Patient_Cohort)
    PU_RESULT_COHORT = PU_RESULT.loc[(PU_RESULT['Cohort_Group'].str.contains(pattern))]
    return PU_RESULT_COHORT
TEMPLATE = u"""
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>PivotTable.js</title>
<!-- external libs from cdnjs -->
<link rel="stylesheet" type="text/css" href="https://cdnjs.cloudflare.com/ajax/libs/c3/0.4.11/c3.min.css">
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/c3/0.4.11/c3.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/jquery/1.11.2/jquery.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.11.4/jquery-ui.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/jquery-csv/0.71/jquery.csv-0.71.min.js"></script>
<link rel="stylesheet" type="text/css" href="https://cdnjs.cloudflare.com/ajax/libs/pivottable/2.19.0/pivot.min.css">
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/pivottable/2.19.0/pivot.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/pivottable/2.19.0/d3_renderers.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/pivottable/2.19.0/c3_renderers.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/pivottable/2.19.0/export_renderers.min.js"></script>
<style>
body {font-family: Verdana;}
.node {
border: solid 1px white;
font: 10px sans-serif;
line-height: 12px;
overflow: hidden;
position: absolute;
text-indent: 2px;
}
.c3-line, .c3-focused {stroke-width: 3px !important;}
.c3-bar {stroke: white !important; stroke-width: 1;}
.c3 text { font-size: 12px; color: grey;}
.tick line {stroke: white;}
.c3-axis path {stroke: grey;}
.c3-circle { opacity: 1 !important; }
.c3-xgrid-focus {visibility: hidden !important;}
</style>
</head>
<body>
<script type="text/javascript">
$(function(){
$("#output").pivotUI(
$.csv.toArrays($("#output").text())
, $.extend({
renderers: $.extend(
$.pivotUtilities.renderers,
$.pivotUtilities.c3_renderers,
$.pivotUtilities.d3_renderers,
$.pivotUtilities.export_renderers
),
hiddenAttributes: [""]
}
, {
onRefresh: function(config) {
var config_copy = JSON.parse(JSON.stringify(config));
//delete some values which are functions
delete config_copy["aggregators"];
delete config_copy["renderers"];
//delete some bulky default values
delete config_copy["rendererOptions"];
delete config_copy["localeStrings"];
$("#output2").text(JSON.stringify(config_copy, undefined, 2));
}
}
, %(kwargs)s
, %(json_kwargs)s)
).show();
});
</script>
<div id="output" style="display: none;">%(csv)s</div>
<textarea id="output2"
style="float: left; width: 0px; height: 0px; margin: 0px; opacity:0;" readonly>
</textarea>
<button onclick="copyTextFunction()">Copy settings</button>
<script>
function copyTextFunction() {
var copyText = document.getElementById("output2");
copyText.select();
document.execCommand("copy");
}
</script>
</body>
</html>
"""
def pivot_cht_ui(df, name="test", url="",
        width="100%", height="500",json_kwargs='', **kwargs):
    """Render *df* as an interactive PivotTable.js page and embed it.

    Writes ``<name>.html`` built from TEMPLATE (the frame inlined as CSV)
    and returns an IFrame pointing at *url* (if given) or the written file.

    Parameters
    ----------
    df : pandas.DataFrame
        Data to pivot.
    name : str
        Output file stem for the generated HTML.
    url : str
        Optional absolute URL to display instead of the local file.
    width, height : str
        IFrame geometry.
    json_kwargs : str
        Raw JSON merged into the pivotUI configuration.
    **kwargs
        Extra pivotUI options; JSON-encoded into the template.
    """
    outfile_path = name + '.html'
    with io.open(outfile_path, 'wt', encoding='utf8') as outfile:
        # On Python 3, to_csv() always returns str — the old
        # `hasattr(csv, 'decode')` Python-2 branch was dead code.
        # Renamed the local so it no longer shadows the stdlib `csv` name.
        csv_text = df.to_csv(encoding='utf8')
        outfile.write(TEMPLATE %
                      dict(csv=csv_text, kwargs=json.dumps(kwargs), json_kwargs=json_kwargs))
    return IFrame(src=url or outfile_path, width=width, height=height)
def get_pvalue(df,feature_list,categorical_feature_list):
    """Compute a per-feature significance test against the PU outcome.

    Continuous features get a one-way ANOVA comparing the 'Diagnosed_PU'
    group with the first non-PU label group; categorical features get a
    chi-square test on the feature x Label contingency table.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a 'Label' column with a 'Diagnosed_PU' level.
    feature_list : list of str
        Columns to test, in order.
    categorical_feature_list : list of str
        Subset of feature_list to treat as categorical.

    Returns
    -------
    list of (feature_name, p_value) tuples in feature_list order.
    """
    rows_list = []
    for item in df[feature_list].columns:
        if item not in categorical_feature_list:
            dia_list = []
            undiag_list = []
            for label in df['Label'].unique():
                if label == 'Diagnosed_PU':
                    dia_list.append(df[df['Label']==label][item].values)
                else:
                    undiag_list.append(df[df['Label']==label][item].values)
            # NOTE(review): only the first group on each side is used; with
            # more than two outcome labels the remaining groups are ignored
            # — confirm this is intended.
            diagnosed_values = list(dia_list[0])
            undiagnosed_values = list(undiag_list[0])
            fvalue, pvalue = stats.f_oneway(diagnosed_values, undiagnosed_values)
            rows_list.append((item,pvalue))
        else:
            contigency= pd.crosstab(df[item], df['Label'])
            c, p, dof, expected = chi2_contingency(contigency)
            rows_list.append((item,p))
    return rows_list
#def plot_func(df, Outcome, Patient_Cohort, DateRange,Demographic, Assessment, Prevention, Blood_Results, Blood_Normalty,Management):
def plot_Spell_PU_Degree_PairCompare_v2(Outcome, Patient_Cohort, DateRange,Demographic, Assessment, Prevention, Blood_Results, Blood_Normalty,Management):
#Corhot Selection
#print(list(Patient_Cohort))
if list(Patient_Cohort) == ['All']:
df= patientKG.utils_pickle.read("PU_RESULT")
#plot_func(df, Outcome, Patient_Cohort, DateRange,Demographic, Assessment, Prevention, Blood_Results, Blood_Normalty,Management)
else:
Patient_Cohort = list(Patient_Cohort)
try:
df=predefined_cohort(Patient_Cohort)
if df.empty:
return "No Sample!"
#else:
#plot_func(df, Outcome, Patient_Cohort, DateRange,Demographic, Assessment, Prevention, Blood_Results, Blood_Normalty,Management)
# df = pd.concat([df,data])
except:
print("No Sample!")
sys.exit(1)
return "No Sample!"
def pop_std(x):
return x.std(ddof=0)
df['date'] = pd.to_datetime(df['HPS_ACTIVITY_DATE_TIME'])
mask = (df['date'] > DateRange[0]) & (df['date'] <= DateRange[1])
df = df.loc[mask]
features = []
df= df.fillna(0)
Demographic_map = {"Weight":"weight",
"Sex":['Type_is_Sex is female', 'Type_is_Sex is male','Type_is_Sex is unknown', 'Type_is_Unspecified']
,"Age":'HPS_AGE_AT_ADMISSION_DATE'
,"Ethcity":'ETHNIC_CATEGORY_CODE_DESC'}
Assessment_map = {"Waterlow Assessment Outcomes":"Waterloo Assessment pass", "Waterloo Assessment fail":"Waterloo Assessment fail", "Waterlow Assessment timeliness":"Waterloo Assessment on time","Waterloo Assessment not on time":"Waterloo Assessment not on time" }
Prevention_map = {'PU plan initia timeliness':'PU plan on time','PU plan not on time':'PU plan not on time', 'Re-positioning timeliness':'Re-positioning on time','Re-positioning not on time':'Re-positioning not on time','Re-positioning Compliance':'Repositioning Compliance'}
Management_map={'Ward Move':'ward_move'}
One_hot_encoding_map= utils_pickle.read("PU_RESULT_DUMMY_COLUMNS")
for item in Demographic:
if item not in One_hot_encoding_map.keys():
features.append(Demographic_map[item])
else:
features = features +list(One_hot_encoding_map[item])
for item in Assessment:
if item not in One_hot_encoding_map.keys():
features.append(Assessment_map[item])
else:
features = features +list(One_hot_encoding_map[item])
for item in Prevention:
if item not in One_hot_encoding_map.keys():
features.append(Prevention_map[item])
else:
features = features +list(One_hot_encoding_map[item])
for item in Blood_Results:
if item not in One_hot_encoding_map.keys():
features.append(item)
else:
features = features +list(One_hot_encoding_map[item])
for item in Blood_Normalty:
if item not in One_hot_encoding_map.keys():
features.append(item+'_normal')
else:
features = features +list(One_hot_encoding_map[item])
for item in Management:
if item not in One_hot_encoding_map.keys():
features.append(Management_map[item])
else:
features = features +list(One_hot_encoding_map[item])
#features = ['Sum_Degree','Global_Central', 'Total_LOS', 'Turnaround_Degree']
try:
principalComponents,pca_explained,pca_components = sliced_principle_components(df,features,2)
principalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2'])
kmax = 10
best_n = best_eblow_k(principalDf.values.tolist(),kmax = 10)
try:
df = plot_vor(df,principalComponents, best_n)
except:
df = plot(df,principalComponents, best_n)
#print(list(features))
#Section Outcomes to Estimated groups
total = df['activity_identifier'].nunique()
outcomes = df.groupby(['labels','Label'])['activity_identifier'].nunique()
#print("Sankey")
fig = sankey(df)
fig.show()
#Section phenotype table with variables
selector = VarianceThreshold()
x = df[list(features)].values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df_x = pd.DataFrame(x_scaled)
selector.fit_transform(df_x)
#print(selector.variances_)
feature_list = []
for item in features:
feature_list.append(item)
for i in range(len(pca_components)):
for item in pca_components[i]:
result_i=[item*pca_explained[i]]
features_score = selector.variances_
feature_rank = pd.DataFrame(list(zip(features, features_score)), columns=['variable_name','score'])
#print(feature_rank)
test3 =pd.DataFrame()
#print(test3)
categorical_feature_list = []
for item in Blood_Normalty:
if item not in One_hot_encoding_map.keys():
categorical_feature_list.append(item+'_normal')
for item in Assessment:
if item not in One_hot_encoding_map.keys():
categorical_feature_list.append(Assessment_map[item])
for item in Prevention:
if item not in One_hot_encoding_map.keys():
categorical_feature_list.append(Prevention_map[item])
for key,value in One_hot_encoding_map.items():
for item in value:
categorical_feature_list.append(item)
for item in df[feature_list].columns:
if item not in categorical_feature_list:
sub_df = df[df[item]!=0]
#test1=sub_df.groupby(['labels'])[item].agg(item='mean').T
#test2=sub_df.groupby(['labels'])[item].agg(item=pop_std).T
test1=sub_df.groupby(['labels'])[item].agg([(item, 'mean')]).T
test2=sub_df.groupby(['labels'])[item].agg([(item, pop_std)]).T
#print(test1)
#print('Test 1 over')
#print(test2)
#test1=sub_df.groupby(['labels'])[item].agg({item:'mean'}).T
#test2=sub_df.groupby(['labels'])[item].agg({item:pop_std}).T
test4 =pd.DataFrame()
for item in sub_df['labels'].unique():
test4[item] = test1[item].round(2).astype(str)+" (\u00B1"+test2[item].round(2).astype(str)+")"
else:
test1=df.groupby(['labels',item])[item].agg([(item, 'count')]).T
test2=pd.DataFrame(df.groupby(['labels',item])[item].agg([('count', 'count')]).reset_index())
#print(test1)
#print(test2)
#test1=df.groupby(['labels',item])[item].agg(item='count').T
#test2=pd.DataFrame(df.groupby(['labels',item])[item].agg(count='count').reset_index())
#test1=df.groupby(['labels',item])[item].agg({item:'count'}).T
#test2=pd.DataFrame(df.groupby(['labels',item])[item].agg({'count':'count'}).reset_index())
test4 =pd.DataFrame()
index = pd.Index([item])
test4['label'] =[item]
for label in df['labels'].unique():
try:
zero_count = str(int(test2[(test2['labels']==label) & (test2[item]==0)]['count']))
except:
zero_count = 0
try:
one_count = str(int(test2[(test2['labels']==label) & (test2[item]==1)]['count']))
except:
one_count = 0
test4[label] = [str(one_count)+" ("+str(zero_count)+")" ]
test4=test4.set_index('label')
test3=test3.append(test4)
rows_list = get_pvalue(df,feature_list,categorical_feature_list)
pvalue = pd.DataFrame(rows_list, columns = ['variable_name','p-value (ANOVA&Chi-square)'])
test3 = test3.reindex(sorted(test3.columns),axis=1)
test3['variable_name'] = test3.index
#print(test3)
test3 =test3.merge(feature_rank, how='left', on='variable_name')
test3 = test3.merge(pvalue, how='left', on='variable_name')
#test3 = test3.set_index('variable_name')
test5=pd.DataFrame(df.groupby(['labels'])['activity_identifier'].agg([('activity_identifier', lambda x: x.nunique())]).T)
#test5=pd.DataFrame(df.groupby(['labels'])['activity_identifier'].agg({'activity_identifier':lambda x: x.nunique()}).T)
#test5=pd.DataFrame(df.groupby(['labels'])['activity_identifier'].agg(activity_identifier=lambda x: x.nunique()).T)
test3 = test3.sort_values(by='score',ascending=False)
#print(test5)
test3=pd.concat([test5,test3]).set_index('variable_name')
display(test3)
all_features = feature_list.copy()
if len(feature_list)>5:
feature_list = list(test3.sort_values(by='score',ascending=False).index[:5].values)
feature_list.append('labels')
lis_ = df[['labels','color']].drop_duplicates()
palette={y['labels']:str(y['color']) for x,y in lis_.iterrows()}
#sns.pairplot(df[feature_list], hue="labels", diag_kws={'bw':'1.0'},palette=palette)
#df[(df['POD_CODE'] == POD)][['ACTIVITY_IDENTIFIER','SpellHRG']+features+ ['ReAdmitted in DAYS','POD_CODE','ALL_DIAG','labels']]
#df[(df['Waterlow_Standard'] == Waterlow_Standard)][['ACTIVITY_IDENTIFIER']+features+ ['labels']]
#Known Jupyternotebook issue of nondeterministic output display
#time.sleep(100)
features_display= []
for item in all_features:
if 'Type_is' not in item:
features_display.append(item)
utils_pickle.write(df[['activity_identifier','Sex','Ethcity']+features_display+ ['Waterlow_Standard','Careplan','Label','labels','Expected_LOS','Cohort_Group']],"Cohort_Analysis")
return pivot_cht_ui(df[['activity_identifier','Sex','Ethcity']+features_display+ ['Waterlow_Standard','Careplan','Label','labels','Expected_LOS']], outfile_path='pivottablejs.html',json_kwargs="""
{
"derivedAttributes": {},
"hiddenAttributes": [
""
],
"hiddenFromAggregators": [],
"hiddenFromDragDrop": [],
"menuLimit": 500,
"cols": [],
"rows": [
"activity_identifier",
"Label",
"labels"
],
"vals": [
"activity_identifier"
],
"rowOrder": "key_a_to_z",
"colOrder": "key_a_to_z",
"exclusions": {},
"inclusions": {},
"unusedAttrsVertical": 85,
"autoSortUnusedAttrs": false,
"sorters": {},
"outfile_path": "pivottablejs.html",
"inclusionsInfo": {},
"aggregatorName": "Count Unique Values",
"rendererName": "Table"
}
"""
)
except:
print("Something Wrong with Sample Size or Calculation! Display pivot table only!")
utils_pickle.write(df[['activity_identifier','Sex','Ethcity']+features_display+ ['Waterlow_Standard','Careplan','Label','labels','Expected_LOS','Cohort_Group']],"Cohort_Analysis")
return pivot_cht_ui(df[['activity_identifier','Sex','Ethcity']+ ['Waterlow_Standard','Careplan','Label','Expected_LOS']], outfile_path='pivottablejs.html',json_kwargs="""
{
"derivedAttributes": {},
"hiddenAttributes": [
""
],
"hiddenFromAggregators": [],
"hiddenFromDragDrop": [],
"menuLimit": 500,
"cols": [],
"rows": [
"activity_identifier",
"Label"
],
"vals": [
"activity_identifier"
],
"rowOrder": "key_a_to_z",
"colOrder": "key_a_to_z",
"exclusions": {},
"inclusions": {},
"unusedAttrsVertical": 85,
"autoSortUnusedAttrs": false,
"sorters": {},
"outfile_path": "pivottablejs.html",
"inclusionsInfo": {},
"aggregatorName": "Count Unique Values",
"rendererName": "Table"
}
"""
)
#print(len(df))
#print(DateRange)
def PU_Demo_v2():
    """Build the interactive widget panel for the PU cohort-comparison demo.

    Creates a date-range slider plus multi-select widgets for outcomes,
    patient cohort, demographics, risk assessments, prevention events,
    blood results/normality and patient management, wires them to
    ``plot_Spell_PU_Degree_PairCompare_v2`` via ``interact_manual``
    (so the plot only refreshes on button click) and returns the
    interactive handle.
    """
    # Option lists offered by the multi-select widgets.
    blood_normalty_options = [
        'Red blood cell count',
        'Mean cell haemoglobin',
        'Haemoglobin',
        'Haematocrit',
        'Platelet count',
        'Mean cell volume',
        'Mean cell haemoglobin conc',
        'White blood cell count',
        'Monocyte count',
        'Neutrophil count',
        'Lymphocyte count',
        'Eosinophil count',
        'Basophil count',
        'Albumin',
        'C-reactive protein',
        'Haemoglobin A1c IFCC',
    ]
    blood_results_options = [
        'Glucose fasting', 'Glucose random', 'Glucose, CSF',
        'Glucose, dialysis fluid', 'Glucose, fluid',
    ]
    demographic_options = ['Weight', 'Sex', 'Age', 'Ethcity']
    assessment_options = ['Waterlow Assessment Outcomes', 'Waterlow Assessment timeliness']
    prevention_options = ['PU plan initia timeliness', 'Re-positioning timeliness', 'Re-positioning Compliance']
    cohort_options = [
        'All',
        'Surgical Patient',
        'No Surgical',
        'Over 60',
        'Unknown BMI - Missing value',
        'Under Weight',
        'Healthy',
        'Over Weight',
        'Obese',
        'Male',
        'Female',
        'PU High Risk',
        'PU Low Risk',
    ]
    outcome_options = ['Pressure Ulcer', 'Length of Stay']
    management_options = ['Ward Move']

    # Date-range slider spanning the full data window, one tick per day.
    date_index = pd.date_range(datetime(2018, 6, 12), datetime(2021, 6, 22), freq='D')
    slider_options = [(day.strftime(' %d%b%y '), day) for day in date_index]
    shared_layout = widgets.Layout(width='500px', height='100px')
    shared_style = {'description_width': '250px'}
    date_slider = widgets.SelectionRangeSlider(
        options=slider_options,
        index=(0, len(slider_options) - 1),  # default: whole range selected
        description='Dates',
        orientation='horizontal',
        layout={'width': '700px'},
        style=shared_style,
    )

    def _multi_select(options, value, description):
        # Every multi-select shares the same layout and label style.
        return widgets.SelectMultiple(
            options=options,
            value=value,
            description=description,
            disabled=False,
            layout=shared_layout,
            style=shared_style,
        )

    outcome_widget = _multi_select(outcome_options, ['Pressure Ulcer'], 'Outcomes')
    cohort_widget = _multi_select(cohort_options, ['Surgical Patient'], 'Patient Cohort')
    demographic_widget = _multi_select(demographic_options, demographic_options, 'Demographic')
    assessment_widget = _multi_select(assessment_options, assessment_options,
                                      'Clinical Events – PU Risk Assessment')
    prevention_widget = _multi_select(prevention_options, prevention_options,
                                      'Clinical Events – PU Prevention')
    blood_results_widget = _multi_select(blood_results_options, blood_results_options,
                                         'Laboratory – Other')
    blood_normalty_widget = _multi_select(blood_normalty_options, blood_normalty_options,
                                          'Laboratory – bloods')
    management_widget = _multi_select(management_options, management_options,
                                      'Patient Management')

    # Keyword names must match the parameter names of the plotted function.
    stats = interact_manual(
        plot_Spell_PU_Degree_PairCompare_v2,
        Outcome=outcome_widget,
        Patient_Cohort=cohort_widget,
        DateRange=date_slider,
        Demographic=demographic_widget,
        Assessment=assessment_widget,
        Prevention=prevention_widget,
        Blood_Results=blood_results_widget,
        Blood_Normalty=blood_normalty_widget,
        Management=management_widget,
    )
    return stats
if __name__ == '__main__':
    # Ad-hoc driver: render one cohort comparison with fixed selections,
    # then launch the live path-visualisation demo.
    Blood_Normalty = [
        'Red blood cell count',
        'Mean cell haemoglobin',
        'Haemoglobin',
        'Haematocrit',
        'Platelet count',
        'Mean cell volume',
        'Mean cell haemoglobin conc',
        'White blood cell count',
        'Monocyte count',
        'Neutrophil count',
        'Lymphocyte count',
        'Eosinophil count',
        'Basophil count',
        'Albumin',
        'C-reactive protein',
        'Haemoglobin A1c IFCC',
    ]
    Blood_Results = [
        'Glucose fasting', 'Glucose random', 'Glucose, CSF',
        'Glucose, dialysis fluid', 'Glucose, fluid',
    ]
    Demographic = ['Weight', 'Sex', 'Age', 'Ethcity']
    Assessment = ['Waterlow Assessment Outcomes', 'Waterlow Assessment timeliness']
    Prevention = ['PU plan initia timeliness', 'Re-positioning timeliness']
    Patient_Cohort = ['Surgical Patient', 'Unknown BMI - Missing value']
    Management = ['Ward Move']
    # NOTE(review): start_date/end_date are assigned but the plot call uses
    # the string form in DateRange; kept for parity with the original script.
    start_date = datetime(2018, 6, 12)
    end_date = datetime(2021, 6, 22)
    DateRange = ['20180612', '20210622']
    Outcome = []  # empty selection: no outcome filter applied
    plot_Spell_PU_Degree_PairCompare_v2(
        Outcome, Patient_Cohort, DateRange, Demographic, Assessment,
        Prevention, Blood_Results, Blood_Normalty, Management,
    )
    PU_Path_Vis_Demo_live()
| [
"IPython.display.display",
"sklearn.feature_selection.VarianceThreshold",
"holoviews.Graph.from_networkx",
"io.open",
"pandas.Index",
"matplotlib.pyplot.annotate",
"numpy.array",
"holoviews.opts.EdgePaths",
"sys.exit",
"seaborn.pairplot",
"pandas.to_datetime",
"pandas.date_range",
"pandas.re... | [((1152, 1174), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""'}), "(style='ticks')\n", (1159, 1174), True, 'import seaborn as sns\n'), ((1175, 1196), 'holoviews.extension', 'hv.extension', (['"""bokeh"""'], {}), "('bokeh')\n", (1187, 1196), True, 'import holoviews as hv\n'), ((3500, 3563), 'matplotlib.pyplot.scatter', 'plt.scatter', (["data['Sum_Degree']", "data['INCOME']"], {'edgecolors': '"""r"""'}), "(data['Sum_Degree'], data['INCOME'], edgecolors='r')\n", (3511, 3563), True, 'import matplotlib.pyplot as plt\n'), ((6022, 6036), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6034, 6036), True, 'import pandas as pd\n'), ((7740, 7754), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7752, 7754), True, 'import pandas as pd\n'), ((9208, 9227), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (9211, 9227), False, 'from sklearn.decomposition import PCA\n'), ((9298, 9400), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'principalComponents', 'columns': "['principal component 1', 'principal component 2']"}), "(data=principalComponents, columns=['principal component 1',\n 'principal component 2'])\n", (9310, 9400), True, 'import pandas as pd\n'), ((9495, 9515), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(2)'}), '(n_clusters=2)\n', (9501, 9515), False, 'from sklearn.cluster import KMeans\n'), ((9694, 9724), 'numpy.vstack', 'np.vstack', (['[centroids, [0, 0]]'], {}), '([centroids, [0, 0]])\n', (9703, 9724), True, 'import numpy as np\n'), ((9747, 9775), 'scipy.spatial.Voronoi', 'Voronoi', (['principalComponents'], {}), '(principalComponents)\n', (9754, 9775), False, 'from scipy.spatial import Voronoi\n'), ((9854, 9882), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (9864, 9882), True, 'import matplotlib.pyplot as plt\n'), ((10847, 10961), 'matplotlib.pyplot.scatter', 'plt.scatter', (["df['principal component 1']", "df['principal 
component 2']"], {'color': 'colors', 'alpha': '(0.5)', 'edgecolor': '"""k"""'}), "(df['principal component 1'], df['principal component 2'], color\n =colors, alpha=0.5, edgecolor='k')\n", (10858, 10961), True, 'import matplotlib.pyplot as plt\n'), ((12389, 12445), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(vor.min_bound[0] - 0.1)', '(vor.max_bound[0] + 0.1)'], {}), '(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)\n', (12397, 12445), True, 'import matplotlib.pyplot as plt\n'), ((12446, 12502), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(vor.min_bound[1] - 0.1)', '(vor.max_bound[1] + 0.1)'], {}), '(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)\n', (12454, 12502), True, 'import matplotlib.pyplot as plt\n'), ((14407, 14421), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14419, 14421), True, 'import pandas as pd\n'), ((15322, 15341), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (15325, 15341), False, 'from sklearn.decomposition import PCA\n'), ((15412, 15514), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'principalComponents', 'columns': "['principal component 1', 'principal component 2']"}), "(data=principalComponents, columns=['principal component 1',\n 'principal component 2'])\n", (15424, 15514), True, 'import pandas as pd\n'), ((15609, 15629), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(2)'}), '(n_clusters=2)\n', (15615, 15629), False, 'from sklearn.cluster import KMeans\n'), ((15808, 15838), 'numpy.vstack', 'np.vstack', (['[centroids, [0, 0]]'], {}), '([centroids, [0, 0]])\n', (15817, 15838), True, 'import numpy as np\n'), ((15861, 15889), 'scipy.spatial.Voronoi', 'Voronoi', (['principalComponents'], {}), '(principalComponents)\n', (15868, 15889), False, 'from scipy.spatial import Voronoi\n'), ((15968, 15996), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (15978, 15996), True, 'import matplotlib.pyplot as plt\n'), ((16961, 17075), 
'matplotlib.pyplot.scatter', 'plt.scatter', (["df['principal component 1']", "df['principal component 2']"], {'color': 'colors', 'alpha': '(0.5)', 'edgecolor': '"""k"""'}), "(df['principal component 1'], df['principal component 2'], color\n =colors, alpha=0.5, edgecolor='k')\n", (16972, 17075), True, 'import matplotlib.pyplot as plt\n'), ((18503, 18559), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(vor.min_bound[0] - 0.1)', '(vor.max_bound[0] + 0.1)'], {}), '(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)\n', (18511, 18559), True, 'import matplotlib.pyplot as plt\n'), ((18560, 18616), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(vor.min_bound[1] - 0.1)', '(vor.max_bound[1] + 0.1)'], {}), '(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)\n', (18568, 18616), True, 'import matplotlib.pyplot as plt\n'), ((20382, 20396), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (20394, 20396), True, 'import pandas as pd\n'), ((21307, 21326), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (21310, 21326), False, 'from sklearn.decomposition import PCA\n'), ((21397, 21499), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'principalComponents', 'columns': "['principal component 1', 'principal component 2']"}), "(data=principalComponents, columns=['principal component 1',\n 'principal component 2'])\n", (21409, 21499), True, 'import pandas as pd\n'), ((21594, 21614), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(2)'}), '(n_clusters=2)\n', (21600, 21614), False, 'from sklearn.cluster import KMeans\n'), ((21798, 21828), 'numpy.vstack', 'np.vstack', (['[centroids, [0, 0]]'], {}), '([centroids, [0, 0]])\n', (21807, 21828), True, 'import numpy as np\n'), ((21851, 21879), 'scipy.spatial.Voronoi', 'Voronoi', (['principalComponents'], {}), '(principalComponents)\n', (21858, 21879), False, 'from scipy.spatial import Voronoi\n'), ((21958, 21986), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 
10))\n', (21968, 21986), True, 'import matplotlib.pyplot as plt\n'), ((22951, 23065), 'matplotlib.pyplot.scatter', 'plt.scatter', (["df['principal component 1']", "df['principal component 2']"], {'color': 'colors', 'alpha': '(0.5)', 'edgecolor': '"""k"""'}), "(df['principal component 1'], df['principal component 2'], color\n =colors, alpha=0.5, edgecolor='k')\n", (22962, 23065), True, 'import matplotlib.pyplot as plt\n'), ((24493, 24549), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(vor.min_bound[0] - 0.1)', '(vor.max_bound[0] + 0.1)'], {}), '(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)\n', (24501, 24549), True, 'import matplotlib.pyplot as plt\n'), ((24550, 24606), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(vor.min_bound[1] - 0.1)', '(vor.max_bound[1] + 0.1)'], {}), '(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)\n', (24558, 24606), True, 'import matplotlib.pyplot as plt\n'), ((26385, 26399), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (26397, 26399), True, 'import pandas as pd\n'), ((27377, 27479), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'principalComponents', 'columns': "['principal component 1', 'principal component 2']"}), "(data=principalComponents, columns=['principal component 1',\n 'principal component 2'])\n", (27389, 27479), True, 'import pandas as pd\n'), ((29173, 29187), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (29185, 29187), True, 'import pandas as pd\n'), ((30168, 30270), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'principalComponents', 'columns': "['principal component 1', 'principal component 2']"}), "(data=principalComponents, columns=['principal component 1',\n 'principal component 2'])\n", (30180, 30270), True, 'import pandas as pd\n'), ((30443, 30507), 'seaborn.pairplot', 'sns.pairplot', (['df[features]'], {'hue': '"""labels"""', 'diag_kws': "{'bw': '1.0'}"}), "(df[features], hue='labels', diag_kws={'bw': '1.0'})\n", (30455, 30507), True, 'import seaborn as sns\n'), ((30831, 30842), 
'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (30840, 30842), True, 'import plotly.graph_objects as go\n'), ((36921, 36935), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (36933, 36935), True, 'import pandas as pd\n'), ((38539, 38641), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'principalComponents', 'columns': "['principal component 1', 'principal component 2']"}), "(data=principalComponents, columns=['principal component 1',\n 'principal component 2'])\n", (38551, 38641), True, 'import pandas as pd\n'), ((39162, 39181), 'sklearn.feature_selection.VarianceThreshold', 'VarianceThreshold', ([], {}), '()\n', (39179, 39181), False, 'from sklearn.feature_selection import VarianceThreshold\n'), ((39260, 39288), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (39286, 39288), False, 'from sklearn import preprocessing\n'), ((39347, 39369), 'pandas.DataFrame', 'pd.DataFrame', (['x_scaled'], {}), '(x_scaled)\n', (39359, 39369), True, 'import pandas as pd\n'), ((39826, 39840), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (39838, 39840), True, 'import pandas as pd\n'), ((40783, 40797), 'IPython.display.display', 'display', (['test3'], {}), '(test3)\n', (40790, 40797), False, 'from IPython.display import display, HTML\n'), ((41133, 41222), 'seaborn.pairplot', 'sns.pairplot', (['df[feature_list]'], {'hue': '"""labels"""', 'diag_kws': "{'bw': '1.0'}", 'palette': 'palette'}), "(df[feature_list], hue='labels', diag_kws={'bw': '1.0'},\n palette=palette)\n", (41145, 41222), True, 'import seaborn as sns\n'), ((50058, 50156), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""SELECT * from [AdvancedAnalytics].[dbo].[Variance_Analysis]"""', 'Red004_Conn'], {}), "('SELECT * from [AdvancedAnalytics].[dbo].[Variance_Analysis]'\n , Red004_Conn)\n", (50075, 50156), True, 'import pandas as pd\n'), ((50166, 50417), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""SELECT [FY] ,[HRG_CODE], [HRG_NAME] 
,[ORDINARY_ELECTIVE_LONG_STAY_TRIMPOINT_DAYS] ,[NON_ELECTIVE_LONG_STAY_TRIMPOINT_DAYS] FROM [LOCAL_REFERENCE_DB].[ref].[NATIONAL_TARIFF_APC_OPROC] where FY = \'2020/2021\'"""', 'Red004_Conn'], {}), '(\n "SELECT [FY] ,[HRG_CODE], [HRG_NAME] ,[ORDINARY_ELECTIVE_LONG_STAY_TRIMPOINT_DAYS] ,[NON_ELECTIVE_LONG_STAY_TRIMPOINT_DAYS] FROM [LOCAL_REFERENCE_DB].[ref].[NATIONAL_TARIFF_APC_OPROC] where FY = \'2020/2021\'"\n , Red004_Conn)\n', (50183, 50417), True, 'import pandas as pd\n'), ((63160, 63208), 'holoviews.Labels', 'hv.Labels', (['simple_graph.nodes', "['x', 'y']", '"""abv"""'], {}), "(simple_graph.nodes, ['x', 'y'], 'abv')\n", (63169, 63208), True, 'import holoviews as hv\n'), ((67298, 67359), 'pandas.get_dummies', 'pd.get_dummies', (['df'], {'columns': 'column_mapped', 'prefix': "['Type_is']"}), "(df, columns=column_mapped, prefix=['Type_is'])\n", (67312, 67359), True, 'import pandas as pd\n'), ((67553, 67567), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (67565, 67567), True, 'import pandas as pd\n'), ((72462, 72521), 'IPython.display.IFrame', 'IFrame', ([], {'src': '(url or outfile_path)', 'width': 'width', 'height': 'height'}), '(src=url or outfile_path, width=width, height=height)\n', (72468, 72521), False, 'from IPython.display import IFrame\n'), ((74772, 74816), 'pandas.to_datetime', 'pd.to_datetime', (["df['HPS_ACTIVITY_DATE_TIME']"], {}), "(df['HPS_ACTIVITY_DATE_TIME'])\n", (74786, 74816), True, 'import pandas as pd\n'), ((88580, 88601), 'datetime.datetime', 'datetime', (['(2018)', '(6)', '(12)'], {}), '(2018, 6, 12)\n', (88588, 88601), False, 'from datetime import datetime\n'), ((88617, 88638), 'datetime.datetime', 'datetime', (['(2021)', '(6)', '(22)'], {}), '(2021, 6, 22)\n', (88625, 88638), False, 'from datetime import datetime\n'), ((88651, 88696), 'pandas.date_range', 'pd.date_range', (['start_date', 'end_date'], {'freq': '"""D"""'}), "(start_date, end_date, freq='D')\n", (88664, 88696), True, 'import pandas as pd\n'), ((88811, 88856), 
'ipywidgets.Layout', 'widgets.Layout', ([], {'width': '"""500px"""', 'height': '"""100px"""'}), "(width='500px', height='100px')\n", (88825, 88856), True, 'import ipywidgets as widgets\n'), ((88927, 89077), 'ipywidgets.SelectionRangeSlider', 'widgets.SelectionRangeSlider', ([], {'options': 'options', 'index': 'index', 'description': '"""Dates"""', 'orientation': '"""horizontal"""', 'layout': "{'width': '700px'}", 'style': 'style'}), "(options=options, index=index, description=\n 'Dates', orientation='horizontal', layout={'width': '700px'}, style=style)\n", (88955, 89077), True, 'import ipywidgets as widgets\n'), ((89136, 89274), 'ipywidgets.SelectMultiple', 'widgets.SelectMultiple', ([], {'options': 'Outcomes', 'value': "['Pressure Ulcer']", 'description': '"""Outcomes"""', 'disabled': '(False)', 'layout': 'layout', 'style': 'style'}), "(options=Outcomes, value=['Pressure Ulcer'],\n description='Outcomes', disabled=False, layout=layout, style=style)\n", (89158, 89274), True, 'import ipywidgets as widgets\n'), ((89352, 89504), 'ipywidgets.SelectMultiple', 'widgets.SelectMultiple', ([], {'options': 'Patient_Cohort', 'value': "['Surgical Patient']", 'description': '"""Patient Cohort"""', 'disabled': '(False)', 'layout': 'layout', 'style': 'style'}), "(options=Patient_Cohort, value=['Surgical Patient'],\n description='Patient Cohort', disabled=False, layout=layout, style=style)\n", (89374, 89504), True, 'import ipywidgets as widgets\n'), ((89579, 89717), 'ipywidgets.SelectMultiple', 'widgets.SelectMultiple', ([], {'options': 'Demographic', 'value': 'Demographic', 'description': '"""Demographic"""', 'disabled': '(False)', 'layout': 'layout', 'style': 'style'}), "(options=Demographic, value=Demographic, description=\n 'Demographic', disabled=False, layout=layout, style=style)\n", (89601, 89717), True, 'import ipywidgets as widgets\n'), ((89793, 89958), 'ipywidgets.SelectMultiple', 'widgets.SelectMultiple', ([], {'options': 'Assessment', 'value': 'Assessment', 
'description': '"""Clinical Events – PU Risk Assessment"""', 'disabled': '(False)', 'layout': 'layout', 'style': 'style'}), "(options=Assessment, value=Assessment, description=\n 'Clinical Events – PU Risk Assessment', disabled=False, layout=layout,\n style=style)\n", (89815, 89958), True, 'import ipywidgets as widgets\n'), ((90051, 90212), 'ipywidgets.SelectMultiple', 'widgets.SelectMultiple', ([], {'options': 'Prevention', 'value': 'Prevention', 'description': '"""Clinical Events – PU Prevention"""', 'disabled': '(False)', 'layout': 'layout', 'style': 'style'}), "(options=Prevention, value=Prevention, description=\n 'Clinical Events – PU Prevention', disabled=False, layout=layout, style\n =style)\n", (90073, 90212), True, 'import ipywidgets as widgets\n'), ((90283, 90436), 'ipywidgets.SelectMultiple', 'widgets.SelectMultiple', ([], {'options': 'Blood_Results', 'value': 'Blood_Results', 'description': '"""Laboratory – Other"""', 'disabled': '(False)', 'layout': 'layout', 'style': 'style'}), "(options=Blood_Results, value=Blood_Results,\n description='Laboratory – Other', disabled=False, layout=layout, style=\n style)\n", (90305, 90436), True, 'import ipywidgets as widgets\n'), ((90511, 90667), 'ipywidgets.SelectMultiple', 'widgets.SelectMultiple', ([], {'options': 'Blood_Normalty', 'value': 'Blood_Normalty', 'description': '"""Laboratory – bloods"""', 'disabled': '(False)', 'layout': 'layout', 'style': 'style'}), "(options=Blood_Normalty, value=Blood_Normalty,\n description='Laboratory – bloods', disabled=False, layout=layout, style\n =style)\n", (90533, 90667), True, 'import ipywidgets as widgets\n'), ((90735, 90878), 'ipywidgets.SelectMultiple', 'widgets.SelectMultiple', ([], {'options': 'Management', 'value': 'Management', 'description': '"""Patient Management"""', 'disabled': '(False)', 'layout': 'layout', 'style': 'style'}), "(options=Management, value=Management, description=\n 'Patient Management', disabled=False, layout=layout, style=style)\n", (90757, 
90878), True, 'import ipywidgets as widgets\n'), ((91562, 91867), 'ipywidgets.interact_manual', 'interact_manual', (['plot_Spell_PU_Degree_PairCompare_v2'], {'Outcome': 'Outcome', 'Patient_Cohort': 'Patient_Cohort', 'DateRange': 'selection_range_slider', 'Demographic': 'Demographic', 'Assessment': 'Assessment', 'Prevention': 'Prevention', 'Blood_Results': 'Blood_Results', 'Blood_Normalty': 'Blood_Normalty', 'Management': 'Management'}), '(plot_Spell_PU_Degree_PairCompare_v2, Outcome=Outcome,\n Patient_Cohort=Patient_Cohort, DateRange=selection_range_slider,\n Demographic=Demographic, Assessment=Assessment, Prevention=Prevention,\n Blood_Results=Blood_Results, Blood_Normalty=Blood_Normalty, Management=\n Management)\n', (91577, 91867), False, 'from ipywidgets import interact, interactive, fixed, interact_manual, Text, interactive_output\n'), ((95273, 95294), 'datetime.datetime', 'datetime', (['(2018)', '(6)', '(12)'], {}), '(2018, 6, 12)\n', (95281, 95294), False, 'from datetime import datetime\n'), ((95310, 95331), 'datetime.datetime', 'datetime', (['(2021)', '(6)', '(22)'], {}), '(2021, 6, 22)\n', (95318, 95331), False, 'from datetime import datetime\n'), ((4911, 4977), 'matplotlib.pyplot.scatter', 'plt.scatter', (["data['Sum_Degree']", "data['Total_LOS']"], {'edgecolors': '"""r"""'}), "(data['Sum_Degree'], data['Total_LOS'], edgecolors='r')\n", (4922, 4977), True, 'import matplotlib.pyplot as plt\n'), ((6555, 6576), 'pandas.concat', 'pd.concat', (['[df, data]'], {}), '([df, data])\n', (6564, 6576), True, 'import pandas as pd\n'), ((8857, 8878), 'pandas.concat', 'pd.concat', (['[df, data]'], {}), '([df, data])\n', (8866, 8878), True, 'import pandas as pd\n'), ((10423, 10475), 'holoviews.opts.Points', 'opts.Points', ([], {'padding': '(0.1)', 'size': '(8)', 'line_color': '"""black"""'}), "(padding=0.1, size=8, line_color='black')\n", (10434, 10475), False, 'from holoviews import opts\n'), ((12034, 12132), 'matplotlib.pyplot.annotate', 'plt.annotate', (['label', '(x, 
y)'], {'textcoords': '"""offset points"""', 'xytext': '(0, 10)', 'ha': '"""center"""', 'size': '(20)'}), "(label, (x, y), textcoords='offset points', xytext=(0, 10), ha=\n 'center', size=20)\n", (12046, 12132), True, 'import matplotlib.pyplot as plt\n'), ((14971, 14992), 'pandas.concat', 'pd.concat', (['[df, data]'], {}), '([df, data])\n', (14980, 14992), True, 'import pandas as pd\n'), ((16537, 16589), 'holoviews.opts.Points', 'opts.Points', ([], {'padding': '(0.1)', 'size': '(8)', 'line_color': '"""black"""'}), "(padding=0.1, size=8, line_color='black')\n", (16548, 16589), False, 'from holoviews import opts\n'), ((18148, 18246), 'matplotlib.pyplot.annotate', 'plt.annotate', (['label', '(x, y)'], {'textcoords': '"""offset points"""', 'xytext': '(0, 10)', 'ha': '"""center"""', 'size': '(20)'}), "(label, (x, y), textcoords='offset points', xytext=(0, 10), ha=\n 'center', size=20)\n", (18160, 18246), True, 'import matplotlib.pyplot as plt\n'), ((20958, 20979), 'pandas.concat', 'pd.concat', (['[df, data]'], {}), '([df, data])\n', (20967, 20979), True, 'import pandas as pd\n'), ((22527, 22579), 'holoviews.opts.Points', 'opts.Points', ([], {'padding': '(0.1)', 'size': '(8)', 'line_color': '"""black"""'}), "(padding=0.1, size=8, line_color='black')\n", (22538, 22579), False, 'from holoviews import opts\n'), ((24138, 24236), 'matplotlib.pyplot.annotate', 'plt.annotate', (['label', '(x, y)'], {'textcoords': '"""offset points"""', 'xytext': '(0, 10)', 'ha': '"""center"""', 'size': '(20)'}), "(label, (x, y), textcoords='offset points', xytext=(0, 10), ha=\n 'center', size=20)\n", (24150, 24236), True, 'import matplotlib.pyplot as plt\n'), ((27159, 27180), 'pandas.concat', 'pd.concat', (['[df, data]'], {}), '([df, data])\n', (27168, 27180), True, 'import pandas as pd\n'), ((29950, 29971), 'pandas.concat', 'pd.concat', (['[df, data]'], {}), '([df, data])\n', (29959, 29971), True, 'import pandas as pd\n'), ((40090, 40104), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', 
(40102, 40104), True, 'import pandas as pd\n'), ((62835, 62861), 'holoviews.opts.EdgePaths', 'opts.EdgePaths', ([], {}), '(**defaults)\n', (62849, 62861), False, 'from holoviews import opts\n'), ((62863, 62885), 'holoviews.opts.Graph', 'opts.Graph', ([], {}), '(**defaults)\n', (62873, 62885), False, 'from holoviews import opts\n'), ((62887, 62909), 'holoviews.opts.Nodes', 'opts.Nodes', ([], {}), '(**defaults)\n', (62897, 62909), False, 'from holoviews import opts\n'), ((72177, 72221), 'io.open', 'io.open', (['outfile_path', '"""wt"""'], {'encoding': '"""utf8"""'}), "(outfile_path, 'wt', encoding='utf8')\n", (72184, 72221), False, 'import json, io\n'), ((77359, 77461), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'principalComponents', 'columns': "['principal component 1', 'principal component 2']"}), "(data=principalComponents, columns=['principal component 1',\n 'principal component 2'])\n", (77371, 77461), True, 'import pandas as pd\n'), ((78064, 78083), 'sklearn.feature_selection.VarianceThreshold', 'VarianceThreshold', ([], {}), '()\n', (78081, 78083), False, 'from sklearn.feature_selection import VarianceThreshold\n'), ((78170, 78198), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (78196, 78198), False, 'from sklearn import preprocessing\n'), ((78265, 78287), 'pandas.DataFrame', 'pd.DataFrame', (['x_scaled'], {}), '(x_scaled)\n', (78277, 78287), True, 'import pandas as pd\n'), ((78793, 78807), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (78805, 78807), True, 'import pandas as pd\n'), ((81986, 82071), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {'columns': "['variable_name', 'p-value (ANOVA&Chi-square)']"}), "(rows_list, columns=['variable_name', 'p-value (ANOVA&Chi-square)']\n )\n", (81998, 82071), True, 'import pandas as pd\n'), ((82935, 82949), 'IPython.display.display', 'display', (['test3'], {}), '(test3)\n', (82942, 82949), False, 'from IPython.display import display, HTML\n'), ((9164, 9180), 
'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (9178, 9180), False, 'from sklearn.preprocessing import StandardScaler\n'), ((15278, 15294), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (15292, 15294), False, 'from sklearn.preprocessing import StandardScaler\n'), ((21263, 21279), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (21277, 21279), False, 'from sklearn.preprocessing import StandardScaler\n'), ((40727, 40752), 'pandas.concat', 'pd.concat', (['[test5, test3]'], {}), '([test5, test3])\n', (40736, 40752), True, 'import pandas as pd\n'), ((58450, 58551), 'ipywidgets.SelectMultiple', 'widgets.SelectMultiple', ([], {'options': 'Label', 'value': 'Label', 'description': '"""Pressure Ulcer"""', 'disabled': '(False)'}), "(options=Label, value=Label, description=\n 'Pressure Ulcer', disabled=False)\n", (58472, 58551), True, 'import ipywidgets as widgets\n'), ((58627, 58721), 'ipywidgets.SelectMultiple', 'widgets.SelectMultiple', ([], {'options': 'LOS', 'value': 'LOS', 'description': '"""Expected LOS"""', 'disabled': '(False)'}), "(options=LOS, value=LOS, description='Expected LOS',\n disabled=False)\n", (58649, 58721), True, 'import ipywidgets as widgets\n'), ((58799, 58915), 'ipywidgets.SelectMultiple', 'widgets.SelectMultiple', ([], {'options': 'events_list', 'value': "['Waterlow', 'LabTest']", 'description': '"""Events"""', 'disabled': '(False)'}), "(options=events_list, value=['Waterlow', 'LabTest'],\n description='Events', disabled=False)\n", (58821, 58915), True, 'import ipywidgets as widgets\n'), ((59024, 59217), 'ipywidgets.IntRangeSlider', 'widgets.IntRangeSlider', ([], {'value': '[0, 120]', 'min': '(0)', 'max': '(120)', 'step': '(1)', 'description': '"""Age:"""', 'disabled': '(False)', 'continuous_update': '(False)', 'orientation': '"""horizontal"""', 'readout': '(True)', 'readout_format': '"""d"""'}), "(value=[0, 120], min=0, max=120, step=1, description=\n 'Age:', 
disabled=False, continuous_update=False, orientation=\n 'horizontal', readout=True, readout_format='d')\n", (59046, 59217), True, 'import ipywidgets as widgets\n'), ((59318, 59454), 'ipywidgets.SelectMultiple', 'widgets.SelectMultiple', ([], {'options': 'Waterlow_Compliance', 'value': "['Rule 1: Use Waterlow']", 'description': '"""WaterlowStandard"""', 'disabled': '(False)'}), "(options=Waterlow_Compliance, value=[\n 'Rule 1: Use Waterlow'], description='WaterlowStandard', disabled=False)\n", (59340, 59454), True, 'import ipywidgets as widgets\n'), ((59543, 59743), 'ipywidgets.SelectMultiple', 'widgets.SelectMultiple', ([], {'options': 'Variable_selection', 'value': "['Complex_Degree', 'Global_Central', 'Total_LOS', 'Turnaround_Degree',\n 'WL - Waterlow Score']", 'description': '"""Variables"""', 'disabled': '(False)'}), "(options=Variable_selection, value=['Complex_Degree',\n 'Global_Central', 'Total_LOS', 'Turnaround_Degree',\n 'WL - Waterlow Score'], description='Variables', disabled=False)\n", (59565, 59743), True, 'import ipywidgets as widgets\n'), ((63092, 63129), 'holoviews.Graph.from_networkx', 'hv.Graph.from_networkx', (['combined', 'pos'], {}), '(combined, pos)\n', (63114, 63129), True, 'import holoviews as hv\n'), ((66448, 66469), 'itertools.groupby', 'itertools.groupby', (['tt'], {}), '(tt)\n', (66465, 66469), False, 'import itertools\n'), ((73300, 73326), 'scipy.stats.f_oneway', 'stats.f_oneway', (['*[dd, ddd]'], {}), '(*[dd, ddd])\n', (73314, 73326), True, 'import scipy.stats as stats\n'), ((73432, 73466), 'pandas.crosstab', 'pd.crosstab', (['df[item]', "df['Label']"], {}), "(df[item], df['Label'])\n", (73443, 73466), True, 'import pandas as pd\n'), ((73502, 73530), 'scipy.stats.chi2_contingency', 'chi2_contingency', (['contigency'], {}), '(contigency)\n', (73518, 73530), False, 'from scipy.stats import chi2_contingency\n'), ((60636, 60692), 'datetime.datetime.strptime', 'datetime.strptime', (['sorted_dic[i][1]', '"""%Y.%m.%d %H:%M:%S"""'], {}), 
"(sorted_dic[i][1], '%Y.%m.%d %H:%M:%S')\n", (60653, 60692), False, 'from datetime import datetime\n'), ((60692, 60752), 'datetime.datetime.strptime', 'datetime.strptime', (['sorted_dic[i - 1][1]', '"""%Y.%m.%d %H:%M:%S"""'], {}), "(sorted_dic[i - 1][1], '%Y.%m.%d %H:%M:%S')\n", (60709, 60752), False, 'from datetime import datetime\n'), ((65562, 65669), 'ipywidgets.Text', 'widgets.Text', ([], {'value': '"""1234567"""', 'placeholder': '"""Type in Spell Number"""', 'description': '"""Spell:"""', 'disabled': '(False)'}), "(value='1234567', placeholder='Type in Spell Number',\n description='Spell:', disabled=False)\n", (65574, 65669), True, 'import ipywidgets as widgets\n'), ((74660, 74671), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (74668, 74671), False, 'import sys\n'), ((80255, 80269), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (80267, 80269), True, 'import pandas as pd\n'), ((81155, 81169), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (81167, 81169), True, 'import pandas as pd\n'), ((81194, 81210), 'pandas.Index', 'pd.Index', (['[item]'], {}), '([item])\n', (81202, 81210), True, 'import pandas as pd\n'), ((82875, 82900), 'pandas.concat', 'pd.concat', (['[test5, test3]'], {}), '([test5, test3])\n', (82884, 82900), True, 'import pandas as pd\n'), ((46056, 46076), 'pandas.isnull', 'pd.isnull', (['row[item]'], {}), '(row[item])\n', (46065, 46076), True, 'import pandas as pd\n'), ((60859, 60875), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (60867, 60875), True, 'import numpy as np\n'), ((60940, 60956), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (60948, 60956), True, 'import numpy as np\n'), ((61643, 61659), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (61651, 61659), True, 'import numpy as np\n'), ((72405, 72423), 'json.dumps', 'json.dumps', (['kwargs'], {}), '(kwargs)\n', (72415, 72423), False, 'import json, io\n')] |
# -*- coding: utf-8 -*-
"""Submarine navigation puzzle (Advent-of-Code "dive" style).

Reads newline-separated commands ("forward 5", "down 3", "up 2") from
input.txt and prints the product of the final horizontal position and
depth computed with the part-2 (aim-based) rules, exactly as the
original flat script did.
"""
import numpy as np


def read_commands(path='input.txt'):
    """Return the raw command lines from *path*."""
    with open(path) as f:
        return f.readlines()


def part1(lines):
    """Return [horizontal_pos, depth] after applying the part-1 rules.

    Each command moves the submarine directly in the plane.
    """
    moves = {'forward': np.array([1, 0]),
             'up': np.array([0, -1]),
             'down': np.array([0, 1])}
    loc = np.array([0, 0])
    for line in lines:
        direction, dist = line.split()
        loc = loc + moves[direction] * int(dist)
    return loc


def part2(lines):
    """Return [horizontal_pos, depth, aim] after applying the part-2 rules.

    'down'/'up' change the aim only; 'forward' advances horizontally and
    changes depth by aim * distance.
    """
    moves = {'forward': np.array([1, 1, 0]),
             'up': np.array([0, 0, -1]),
             'down': np.array([0, 0, 1])}
    loc = np.array([0, 0, 0])
    for line in lines:
        direction, dist = line.split()
        # The [1,0,1] mask applies the direct x/aim update; the [0,1,0]
        # mask adds the aim-scaled depth change (only for 'forward').
        loc = (loc
               + moves[direction] * [1, 0, 1] * int(dist)
               + moves[direction] * [0, 1, 0] * int(dist) * loc[2])
    return loc


if __name__ == '__main__':
    lines = read_commands()
    # part 1 answer (kept available, matching the original commented print)
    # print(part1(lines).prod())
    # part 2 answer
    print(part2(lines)[:2].prod())
| [
"numpy.array"
] | [((210, 226), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (218, 226), True, 'import numpy as np\n'), ((665, 684), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (673, 684), True, 'import numpy as np\n'), ((288, 304), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (296, 304), True, 'import numpy as np\n'), ((332, 349), 'numpy.array', 'np.array', (['[0, -1]'], {}), '([0, -1])\n', (340, 349), True, 'import numpy as np\n'), ((377, 393), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (385, 393), True, 'import numpy as np\n'), ((756, 775), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (764, 775), True, 'import numpy as np\n'), ((808, 828), 'numpy.array', 'np.array', (['[0, 0, -1]'], {}), '([0, 0, -1])\n', (816, 828), True, 'import numpy as np\n'), ((861, 880), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (869, 880), True, 'import numpy as np\n')] |
import pickle
import os
import numpy as np
import torch
import warnings
from tqdm import tqdm
from Metrics import evaluateTracking
from dataset.dataLoader import Data_Loader_MOT
from network.tubetk import TubeTK
from post_processing.tube_nms import multiclass_nms
from apex import amp
import argparse
import multiprocessing
from configs.default import __C, cfg_from_file
from post_processing.tube_iou_matching import matching
# Silence library warning chatter (torch/apex emit many deprecation warnings).
warnings.filterwarnings('ignore')
# NOTE(review): import placed after the filter call in the original; kept as-is.
import shutil
def synchronize():
    """
    Barrier across all workers so that every process reaches this point
    before any continues. A no-op when distributed training is not
    available, not initialized, or runs on a single process.
    """
    dist = torch.distributed
    if not dist.is_available():
        return
    if not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
def match_video(video_name, tmp_dir, output_dir, model_arg):
    """Stitch one video's per-chunk tube pickles and run tube matching.

    Loads every pickle under tmp_dir/video_name (file names are start
    frames) in frame order, concatenates them and writes the matched
    tracks to output_dir/<video_name>.txt.
    """
    video_dir = os.path.join(tmp_dir, video_name)
    frame_ids = sorted(int(name) for name in os.listdir(video_dir))
    chunks = [pickle.load(open(os.path.join(video_dir, str(fid)), 'rb'))
              for fid in frame_ids]
    all_tubes = np.concatenate(chunks)
    matching(all_tubes,
             save_path=os.path.join(output_dir, video_name + '.txt'),
             verbose=True, arg=model_arg)
def evaluate(model, loader, test_arg, model_arg, output_dir='output'):
    """Run TubeTK inference over *loader*, then match tubes into tracks.

    Phase 1 (all ranks): predict tubes per batch and dump them as pickles
    under output_dir/tmp/<video_name>/<start_frame>.
    Phase 2 (rank 0 only, after a barrier): concatenate and match each
    video's tubes in a process pool, write results under output_dir/res,
    and — for the MOT17 train split — run the tracking evaluation.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    tmp_dir = os.path.join(output_dir, 'tmp')
    # Best-effort wipe of a stale tmp dir from a previous run.
    try:
        shutil.rmtree(tmp_dir)
    except:
        pass
    os.makedirs(tmp_dir, exist_ok=True)
    # Only rank 0 shows a progress bar; other ranks iterate silently.
    if test_arg.rank == 0:
        loader = tqdm(loader, ncols=20)
    for i, data in enumerate(loader):
        imgs, img_metas = data[:2]
        imgs = imgs.cuda()
        with torch.no_grad():
            tubes, _, _ = zip(*model(imgs, img_metas, return_loss=False))
        for img, tube, img_meta in zip(imgs, tubes, img_metas):
            # ===========================================VIS OUTPUT====================================================
            # if img is not None:
            #     vis_output(img.cpu(), img_meta, bbox.cpu(), stride=model_arg.frame_stride, out_folder='/home/pb/results/')
            # =========================================================================================================
            # Columns 0/5/10 hold the tube's frame indices; shift them
            # from clip-local to video-global frame numbers.
            tube[:, [0, 5, 10]] += img_meta['start_frame']
            os.makedirs(os.path.join(tmp_dir, img_meta['video_name']), exist_ok=True)
            tube = tube.cpu().data.numpy()
            pickle.dump(tube, open(os.path.join(tmp_dir, img_meta['video_name'], str(img_meta['start_frame'])), 'wb'))
    # Wait for every rank to finish dumping before rank 0 reads tmp_dir.
    synchronize()
    if test_arg.rank == 0:
        print('Finish prediction, Start matching')
        video_names = os.listdir(tmp_dir)
        pool = multiprocessing.Pool(processes=20)
        pool_list = []
        for vid in video_names:
            pool_list.append(pool.apply_async(match_video, (vid, tmp_dir, os.path.join(output_dir, 'res'), model_arg,)))
        # Iterating the async results with .get() both waits for and
        # re-raises any worker exception.
        for p in tqdm(pool_list, ncols=20):
            p.get()
        pool.close()
        pool.join()
        shutil.rmtree(tmp_dir)
        if test_arg.trainOrTest == 'train' and test_arg.dataset == 'MOT17':
            print("FINISH MATCHING, START EVALUATE")
            seq_map = 'MOT17_train.txt'
            evaluateTracking(seq_map, os.path.join(output_dir, 'res'),
                             os.path.join(test_arg.data_url, 'train'), 'MOT17')
    # elif test_arg.trainOrTest == 'train' and test_arg.dataset == 'MOT15':
    #     print("FINISH MATCHING, START EVALUATE")
    #     seq_map = 'MOT15_train.txt'
    #     evaluateTracking(seq_map, os.path.join(output_dir, 'res'),
    #                      os.path.join(test_arg.data_url[3], 'train'), 'MOT15')
def main(test_arg, model_arg):
    """Set up one distributed worker, build and load TubeTK, and evaluate.

    Expects to be launched under torch.distributed (env:// init), with
    LOCAL_RANK set in the environment and test_arg.rank already filled in
    by the caller.
    """
    torch.distributed.init_process_group(backend="nccl", init_method='env://')
    local_rank = int(os.environ["LOCAL_RANK"])
    print('Rank: ' + str(test_arg.rank) + " Start!")
    torch.cuda.set_device(local_rank)
    if local_rank == 0:
        print("Building TubeTK Model")
    model = TubeTK(num_classes=1, arg=model_arg, pretrained=False)
    data_loader = Data_Loader_MOT(
        batch_size=test_arg.batch_size,
        num_workers=8,
        input_path=test_arg.data_url,
        train_epoch=1,
        test_epoch=1,
        model_arg=model_arg,
        dataset=test_arg.dataset,
        test_seq=None,
        test_type=test_arg.trainOrTest,
    )
    model = model.cuda(local_rank)
    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    # Optional mixed-precision via apex (O1 = conservative mixed precision).
    if test_arg.apex:
        model = amp.initialize(model, opt_level='O1')
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank],
                                                      output_device=local_rank,
                                                      find_unused_parameters=True)
    if test_arg.local_rank == 0:
        print("Loading Model")
    # Remap every saved GPU placement (checkpoint saved on up to 8 GPUs)
    # onto this worker's own device.
    checkpoint = torch.load(test_arg.model_path + '/' + test_arg.model_name, map_location=
                            {'cuda:0': 'cuda:' + str(test_arg.local_rank),
                             'cuda:1': 'cuda:' + str(test_arg.local_rank),
                             'cuda:2': 'cuda:' + str(test_arg.local_rank),
                             'cuda:3': 'cuda:' + str(test_arg.local_rank),
                             'cuda:4': 'cuda:' + str(test_arg.local_rank),
                             'cuda:5': 'cuda:' + str(test_arg.local_rank),
                             'cuda:6': 'cuda:' + str(test_arg.local_rank),
                             'cuda:7': 'cuda:' + str(test_arg.local_rank)})
    model.load_state_dict(checkpoint['state'], strict=False)
    if test_arg.local_rank == 0:
        print("Finish Loading")
    # Free the checkpoint dict before inference to reduce peak memory.
    del checkpoint
    model.eval()
    loader = data_loader.test_loader
    evaluate(model, loader, test_arg, model_arg, output_dir=test_arg.output_dir)
if __name__ == '__main__':
    # Command-line interface: declared as a spec table and registered in a
    # single pass so flags, defaults and help text live side by side.
    cli = argparse.ArgumentParser()
    for flag, opts in (
            ('--batch_size', dict(default=1, type=int)),
            ('--model_path', dict(default='./models', type=str, help='model path')),
            ('--model_name', dict(default='TubeTK', type=str, help='model name')),
            ('--data_url', dict(default='./data/', type=str, help='model path')),
            ('--output_dir', dict(default='./link_res', type=str, help='output path')),
            ('--apex', dict(action='store_true', help='whether use apex')),
            ('--config', dict(default='./configs/TubeTK_resnet_50_FPN_8frame_1stride.yaml', type=str, help='config file')),
            ('--dataset', dict(default='MOT17', type=str, help='test which dataset: MOT17, MOT15')),
            ('--trainOrTest', dict(default='test', type=str, help='evaluate train or test set')),
            ('--local_rank', dict(type=int, help='gpus')),
    ):
        cli.add_argument(flag, **opts)
    test_arg, unparsed = cli.parse_known_args()
    model_arg = __C
    if test_arg.config is not None:
        cfg_from_file(test_arg.config)
    test_arg.rank = int(os.environ["RANK"])
    main(test_arg, model_arg)
| [
"apex.amp.initialize",
"torch.distributed.is_available",
"torch.distributed.barrier",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"numpy.concatenate",
"dataset.dataLoader.Data_Loader_MOT",
"network.tubetk.TubeTK",
"torch.nn.parallel.DistributedDataParallel",
"torch.distributed.get... | [((426, 459), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (449, 459), False, 'import warnings\n'), ((751, 785), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (783, 785), False, 'import torch\n'), ((829, 856), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (854, 856), False, 'import torch\n'), ((937, 970), 'os.path.join', 'os.path.join', (['tmp_dir', 'video_name'], {}), '(tmp_dir, video_name)\n', (949, 970), False, 'import os\n'), ((1182, 1203), 'numpy.concatenate', 'np.concatenate', (['tubes'], {}), '(tubes)\n', (1196, 1203), True, 'import numpy as np\n'), ((1469, 1500), 'os.path.join', 'os.path.join', (['output_dir', '"""tmp"""'], {}), "(output_dir, 'tmp')\n", (1481, 1500), False, 'import os\n'), ((1570, 1605), 'os.makedirs', 'os.makedirs', (['tmp_dir'], {'exist_ok': '(True)'}), '(tmp_dir, exist_ok=True)\n', (1581, 1605), False, 'import os\n'), ((3846, 3920), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""', 'init_method': '"""env://"""'}), "(backend='nccl', init_method='env://')\n", (3882, 3920), False, 'import torch\n'), ((4026, 4059), 'torch.cuda.set_device', 'torch.cuda.set_device', (['local_rank'], {}), '(local_rank)\n', (4047, 4059), False, 'import torch\n'), ((4136, 4190), 'network.tubetk.TubeTK', 'TubeTK', ([], {'num_classes': '(1)', 'arg': 'model_arg', 'pretrained': '(False)'}), '(num_classes=1, arg=model_arg, pretrained=False)\n', (4142, 4190), False, 'from network.tubetk import TubeTK\n'), ((4210, 4434), 'dataset.dataLoader.Data_Loader_MOT', 'Data_Loader_MOT', ([], {'batch_size': 'test_arg.batch_size', 'num_workers': '(8)', 'input_path': 'test_arg.data_url', 'train_epoch': '(1)', 'test_epoch': '(1)', 'model_arg': 'model_arg', 'dataset': 'test_arg.dataset', 'test_seq': 'None', 'test_type': 'test_arg.trainOrTest'}), 
'(batch_size=test_arg.batch_size, num_workers=8, input_path=\n test_arg.data_url, train_epoch=1, test_epoch=1, model_arg=model_arg,\n dataset=test_arg.dataset, test_seq=None, test_type=test_arg.trainOrTest)\n', (4225, 4434), False, 'from dataset.dataLoader import Data_Loader_MOT\n'), ((4554, 4606), 'torch.nn.SyncBatchNorm.convert_sync_batchnorm', 'torch.nn.SyncBatchNorm.convert_sync_batchnorm', (['model'], {}), '(model)\n', (4599, 4606), False, 'import torch\n'), ((4696, 4828), 'torch.nn.parallel.DistributedDataParallel', 'torch.nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[local_rank]', 'output_device': 'local_rank', 'find_unused_parameters': '(True)'}), '(model, device_ids=[local_rank],\n output_device=local_rank, find_unused_parameters=True)\n', (4737, 4828), False, 'import torch\n'), ((6014, 6039), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6037, 6039), False, 'import argparse\n'), ((623, 655), 'torch.distributed.is_available', 'torch.distributed.is_available', ([], {}), '()\n', (653, 655), False, 'import torch\n'), ((683, 717), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (715, 717), False, 'import torch\n'), ((1394, 1420), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (1408, 1420), False, 'import os\n'), ((1430, 1453), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (1441, 1453), False, 'import os\n'), ((1518, 1540), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (1531, 1540), False, 'import shutil\n'), ((1651, 1673), 'tqdm.tqdm', 'tqdm', (['loader'], {'ncols': '(20)'}), '(loader, ncols=20)\n', (1655, 1673), False, 'from tqdm import tqdm\n'), ((2771, 2790), 'os.listdir', 'os.listdir', (['tmp_dir'], {}), '(tmp_dir)\n', (2781, 2790), False, 'import os\n'), ((2806, 2840), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': '(20)'}), '(processes=20)\n', (2826, 2840), False, 'import 
multiprocessing\n'), ((3034, 3059), 'tqdm.tqdm', 'tqdm', (['pool_list'], {'ncols': '(20)'}), '(pool_list, ncols=20)\n', (3038, 3059), False, 'from tqdm import tqdm\n'), ((3130, 3152), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (3143, 3152), False, 'import shutil\n'), ((4645, 4682), 'apex.amp.initialize', 'amp.initialize', (['model'], {'opt_level': '"""O1"""'}), "(model, opt_level='O1')\n", (4659, 4682), False, 'from apex import amp\n'), ((7051, 7081), 'configs.default.cfg_from_file', 'cfg_from_file', (['test_arg.config'], {}), '(test_arg.config)\n', (7064, 7081), False, 'from configs.default import __C, cfg_from_file\n'), ((1234, 1279), 'os.path.join', 'os.path.join', (['output_dir', "(video_name + '.txt')"], {}), "(output_dir, video_name + '.txt')\n", (1246, 1279), False, 'import os\n'), ((1788, 1803), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1801, 1803), False, 'import torch\n'), ((1023, 1045), 'os.listdir', 'os.listdir', (['tubes_path'], {}), '(tubes_path)\n', (1033, 1045), False, 'import os\n'), ((2427, 2472), 'os.path.join', 'os.path.join', (['tmp_dir', "img_meta['video_name']"], {}), "(tmp_dir, img_meta['video_name'])\n", (2439, 2472), False, 'import os\n'), ((3361, 3392), 'os.path.join', 'os.path.join', (['output_dir', '"""res"""'], {}), "(output_dir, 'res')\n", (3373, 3392), False, 'import os\n'), ((3423, 3463), 'os.path.join', 'os.path.join', (['test_arg.data_url', '"""train"""'], {}), "(test_arg.data_url, 'train')\n", (3435, 3463), False, 'import os\n'), ((2970, 3001), 'os.path.join', 'os.path.join', (['output_dir', '"""res"""'], {}), "(output_dir, 'res')\n", (2982, 3001), False, 'import os\n')] |
import cv2
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from mpl_toolkits.mplot3d import Axes3D
# Initialize webcam input
# NOTE(review): this capture is immediately replaced by the file capture
# below; it is kept only so switching back to the webcam is a one-line edit.
cap = cv2.VideoCapture(0)
# Initialize video input
####BSL Corpus#####
#cap = cv2.VideoCapture("C:/Users/liangx/Desktop/Trajectoreis_test/BSL Corpus Data Results/5/BL9i.MOV")
#cap = cv2.VideoCapture("C:/Users/liangx/Desktop/Trajectoreis_test/BSL Corpus Data Results/4/BM17i.MOV")
#cap = cv2.VideoCapture("C:/Users/liangx/Desktop/Trajectoreis_test/BSL Corpus Data Results/3/BF3i.MOV")
#cap = cv2.VideoCapture("C:/Users/liangx/Desktop/Trajectoreis_test/BSL Corpus Data Results/2/G11c.MOV")
#cap = cv2.VideoCapture("C:/Users/liangx/Desktop/Trajectoreis_test/BSL Corpus Data Results/1/BF14c.MOV")
####Sign Bank #####
#cap = cv2.VideoCapture("C:/Users/liangx/Desktop/Trajectoreis_test/SignBank Data Results/5/SO-WHAT.mp4")
#cap = cv2.VideoCapture("C:/Users/liangx/Desktop/Trajectoreis_test/SignBank Data Results/4/POLICE.mp4")
#cap = cv2.VideoCapture("C:/Users/liangx/Desktop/Trajectoreis_test/SignBank Data Results/3/BISCUIT.mp4")
#cap = cv2.VideoCapture("C:/Users/liangx/Desktop/Trajectoreis_test/SignBank Data Results/2/AFRICA.mp4")
#cap = cv2.VideoCapture("C:/Users/liangx/Desktop/Trajectoreis_test/SignBank Data Results/1/ACCEPT.mp4")
###################
cap = cv2.VideoCapture("C:/Users/liangx/Documents/Dunhill Project Data/Single Sign/Stomach/FARM.mp4")
#cap = cv2.VideoCapture("C:/Users/liangx/Documents/Dunhill Project Data/Single Sign/Below Waist/LAP.mp4")
#cap = cv2.VideoCapture("C:/Users/liangx/Documents/Dunhill Project Data/Restricted/L2n.mov")
#cap = cv2.VideoCapture("C:/Users/liangx/Documents/Dunhill Project Data/Single Sign/Pronated Wrist/WATCH2.mp4")
#cap = cv2.VideoCapture("C:/Users/liangx/Documents/Dunhill Project Data/Conversation/L12n.mov")
#cap = cv2.VideoCapture("C:/Users/liangx/Documents/Dunhill Project Data/Conversation/CF27l.mov")
# Set different colour conversion models {1 : HSV,2 : YCrCb,3 : LAB, 4 : XYZ,}
COLOUR_MODEL = 1;
# Set Tracking Delay (i.e. delay in number of frames) to wait for KNN background subtraction work (Camera: 30; Video: 5)
DELAY= 10
# Set contour radius to denoise, only contours big enough are tracked (Camera: 45-55, adjust the value depending on distance between tracking object and camera; Video: 35)
RADIUS = 35
# Set frame count number for tracking trails reset (when there is no hands being detected)
FRAME = 100
# Initialize frame_count (frames seen since the last hand detection)
frame_count = 0
# Create empty points array for hand trajectories tracking
# Each entry is (x, y, elapsed_ms) — see milliseconds() below.
points_left = []
points_right = []
def milliseconds(since=None):
    """Return the elapsed milliseconds from *since* until now.

    When *since* is None (the only way the script calls it), measures
    from the module-level ``start_time`` captured just before the main
    loop, preserving the original behaviour.  Accepting an explicit
    start also makes the helper usable/testable in isolation.
    """
    base = start_time if since is None else since
    delta = datetime.now() - base
    # total_seconds() already folds days, seconds and microseconds
    # together — equivalent to the original hand-rolled arithmetic.
    return delta.total_seconds() * 1000.0
def get_contour_areas(contours):
    """Return the area of every contour, in input order."""
    return [cv2.contourArea(contour) for contour in contours]
def x_cord_contour(contours):
    """Return the x coordinate of the contour's centroid (sort key)."""
    moments = cv2.moments(contours)
    return int(moments['m10'] / moments['m00'])
def plot_trajectories(center, str, clr):
    """Plot one hand's path in the X-Y plane.

    *center* is a sequence of (x, y, t) tuples; only x and y are used.
    """
    xs = [pt[0] for pt in center]
    ys = [pt[1] for pt in center]
    plt.plot(xs, ys, color=clr)
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.title(str + ' hand trajectories')
    # OpenCV images have their origin at the top-left corner, so the Y
    # axis is flipped here for a natural-looking plot.
    plt.gca().invert_yaxis()
    plt.show()
    return None
def plot_trajectories_vstime(center, str):
    """Plot one hand's X(t) and Y(t) curves on a shared time axis."""
    ts = [pt[2] for pt in center]
    plt.plot(ts, [pt[0] for pt in center], color='b', marker='o', label='$X-Trajectory$')
    plt.plot(ts, [pt[1] for pt in center], color='y', marker='^', label='$Y-Trajectory$')
    plt.xlabel('Time')
    plt.ylabel('X-Y')
    plt.title(str + ' hand trajectories')
    # Flip Y to undo OpenCV's top-left image origin.
    plt.gca().invert_yaxis()
    plt.legend(loc='upper right')
    plt.show()
    return None
def plot_trajectories_3d(center, str, clr):
    """Render one hand's trajectory in 3-D, with time on the middle axis."""
    xs = [pt[0] for pt in center]
    ys = [pt[1] for pt in center]
    ts = [pt[2] for pt in center]
    plt.figure()
    axes3d = plt.axes(projection='3d')
    axes3d.plot3D(xs, ts, ys, color=clr, marker='o')
    axes3d.set_xlabel('X')
    axes3d.set_ylabel('Time (ms)')
    axes3d.set_zlabel('Y')
    axes3d.set_title(str + '-Trajectory')
    # The image Y coordinate is mapped onto Z; invert it to undo
    # OpenCV's top-left origin.
    plt.gca().invert_zaxis()
    plt.show()
    return None
def plot_trajectory_diagrams():
    """Emit all six summary plots (X-Y, vs-time, 3-D) for both hands.

    The order matches the original call sequence: both X-Y plots first,
    then both vs-time plots, then both 3-D plots, left hand before right.
    """
    hands = (("Left", points_left, "red"), ("Right", points_right, "green"))
    for label, pts, colour in hands:
        plot_trajectories(pts, label, colour)
    for label, pts, _ in hands:
        plot_trajectories_vstime(pts, DATE + " " + label)
    for label, pts, colour in hands:
        plot_trajectories_3d(pts, DATE + " " + label, colour)
    return None
# Colour conversion function blocks: each converts the current BGR frame
# to one colour space, shows it, and returns the converted image.
def HSV():
    """Show and return the current frame converted BGR -> HSV."""
    hsv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    cv2.imshow('HSV Colour Model Image:', hsv_img)
    return hsv_img
def YCrCb():
    """Show and return the current frame converted BGR -> YCrCb."""
    ycrcb_img = cv2.cvtColor(frame, cv2.COLOR_BGR2YCR_CB)
    cv2.imshow('YCrCb Colour Model Image:', ycrcb_img)
    return ycrcb_img
def LAB():
    """Show and return the current frame converted BGR -> CIE LAB."""
    lab_img = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
    cv2.imshow('CIE LAB Colour Model Image:', lab_img)
    return lab_img
def XYZ():
    """Show and return the current frame converted BGR -> CIE XYZ."""
    xyz_img = cv2.cvtColor(frame, cv2.COLOR_BGR2XYZ)
    cv2.imshow('CIE XYZ Colour Model Image:', xyz_img)
    return xyz_img
# Dispatch table: COLOUR_MODEL id -> colour conversion function.
options = {1 : HSV,
           2 : YCrCb,
           3 : LAB,
           4 : XYZ,
           }
# Per-colour-space skin threshold blocks: each returns the (lower, upper)
# bounds used by cv2.inRange for skin segmentation.
def HSV_thre():
    """Return (lower, upper) skin-colour bounds for the HSV model."""
    return (np.array([0, 48, 80], dtype="uint8"),
            np.array([20, 255, 255], dtype="uint8"))
def YCrCb_thre():
    """Return (lower, upper) skin-colour bounds for the YCrCb model."""
    return (np.array((0, 133, 77), dtype="uint8"),
            np.array((255, 173, 127), dtype="uint8"))
def LAB_thre():
    """Return (lower, upper) skin-colour bounds for the CIE LAB model."""
    return (np.array((20, 128, 130), dtype="uint8"),
            np.array((220, 245, 255), dtype="uint8"))
def XYZ_thre():
    """Return (lower, upper) skin-colour bounds for the CIE XYZ model."""
    return (np.array((79, 80, 30), dtype="uint8"),
            np.array((240, 240, 140), dtype="uint8"))
# Dispatch table: COLOUR_MODEL id -> skin-threshold function.
options_thre = {1 : HSV_thre,
                2 : YCrCb_thre,
                3 : LAB_thre,
                4 : XYZ_thre,
                }
# Resolve the (lower, upper) skin bounds once for the selected colour model.
lower_thresh, upper_thresh = options_thre[COLOUR_MODEL]()
# Current date (used in plot titles) and the reference time for milliseconds().
DATE= datetime.now().strftime('%Y:%m:%d')
start_time = datetime.now()
# Main capture loop: runs until the video ends or Enter (key 13) is pressed.
while cap.isOpened():
    # Read webcam/video image
    ret, frame = cap.read()
    # when there is a video input
    if ret == True:
        # Get default camera/video window size
        Height, Width = frame.shape[:2]
        # Invoke the selected colour conversion block (shows and returns
        # the converted frame).
        converted_img = options[COLOUR_MODEL]()
        # Face Detection Using HAAR CASCADE
        # NOTE(review): the cascade is re-loaded from disk on every frame;
        # hoisting this above the loop would be a cheap speed-up.
        hc_face = cv2.CascadeClassifier("C:/Users/liangx/source/repos/Skin Detection/haarcascade_frontalface_alt/haarcascade_frontalface_alt.xml")
        faces = hc_face.detectMultiScale(converted_img)
        for (x,y,w,h) in faces:
            # If we do not draw a box on face, then use the code below
            #cv2.rectangle(converted_img, (x,y), (x+w,y+h), 255, thickness=2)
            # Paint a filled white box over the face (slightly enlarged) so
            # face skin does not pollute the hand-skin mask below.
            cv2.rectangle(converted_img, (x-10,y-30), (x+w+10, y+h+80), (255,255,255), -1)
            crop_img = frame[y+2:y+w, x+2:x+h]
            cv2.imshow('Face Detection', crop_img)
        # Use inRange to capture only the values between lower & upper_thresh for skin detection
        mask = cv2.inRange(converted_img, lower_thresh, upper_thresh)
        # Adding morphology effects (erode -> close -> dilate) to denoise
        kernel_morphology =np.ones((5, 5), np.uint8)
        mask = cv2.erode(mask, kernel_morphology, iterations=1)
        mask=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel_morphology)
        mask = cv2.dilate(mask, kernel_morphology, iterations=1)
        cv2.imshow('Skin colour + Morpho Mask', mask)
        # Perform Bitwise AND on mask and original frame
        # rest1 is the results after applying morphology effects + skin filtering
        rest1 = cv2.bitwise_and(frame, frame, mask= mask)
        # Find contours on mask
        # cv2.RETR_EXTERNAL finds external contours only; cv2.CHAIN_APPROX_SIMPLE only provides start and end points of bounding contours, thus resulting in much more efficent storage of contour information.
        _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        #print ("Number of contours1 found = ", len(contours))
        # When both hands are detected
        if len(contours) >=2:
            # Get the largest two contours and its center (i.e. two hands)
            sorted_contours = sorted(contours, key=cv2.contourArea, reverse=True)[:2]
            # Sort by centroid x with reverse=True (descending x).
            # NOTE(review): descending x puts the image-right contour first,
            # yet branch i == 0 below is labelled "Left" — presumably a
            # mirror-view convention; confirm against the recordings.
            contours_left_to_right = sorted(sorted_contours, key = x_cord_contour, reverse = True)
            # Iterate over two contours and draw one at a time
            for (i,c) in enumerate(contours_left_to_right):
                # Draw Convex Hull Contour
                hull=cv2.convexHull(c)
                cv2.drawContours(rest1, [hull], -1, (0,0,255), 3)
                # Draw Normal Contour
                cv2.drawContours(rest1, [c], -1, (255,0,0), 3)
                # Show hands Contour
                cv2.imshow('Contours by area', rest1)
                # Tracking Left hand
                if i == 0:
                    (x, y), radius = cv2.minEnclosingCircle(c)
                    M = cv2.moments(c)
                    # Trajectory sample: (centroid x, centroid y, elapsed ms)
                    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]), milliseconds())
                    # Draw cirlce and leave the last center creating a trail
                    cv2.circle(frame, (int(x), int(y)), int(radius),(0, 0, 255), 2)
                    # Only contours with radius > RADIUS are tracked (de-noise)
                    if radius > RADIUS:
                        points_left.append(center)
                        # loop over the set of tracked points to draw tracking lines (starts with frames delay- to wait for KNN background subtraction work)
                        for l in range(DELAY, len(points_left)):
                            try:
                                cv2.line(frame, points_left[l - 1][:2], points_left[l][:2], (0, 0, 255), 2)
                            except:
                                pass
                        frame_count = 0
                    else:
                        frame_count += 1
                        # If there is no hand detected, when count frames to FRAME, plot trajectories before clear the trajectories trails
                        if frame_count == FRAME:
                            #print("frame_count",frame_count)
                            plot_trajectory_diagrams()
                            points_left = []
                            points_right = []
                            frame_count = 0
                # Tracking Right hand
                else:
                    (x, y), radius = cv2.minEnclosingCircle(c)
                    M = cv2.moments(c)
                    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]), milliseconds())
                    # Draw cirlce and leave the last center creating a trail
                    cv2.circle(frame, (int(x), int(y)), int(radius),(0, 255, 0), 2)
                    # loop over the set of tracked points
                    if radius > RADIUS:
                        points_right.append(center)
                        for l in range(DELAY, len(points_right)):
                            try:
                                cv2.line(frame, points_right[l - 1][:2], points_right[l][:2], (0, 255, 0), 2)
                            except:
                                pass
                        frame_count = 0
                    else:
                        frame_count += 1
                        # If there is no hand detected, when count frames to FRAME, plot trajectories before clear the trajectories trails
                        if frame_count == FRAME:
                            #print("frame_count",frame_count)
                            plot_trajectory_diagrams()
                            points_left = []
                            points_right = []
                            frame_count = 0
        else:
            pass
        # Display our object tracker
        #frame = cv2.flip(frame, 1)
        cv2.imshow("Object Tracker", frame)
        if cv2.waitKey(1) == 13: #13 is the Enter Key
            plot_trajectory_diagrams()
            break
    else:
        # No frame available (end of video): still allow Enter to plot and quit.
        if cv2.waitKey(1) == 13: #13 is the Enter Key
            plot_trajectory_diagrams()
            break
cap.release()
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"matplotlib.pyplot.ylabel",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.CascadeClassifier",
"cv2.erode",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"cv2.line",
"cv2.contourArea",
"cv2.waitKey",
"cv2.drawContours",
"numpy.ones",
"matplotlib.pyplo... | [((165, 184), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (181, 184), False, 'import cv2\n'), ((1324, 1429), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""C:/Users/liangx/Documents/Dunhill Project Data/Single Sign/Stomach/FARM.mp4"""'], {}), "(\n 'C:/Users/liangx/Documents/Dunhill Project Data/Single Sign/Stomach/FARM.mp4'\n )\n", (1340, 1429), False, 'import cv2\n'), ((7530, 7544), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7542, 7544), False, 'from datetime import datetime\n'), ((14518, 14541), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (14539, 14541), False, 'import cv2\n'), ((3192, 3213), 'cv2.moments', 'cv2.moments', (['contours'], {}), '(contours)\n', (3203, 3213), False, 'import cv2\n'), ((3382, 3409), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys'], {'color': 'clr'}), '(xs, ys, color=clr)\n', (3390, 3409), True, 'import matplotlib.pyplot as plt\n'), ((3415, 3430), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (3425, 3430), True, 'import matplotlib.pyplot as plt\n'), ((3435, 3450), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (3445, 3450), True, 'import matplotlib.pyplot as plt\n'), ((3455, 3492), 'matplotlib.pyplot.title', 'plt.title', (["(str + ' hand trajectories')"], {}), "(str + ' hand trajectories')\n", (3464, 3492), True, 'import matplotlib.pyplot as plt\n'), ((3725, 3735), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3733, 3735), True, 'import matplotlib.pyplot as plt\n'), ((3924, 3987), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'xs'], {'color': '"""b"""', 'marker': '"""o"""', 'label': '"""$X-Trajectory$"""'}), "(ts, xs, color='b', marker='o', label='$X-Trajectory$')\n", (3932, 3987), True, 'import matplotlib.pyplot as plt\n'), ((3992, 4055), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'ys'], {'color': '"""y"""', 'marker': '"""^"""', 'label': '"""$Y-Trajectory$"""'}), "(ts, ys, color='y', 
marker='^', label='$Y-Trajectory$')\n", (4000, 4055), True, 'import matplotlib.pyplot as plt\n'), ((4060, 4078), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (4070, 4078), True, 'import matplotlib.pyplot as plt\n'), ((4083, 4100), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X-Y"""'], {}), "('X-Y')\n", (4093, 4100), True, 'import matplotlib.pyplot as plt\n'), ((4105, 4142), 'matplotlib.pyplot.title', 'plt.title', (["(str + ' hand trajectories')"], {}), "(str + ' hand trajectories')\n", (4114, 4142), True, 'import matplotlib.pyplot as plt\n'), ((4421, 4450), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (4431, 4450), True, 'import matplotlib.pyplot as plt\n'), ((4455, 4465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4463, 4465), True, 'import matplotlib.pyplot as plt\n'), ((4676, 4688), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4686, 4688), True, 'import matplotlib.pyplot as plt\n'), ((4698, 4723), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (4706, 4723), True, 'import matplotlib.pyplot as plt\n'), ((5103, 5113), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5111, 5113), True, 'import matplotlib.pyplot as plt\n'), ((5651, 5689), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (5663, 5689), False, 'import cv2\n'), ((5694, 5740), 'cv2.imshow', 'cv2.imshow', (['"""HSV Colour Model Image:"""', 'con_img'], {}), "('HSV Colour Model Image:', con_img)\n", (5704, 5740), False, 'import cv2\n'), ((5788, 5829), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2YCR_CB'], {}), '(frame, cv2.COLOR_BGR2YCR_CB)\n', (5800, 5829), False, 'import cv2\n'), ((5834, 5882), 'cv2.imshow', 'cv2.imshow', (['"""YCrCb Colour Model Image:"""', 'con_img'], {}), "('YCrCb Colour Model Image:', con_img)\n", (5844, 5882), False, 'import cv2\n'), ((5928, 
5966), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2LAB'], {}), '(frame, cv2.COLOR_BGR2LAB)\n', (5940, 5966), False, 'import cv2\n'), ((5971, 6021), 'cv2.imshow', 'cv2.imshow', (['"""CIE LAB Colour Model Image:"""', 'con_img'], {}), "('CIE LAB Colour Model Image:', con_img)\n", (5981, 6021), False, 'import cv2\n'), ((6066, 6104), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2XYZ'], {}), '(frame, cv2.COLOR_BGR2XYZ)\n', (6078, 6104), False, 'import cv2\n'), ((6108, 6158), 'cv2.imshow', 'cv2.imshow', (['"""CIE XYZ Colour Model Image:"""', 'con_img'], {}), "('CIE XYZ Colour Model Image:', con_img)\n", (6118, 6158), False, 'import cv2\n'), ((6441, 6477), 'numpy.array', 'np.array', (['[0, 48, 80]'], {'dtype': '"""uint8"""'}), "([0, 48, 80], dtype='uint8')\n", (6449, 6477), True, 'import numpy as np\n'), ((6496, 6535), 'numpy.array', 'np.array', (['[20, 255, 255]'], {'dtype': '"""uint8"""'}), "([20, 255, 255], dtype='uint8')\n", (6504, 6535), True, 'import numpy as np\n'), ((6638, 6675), 'numpy.array', 'np.array', (['(0, 133, 77)'], {'dtype': '"""uint8"""'}), "((0, 133, 77), dtype='uint8')\n", (6646, 6675), True, 'import numpy as np\n'), ((6693, 6733), 'numpy.array', 'np.array', (['(255, 173, 127)'], {'dtype': '"""uint8"""'}), "((255, 173, 127), dtype='uint8')\n", (6701, 6733), True, 'import numpy as np\n'), ((6829, 6868), 'numpy.array', 'np.array', (['(20, 128, 130)'], {'dtype': '"""uint8"""'}), "((20, 128, 130), dtype='uint8')\n", (6837, 6868), True, 'import numpy as np\n'), ((6887, 6927), 'numpy.array', 'np.array', (['(220, 245, 255)'], {'dtype': '"""uint8"""'}), "((220, 245, 255), dtype='uint8')\n", (6895, 6927), True, 'import numpy as np\n'), ((6999, 7036), 'numpy.array', 'np.array', (['(79, 80, 30)'], {'dtype': '"""uint8"""'}), "((79, 80, 30), dtype='uint8')\n", (7007, 7036), True, 'import numpy as np\n'), ((7055, 7095), 'numpy.array', 'np.array', (['(240, 240, 140)'], {'dtype': '"""uint8"""'}), "((240, 240, 140), dtype='uint8')\n", (7063, 
7095), True, 'import numpy as np\n'), ((2706, 2720), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2718, 2720), False, 'from datetime import datetime\n'), ((2996, 3016), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (3011, 3016), False, 'import cv2\n'), ((7481, 7495), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7493, 7495), False, 'from datetime import datetime\n'), ((8036, 8174), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""C:/Users/liangx/source/repos/Skin Detection/haarcascade_frontalface_alt/haarcascade_frontalface_alt.xml"""'], {}), "(\n 'C:/Users/liangx/source/repos/Skin Detection/haarcascade_frontalface_alt/haarcascade_frontalface_alt.xml'\n )\n", (8057, 8174), False, 'import cv2\n'), ((8820, 8874), 'cv2.inRange', 'cv2.inRange', (['converted_img', 'lower_thresh', 'upper_thresh'], {}), '(converted_img, lower_thresh, upper_thresh)\n', (8831, 8874), False, 'import cv2\n'), ((8959, 8984), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (8966, 8984), True, 'import numpy as np\n'), ((9000, 9048), 'cv2.erode', 'cv2.erode', (['mask', 'kernel_morphology'], {'iterations': '(1)'}), '(mask, kernel_morphology, iterations=1)\n', (9009, 9048), False, 'import cv2\n'), ((9062, 9120), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_CLOSE', 'kernel_morphology'], {}), '(mask, cv2.MORPH_CLOSE, kernel_morphology)\n', (9078, 9120), False, 'import cv2\n'), ((9134, 9183), 'cv2.dilate', 'cv2.dilate', (['mask', 'kernel_morphology'], {'iterations': '(1)'}), '(mask, kernel_morphology, iterations=1)\n', (9144, 9183), False, 'import cv2\n'), ((9192, 9237), 'cv2.imshow', 'cv2.imshow', (['"""Skin colour + Morpho Mask"""', 'mask'], {}), "('Skin colour + Morpho Mask', mask)\n", (9202, 9237), False, 'import cv2\n'), ((9394, 9434), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (9409, 9434), False, 'import cv2\n'), ((14224, 14259), 
'cv2.imshow', 'cv2.imshow', (['"""Object Tracker"""', 'frame'], {}), "('Object Tracker', frame)\n", (14234, 14259), False, 'import cv2\n'), ((3497, 3506), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3504, 3506), True, 'import matplotlib.pyplot as plt\n'), ((4147, 4156), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4154, 4156), True, 'import matplotlib.pyplot as plt\n'), ((4937, 4946), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4944, 4946), True, 'import matplotlib.pyplot as plt\n'), ((8510, 8608), 'cv2.rectangle', 'cv2.rectangle', (['converted_img', '(x - 10, y - 30)', '(x + w + 10, y + h + 80)', '(255, 255, 255)', '(-1)'], {}), '(converted_img, (x - 10, y - 30), (x + w + 10, y + h + 80), (\n 255, 255, 255), -1)\n', (8523, 8608), False, 'import cv2\n'), ((8648, 8686), 'cv2.imshow', 'cv2.imshow', (['"""Face Detection"""', 'crop_img'], {}), "('Face Detection', crop_img)\n", (8658, 8686), False, 'import cv2\n'), ((14281, 14295), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (14292, 14295), False, 'import cv2\n'), ((14403, 14417), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (14414, 14417), False, 'import cv2\n'), ((10524, 10541), 'cv2.convexHull', 'cv2.convexHull', (['c'], {}), '(c)\n', (10538, 10541), False, 'import cv2\n'), ((10558, 10609), 'cv2.drawContours', 'cv2.drawContours', (['rest1', '[hull]', '(-1)', '(0, 0, 255)', '(3)'], {}), '(rest1, [hull], -1, (0, 0, 255), 3)\n', (10574, 10609), False, 'import cv2\n'), ((10685, 10733), 'cv2.drawContours', 'cv2.drawContours', (['rest1', '[c]', '(-1)', '(255, 0, 0)', '(3)'], {}), '(rest1, [c], -1, (255, 0, 0), 3)\n', (10701, 10733), False, 'import cv2\n'), ((10812, 10849), 'cv2.imshow', 'cv2.imshow', (['"""Contours by area"""', 'rest1'], {}), "('Contours by area', rest1)\n", (10822, 10849), False, 'import cv2\n'), ((10956, 10981), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (10978, 10981), False, 'import cv2\n'), ((11006, 11020), 
'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (11017, 11020), False, 'import cv2\n'), ((12758, 12783), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (12780, 12783), False, 'import cv2\n'), ((12808, 12822), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (12819, 12822), False, 'import cv2\n'), ((11881, 11956), 'cv2.line', 'cv2.line', (['frame', 'points_left[l - 1][:2]', 'points_left[l][:2]', '(0, 0, 255)', '(2)'], {}), '(frame, points_left[l - 1][:2], points_left[l][:2], (0, 0, 255), 2)\n', (11889, 11956), False, 'import cv2\n'), ((13385, 13462), 'cv2.line', 'cv2.line', (['frame', 'points_right[l - 1][:2]', 'points_right[l][:2]', '(0, 255, 0)', '(2)'], {}), '(frame, points_right[l - 1][:2], points_right[l][:2], (0, 255, 0), 2)\n', (13393, 13462), False, 'import cv2\n')] |
import matplotlib
matplotlib.use("Agg")  # headless backend: render straight to file, no display needed
import matplotlib.pyplot as plt
import numpy as np
# Fit a normal distribution to the samples in sample.dat and overlay its PDF
# on a normalized histogram, saving the figure to sample.pdf.
data = np.loadtxt("sample.dat")
mean = data.mean()
sigma = data.std()
x = np.linspace(data.min(), data.max(), 100)
# Gaussian PDF evaluated with the sample mean and standard deviation.
y = np.exp(-0.5 * ((x - mean) / sigma) ** 2)
y = y / (np.sqrt(2.0 * np.pi * sigma ** 2))
# `normed` was removed in matplotlib 3.1; `density=True` is the replacement.
plt.hist(data, alpha=0.5, bins=20, density=True, label="Data. N={}".format(len(data)))
plt.plot(x, y, label="Estimate. mean={:.1f} sigma={:.1f}".format(mean, sigma))
plt.xlabel("x")
plt.ylabel("PDF (x)")
plt.legend(loc=2)
plt.savefig("sample.pdf")
| [
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"numpy.exp",
"numpy.loadtxt",
"matplotlib.pyplot.legend"
] | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((100, 124), 'numpy.loadtxt', 'np.loadtxt', (['"""sample.dat"""'], {}), "('sample.dat')\n", (110, 124), True, 'import numpy as np\n'), ((214, 254), 'numpy.exp', 'np.exp', (['(-0.5 * ((x - mean) / sigma) ** 2)'], {}), '(-0.5 * ((x - mean) / sigma) ** 2)\n', (220, 254), True, 'import numpy as np\n'), ((455, 470), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (465, 470), True, 'import matplotlib.pyplot as plt\n'), ((471, 492), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""PDF (x)"""'], {}), "('PDF (x)')\n", (481, 492), True, 'import matplotlib.pyplot as plt\n'), ((493, 510), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (503, 510), True, 'import matplotlib.pyplot as plt\n'), ((511, 536), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""sample.pdf"""'], {}), "('sample.pdf')\n", (522, 536), True, 'import matplotlib.pyplot as plt\n'), ((258, 291), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi * sigma ** 2)'], {}), '(2.0 * np.pi * sigma ** 2)\n', (265, 291), True, 'import numpy as np\n')] |
import os, sys
# Make the project root importable when the script is run from its own folder.
sys.path.append(os.path.dirname(os.path.realpath(f'{__file__}/..')))
import pathlib
import numpy as np
from time import sleep
from src.GymEnvs import GiadogEnv, TerrainScene, QuadrupedRobot, GiadogEasyEnv
from src.__env__ import CONTROLLER_LATENCY_STEPS, GRAVITY_VECTOR, \
    HISTORIAL_DATA, NON_PRIVILIGED_DATA, NON_PRIVILIGED_METHODS, \
    PRIVILIGED_DATA, PRIVILIGED_METHODS, ACTION_DATA, SELECTED_GAIT, \
    SIM_SECONDS_PER_STEP, UPDATE_METHODS, STATE_SIZE, PRIVILEGED_STATE_SIZE,\
    STATE_FEATURES, PRIVILEGED_STATE_FEATURES, JOINTS_IDS, ACTION_SPACE_SIZE,\
    TOES_IDS, H_OFF, V_OFF, SHANK_L, THIGH_L
from src.GymEnvs import pyBulletPainter as pbp
from src.GymEnvs.TestingFunctions import test_height_scan
from src.simulation import hills, steps, stairs
from pybullet_utils import bullet_client
import pybullet
import matplotlib.pyplot as plt
# Absolute path to the quadruped URDF, resolved relative to this file.
quadruped_urdf = str(pathlib.Path(__file__).parent.parent.resolve()) +\
    '/mini_ros/urdf/spot.urdf'
terrain_file = 'terrains/steps.txt'
# Start a pyBullet session with the GUI enabled.
client = bullet_client.BulletClient(pybullet.GUI)
robot = QuadrupedRobot(
    quadruped_urdf,
    client,
    UPDATE_METHODS,
    STATE_FEATURES,
    PRIVILEGED_STATE_FEATURES
)
scene = TerrainScene(
    client,
    GRAVITY_VECTOR,
    SIM_SECONDS_PER_STEP,
    CONTROLLER_LATENCY_STEPS
)
# Reset the simulation and build a procedurally generated hills terrain.
client.resetSimulation()
scene.episode_restart()
#steps_terray = steps(100, 100, 0.4, 0.3, 4)
hills_terrain = hills(100, 100, 0.001, 0.1, 0.01, 452)
scene.load_terrain_from_array(hills_terrain)
scene.place_terrain_in_simulation(robot, first_execution = True)
robot.add_to_scene(scene, first_execution = True)
# NOTE(review): x and y are sampled but never used -- the reset below uses a
# fixed spawn position instead. Possibly leftover from a randomized spawn.
x = np.random.uniform(-4, 4)
y = np.random.uniform(-4, 4)
robot.reset(np.array([3, -3]))
# Histories of the robot's estimated base velocity and of an exponentially
# smoothed simulator-reported velocity, one entry per controller step.
X_vel = [np.zeros(3)]
X_vel_real = [np.zeros(3)]
# Phase 1: step until scene.elapsed_time reaches 58 (time units per the
# scene, presumably seconds) while commanding an all-zeros action.
while scene.elapsed_time < 58:
    # Update the state of the robot
    state = robot.state()
    scene.global_step()
    robot.turn_dir = np.array([0])
    robot.apply_action(np.zeros(ACTION_SPACE_SIZE))
    robot.elapsed_time = scene.elapsed_time
    estimated_base_velocity = robot.base_linear_vel_estimation
    X_vel.append(estimated_base_velocity)
    # Exponential moving average of the simulator-reported velocity.
    X_vel_real.append(robot.base_linear_vel* 0.01 + X_vel_real[-1]*0.99)
# Phase 2: keep stepping until elapsed_time reaches 62 without applying any
# action at all.
while scene.elapsed_time < 62:
    # Update the state of the robot
    state = robot.state()
    scene.global_step()
    #robot.apply_action(np.zeros(ACTION_SPACE_SIZE))
    robot.elapsed_time = scene.elapsed_time
    estimated_base_velocity = robot.base_linear_vel_estimation
    X_vel.append(estimated_base_velocity)
    X_vel_real.append(robot.base_linear_vel* 0.01 + X_vel_real[-1]*0.99)
X_vel = np.array(X_vel)
X_vel_real = np.array(X_vel_real)
# Integrate both velocity histories over time to compare total displacement.
print(sum([x * SIM_SECONDS_PER_STEP * CONTROLLER_LATENCY_STEPS for x in X_vel]))
print(sum([x * SIM_SECONDS_PER_STEP * CONTROLLER_LATENCY_STEPS for x in X_vel_real]))
# Plot estimated vs. smoothed-true velocity, one figure per axis component.
plt.plot(X_vel_real[:, 0])
plt.plot(X_vel[:, 0])
plt.show()
plt.plot(X_vel_real[:, 1])
plt.plot(X_vel[:, 1])
plt.show()
plt.plot(X_vel_real[:, 2])
plt.plot(X_vel[:, 2])
plt.show()
| [
"pathlib.Path",
"matplotlib.pyplot.plot",
"src.simulation.hills",
"src.GymEnvs.QuadrupedRobot",
"os.path.realpath",
"src.GymEnvs.TerrainScene",
"numpy.array",
"numpy.zeros",
"numpy.random.uniform",
"pybullet_utils.bullet_client.BulletClient",
"matplotlib.pyplot.show"
] | [((1048, 1088), 'pybullet_utils.bullet_client.BulletClient', 'bullet_client.BulletClient', (['pybullet.GUI'], {}), '(pybullet.GUI)\n', (1074, 1088), False, 'from pybullet_utils import bullet_client\n'), ((1098, 1199), 'src.GymEnvs.QuadrupedRobot', 'QuadrupedRobot', (['quadruped_urdf', 'client', 'UPDATE_METHODS', 'STATE_FEATURES', 'PRIVILEGED_STATE_FEATURES'], {}), '(quadruped_urdf, client, UPDATE_METHODS, STATE_FEATURES,\n PRIVILEGED_STATE_FEATURES)\n', (1112, 1199), False, 'from src.GymEnvs import GiadogEnv, TerrainScene, QuadrupedRobot, GiadogEasyEnv\n'), ((1309, 1397), 'src.GymEnvs.TerrainScene', 'TerrainScene', (['client', 'GRAVITY_VECTOR', 'SIM_SECONDS_PER_STEP', 'CONTROLLER_LATENCY_STEPS'], {}), '(client, GRAVITY_VECTOR, SIM_SECONDS_PER_STEP,\n CONTROLLER_LATENCY_STEPS)\n', (1321, 1397), False, 'from src.GymEnvs import GiadogEnv, TerrainScene, QuadrupedRobot, GiadogEasyEnv\n'), ((1590, 1628), 'src.simulation.hills', 'hills', (['(100)', '(100)', '(0.001)', '(0.1)', '(0.01)', '(452)'], {}), '(100, 100, 0.001, 0.1, 0.01, 452)\n', (1595, 1628), False, 'from src.simulation import hills, steps, stairs\n'), ((1795, 1819), 'numpy.random.uniform', 'np.random.uniform', (['(-4)', '(4)'], {}), '(-4, 4)\n', (1812, 1819), True, 'import numpy as np\n'), ((1824, 1848), 'numpy.random.uniform', 'np.random.uniform', (['(-4)', '(4)'], {}), '(-4, 4)\n', (1841, 1848), True, 'import numpy as np\n'), ((2802, 2817), 'numpy.array', 'np.array', (['X_vel'], {}), '(X_vel)\n', (2810, 2817), True, 'import numpy as np\n'), ((2831, 2851), 'numpy.array', 'np.array', (['X_vel_real'], {}), '(X_vel_real)\n', (2839, 2851), True, 'import numpy as np\n'), ((3019, 3045), 'matplotlib.pyplot.plot', 'plt.plot', (['X_vel_real[:, 0]'], {}), '(X_vel_real[:, 0])\n', (3027, 3045), True, 'import matplotlib.pyplot as plt\n'), ((3046, 3067), 'matplotlib.pyplot.plot', 'plt.plot', (['X_vel[:, 0]'], {}), '(X_vel[:, 0])\n', (3054, 3067), True, 'import matplotlib.pyplot as plt\n'), ((3069, 3079), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3077, 3079), True, 'import matplotlib.pyplot as plt\n'), ((3080, 3106), 'matplotlib.pyplot.plot', 'plt.plot', (['X_vel_real[:, 1]'], {}), '(X_vel_real[:, 1])\n', (3088, 3106), True, 'import matplotlib.pyplot as plt\n'), ((3107, 3128), 'matplotlib.pyplot.plot', 'plt.plot', (['X_vel[:, 1]'], {}), '(X_vel[:, 1])\n', (3115, 3128), True, 'import matplotlib.pyplot as plt\n'), ((3130, 3140), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3138, 3140), True, 'import matplotlib.pyplot as plt\n'), ((3143, 3169), 'matplotlib.pyplot.plot', 'plt.plot', (['X_vel_real[:, 2]'], {}), '(X_vel_real[:, 2])\n', (3151, 3169), True, 'import matplotlib.pyplot as plt\n'), ((3170, 3191), 'matplotlib.pyplot.plot', 'plt.plot', (['X_vel[:, 2]'], {}), '(X_vel[:, 2])\n', (3178, 3191), True, 'import matplotlib.pyplot as plt\n'), ((3192, 3202), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3200, 3202), True, 'import matplotlib.pyplot as plt\n'), ((1861, 1878), 'numpy.array', 'np.array', (['[3, -3]'], {}), '([3, -3])\n', (1869, 1878), True, 'import numpy as np\n'), ((1892, 1903), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1900, 1903), True, 'import numpy as np\n'), ((1919, 1930), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1927, 1930), True, 'import numpy as np\n'), ((2080, 2093), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2088, 2093), True, 'import numpy as np\n'), ((49, 83), 'os.path.realpath', 'os.path.realpath', (['f"""{__file__}/.."""'], {}), "(f'{__file__}/..')\n", (65, 83), False, 'import os, sys\n'), ((2117, 2144), 'numpy.zeros', 'np.zeros', (['ACTION_SPACE_SIZE'], {}), '(ACTION_SPACE_SIZE)\n', (2125, 2144), True, 'import numpy as np\n'), ((910, 932), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (922, 932), False, 'import pathlib\n')] |
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from cavsim.base.fluids.base_fluid import BaseFluid
class DummyFluid(BaseFluid):
    """Test double for BaseFluid whose property methods return constant-scaled arrays."""
    def __init__(self, one, two, three=5):
        """Store the scale factors used by viscosity, density and bulk_modulus."""
        super().__init__(1, 2, 3, 4)
        self._one = one
        self._two = two
        self._three = three
    def _addargs(self, *args, **kwargs):
        """Append recognised keyword values (in a fixed order) to the positional args."""
        extra = tuple(
            kwargs[key]
            for key in ('pressure', 'temperature', 'shear_rate')
            if key in kwargs
        )
        return args + extra
    def viscosity(self, *args, **kwargs):
        """Return a broadcast ones-array scaled by the first factor."""
        return self._one * self._ones(*self._addargs(*args, **kwargs))
    def density(self, *args, **kwargs):
        """Return a broadcast ones-array scaled by the second factor."""
        return self._two * self._ones(*self._addargs(*args, **kwargs))
    def bulk_modulus(self, *args, **kwargs):
        """Return a broadcast ones-array scaled by the third factor."""
        return self._three * self._ones(*self._addargs(*args, **kwargs))
class TestBaseFluid(TestCase):
    """Unit tests for BaseFluid: constructor validation, norm properties, derived quantities."""
    def setUp(self):
        """Create the shared fixture (density=1, viscosity=2, bulk modulus=3, vapor pressure=4)."""
        self.fluid = BaseFluid(1, 2, 3, 4)
    def tearDown(self):
        """Dispose of the fixture after each test."""
        del self.fluid
        self.fluid = None
    def test___init__(self):
        """Non-numeric constructor arguments raise TypeError; valid ones are stored."""
        # Invalid parameter tests
        with self.assertRaises(TypeError):
            BaseFluid('abc', 2, 3, 4, 5, 6)
        with self.assertRaises(TypeError):
            BaseFluid(1, 'def', 3, 4, 5, 6)
        with self.assertRaises(TypeError):
            BaseFluid(1, 2, 'ghi', 4, 5, 6)
        with self.assertRaises(TypeError):
            BaseFluid(1, 2, 3, 'jkl', 5, 6)
        with self.assertRaises(TypeError):
            BaseFluid(1, 2, 3, 4, 'mno', 6)
        with self.assertRaises(TypeError):
            BaseFluid(1, 2, 3, 4, 5, 'pqr')
        # Valid parameter tests
        # Pressure and temperature default to standard conditions when omitted.
        self.assertEqual(101325, self.fluid._norm_pressure)
        self.assertEqual(293.15, self.fluid._norm_temperature)
        f = BaseFluid(9, 8, 7, 6, 5, 4)
        self.assertEqual(9, f._norm_density)
        self.assertEqual(8, f._norm_viscosity)
        self.assertEqual(7, f._norm_bulk_modulus)
        self.assertEqual(6, f._norm_vapor_pressure)
        self.assertEqual(5, f._norm_pressure)
        self.assertEqual(4, f._norm_temperature)
    def test_norm_pressure(self):
        """Property mirrors the private _norm_pressure attribute."""
        self.fluid._norm_pressure = None
        self.assertEqual(None, self.fluid.norm_pressure)
        self.fluid._norm_pressure = 123.45
        self.assertEqual(123.45, self.fluid.norm_pressure)
    def test_norm_temperature(self):
        """Property mirrors the private _norm_temperature attribute."""
        self.fluid._norm_temperature = None
        self.assertEqual(None, self.fluid.norm_temperature)
        self.fluid._norm_temperature = 123.45
        self.assertEqual(123.45, self.fluid.norm_temperature)
    def test_norm_density(self):
        """Property mirrors the private _norm_density attribute."""
        self.fluid._norm_density = None
        self.assertEqual(None, self.fluid.norm_density)
        self.fluid._norm_density = 123.45
        self.assertEqual(123.45, self.fluid.norm_density)
    def test_norm_viscosity(self):
        """Property mirrors the private _norm_viscosity attribute."""
        self.fluid._norm_viscosity = None
        self.assertEqual(None, self.fluid.norm_viscosity)
        self.fluid._norm_viscosity = 123.45
        self.assertEqual(123.45, self.fluid.norm_viscosity)
    def test_norm_bulk_modulus(self):
        """Property mirrors the private _norm_bulk_modulus attribute."""
        self.fluid._norm_bulk_modulus = None
        self.assertEqual(None, self.fluid.norm_bulk_modulus)
        self.fluid._norm_bulk_modulus = 123.45
        self.assertEqual(123.45, self.fluid.norm_bulk_modulus)
    def test_norm_compressibility(self):
        """Compressibility is the reciprocal of the bulk modulus."""
        self.fluid._norm_bulk_modulus = 123.45
        self.assertAlmostEqual(1.0 / 123.45, self.fluid.norm_compressibility)
    def test_norm_vapor_pressure(self):
        """Property mirrors the private _norm_vapor_pressure attribute."""
        self.fluid._norm_vapor_pressure = None
        self.assertEqual(None, self.fluid.norm_vapor_pressure)
        self.fluid._norm_vapor_pressure = 123.45
        self.assertEqual(123.45, self.fluid.norm_vapor_pressure)
    def test_density(self):
        """density() returns the norm density at norm conditions, broadcasting over arrays."""
        p = self.fluid.norm_pressure
        t = self.fluid.norm_temperature
        self.assertEqual(self.fluid.norm_density, self.fluid.density(p, t))
        answer = np.asarray([self.fluid.norm_density, self.fluid.norm_density])
        result = self.fluid.density([p, p], [t, t])
        self.assertEqual(answer.shape, result.shape)
        npt.assert_almost_equal(result, answer)
        self.assertEqual(self.fluid.norm_density, self.fluid.density())
        # Off-norm pressure: expected value taken from BaseFluid's density model.
        f = BaseFluid(2.0, 0, 3.0, 0)
        p = f.norm_pressure + 5.0
        self.assertAlmostEqual(10.5889801009, f.density(p))
    def test_viscosity(self):
        """viscosity() returns the norm viscosity regardless of conditions."""
        self.assertEqual(self.fluid.norm_viscosity, self.fluid.viscosity(99, 111))
        answer = np.asarray([self.fluid.norm_viscosity, self.fluid.norm_viscosity])
        result = self.fluid.viscosity([99, 99], [111, 111])
        self.assertEqual(answer.shape, result.shape)
        npt.assert_allclose(result, answer)
    def test_kinematic_viscosity(self):
        """Kinematic viscosity equals dynamic viscosity divided by density."""
        f = DummyFluid(1, 2)
        self.assertEqual(0.5, f.kinematic_viscosity(1, 2, 3))
        f = DummyFluid(15, 5)
        self.assertEqual(3, f.kinematic_viscosity(1, 2, 3))
        answer = np.asarray([3, 3])
        result = f.kinematic_viscosity([1, 1], [2, 2], [3, 3])
        self.assertEqual(answer.shape, result.shape)
        npt.assert_allclose(result, answer)
    def test_compressibility(self):
        """compressibility() broadcasts the norm compressibility."""
        self.assertEqual(self.fluid.norm_compressibility, self.fluid.compressibility(222))
        answer = np.asarray([self.fluid.norm_compressibility, self.fluid.norm_compressibility])
        result = self.fluid.compressibility([222, 222])
        self.assertEqual(answer.shape, result.shape)
        npt.assert_allclose(result, answer)
    def test_bulk_modulus(self):
        """bulk_modulus() broadcasts the norm bulk modulus."""
        self.assertEqual(self.fluid.norm_bulk_modulus, self.fluid.bulk_modulus(222))
        answer = np.asarray([self.fluid.norm_bulk_modulus, self.fluid.norm_bulk_modulus])
        result = self.fluid.bulk_modulus([222, 222])
        self.assertEqual(answer.shape, result.shape)
        npt.assert_allclose(result, answer)
    def test_vapor_pressure(self):
        """vapor_pressure() broadcasts the norm vapor pressure."""
        self.assertEqual(self.fluid.norm_vapor_pressure, self.fluid.vapor_pressure(333))
        answer = np.asarray([self.fluid.norm_vapor_pressure, self.fluid.norm_vapor_pressure])
        result = self.fluid.vapor_pressure([333, 333])
        self.assertEqual(answer.shape, result.shape)
        npt.assert_allclose(result, answer)
    def test_norm_speed_of_sound(self):
        """Norm speed of sound matches sqrt(bulk_modulus / density) for the fixtures used."""
        f = BaseFluid(4.0, 0.0, 4.0, 0.0)
        self.assertEqual(1.0, f.norm_speed_of_sound)
        f = BaseFluid(4.0, 0.0, 16.0, 0.0)
        self.assertEqual(2.0, f.norm_speed_of_sound)
        f = BaseFluid(64.0, 0.0, 16.0, 0.0)
        self.assertEqual(0.5, f.norm_speed_of_sound)
    def test_speed_of_sound(self):
        """speed_of_sound(p, t) follows the same ratio using the fluid's own density/bulk modulus."""
        f = DummyFluid(0., 4.0, 4.0)
        self.assertEqual(1.0, f.speed_of_sound(7, 8))
        f = DummyFluid(0., 4.0, 16.0)
        self.assertEqual(2.0, f.speed_of_sound(7, 8))
        f = DummyFluid(0., 64.0, 16.0)
        self.assertEqual(0.5, f.speed_of_sound(7, 8))
        answer = np.asarray([0.5, 0.5])
        result = f.speed_of_sound([7, 7], [8, 8])
        self.assertEqual(answer.shape, result.shape)
        npt.assert_allclose(result, answer)
    def test__ones(self):
        """_ones() broadcasts to the common shape of its arguments and rejects mismatched shapes."""
        f = BaseFluid(4.0, 0.0, 4.0, 0.0)
        self.assertEqual(1.0, f._ones())
        self.assertEqual(1.0, f._ones(4))
        self.assertEqual(1.0, f._ones(param2=4))
        self.assertEqual(1.0, f._ones(7, 8))
        npt.assert_almost_equal(np.asarray([1, 1, 1]), f._ones(np.asarray([1, 2, 3])))
        npt.assert_almost_equal(np.asarray([1, 1]), f._ones(np.asarray([1, 2]), np.asarray([3, 4])))
        with self.assertRaises(IndexError):
            f._ones(1, np.asarray([1, 2]))
        with self.assertRaises(IndexError):
            f._ones(np.asarray([1, 2]), np.asarray([3, 4, 5, 6]))
| [
"numpy.testing.assert_almost_equal",
"numpy.testing.assert_allclose",
"cavsim.base.fluids.base_fluid.BaseFluid",
"numpy.asarray"
] | [((1086, 1107), 'cavsim.base.fluids.base_fluid.BaseFluid', 'BaseFluid', (['(1)', '(2)', '(3)', '(4)'], {}), '(1, 2, 3, 4)\n', (1095, 1107), False, 'from cavsim.base.fluids.base_fluid import BaseFluid\n'), ((1935, 1962), 'cavsim.base.fluids.base_fluid.BaseFluid', 'BaseFluid', (['(9)', '(8)', '(7)', '(6)', '(5)', '(4)'], {}), '(9, 8, 7, 6, 5, 4)\n', (1944, 1962), False, 'from cavsim.base.fluids.base_fluid import BaseFluid\n'), ((4093, 4155), 'numpy.asarray', 'np.asarray', (['[self.fluid.norm_density, self.fluid.norm_density]'], {}), '([self.fluid.norm_density, self.fluid.norm_density])\n', (4103, 4155), True, 'import numpy as np\n'), ((4269, 4308), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['result', 'answer'], {}), '(result, answer)\n', (4292, 4308), True, 'import numpy.testing as npt\n'), ((4393, 4418), 'cavsim.base.fluids.base_fluid.BaseFluid', 'BaseFluid', (['(2.0)', '(0)', '(3.0)', '(0)'], {}), '(2.0, 0, 3.0, 0)\n', (4402, 4418), False, 'from cavsim.base.fluids.base_fluid import BaseFluid\n'), ((4644, 4710), 'numpy.asarray', 'np.asarray', (['[self.fluid.norm_viscosity, self.fluid.norm_viscosity]'], {}), '([self.fluid.norm_viscosity, self.fluid.norm_viscosity])\n', (4654, 4710), True, 'import numpy as np\n'), ((4832, 4867), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'answer'], {}), '(result, answer)\n', (4851, 4867), True, 'import numpy.testing as npt\n'), ((5107, 5125), 'numpy.asarray', 'np.asarray', (['[3, 3]'], {}), '([3, 3])\n', (5117, 5125), True, 'import numpy as np\n'), ((5250, 5285), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'answer'], {}), '(result, answer)\n', (5269, 5285), True, 'import numpy.testing as npt\n'), ((5431, 5509), 'numpy.asarray', 'np.asarray', (['[self.fluid.norm_compressibility, self.fluid.norm_compressibility]'], {}), '([self.fluid.norm_compressibility, self.fluid.norm_compressibility])\n', (5441, 5509), True, 'import numpy as np\n'), ((5627, 5662), 
'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'answer'], {}), '(result, answer)\n', (5646, 5662), True, 'import numpy.testing as npt\n'), ((5799, 5871), 'numpy.asarray', 'np.asarray', (['[self.fluid.norm_bulk_modulus, self.fluid.norm_bulk_modulus]'], {}), '([self.fluid.norm_bulk_modulus, self.fluid.norm_bulk_modulus])\n', (5809, 5871), True, 'import numpy as np\n'), ((5986, 6021), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'answer'], {}), '(result, answer)\n', (6005, 6021), True, 'import numpy.testing as npt\n'), ((6164, 6240), 'numpy.asarray', 'np.asarray', (['[self.fluid.norm_vapor_pressure, self.fluid.norm_vapor_pressure]'], {}), '([self.fluid.norm_vapor_pressure, self.fluid.norm_vapor_pressure])\n', (6174, 6240), True, 'import numpy as np\n'), ((6357, 6392), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'answer'], {}), '(result, answer)\n', (6376, 6392), True, 'import numpy.testing as npt\n'), ((6446, 6475), 'cavsim.base.fluids.base_fluid.BaseFluid', 'BaseFluid', (['(4.0)', '(0.0)', '(4.0)', '(0.0)'], {}), '(4.0, 0.0, 4.0, 0.0)\n', (6455, 6475), False, 'from cavsim.base.fluids.base_fluid import BaseFluid\n'), ((6541, 6571), 'cavsim.base.fluids.base_fluid.BaseFluid', 'BaseFluid', (['(4.0)', '(0.0)', '(16.0)', '(0.0)'], {}), '(4.0, 0.0, 16.0, 0.0)\n', (6550, 6571), False, 'from cavsim.base.fluids.base_fluid import BaseFluid\n'), ((6637, 6668), 'cavsim.base.fluids.base_fluid.BaseFluid', 'BaseFluid', (['(64.0)', '(0.0)', '(16.0)', '(0.0)'], {}), '(64.0, 0.0, 16.0, 0.0)\n', (6646, 6668), False, 'from cavsim.base.fluids.base_fluid import BaseFluid\n'), ((7051, 7073), 'numpy.asarray', 'np.asarray', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (7061, 7073), True, 'import numpy as np\n'), ((7185, 7220), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'answer'], {}), '(result, answer)\n', (7204, 7220), True, 'import numpy.testing as npt\n'), ((7260, 7289), 
'cavsim.base.fluids.base_fluid.BaseFluid', 'BaseFluid', (['(4.0)', '(0.0)', '(4.0)', '(0.0)'], {}), '(4.0, 0.0, 4.0, 0.0)\n', (7269, 7289), False, 'from cavsim.base.fluids.base_fluid import BaseFluid\n'), ((1301, 1332), 'cavsim.base.fluids.base_fluid.BaseFluid', 'BaseFluid', (['"""abc"""', '(2)', '(3)', '(4)', '(5)', '(6)'], {}), "('abc', 2, 3, 4, 5, 6)\n", (1310, 1332), False, 'from cavsim.base.fluids.base_fluid import BaseFluid\n'), ((1388, 1419), 'cavsim.base.fluids.base_fluid.BaseFluid', 'BaseFluid', (['(1)', '"""def"""', '(3)', '(4)', '(5)', '(6)'], {}), "(1, 'def', 3, 4, 5, 6)\n", (1397, 1419), False, 'from cavsim.base.fluids.base_fluid import BaseFluid\n'), ((1475, 1506), 'cavsim.base.fluids.base_fluid.BaseFluid', 'BaseFluid', (['(1)', '(2)', '"""ghi"""', '(4)', '(5)', '(6)'], {}), "(1, 2, 'ghi', 4, 5, 6)\n", (1484, 1506), False, 'from cavsim.base.fluids.base_fluid import BaseFluid\n'), ((1562, 1593), 'cavsim.base.fluids.base_fluid.BaseFluid', 'BaseFluid', (['(1)', '(2)', '(3)', '"""jkl"""', '(5)', '(6)'], {}), "(1, 2, 3, 'jkl', 5, 6)\n", (1571, 1593), False, 'from cavsim.base.fluids.base_fluid import BaseFluid\n'), ((1649, 1680), 'cavsim.base.fluids.base_fluid.BaseFluid', 'BaseFluid', (['(1)', '(2)', '(3)', '(4)', '"""mno"""', '(6)'], {}), "(1, 2, 3, 4, 'mno', 6)\n", (1658, 1680), False, 'from cavsim.base.fluids.base_fluid import BaseFluid\n'), ((1736, 1767), 'cavsim.base.fluids.base_fluid.BaseFluid', 'BaseFluid', (['(1)', '(2)', '(3)', '(4)', '(5)', '"""pqr"""'], {}), "(1, 2, 3, 4, 5, 'pqr')\n", (1745, 1767), False, 'from cavsim.base.fluids.base_fluid import BaseFluid\n'), ((7499, 7520), 'numpy.asarray', 'np.asarray', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (7509, 7520), True, 'import numpy as np\n'), ((7586, 7604), 'numpy.asarray', 'np.asarray', (['[1, 1]'], {}), '([1, 1])\n', (7596, 7604), True, 'import numpy as np\n'), ((7530, 7551), 'numpy.asarray', 'np.asarray', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (7540, 7551), True, 'import numpy as np\n'), ((7614, 
7632), 'numpy.asarray', 'np.asarray', (['[1, 2]'], {}), '([1, 2])\n', (7624, 7632), True, 'import numpy as np\n'), ((7634, 7652), 'numpy.asarray', 'np.asarray', (['[3, 4]'], {}), '([3, 4])\n', (7644, 7652), True, 'import numpy as np\n'), ((7722, 7740), 'numpy.asarray', 'np.asarray', (['[1, 2]'], {}), '([1, 2])\n', (7732, 7740), True, 'import numpy as np\n'), ((7806, 7824), 'numpy.asarray', 'np.asarray', (['[1, 2]'], {}), '([1, 2])\n', (7816, 7824), True, 'import numpy as np\n'), ((7826, 7850), 'numpy.asarray', 'np.asarray', (['[3, 4, 5, 6]'], {}), '([3, 4, 5, 6])\n', (7836, 7850), True, 'import numpy as np\n')] |
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
import scipy.integrate
from sklearn.metrics import mean_squared_error
from scipy.linalg import svd
from scipy.optimize import least_squares
import itertools
import bokeh.io
import bokeh.application
import bokeh.application.handlers
import bokeh.models
import holoviews as hv
# bokeh.io.output_notebook()  # enable for inline output in a notebook
hv.extension('bokeh')  # select the bokeh backend for holoviews
import git
import sys
# Locate the repository root via git so the path works from any subdirectory.
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Make the shared data-processing helpers importable as `loader`.
sys.path.insert(1, f"{homedir}" + '/models/data_processing')
import loader
def process_data(data_covid, data_population, save=True):
    """Load the COVID dataset, parse its dates, attach per-region population,
    and optionally persist the merged frame to CSV.

    Parameters
    ----------
    data_covid : source accepted by loader.load_data for the case data
    data_population : source accepted by loader.load_data for population data
    save : write the merged frame to "italy_training_data.csv" when True

    Returns
    -------
    The merged dataframe with a new 'Population' column.
    """
    frame = loader.load_data(data_covid)
    loader.convert_dates(frame, "Date")
    pop_table = loader.load_data(data_population)
    def _region_population(row):
        # Look up the population record matching this row's region.
        return loader.query(pop_table, "Region", row.Region)['Population']
    frame['Population'] = frame.apply(_region_population, axis=1)
    if save:
        frame.to_csv("italy_training_data.csv")
    return frame
# Returns the 1-standard-deviation errors of the fitted parameters.
def get_errors(res, p0):
    """Estimate 1-sigma errors of parameters fitted with least_squares.

    Mirrors scipy.optimize.curve_fit's covariance computation: pseudo-invert
    the Jacobian via SVD (dropping near-zero singular values) and scale by
    the reduced chi-square.

    Parameters
    ----------
    res : least_squares result with .fun, .cost, .x and .jac attributes.
    p0 : sequence of fitted parameters (only its length is used).

    Returns
    -------
    ndarray of per-parameter standard deviations, or None when the variance
    cannot be estimated (no more residuals than parameters).
    """
    p0 = np.array(p0)
    ysize = len(res.fun)
    cost = 2 * res.cost  # res.cost is half sum of squares!
    popt = res.x
    # Do Moore-Penrose inverse discarding zero singular values.
    _, s, VT = svd(res.jac, full_matrices=False)
    threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
    s = s[s > threshold]
    VT = VT[:s.size]
    pcov = np.dot(VT.T / s**2, VT)
    warn_cov = False
    absolute_sigma = False
    if pcov is None:
        # indeterminate covariance (defensive; pcov above is never None).
        # BUG FIX: were bare `zeros`/`inf` -> NameError at runtime.
        pcov = np.zeros((len(popt), len(popt)), dtype=float)
        pcov.fill(np.inf)
        warn_cov = True
    elif not absolute_sigma:
        if ysize > p0.size:
            # Scale by the reduced chi-square (residual variance).
            s_sq = cost / (ysize - p0.size)
            pcov = pcov * s_sq
        else:
            pcov.fill(np.inf)  # BUG FIX: was bare `inf` -> NameError
            warn_cov = True
    if warn_cov:
        print('cannot estimate variance')
        return None
    perr = np.sqrt(np.diag(pcov))
    return perr
# returns standard deviation of fitted parameters
def get_param_errors(res):
    """Approximate standard deviations of the fitted parameters.

    Forms J^T J from the Jacobian, pseudo-inverts it (SVD under the hood),
    and scales its diagonal by the residual variance normalised by the
    population constant.
    """
    jacobian = res.jac
    information = np.dot(jacobian.T, jacobian)
    covariance_diag = np.diag(np.linalg.pinv(information))  # pinv uses SVD
    residual_var = np.cov(res.fun) / 3750000  # replace with population
    return np.sqrt(covariance_diag * residual_var)
def mse_qd(A, B):
    """Mean squared error between A and B with NaN and +/-inf zeroed out."""
    # nan_to_num with explicit posinf/neginf reproduces the old two-step
    # (nan_to_num then masked assignment) in a single call.
    sanitized_a = np.nan_to_num(A, nan=0.0, posinf=0.0, neginf=0.0)
    sanitized_b = np.nan_to_num(B, nan=0.0, posinf=0.0, neginf=0.0)
    return mean_squared_error(sanitized_a, sanitized_b)
def pecaiqr(dat, t, params, N, max_t, offset):
    """Right-hand side of the PECAIQR compartmental ODE system.

    `dat` holds P(rotected), E(xposed), C(arrier), A(symptomatic),
    I(nfected), Q(uarantined), R(ecovered) plus a cumulative-death slot.
    Only the first 17 entries of `params` are rates; callers may pass a
    longer vector (initial-condition fractions appended).  Past `max_t`
    the system is frozen (all derivatives zero).  `offset` is unused here.
    """
    if t >= max_t:
        return [0] * 8
    (a_1, a_2, a_3, b_1, b_2, b_3, b_4, g_a, g_i, th,
     del_a, del_i, r_a, r_i, r_q, d_i, d_q) = params[:17]
    P, E, C, A, I, Q, R = dat[:7]
    # N = P + E + C + A + I + Q + R
    # Shared sub-expressions: new infections from contact, and the
    # protected<->exposed exchange (scaled by N/(P+E)).
    infection = (b_1 * A + b_2 * I) * E / N
    flux_PE = (a_3 * P - b_4 * E) * (N / (P + E))
    dP = -(a_1 * a_2) * C * P / N - flux_PE
    dE = -infection + b_3 * C + flux_PE
    dC = -(g_a + g_i) * C + infection - b_3 * C
    dA = (a_1 * C * P) / N + g_a * C - (r_a + del_a + th) * A
    dI = (a_2 * C * P) / N + g_i * C - ((r_i + d_i) + del_i) * I + th * A
    dQ = del_a * A + del_i * I - (r_q + d_q) * Q
    dR = r_a * A + (r_i + d_i) * I + (r_q + d_q) * Q
    dD = d_i * I + d_q * Q
    return [dP, dE, dC, dA, dI, dQ, dR, dD]
# def plot_pecaiqr():
# n = 2000
# # params = [0.3, 0.8, 0.1, 0.3, 0.6, 0.1, 0.01, 0.1, 0.8, 0.1, 0.1, 0.5, 0.1, 0.1, 0.1, 0.2, 0.5]
# params = [0.1, 0.1, 0.1, 0.3, 0.5, 0.1, 0.1, 0.3, 0.5, 0.1, 0.1, 0.1, 0.001, 0.001, 0.001, 0.001, 0.001]
# params[:] = [x/1 for x in params]
# z0 = [75000, 25000, 100, 100, 100, 10, 10, 0]
# t = np.linspace(0,500,n)
# z = odeint(pecaiqr,z0,t, args = (params, ))
# P = z[:,0]
# E = z[:,1]
# C = z[:,2]
# A = z[:,3]
# I = z[:,4]
# Q = z[:,5]
# R = z[:,6]
# D = z[:,7]
# # plot results
# # plt.plot(t,P,'g--',label='P(t)')
# # plt.plot(t,E,'b--',label='E(t)')
# plt.plot(t,C,'b--',label='C(t)')
# plt.plot(t,A,'y--',label='A(t)')
# plt.plot(t,I,'r:',label='I(t)')
# plt.plot(t,Q,'r:',label='Q(t)')
# plt.plot(t,R,'k--',label='R(t)')
# plt.plot(t,D,'k--',label='D(t)')
# plt.ylabel('# people')
# plt.xlabel('time')
# plt.legend(loc='best')
# plt.show()
def model_qd(params, data, extrapolate=-1):
	"""Integrate the PECAIQR ODE system for one region.

	params: 17 rate parameters followed by 7 initial-condition fractions
	    (the last 7 entries are multiplied by the region population).
	data: DataFrame with 'Population', 'Deaths' and 'date_processed'.
	extrapolate: extra integration steps beyond len(data); ignored if <= 0.
	    NOTE(review): callers in this file pass len(data)+extrapolate here,
	    so the horizon becomes 2*len(data)+extrapolate steps — confirm this
	    double-counting is intentional.

	Returns the trajectory of shape (n, 8); a zero array of the same shape
	when odeint raises RuntimeError.
	"""
	N = data['Population'].values[0] # total population
	initial_conditions = N * np.array(params[-7:]) # the parameters are a fraction of the population so multiply by the population
	P0 = initial_conditions[0]
	E0 = initial_conditions[1]
	C0 = initial_conditions[2]
	A0 = initial_conditions[3]
	I0 = initial_conditions[4]
	Q0 = initial_conditions[5]
	R0 = initial_conditions[6]
	# Death compartment is seeded with the first observed death count.
	D0 = data['Deaths'].values[0]
	offset = data['date_processed'].min()
	yz_0 = np.array([P0, E0, C0, A0, I0, Q0, R0, D0])
	n = len(data)
	if extrapolate > 0:
		n += extrapolate
	# Package parameters into a tuple
	args = (params, N, n, offset)
	# Integrate ODEs
	try:
		s = scipy.integrate.odeint(pecaiqr, yz_0, np.arange(0, n), args=args)
	except RuntimeError:
		# print('RuntimeError', params)
		# Keep callers working: return an all-zero trajectory instead of raising.
		return np.zeros((n, len(yz_0)))
	return s
def get_deaths(res, data, extrapolate=14):
    """Simulate with the fitted parameters and return (day, deaths) pairs.

    Covers the observed window plus `extrapolate` additional days; deaths
    are the last (eighth) column of the simulated trajectory.
    """
    trajectory = model_qd(res.x, data, len(data) + extrapolate)
    timeline = np.arange(0, len(data) + extrapolate)
    return list(zip(timeline, trajectory[:, 7]))
# returns uncertainty of the fit for all variables
def get_fit_errors(res, p0_params, data, extrapolate=14):
    """Monte-Carlo uncertainty: resample the fitted parameters from their
    estimated std-devs and re-simulate 100 trajectories.

    The seven trailing initial-condition entries are held fixed (scale 0).
    Returns an array of shape (100, n_steps, 8).
    """
    scale = get_param_errors(res)
    scale[-7:] = 0  # do not perturb the initial conditions
    runs = []
    for _ in range(100):
        draw = np.random.normal(loc=res.x, scale=scale)
        runs.append(model_qd(draw, data, len(data) + extrapolate))
    return np.array(runs)
def plot_qd(res, p0_params, data, extrapolate=14, boundary=None, plot_infectious=False):
	"""Render the fitted PECAIQR trajectories in a bokeh figure.

	Draws simulated deceased/quarantined/recovered lines (plus infected when
	plot_infectious is True) against the observed deaths and positives, with
	an optional vertical line at `boundary` marking the fit window.
	p0_params is accepted for call-site symmetry but unused here.
	Side effects only: opens the plot via bokeh.io.show.
	"""
	s = model_qd(res.x, data, len(data)+extrapolate)
	P = s[:,0]
	E = s[:,1]
	C = s[:,2]
	A = s[:,3]
	I = s[:,4]
	Q = s[:,5]
	R = s[:,6]
	D = s[:,7]
	# t covers the observed days; tp additionally covers the extrapolation.
	t = np.arange(0, len(data))
	tp = np.arange(0, len(data)+extrapolate)
	p = bokeh.plotting.figure(plot_width=600,
	                          plot_height=400,
	                         title = ' PECAIQR Model',
	                         x_axis_label = 't (days)',
	                         y_axis_label = '# people')
	if plot_infectious:
		p.line(tp, I, color = 'red', line_width = 1, legend = 'All infected')
	p.line(tp, D, color = 'black', line_width = 1, legend = 'Deceased')
	p.line(tp, Q, color = 'yellow', line_width = 1, legend = 'Quarantined')
	p.line(tp, R, color = 'green', line_width = 1, legend = 'Recovered')
	# death
	p.circle(t, data['Deaths'], color ='black', legend='Real Death')
	# quarantined
	p.circle(t, data['TotalCurrentlyPositive'], color ='purple', legend='Tested Infected')
	if boundary is not None:
		vline = bokeh.models.Span(location=boundary, dimension='height', line_color='black', line_width=3)
		p.renderers.extend([vline])
	p.legend.location = 'top_left'
	bokeh.io.show(p)
def plot_with_errors_sample(res, p0_params, data, extrapolate=14, boundary=None, plot_infectious=False):
    """Plot fitted PECAIQR curves with a 25th-75th percentile uncertainty band.

    Deceased (and optionally infected) trajectories are drawn with shaded
    percentile bands from get_fit_errors, over the observed deaths and
    positives; `boundary` adds a vertical marker at the fit-window edge.
    Side effects only: opens the plot via bokeh.io.show.
    """
    s = model_qd(res.x, data, len(data)+extrapolate)
    P = s[:,0]
    E = s[:,1]
    C = s[:,2]
    A = s[:,3]
    I = s[:,4]
    Q = s[:,5]
    R = s[:,6]
    D = s[:,7]
    # BUG FIX: propagate the caller's horizon instead of a hard-coded 14,
    # otherwise the band length disagrees with the curves when
    # extrapolate != 14.
    uncertainty = get_fit_errors(res, p0_params, data, extrapolate=extrapolate)
    s1 = np.percentile(uncertainty, 25, axis=0)
    s2 = np.percentile(uncertainty, 75, axis=0)
    t = np.arange(0, len(data))
    tp = np.arange(0, len(data)+extrapolate)
    p = bokeh.plotting.figure(plot_width=600,
                              plot_height=400,
                             title = ' PECAIQR Model Errors',
                             x_axis_label = 't (days)',
                             y_axis_label = '# people')
    if plot_infectious:
        # BUG FIX: the band must span percentiles of the SAME compartment
        # (infected = column 4); the original mixed in column 2 (carriers).
        p.varea(x=tp, y1=s1[:, 4], y2=s2[:, 4], color='red', fill_alpha=0.2)
    p.varea(x=tp, y1=s1[:, 7], y2=s2[:, 7], color='black', fill_alpha=0.2)
    if plot_infectious:
        p.line(tp, I, color = 'red', line_width = 1, legend = 'Currently Infected')
    p.line(tp, D, color = 'black', line_width = 1, legend = 'Deceased')
    # p.line(tp, Q, color = 'yellow', line_width = 1, legend = 'Quarantined')
    # p.line(tp, R, color = 'green', line_width = 1, legend = 'Recovered')
    # death
    p.circle(t, data['Deaths'], color ='black')
    # quarantined
    p.circle(t, data['TotalCurrentlyPositive'], color ='purple')
    if boundary is not None:
        vline = bokeh.models.Span(location=boundary, dimension='height', line_color='black', line_width=3)
        p.renderers.extend([vline])
    p.legend.location = 'top_left'
    bokeh.io.show(p)
def leastsq_qd(params, data, weight=False):
    """Residual vector between simulated and observed deaths.

    When `weight` is True the residuals are multiplied by a geometric ramp
    from 0.5 to 1.5 so later observations carry more influence.
    """
    observed_deaths = data['Deaths'].values
    observed_positive = data['TotalCurrentlyPositive'].values  # kept for parity; unused
    simulated = model_qd(params, data)
    residual = simulated[:, 7] - observed_deaths
    if weight:
        # mu, sigma = 1, 0.2
        # w = np.random.normal(mu, sigma, len(error))
        # w = np.sort(w)
        ramp = np.geomspace(0.5, 1.5, len(data))
        residual = residual * ramp
    return residual
def fit(data, guesses=None, weight=False, plot=False, extrapolate=14):
	"""Fit the PECAIQR model to one region's data with least squares.

	data: training DataFrame ('Deaths', 'TotalCurrentlyPositive',
	    'Population', 'date_processed').
	guesses: optional 24-vector (17 rates + 7 initial fractions); a
	    hand-tuned default is used when None.
	weight: forwarded to leastsq_qd to up-weight recent days.
	plot: when True, draw the fit and its uncertainty band via bokeh.
	extrapolate: days to project beyond the training window.

	Returns (predictions, death_errors, parameters, cost).
	"""
	# Every rate parameter is boxed to [0, 1]; the seven initial-condition
	# fractions get tighter boxes.
	param_ranges = [(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1)]
	initial_ranges = [(0,1), (0,1), (0,0.01), (0,0.01), (0,0.01), (0,0.01), (0,0.01)]
	ranges = param_ranges+initial_ranges
	if guesses is None:
		params = [9e-02, 1e-01, 7e-02, 3.e-01, 4.e-01, 1e-01, 1e-01, 3e-01, 4e-01, 7e-02, 2e-04, 8e-02, 7e-03, 2e-02, 2e-04, 2e-06, 4e-03]
		initial_conditions = [7e-01, 2e-01, 4e-08, 7e-03, 1e-08, 3e-20, 7e-06]
		guesses = params+initial_conditions
	else:
		# With supplied guesses, box each initial condition within a factor
		# of two of its guess (entry 22 is fixed to (0, 0.3)).
		initial_ranges = [(0.5*guesses[17],2*guesses[17]), (0.5*guesses[18],2*guesses[18]), (0.5*guesses[19],2*guesses[19]), (0.5*guesses[20],2*guesses[20]), (0.5*guesses[21],2*guesses[21]), \
		(0, 0.3), (0.5*guesses[23],2*guesses[23])]
		ranges = param_ranges+initial_ranges
	# Single fit over the full window; kept as a loop for experimenting with
	# multiple boundaries (see the commented block below).
	for boundary in [len(data)]:
		res = least_squares(leastsq_qd, guesses, args=(data[:boundary],weight), bounds=np.transpose(np.array(ranges)))
		if plot:
			plot_qd(res, guesses, data, extrapolate=extrapolate, boundary=boundary, plot_infectious=True)
			plot_with_errors_sample(res, guesses, data, extrapolate=extrapolate, boundary=boundary, plot_infectious=False)
		predictions = get_deaths(res, data, extrapolate=extrapolate)
		errors = get_fit_errors(res, guesses, data, extrapolate=extrapolate)
		death_errors = errors[:,:,-1]
		parameters = res.x
		cost = res.cost
		print(res.x)
	# for boundary in [(0,30), (10,40), (20, 51)]:
	# 	df = data[boundary[0]:boundary[1]]
	# 	res = least_squares(leastsq_qd, guesses, args=(df,weight), bounds=np.transpose(np.array(ranges)))
	# 	if plot:
	# 		plot_qd(res, params, initial_conditions, df, extrapolate=14, boundary=boundary[1]-boundary[0], plot_infectious=True)
	# 		# plot_with_errors_sample(res, params, initial_conditions, df, extrapolate=14, boundary=boundary[1]-boundary[0], plot_infectious=False)
	# 	predictions = get_deaths(res, df, extrapolate=14)
	# 	predictions = [(x+boundary[0],y) for (x,y) in predictions]
	# 	parameters = res.x
	# 	cost = res.cost
	# 	# store in list right now it only returns the last prediction and parameters
	# 	print(parameters)
	# 	print(predictions)
	# 	print(cost)
	return (predictions, death_errors, parameters, cost)
def main(weight=True, plot=True):
	"""Fit the PECAIQR model to the Lombardia region.

	weight: up-weight recent observations in the residuals.
	plot: draw the fitted curves and uncertainty band via bokeh.
	"""
	#Get date range of April1 to June30 inclusive. Figure out how much to extrapolate
	italy = process_data("/data/international/italy/covid/dpc-covid19-ita-regioni.csv", "/models/data/international/italy/demographics/region-populations.csv")
	italy = loader.load_data("/models/epidemiological/italy/italy_training_data.csv")
	lombardia = loader.query(italy, "Region", "Lombardia")
	# guesses = [6.69209312e-02, 1.10239913e-01, 4.33677422e-02, 3.01411969e-01,
	# 3.55547441e-01, 1.35711130e-01, 1.87415444e-01, 3.40118459e-01,
	# 6.54169531e-01, 5.80742686e-02, 2.66926724e-05, 1.27460914e-01,
	# 3.14216375e-02, 1.33884397e-06, 3.95164660e-02, 5.51770694e-11,
	# 1.27560414e-02, 6.55819545e-01, 1.66249610e-01, 8.78316719e-10,
	# 9.72332562e-03, 1.18016076e-18, 9.50245298e-18, 7.02140155e-06]
	# Hand-tuned 24-vector: 17 rate parameters followed by 7 initial-condition
	# fractions (see fit()).
	guesses = [3.29142138e-01, 6.10729377e-02, 1.54971604e-01, 4.57604830e-01,
	4.00514413e-01, 2.29936105e-02, 3.12355123e-04, 4.19042120e-02,
	5.20538956e-01, 5.89851095e-05, 7.51938317e-07, 1.48040699e-01,
	2.79663721e-02, 1.22173817e-01, 1.33376981e-01, 9.21393091e-08,
	1.18451662e-03, 8.58246731e-01, 1.02247495e-01, 4.45881513e-10,
	1.92751396e-02, 2.36032152e-18, 3.35111924e-05, 7.02140155e-06]
	# guesses = [1.41578513e-01, 1.61248129e-01, 2.48362028e-01, 3.42978127e-01, 5.79023652e-01, 4.64392758e-02, \
	# 9.86745420e-06, 4.83700388e-02, 4.85290835e-01, 3.72688900e-02, 4.92398129e-04, 5.20319673e-02, \
	# 4.16822944e-02, 2.93718207e-02, 2.37765976e-01, 6.38313283e-04, 1.00539865e-04, 7.86113867e-01, \
	# 3.26287443e-01, 8.18317732e-06, 5.43511913e-10, 1.30387168e-04, 3.58953133e-03, 1.57388153e-05]
	fit(lombardia, guesses = guesses, weight=weight, plot=plot, extrapolate=14)
# Script entry point: fit Lombardia with defaults when run directly.
if __name__ == '__main__':
	main()
	# plot_pecaiqr()
| [
"sys.path.insert",
"numpy.sqrt",
"numpy.linalg.pinv",
"numpy.array",
"numpy.cov",
"numpy.arange",
"holoviews.extension",
"numpy.dot",
"loader.query",
"numpy.random.normal",
"loader.convert_dates",
"sklearn.metrics.mean_squared_error",
"loader.load_data",
"scipy.linalg.svd",
"git.Repo",
... | [((515, 536), 'holoviews.extension', 'hv.extension', (['"""bokeh"""'], {}), "('bokeh')\n", (527, 536), True, 'import holoviews as hv\n'), ((567, 613), 'git.Repo', 'git.Repo', (['"""./"""'], {'search_parent_directories': '(True)'}), "('./', search_parent_directories=True)\n", (575, 613), False, 'import git\n'), ((641, 701), 'sys.path.insert', 'sys.path.insert', (['(1)', "(f'{homedir}' + '/models/data_processing')"], {}), "(1, f'{homedir}' + '/models/data_processing')\n", (656, 701), False, 'import sys\n'), ((785, 813), 'loader.load_data', 'loader.load_data', (['data_covid'], {}), '(data_covid)\n', (801, 813), False, 'import loader\n'), ((815, 850), 'loader.convert_dates', 'loader.convert_dates', (['covid', '"""Date"""'], {}), "(covid, 'Date')\n", (835, 850), False, 'import loader\n'), ((865, 898), 'loader.load_data', 'loader.load_data', (['data_population'], {}), '(data_population)\n', (881, 898), False, 'import loader\n'), ((1159, 1171), 'numpy.array', 'np.array', (['p0'], {}), '(p0)\n', (1167, 1171), True, 'import numpy as np\n'), ((1338, 1371), 'scipy.linalg.svd', 'svd', (['res.jac'], {'full_matrices': '(False)'}), '(res.jac, full_matrices=False)\n', (1341, 1371), False, 'from scipy.linalg import svd\n'), ((1481, 1506), 'numpy.dot', 'np.dot', (['(VT.T / s ** 2)', 'VT'], {}), '(VT.T / s ** 2, VT)\n', (1487, 1506), True, 'import numpy as np\n'), ((2059, 2079), 'numpy.dot', 'np.dot', (['pcov.T', 'pcov'], {}), '(pcov.T, pcov)\n', (2065, 2079), True, 'import numpy as np\n'), ((2088, 2108), 'numpy.linalg.pinv', 'np.linalg.pinv', (['pcov'], {}), '(pcov)\n', (2102, 2108), True, 'import numpy as np\n'), ((2127, 2140), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (2134, 2140), True, 'import numpy as np\n'), ((2226, 2239), 'numpy.sqrt', 'np.sqrt', (['perr'], {}), '(perr)\n', (2233, 2239), True, 'import numpy as np\n'), ((2278, 2294), 'numpy.nan_to_num', 'np.nan_to_num', (['A'], {}), '(A)\n', (2291, 2294), True, 'import numpy as np\n'), ((2301, 2317), 
'numpy.nan_to_num', 'np.nan_to_num', (['B'], {}), '(B)\n', (2314, 2317), True, 'import numpy as np\n'), ((2412, 2438), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['Ap', 'Bp'], {}), '(Ap, Bp)\n', (2430, 2438), False, 'from sklearn.metrics import mean_squared_error\n'), ((4849, 4891), 'numpy.array', 'np.array', (['[P0, E0, C0, A0, I0, Q0, R0, D0]'], {}), '([P0, E0, C0, A0, I0, Q0, R0, D0])\n', (4857, 4891), True, 'import numpy as np\n'), ((5864, 5885), 'numpy.array', 'np.array', (['uncertainty'], {}), '(uncertainty)\n', (5872, 5885), True, 'import numpy as np\n'), ((7414, 7452), 'numpy.percentile', 'np.percentile', (['uncertainty', '(25)'], {'axis': '(0)'}), '(uncertainty, 25, axis=0)\n', (7427, 7452), True, 'import numpy as np\n'), ((7459, 7497), 'numpy.percentile', 'np.percentile', (['uncertainty', '(75)'], {'axis': '(0)'}), '(uncertainty, 75, axis=0)\n', (7472, 7497), True, 'import numpy as np\n'), ((11570, 11643), 'loader.load_data', 'loader.load_data', (['"""/models/epidemiological/italy/italy_training_data.csv"""'], {}), "('/models/epidemiological/italy/italy_training_data.csv')\n", (11586, 11643), False, 'import loader\n'), ((11657, 11699), 'loader.query', 'loader.query', (['italy', '"""Region"""', '"""Lombardia"""'], {}), "(italy, 'Region', 'Lombardia')\n", (11669, 11699), False, 'import loader\n'), ((1915, 1928), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (1922, 1928), True, 'import numpy as np\n'), ((2149, 2164), 'numpy.cov', 'np.cov', (['res.fun'], {}), '(res.fun)\n', (2155, 2164), True, 'import numpy as np\n'), ((4473, 4494), 'numpy.array', 'np.array', (['params[-7:]'], {}), '(params[-7:])\n', (4481, 4494), True, 'import numpy as np\n'), ((5728, 5769), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'res.x', 'scale': 'errors'}), '(loc=res.x, scale=errors)\n', (5744, 5769), True, 'import numpy as np\n'), ((5087, 5102), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (5096, 5102), True, 'import numpy as 
np\n'), ((946, 992), 'loader.query', 'loader.query', (['population', '"""Region"""', 'row.Region'], {}), "(population, 'Region', row.Region)\n", (958, 992), False, 'import loader\n'), ((1385, 1400), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (1393, 1400), True, 'import numpy as np\n'), ((10001, 10017), 'numpy.array', 'np.array', (['ranges'], {}), '(ranges)\n', (10009, 10017), True, 'import numpy as np\n')] |
import cv2 as cv
import numpy as np
import time
import RPi.GPIO as GPIO
import sys
import select
import tracking
import drive
# Frame size
HEIGHT = 240
WIDTH = 320
#Filter
# Lower/upper colour bounds used by tracking.steering_decision to mask
# the followed object (assumed HSV-style triplets — TODO confirm).
LOWER = np.array([68, 65, 31])
UPPER = np.array([100, 229, 167])
# Start capture
cap = cv.VideoCapture(0)
cap.set(3, WIDTH) # 3 CV_CAP_PROP_FRAME_WIDTH Width of the frames in the video stream.
cap.set(4, HEIGHT) # 4 CV_CAP_PROP_FRAME_HEIGHT Height of the frames in the video stream.
# Real width of your object
REAL_WIDTH = 5
# Focal length of your camera
# Focal length = (Width of an object in pixels * Distance to object) / Real width of an object
FOCAL_LENGTH = 416
# Desired distance
DESIRED_DISTANCE = 10
# Desired distance's tolerance
DESIRED_DISTANCE_TOLERANCE = 0
# How tolerant our tracker is
STEERING_TOLERANCE = 30
# Prepare motors
drive.motor_setup()
# Main control loop: steer toward the tracked object, then regulate the
# following distance; exits when any input arrives on stdin.
while True:
	ret, frame = cap.read()
	decision, pixel_width = tracking.steering_decision(frame, LOWER, UPPER, STEERING_TOLERANCE, HEIGHT, WIDTH)
	if pixel_width != 0:
		distance_prediction = tracking.distance(pixel_width, REAL_WIDTH, FOCAL_LENGTH)
	else:
		distance_prediction = "No object"
	print(decision, distance_prediction)
	# Actions, based on previous decision
	if decision == "stay":
		drive.stop()
	elif decision == "left":
		drive.turn_left(0.01)
	elif decision == "right":
		drive.turn_right(0.01)
	else:
		drive.stop()
	if distance_prediction != "No object":
		if abs(distance_prediction - DESIRED_DISTANCE) > DESIRED_DISTANCE_TOLERANCE:
			if distance_prediction - DESIRED_DISTANCE < 0:
				# Too close: back away; too far: creep forward.
				drive.backward(0.015)
			else:
				drive.forward(0.015)
		else:
			drive.stop()
	# Break on key pressed
	if select.select([sys.stdin,],[],[],0.0)[0]:
		break
# Release camera
time.sleep(0.5)
cap.release()
GPIO.cleanup()
time.sleep(1)
| [
"RPi.GPIO.cleanup",
"select.select",
"drive.backward",
"tracking.distance",
"drive.turn_right",
"time.sleep",
"numpy.array",
"drive.motor_setup",
"cv2.VideoCapture",
"drive.turn_left",
"drive.stop",
"tracking.steering_decision",
"drive.forward"
] | [((183, 205), 'numpy.array', 'np.array', (['[68, 65, 31]'], {}), '([68, 65, 31])\n', (191, 205), True, 'import numpy as np\n'), ((214, 239), 'numpy.array', 'np.array', (['[100, 229, 167]'], {}), '([100, 229, 167])\n', (222, 239), True, 'import numpy as np\n'), ((263, 281), 'cv2.VideoCapture', 'cv.VideoCapture', (['(0)'], {}), '(0)\n', (278, 281), True, 'import cv2 as cv\n'), ((828, 847), 'drive.motor_setup', 'drive.motor_setup', ([], {}), '()\n', (845, 847), False, 'import drive\n'), ((1743, 1758), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1753, 1758), False, 'import time\n'), ((1773, 1787), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (1785, 1787), True, 'import RPi.GPIO as GPIO\n'), ((1788, 1801), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1798, 1801), False, 'import time\n'), ((912, 998), 'tracking.steering_decision', 'tracking.steering_decision', (['frame', 'LOWER', 'UPPER', 'STEERING_TOLERANCE', 'HEIGHT', 'WIDTH'], {}), '(frame, LOWER, UPPER, STEERING_TOLERANCE, HEIGHT,\n WIDTH)\n', (938, 998), False, 'import tracking\n'), ((1042, 1098), 'tracking.distance', 'tracking.distance', (['pixel_width', 'REAL_WIDTH', 'FOCAL_LENGTH'], {}), '(pixel_width, REAL_WIDTH, FOCAL_LENGTH)\n', (1059, 1098), False, 'import tracking\n'), ((1248, 1260), 'drive.stop', 'drive.stop', ([], {}), '()\n', (1258, 1260), False, 'import drive\n'), ((1674, 1713), 'select.select', 'select.select', (['[sys.stdin]', '[]', '[]', '(0.0)'], {}), '([sys.stdin], [], [], 0.0)\n', (1687, 1713), False, 'import select\n'), ((1289, 1310), 'drive.turn_left', 'drive.turn_left', (['(0.01)'], {}), '(0.01)\n', (1304, 1310), False, 'import drive\n'), ((1628, 1640), 'drive.stop', 'drive.stop', ([], {}), '()\n', (1638, 1640), False, 'import drive\n'), ((1340, 1362), 'drive.turn_right', 'drive.turn_right', (['(0.01)'], {}), '(0.01)\n', (1356, 1362), False, 'import drive\n'), ((1372, 1384), 'drive.stop', 'drive.stop', ([], {}), '()\n', (1382, 1384), False, 'import drive\n'), 
((1561, 1582), 'drive.backward', 'drive.backward', (['(0.015)'], {}), '(0.015)\n', (1575, 1582), False, 'import drive\n'), ((1596, 1616), 'drive.forward', 'drive.forward', (['(0.015)'], {}), '(0.015)\n', (1609, 1616), False, 'import drive\n')] |
import dash
import numpy as np
import pandas as pd
import dash_core_components as dcc
import dash_html_components as html
from plotly import tools
from dash.dependencies import Input, Output
from cachetools import cached, TTLCache
import plotly.graph_objs as go
import geojson
import geopandas as gpd
# Shared TTL cache (100 entries, 5 minutes) backing the @cached fig() builder.
cache = TTLCache(maxsize=100, ttl=300)
# read ls election file
df_ls = pd.read_csv('ls_cleaned.csv')
# read geojson file using geopandas
# NOTE(review): india_map is loaded but never referenced below — confirm
# whether the map it was meant for is still planned.
india_map = gpd.read_file('india_criminal.geojson')
# Traces
@cached(cache)
def fig(year, section, result):
    """Build the three bar traces (criminal cases, assets, age).

    year: election year to filter the module-level df_ls on.
    section: grouping column; only 'State' and 'Party' are handled — any
        other value leaves candidates_group unbound and raises NameError.
    result: 'Yes'/'No' filters on the Winner column; 'All' disables it.

    Returns a list of three plotly bar-trace dicts, each showing the top 5
    groups by mean value.  Results are memoised in the shared TTL cache.
    """
    if result != 'All':
        candidates = df_ls[(df_ls.Year == year) & (df_ls.Winner == result)]
    else:
        candidates = df_ls[(df_ls.Year == year)]
    if section == 'State':
        candidates_group = candidates.groupby('State')
    elif section == 'Party':
        candidates_group = candidates.groupby('Party')
    # Top-5 group means for each metric, descending.
    candidate_group_crim = candidates_group['Criminal_Case'].mean(
    ).sort_values(ascending=False).head(5)
    candidate_group_assets = candidates_group['Assets_num'].mean(
    ).sort_values(ascending=False).head(5)
    candidates_group_age = candidates_group['Age'].mean(
    ).sort_values(ascending=False).head(5)
    traces = [{
        "x": candidate_group_crim.index.values,
        "y": np.around(candidate_group_crim.values, 1),
        "name": "<i><b>{} wise average no. of criminal cases</b></i>".format(section),
        "type": "bar",
        "text": "criminal case(s)",
        "marker": dict(
            # color = 'rgb(158,202,225)',
            line=dict(
                color='rgb(8,48,107)',
                width=1.5,
            )
        ),
        "opacity": 0.7
    },
        {
        "x": candidate_group_assets.index.values,
        "y": np.around(candidate_group_assets.values/10000000, 2),
        "name": "{} wise average assets (in Cr.)".format(section),
        "type": "bar",
        "text": "Crores",
        "marker": dict(
            # color = 'rgb(158,202,225)',
            line=dict(
                color='rgb(8,48,107)',
                width=1.5,
            )
        ),
        "opacity": 0.7
    },
        {
        "x": candidates_group_age.index.values,
        "y": np.around(candidates_group_age.values, 1),
        "name": "{} wise average age (in Yrs.)".format(section),
        "type": "bar",
        "text": "Years",
        "marker": dict(
            # color = 'rgb(158,202,225)',
            line=dict(
                color='rgb(8,48,107)',
                width=1.5,
            )
        ),
        "opacity": 0.7
    }
    ]
    return traces
# Layout for all the traces/divs/plots
def figlayout(year, section, result):
    """Return the three go.Layout objects matching the traces from fig().

    `year` and `result` are accepted for call-site symmetry but unused;
    only `section` appears in the titles and x-axis labels.
    """
    layouts = [
        go.Layout( # layout1
            # images= [dict(
            #     source= "https://images.indianexpress.com/2017/12/indian-parliament-express75911.jpg",
            #     xref= "x",
            #     yref= "y",
            #     x = 0,
            #     y = 0,
            #     sizex = 2,
            #     sizey = 2,
            #     sizing = "stretch",
            #     opacity = 0.5,
            #     layer = "below")],
            margin=dict(t=100), # it is margin within a graph
            title='<b>{} wise top 5 average no. of criminal cases</b>'.format(
                section),
            showlegend=False,
            xaxis=dict(
                title='<i><b>{}</i></b>'.format(section),
                # tickangle=-25,
                titlefont=dict(
                    family='Courier New, monospace',
                    size=18,
                    color='#7f7f7f'
                ),
                showticklabels=True,
                # tickangle=45,
                tickfont=dict(
                    family='Old Standard TT, serif',
                    size=10,
                    color='black'
                )
            ),
            yaxis=dict(
                title='<i><b>No. of Cases</i></b>',
                titlefont=dict(
                    family='Courier New, monospace',
                    size=18,
                    color='#7f7f7f'
                ),
                showticklabels=True,
                # tickangle=45,
                tickfont=dict(
                    family='Old Standard TT, serif',
                    size=10,
                    color='black'
                )
            )
        ),
        go.Layout( # layout2
            # margin=dict(t=150),
            title='<b>{} wise top 5 average assets (in Cr.)</b>'.format(
                section),
            showlegend=False,
            xaxis=dict(
                title='<i><b>{}</i></b>'.format(section),
                titlefont=dict(
                    family='Courier New, monospace',
                    size=18,
                    color='#7f7f7f'
                ),
                showticklabels=True,
                # tickangle=45,
                tickfont=dict(
                    family='Old Standard TT, serif',
                    size=10,
                    color='black'
                )
            ),
            yaxis=dict(
                title='<i><b>Assets (in Cr.)</i></b>',
                titlefont=dict(
                    family='Courier New, monospace',
                    size=18,
                    color='#7f7f7f'
                ),
                showticklabels=True,
                # tickangle=45,
                tickfont=dict(
                    family='Old Standard TT, serif',
                    size=10,
                    color='black'
                )
            )
        ),
        go.Layout( # layout3
            # margin=dict(t=150),
            title='<b>{} wise top 5 average age (in Yrs.)</b>'.format(section),
            showlegend=False,
            xaxis=dict(
                title='<i><b>{}</i></b>'.format(section),
                titlefont=dict(
                    family='Courier New, monospace',
                    size=18,
                    color='#7f7f7f'
                ),
                showticklabels=True,
                # tickangle=45,
                tickfont=dict(
                    family='Old Standard TT, serif',
                    size=10,
                    color='black'
                )
            ),
            yaxis=dict(
                # NOTE(review): the closing tags below look like they were
                # meant to be '</i></b>' — confirm before changing the string.
                title='<i><b>Age (in Yrs.)<i><b>',
                titlefont=dict(
                    family='Courier New, monospace',
                    size=18,
                    color='#7f7f7f'
                ),
                autorange=False,
                nticks=10,
                range=[30, 80],
                showticklabels=True,
                # tickangle=45,
                tickfont=dict(
                    family='Old Standard TT, serif',
                    size=10,
                    color='black'
                )
            )
        )
    ]
    return layouts
# color settings
# Header palette: dark-blue background with light-blue text.
colors = {
    'background': '#21618C',
    'text': '#AED6F1'
}
# Dash application instance; app.title sets the browser tab title.
app = dash.Dash()
app.title = 'Indian Lok Sabha'
# Page layout: a header div plus two tabs — 'Lok Sabha' with three dropdown
# filters (year / grouping / result) and three graphs wired to the callbacks
# below, and an 'Assembly' placeholder tab.
app.layout = html.Div([
    html.Div([
        html.H1('Analysis of candidates in Indian elections', style={
                'textAlign': 'center', 'fontFamily': 'system-ui'}),
        html.A(html.I('Data source'), href="http://www.myneta.info")], style = {'textAlign' : 'center',
    'color': colors['text'], 'backgroundColor': colors['background'],}),#, style={'color':'#BFC9CA'}),
    dcc.Tabs(id="tabs", children=[
        dcc.Tab(label='Lok Sabha', children=[
            html.Div([ # year drop down
                dcc.Dropdown(
                    id='year-slider',
                    options=[
                        {'label': '2004', 'value': '2004'},
                        {'label': '2009', 'value': '2009'},
                        {'label': '2014', 'value': '2014'}
                    ],
                    value="2004"
                )],
                className='three columns'),
            html.Div([ # party drop down
                dcc.Dropdown(
                    id='section-slider',
                    options=[
                        {'label': 'State', 'value': 'State'},
                        {'label': 'Party', 'value': 'Party'}
                    ],
                    value="State"
                )],
                className='three columns'),
            html.Div([ # results dropdown
                dcc.Dropdown(
                    id='result-slider',
                    options=[
                        {'label': 'Winners', 'value': 'Yes'},
                        {'label': 'Losers', 'value': 'No'},
                        {'label': 'All', 'value': 'All'}
                    ],
                    value="Yes"
                )],
                className='three columns'  # ,
                # style={'marginBottom': 50, 'marginTop': 25}
            ),
            html.Div(id='divgraph1', children=dcc.Graph(
                id='graph1',
                config={
                    'displayModeBar': False
                    # "displaylogo": False,
                    # 'modeBarButtonsToRemove': ['pan2d', 'lasso2d']
                },
                style={'width': "70vw"}
            ),
                style={'marginLeft': 50, 'marginBottom': 50, 'marginTop': 125}
            ),
            html.Div(id='divgraph2', children=dcc.Graph(
                id='graph2',
                config={
                    "displaylogo": False,
                    'modeBarButtonsToRemove': ['pan2d', 'lasso2d']
                },
                style={'width': "70vw"}
            ),
                style={'marginLeft': 50, 'marginBottom': 50}
            ),
            html.Div(id='divgraph3', children=dcc.Graph(
                id='graph3',
                config={
                    "displaylogo": False,
                    'modeBarButtonsToRemove': ['pan2d', 'lasso2d']
                },
                style={'width': "70vw"}
            ),
                style={'marginLeft': 50, 'marginBottom': 50}
            )
        ]),
        dcc.Tab(label='Assembly', children=[
            html.Div([
                html.H1("Tab 2 content")
            ]),
            html.Div(id='divgraph11', children=dcc.Graph(
                id='graph4',
                config={
                    'displayModeBar': False
                },
                style={'width': "70vw"}
            ),
                style={'marginLeft': 50, 'marginBottom': 50, 'marginTop': 125}
            ),
        ])
    ]
    )
])
@app.callback(
    Output('graph1', 'figure'),
    [Input('year-slider', 'value'),
    Input('section-slider', 'value'),
    Input('result-slider', 'value')])
def update_output_graph(input_value1, input_value2, input_value3):
    """Rebuild the criminal-cases chart (graph1) from the dropdown values."""
    selected_year = int(input_value1)
    bar_traces = fig(selected_year, input_value2, input_value3)
    chart_layouts = figlayout(selected_year, input_value2, input_value3)
    return go.Figure(data=[bar_traces[0]], layout=chart_layouts[0])
@app.callback(
    Output('graph2', 'figure'),
    [Input('year-slider', 'value'),
    Input('section-slider', 'value'),
    Input('result-slider', 'value')])
def update_output_graph(input_value1, input_value2, input_value3):
    """Rebuild the assets chart (graph2) from the dropdown values."""
    selected_year = int(input_value1)
    bar_traces = fig(selected_year, input_value2, input_value3)
    chart_layouts = figlayout(selected_year, input_value2, input_value3)
    return go.Figure(data=[bar_traces[1]], layout=chart_layouts[1])
@app.callback(
    Output('graph3', 'figure'),
    [Input('year-slider', 'value'),
    Input('section-slider', 'value'),
    Input('result-slider', 'value')])
def update_output_graph(input_value1, input_value2, input_value3):
    """Rebuild the age chart (graph3) from the dropdown values."""
    selected_year = int(input_value1)
    bar_traces = fig(selected_year, input_value2, input_value3)
    chart_layouts = figlayout(selected_year, input_value2, input_value3)
    return go.Figure(data=[bar_traces[2]], layout=chart_layouts[2])
# Start the Dash development server when run directly.
if __name__ == '__main__':
    app.run_server(debug=True)
| [
"dash_html_components.I",
"geopandas.read_file",
"pandas.read_csv",
"dash.dependencies.Output",
"dash.dependencies.Input",
"cachetools.TTLCache",
"cachetools.cached",
"dash_core_components.Dropdown",
"numpy.around",
"dash_html_components.H1",
"plotly.graph_objs.Figure",
"dash.Dash",
"dash_co... | [((310, 340), 'cachetools.TTLCache', 'TTLCache', ([], {'maxsize': '(100)', 'ttl': '(300)'}), '(maxsize=100, ttl=300)\n', (318, 340), False, 'from cachetools import cached, TTLCache\n'), ((374, 403), 'pandas.read_csv', 'pd.read_csv', (['"""ls_cleaned.csv"""'], {}), "('ls_cleaned.csv')\n", (385, 403), True, 'import pandas as pd\n'), ((453, 492), 'geopandas.read_file', 'gpd.read_file', (['"""india_criminal.geojson"""'], {}), "('india_criminal.geojson')\n", (466, 492), True, 'import geopandas as gpd\n'), ((505, 518), 'cachetools.cached', 'cached', (['cache'], {}), '(cache)\n', (511, 518), False, 'from cachetools import cached, TTLCache\n'), ((7007, 7018), 'dash.Dash', 'dash.Dash', ([], {}), '()\n', (7016, 7018), False, 'import dash\n'), ((10924, 10970), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': '[traces[0]]', 'layout': 'layouts[0]'}), '(data=[traces[0]], layout=layouts[0])\n', (10933, 10970), True, 'import plotly.graph_objs as go\n'), ((10545, 10571), 'dash.dependencies.Output', 'Output', (['"""graph1"""', '"""figure"""'], {}), "('graph1', 'figure')\n", (10551, 10571), False, 'from dash.dependencies import Input, Output\n'), ((11370, 11416), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': '[traces[1]]', 'layout': 'layouts[1]'}), '(data=[traces[1]], layout=layouts[1])\n', (11379, 11416), True, 'import plotly.graph_objs as go\n'), ((10991, 11017), 'dash.dependencies.Output', 'Output', (['"""graph2"""', '"""figure"""'], {}), "('graph2', 'figure')\n", (10997, 11017), False, 'from dash.dependencies import Input, Output\n'), ((11818, 11864), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': '[traces[2]]', 'layout': 'layouts[2]'}), '(data=[traces[2]], layout=layouts[2])\n', (11827, 11864), True, 'import plotly.graph_objs as go\n'), ((11438, 11464), 'dash.dependencies.Output', 'Output', (['"""graph3"""', '"""figure"""'], {}), "('graph3', 'figure')\n", (11444, 11464), False, 'from dash.dependencies import Input, Output\n'), ((10578, 
10607), 'dash.dependencies.Input', 'Input', (['"""year-slider"""', '"""value"""'], {}), "('year-slider', 'value')\n", (10583, 10607), False, 'from dash.dependencies import Input, Output\n'), ((10613, 10645), 'dash.dependencies.Input', 'Input', (['"""section-slider"""', '"""value"""'], {}), "('section-slider', 'value')\n", (10618, 10645), False, 'from dash.dependencies import Input, Output\n'), ((10651, 10682), 'dash.dependencies.Input', 'Input', (['"""result-slider"""', '"""value"""'], {}), "('result-slider', 'value')\n", (10656, 10682), False, 'from dash.dependencies import Input, Output\n'), ((11024, 11053), 'dash.dependencies.Input', 'Input', (['"""year-slider"""', '"""value"""'], {}), "('year-slider', 'value')\n", (11029, 11053), False, 'from dash.dependencies import Input, Output\n'), ((11059, 11091), 'dash.dependencies.Input', 'Input', (['"""section-slider"""', '"""value"""'], {}), "('section-slider', 'value')\n", (11064, 11091), False, 'from dash.dependencies import Input, Output\n'), ((11097, 11128), 'dash.dependencies.Input', 'Input', (['"""result-slider"""', '"""value"""'], {}), "('result-slider', 'value')\n", (11102, 11128), False, 'from dash.dependencies import Input, Output\n'), ((11471, 11500), 'dash.dependencies.Input', 'Input', (['"""year-slider"""', '"""value"""'], {}), "('year-slider', 'value')\n", (11476, 11500), False, 'from dash.dependencies import Input, Output\n'), ((11507, 11539), 'dash.dependencies.Input', 'Input', (['"""section-slider"""', '"""value"""'], {}), "('section-slider', 'value')\n", (11512, 11539), False, 'from dash.dependencies import Input, Output\n'), ((11545, 11576), 'dash.dependencies.Input', 'Input', (['"""result-slider"""', '"""value"""'], {}), "('result-slider', 'value')\n", (11550, 11576), False, 'from dash.dependencies import Input, Output\n'), ((1277, 1318), 'numpy.around', 'np.around', (['candidate_group_crim.values', '(1)'], {}), '(candidate_group_crim.values, 1)\n', (1286, 1318), True, 'import numpy as np\n'), 
((1750, 1804), 'numpy.around', 'np.around', (['(candidate_group_assets.values / 10000000)', '(2)'], {}), '(candidate_group_assets.values / 10000000, 2)\n', (1759, 1804), True, 'import numpy as np\n'), ((2202, 2243), 'numpy.around', 'np.around', (['candidates_group_age.values', '(1)'], {}), '(candidates_group_age.values, 1)\n', (2211, 2243), True, 'import numpy as np\n'), ((7095, 7210), 'dash_html_components.H1', 'html.H1', (['"""Analysis of candidates in Indian elections"""'], {'style': "{'textAlign': 'center', 'fontFamily': 'system-ui'}"}), "('Analysis of candidates in Indian elections', style={'textAlign':\n 'center', 'fontFamily': 'system-ui'})\n", (7102, 7210), True, 'import dash_html_components as html\n'), ((7232, 7253), 'dash_html_components.I', 'html.I', (['"""Data source"""'], {}), "('Data source')\n", (7238, 7253), True, 'import dash_html_components as html\n'), ((7572, 7742), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""year-slider"""', 'options': "[{'label': '2004', 'value': '2004'}, {'label': '2009', 'value': '2009'}, {\n 'label': '2014', 'value': '2014'}]", 'value': '"""2004"""'}), "(id='year-slider', options=[{'label': '2004', 'value': '2004'},\n {'label': '2009', 'value': '2009'}, {'label': '2014', 'value': '2014'}],\n value='2004')\n", (7584, 7742), True, 'import dash_core_components as dcc\n'), ((8009, 8147), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""section-slider"""', 'options': "[{'label': 'State', 'value': 'State'}, {'label': 'Party', 'value': 'Party'}]", 'value': '"""State"""'}), "(id='section-slider', options=[{'label': 'State', 'value':\n 'State'}, {'label': 'Party', 'value': 'Party'}], value='State')\n", (8021, 8147), True, 'import dash_core_components as dcc\n'), ((8398, 8569), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""result-slider"""', 'options': "[{'label': 'Winners', 'value': 'Yes'}, {'label': 'Losers', 'value': 'No'},\n {'label': 'All', 'value': 'All'}]", 'value': 
'"""Yes"""'}), "(id='result-slider', options=[{'label': 'Winners', 'value':\n 'Yes'}, {'label': 'Losers', 'value': 'No'}, {'label': 'All', 'value':\n 'All'}], value='Yes')\n", (8410, 8569), True, 'import dash_core_components as dcc\n'), ((8907, 8992), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""graph1"""', 'config': "{'displayModeBar': False}", 'style': "{'width': '70vw'}"}), "(id='graph1', config={'displayModeBar': False}, style={'width':\n '70vw'})\n", (8916, 8992), True, 'import dash_core_components as dcc\n'), ((9344, 9474), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""graph2"""', 'config': "{'displaylogo': False, 'modeBarButtonsToRemove': ['pan2d', 'lasso2d']}", 'style': "{'width': '70vw'}"}), "(id='graph2', config={'displaylogo': False,\n 'modeBarButtonsToRemove': ['pan2d', 'lasso2d']}, style={'width': '70vw'})\n", (9353, 9474), True, 'import dash_core_components as dcc\n'), ((9715, 9845), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""graph3"""', 'config': "{'displaylogo': False, 'modeBarButtonsToRemove': ['pan2d', 'lasso2d']}", 'style': "{'width': '70vw'}"}), "(id='graph3', config={'displaylogo': False,\n 'modeBarButtonsToRemove': ['pan2d', 'lasso2d']}, style={'width': '70vw'})\n", (9724, 9845), True, 'import dash_core_components as dcc\n'), ((10134, 10158), 'dash_html_components.H1', 'html.H1', (['"""Tab 2 content"""'], {}), "('Tab 2 content')\n", (10141, 10158), True, 'import dash_html_components as html\n'), ((10222, 10307), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""graph4"""', 'config': "{'displayModeBar': False}", 'style': "{'width': '70vw'}"}), "(id='graph4', config={'displayModeBar': False}, style={'width':\n '70vw'})\n", (10231, 10307), True, 'import dash_core_components as dcc\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module implements Deep QL with two networks: Q-network and target-network
the DQL uses an MLP Q-network
it is retrained only after 'episodes' iterations
the training uses replay memory
"""
from copy import deepcopy
import numpy as np
import logging
from DQL.deepQL import DeepQL
from DQL.clone import clone_model
class DQL(DeepQL):
    """Deep Q-Learning agent with an online Q-network and a target network.

    The online network (``self.model``) is trained through experience
    replay; the target network (``self.target``) supplies the greedy
    Q_max term of the TD target and is refreshed with a copy of the
    online network after every ``episodes`` replays.

    ref. https://keon.io/deep-q-learning/
         https://github.com/simoninithomas/deep_q_learning/blob/master/DQL%20Cartpole.ipynb
         https://medium.com/@gtnjuvin/my-journey-into-deep-q-learning-with-keras-and-gym-3e779cc12762
    """
    def __init__(self,
                 env,  # the environment object
                 model,
                 memory,
                 timesteps=1,
                 epsilon=0.1,  # exploration rate
                 epsilon_min=0.1,
                 epsilon_decay=0.995,
                 learning_rate=0.001,
                 gamma=0.95,
                 batch_size=32,  # size of the batch in the replay
                 episodes=30,  # number of iterations before replay occurs
                 epochs=1,
                 log_level=logging.DEBUG,
                 interaction_interval=30,  # wait 30 seconds before another cycle
                 **kwargs):
        super().__init__(env=env, model=model, memory=memory,
                         timesteps=timesteps, epsilon=epsilon,
                         epsilon_min=epsilon_min, epsilon_decay=epsilon_decay,
                         learning_rate=learning_rate, gamma=gamma,
                         batch_size=batch_size, episodes=episodes,
                         epochs=epochs, log_level=log_level,
                         interaction_interval=interaction_interval)
        # Start the target network as an exact copy of the online model.
        self.target = self.model
        self.copy_to_target()
    def copy_to_target(self):
        """Replace the target network with a fresh copy of the online model."""
        self.log.debug("copy model to target")
        try:
            duplicate = deepcopy(self.model)
        except TypeError:
            # Some model objects are not deep-copyable; fall back to the
            # framework-specific clone helper.
            duplicate = clone_model(self.model)
        self.target = duplicate
    def copy_weights(self):
        """Synchronise the target network's weights with the online model's."""
        self.target.set_weights(self.model.get_weights())
    def save_model(self, model_filename='model.json'):
        """ save the model and target networks to a json file and the weights to a h5 file
        overwritten method to save both networks
        @param model_filename: the filename with '.json' extension
        """
        # Parent call persists self.model; the target weights go to a
        # sibling '-target.h5' file.
        super().save_model(model_filename=model_filename)
        target_weights_file = model_filename.replace('.json', '-target.h5')
        self.target.save_weights(target_weights_file)
    def get_q_max(self, sprime):
        """ the Q_max is calculated using the target network
        @param sprime: the sequence of next states (s')
        @return: the Qmax value used in the TD-error, defined as the greedy move
                    Q_max = max Q_target(s', a')
                             a'
        """
        predictions = self.target.predict(sprime)
        q_max = np.amax(predictions, axis=1)
        self.log.debug("s': {} Q max: {}".format(sprime, q_max))
        return q_max
    def replay(self):
        """
        produces the replay, that trains the model's parameters
        and if C replays occur then update target's parameters
        """
        trained = super().replay()
        # After C (= self.episodes) replays, copy theta from model to target.
        if trained and self.replay_counter == self.episodes:
            self.copy_to_target()
            self.log.info("Updated target network in #{}".format(self.runs))
            self.replay_counter = 0  # zeros the count
| [
"DQL.clone.clone_model",
"numpy.amax",
"copy.deepcopy"
] | [((3334, 3363), 'numpy.amax', 'np.amax', (['q_prediction'], {'axis': '(1)'}), '(q_prediction, axis=1)\n', (3341, 3363), True, 'import numpy as np\n'), ((2259, 2279), 'copy.deepcopy', 'deepcopy', (['self.model'], {}), '(self.model)\n', (2267, 2279), False, 'from copy import deepcopy\n'), ((2331, 2354), 'DQL.clone.clone_model', 'clone_model', (['self.model'], {}), '(self.model)\n', (2342, 2354), False, 'from DQL.clone import clone_model\n')] |
import pandas as pd
import time
import random
import numpy as np
from datetime import timedelta
from datetime import datetime
import MAUC
import argparse
parser = argparse.ArgumentParser(usage='python3 evalOneSubmission.py',
description=r'''
TADPOLE Evaluation Script:
The program computes the following matrics:
Clinical diagnosis prediction:
1. Multiclass area under the receiver operating curve (mAUC)
2. Balanced classification accuracy (BCA)
Continuous feature predictions:
3. Mean Absolute Error (MAE)
4. Coverage Probability Accuracy (CPA)
5. Weighted Error Score (WES)
Author: <NAME>, <EMAIL>
''')
def calcBCA(estimLabels, trueLabels, nrClasses):
  """Balanced Classification Accuracy (BCA).

  Averages 0.5 * (sensitivity + specificity) over every unordered pair of
  classes, taking the lower-indexed class of each pair as positive.  A rate
  whose denominator is zero (a class never occurring in the relevant role)
  defaults to 0.5.
  """
  def safeRate(num, denom):
    # Guard against classes that never appear: default the rate to 0.5.
    return num / denom if denom > 0 else 0.5

  pairScores = []
  for c0 in range(nrClasses - 1):
    for c1 in range(c0 + 1, nrClasses):
      # c0 = positive class & c1 = negative class
      predPos = estimLabels == c0
      predNeg = estimLabels == c1
      TP = np.sum(predPos & (trueLabels == c0))
      TN = np.sum(predNeg & (trueLabels == c1))
      FP = np.sum(predNeg & (trueLabels == c0))
      FN = np.sum(predPos & (trueLabels == c1))
      sensitivity = safeRate(TP, TP + FN)
      specificity = safeRate(TN, TN + FP)
      pairScores += [0.5 * (sensitivity + specificity)]
  return np.mean(pairScores)
def parseData(d4Df, forecastDf, diagLabels):
  """Align the user's monthly forecasts with the D4 ground truth.

  For each D4 subject, picks the forecast row closest in time to the
  cognitive assessment date (for diagnosis probabilities and ADAS13) and
  the one closest to the MRI scan date (for ventricle volume).  Rows whose
  ground truth is NaN are filtered out together with their estimates.

  Parameters
  ----------
  d4Df - Pandas frame with D4 ground truth (one row per subject)
  forecastDf - Pandas frame with the user's monthly forecasts
  diagLabels - diagnosis label names (currently unused in the body)

  Returns
  -------
  zipTrueLabelAndProbs - list of (true diagnosis, [pCN, pMCI, pAD]) pairs
  hardEstimClassFilt - hard diagnosis estimates (argmax of probabilities)
  adasEstim, adasEstimLo, adasEstimUp - ADAS13 estimates and 50% CI bounds
  ventriclesEstim, ventriclesEstimLo, ventriclesEstimUp - ventricle estimates
  trueDiagFilt, trueADASFilt, trueVentsFilt - NaN-filtered ground truth

  Raises
  ------
  ValueError - if any subject or any forecast month is missing.
  """
  trueDiag = d4Df['Diagnosis']
  trueADAS = d4Df['ADAS13']
  trueVents = d4Df['Ventricles']
  nrSubj = d4Df.shape[0]
  zipTrueLabelAndProbs = []
  # Pre-fill every estimate with -1 so unmatched subjects are detectable.
  hardEstimClass = -1 * np.ones(nrSubj, int)
  adasEstim = -1 * np.ones(nrSubj, float)
  adasEstimLo = -1 * np.ones(nrSubj, float) # lower margin
  adasEstimUp = -1 * np.ones(nrSubj, float) # upper margin
  ventriclesEstim = -1 * np.ones(nrSubj, float)
  ventriclesEstimLo = -1 * np.ones(nrSubj, float) # lower margin
  ventriclesEstimUp = -1 * np.ones(nrSubj, float) # upper margin
  # print('subDf.keys()', forecastDf['Forecast Date'])
  # NOTE(review): invalidResultReturn is never used below — dead variable.
  invalidResultReturn = (None,None,None,None,None,None,None,None,None,None,None)
  invalidFlag = False
  # for each subject in D4 match the closest user forecasts
  for s in range(nrSubj):
    currSubjMask = d4Df['RID'].iloc[s] == forecastDf['RID']
    currSubjData = forecastDf[currSubjMask]
    # if subject is missing
    if currSubjData.shape[0] == 0:
      print('WARNING: Subject RID %s missing from user forecasts' % d4Df['RID'].iloc[s])
      invalidFlag = True
      continue
    # if not all forecast months are present
    if currSubjData.shape[0] < 5*12: # check if at least 5 years worth of forecasts exist
      print('WARNING: Missing forecast months for subject with RID %s' % d4Df['RID'].iloc[s])
      invalidFlag = True
      continue
    currSubjData = currSubjData.reset_index(drop=True)
    timeDiffsScanCog = [d4Df['CognitiveAssessmentDate'].iloc[s] - d for d in currSubjData['Forecast Date']]
    # print('Forecast Date 2',currSubjData['Forecast Date'])
    # Index of the forecast month nearest to the cognitive assessment date.
    indexMin = np.argsort(np.abs(timeDiffsScanCog))[0]
    # print('timeDiffsScanMri', indexMin, timeDiffsScanMri)
    pCN = currSubjData['CN relative probability'].iloc[indexMin]
    pMCI = currSubjData['MCI relative probability'].iloc[indexMin]
    pAD = currSubjData['AD relative probability'].iloc[indexMin]
    # normalise the relative probabilities by their sum
    # NOTE(review): dividing the sum by 3 makes pCN+pMCI+pAD == 3, not 1.
    # The argmax below is unaffected; confirm MAUC only needs relative values.
    pSum = (pCN + pMCI + pAD)/3
    pCN /= pSum
    pMCI /= pSum
    pAD /= pSum
    hardEstimClass[s] = np.argmax([pCN, pMCI, pAD])
    adasEstim[s] = currSubjData['ADAS13'].iloc[indexMin]
    adasEstimLo[s] = currSubjData['ADAS13 50% CI lower'].iloc[indexMin]
    adasEstimUp[s] = currSubjData['ADAS13 50% CI upper'].iloc[indexMin]
    # for the mri scan find the forecast closest to the scan date,
    # which might be different from the cognitive assessment date
    timeDiffsScanMri = [d4Df['ScanDate'].iloc[s] - d for d in currSubjData['Forecast Date']]
    indexMinMri = np.argsort(np.abs(timeDiffsScanMri))[0]
    ventriclesEstim[s] = currSubjData['Ventricles_ICV'].iloc[indexMinMri]
    ventriclesEstimLo[s] = currSubjData['Ventricles_ICV 50% CI lower'].iloc[indexMinMri]
    ventriclesEstimUp[s] = currSubjData['Ventricles_ICV 50% CI upper'].iloc[indexMinMri]
    # print('%d probs' % d4Df['RID'].iloc[s], pCN, pMCI, pAD)
    # Only keep subjects whose true diagnosis is known for the mAUC input.
    if not np.isnan(trueDiag.iloc[s]):
      zipTrueLabelAndProbs += [(trueDiag.iloc[s], [pCN, pMCI, pAD])]
  if invalidFlag:
    # if at least one subject was missing or if
    raise ValueError('Submission was incomplete. Please resubmit')
  # If there are NaNs in D4, filter out them along with the corresponding user forecasts
  # This can happen if rollover subjects don't come for visit in ADNI3.
  notNanMaskDiag = np.logical_not(np.isnan(trueDiag))
  trueDiagFilt = trueDiag[notNanMaskDiag]
  hardEstimClassFilt = hardEstimClass[notNanMaskDiag]
  notNanMaskADAS = np.logical_not(np.isnan(trueADAS))
  trueADASFilt = trueADAS[notNanMaskADAS]
  adasEstim = adasEstim[notNanMaskADAS]
  adasEstimLo = adasEstimLo[notNanMaskADAS]
  adasEstimUp = adasEstimUp[notNanMaskADAS]
  notNanMaskVents = np.logical_not(np.isnan(trueVents))
  trueVentsFilt = trueVents[notNanMaskVents]
  ventriclesEstim = ventriclesEstim[notNanMaskVents]
  ventriclesEstimLo = ventriclesEstimLo[notNanMaskVents]
  ventriclesEstimUp = ventriclesEstimUp[notNanMaskVents]
  # Each metric group must stay aligned after the NaN filtering above.
  assert trueDiagFilt.shape[0] == hardEstimClassFilt.shape[0]
  assert trueADASFilt.shape[0] == adasEstim.shape[0] == adasEstimLo.shape[0] == adasEstimUp.shape[0]
  assert trueVentsFilt.shape[0] == ventriclesEstim.shape[0] == \
         ventriclesEstimLo.shape[0] == ventriclesEstimUp.shape[0]
  return zipTrueLabelAndProbs, hardEstimClassFilt, adasEstim, adasEstimLo, adasEstimUp, \
         ventriclesEstim, ventriclesEstimLo, ventriclesEstimUp, trueDiagFilt, trueADASFilt, trueVentsFilt
def evalOneSub(d4Df, forecastDf):
  """
  Evaluates one submission.

  Note: mutates both input frames in place — 'Forecast Date' is parsed to
  datetime in forecastDf, and d4Df's date columns and 'Diagnosis' labels
  are converted on the first call.

  Parameters
  ----------
  d4Df - Pandas data frame containing the D4 dataset
  forecastDf - Pandas data frame containing user forecasts for D2 subjects.

  Returns
  -------
  mAUC - multiclass Area Under Curve
  bca - balanced classification accuracy
  adasMAE - ADAS13 Mean Absolute Error
  ventsMAE - Ventricles Mean Absolute Error
  adasWES - ADAS13 Weighted Error Score
  ventsWES - Ventricles Weighted Error Score
  adasCPA - ADAS13 Coverage Probability Accuracy (|coverage - 0.5|)
  ventsCPA - Ventricles Coverage Probability Accuracy (|coverage - 0.5|)
  """
  forecastDf['Forecast Date'] = [datetime.strptime(x, '%Y-%m') for x in forecastDf['Forecast Date']] # considers every month estimate to be the actual first day 2017-01
  # String diagnosis labels mean d4Df has not been converted yet; parse
  # dates and map labels to integer classes (idempotent across calls).
  if isinstance(d4Df['Diagnosis'].iloc[0], str):
    d4Df['CognitiveAssessmentDate'] = [datetime.strptime(x, '%Y-%m-%d') for x in d4Df['CognitiveAssessmentDate']]
    d4Df['ScanDate'] = [datetime.strptime(x, '%Y-%m-%d') for x in d4Df['ScanDate']]
    mapping = {'CN' : 0, 'MCI' : 1, 'AD' : 2}
    d4Df.replace({'Diagnosis':mapping}, inplace=True)
  diagLabels = ['CN', 'MCI', 'AD']
  zipTrueLabelAndProbs, hardEstimClass, adasEstim, adasEstimLo, adasEstimUp, \
    ventriclesEstim, ventriclesEstimLo, ventriclesEstimUp, trueDiagFilt, trueADASFilt, trueVentsFilt = \
      parseData(d4Df, forecastDf, diagLabels)
  zipTrueLabelAndProbs = list(zipTrueLabelAndProbs)
  ########## compute metrics for the clinical status #############
  ##### Multiclass AUC (mAUC) #####
  nrClasses = len(diagLabels)
  mAUC = MAUC.MAUC(zipTrueLabelAndProbs, num_classes=nrClasses)
  ### Balanced Classification Accuracy (BCA) ###
  # print('hardEstimClass', np.unique(hardEstimClass), hardEstimClass)
  trueDiagFilt = trueDiagFilt.astype(int)
  # print('trueDiagFilt', np.unique(trueDiagFilt), trueDiagFilt)
  bca = calcBCA(hardEstimClass, trueDiagFilt, nrClasses=nrClasses)
  ####### compute metrics for Ventricles and ADAS13 ##########
  #### Mean Absolute Error (MAE) #####
  adasMAE = np.mean(np.abs(adasEstim - trueADASFilt))
  ventsMAE = np.mean(np.abs(ventriclesEstim - trueVentsFilt))
  ##### Weighted Error Score (WES) ####
  # Weights are the reciprocal widths of the 50% confidence intervals, so
  # confident (narrow) forecasts count more.
  adasCoeffs = 1/(adasEstimUp - adasEstimLo)
  adasWES = np.sum(adasCoeffs * np.abs(adasEstim - trueADASFilt))/np.sum(adasCoeffs)
  ventsCoeffs = 1/(ventriclesEstimUp - ventriclesEstimLo)
  ventsWES = np.sum(ventsCoeffs * np.abs(ventriclesEstim - trueVentsFilt))/np.sum(ventsCoeffs)
  #### Coverage Probability Accuracy (CPA) ####
  # A perfectly calibrated 50% CI covers the truth half the time, so the
  # score is the distance of the empirical coverage from 0.5 (lower = better).
  adasCovProb = np.sum((adasEstimLo < trueADASFilt) &
                      (adasEstimUp > trueADASFilt))/trueADASFilt.shape[0]
  adasCPA = np.abs(adasCovProb - 0.5)
  ventsCovProb = np.sum((ventriclesEstimLo < trueVentsFilt) &
                       (ventriclesEstimUp > trueVentsFilt))/trueVentsFilt.shape[0]
  ventsCPA = np.abs(ventsCovProb - 0.5)
  return mAUC, bca, adasMAE, ventsMAE, adasWES, ventsWES, adasCPA, ventsCPA
if __name__ == "__main__":
  # Attach the script-specific arguments to the module-level parser.
  parser.add_argument('--d4File', dest='d4File', help='CSV file containing the D4 dataset. '\
    'Needs to be in the same format of D4_dummy.csv')
  parser.add_argument('--forecastFile', dest='forecastFile', help='CSV file containing the user '
    'forecasts for subjects in D2. Needs to be in the same format as '
    'TADPOLE_Submission_TeamName1.xlsx or TADPOLE_Submission_Leaderboard_TeamName1.csv')
  parser.add_argument('--leaderboard', action='store_true', help='pass this flag if the submission is a leaderboard submission. It ensures the filename is in the right format')
  args = parser.parse_args()
  d4File = args.d4File
  forecastFile = args.forecastFile
  # Validate the submission file name before doing any work.
  forecastFileShort = forecastFile.split('/')[-1]
  if args.leaderboard:
    if (not forecastFileShort.startswith('TADPOLE_Submission_Pycon_')) or (not forecastFileShort.endswith('.csv')):
      raise ValueError('File %s is not in the correct format: ' % forecastFileShort +
      'TADPOLE_Submission_Pycon_TeamName.csv')
  else:
    if (not forecastFileShort.startswith('TADPOLE_Submission_')) or (not forecastFileShort.endswith('.csv')):
      raise ValueError('File %s is not in the correct format: TADPOLE_Submission_TeamName.csv.' % forecastFileShort)
    if 'TeamName' in forecastFileShort:
      raise ValueError(r'''
      Wrong file name! First rename the submission file
      ''' + forecastFileShort + r'''
      to
      TADPOLE_Submission_Pycon_<YourTeamName><Index>.csv
      Examples:
      TADPOLE_Submission_Pycon_PyHackers1.csv (first submission)
      TADPOLE_Submission_Pycon_PowerRangers3.csv (third submission)
      ''')
  d4Df = pd.read_csv(d4File)
  subDf = pd.read_csv(forecastFile)
  # don't catch the exception here, as this main function is used to test if the submission if correct
  mAUC, bca, adasMAE, ventsMAE, adasWES, ventsWES, adasCPA, ventsCPA = \
    evalOneSub(d4Df, subDf)
  print('########### Metrics for clinical status ##################')
  print('mAUC', mAUC)
  print('bca', bca)
  print('\n########### Mean Absolute Error (MAE) ##################')
  print('adasMAE', adasMAE, 'ventsMAE', ventsMAE)
  print('\n########### Weighted Error Score (WES) ##################')
  print('adasWES', adasWES, 'ventsWES', ventsWES)
  print('\n########### Coverage Probability Accuracy ##################')
  print('adasCPA', adasCPA, 'ventsCPA', ventsCPA)
  print('\n\n########### File is ready for submission to TADPOLE ###########')
| [
"numpy.mean",
"numpy.abs",
"MAUC.MAUC",
"numpy.ones",
"pandas.read_csv",
"argparse.ArgumentParser",
"datetime.datetime.strptime",
"numpy.argmax",
"numpy.sum",
"numpy.isnan"
] | [((164, 642), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""python3 evalOneSubmission.py"""', 'description': '"""\n TADPOLE Evaluation Script:\n The program computes the following matrics:\n\n Clinical diagnosis prediction:\n\n 1. Multiclass area under the receiver operating curve (mAUC)\n 2. Balanced classification accuracy (BCA)\n\n Continuous feature predictions:\n 3. Mean Absolute Error (MAE)\n 4. Coverage Probability Accuracy (CPA)\n 5. Weighted Error Score (WES)\n\n Author: <NAME>, <EMAIL>\n\n """'}), '(usage=\'python3 evalOneSubmission.py\', description=\n """\n TADPOLE Evaluation Script:\n The program computes the following matrics:\n\n Clinical diagnosis prediction:\n\n 1. Multiclass area under the receiver operating curve (mAUC)\n 2. Balanced classification accuracy (BCA)\n\n Continuous feature predictions:\n 3. Mean Absolute Error (MAE)\n 4. Coverage Probability Accuracy (CPA)\n 5. Weighted Error Score (WES)\n\n Author: <NAME>, <EMAIL>\n\n """\n )\n', (187, 642), False, 'import argparse\n'), ((1636, 1651), 'numpy.mean', 'np.mean', (['bcaAll'], {}), '(bcaAll)\n', (1643, 1651), True, 'import numpy as np\n'), ((7658, 7712), 'MAUC.MAUC', 'MAUC.MAUC', (['zipTrueLabelAndProbs'], {'num_classes': 'nrClasses'}), '(zipTrueLabelAndProbs, num_classes=nrClasses)\n', (7667, 7712), False, 'import MAUC\n'), ((8745, 8770), 'numpy.abs', 'np.abs', (['(adasCovProb - 0.5)'], {}), '(adasCovProb - 0.5)\n', (8751, 8770), True, 'import numpy as np\n'), ((8931, 8957), 'numpy.abs', 'np.abs', (['(ventsCovProb - 0.5)'], {}), '(ventsCovProb - 0.5)\n', (8937, 8957), True, 'import numpy as np\n'), ((10698, 10717), 'pandas.read_csv', 'pd.read_csv', (['d4File'], {}), '(d4File)\n', (10709, 10717), True, 'import pandas as pd\n'), ((10728, 10753), 'pandas.read_csv', 'pd.read_csv', (['forecastFile'], {}), '(forecastFile)\n', (10739, 10753), True, 'import pandas as pd\n'), ((1871, 1891), 'numpy.ones', 'np.ones', (['nrSubj', 'int'], {}), '(nrSubj, int)\n', (1878, 
1891), True, 'import numpy as np\n'), ((1911, 1933), 'numpy.ones', 'np.ones', (['nrSubj', 'float'], {}), '(nrSubj, float)\n', (1918, 1933), True, 'import numpy as np\n'), ((1955, 1977), 'numpy.ones', 'np.ones', (['nrSubj', 'float'], {}), '(nrSubj, float)\n', (1962, 1977), True, 'import numpy as np\n'), ((2015, 2037), 'numpy.ones', 'np.ones', (['nrSubj', 'float'], {}), '(nrSubj, float)\n', (2022, 2037), True, 'import numpy as np\n'), ((2079, 2101), 'numpy.ones', 'np.ones', (['nrSubj', 'float'], {}), '(nrSubj, float)\n', (2086, 2101), True, 'import numpy as np\n'), ((2129, 2151), 'numpy.ones', 'np.ones', (['nrSubj', 'float'], {}), '(nrSubj, float)\n', (2136, 2151), True, 'import numpy as np\n'), ((2195, 2217), 'numpy.ones', 'np.ones', (['nrSubj', 'float'], {}), '(nrSubj, float)\n', (2202, 2217), True, 'import numpy as np\n'), ((3748, 3775), 'numpy.argmax', 'np.argmax', (['[pCN, pMCI, pAD]'], {}), '([pCN, pMCI, pAD])\n', (3757, 3775), True, 'import numpy as np\n'), ((5018, 5036), 'numpy.isnan', 'np.isnan', (['trueDiag'], {}), '(trueDiag)\n', (5026, 5036), True, 'import numpy as np\n'), ((5169, 5187), 'numpy.isnan', 'np.isnan', (['trueADAS'], {}), '(trueADAS)\n', (5177, 5187), True, 'import numpy as np\n'), ((5395, 5414), 'numpy.isnan', 'np.isnan', (['trueVents'], {}), '(trueVents)\n', (5403, 5414), True, 'import numpy as np\n'), ((6711, 6740), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%Y-%m"""'], {}), "(x, '%Y-%m')\n", (6728, 6740), False, 'from datetime import datetime\n'), ((8133, 8165), 'numpy.abs', 'np.abs', (['(adasEstim - trueADASFilt)'], {}), '(adasEstim - trueADASFilt)\n', (8139, 8165), True, 'import numpy as np\n'), ((8188, 8227), 'numpy.abs', 'np.abs', (['(ventriclesEstim - trueVentsFilt)'], {}), '(ventriclesEstim - trueVentsFilt)\n', (8194, 8227), True, 'import numpy as np\n'), ((8381, 8399), 'numpy.sum', 'np.sum', (['adasCoeffs'], {}), '(adasCoeffs)\n', (8387, 8399), True, 'import numpy as np\n'), ((8534, 8553), 'numpy.sum', 'np.sum', 
(['ventsCoeffs'], {}), '(ventsCoeffs)\n', (8540, 8553), True, 'import numpy as np\n'), ((8620, 8687), 'numpy.sum', 'np.sum', (['((adasEstimLo < trueADASFilt) & (adasEstimUp > trueADASFilt))'], {}), '((adasEstimLo < trueADASFilt) & (adasEstimUp > trueADASFilt))\n', (8626, 8687), True, 'import numpy as np\n'), ((8789, 8874), 'numpy.sum', 'np.sum', (['((ventriclesEstimLo < trueVentsFilt) & (ventriclesEstimUp > trueVentsFilt))'], {}), '((ventriclesEstimLo < trueVentsFilt) & (ventriclesEstimUp >\n trueVentsFilt))\n', (8795, 8874), True, 'import numpy as np\n'), ((869, 917), 'numpy.sum', 'np.sum', (['((estimLabels == c0) & (trueLabels == c0))'], {}), '((estimLabels == c0) & (trueLabels == c0))\n', (875, 917), True, 'import numpy as np\n'), ((929, 977), 'numpy.sum', 'np.sum', (['((estimLabels == c1) & (trueLabels == c1))'], {}), '((estimLabels == c1) & (trueLabels == c1))\n', (935, 977), True, 'import numpy as np\n'), ((989, 1037), 'numpy.sum', 'np.sum', (['((estimLabels == c1) & (trueLabels == c0))'], {}), '((estimLabels == c1) & (trueLabels == c0))\n', (995, 1037), True, 'import numpy as np\n'), ((1049, 1097), 'numpy.sum', 'np.sum', (['((estimLabels == c0) & (trueLabels == c1))'], {}), '((estimLabels == c0) & (trueLabels == c1))\n', (1055, 1097), True, 'import numpy as np\n'), ((4590, 4616), 'numpy.isnan', 'np.isnan', (['trueDiag.iloc[s]'], {}), '(trueDiag.iloc[s])\n', (4598, 4616), True, 'import numpy as np\n'), ((6935, 6967), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%Y-%m-%d"""'], {}), "(x, '%Y-%m-%d')\n", (6952, 6967), False, 'from datetime import datetime\n'), ((7034, 7066), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%Y-%m-%d"""'], {}), "(x, '%Y-%m-%d')\n", (7051, 7066), False, 'from datetime import datetime\n'), ((3298, 3322), 'numpy.abs', 'np.abs', (['timeDiffsScanCog'], {}), '(timeDiffsScanCog)\n', (3304, 3322), True, 'import numpy as np\n'), ((4234, 4258), 'numpy.abs', 'np.abs', (['timeDiffsScanMri'], {}), 
'(timeDiffsScanMri)\n', (4240, 4258), True, 'import numpy as np\n'), ((8347, 8379), 'numpy.abs', 'np.abs', (['(adasEstim - trueADASFilt)'], {}), '(adasEstim - trueADASFilt)\n', (8353, 8379), True, 'import numpy as np\n'), ((8493, 8532), 'numpy.abs', 'np.abs', (['(ventriclesEstim - trueVentsFilt)'], {}), '(ventriclesEstim - trueVentsFilt)\n', (8499, 8532), True, 'import numpy as np\n')] |
#-------by HYH -------#
# Demonstrates the identity Var(X) = E[X^2] - E[X]^2 using the helpers
# provided by the local P13 module.
import sys
# NOTE(review): hard-coded absolute Windows path; adjust if P13 lives elsewhere.
sys.path.append('D:\\Python File\\robot\\P13')
import P13
import numpy as np
x=np.array([7,38,4,23,18])
x2=np.square(x)
x2mu=P13.compMean(x2)  # E[X^2]
xmu=P13.compMean(x)  # E[X]
xvar=P13.compVariance(x)
print('The Variance of X \t=%s \n'%xvar)
print('E[X^2]-E[X]^2 \t\t=%s \n'%(x2mu-np.square(xmu))) | [
"P13.compVariance",
"P13.compMean",
"numpy.square",
"numpy.array",
"sys.path.append"
] | [((35, 81), 'sys.path.append', 'sys.path.append', (['"""D:\\\\Python File\\\\robot\\\\P13"""'], {}), "('D:\\\\Python File\\\\robot\\\\P13')\n", (50, 81), False, 'import sys\n'), ((114, 142), 'numpy.array', 'np.array', (['[7, 38, 4, 23, 18]'], {}), '([7, 38, 4, 23, 18])\n', (122, 142), True, 'import numpy as np\n'), ((142, 154), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (151, 154), True, 'import numpy as np\n'), ((160, 176), 'P13.compMean', 'P13.compMean', (['x2'], {}), '(x2)\n', (172, 176), False, 'import P13\n'), ((181, 196), 'P13.compMean', 'P13.compMean', (['x'], {}), '(x)\n', (193, 196), False, 'import P13\n'), ((202, 221), 'P13.compVariance', 'P13.compVariance', (['x'], {}), '(x)\n', (218, 221), False, 'import P13\n'), ((302, 316), 'numpy.square', 'np.square', (['xmu'], {}), '(xmu)\n', (311, 316), True, 'import numpy as np\n')] |
"""
See also:
https://github.com/danieljtait/jax_xla_adventures/blob/master/pybind11_register_custom_call/test.py
"""
import numpy as np
from jaxlib import xla_client
from . import _signal
# Register every custom-call kernel exported by the compiled _signal
# extension with XLA so CPU computations can resolve them by name.
for _name, _value in _signal.registrations().items():
    xla_client.register_custom_call_target(_name, _value, platform="cpu")
def lfilter(ctx, b, a, x):
    """Emit an XLA custom call applying an IIR filter to ``x``.

    Mirrors the scipy.signal.lfilter interface at the XLA-op level:
    ``b`` holds the numerator coefficients, ``a`` the denominator
    coefficients, and ``x`` the input signal.  All three operands must
    share one element type (f32/f64/c64/c128); the output has the shape
    and dtype of ``x``.

    @param ctx: XLA builder the custom call is added to.
    @return: the XLA op producing the filtered signal.
    """
    dtype = ctx.get_shape(x).element_type()
    b_shape = ctx.get_shape(b).dimensions()
    a_shape = ctx.get_shape(a).dimensions()
    x_shape = ctx.get_shape(x).dimensions()

    def _row_major(shape):
        # XLA wants an explicit minor-to-major layout; this is row-major.
        return tuple(range(len(shape) - 1, -1, -1))

    arr_shape_b = xla_client.Shape.array_shape(np.dtype(dtype), b_shape, _row_major(b_shape))
    arr_shape_a = xla_client.Shape.array_shape(np.dtype(dtype), a_shape, _row_major(a_shape))
    arr_shape_x = xla_client.Shape.array_shape(np.dtype(dtype), x_shape, _row_major(x_shape))

    # Opaque descriptor consumed by the C++ kernel: (len(b), len(a), len(x)).
    descriptor_bytes = _signal.build_lfilter_descriptor(b_shape[0], a_shape[0], x_shape[0])

    # One registered kernel per supported element type; an unsupported
    # dtype raises KeyError (no kernel exists for it).
    op_name = {
        np.dtype('float32') : b"lfilter_f32",
        np.dtype('float64') : b"lfilter_f64",
        np.dtype('complex64') : b"lfilter_c64",
        np.dtype('complex128'): b"lfilter_c128",
    }[dtype]

    return xla_client.ops.CustomCallWithLayout(
        ctx,
        op_name,
        operands=(
            # The descriptor travels as a constant u8 buffer operand.
            xla_client.ops.Constant(ctx, np.frombuffer(descriptor_bytes, dtype=np.uint8)),
            b, a, x
        ),
        # Input shapes
        operand_shapes_with_layout=(
            xla_client.Shape.array_shape(np.dtype(np.uint8), (len(descriptor_bytes),), (0,)),
            arr_shape_b,
            arr_shape_a,
            arr_shape_x,
        ),
        # Output shapes
        shape_with_layout=arr_shape_x,
    )
| [
"jaxlib.xla_client.register_custom_call_target",
"numpy.frombuffer",
"numpy.dtype"
] | [((247, 316), 'jaxlib.xla_client.register_custom_call_target', 'xla_client.register_custom_call_target', (['_name', '_value'], {'platform': '"""cpu"""'}), "(_name, _value, platform='cpu')\n", (285, 316), False, 'from jaxlib import xla_client\n'), ((579, 594), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (587, 594), True, 'import numpy as np\n'), ((690, 705), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (698, 705), True, 'import numpy as np\n'), ((801, 816), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (809, 816), True, 'import numpy as np\n'), ((977, 996), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (985, 996), True, 'import numpy as np\n'), ((1021, 1040), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (1029, 1040), True, 'import numpy as np\n'), ((1065, 1086), 'numpy.dtype', 'np.dtype', (['"""complex64"""'], {}), "('complex64')\n", (1073, 1086), True, 'import numpy as np\n'), ((1109, 1131), 'numpy.dtype', 'np.dtype', (['"""complex128"""'], {}), "('complex128')\n", (1117, 1131), True, 'import numpy as np\n'), ((1288, 1335), 'numpy.frombuffer', 'np.frombuffer', (['descriptor_bytes'], {'dtype': 'np.uint8'}), '(descriptor_bytes, dtype=np.uint8)\n', (1301, 1335), True, 'import numpy as np\n'), ((1458, 1476), 'numpy.dtype', 'np.dtype', (['np.uint8'], {}), '(np.uint8)\n', (1466, 1476), True, 'import numpy as np\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
membership inference test
"""
import pytest
import numpy as np
import mindspore.dataset as ds
from mindspore import nn
from mindspore.train import Model
import mindspore.context as context
from mindarmour.privacy.evaluation import MembershipInference
from tests.ut.python.utils.mock_net import Net
# Run MindSpore in graph mode for the hardware targets exercised below.
context.set_context(mode=context.GRAPH_MODE)
def dataset_generator():
    """Yield (image, label) batches of random mock training data.

    Produces one batch of 16 float32 images shaped (1, 32, 32) together
    with int32 labels drawn uniformly from [0, 10).
    """
    batch_size, batches = 16, 1
    total = batches * batch_size
    images = np.random.randn(total, 1, 32, 32).astype(np.float32)
    labels = np.random.randint(0, 10, total).astype(np.int32)
    for start in range(0, total, batch_size):
        stop = start + batch_size
        yield images[start:stop], labels[start:stop]
@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@pytest.mark.component_mindarmour
def test_get_membership_inference_object():
    """A MembershipInference attacker can be built around a trained Model."""
    network = Net()
    criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    optimizer = nn.Momentum(params=network.trainable_params(),
                           learning_rate=0.1, momentum=0.9)
    victim = Model(network=network, loss_fn=criterion, optimizer=optimizer)
    attacker = MembershipInference(victim, -1)
    assert isinstance(attacker, MembershipInference)
@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.component_mindarmour
def test_membership_inference_object_train():
    """Train a MembershipInference attacker on mock data with a KNN config."""
    network = Net()
    criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    optimizer = nn.Momentum(params=network.trainable_params(),
                           learning_rate=0.1, momentum=0.9)
    victim = Model(network=network, loss_fn=criterion, optimizer=optimizer)
    inference_model = MembershipInference(victim, 2)
    assert isinstance(inference_model, MembershipInference)
    # A single attack method suffices to exercise the training path.
    attack_config = [{"method": "KNN",
                      "params": {"n_neighbors": [3, 5, 7]}}]
    ds_train = ds.GeneratorDataset(dataset_generator, ["image", "label"])
    ds_test = ds.GeneratorDataset(dataset_generator, ["image", "label"])
    inference_model.train(ds_train, ds_test, attack_config)
@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@pytest.mark.component_mindarmour
def test_membership_inference_eval():
    """Evaluate a MembershipInference attacker and request basic metrics."""
    network = Net()
    criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    optimizer = nn.Momentum(params=network.trainable_params(),
                           learning_rate=0.1, momentum=0.9)
    victim = Model(network=network, loss_fn=criterion, optimizer=optimizer)
    inference_model = MembershipInference(victim, -1)
    assert isinstance(inference_model, MembershipInference)
    eval_train = ds.GeneratorDataset(dataset_generator, ["image", "label"])
    eval_test = ds.GeneratorDataset(dataset_generator, ["image", "label"])
    wanted = ["precision", "accuracy", "recall"]
    inference_model.eval(eval_train, eval_test, wanted)
| [
"tests.ut.python.utils.mock_net.Net",
"mindspore.dataset.GeneratorDataset",
"mindspore.nn.SoftmaxCrossEntropyWithLogits",
"mindspore.context.set_context",
"mindspore.train.Model",
"mindarmour.privacy.evaluation.MembershipInference",
"numpy.random.randint",
"numpy.random.randn"
] | [((896, 940), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE'}), '(mode=context.GRAPH_MODE)\n', (915, 940), True, 'import mindspore.context as context\n'), ((1549, 1554), 'tests.ut.python.utils.mock_net.Net', 'Net', ([], {}), '()\n', (1552, 1554), False, 'from tests.ut.python.utils.mock_net import Net\n'), ((1566, 1611), 'mindspore.nn.SoftmaxCrossEntropyWithLogits', 'nn.SoftmaxCrossEntropyWithLogits', ([], {'sparse': '(True)'}), '(sparse=True)\n', (1598, 1611), False, 'from mindspore import nn\n'), ((1710, 1757), 'mindspore.train.Model', 'Model', ([], {'network': 'net', 'loss_fn': 'loss', 'optimizer': 'opt'}), '(network=net, loss_fn=loss, optimizer=opt)\n', (1715, 1757), False, 'from mindspore.train import Model\n'), ((1780, 1810), 'mindarmour.privacy.evaluation.MembershipInference', 'MembershipInference', (['model', '(-1)'], {}), '(model, -1)\n', (1799, 1810), False, 'from mindarmour.privacy.evaluation import MembershipInference\n'), ((2161, 2166), 'tests.ut.python.utils.mock_net.Net', 'Net', ([], {}), '()\n', (2164, 2166), False, 'from tests.ut.python.utils.mock_net import Net\n'), ((2178, 2223), 'mindspore.nn.SoftmaxCrossEntropyWithLogits', 'nn.SoftmaxCrossEntropyWithLogits', ([], {'sparse': '(True)'}), '(sparse=True)\n', (2210, 2223), False, 'from mindspore import nn\n'), ((2322, 2369), 'mindspore.train.Model', 'Model', ([], {'network': 'net', 'loss_fn': 'loss', 'optimizer': 'opt'}), '(network=net, loss_fn=loss, optimizer=opt)\n', (2327, 2369), False, 'from mindspore.train import Model\n'), ((2392, 2421), 'mindarmour.privacy.evaluation.MembershipInference', 'MembershipInference', (['model', '(2)'], {}), '(model, 2)\n', (2411, 2421), False, 'from mindarmour.privacy.evaluation import MembershipInference\n'), ((2614, 2672), 'mindspore.dataset.GeneratorDataset', 'ds.GeneratorDataset', (['dataset_generator', "['image', 'label']"], {}), "(dataset_generator, ['image', 'label'])\n", (2633, 2672), True, 'import 
mindspore.dataset as ds\n'), ((2722, 2780), 'mindspore.dataset.GeneratorDataset', 'ds.GeneratorDataset', (['dataset_generator', "['image', 'label']"], {}), "(dataset_generator, ['image', 'label'])\n", (2741, 2780), True, 'import mindspore.dataset as ds\n'), ((3081, 3086), 'tests.ut.python.utils.mock_net.Net', 'Net', ([], {}), '()\n', (3084, 3086), False, 'from tests.ut.python.utils.mock_net import Net\n'), ((3098, 3143), 'mindspore.nn.SoftmaxCrossEntropyWithLogits', 'nn.SoftmaxCrossEntropyWithLogits', ([], {'sparse': '(True)'}), '(sparse=True)\n', (3130, 3143), False, 'from mindspore import nn\n'), ((3242, 3289), 'mindspore.train.Model', 'Model', ([], {'network': 'net', 'loss_fn': 'loss', 'optimizer': 'opt'}), '(network=net, loss_fn=loss, optimizer=opt)\n', (3247, 3289), False, 'from mindspore.train import Model\n'), ((3312, 3342), 'mindarmour.privacy.evaluation.MembershipInference', 'MembershipInference', (['model', '(-1)'], {}), '(model, -1)\n', (3331, 3342), False, 'from mindarmour.privacy.evaluation import MembershipInference\n'), ((3421, 3479), 'mindspore.dataset.GeneratorDataset', 'ds.GeneratorDataset', (['dataset_generator', "['image', 'label']"], {}), "(dataset_generator, ['image', 'label'])\n", (3440, 3479), True, 'import mindspore.dataset as ds\n'), ((3533, 3591), 'mindspore.dataset.GeneratorDataset', 'ds.GeneratorDataset', (['dataset_generator', "['image', 'label']"], {}), "(dataset_generator, ['image', 'label'])\n", (3552, 3591), True, 'import mindspore.dataset as ds\n'), ((1045, 1093), 'numpy.random.randn', 'np.random.randn', (['(batches * batch_size)', '(1)', '(32)', '(32)'], {}), '(batches * batch_size, 1, 32, 32)\n', (1060, 1093), True, 'import numpy as np\n'), ((1132, 1178), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(batches * batch_size)'], {}), '(0, 10, batches * batch_size)\n', (1149, 1178), True, 'import numpy as np\n')] |
import numpy as np
import os
from os import path as osp
import pybullet as p
import pybullet_data as pb_data
from pybullet_utils import bullet_client
# Absolute path of this module file; anchors the ycb_objects asset lookup below.
current_file_path = osp.abspath(__file__)
def get_ycb_file_path(object_name: str):
    """Return the absolute path of the URDF model file for a YCB object.

    Args:
        object_name: directory name of the object; must start with "Ycb"
            (be aware of the uppercase).

    Returns:
        Absolute path ``<module dir>/ycb_objects/<object_name>/model.urdf``.

    Raises:
        ValueError: if ``object_name`` does not start with "Ycb".
    """
    # A bare ``assert`` is stripped under ``python -O``; validate explicitly.
    if not object_name.startswith("Ycb"):
        raise ValueError("YCB object name must start with 'Ycb', got %r" % (object_name,))
    return osp.join(osp.dirname(current_file_path), "ycb_objects", object_name, "model.urdf")
class MultiObjectsMixin:
    """A mixin implementing common methods of loading objects and resetting them.

    Because different robots have different observation_space and action_space,
    the workable env classes live in their corresponding directories; this
    mixin only contributes the object loading / re-randomizing behaviour.
    """
    def __init__(self,
            object_names: list, # a list of objects to put in the scene (allows duplicate)
            position_sample_region: np.ndarray= None, # shape (2, 3): [min_xyz, max_xyz]
            **kwargs,
        ):
        self.object_names = object_names
        # Axis-aligned box from which object positions are sampled; rows are
        # the lower and upper corner respectively (see default example).
        self.position_sample_region = np.array([
            [0.2, -0.25, 0.1],
            [0.6, 0.25, 0.3],
        ]) if position_sample_region is None else position_sample_region
        super().__init__(**kwargs)
    def _sample_object_pose(self):
        """Return a random (position, xyz_euler) pair.

        Position is uniform in ``self.position_sample_region``; orientation is
        a uniform Euler triple in [-pi, pi]^3.  Draw order matches the
        original inline code so random streams are unchanged.
        """
        object_position = np.random.uniform(self.position_sample_region[0], self.position_sample_region[1])
        object_xyz_euler = np.random.uniform(np.array([-np.pi]*3), np.array([np.pi]*3))
        return object_position, object_xyz_euler
    def _build_surroundings(self):
        """Build the scene, then spawn every configured object at a random pose."""
        super()._build_surroundings()
        self._object_body_ids = []
        for name in self.object_names:
            if name.startswith("Ycb"):
                file_path = get_ycb_file_path(name)
            else:
                raise NotImplementedError
            # load model at a random pose
            object_position, object_xyz_euler = self._sample_object_pose()
            if file_path.endswith(".urdf"):
                self._object_body_ids.append(self.pb_client.loadURDF(
                    file_path,
                    basePosition= object_position,
                    baseOrientation= p.getQuaternionFromEuler(object_xyz_euler),
                ))
            else:
                raise NotImplementedError
    def _reset_surroundings(self, *args, **kwargs):
        """Teleport every previously spawned object to a fresh random pose."""
        for body_id in self._object_body_ids:
            object_position, object_xyz_euler = self._sample_object_pose()
            self.pb_client.resetBasePositionAndOrientation(
                body_id,
                object_position,
                p.getQuaternionFromEuler(object_xyz_euler),
            )
| [
"os.path.dirname",
"numpy.array",
"pybullet.getQuaternionFromEuler",
"numpy.random.uniform",
"os.path.abspath"
] | [((172, 193), 'os.path.abspath', 'osp.abspath', (['__file__'], {}), '(__file__)\n', (183, 193), True, 'from os import path as osp\n'), ((442, 472), 'os.path.dirname', 'osp.dirname', (['current_file_path'], {}), '(current_file_path)\n', (453, 472), True, 'from os import path as osp\n'), ((1162, 1209), 'numpy.array', 'np.array', (['[[0.2, -0.25, 0.1], [0.6, 0.25, 0.3]]'], {}), '([[0.2, -0.25, 0.1], [0.6, 0.25, 0.3]])\n', (1170, 1209), True, 'import numpy as np\n'), ((1709, 1795), 'numpy.random.uniform', 'np.random.uniform', (['self.position_sample_region[0]', 'self.position_sample_region[1]'], {}), '(self.position_sample_region[0], self.\n position_sample_region[1])\n', (1726, 1795), True, 'import numpy as np\n'), ((2380, 2466), 'numpy.random.uniform', 'np.random.uniform', (['self.position_sample_region[0]', 'self.position_sample_region[1]'], {}), '(self.position_sample_region[0], self.\n position_sample_region[1])\n', (2397, 2466), True, 'import numpy as np\n'), ((1840, 1862), 'numpy.array', 'np.array', (['([-np.pi] * 3)'], {}), '([-np.pi] * 3)\n', (1848, 1862), True, 'import numpy as np\n'), ((1862, 1883), 'numpy.array', 'np.array', (['([np.pi] * 3)'], {}), '([np.pi] * 3)\n', (1870, 1883), True, 'import numpy as np\n'), ((2511, 2533), 'numpy.array', 'np.array', (['([-np.pi] * 3)'], {}), '([-np.pi] * 3)\n', (2519, 2533), True, 'import numpy as np\n'), ((2533, 2554), 'numpy.array', 'np.array', (['([np.pi] * 3)'], {}), '([np.pi] * 3)\n', (2541, 2554), True, 'import numpy as np\n'), ((2688, 2730), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['object_xyz_euler'], {}), '(object_xyz_euler)\n', (2712, 2730), True, 'import pybullet as p\n'), ((2116, 2158), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['object_xyz_euler'], {}), '(object_xyz_euler)\n', (2140, 2158), True, 'import pybullet as p\n')] |
#!/usr/bin/env python3
import numpy as np
from matplotlib import pyplot as plt
import collections
def PolyCoefficients(x, coeffs):
    """Evaluate the polynomial with coefficients ``coeffs`` at ``x``.

    Coefficients are ordered from the constant term upward
    (``x**0`` first, highest power last).
    """
    return sum(c * x ** power for power, c in enumerate(coeffs))
def readSplineDataFromFile(filename):
    """Parse a spline-export file into ``{name: [(lo, hi, n_coef, coefs), ...]}``.

    Each line holds one spline: a quoted name followed by one or more
    polynomial pieces, every piece encoded as
    ``lo hi n_coef c0 c1 ... c{n_coef-1}``.

    Returns:
        dict mapping the (unquoted) spline name to a list of
        ``(lo, hi, n_coef, [c0, ...])`` tuples.
    """
    splines = {}
    with open(filename) as file:
        for line in file:
            words = line.split()
            name = words[0].replace("'", "")
            pieces = []
            i = 1
            while i < len(words):
                # ``lo``/``hi`` instead of shadowing the builtins min/max
                lo = float(words[i])
                hi = float(words[i + 1])
                n_coef = int(words[i + 2])
                coefs = [float(words[i + 3 + j]) for j in range(n_coef)]
                pieces.append((lo, hi, n_coef, coefs))
                i += 3 + n_coef
            splines[name] = pieces
    print(splines)
    print("\n\n")
    return splines
# Load the exported splines and plot the foot/trunk trajectories on a 4x3 grid.
data = readSplineDataFromFile("/tmp/spline_export")
orderedData = collections.OrderedDict(sorted(data.items()))
position = 1
for name in orderedData:
    # Only foot and trunk splines are of interest here.
    if "foot_" not in name and "trunk_" not in name:
        continue
    plt.subplot(4, 3, position)
    for lo, hi, nr_coef, coeffs in data[name]:
        # Sample each polynomial piece on its own validity interval.
        # Bug fix: this module does ``import numpy`` (not ``as np``), so the
        # original ``np.linspace`` raised NameError; also no longer shadows
        # the builtins min/max.
        x = numpy.linspace(lo, hi)
        plt.plot(x, PolyCoefficients(x, coeffs))
    plt.title(name)
    position += 1
plt.show()
| [
"matplotlib.pyplot.title",
"numpy.linspace",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((1626, 1636), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1634, 1636), True, 'from matplotlib import pyplot as plt\n'), ((1352, 1379), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(3)', 'position'], {}), '(4, 3, position)\n', (1363, 1379), True, 'from matplotlib import pyplot as plt\n'), ((1591, 1606), 'matplotlib.pyplot.title', 'plt.title', (['name'], {}), '(name)\n', (1600, 1606), True, 'from matplotlib import pyplot as plt\n'), ((1515, 1536), 'numpy.linspace', 'np.linspace', (['min', 'max'], {}), '(min, max)\n', (1526, 1536), True, 'import numpy as np\n')] |
# Demo of a naive cart-pole control algorithm
import gym
import numpy as np
def naive_policy(obsv):
    """Bang-bang controller for the cart-pole simulator.

    The observation is (x, x_dot, theta, theta_dot); action 1 accelerates
    the cart right, action 0 accelerates it left.
    """
    pole_angle = obsv[2]
    # Positive angle (clockwise deflection): move right to "get under" the pole.
    return 1 if pole_angle > 0 else 0
# Per-episode total rewards collected over the whole run.
totals = []
env = gym.make("CartPole-v0")
# env.render()
# Run the naive policy and compute its average rewards
for episode in range(1000):
    episode_rewards = 0
    # NOTE(review): old gym API -- reset() returns only the observation and
    # step() returns a 4-tuple; newer gymnasium releases changed both.
    obsv = env.reset()
    if episode % 20 == 0:
        print( "Running episode " + str(episode) )
    # 1000 actions per episode
    for i in range(1000):
        action = naive_policy(obsv)
        obsv, reward, is_done, info = env.step(action)
        # env.render()
        episode_rewards += reward
        if is_done:
            break
    totals.append(episode_rewards)
# env.close()
# Summary statistics over all episodes.
mean_reward = np.mean(totals)
# NOTE(review): min_reward is computed but never printed -- dead value or
# a missing print statement; confirm intent.
min_reward = np.min(totals)
max_reward = np.max(totals)
print("Mean Reward = " + str(mean_reward))
print("Maximum iterations (max reward) = " + str(max_reward))
| [
"numpy.max",
"numpy.mean",
"gym.make",
"numpy.min"
] | [((483, 506), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (491, 506), False, 'import gym\n'), ((1038, 1053), 'numpy.mean', 'np.mean', (['totals'], {}), '(totals)\n', (1045, 1053), True, 'import numpy as np\n'), ((1067, 1081), 'numpy.min', 'np.min', (['totals'], {}), '(totals)\n', (1073, 1081), True, 'import numpy as np\n'), ((1095, 1109), 'numpy.max', 'np.max', (['totals'], {}), '(totals)\n', (1101, 1109), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- CoDing: utf-8 -*-
"""
Created on May 22 2019
Last Update May 22 2019
@author: simonvanvliet
Department of Zoology
University of Britisch Columbia
<EMAIL>
This recreates the data and figure for figure 2
By default data is loaded unless parameters have changes, to rerun model set override_data to True
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import MLS_static_fast as mlssf
import mls_general_code as mlsg
from joblib import Parallel, delayed
import datetime
from pathlib import Path
"""
# SET model settings
"""
# set to True to force recalculation of data
override_data = False
# set folder
data_folder = Path("Data_Paper/")
fig_Folder = Path("Figures_Paper/")
figureName = 'figureSI5.pdf'
dataName = 'data_FigureSI5.npz'
# set model parameters
# sb: host birth-selection strengths compared (with / without host selection)
sb = [1, 0]
# helper cost values scanned (one figure panel per cost)
cost_vec = [-0.005, -0.001, 0, 0.001, 0.005]
# per-cost simulation end time used as the x-limit of the matching panel
maxT_vec = [5000, 10000, 50000, 10000, 5000]
#run each parset numRepeat times
numRepeat = 100
model_par = {
    # selection strength settings
    "s": 1,
    "K_H": 500.,
    "B_H": 1.,
    "D_H": 0,
    # tau_var settings
    "cost": -0.001,
    "TAU_H": 100,
    "sigmaBirth": 0.05,
    # tau_mig settings
    "n0": 1E-4,
    "mig": 1E-6,
    # init conditions
    "F0": 1E-6,
    "N0init": 1.,
    "NUMGROUP": -1,
    # time settings
    "maxT": 50000,
    "dT": 5E-2,
    "sampleT": 10,
    # NOTE(review): "treshold" is misspelled but is the exact key the model
    # code reads -- do not rename without updating MLS_static_fast as well.
    "rms_err_treshold": 1E-10,
    "mav_window": 100,
    "rms_window": 1000,
    # fixed model parameters
    "sampling": "fixedvar",
    "mu": 1E-9,
    "K": 1,
    "numTypeBins": 100
}
# store timescales
tauHer, tauVar = mlsg.calc_timescale(model_par)
model_par['TAU_her'] = tauHer
model_par['TAU_var'] = tauVar
"""
# SET figure settings
"""
# set figure settings (sizes in cm; fonts/ticks/legend tuned for print)
wFig = 17.8
hFig = 3.5
font = {'family': 'Helvetica',
        'weight': 'light',
        'size': 6}
axes = {'linewidth': 0.5,
        'titlesize': 7,
        'labelsize': 6,
        'labelpad': 2,
        'spines.top': False,
        'spines.right': False,
        }
ticks = {'major.width': 0.5,
         'direction': 'in',
         'major.size': 2,
         'labelsize': 6,
         'major.pad': 2}
legend = {'fontsize': 6,
          'handlelength': 1.5,
          'handletextpad': 0.5,
          'labelspacing': 0.2}
figure = {'dpi': 300}
savefigure = {'dpi': 300,
              'transparent': True}
# NOTE(review): the 'seaborn-ticks' style name was removed in recent
# matplotlib releases ('seaborn-v0_8-ticks') -- confirm the pinned version.
mpl.style.use('seaborn-ticks')
mpl.rc('font', **font)
mpl.rc('axes', **axes)
mpl.rc('xtick', **ticks)
mpl.rc('ytick', **ticks)
mpl.rc('legend', **legend)
mpl.rc('figure', **figure)
mpl.rc('savefig', **savefigure)
# color cycle (hex RGB without '#'); plot_line appends an alpha hex suffix
colors = ['777777', 'e24a33', '348ABD', '988ED5',
          'FBC15E', '8EBA42', 'FFB5B8']
mpl.rcParams['axes.prop_cycle'] = mpl.cycler(color=colors)
"""
Main code
"""
# set host birth/death effect and cost in model par
def set_BH_cost(BH, cost):
    """Return a copy of the global ``model_par`` with the host-selection
    strength ``B_H`` and the helper ``cost`` overridden."""
    par = dict(model_par)
    par['B_H'] = BH
    par['cost'] = cost
    return par
# run model
def run_model():
    """Run the model for every cost value (numRepeat repeats each, with and
    without host selection), save the pooled statistics, and return them."""
    # Build the full parameter list: for each cost, the with/without-host-
    # selection variants alternate, repeated numRepeat times.
    modelParList = []
    for cost in cost_vec:
        pair = [set_BH_cost(sb[0], cost), set_BH_cost(sb[1], cost)]
        modelParList.extend(pair * numRepeat)
    # Dispatch all runs over at most 4 parallel workers.
    nJobs = min(len(modelParList), 4)
    runs = Parallel(n_jobs=nJobs, verbose=9, timeout=1.E9)(
        delayed(mlssf.run_model_fixed_parameters)(par) for par in modelParList)
    # Keep the first element of each result tuple and stack into one array.
    Output, _, _, _ = zip(*runs)
    statData = np.vstack(Output)
    np.savez(data_folder / dataName, statData=statData,
             modelParList=modelParList, date=datetime.datetime.now())
    return statData
# checks if model parameters have changed since the data file was written
def check_model_par(model_par_load, parToIgnore):
    """Compare a loaded parameter dict against the current ``model_par``.

    Args:
        model_par_load: parameter dict stored with the data file.
        parToIgnore: keys that are varied on purpose and may differ.

    Returns:
        True if any non-ignored parameter differs (model must be rerun).
    """
    rerun = False
    for key in model_par_load:
        if not (key in parToIgnore):
            if model_par_load[key] != model_par[key]:
                # Bug fix: report the offending key; the original printed the
                # literal string 'load' for every changed parameter.
                print('Parameter "%s" has changed, rerunning model!' % key)
                rerun = True
    return rerun
# Load the data file when present; (re)run the model when it is missing,
# when parameters changed, or when override_data is set.
def load_model():
    """Return the model statistics, loading them from disk when possible."""
    ignored_keys = ('B_H', 'cost')  # varied on purpose, need not match
    data_path = data_folder / dataName
    if override_data or not data_path.is_file():
        # No usable file: the model has to be rerun.
        rerun = True
        print('Model data not found, running model')
    else:
        # Load the stored statistics and check the stored parameters.
        data_file = np.load(data_path, allow_pickle=True)
        Output = data_file['statData']
        rerun = check_model_par(data_file['modelParList'][0], ignored_keys)
        data_file.close()
    if rerun or override_data:
        Output = run_model()
    return Output
# plot line chart
def plot_line(axs, Output, FieldName, yAxis, maxX, legendLoc):
    """Plot individual runs (thin) and the across-run mean (thick) of
    ``Output[FieldName]`` against time, for both host-selection settings.

    Args:
        axs: matplotlib axes to draw on.
        Output: structured array of runs; even rows were run with host
            selection (sb[0]), odd rows without (sb[1]).
        FieldName: field of ``Output`` plotted on the y axis.
        yAxis: if True label the y axis, otherwise hide its tick labels.
        maxX: upper x-axis limit.
        legendLoc: matplotlib legend location, or 'none' to skip the legend.
    """
    handle_list = []
    for i in range(2):
        timeMat = Output[i::2]['time']
        mavMat = Output[i::2][FieldName]
        timeAv = timeMat.mean(axis=0)
        mavAv = mavMat.mean(axis=0)
        # individual runs, translucent ('30' is an alpha suffix on the hex color)
        alphaVal='30'
        axs.plot(timeMat.transpose(), mavMat.transpose(),
                 linewidth=0.25, color='#'+colors[i]+alphaVal)
        # across-run average, opaque
        handle, = axs.plot(timeAv, mavAv,
                           linewidth=1.5, color='#'+colors[i])
        handle_list.append(handle)
    # make plot nice
    axs.set_xlabel('time [a.u.]')
    maxY = 1
    xStep = 3
    yStep = 3
    axs.set_ylim((0, maxY))
    axs.set_xlim((0, maxX))
    axs.set_xticks(np.linspace(0, maxX, xStep))
    axs.set_yticks(np.linspace(0, maxY, yStep))
    if legendLoc != 'none':
        axs.legend(handle_list, ['$s_b$=%.0f' % x for x in sb], loc=legendLoc)
    if yAxis:
        axs.set_ylabel("mean frac. helpers $\\langle f \\rangle$")
    else:
        axs.set_yticklabels([])
    return
# calculate moving average over time
def calc_mav(data):
    """Average ``data`` over its trailing ``model_par['mav_window']`` rows.

    Bug fix: the windowed slice was computed but the mean was then taken over
    the *whole* array, making the slice dead code; the mean now uses the
    trailing window as the function name implies.
    """
    dataMAV = data[-model_par['mav_window']:, :]
    return np.nanmean(dataMAV, axis=0)
# plot histogram chart
def plot_histogram_line(axs, data):
    """Plot the time-averaged within-host helper-fraction histograms for both
    host-selection settings (solid vs dashed) on ``axs``."""
    # time-averaged histograms for the two settings
    mav_with = calc_mav(data[0])
    mav_without = calc_mav(data[1])
    # bin centers: midpoints of a uniform partition of [0, 1]
    edges = np.linspace(0, 1, mav_with.size + 1)
    centers = (edges[1:] + edges[:-1]) / 2
    axs.plot(centers, mav_with)
    axs.plot(centers, mav_without, '--')
    # axis cosmetics
    y_max, x_max = 0.06, 1
    axs.set_ylim((0, y_max))
    axs.set_xlim((0, x_max))
    axs.set_xticks(np.linspace(0, x_max, 3))
    axs.set_yticks(np.linspace(0, y_max, 3))
    axs.set_ylabel('frac. of hosts')
    axs.set_xlabel("frac. helpers in host $f_i$")
    axs.legend(['$s_b$=%.0f' % x for x in sb], loc='upper right')
    return None
# main function to create figure
def create_fig():
    """Create and save the five-panel figure (one panel per cost value)."""
    # load data or compute model
    Output = load_model()
    # set fonts
    fig = plt.figure()
    mlsg.set_fig_size_cm(fig, wFig, hFig)
    # Output holds the runs for all costs stacked; split it back into one
    # contiguous slice of numElLoc rows per cost value.
    numcost = len(cost_vec)
    numElTot = Output.shape[0]
    numElLoc = numElTot/numcost
    legendLoc = 'lower right'
    for i in range(numcost):
        # only the first panel gets a y-axis label, only panels 0-1 a legend
        if i == 0:
            yAxis = True
        else:
            yAxis = False
        if i>1:
            legendLoc = 'none'
        startP = int(i*numElLoc)
        endP = int((i+1)*numElLoc)
        currOutput = Output[startP:endP,:]
        # plot average investment
        axs = fig.add_subplot(1, 5, i+1)
        plot_line(axs, currOutput, "F_mav", yAxis, maxT_vec[i], legendLoc)
        axs.set_title('Cost $\\gamma=%.3f$' % cost_vec[i])
    # axs = fig.add_subplot(1, 2, 2)
    # plot_histogram_line(axs, InvPerHost)
    plt.tight_layout(pad=0.2, h_pad=0.5, w_pad=0.5)
    fig.savefig(fig_Folder / figureName,
                format="pdf", transparent=True)
    return None
if __name__ == "__main__":
    create_fig()
| [
"mls_general_code.calc_timescale",
"pathlib.Path",
"mls_general_code.set_fig_size_cm",
"matplotlib.cycler",
"joblib.Parallel",
"numpy.nanmean",
"numpy.linspace",
"matplotlib.style.use",
"matplotlib.pyplot.figure",
"matplotlib.rc",
"numpy.vstack",
"matplotlib.pyplot.tight_layout",
"datetime.d... | [((681, 700), 'pathlib.Path', 'Path', (['"""Data_Paper/"""'], {}), "('Data_Paper/')\n", (685, 700), False, 'from pathlib import Path\n'), ((714, 736), 'pathlib.Path', 'Path', (['"""Figures_Paper/"""'], {}), "('Figures_Paper/')\n", (718, 736), False, 'from pathlib import Path\n'), ((1597, 1627), 'mls_general_code.calc_timescale', 'mlsg.calc_timescale', (['model_par'], {}), '(model_par)\n', (1616, 1627), True, 'import mls_general_code as mlsg\n'), ((2350, 2380), 'matplotlib.style.use', 'mpl.style.use', (['"""seaborn-ticks"""'], {}), "('seaborn-ticks')\n", (2363, 2380), True, 'import matplotlib as mpl\n'), ((2381, 2403), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {}), "('font', **font)\n", (2387, 2403), True, 'import matplotlib as mpl\n'), ((2404, 2426), 'matplotlib.rc', 'mpl.rc', (['"""axes"""'], {}), "('axes', **axes)\n", (2410, 2426), True, 'import matplotlib as mpl\n'), ((2427, 2451), 'matplotlib.rc', 'mpl.rc', (['"""xtick"""'], {}), "('xtick', **ticks)\n", (2433, 2451), True, 'import matplotlib as mpl\n'), ((2452, 2476), 'matplotlib.rc', 'mpl.rc', (['"""ytick"""'], {}), "('ytick', **ticks)\n", (2458, 2476), True, 'import matplotlib as mpl\n'), ((2477, 2503), 'matplotlib.rc', 'mpl.rc', (['"""legend"""'], {}), "('legend', **legend)\n", (2483, 2503), True, 'import matplotlib as mpl\n'), ((2504, 2530), 'matplotlib.rc', 'mpl.rc', (['"""figure"""'], {}), "('figure', **figure)\n", (2510, 2530), True, 'import matplotlib as mpl\n'), ((2531, 2562), 'matplotlib.rc', 'mpl.rc', (['"""savefig"""'], {}), "('savefig', **savefigure)\n", (2537, 2562), True, 'import matplotlib as mpl\n'), ((2688, 2712), 'matplotlib.cycler', 'mpl.cycler', ([], {'color': 'colors'}), '(color=colors)\n', (2698, 2712), True, 'import matplotlib as mpl\n'), ((3650, 3667), 'numpy.vstack', 'np.vstack', (['Output'], {}), '(Output)\n', (3659, 3667), True, 'import numpy as np\n'), ((6631, 6655), 'numpy.nanmean', 'np.nanmean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (6641, 6655), 
True, 'import numpy as np\n'), ((6862, 6898), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(dataMav1.size + 1)'], {}), '(0, 1, dataMav1.size + 1)\n', (6873, 6898), True, 'import numpy as np\n'), ((7624, 7636), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7634, 7636), True, 'import matplotlib.pyplot as plt\n'), ((7641, 7678), 'mls_general_code.set_fig_size_cm', 'mlsg.set_fig_size_cm', (['fig', 'wFig', 'hFig'], {}), '(fig, wFig, hFig)\n', (7661, 7678), True, 'import mls_general_code as mlsg\n'), ((8422, 8469), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0.2)', 'h_pad': '(0.5)', 'w_pad': '(0.5)'}), '(pad=0.2, h_pad=0.5, w_pad=0.5)\n', (8438, 8469), True, 'import matplotlib.pyplot as plt\n'), ((3433, 3488), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'nJobs', 'verbose': '(9)', 'timeout': '(1000000000.0)'}), '(n_jobs=nJobs, verbose=9, timeout=1000000000.0)\n', (3441, 3488), False, 'from joblib import Parallel, delayed\n'), ((4509, 4545), 'numpy.load', 'np.load', (['loadName'], {'allow_pickle': '(True)'}), '(loadName, allow_pickle=True)\n', (4516, 4545), True, 'import numpy as np\n'), ((6101, 6128), 'numpy.linspace', 'np.linspace', (['(0)', 'maxX', 'xStep'], {}), '(0, maxX, xStep)\n', (6112, 6128), True, 'import numpy as np\n'), ((6149, 6176), 'numpy.linspace', 'np.linspace', (['(0)', 'maxY', 'yStep'], {}), '(0, maxY, yStep)\n', (6160, 6176), True, 'import numpy as np\n'), ((7165, 7192), 'numpy.linspace', 'np.linspace', (['(0)', 'maxX', 'xStep'], {}), '(0, maxX, xStep)\n', (7176, 7192), True, 'import numpy as np\n'), ((7213, 7240), 'numpy.linspace', 'np.linspace', (['(0)', 'maxY', 'yStep'], {}), '(0, maxY, yStep)\n', (7224, 7240), True, 'import numpy as np\n'), ((3793, 3816), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3814, 3816), False, 'import datetime\n'), ((3490, 3531), 'joblib.delayed', 'delayed', (['mlssf.run_model_fixed_parameters'], {}), '(mlssf.run_model_fixed_parameters)\n', (3497, 
3531), False, 'from joblib import Parallel, delayed\n')] |
# encoding: utf-8
from datetime import date, datetime, time
import motor.motor_asyncio
import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta
from rqalpha.const import INSTRUMENT_TYPE
from rqalpha.model.instrument import Instrument
from rqalpha.utils.datetime_func import convert_date_to_int
from rqalpha.utils.py2 import lru_cache
from rqalpha.mod.rqalpha_mod_fxdayu_source.data_source.common import CacheMixin
from rqalpha.mod.rqalpha_mod_fxdayu_source.data_source.common.minite import MiniteBarDataSourceMixin
from rqalpha.mod.rqalpha_mod_fxdayu_source.data_source.common.odd import OddFrequencyBaseDataSource
from rqalpha.mod.rqalpha_mod_fxdayu_source.utils import Singleton
from rqalpha.mod.rqalpha_mod_fxdayu_source.utils.asyncio import get_asyncio_event_loop
from rqalpha.mod.rqalpha_mod_fxdayu_source.utils.converter import DataFrameConverter
# Map rqalpha instrument types onto the type keys used by the MongoDB
# "meta.db_map" metadata collection (indices share the stock databases).
INSTRUMENT_TYPE_MAP = {
    INSTRUMENT_TYPE.CS: "stock",
    INSTRUMENT_TYPE.INDX: "stock",
}
class NoneDataError(BaseException):
    """Raised when MongoDB holds no data for an instrument/frequency."""
    # NOTE(review): deriving from BaseException (not Exception) means plain
    # ``except Exception`` handlers will not catch it -- confirm intended.
    pass
class MongoDataSource(OddFrequencyBaseDataSource, MiniteBarDataSourceMixin):
    """Bar data source backed by a self-hosted MongoDB.

    Bars are stored in per-frequency databases; the "meta" database's
    "db_map" collection maps instrument type -> {frequency: database name}.
    A synchronous handler serves whole-day reads while an asyncio motor
    client streams minute bars.
    """
    # NOTE(review): __metaclass__ only takes effect on Python 2; under
    # Python 3 this would need ``metaclass=Singleton`` -- confirm intent.
    __metaclass__ = Singleton
    def __init__(self, path, mongo_url):
        super(MongoDataSource, self).__init__(path)
        # imported lazily to avoid a hard dependency at module import time
        from rqalpha.mod.rqalpha_mod_fxdayu_source.share.mongo_handler import MongoHandler
        self._handler = MongoHandler(mongo_url)
        self._client = motor.motor_asyncio.AsyncIOMotorClient(mongo_url)
        self._db_map = self._get_frequency_db_map()
    def _get_frequency_db_map(self):
        """Load {instrument type: {frequency: db name}} from meta.db_map."""
        map_ = self._handler.client.get_database("meta").get_collection("db_map").find()
        dct = {item["type"]: item["map"] for item in map_}
        return dct
    def _get_db(self, instrument, frequency):
        """Return the database name holding ``frequency`` bars for ``instrument``.

        Raises:
            NoneDataError: when no database is mapped for this combination.
        """
        try:
            if isinstance(instrument, Instrument):
                instrument_type = instrument.enum_type
            else:
                instrument_type = instrument
            type_ = INSTRUMENT_TYPE_MAP[instrument_type]
            return self._db_map[type_][frequency]
        except KeyError:
            message = instrument.order_book_id if isinstance(instrument, Instrument) else instrument
            raise NoneDataError("MongoDB 中没有品种%s的%s数据" % (message, frequency))
    # NOTE(review): np.NaN was removed in NumPy 2.0 (use np.nan); confirm
    # the pinned numpy version before upgrading.
    async def _do_get_bars(self, db, collection, filters, projection, fill=np.NaN):
        """Stream matching documents and assemble them into one DataFrame.

        Each document stores column arrays plus ``_l``, the number of valid
        rows it contributes; columns missing from some documents are padded
        with ``fill``.  Returns the frame sorted by datetime, or None when
        nothing matched.
        """
        dct = {}
        l = 0
        async for doc in self._client[db][collection].find(filters, projection):
            _l = doc.pop('_l')
            l += _l
            for key, values in doc.items():
                if isinstance(values, list) and (len(values) == _l):
                    dct.setdefault(key, []).extend(values)
        for values in dct.values():
            if len(values) != l:
                # pad short columns so every column reaches length l
                values.extend([fill] * l)
        df = pd.DataFrame(dct)
        if df.size:
            return df.sort_values("datetime")
        else:
            return None
    def _get_bars_in_days(self, instrument, frequency, params):
        """Fetch minute bars for the day range described by ``params``.

        ``params`` is a list of per-day dicts with ``trade_date`` and
        optional ``start_time``/``end_time`` (HHMMSS-style ints); only the
        first and last entries bound the query.
        """
        s_date = params[0]["trade_date"]
        e_date = params[-1]["trade_date"]
        s_time = params[0]["start_time"] if "start_time" in params[0] else 0
        e_time = params[-1]["end_time"] if "end_time" in params[-1] else 150000
        # int-encoded datetimes used for the final intra-day slicing
        s_dt_int = convert_date_to_int(s_date) + s_time
        e_dt_int = convert_date_to_int(e_date) + e_time
        db = self._get_db(instrument=instrument, frequency=frequency)
        collection = instrument.order_book_id
        filters = {"_d": {"$gte": datetime.combine(s_date, time=time()), "$lte": datetime.combine(e_date, time=time())}}
        projection = {"_id": 0, "_d": 0}
        loop = get_asyncio_event_loop()
        bars = loop.run_until_complete(self._do_get_bars(db, collection, filters, projection))
        if bars is not None and bars.size:
            bars = DataFrameConverter.df2np(bars)
        else:
            bars = DataFrameConverter.empty()
        # trim the day-level result to the exact time bounds
        s_pos = np.searchsorted(bars["datetime"], s_dt_int)
        e_pos = np.searchsorted(bars["datetime"], e_dt_int, side="right")
        return bars[s_pos:e_pos]
    def raw_history_bars(self, instrument, frequency, start_dt=None, end_dt=None, length=None):
        # dispatch based on the layout of the self-built MongoDB
        if frequency.endswith("m"):
            return MiniteBarDataSourceMixin.raw_history_bars(
                self, instrument, frequency, start_dt=start_dt, end_dt=end_dt, length=length)
        else:
            code = instrument.order_book_id
            db = self._get_db(instrument, frequency)
            data = self._handler.read(code, db=db, start=start_dt, end=end_dt, length=length, sort=[("datetime", 1)]). \
                reset_index()
            if data is not None and data.size:
                return DataFrameConverter.df2np(data)
            else:
                return DataFrameConverter.empty()
    def is_base_frequency(self, instrument, frequency):
        """True when ``frequency`` is stored natively for this instrument type."""
        if isinstance(instrument, Instrument):
            instrument_type = instrument.enum_type
        else:
            instrument_type = instrument
        type_ = INSTRUMENT_TYPE_MAP[instrument_type]
        return type_ in self._db_map and frequency in self._db_map[type_]
    def current_snapshot(self, instrument, frequency, dt):
        # real-time snapshots are not provided by this data source
        pass
    def _get_date_range(self, frequency):
        """Return the (first, last) trade date stored for ``frequency``,
        probed via the 600000.XSHG benchmark collection."""
        from pymongo import DESCENDING
        try:
            db = self._get_db(INSTRUMENT_TYPE.CS, frequency)
        except NoneDataError:
            # fall back to the base database of the same bar unit
            db = self._get_db(INSTRUMENT_TYPE.CS, "1" + frequency[-1])
        key = "_d" if frequency.endswith("m") else "datetime"
        try:
            start = self._handler.client.get_database(db).get_collection("600000.XSHG").find() \
                .sort(key).limit(1)[0][key]
            end = self._handler.client.get_database(db).get_collection("600000.XSHG").find() \
                .sort(key, direction=DESCENDING).limit(1)[0][key]
        except IndexError:
            raise RuntimeError("无法从MongoDb获取数据时间范围")
        return start.date(), end.date()
    @lru_cache(maxsize=10)
    def available_data_range(self, frequency):
        """Overall available data range; day/hour bars use a fixed start date."""
        if frequency.endswith("d") or frequency.endswith("h"):
            return date(2012, 6, 1), date.today() - relativedelta(days=1)
        return self._get_date_range(frequency)
class MongoCacheDataSource(MongoDataSource, CacheMixin):
    """MongoDataSource variant with the CacheMixin read cache enabled."""
    def __init__(self, path, mongo_url):
        super(MongoCacheDataSource, self).__init__(path, mongo_url)
        # CacheMixin does not take part in cooperative super().__init__;
        # initialize it explicitly.
        CacheMixin.__init__(self)
| [
"dateutil.relativedelta.relativedelta",
"rqalpha.mod.rqalpha_mod_fxdayu_source.data_source.common.CacheMixin.__init__",
"rqalpha.mod.rqalpha_mod_fxdayu_source.utils.asyncio.get_asyncio_event_loop",
"rqalpha.mod.rqalpha_mod_fxdayu_source.utils.converter.DataFrameConverter.empty",
"numpy.searchsorted",
"dat... | [((5985, 6006), 'rqalpha.utils.py2.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (5994, 6006), False, 'from rqalpha.utils.py2 import lru_cache\n'), ((1346, 1369), 'rqalpha.mod.rqalpha_mod_fxdayu_source.share.mongo_handler.MongoHandler', 'MongoHandler', (['mongo_url'], {}), '(mongo_url)\n', (1358, 1369), False, 'from rqalpha.mod.rqalpha_mod_fxdayu_source.share.mongo_handler import MongoHandler\n'), ((2797, 2814), 'pandas.DataFrame', 'pd.DataFrame', (['dct'], {}), '(dct)\n', (2809, 2814), True, 'import pandas as pd\n'), ((3629, 3653), 'rqalpha.mod.rqalpha_mod_fxdayu_source.utils.asyncio.get_asyncio_event_loop', 'get_asyncio_event_loop', ([], {}), '()\n', (3651, 3653), False, 'from rqalpha.mod.rqalpha_mod_fxdayu_source.utils.asyncio import get_asyncio_event_loop\n'), ((3918, 3961), 'numpy.searchsorted', 'np.searchsorted', (["bars['datetime']", 's_dt_int'], {}), "(bars['datetime'], s_dt_int)\n", (3933, 3961), True, 'import numpy as np\n'), ((3978, 4035), 'numpy.searchsorted', 'np.searchsorted', (["bars['datetime']", 'e_dt_int'], {'side': '"""right"""'}), "(bars['datetime'], e_dt_int, side='right')\n", (3993, 4035), True, 'import numpy as np\n'), ((6414, 6439), 'rqalpha.mod.rqalpha_mod_fxdayu_source.data_source.common.CacheMixin.__init__', 'CacheMixin.__init__', (['self'], {}), '(self)\n', (6433, 6439), False, 'from rqalpha.mod.rqalpha_mod_fxdayu_source.data_source.common import CacheMixin\n'), ((3243, 3270), 'rqalpha.utils.datetime_func.convert_date_to_int', 'convert_date_to_int', (['s_date'], {}), '(s_date)\n', (3262, 3270), False, 'from rqalpha.utils.datetime_func import convert_date_to_int\n'), ((3299, 3326), 'rqalpha.utils.datetime_func.convert_date_to_int', 'convert_date_to_int', (['e_date'], {}), '(e_date)\n', (3318, 3326), False, 'from rqalpha.utils.datetime_func import convert_date_to_int\n'), ((3811, 3841), 'rqalpha.mod.rqalpha_mod_fxdayu_source.utils.converter.DataFrameConverter.df2np', 'DataFrameConverter.df2np', (['bars'], 
{}), '(bars)\n', (3835, 3841), False, 'from rqalpha.mod.rqalpha_mod_fxdayu_source.utils.converter import DataFrameConverter\n'), ((3875, 3901), 'rqalpha.mod.rqalpha_mod_fxdayu_source.utils.converter.DataFrameConverter.empty', 'DataFrameConverter.empty', ([], {}), '()\n', (3899, 3901), False, 'from rqalpha.mod.rqalpha_mod_fxdayu_source.utils.converter import DataFrameConverter\n'), ((4247, 4370), 'rqalpha.mod.rqalpha_mod_fxdayu_source.data_source.common.minite.MiniteBarDataSourceMixin.raw_history_bars', 'MiniteBarDataSourceMixin.raw_history_bars', (['self', 'instrument', 'frequency'], {'start_dt': 'start_dt', 'end_dt': 'end_dt', 'length': 'length'}), '(self, instrument, frequency,\n start_dt=start_dt, end_dt=end_dt, length=length)\n', (4288, 4370), False, 'from rqalpha.mod.rqalpha_mod_fxdayu_source.data_source.common.minite import MiniteBarDataSourceMixin\n'), ((4716, 4746), 'rqalpha.mod.rqalpha_mod_fxdayu_source.utils.converter.DataFrameConverter.df2np', 'DataFrameConverter.df2np', (['data'], {}), '(data)\n', (4740, 4746), False, 'from rqalpha.mod.rqalpha_mod_fxdayu_source.utils.converter import DataFrameConverter\n'), ((4788, 4814), 'rqalpha.mod.rqalpha_mod_fxdayu_source.utils.converter.DataFrameConverter.empty', 'DataFrameConverter.empty', ([], {}), '()\n', (4812, 4814), False, 'from rqalpha.mod.rqalpha_mod_fxdayu_source.utils.converter import DataFrameConverter\n'), ((6136, 6152), 'datetime.date', 'date', (['(2012)', '(6)', '(1)'], {}), '(2012, 6, 1)\n', (6140, 6152), False, 'from datetime import date, datetime, time\n'), ((6154, 6166), 'datetime.date.today', 'date.today', ([], {}), '()\n', (6164, 6166), False, 'from datetime import date, datetime, time\n'), ((6169, 6190), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(1)'}), '(days=1)\n', (6182, 6190), False, 'from dateutil.relativedelta import relativedelta\n'), ((3516, 3522), 'datetime.time', 'time', ([], {}), '()\n', (3520, 3522), False, 'from datetime import date, datetime, 
time\n'), ((3563, 3569), 'datetime.time', 'time', ([], {}), '()\n', (3567, 3569), False, 'from datetime import date, datetime, time\n')] |
import numpy
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import math
from itertools import cycle
# Grid configuration for the unigrid relaxation below.
SIZE_X = 128
I1 = 128
J1 = 128
H = 1/8  # mesh spacing

# U: solution, F: source term, B: boundary mask (1 = fixed point).
U = numpy.zeros((I1, J1))
F = numpy.zeros((I1, J1))
B = numpy.zeros((I1, J1))

# Smooth initial guess: a cosine ramp across the whole grid.
for row in range(I1):
    for col in range(J1):
        U[row, col] = math.cos(3.0 * (row + col - 2) * H)

# A 16x16 square patch held at 1 and flagged as boundary.
for row in range(32, 48):
    for col in range(32, 48):
        U[row, col] = 1
        B[row, col] = 1
def FND(I3, J3):
    # Unigrid direction: the value at (I3, J3) of a bilinear "hat" basis
    # function of half-width M1 centred on the current coarse point (I, J).
    # NOTE(review): this reads the module-level loop variables I, J and the
    # current mesh spacing M1 set inside the main loop below -- it is only
    # meaningful while those loops are running.
    return (M1-math.fabs(I-I3))*(M1-math.fabs(J-J3))/(M1*M1)
N=5  # number of mesh levels: spacings run over M1 = 2**(N-k)
prev_E = 1  # previous cycle's residual estimate, for the convergence ratio
while True:
    # One full unigrid cycle: sweep the level index k down and back up
    # (a V-cycle-like schedule), relaxing U toward the solution of the
    # discrete Laplace problem while respecting the boundary mask B.
    for root_level in range(0,N+1):
        for k in (list(range(0,root_level)) + list(range(root_level,0,-1))): #levels
            M1 = 2**(N-k)
            print(M1)
            for relaxes in range(0,2): #Number of relaxations; 1 usually suffices
                E=0
                T = U.copy()
                # Visit coarse-grid points (I, J) with stride M1.
                for I in range(M1,I1-M1,M1):
                    for J in range(M1,J1-M1,M1):
                        submesh_relaxes = 1 #
                        if(B[I-M1:I+M1,J-M1:J+M1].max() and B[I-M1:I+M1,J-M1:J+M1].sum() != 9 and k < 2): #if there's a boundary nearby, pre-smooth.
                            # In-place four-neighbour averaging of the free
                            # points of the patch (10 sweeps) before taking
                            # the coarse step.
                            for d in range(0,10):
                                for I3 in range(I-M1+1,I+M1): #fix ranges
                                    for J3 in range(J-M1+1,J+M1):
                                        if(not B[I3,J3]):
                                            U[I3,J3] = ((U[I3,J3-1] + U[I3,J3+1] + U[I3-1,J3] + U[I3+1,J3] + F[I3,J3]) / 4.0)
                        for d in range(0,submesh_relaxes):
                            A1=0
                            R1=0
                            # Accumulate the projections of the residual (R1)
                            # and of the operator applied to the direction (A1)
                            # onto the hat function FND, over free points only.
                            for I3 in range(I-M1+1,I+M1): #fix ranges
                                for J3 in range(J-M1+1,J+M1):
                                    D = 4
                                    F[I3,J3] = 0
                                    R = (D*U[I3,J3]) - U[I3,J3-1] - U[I3,J3+1] - U[I3-1,J3] - U[I3+1,J3]
                                    R -= F[I3,J3] #compute residual
                                    A3 = D*FND(I3,J3) - FND(I3,J3+1) - FND(I3,J3-1) - FND(I3+1,J3) - FND(I3-1,J3)
                                    if(not B[I3,J3]):
                                        R1 = R1 + FND(I3,J3)*R
                                        A1 = A1 + FND(I3,J3)*A3
                            # Step length along the unigrid direction; the 0.8
                            # factor under-relaxes the update.
                            S=R1/A1
                            E=E+R1*R1
                            for I3 in range(I-M1+1,I+M1):
                                for J3 in range(J-M1+1,J+M1):
                                    if(not B[I3,J3]):
                                        T[I3,J3] = U[I3,J3] - 0.8*S*FND(I3,J3)
                numpy.copyto(U,T)
            # Residual-norm estimate for this level, scaled by mesh size.
            E=math.sqrt(E)/M1/H
            print(E)
    # #FND COMPUTES THE UNIGRID DIRECTIONS
    # Final fine-grid smoothing sweep over the whole domain; T ends up
    # holding the change made by this sweep.
    T = U.copy()
    for I3 in range(1,I1-1): #fix ranges
        for J3 in range(1,J1-1):
            if(not B[I3,J3]):
                U[I3,J3] = ((U[I3,J3-1] + U[I3,J3+1] + U[I3-1,J3] + U[I3+1,J3] + F[I3,J3]) / 4.0)
    T = U - T
    # Visualise the current potentials and the boundary mask.
    plt.subplot(2, 3, 2)
    plt.gca().set_title('Potentials')
    plt.imshow(U)
    plt.subplot(2, 3, 3)
    plt.gca().set_title('Boundaries')
    plt.imshow(B)
    print("Converge: {}".format(E/prev_E))
    print("a: {}".format(numpy.linalg.norm(T)))
    prev_E = E
    # Non-blocking redraw so the animation updates every cycle.
    plt.draw()
    plt.pause(0.001)
    plt.cla()
| [
"matplotlib.pyplot.imshow",
"numpy.copyto",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.cla",
"math.sqrt",
"math.cos",
"numpy.zeros",
"math.fabs",
"numpy.linalg.norm",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.subplot"
] | [((175, 196), 'numpy.zeros', 'numpy.zeros', (['(I1, J1)'], {}), '((I1, J1))\n', (186, 196), False, 'import numpy\n'), ((200, 221), 'numpy.zeros', 'numpy.zeros', (['(I1, J1)'], {}), '((I1, J1))\n', (211, 221), False, 'import numpy\n'), ((225, 246), 'numpy.zeros', 'numpy.zeros', (['(I1, J1)'], {}), '((I1, J1))\n', (236, 246), False, 'import numpy\n'), ((3118, 3138), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(2)'], {}), '(2, 3, 2)\n', (3129, 3138), True, 'import matplotlib.pyplot as plt\n'), ((3181, 3194), 'matplotlib.pyplot.imshow', 'plt.imshow', (['U'], {}), '(U)\n', (3191, 3194), True, 'import matplotlib.pyplot as plt\n'), ((3199, 3219), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(3)'], {}), '(2, 3, 3)\n', (3210, 3219), True, 'import matplotlib.pyplot as plt\n'), ((3262, 3275), 'matplotlib.pyplot.imshow', 'plt.imshow', (['B'], {}), '(B)\n', (3272, 3275), True, 'import matplotlib.pyplot as plt\n'), ((3392, 3402), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (3400, 3402), True, 'import matplotlib.pyplot as plt\n'), ((3407, 3423), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (3416, 3423), True, 'import matplotlib.pyplot as plt\n'), ((3428, 3437), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3435, 3437), True, 'import matplotlib.pyplot as plt\n'), ((408, 439), 'math.cos', 'math.cos', (['(3.0 * (I + J - 2) * H)'], {}), '(3.0 * (I + J - 2) * H)\n', (416, 439), False, 'import math\n'), ((3143, 3152), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3150, 3152), True, 'import matplotlib.pyplot as plt\n'), ((3224, 3233), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3231, 3233), True, 'import matplotlib.pyplot as plt\n'), ((3344, 3364), 'numpy.linalg.norm', 'numpy.linalg.norm', (['T'], {}), '(T)\n', (3361, 3364), False, 'import numpy\n'), ((554, 571), 'math.fabs', 'math.fabs', (['(I - I3)'], {}), '(I - I3)\n', (563, 571), False, 'import math\n'), ((575, 592), 'math.fabs', 
'math.fabs', (['(J - J3)'], {}), '(J - J3)\n', (584, 592), False, 'import math\n'), ((2758, 2776), 'numpy.copyto', 'numpy.copyto', (['U', 'T'], {}), '(U, T)\n', (2770, 2776), False, 'import numpy\n'), ((2791, 2803), 'math.sqrt', 'math.sqrt', (['E'], {}), '(E)\n', (2800, 2803), False, 'import math\n')] |
from helita.sim import rh15d
import matplotlib.pyplot as plt
import numpy as np
import os
import warnings # ignore tedious warnings
# Silence the noisy warnings emitted while loading RH output files.
warnings.filterwarnings("ignore")
##############################################################
def load_rh_data(folder, print_attributes=False):
    """Load RH 1.5D output from *folder* into module-level globals.

    Sets DATA, WAVELENGTH, WAVELENGTH_INDEX and WAVELENGTH_SELECTED so the
    other helpers in this module can use them.

    Parameters
    ----------
    folder : str
        Path to an RH output folder readable by ``rh15d.Rh15dout``.
    print_attributes : bool, optional
        If True, print every file object attached to the RH output.
    """
    # Reset IPython kernel state when running inside IPython; outside
    # IPython the import fails and we simply continue.  The original used a
    # bare ``except:`` -- narrowed so KeyboardInterrupt/SystemExit pass.
    try:
        from IPython import get_ipython
        get_ipython().magic("reset -sf")
    except Exception:
        pass
    # defines variables that should be global
    global DATA, WAVELENGTH, WAVELENGTH_INDEX, WAVELENGTH_SELECTED
    DATA = rh15d.Rh15dout(folder)
    WAVELENGTH = DATA.files[4].wavelength
    # Fixed wavelength window in nm -- presumably the Mg II h&k region;
    # TODO(review): confirm against the model atom used in the RH run.
    WAVELENGTH_INDEX = np.where((WAVELENGTH.values > 279.401921894) &
                                (WAVELENGTH.values < 280.501526399))[0]
    WAVELENGTH_SELECTED = WAVELENGTH[WAVELENGTH_INDEX]
    # Optionally dump all attributes of the RH output for inspection.
    if print_attributes:
        for i, file in enumerate(DATA.files):
            print("\n\n--------------------------\n\n")
            print("data.files[index]: ", i)
            print(file)
##############################################################
# get all output folders within current folder
def get_output_folders():
    """Return RH output folder names found under the current directory.

    A directory qualifies when its path contains all of "x", "y", "dx" and
    "dy" (the naming scheme of the RH run folders, e.g.
    "x0-1024_y0-1024_dxdy25-25") and does not contain "wrong".

    Returns
    -------
    list of str
        Matching paths with any leading "./" stripped.
    """
    output_folders = []
    for dirpath, _dirnames, _filenames in os.walk("."):
        # "x not in y" reads better than "not x in y" (same semantics).
        if "wrong" not in dirpath and all(tag in dirpath for tag in ["x", "y", "dx", "dy"]):
            output_folders.append(dirpath.replace("./", ""))
    return output_folders
##############################################################
# plots intensity from RH code
def plot_intensity():
    """Plot the spatially averaged intensity spectrum of the loaded RH run.

    Requires :func:`load_rh_data` to have been called first so that DATA,
    WAVELENGTH_INDEX and WAVELENGTH_SELECTED are set.

    Removed dead code from the original: unused ``nx``/``ny`` lookups, two
    unused ``np.random.randint`` draws and a commented-out per-column plot
    loop.
    """
    # Average over both spatial axes, keeping only the wavelength axis.
    mean_I = np.mean(DATA.files[4].intensity[:, :, WAVELENGTH_INDEX], axis=(0, 1))
    plt.plot(WAVELENGTH_SELECTED, mean_I, ".-")
    plt.xlabel("Wavelength (nm)")
    plt.ylabel(r"Intensity $(I/I_0)$")
    plt.grid()
    plt.show()
##############################################################
def kmean_model(data=False, k_create_model=False, test_model=False,
                test_profiles=False, wavelength=False):
    """Train a new k-means model or evaluate an existing one.

    Parameters
    ----------
    data : numpy.ndarray or False
        2-D array of intensity profiles (profiles x wavelengths) used for
        training.  Required when ``test_model`` is not given.
    k_create_model : int or False
        Number of clusters for a newly trained model.
    test_model : str or False
        Identifier/path of a saved model to evaluate; when given, no
        training happens.
    test_profiles : numpy.ndarray or False
        Profiles to evaluate the saved model on.
    wavelength : array-like or False
        Wavelength grid matching the profiles.

    Raises
    ------
    ValueError
        If training is requested without valid 2-D input data.
    """
    if not test_model:
        # Train a new model.  BUG FIX: the original validated the
        # module-level ``I_data`` (usually undefined -> NameError on every
        # call) instead of the ``data`` argument, and used ``assert`` for
        # input validation, which vanishes under ``python -O``.
        if data is False:
            raise ValueError("Did not input data!")
        if data.ndim != 2:
            raise ValueError("Wrong dimension on input data! Must be two-dimensional.")
        from rh_kmean import create_kmean_from_data
        create_kmean_from_data(data, k_value=k_create_model)
    # test a trained model
    else:
        from rh_kmean import test_kmean_model
        test_kmean_model(test_model,
                         test_profiles=test_profiles,
                         wavelength=wavelength,
                         k_nearest=False)
##############################################################
def get_intensity_from_all_folders(plot_converge_box=False):
    """Collect intensity spectra from every RH output folder.

    Iterates over all folders returned by get_output_folders(), loading each
    with load_rh_data() (which sets the module-level DATA and
    WAVELENGTH_INDEX globals), and gathers one spectrum per converged model
    column while tracking which columns never converged (NaN intensities).

    Parameters
    ----------
    plot_converge_box : bool, optional
        If True, scatter-plot converged vs non-converged columns inside the
        1024 x 1024 model box.

    Returns
    -------
    tuple of numpy.ndarray
        (intensities, converged column coords [x, y],
         non-converged column coords [x, y]).
    """
    folders = get_output_folders()
    colx = [] # x coordinate of converged columns
    coly = [] # y coordinate of converged columns
    colx_nc = [] # x columns that did not converge
    coly_nc = [] # y columns that did not converge
    I = [] # intensities
    #folders.pop(3) # as the folder is currently empty
    #folders.pop(1)
    #folders.append(".")
    for f in folders:
        print(f)
    # goes through all folders that is
    # output from rh code
    for f_i, folder in enumerate(folders):
        load_rh_data(folder=folder, print_attributes=False)
        nx = DATA.files[4].nx # number of columns in x
        ny = DATA.files[4].ny # number of columns in y
        xnum = DATA.files[3].xnum.values # column index x from model
        ynum = DATA.files[3].ynum.values # column index y from model
        # making sure that same columns are
        # not added multiple times
        if f_i != 0:
            for i in range(nx):
                for j in range(ny):
                    add_col = True
                    # if column is already added, do not add again
                    # (linear scan over all previously added columns)
                    for k in range(len(colx)):
                        if xnum[i] == colx[k] and ynum[j] == coly[k]:
                            add_col = False
                            break
                    # a column counts as converged when its intensity
                    # contains no NaN values
                    if add_col and ~np.isnan(DATA.files[4].intensity[i,j,:].values).any():
                        colx.append(xnum[i])
                        coly.append(ynum[j])
                        I.append(DATA.files[4].intensity[i, j, WAVELENGTH_INDEX].values)
                    elif add_col:
                        colx_nc.append(xnum[i])
                        coly_nc.append(ynum[j])
        # if first output folder, add all columns
        else:
            for i in range(nx):
                for j in range(ny):
                    if ~np.isnan(DATA.files[4].intensity[i,j,:].values).any():
                        colx.append(xnum[i])
                        coly.append(ynum[j])
                        I.append(DATA.files[4].intensity[i, j, WAVELENGTH_INDEX].values)
                    else:
                        colx_nc.append(xnum[i])
                        coly_nc.append(ynum[j])
    # remove elements from col(x/y)_nc that later
    # converged (from other folders/runs)
    pop = []
    for i in range(len(colx_nc)):
        for j in range(len(colx)):
            if colx_nc[i] == colx[j] and coly_nc[i] == coly[j]:
                pop.append(i)
                #colx_nc.pop(i)
                #coly_nc.pop(i)
    # must pop afterwards (and in reverse), else we would remove wrong indices
    pop.reverse()
    for i in pop:
        colx_nc.pop(i)
        coly_nc.pop(i)
    # plot columns that have and have not
    # converged in model
    if plot_converge_box:
        plt.close()
        fig,ax = plt.subplots()
        ax.set_title("Converged synthetic spectra of model: %.2f %%"
                     % (100*len(colx)/1024**2))
        ax.plot([0,1024,1024,0,0], [0,0,1024,1024,0], "--", color="black", label="Model box", lw=0.5)
        ax.scatter(colx, coly, marker="s", label="Converged columns", s=20, color="green")
        ax.scatter(colx_nc, coly_nc, marker="s", label="Non-converged columns", s=20, color="red")
        ax.legend()
        ax.grid()
        fig.show()
    # return intensities, converged columns
    # and non-converged columns
    return np.array(I), np.array([colx,coly]), np.array([colx_nc,coly_nc])
##############################################################
get_intensity_from_all_folders(plot_converge_box=True)
#get_output_folders()
#output_folder = "x0-1024_y0-1024_dxdy25-25"
#load_rh_data(folder=output_folder, print_attributes=False)
#plot_intensity()
#I_data = DATA.files[4].intensity[:,:,WAVELENGTH_INDEX].values
#I_data = I_data[~np.isnan(I_data).any(axis=2)]
"""k = 5
kmean_model(data=I_data, k_create_model=k)
kmean_model(test_model="KMEAN_MODELS/1641_%i"%k,
test_profiles=I_data,
wavelength=WAVELENGTH_SELECTED)"""
#
| [
"IPython.get_ipython",
"numpy.mean",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"rh_kmean.create_kmean_from_data",
"matplotlib.pyplot.close",
"numpy.array",
"helita.sim.rh15d.Rh15dout",
"numpy.random.randint",
"n... | [((136, 169), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (159, 169), False, 'import warnings\n'), ((609, 631), 'helita.sim.rh15d.Rh15dout', 'rh15d.Rh15dout', (['folder'], {}), '(folder)\n', (623, 631), False, 'from helita.sim import rh15d\n'), ((1312, 1324), 'os.walk', 'os.walk', (['"""."""'], {}), "('.')\n", (1319, 1324), False, 'import os\n'), ((2053, 2122), 'numpy.mean', 'np.mean', (['DATA.files[4].intensity[:, :, WAVELENGTH_INDEX]'], {'axis': '(0, 1)'}), '(DATA.files[4].intensity[:, :, WAVELENGTH_INDEX], axis=(0, 1))\n', (2060, 2122), True, 'import numpy as np\n'), ((2125, 2168), 'matplotlib.pyplot.plot', 'plt.plot', (['WAVELENGTH_SELECTED', 'mean_I', '""".-"""'], {}), "(WAVELENGTH_SELECTED, mean_I, '.-')\n", (2133, 2168), True, 'import matplotlib.pyplot as plt\n'), ((2173, 2202), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wavelength (nm)"""'], {}), "('Wavelength (nm)')\n", (2183, 2202), True, 'import matplotlib.pyplot as plt\n'), ((2207, 2240), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity $(I/I_0)$"""'], {}), "('Intensity $(I/I_0)$')\n", (2217, 2240), True, 'import matplotlib.pyplot as plt\n'), ((2246, 2256), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2254, 2256), True, 'import matplotlib.pyplot as plt\n'), ((2261, 2271), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2269, 2271), True, 'import matplotlib.pyplot as plt\n'), ((744, 832), 'numpy.where', 'np.where', (['((WAVELENGTH.values > 279.401921894) & (WAVELENGTH.values < 280.501526399))'], {}), '((WAVELENGTH.values > 279.401921894) & (WAVELENGTH.values < \n 280.501526399))\n', (752, 832), True, 'import numpy as np\n'), ((1735, 1756), 'numpy.random.randint', 'np.random.randint', (['nx'], {}), '(nx)\n', (1752, 1756), True, 'import numpy as np\n'), ((1758, 1779), 'numpy.random.randint', 'np.random.randint', (['ny'], {}), '(ny)\n', (1775, 1779), True, 'import numpy as np\n'), ((2784, 2836), 
'rh_kmean.create_kmean_from_data', 'create_kmean_from_data', (['data'], {'k_value': 'k_create_model'}), '(data, k_value=k_create_model)\n', (2806, 2836), False, 'from rh_kmean import create_kmean_from_data\n'), ((2929, 3031), 'rh_kmean.test_kmean_model', 'test_kmean_model', (['test_model'], {'test_profiles': 'test_profiles', 'wavelength': 'wavelength', 'k_nearest': '(False)'}), '(test_model, test_profiles=test_profiles, wavelength=\n wavelength, k_nearest=False)\n', (2945, 3031), False, 'from rh_kmean import test_kmean_model\n'), ((6097, 6108), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6106, 6108), True, 'import matplotlib.pyplot as plt\n'), ((6126, 6140), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6138, 6140), True, 'import matplotlib.pyplot as plt\n'), ((6695, 6706), 'numpy.array', 'np.array', (['I'], {}), '(I)\n', (6703, 6706), True, 'import numpy as np\n'), ((6708, 6730), 'numpy.array', 'np.array', (['[colx, coly]'], {}), '([colx, coly])\n', (6716, 6730), True, 'import numpy as np\n'), ((6731, 6759), 'numpy.array', 'np.array', (['[colx_nc, coly_nc]'], {}), '([colx_nc, coly_nc])\n', (6739, 6759), True, 'import numpy as np\n'), ((413, 426), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (424, 426), False, 'from IPython import get_ipython\n'), ((5168, 5217), 'numpy.isnan', 'np.isnan', (['DATA.files[4].intensity[i, j, :].values'], {}), '(DATA.files[4].intensity[i, j, :].values)\n', (5176, 5217), True, 'import numpy as np\n'), ((4646, 4695), 'numpy.isnan', 'np.isnan', (['DATA.files[4].intensity[i, j, :].values'], {}), '(DATA.files[4].intensity[i, j, :].values)\n', (4654, 4695), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.