code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# Useful packages
import warnings
import os
import time
import signal
import sys
import copy
import h5py
import pickle
import random
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import seaborn
import pandas as pd
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
# Experiment setting (TF1-style command-line flags; values read via FLAGS.<name>)
FLAGS = tf.app.flags.FLAGS
# -- Configuration of the environnement --
tf.app.flags.DEFINE_string('log_dir', "../log", "log_dir")
tf.app.flags.DEFINE_string('dir_data', "", "Repository for all the files needed for the training and the evaluation")
tf.app.flags.DEFINE_bool('save', False, "Do you need to save the model?")
tf.app.flags.DEFINE_bool('restore', False, "Do you want to restore a previous model?")
tf.app.flags.DEFINE_bool('is_training', True, "Is the model trainable?")
tf.app.flags.DEFINE_string('processing', "train", "What to do with the model? {train,evaluate,predict}")
# -- Architecture of the neural network --
# Hidden-layer widths follow the Gene Ontology hierarchy, one layer per GO level.
tf.app.flags.DEFINE_integer('n_input', 54675, "number of features")
tf.app.flags.DEFINE_integer('n_classes', 1, "number of classes")
tf.app.flags.DEFINE_integer('n_layers', 6, "number of layers")
tf.app.flags.DEFINE_integer('n_hidden_1', 1574, "number of neurons for the first hidden layer") #Level 7
tf.app.flags.DEFINE_integer('n_hidden_2', 1386, "number of neurons for the second hidden layer") #Level 6
tf.app.flags.DEFINE_integer('n_hidden_3', 951, "number of neurons for the third hidden layer") #Level 5
tf.app.flags.DEFINE_integer('n_hidden_4', 515, "number of neurons for the fourth hidden layer") #Level 4
tf.app.flags.DEFINE_integer('n_hidden_5', 255, "number of neurons for the fifth hidden layer") #Level 3
tf.app.flags.DEFINE_integer('n_hidden_6', 90, "number of neurons for the sixth hidden layer") #Level 2
# -- Learning and Hyperparameters --
tf.app.flags.DEFINE_string('lr_method', 'adam', "optimizer {adam, momentum, adagrad, rmsprop}")
tf.app.flags.DEFINE_float('learning_rate', 0.001, "initial learning rate")
tf.app.flags.DEFINE_bool('bn', False, "use of batch normalization")
tf.app.flags.DEFINE_float('keep_prob', 0.4, "keep probability for the dropout")
# NOTE(review): the help text below contains two adjacent string literals
# ("...term {" and ", LGO, L2, L1}") which Python concatenates; the displayed
# help reads "{, LGO, L2, L1}". Confirm whether "" was meant to appear literally.
tf.app.flags.DEFINE_string('type_training', 'LGO', "regularization term {"", LGO, L2, L1}")
tf.app.flags.DEFINE_float('alpha', 1, "value of the hyperparameter alpha")
tf.app.flags.DEFINE_integer('display_step', 5, "when to print the performances")
tf.app.flags.DEFINE_integer('batch_size', 2**9, "the number of examples in a batch")
tf.app.flags.DEFINE_integer('epochs', 20, "the number of epochs for training")
tf.app.flags.DEFINE_string('GPU_device', '/gpu:0', "GPU device")
from base_model import BaseModel
def l1_loss_func(x):
    """Return the L1 norm of tensor `x` (sum of the absolute values)."""
    absolute_values = tf.math.abs(x)
    return tf.reduce_sum(absolute_values)
def l2_loss_func(x):
    """Return the squared L2 norm of tensor `x` (sum of the squared values)."""
    squared_values = tf.square(x)
    return tf.reduce_sum(squared_values)
def train(save_dir):
    """Build the GO-constrained MLP graph, train it, and return the history dict.

    Reads the connection masks and the train/test .npz datasets from
    FLAGS.dir_data, builds a TF1 graph (placeholders + BaseModel), optimizes the
    cross-entropy plus an optional regularization term (LGO / L2 / L1), and
    periodically records losses, accuracies and weight norms on both splits.

    Args:
        save_dir: directory where the model checkpoint is written when FLAGS.save.

    Returns:
        dict of performance histories (total loss / accuracy for train & test,
        GO / no-GO weight norms, and — when a regularizer is active — the CE and
        regularization components of the loss).
    """
    warnings.filterwarnings("ignore")
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
    # Last character of e.g. "/gpu:0" selects the visible GPU index.
    os.environ["CUDA_VISIBLE_DEVICES"]=FLAGS.GPU_device[len(FLAGS.GPU_device)-1]
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    # Load the useful files to build the architecture
    print("Loading the connection matrix...")
    start = time.time()
    adj_matrix = pd.read_csv(os.path.abspath(os.path.join(FLAGS.dir_data,"adj_matrix.csv")),index_col=0)
    first_matrix_connection = pd.read_csv(os.path.abspath(os.path.join(FLAGS.dir_data,"first_matrix_connection_GO.csv")),index_col=0)
    csv_go = pd.read_csv(os.path.abspath(os.path.join(FLAGS.dir_data,"go_level.csv")),index_col=0)
    # One float32 0/1 mask per layer: entry (i, j) is 1 iff the connection is
    # allowed by the GO hierarchy. Masks are sliced from adj_matrix using the
    # GO terms of consecutive levels (7 down to 2).
    connection_matrix = []
    connection_matrix.append(np.array(first_matrix_connection.values,dtype=np.float32))
    connection_matrix.append(np.array(adj_matrix.loc[csv_go[str(7)].loc[lambda x: x==1].index,csv_go[str(6)].loc[lambda x: x==1].index].values,dtype=np.float32))
    connection_matrix.append(np.array(adj_matrix.loc[csv_go[str(6)].loc[lambda x: x==1].index,csv_go[str(5)].loc[lambda x: x==1].index].values,dtype=np.float32))
    connection_matrix.append(np.array(adj_matrix.loc[csv_go[str(5)].loc[lambda x: x==1].index,csv_go[str(4)].loc[lambda x: x==1].index].values,dtype=np.float32))
    connection_matrix.append(np.array(adj_matrix.loc[csv_go[str(4)].loc[lambda x: x==1].index,csv_go[str(3)].loc[lambda x: x==1].index].values,dtype=np.float32))
    connection_matrix.append(np.array(adj_matrix.loc[csv_go[str(3)].loc[lambda x: x==1].index,csv_go[str(2)].loc[lambda x: x==1].index].values,dtype=np.float32))
    # The output layer is unconstrained (all-ones mask).
    connection_matrix.append(np.ones((FLAGS.n_hidden_6, FLAGS.n_classes),dtype=np.float32))
    end = time.time()
    elapsed=end - start
    print("Total time: {}h {}min {}sec".format(time.gmtime(elapsed).tm_hour,
                                               time.gmtime(elapsed).tm_min,
                                               time.gmtime(elapsed).tm_sec))
    # Load the data
    print("Loading the data...")
    start = time.time()
    loaded = np.load(os.path.abspath(os.path.join(FLAGS.dir_data,"X_train.npz")))
    X_train = loaded['x']
    y_train = loaded['y']
    if FLAGS.n_classes>=2:
        # One-hot encode labels for the multi-class softmax head.
        y_train=to_categorical(y_train)
    loaded = np.load(os.path.abspath(os.path.join(FLAGS.dir_data,"X_test.npz")))
    X_test = loaded['x']
    y_test = loaded['y']
    if FLAGS.n_classes>=2:
        y_test=to_categorical(y_test)
    end = time.time()
    elapsed=end - start
    print("Total time: {}h {}min {}sec".format(time.gmtime(elapsed).tm_hour,
                                               time.gmtime(elapsed).tm_min,
                                               time.gmtime(elapsed).tm_sec))
    # Launch the model
    print("Launching the learning")
    if FLAGS.type_training != "":
        print("with {} and ALPHA={}".format(FLAGS.type_training,FLAGS.alpha))
    tf.reset_default_graph()
    # -- Inputs of the model --
    X = tf.placeholder(tf.float32, shape=[None, FLAGS.n_input])
    Y = tf.placeholder(tf.float32, shape=[None, FLAGS.n_classes])
    # -- Hyperparameters of the neural network --
    is_training = tf.placeholder(tf.bool,name="is_training") # Batch Norm hyperparameter
    learning_rate = tf.placeholder(tf.float32, name="learning_rate") # Optimizer hyperparameter
    keep_prob = tf.placeholder(tf.float32, name="keep_prob") # Dropout hyperparameter
    total_batches=len(X_train)//FLAGS.batch_size
    network=BaseModel(X=X,n_input=FLAGS.n_input,n_classes=FLAGS.n_classes,
                      n_hidden_1=FLAGS.n_hidden_1,n_hidden_2=FLAGS.n_hidden_2,n_hidden_3=FLAGS.n_hidden_3,n_hidden_4=FLAGS.n_hidden_4,
                      n_hidden_5=FLAGS.n_hidden_5,n_hidden_6=FLAGS.n_hidden_6,keep_prob=keep_prob,is_training=is_training) # Model instantiation
    pred = network()
    # -- Loss function --
    # ---- CE loss ----
    # Compute the average of the loss across all the dimensions
    if FLAGS.n_classes>=2:
        ce_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=Y))
    else:
        ce_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pred, labels=Y))
    # ---- Regularization loss (LGO, L2, L1) ----
    additional_loss = 0
    if FLAGS.type_training=="LGO":
        # Penalize only the weights of connections NOT allowed by the GO masks.
        for idx,weight in enumerate(network.weights.values()):
            additional_loss+=l2_loss_func(weight*(1-connection_matrix[idx])) # Penalization of the noGO connections
    elif FLAGS.type_training=="L2" :
        for weight in network.weights.values():
            additional_loss += l2_loss_func(weight)
    elif FLAGS.type_training=="L1" :
        for idx,weight in enumerate(network.weights.values()):
            additional_loss+=l1_loss_func(weight)
    # ---- Total loss ----
    if FLAGS.type_training!='' :
        total_loss = ce_loss + FLAGS.alpha*additional_loss
    else:
        total_loss = ce_loss
    # ---- Norm of the weights of the connections ----
    # Mean (per-connection) L1 magnitude of the GO vs. non-GO weights, averaged
    # over the hidden layers (output layer excluded via [:-1]).
    norm_no_go_connections=0
    norm_go_connections=0
    for idx,weight in enumerate(list(network.weights.values())[:-1]):
        norm_no_go_connections+=tf.norm((weight*(1-connection_matrix[idx])),ord=1)/np.count_nonzero(1-connection_matrix[idx])
        norm_go_connections+=tf.norm((weight*connection_matrix[idx]),ord=1)/np.count_nonzero(connection_matrix[idx])
    norm_no_go_connections/=FLAGS.n_layers
    norm_go_connections/=FLAGS.n_layers
    # -- Optimizer --
    # UPDATE_OPS dependency ensures batch-norm moving statistics are refreshed
    # on every optimization step.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        if FLAGS.lr_method=="adam":
            trainer = tf.train.AdamOptimizer(learning_rate = learning_rate)
        elif FLAGS.lr_method=="momentum":
            trainer = tf.train.MomentumOptimizer(learning_rate = learning_rate, momentum=0.09, use_nesterov=True)
        elif FLAGS.lr_method=="adagrad":
            trainer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
        elif FLAGS.lr_method=="rmsprop":
            trainer = tf.train.RMSPropOptimizer(learning_rate = learning_rate)
        optimizer = trainer.minimize(total_loss)
    # -- Compute the prediction error --
    if FLAGS.n_classes>=2:
        correct_prediction = tf.equal(tf.argmax(pred,1), tf.argmax(Y, 1))
    else:
        # Binary head: threshold the sigmoid probability at 0.5.
        sig_pred=tf.nn.sigmoid(pred)
        sig_pred=tf.cast(sig_pred>0.5,dtype=tf.int64)
        ground_truth=tf.cast(Y,dtype=tf.int64)
        correct_prediction = tf.equal(sig_pred,ground_truth)
    # -- Calculate the accuracy across all the given batches and average them out --
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # -- Initialize the variables --
    init = tf.global_variables_initializer()
    # -- Configure the use of the gpu --
    config = tf.ConfigProto(log_device_placement=False,allow_soft_placement=True)
    #config.gpu_options.allow_growth = True, log_device_placement=True
    if FLAGS.save or FLAGS.restore : saver = tf.train.Saver()
    start = time.time()
    with tf.device(FLAGS.GPU_device):
        with tf.Session(config=config) as sess:
            sess.run(init)
            # Per-epoch histories collected every FLAGS.display_step epochs.
            train_c_accuracy=[]
            train_c_total_loss=[]
            test_c_accuracy=[]
            test_c_total_loss=[]
            c_l1_norm_go=[]
            c_l1_norm_no_go=[]
            if FLAGS.type_training!="":
                train_c_ce_loss=[]
                test_c_ce_loss=[]
                train_c_additional_loss=[]
                test_c_additional_loss=[]
            for epoch in tqdm(np.arange(0,FLAGS.epochs)):
                # Shuffle the training set and split it into mini-batches.
                index = np.arange(X_train.shape[0])
                np.random.shuffle(index)
                batch_X = np.array_split(X_train[index], total_batches)
                batch_Y = np.array_split(y_train[index], total_batches)
                # -- Optimization --
                for batch in range(total_batches):
                    batch_x,batch_y=batch_X[batch],batch_Y[batch]
                    sess.run(optimizer, feed_dict={X: batch_x,Y: batch_y,is_training:FLAGS.is_training,keep_prob:FLAGS.keep_prob,learning_rate:FLAGS.learning_rate})
                # Evaluate every display_step epochs (and at epoch 0, unless
                # display_step == epochs, where epoch 0 would be redundant).
                if ((epoch+1) % FLAGS.display_step == 0) or (epoch==0) :
                    if not((FLAGS.display_step==FLAGS.epochs) and (epoch==0)):
                        # -- Calculate batch loss and accuracy after a specific epoch on the train and test set --
                        avg_cost,avg_acc,l1_norm_no_go,l1_norm_go = sess.run([total_loss, accuracy,norm_no_go_connections,norm_go_connections], feed_dict={X: X_train,Y: y_train,
                                                                                                                                                           is_training:False,keep_prob:1.0})
                        train_c_total_loss.append(avg_cost)
                        train_c_accuracy.append(avg_acc)
                        c_l1_norm_go.append(l1_norm_go)
                        c_l1_norm_no_go.append(l1_norm_no_go)
                        if FLAGS.type_training!="":
                            avg_ce_loss,avg_additional_loss= sess.run([ce_loss, additional_loss], feed_dict={X: X_train,Y: y_train,is_training:False,keep_prob:1.0})
                            train_c_additional_loss.append(avg_additional_loss)
                            train_c_ce_loss.append(avg_ce_loss)
                        avg_cost,avg_acc = sess.run([total_loss, accuracy], feed_dict={X: X_test,Y: y_test,is_training:False,keep_prob:1.0})
                        test_c_total_loss.append(avg_cost)
                        test_c_accuracy.append(avg_acc)
                        if FLAGS.type_training!="":
                            avg_ce_loss,avg_additional_loss= sess.run([ce_loss, additional_loss], feed_dict={X: X_test,Y: y_test,is_training:False,keep_prob:1.0})
                            test_c_additional_loss.append(avg_additional_loss)
                            test_c_ce_loss.append(avg_ce_loss)
                        current_idx=len(train_c_total_loss)-1
                        print('| Epoch: {}/{} | Train: Loss {:.6f} Accuracy : {:.6f} '\
                              '| Test: Loss {:.6f} Accuracy : {:.6f}\n'.format(
                              epoch+1, FLAGS.epochs,train_c_total_loss[current_idx], train_c_accuracy[current_idx],test_c_total_loss[current_idx],test_c_accuracy[current_idx]))
            if FLAGS.save: saver.save(sess=sess, save_path=os.path.join(save_dir,"model"))
    end = time.time()
    elapsed=end - start
    print("Total time: {}h {}min {}sec ".format(time.gmtime(elapsed).tm_hour,
                                                time.gmtime(elapsed).tm_min,
                                                time.gmtime(elapsed).tm_sec))
    performances = {
        'total_loss':train_c_total_loss,'test_total_loss':test_c_total_loss,
        'acc':train_c_accuracy,'test_acc':test_c_accuracy
        }
    performances['norm_go']=c_l1_norm_go
    performances['norm_no_go']=c_l1_norm_no_go
    if FLAGS.type_training!="":
        performances['additional_loss']=train_c_additional_loss
        performances['test_additional_loss']=test_c_additional_loss
        performances['ce_loss']=train_c_ce_loss
        performances['test_ce_loss']=test_c_ce_loss
    return performances
def evaluate(save_dir):
    """Rebuild the GO-constrained MLP graph, restore the checkpoint saved in
    `save_dir` (when FLAGS.restore), and print total loss and accuracy on the
    test set.

    Args:
        save_dir: directory containing the "model" checkpoint written by train().
    """
    warnings.filterwarnings("ignore")
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
    # Last character of e.g. "/gpu:0" selects the visible GPU index.
    os.environ["CUDA_VISIBLE_DEVICES"]=FLAGS.GPU_device[len(FLAGS.GPU_device)-1]
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    # Load the useful files to build the architecture
    print("Loading the connection matrix...")
    start = time.time()
    adj_matrix = pd.read_csv(os.path.join(FLAGS.dir_data,"adj_matrix.csv"),index_col=0)
    first_matrix_connection = pd.read_csv(os.path.join(FLAGS.dir_data,"first_matrix_connection_GO.csv"),index_col=0)
    csv_go = pd.read_csv(os.path.join(FLAGS.dir_data,"go_level.csv"),index_col=0)
    # One float32 0/1 mask per layer (GO levels 7 down to 2); output layer dense.
    connection_matrix = []
    connection_matrix.append(np.array(first_matrix_connection.values,dtype=np.float32))
    connection_matrix.append(np.array(adj_matrix.loc[csv_go[str(7)].loc[lambda x: x==1].index,csv_go[str(6)].loc[lambda x: x==1].index].values,dtype=np.float32))
    connection_matrix.append(np.array(adj_matrix.loc[csv_go[str(6)].loc[lambda x: x==1].index,csv_go[str(5)].loc[lambda x: x==1].index].values,dtype=np.float32))
    connection_matrix.append(np.array(adj_matrix.loc[csv_go[str(5)].loc[lambda x: x==1].index,csv_go[str(4)].loc[lambda x: x==1].index].values,dtype=np.float32))
    connection_matrix.append(np.array(adj_matrix.loc[csv_go[str(4)].loc[lambda x: x==1].index,csv_go[str(3)].loc[lambda x: x==1].index].values,dtype=np.float32))
    connection_matrix.append(np.array(adj_matrix.loc[csv_go[str(3)].loc[lambda x: x==1].index,csv_go[str(2)].loc[lambda x: x==1].index].values,dtype=np.float32))
    connection_matrix.append(np.ones((FLAGS.n_hidden_6, FLAGS.n_classes),dtype=np.float32))
    end = time.time()
    elapsed=end - start
    print("Total time: {}h {}min {}sec".format(time.gmtime(elapsed).tm_hour,
                                               time.gmtime(elapsed).tm_min,
                                               time.gmtime(elapsed).tm_sec))
    # Load the data
    print("Loading the test dataset...")
    loaded = np.load(os.path.join(FLAGS.dir_data,"X_test.npz"))
    X_test = loaded['x']
    y_test = loaded['y']
    if FLAGS.n_classes>=2:
        # One-hot encode labels for the multi-class softmax head.
        y_test=to_categorical(y_test)
    end = time.time()
    elapsed=end - start
    print("Total time: {}h {}min {}sec".format(time.gmtime(elapsed).tm_hour,
                                               time.gmtime(elapsed).tm_min,
                                               time.gmtime(elapsed).tm_sec))
    # Launch the model
    print("Launching the evaluation")
    if FLAGS.type_training != "":
        print("with {} and ALPHA={}".format(FLAGS.type_training,FLAGS.alpha))
    tf.reset_default_graph()
    # -- Inputs of the model --
    X = tf.placeholder(tf.float32, shape=[None, FLAGS.n_input])
    Y = tf.placeholder(tf.float32, shape=[None, FLAGS.n_classes])
    # -- Hyperparameters of the neural network --
    is_training = tf.placeholder(tf.bool,name="is_training") # Batch Norm hyperparameter
    keep_prob = tf.placeholder(tf.float32, name="keep_prob") # Dropout hyperparameter
    network=BaseModel(X=X,n_input=FLAGS.n_input,n_classes=FLAGS.n_classes,
                      n_hidden_1=FLAGS.n_hidden_1,n_hidden_2=FLAGS.n_hidden_2,n_hidden_3=FLAGS.n_hidden_3,n_hidden_4=FLAGS.n_hidden_4,
                      n_hidden_5=FLAGS.n_hidden_5,n_hidden_6=FLAGS.n_hidden_6,keep_prob=keep_prob,is_training=is_training) # Model instantiation
    pred = network()
    # -- Loss function --
    # ---- CE loss ----
    # Compute the average of the loss across all the dimensions
    if FLAGS.n_classes>=2:
        # FIX: was `f.reduce_mean` (undefined name `f`), which raised a
        # NameError for any multi-class model. `train` uses `tf.reduce_mean`.
        ce_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=Y))
    else:
        ce_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pred, labels=Y))
    # ---- Regularization loss (LGO, L2, L1) ----
    additional_loss = 0
    if FLAGS.type_training=="LGO":
        for idx,weight in enumerate(network.weights.values()):
            additional_loss+=l2_loss_func(weight*(1-connection_matrix[idx])) # Penalization of the noGO connections
    elif FLAGS.type_training=="L2" :
        for weight in network.weights.values():
            additional_loss += l2_loss_func(weight)
    elif FLAGS.type_training=="L1" :
        for idx,weight in enumerate(network.weights.values()):
            additional_loss+=l1_loss_func(weight)
    # ---- Total loss ----
    if FLAGS.type_training!='' :
        total_loss = ce_loss + FLAGS.alpha*additional_loss
    else:
        total_loss = ce_loss
    # -- Compute the prediction error --
    if FLAGS.n_classes>=2:
        correct_prediction = tf.equal(tf.argmax(pred,1), tf.argmax(Y, 1))
    else:
        # Binary head: threshold the sigmoid probability at 0.5.
        sig_pred=tf.nn.sigmoid(pred)
        sig_pred=tf.cast(sig_pred>0.5,dtype=tf.int64)
        ground_truth=tf.cast(Y,dtype=tf.int64)
        correct_prediction = tf.equal(sig_pred,ground_truth)
    # -- Calculate the accuracy across all the given batches and average them out --
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # -- Configure the use of the gpu --
    config = tf.ConfigProto(log_device_placement=False,allow_soft_placement=True)
    #config.gpu_options.allow_growth = True, log_device_placement=True
    if FLAGS.restore : saver = tf.train.Saver()
    start = time.time()
    with tf.device(FLAGS.GPU_device):
        with tf.Session(config=config) as sess:
            if FLAGS.restore:
                saver.restore(sess,os.path.join(save_dir,"model"))
            # -- Calculate the final loss and the final accuracy on the test set --
            # NOTE(review): is_training is fed with FLAGS.is_training here, while
            # train() evaluates with is_training:False — confirm this is intended
            # (batch-norm behaves differently in training mode).
            avg_cost,avg_acc = sess.run([total_loss, accuracy], feed_dict={X: X_test,Y: y_test,is_training:FLAGS.is_training,keep_prob:1})
            print('Test loss {:.6f}, test accuracy : {:.6f}\n'.format(avg_cost,avg_acc))
    end = time.time()
    elapsed=end - start
    print("Total time: {}h {}min {}sec ".format(time.gmtime(elapsed).tm_hour,
                                                time.gmtime(elapsed).tm_min,
                                                time.gmtime(elapsed).tm_sec))
    return
def predict(save_dir):
    """Rebuild the GO-constrained MLP graph, restore the checkpoint saved in
    `save_dir` (when FLAGS.restore), and return the predicted classes for the
    test set.

    Args:
        save_dir: directory containing the "model" checkpoint written by train().

    Returns:
        The result of sess.run on the prediction op (a single-element list
        holding the array of predicted labels).
    """
    warnings.filterwarnings("ignore")
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
    # Last character of e.g. "/gpu:0" selects the visible GPU index.
    os.environ["CUDA_VISIBLE_DEVICES"]=FLAGS.GPU_device[len(FLAGS.GPU_device)-1]
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    # Load the useful files to build the architecture
    # NOTE(review): connection_matrix is loaded for parity with train/evaluate
    # but is not used by the prediction graph.
    print("Loading the connection matrix...")
    start = time.time()
    adj_matrix = pd.read_csv(os.path.join(FLAGS.dir_data,"adj_matrix.csv"),index_col=0)
    first_matrix_connection = pd.read_csv(os.path.join(FLAGS.dir_data,"first_matrix_connection_GO.csv"),index_col=0)
    csv_go = pd.read_csv(os.path.join(FLAGS.dir_data,"go_level.csv"),index_col=0)
    connection_matrix = []
    connection_matrix.append(np.array(first_matrix_connection.values,dtype=np.float32))
    connection_matrix.append(np.array(adj_matrix.loc[csv_go[str(7)].loc[lambda x: x==1].index,csv_go[str(6)].loc[lambda x: x==1].index].values,dtype=np.float32))
    connection_matrix.append(np.array(adj_matrix.loc[csv_go[str(6)].loc[lambda x: x==1].index,csv_go[str(5)].loc[lambda x: x==1].index].values,dtype=np.float32))
    connection_matrix.append(np.array(adj_matrix.loc[csv_go[str(5)].loc[lambda x: x==1].index,csv_go[str(4)].loc[lambda x: x==1].index].values,dtype=np.float32))
    connection_matrix.append(np.array(adj_matrix.loc[csv_go[str(4)].loc[lambda x: x==1].index,csv_go[str(3)].loc[lambda x: x==1].index].values,dtype=np.float32))
    connection_matrix.append(np.array(adj_matrix.loc[csv_go[str(3)].loc[lambda x: x==1].index,csv_go[str(2)].loc[lambda x: x==1].index].values,dtype=np.float32))
    connection_matrix.append(np.ones((FLAGS.n_hidden_6, FLAGS.n_classes),dtype=np.float32))
    end = time.time()
    elapsed=end - start
    print("Total time: {}h {}min {}sec".format(time.gmtime(elapsed).tm_hour,
                                               time.gmtime(elapsed).tm_min,
                                               time.gmtime(elapsed).tm_sec))
    # Load the data
    print("Loading the test dataset...")
    loaded = np.load(os.path.join(FLAGS.dir_data,"X_test.npz"))
    X_test = loaded['x']
    y_test = loaded['y']
    if FLAGS.n_classes>=2:
        # One-hot encode labels for the multi-class softmax head.
        y_test=to_categorical(y_test)
    end = time.time()
    elapsed=end - start
    print("Total time: {}h {}min {}sec".format(time.gmtime(elapsed).tm_hour,
                                               time.gmtime(elapsed).tm_min,
                                               time.gmtime(elapsed).tm_sec))
    # Launch the model
    print("Launching the evaluation")
    if FLAGS.type_training != "":
        print("with {} and ALPHA={}".format(FLAGS.type_training,FLAGS.alpha))
    tf.reset_default_graph()
    # -- Inputs of the model --
    X = tf.placeholder(tf.float32, shape=[None, FLAGS.n_input])
    Y = tf.placeholder(tf.float32, shape=[None, FLAGS.n_classes])
    # -- Hyperparameters of the neural network --
    is_training = tf.placeholder(tf.bool,name="is_training") # Batch Norm hyperparameter
    keep_prob = tf.placeholder(tf.float32, name="keep_prob") # Dropout hyperparameter
    network=BaseModel(X=X,n_input=FLAGS.n_input,n_classes=FLAGS.n_classes,
                      n_hidden_1=FLAGS.n_hidden_1,n_hidden_2=FLAGS.n_hidden_2,n_hidden_3=FLAGS.n_hidden_3,n_hidden_4=FLAGS.n_hidden_4,
                      n_hidden_5=FLAGS.n_hidden_5,n_hidden_6=FLAGS.n_hidden_6,keep_prob=keep_prob,is_training=is_training) # Model instantiation
    pred = network()
    # -- Compute the predicted classes --
    if FLAGS.n_classes>=2:
        y_hat = tf.argmax(pred,1)
    else:
        y_hat = tf.nn.sigmoid(pred)
        # FIX: threshold the sigmoid output, not the raw logits. The original
        # compared `pred>0.5`, which discarded the sigmoid computed on the line
        # above and used a different decision boundary than train/evaluate
        # (which threshold sigmoid(pred) at 0.5, i.e. logits at 0).
        y_hat = tf.cast(y_hat>0.5,dtype=tf.int64)
    # -- Configure the use of the gpu --
    config = tf.ConfigProto(log_device_placement=False,allow_soft_placement=True)
    #config.gpu_options.allow_growth = True, log_device_placement=True
    if FLAGS.restore : saver = tf.train.Saver()
    start = time.time()
    with tf.device(FLAGS.GPU_device):
        with tf.Session(config=config) as sess:
            if FLAGS.restore:
                saver.restore(sess,os.path.join(save_dir,"model"))
            # -- Predict the outcome predictions of the samples from the test set --
            y_hat = sess.run([y_hat], feed_dict={X: X_test,Y: y_test,is_training:FLAGS.is_training,keep_prob:1})
    end = time.time()
    elapsed=end - start
    print("Total time: {}h {}min {}sec ".format(time.gmtime(elapsed).tm_hour,
                                                time.gmtime(elapsed).tm_min,
                                                time.gmtime(elapsed).tm_sec))
    return y_hat
def main(_):
    """Entry point: derive the run directory name from FLAGS and dispatch on
    FLAGS.processing (train / evaluate / predict)."""
    run_name = 'MLP_DP={}_BN={}_EPOCHS={}_OPT={}'.format(
        FLAGS.keep_prob, FLAGS.bn, FLAGS.epochs, FLAGS.lr_method)
    save_dir = os.path.join(FLAGS.log_dir, run_name)
    if FLAGS.type_training != "":
        # Regularized runs get their own directory suffix.
        save_dir = save_dir + '_{}_ALPHA={}'.format(FLAGS.type_training, FLAGS.alpha)
    if FLAGS.processing == "train":
        t_start = time.time()
        if not os.path.isdir(save_dir):
            os.mkdir(save_dir)
        performances = train(save_dir=save_dir)
        # Persist the training histories for later analysis/plotting.
        with open(os.path.join(save_dir, "histories.txt"), "wb") as fp:
            pickle.dump(performances, fp)
        wall = time.gmtime(time.time() - t_start)
        print("Total time full process: {}h {}min {}sec".format(
            wall.tm_hour, wall.tm_min, wall.tm_sec))
    elif FLAGS.processing == "evaluate":
        evaluate(save_dir=save_dir)
    elif FLAGS.processing == "predict":
        np.savez_compressed(os.path.join(save_dir, 'y_test_hat'),
                            y_hat=predict(save_dir=save_dir))
# tf.app.run() parses the command-line flags defined above, then calls main().
if __name__ == "__main__":
    tf.app.run()
| [
"tensorflow.equal",
"numpy.count_nonzero",
"numpy.array",
"numpy.array_split",
"tensorflow.app.flags.DEFINE_bool",
"tensorflow.cast",
"numpy.arange",
"tensorflow.app.run",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.nn.sigmoid",
"os.path.isdir",
"os.mkdir",
"tensorflow.squa... | [((412, 470), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""log_dir"""', '"""../log"""', '"""log_dir"""'], {}), "('log_dir', '../log', 'log_dir')\n", (438, 470), True, 'import tensorflow as tf\n'), ((471, 592), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dir_data"""', '""""""', '"""Repository for all the files needed for the training and the evaluation"""'], {}), "('dir_data', '',\n 'Repository for all the files needed for the training and the evaluation')\n", (497, 592), True, 'import tensorflow as tf\n'), ((589, 662), 'tensorflow.app.flags.DEFINE_bool', 'tf.app.flags.DEFINE_bool', (['"""save"""', '(False)', '"""Do you need to save the model?"""'], {}), "('save', False, 'Do you need to save the model?')\n", (613, 662), True, 'import tensorflow as tf\n'), ((663, 753), 'tensorflow.app.flags.DEFINE_bool', 'tf.app.flags.DEFINE_bool', (['"""restore"""', '(False)', '"""Do you want to restore a previous model?"""'], {}), "('restore', False,\n 'Do you want to restore a previous model?')\n", (687, 753), True, 'import tensorflow as tf\n'), ((750, 822), 'tensorflow.app.flags.DEFINE_bool', 'tf.app.flags.DEFINE_bool', (['"""is_training"""', '(True)', '"""Is the model trainable?"""'], {}), "('is_training', True, 'Is the model trainable?')\n", (774, 822), True, 'import tensorflow as tf\n'), ((823, 931), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""processing"""', '"""train"""', '"""What to do with the model? {train,evaluate,predict}"""'], {}), "('processing', 'train',\n 'What to do with the model? 
{train,evaluate,predict}')\n", (849, 931), True, 'import tensorflow as tf\n'), ((972, 1039), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""n_input"""', '(54675)', '"""number of features"""'], {}), "('n_input', 54675, 'number of features')\n", (999, 1039), True, 'import tensorflow as tf\n'), ((1040, 1104), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""n_classes"""', '(1)', '"""number of classes"""'], {}), "('n_classes', 1, 'number of classes')\n", (1067, 1104), True, 'import tensorflow as tf\n'), ((1105, 1167), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""n_layers"""', '(6)', '"""number of layers"""'], {}), "('n_layers', 6, 'number of layers')\n", (1132, 1167), True, 'import tensorflow as tf\n'), ((1168, 1267), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""n_hidden_1"""', '(1574)', '"""number of neurons for the first hidden layer"""'], {}), "('n_hidden_1', 1574,\n 'number of neurons for the first hidden layer')\n", (1195, 1267), True, 'import tensorflow as tf\n'), ((1273, 1373), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""n_hidden_2"""', '(1386)', '"""number of neurons for the second hidden layer"""'], {}), "('n_hidden_2', 1386,\n 'number of neurons for the second hidden layer')\n", (1300, 1373), True, 'import tensorflow as tf\n'), ((1379, 1477), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""n_hidden_3"""', '(951)', '"""number of neurons for the third hidden layer"""'], {}), "('n_hidden_3', 951,\n 'number of neurons for the third hidden layer')\n", (1406, 1477), True, 'import tensorflow as tf\n'), ((1483, 1582), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""n_hidden_4"""', '(515)', '"""number of neurons for the fourth hidden layer"""'], {}), "('n_hidden_4', 515,\n 'number of neurons for the fourth hidden layer')\n", (1510, 1582), True, 'import tensorflow as 
tf\n'), ((1588, 1686), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""n_hidden_5"""', '(255)', '"""number of neurons for the fifth hidden layer"""'], {}), "('n_hidden_5', 255,\n 'number of neurons for the fifth hidden layer')\n", (1615, 1686), True, 'import tensorflow as tf\n'), ((1692, 1789), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""n_hidden_6"""', '(90)', '"""number of neurons for the sixth hidden layer"""'], {}), "('n_hidden_6', 90,\n 'number of neurons for the sixth hidden layer')\n", (1719, 1789), True, 'import tensorflow as tf\n'), ((1833, 1932), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""lr_method"""', '"""adam"""', '"""optimizer {adam, momentum, adagrad, rmsprop}"""'], {}), "('lr_method', 'adam',\n 'optimizer {adam, momentum, adagrad, rmsprop}')\n", (1859, 1932), True, 'import tensorflow as tf\n'), ((1929, 2003), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate"""', '(0.001)', '"""initial learning rate"""'], {}), "('learning_rate', 0.001, 'initial learning rate')\n", (1954, 2003), True, 'import tensorflow as tf\n'), ((2004, 2071), 'tensorflow.app.flags.DEFINE_bool', 'tf.app.flags.DEFINE_bool', (['"""bn"""', '(False)', '"""use of batch normalization"""'], {}), "('bn', False, 'use of batch normalization')\n", (2028, 2071), True, 'import tensorflow as tf\n'), ((2072, 2151), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""keep_prob"""', '(0.4)', '"""keep probability for the dropout"""'], {}), "('keep_prob', 0.4, 'keep probability for the dropout')\n", (2097, 2151), True, 'import tensorflow as tf\n'), ((2152, 2245), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""type_training"""', '"""LGO"""', '"""regularization term {, LGO, L2, L1}"""'], {}), "('type_training', 'LGO',\n 'regularization term {, LGO, L2, L1}')\n", (2178, 2245), True, 'import tensorflow as tf\n'), ((2244, 2318), 
'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""alpha"""', '(1)', '"""value of the hyperparameter alpha"""'], {}), "('alpha', 1, 'value of the hyperparameter alpha')\n", (2269, 2318), True, 'import tensorflow as tf\n'), ((2319, 2404), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""display_step"""', '(5)', '"""when to print the performances"""'], {}), "('display_step', 5, 'when to print the performances'\n )\n", (2346, 2404), True, 'import tensorflow as tf\n'), ((2400, 2490), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(2 ** 9)', '"""the number of examples in a batch"""'], {}), "('batch_size', 2 ** 9,\n 'the number of examples in a batch')\n", (2427, 2490), True, 'import tensorflow as tf\n'), ((2485, 2563), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""epochs"""', '(20)', '"""the number of epochs for training"""'], {}), "('epochs', 20, 'the number of epochs for training')\n", (2512, 2563), True, 'import tensorflow as tf\n'), ((2564, 2628), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""GPU_device"""', '"""/gpu:0"""', '"""GPU device"""'], {}), "('GPU_device', '/gpu:0', 'GPU device')\n", (2590, 2628), True, 'import tensorflow as tf\n'), ((2816, 2849), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (2839, 2849), False, 'import warnings\n'), ((3139, 3150), 'time.time', 'time.time', ([], {}), '()\n', (3148, 3150), False, 'import time\n'), ((4519, 4530), 'time.time', 'time.time', ([], {}), '()\n', (4528, 4530), False, 'import time\n'), ((4766, 4777), 'time.time', 'time.time', ([], {}), '()\n', (4775, 4777), False, 'import time\n'), ((5187, 5198), 'time.time', 'time.time', ([], {}), '()\n', (5196, 5198), False, 'import time\n'), ((5545, 5569), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (5567, 5569), True, 'import tensorflow as tf\n'), 
((5615, 5670), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, FLAGS.n_input]'}), '(tf.float32, shape=[None, FLAGS.n_input])\n', (5629, 5670), True, 'import tensorflow as tf\n'), ((5679, 5736), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, FLAGS.n_classes]'}), '(tf.float32, shape=[None, FLAGS.n_classes])\n', (5693, 5736), True, 'import tensorflow as tf\n'), ((5806, 5849), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'name': '"""is_training"""'}), "(tf.bool, name='is_training')\n", (5820, 5849), True, 'import tensorflow as tf\n'), ((5897, 5945), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""learning_rate"""'}), "(tf.float32, name='learning_rate')\n", (5911, 5945), True, 'import tensorflow as tf\n'), ((5989, 6033), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""keep_prob"""'}), "(tf.float32, name='keep_prob')\n", (6003, 6033), True, 'import tensorflow as tf\n'), ((6121, 6419), 'base_model.BaseModel', 'BaseModel', ([], {'X': 'X', 'n_input': 'FLAGS.n_input', 'n_classes': 'FLAGS.n_classes', 'n_hidden_1': 'FLAGS.n_hidden_1', 'n_hidden_2': 'FLAGS.n_hidden_2', 'n_hidden_3': 'FLAGS.n_hidden_3', 'n_hidden_4': 'FLAGS.n_hidden_4', 'n_hidden_5': 'FLAGS.n_hidden_5', 'n_hidden_6': 'FLAGS.n_hidden_6', 'keep_prob': 'keep_prob', 'is_training': 'is_training'}), '(X=X, n_input=FLAGS.n_input, n_classes=FLAGS.n_classes, n_hidden_1\n =FLAGS.n_hidden_1, n_hidden_2=FLAGS.n_hidden_2, n_hidden_3=FLAGS.\n n_hidden_3, n_hidden_4=FLAGS.n_hidden_4, n_hidden_5=FLAGS.n_hidden_5,\n n_hidden_6=FLAGS.n_hidden_6, keep_prob=keep_prob, is_training=is_training)\n', (6130, 6419), False, 'from base_model import BaseModel\n'), ((9293, 9326), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9324, 9326), True, 'import tensorflow as tf\n'), ((9382, 9451), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': 
'(False)', 'allow_soft_placement': '(True)'}), '(log_device_placement=False, allow_soft_placement=True)\n', (9396, 9451), True, 'import tensorflow as tf\n'), ((9598, 9609), 'time.time', 'time.time', ([], {}), '()\n', (9607, 9609), False, 'import time\n'), ((13021, 13032), 'time.time', 'time.time', ([], {}), '()\n', (13030, 13032), False, 'import time\n'), ((13824, 13857), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (13847, 13857), False, 'import warnings\n'), ((14147, 14158), 'time.time', 'time.time', ([], {}), '()\n', (14156, 14158), False, 'import time\n'), ((15476, 15487), 'time.time', 'time.time', ([], {}), '()\n', (15485, 15487), False, 'import time\n'), ((15909, 15920), 'time.time', 'time.time', ([], {}), '()\n', (15918, 15920), False, 'import time\n'), ((16269, 16293), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (16291, 16293), True, 'import tensorflow as tf\n'), ((16339, 16394), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, FLAGS.n_input]'}), '(tf.float32, shape=[None, FLAGS.n_input])\n', (16353, 16394), True, 'import tensorflow as tf\n'), ((16403, 16460), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, FLAGS.n_classes]'}), '(tf.float32, shape=[None, FLAGS.n_classes])\n', (16417, 16460), True, 'import tensorflow as tf\n'), ((16530, 16573), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'name': '"""is_training"""'}), "(tf.bool, name='is_training')\n", (16544, 16573), True, 'import tensorflow as tf\n'), ((16617, 16661), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""keep_prob"""'}), "(tf.float32, name='keep_prob')\n", (16631, 16661), True, 'import tensorflow as tf\n'), ((16700, 16998), 'base_model.BaseModel', 'BaseModel', ([], {'X': 'X', 'n_input': 'FLAGS.n_input', 'n_classes': 'FLAGS.n_classes', 'n_hidden_1': 'FLAGS.n_hidden_1', 'n_hidden_2': 'FLAGS.n_hidden_2', 
'n_hidden_3': 'FLAGS.n_hidden_3', 'n_hidden_4': 'FLAGS.n_hidden_4', 'n_hidden_5': 'FLAGS.n_hidden_5', 'n_hidden_6': 'FLAGS.n_hidden_6', 'keep_prob': 'keep_prob', 'is_training': 'is_training'}), '(X=X, n_input=FLAGS.n_input, n_classes=FLAGS.n_classes, n_hidden_1\n =FLAGS.n_hidden_1, n_hidden_2=FLAGS.n_hidden_2, n_hidden_3=FLAGS.\n n_hidden_3, n_hidden_4=FLAGS.n_hidden_4, n_hidden_5=FLAGS.n_hidden_5,\n n_hidden_6=FLAGS.n_hidden_6, keep_prob=keep_prob, is_training=is_training)\n', (16709, 16998), False, 'from base_model import BaseModel\n'), ((18693, 18762), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': '(False)', 'allow_soft_placement': '(True)'}), '(log_device_placement=False, allow_soft_placement=True)\n', (18707, 18762), True, 'import tensorflow as tf\n'), ((18895, 18906), 'time.time', 'time.time', ([], {}), '()\n', (18904, 18906), False, 'import time\n'), ((19439, 19450), 'time.time', 'time.time', ([], {}), '()\n', (19448, 19450), False, 'import time\n'), ((19664, 19697), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (19687, 19697), False, 'import warnings\n'), ((19987, 19998), 'time.time', 'time.time', ([], {}), '()\n', (19996, 19998), False, 'import time\n'), ((21316, 21327), 'time.time', 'time.time', ([], {}), '()\n', (21325, 21327), False, 'import time\n'), ((21749, 21760), 'time.time', 'time.time', ([], {}), '()\n', (21758, 21760), False, 'import time\n'), ((22109, 22133), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (22131, 22133), True, 'import tensorflow as tf\n'), ((22179, 22234), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, FLAGS.n_input]'}), '(tf.float32, shape=[None, FLAGS.n_input])\n', (22193, 22234), True, 'import tensorflow as tf\n'), ((22243, 22300), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, FLAGS.n_classes]'}), '(tf.float32, shape=[None, FLAGS.n_classes])\n', (22257, 
22300), True, 'import tensorflow as tf\n'), ((22370, 22413), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'name': '"""is_training"""'}), "(tf.bool, name='is_training')\n", (22384, 22413), True, 'import tensorflow as tf\n'), ((22457, 22501), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""keep_prob"""'}), "(tf.float32, name='keep_prob')\n", (22471, 22501), True, 'import tensorflow as tf\n'), ((22540, 22838), 'base_model.BaseModel', 'BaseModel', ([], {'X': 'X', 'n_input': 'FLAGS.n_input', 'n_classes': 'FLAGS.n_classes', 'n_hidden_1': 'FLAGS.n_hidden_1', 'n_hidden_2': 'FLAGS.n_hidden_2', 'n_hidden_3': 'FLAGS.n_hidden_3', 'n_hidden_4': 'FLAGS.n_hidden_4', 'n_hidden_5': 'FLAGS.n_hidden_5', 'n_hidden_6': 'FLAGS.n_hidden_6', 'keep_prob': 'keep_prob', 'is_training': 'is_training'}), '(X=X, n_input=FLAGS.n_input, n_classes=FLAGS.n_classes, n_hidden_1\n =FLAGS.n_hidden_1, n_hidden_2=FLAGS.n_hidden_2, n_hidden_3=FLAGS.\n n_hidden_3, n_hidden_4=FLAGS.n_hidden_4, n_hidden_5=FLAGS.n_hidden_5,\n n_hidden_6=FLAGS.n_hidden_6, keep_prob=keep_prob, is_training=is_training)\n', (22549, 22838), False, 'from base_model import BaseModel\n'), ((23128, 23197), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': '(False)', 'allow_soft_placement': '(True)'}), '(log_device_placement=False, allow_soft_placement=True)\n', (23142, 23197), True, 'import tensorflow as tf\n'), ((23330, 23341), 'time.time', 'time.time', ([], {}), '()\n', (23339, 23341), False, 'import time\n'), ((23762, 23773), 'time.time', 'time.time', ([], {}), '()\n', (23771, 23773), False, 'import time\n'), ((25086, 25098), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (25096, 25098), True, 'import tensorflow as tf\n'), ((2710, 2724), 'tensorflow.math.abs', 'tf.math.abs', (['x'], {}), '(x)\n', (2721, 2724), True, 'import tensorflow as tf\n'), ((2774, 2786), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (2783, 2786), True, 'import tensorflow as 
tf\n'), ((3547, 3605), 'numpy.array', 'np.array', (['first_matrix_connection.values'], {'dtype': 'np.float32'}), '(first_matrix_connection.values, dtype=np.float32)\n', (3555, 3605), True, 'import numpy as np\n'), ((4445, 4507), 'numpy.ones', 'np.ones', (['(FLAGS.n_hidden_6, FLAGS.n_classes)'], {'dtype': 'np.float32'}), '((FLAGS.n_hidden_6, FLAGS.n_classes), dtype=np.float32)\n', (4452, 4507), True, 'import numpy as np\n'), ((4955, 4978), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_train'], {}), '(y_train)\n', (4969, 4978), False, 'from tensorflow.keras.utils import to_categorical\n'), ((5153, 5175), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_test'], {}), '(y_test)\n', (5167, 5175), False, 'from tensorflow.keras.utils import to_categorical\n'), ((8903, 8922), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['pred'], {}), '(pred)\n', (8916, 8922), True, 'import tensorflow as tf\n'), ((8940, 8979), 'tensorflow.cast', 'tf.cast', (['(sig_pred > 0.5)'], {'dtype': 'tf.int64'}), '(sig_pred > 0.5, dtype=tf.int64)\n', (8947, 8979), True, 'import tensorflow as tf\n'), ((8998, 9024), 'tensorflow.cast', 'tf.cast', (['Y'], {'dtype': 'tf.int64'}), '(Y, dtype=tf.int64)\n', (9005, 9024), True, 'import tensorflow as tf\n'), ((9053, 9085), 'tensorflow.equal', 'tf.equal', (['sig_pred', 'ground_truth'], {}), '(sig_pred, ground_truth)\n', (9061, 9085), True, 'import tensorflow as tf\n'), ((9201, 9240), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (9208, 9240), True, 'import tensorflow as tf\n'), ((9568, 9584), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (9582, 9584), True, 'import tensorflow as tf\n'), ((9620, 9647), 'tensorflow.device', 'tf.device', (['FLAGS.GPU_device'], {}), '(FLAGS.GPU_device)\n', (9629, 9647), True, 'import tensorflow as tf\n'), ((14189, 14235), 'os.path.join', 'os.path.join', (['FLAGS.dir_data', '"""adj_matrix.csv"""'], {}), "(FLAGS.dir_data, 
'adj_matrix.csv')\n", (14201, 14235), False, 'import os\n'), ((14290, 14352), 'os.path.join', 'os.path.join', (['FLAGS.dir_data', '"""first_matrix_connection_GO.csv"""'], {}), "(FLAGS.dir_data, 'first_matrix_connection_GO.csv')\n", (14302, 14352), False, 'import os\n'), ((14390, 14434), 'os.path.join', 'os.path.join', (['FLAGS.dir_data', '"""go_level.csv"""'], {}), "(FLAGS.dir_data, 'go_level.csv')\n", (14402, 14434), False, 'import os\n'), ((14504, 14562), 'numpy.array', 'np.array', (['first_matrix_connection.values'], {'dtype': 'np.float32'}), '(first_matrix_connection.values, dtype=np.float32)\n', (14512, 14562), True, 'import numpy as np\n'), ((15402, 15464), 'numpy.ones', 'np.ones', (['(FLAGS.n_hidden_6, FLAGS.n_classes)'], {'dtype': 'np.float32'}), '((FLAGS.n_hidden_6, FLAGS.n_classes), dtype=np.float32)\n', (15409, 15464), True, 'import numpy as np\n'), ((15740, 15782), 'os.path.join', 'os.path.join', (['FLAGS.dir_data', '"""X_test.npz"""'], {}), "(FLAGS.dir_data, 'X_test.npz')\n", (15752, 15782), False, 'import os\n'), ((15875, 15897), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_test'], {}), '(y_test)\n', (15889, 15897), False, 'from tensorflow.keras.utils import to_categorical\n'), ((18297, 18316), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['pred'], {}), '(pred)\n', (18310, 18316), True, 'import tensorflow as tf\n'), ((18334, 18373), 'tensorflow.cast', 'tf.cast', (['(sig_pred > 0.5)'], {'dtype': 'tf.int64'}), '(sig_pred > 0.5, dtype=tf.int64)\n', (18341, 18373), True, 'import tensorflow as tf\n'), ((18392, 18418), 'tensorflow.cast', 'tf.cast', (['Y'], {'dtype': 'tf.int64'}), '(Y, dtype=tf.int64)\n', (18399, 18418), True, 'import tensorflow as tf\n'), ((18447, 18479), 'tensorflow.equal', 'tf.equal', (['sig_pred', 'ground_truth'], {}), '(sig_pred, ground_truth)\n', (18455, 18479), True, 'import tensorflow as tf\n'), ((18595, 18634), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, 
tf.float32)\n', (18602, 18634), True, 'import tensorflow as tf\n'), ((18865, 18881), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (18879, 18881), True, 'import tensorflow as tf\n'), ((18917, 18944), 'tensorflow.device', 'tf.device', (['FLAGS.GPU_device'], {}), '(FLAGS.GPU_device)\n', (18926, 18944), True, 'import tensorflow as tf\n'), ((20029, 20075), 'os.path.join', 'os.path.join', (['FLAGS.dir_data', '"""adj_matrix.csv"""'], {}), "(FLAGS.dir_data, 'adj_matrix.csv')\n", (20041, 20075), False, 'import os\n'), ((20130, 20192), 'os.path.join', 'os.path.join', (['FLAGS.dir_data', '"""first_matrix_connection_GO.csv"""'], {}), "(FLAGS.dir_data, 'first_matrix_connection_GO.csv')\n", (20142, 20192), False, 'import os\n'), ((20230, 20274), 'os.path.join', 'os.path.join', (['FLAGS.dir_data', '"""go_level.csv"""'], {}), "(FLAGS.dir_data, 'go_level.csv')\n", (20242, 20274), False, 'import os\n'), ((20344, 20402), 'numpy.array', 'np.array', (['first_matrix_connection.values'], {'dtype': 'np.float32'}), '(first_matrix_connection.values, dtype=np.float32)\n', (20352, 20402), True, 'import numpy as np\n'), ((21242, 21304), 'numpy.ones', 'np.ones', (['(FLAGS.n_hidden_6, FLAGS.n_classes)'], {'dtype': 'np.float32'}), '((FLAGS.n_hidden_6, FLAGS.n_classes), dtype=np.float32)\n', (21249, 21304), True, 'import numpy as np\n'), ((21580, 21622), 'os.path.join', 'os.path.join', (['FLAGS.dir_data', '"""X_test.npz"""'], {}), "(FLAGS.dir_data, 'X_test.npz')\n", (21592, 21622), False, 'import os\n'), ((21715, 21737), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_test'], {}), '(y_test)\n', (21729, 21737), False, 'from tensorflow.keras.utils import to_categorical\n'), ((22960, 22978), 'tensorflow.argmax', 'tf.argmax', (['pred', '(1)'], {}), '(pred, 1)\n', (22969, 22978), True, 'import tensorflow as tf\n'), ((23004, 23023), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['pred'], {}), '(pred)\n', (23017, 23023), True, 'import tensorflow as tf\n'), ((23040, 23075), 
'tensorflow.cast', 'tf.cast', (['(pred > 0.5)'], {'dtype': 'tf.int64'}), '(pred > 0.5, dtype=tf.int64)\n', (23047, 23075), True, 'import tensorflow as tf\n'), ((23300, 23316), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (23314, 23316), True, 'import tensorflow as tf\n'), ((23352, 23379), 'tensorflow.device', 'tf.device', (['FLAGS.GPU_device'], {}), '(FLAGS.GPU_device)\n', (23361, 23379), True, 'import tensorflow as tf\n'), ((24294, 24305), 'time.time', 'time.time', ([], {}), '()\n', (24303, 24305), False, 'import time\n'), ((24588, 24599), 'time.time', 'time.time', ([], {}), '()\n', (24597, 24599), False, 'import time\n'), ((3197, 3243), 'os.path.join', 'os.path.join', (['FLAGS.dir_data', '"""adj_matrix.csv"""'], {}), "(FLAGS.dir_data, 'adj_matrix.csv')\n", (3209, 3243), False, 'import os\n'), ((3315, 3377), 'os.path.join', 'os.path.join', (['FLAGS.dir_data', '"""first_matrix_connection_GO.csv"""'], {}), "(FLAGS.dir_data, 'first_matrix_connection_GO.csv')\n", (3327, 3377), False, 'import os\n'), ((3432, 3476), 'os.path.join', 'os.path.join', (['FLAGS.dir_data', '"""go_level.csv"""'], {}), "(FLAGS.dir_data, 'go_level.csv')\n", (3444, 3476), False, 'import os\n'), ((4815, 4858), 'os.path.join', 'os.path.join', (['FLAGS.dir_data', '"""X_train.npz"""'], {}), "(FLAGS.dir_data, 'X_train.npz')\n", (4827, 4858), False, 'import os\n'), ((5017, 5059), 'os.path.join', 'os.path.join', (['FLAGS.dir_data', '"""X_test.npz"""'], {}), "(FLAGS.dir_data, 'X_test.npz')\n", (5029, 5059), False, 'import os\n'), ((6634, 6699), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'pred', 'labels': 'Y'}), '(logits=pred, labels=Y)\n', (6676, 6699), True, 'import tensorflow as tf\n'), ((6745, 6807), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'logits': 'pred', 'labels': 'Y'}), '(logits=pred, labels=Y)\n', (6784, 6807), True, 'import tensorflow as tf\n'), 
((7783, 7836), 'tensorflow.norm', 'tf.norm', (['(weight * (1 - connection_matrix[idx]))'], {'ord': '(1)'}), '(weight * (1 - connection_matrix[idx]), ord=1)\n', (7790, 7836), True, 'import tensorflow as tf\n'), ((7834, 7878), 'numpy.count_nonzero', 'np.count_nonzero', (['(1 - connection_matrix[idx])'], {}), '(1 - connection_matrix[idx])\n', (7850, 7878), True, 'import numpy as np\n'), ((7906, 7953), 'tensorflow.norm', 'tf.norm', (['(weight * connection_matrix[idx])'], {'ord': '(1)'}), '(weight * connection_matrix[idx], ord=1)\n', (7913, 7953), True, 'import tensorflow as tf\n'), ((7953, 7993), 'numpy.count_nonzero', 'np.count_nonzero', (['connection_matrix[idx]'], {}), '(connection_matrix[idx])\n', (7969, 7993), True, 'import numpy as np\n'), ((8133, 8175), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (8150, 8175), True, 'import tensorflow as tf\n'), ((8236, 8287), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (8258, 8287), True, 'import tensorflow as tf\n'), ((8840, 8858), 'tensorflow.argmax', 'tf.argmax', (['pred', '(1)'], {}), '(pred, 1)\n', (8849, 8858), True, 'import tensorflow as tf\n'), ((8859, 8874), 'tensorflow.argmax', 'tf.argmax', (['Y', '(1)'], {}), '(Y, 1)\n', (8868, 8874), True, 'import tensorflow as tf\n'), ((9662, 9687), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (9672, 9687), True, 'import tensorflow as tf\n'), ((17212, 17277), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'pred', 'labels': 'Y'}), '(logits=pred, labels=Y)\n', (17254, 17277), True, 'import tensorflow as tf\n'), ((17323, 17385), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'logits': 'pred', 'labels': 'Y'}), '(logits=pred, labels=Y)\n', (17362, 17385), True, 
'import tensorflow as tf\n'), ((18234, 18252), 'tensorflow.argmax', 'tf.argmax', (['pred', '(1)'], {}), '(pred, 1)\n', (18243, 18252), True, 'import tensorflow as tf\n'), ((18253, 18268), 'tensorflow.argmax', 'tf.argmax', (['Y', '(1)'], {}), '(Y, 1)\n', (18262, 18268), True, 'import tensorflow as tf\n'), ((18959, 18984), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (18969, 18984), True, 'import tensorflow as tf\n'), ((23394, 23419), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (23404, 23419), True, 'import tensorflow as tf\n'), ((24330, 24353), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (24343, 24353), False, 'import os\n'), ((24368, 24386), 'os.mkdir', 'os.mkdir', (['save_dir'], {}), '(save_dir)\n', (24376, 24386), False, 'import os\n'), ((24543, 24572), 'pickle.dump', 'pickle.dump', (['performances', 'fp'], {}), '(performances, fp)\n', (24554, 24572), False, 'import pickle\n'), ((4602, 4622), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (4613, 4622), False, 'import time\n'), ((4636, 4656), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (4647, 4656), False, 'import time\n'), ((4669, 4689), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (4680, 4689), False, 'import time\n'), ((5270, 5290), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (5281, 5290), False, 'import time\n'), ((5304, 5324), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (5315, 5324), False, 'import time\n'), ((5337, 5357), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (5348, 5357), False, 'import time\n'), ((8354, 8447), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', ([], {'learning_rate': 'learning_rate', 'momentum': '(0.09)', 'use_nesterov': '(True)'}), '(learning_rate=learning_rate, momentum=0.09,\n use_nesterov=True)\n', (8380, 8447), True, 'import tensorflow as 
tf\n'), ((10143, 10169), 'numpy.arange', 'np.arange', (['(0)', 'FLAGS.epochs'], {}), '(0, FLAGS.epochs)\n', (10152, 10169), True, 'import numpy as np\n'), ((10196, 10223), 'numpy.arange', 'np.arange', (['X_train.shape[0]'], {}), '(X_train.shape[0])\n', (10205, 10223), True, 'import numpy as np\n'), ((10240, 10264), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (10257, 10264), True, 'import numpy as np\n'), ((10291, 10336), 'numpy.array_split', 'np.array_split', (['X_train[index]', 'total_batches'], {}), '(X_train[index], total_batches)\n', (10305, 10336), True, 'import numpy as np\n'), ((10363, 10408), 'numpy.array_split', 'np.array_split', (['y_train[index]', 'total_batches'], {}), '(y_train[index], total_batches)\n', (10377, 10408), True, 'import numpy as np\n'), ((13105, 13125), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (13116, 13125), False, 'import time\n'), ((13139, 13159), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (13150, 13159), False, 'import time\n'), ((13172, 13192), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (13183, 13192), False, 'import time\n'), ((15559, 15579), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (15570, 15579), False, 'import time\n'), ((15593, 15613), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (15604, 15613), False, 'import time\n'), ((15626, 15646), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (15637, 15646), False, 'import time\n'), ((15992, 16012), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (16003, 16012), False, 'import time\n'), ((16026, 16046), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (16037, 16046), False, 'import time\n'), ((16059, 16079), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (16070, 16079), False, 'import time\n'), ((19523, 19543), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (19534, 19543), 
False, 'import time\n'), ((19557, 19577), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (19568, 19577), False, 'import time\n'), ((19590, 19610), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (19601, 19610), False, 'import time\n'), ((21399, 21419), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (21410, 21419), False, 'import time\n'), ((21433, 21453), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (21444, 21453), False, 'import time\n'), ((21466, 21486), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (21477, 21486), False, 'import time\n'), ((21832, 21852), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (21843, 21852), False, 'import time\n'), ((21866, 21886), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (21877, 21886), False, 'import time\n'), ((21899, 21919), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (21910, 21919), False, 'import time\n'), ((23846, 23866), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (23857, 23866), False, 'import time\n'), ((23880, 23900), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (23891, 23900), False, 'import time\n'), ((23913, 23933), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (23924, 23933), False, 'import time\n'), ((24456, 24495), 'os.path.join', 'os.path.join', (['save_dir', '"""histories.txt"""'], {}), "(save_dir, 'histories.txt')\n", (24468, 24495), False, 'import os\n'), ((8509, 8563), 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (8534, 8563), True, 'import tensorflow as tf\n'), ((19061, 19092), 'os.path.join', 'os.path.join', (['save_dir', '"""model"""'], {}), "(save_dir, 'model')\n", (19073, 19092), False, 'import os\n'), ((23496, 23527), 'os.path.join', 'os.path.join', (['save_dir', '"""model"""'], {}), "(save_dir, 'model')\n", 
(23508, 23527), False, 'import os\n'), ((24698, 24718), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (24709, 24718), False, 'import time\n'), ((24736, 24756), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (24747, 24756), False, 'import time\n'), ((24773, 24793), 'time.gmtime', 'time.gmtime', (['elapsed'], {}), '(elapsed)\n', (24784, 24793), False, 'import time\n'), ((24980, 25016), 'os.path.join', 'os.path.join', (['save_dir', '"""y_test_hat"""'], {}), "(save_dir, 'y_test_hat')\n", (24992, 25016), False, 'import os\n'), ((8627, 8681), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (8652, 8681), True, 'import tensorflow as tf\n'), ((12978, 13009), 'os.path.join', 'os.path.join', (['save_dir', '"""model"""'], {}), "(save_dir, 'model')\n", (12990, 13009), False, 'import os\n')] |
import torch.nn as nn
from HeadNeRFOptions import BaseOptions
from RenderUtils import ExtractLandMarkPosition, SoftSimpleShader
import torch
import torch.nn.functional as F
import FaceModels
from pytorch3d.structures import Meshes
from pytorch3d.renderer import (
PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader)
import numpy as np
class NL3DMMRenderer(nn.Module):
    """Differentiable mesh renderer for the nonlinear 3DMM face model.

    Decodes latent identity/expression codes into vertex positions and a
    texture code into per-vertex albedo, shades the mesh with 2nd-order
    spherical-harmonics (SH) lighting, rasterizes it with PyTorch3D, and
    projects 3D facial landmarks to 2D image coordinates.
    """

    def __init__(self, img_size, opt: BaseOptions):
        super().__init__()
        self.opt = opt
        # Output image is square: height == width == img_size.
        self.img_h = img_size
        self.img_w = img_size
        self.build_info()
        self.build_nl3dmm()
        self.build_tool_funcs()
        self.set_3dmmdecoder_eval()

    def build_nl3dmm(self):
        """Instantiate the 3DMM decoders (linear texture, nonlinear shape)."""
        self.decoder_3dmm = FaceModels.Linear_3DMM(self.opt)
        self.decoder_nl3dmm_new = FaceModels.NonLinear_3DMM(self.opt)

    def build_info(self):
        """Load mesh topology buffers and precompute SH lighting constants."""
        topo_info = np.load("ConfigFiles/nl_3dmm_topo_info.npz")
        # fv_indices: (n_faces, 3) vertex indices of each triangle.
        tris = torch.as_tensor(topo_info['fv_indices']).long()
        # corr_vf_indices: per-vertex indices of incident faces, used to
        # average face normals into vertex normals in calc_normal.
        vert_tris = torch.as_tensor(topo_info['corr_vf_indices']).long()
        self.register_buffer("tris", tris)
        self.register_buffer("corr_vf_indices", vert_tris)

        # Constants of the first three SH bands consumed by build_color.
        self.a0 = np.pi
        self.a1 = 2 * np.pi / np.sqrt(3.0)
        self.a2 = 2 * np.pi / np.sqrt(8.0)
        self.c0 = 1 / np.sqrt(4 * np.pi)
        self.c1 = np.sqrt(3.0) / np.sqrt(4 * np.pi)
        self.c2 = 3 * np.sqrt(5.0) / np.sqrt(12 * np.pi)
        self.d0 = 0.5 / np.sqrt(3.0)

    def build_tool_funcs(self):
        """Build the helper that extracts 3D landmark positions from vertices."""
        self.extract_lm3d_func = ExtractLandMarkPosition()

    def set_3dmmdecoder_eval(self):
        """Force both 3DMM decoders into eval mode (their weights stay frozen)."""
        self.decoder_3dmm.eval()
        self.decoder_nl3dmm_new.eval()

    def train(self, mode=True):
        r"""Sets the module in training mode, except the 3DMM decoders,
        which are always kept in eval mode."""
        self.training = mode
        for module in self.children():
            module.train(mode)
        self.set_3dmmdecoder_eval()
        return self

    def calc_geometry_Albedo(self, iden_codes, text_codes, expr_codes):
        """Decode latent codes into vertex positions and per-vertex albedo."""
        batch_vps = self.decoder_nl3dmm_new(iden_codes, expr_codes)
        batch_vcs = self.decoder_3dmm(text_codes)
        return batch_vps, batch_vcs

    def calc_normal(self, geometry):
        """Compute unit vertex normals from batched vertex positions.

        geometry: [B, n_v, 3] vertex positions.
        Returns [B, n_v, 3] per-vertex unit normals, obtained by summing
        the normals of each vertex's incident faces.
        """
        vert_1 = geometry[:, self.tris[:, 0], :]
        vert_2 = geometry[:, self.tris[:, 1], :]
        vert_3 = geometry[:, self.tris[:, 2], :]
        nnorm = torch.cross(vert_2 - vert_1, vert_3 - vert_1, dim=2)
        tri_normal = F.normalize(nnorm, dim=2)
        # Append one zero face-normal; presumably corr_vf_indices uses the
        # last index as padding so it contributes nothing to the vertex sum
        # -- TODO confirm against the topology file.
        tri_normal = F.pad(tri_normal, [0, 0, 0, 1, 0, 0], mode="constant", value=0)
        v_norm = tri_normal[:, self.corr_vf_indices, :].sum(2)
        vert_normal = F.normalize(v_norm, dim=-1)
        return vert_normal

    def build_color(self, batch_vcolor, batch_norm, batch_gamma):
        """Shade per-vertex albedo with 2nd-order SH illumination.

        batch_vcolor: [1, n_v, 3] per-vertex albedo
        batch_norm:   [B, n_v, 3] per-vertex normals
        batch_gamma:  [B, 27] SH lighting coefficients (9 per RGB channel)
        Returns [B, n_v, 3] shaded vertex colors.
        """
        n_b, num_vertex, _ = batch_norm.size()
        gamma = batch_gamma.view(-1, 9, 3)

        norm = batch_norm.view(-1, 3)
        nx, ny, nz = norm[:, 0], norm[:, 1], norm[:, 2]

        # Evaluate the 9 SH basis functions at every normal.
        Y0 = torch.ones_like(nx) * self.a0 * self.c0
        arrH = []
        arrH.append(Y0)
        arrH.append(-self.a1 * self.c1 * ny)
        arrH.append(self.a1 * self.c1 * nz)
        arrH.append(-self.a1 * self.c1 * nx)
        arrH.append(self.a2 * self.c2 * nx * ny)
        arrH.append(-self.a2 * self.c2 * ny * nz)
        arrH.append(self.a2 * self.c2 * self.d0 * (3 * nz.pow(2) - 1))
        arrH.append(-self.a2 * self.c2 * nx * nz)
        arrH.append(self.a2 * self.c2 * 0.5 * (nx.pow(2) - ny.pow(2)))

        H = torch.stack(arrH, 1)
        Y = H.view(n_b, num_vertex, 9)
        lighting = Y.bmm(gamma)

        face_color = batch_vcolor * lighting
        return face_color

    def calc_ProjUV(self, cam_vps, batch_inmat):
        """Project camera-space vertices to pixel coordinates.

        cam_vps:     [B, n_v, 3] camera-space positions
        batch_inmat: [B, 3, 3] camera intrinsics
        Returns [B, n_v, 2] (u, v) coordinates.
        """
        # Small epsilon guards against division by zero depth.
        tv = cam_vps[:, :, 2:3] + 1e-7
        temp_uvs = cam_vps / tv
        uv = torch.bmm(temp_uvs, batch_inmat.permute(0, 2, 1))
        return uv[:, :, :2]

    def generate_renderer(self, batch_inmats):
        """Build a PyTorch3D mesh renderer with NDC-space cameras derived
        from the batch of pixel-space intrinsics `batch_inmats` ([B, 3, 3])."""
        cur_device = batch_inmats.device
        batch_size = batch_inmats.size(0)
        cur_dtype = batch_inmats.dtype

        # Convert pixel-space intrinsics to NDC focal length / principal
        # point. NOTE(review): fy is divided by half_w rather than half_h;
        # this is only equivalent because img_h == img_w here.
        half_w = self.img_w * 0.5
        half_h = self.img_h * 0.5
        focal_info = torch.stack([batch_inmats[:, 0, 0] / half_w, batch_inmats[:, 1, 1] / half_w], dim=-1)
        center_info = torch.stack([batch_inmats[:, 0, 2] / half_w - 1.0, batch_inmats[:, 1, 2] / half_h - 1.0], dim=-1)

        # Flip the x/y axes to match the PyTorch3D camera convention.
        iden_mat = torch.eye(3)
        iden_mat[0, 0] = -1.0
        iden_mat[1, 1] = -1.0
        temp_Rmat = iden_mat.unsqueeze(0).expand(batch_size, -1, -1)
        temp_Vec = torch.zeros((batch_size, 3), dtype=cur_dtype)
        cameras = PerspectiveCameras(
            focal_length=focal_info,
            principal_point=center_info,
            R=temp_Rmat,
            T=temp_Vec,
            device=cur_device
        )
        # (A screen-space, in_ndc=False variant of this camera setup is
        # used in generate_renderer_for_eval.)

        # Light: purely ambient, so pixel color comes solely from the
        # SH-shaded vertex colors.
        lights = PointLights(
            location=[[0.0, 0.0, 1e5]],
            ambient_color=[[1, 1, 1]],
            specular_color=[[0., 0., 0.]],
            diffuse_color=[[0., 0., 0.]], device=cur_device
        )

        raster_settings = RasterizationSettings(
            image_size=(self.img_h, self.img_w),
            blur_radius=0,
            faces_per_pixel=1,
        )
        blend_params = blending.BlendParams(background_color=[0, 0, 0])

        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                raster_settings=raster_settings,
                cameras=cameras
            ),
            shader=SoftSimpleShader(
                lights=lights,
                blend_params=blend_params,
                cameras=cameras
            ),
        ).to(cur_device)
        return renderer

    def render_img(self,
                   batch_vps, batch_vcs, illu_sh,
                   c2l_Scales, c2l_Rmats, c2l_Tvecs,
                   batch_Rmats, batch_Tvecs, batch_inmats,
                   ):
        """Render the mesh after a canonical-to-live similarity transform
        followed by the camera extrinsics.

        Returns (rendered_img, mask_c3b, proj_lm2d, sh_vcs):
            rendered_img: [B, H, W, 3] image in [0, 1]
            mask_c3b:     [B, H, W, 3] bool foreground mask
            proj_lm2d:    [B, n_lm, 2] projected 2D landmarks
            sh_vcs:       [B, n_v, 3] SH-shaded vertex colors
        """
        batch_size = batch_vps.size(0)
        # Canonical -> live (scaled rigid), then live -> camera space.
        live_vps = torch.bmm(c2l_Scales * batch_vps, c2l_Rmats.permute(0, 2, 1)) + c2l_Tvecs.view(-1, 1, 3)
        cam_vps = torch.bmm(live_vps, batch_Rmats.permute(0, 2, 1)) + batch_Tvecs.view(-1, 1, 3)

        vns = self.calc_normal(cam_vps)
        sh_vcs = self.build_color(batch_vcs, vns, illu_sh)
        face_color = TexturesVertex(sh_vcs)

        meshes = Meshes(cam_vps, self.tris.unsqueeze(0).expand(batch_size, -1, -1), face_color)
        cur_renderer = self.generate_renderer(batch_inmats)
        rendered_res = cur_renderer(meshes)
        rendered_res /= 255.0

        # Alpha channel > 0 marks rendered (foreground) pixels.
        mask_c3b = (rendered_res[:, :, :, 3:]).detach().expand(-1, -1, -1, 3) > 0.0001
        rendered_img = rendered_res[:, :, :, :3]
        rendered_img = torch.clamp(rendered_img, min=0.0, max=1.0)

        lm_3d_posi = self.extract_lm3d_func(cam_vps)
        proj_lm2d = self.calc_ProjUV(lm_3d_posi, batch_inmats)

        return rendered_img, mask_c3b, proj_lm2d, sh_vcs

    def generate_renderer_for_eval(self, batch_inmats):
        """Build two PyTorch3D renderers with screen-space (in_ndc=False)
        cameras: the SH-color renderer and a Phong-shaded renderer used to
        visualize the raw geometry. Returns (renderer, renderer_phong)."""
        cur_device = batch_inmats.device
        batch_size = batch_inmats.size(0)
        cur_dtype = batch_inmats.dtype

        # Cameras consume raw pixel-space intrinsics directly.
        focal_info = torch.stack([batch_inmats[:, 0, 0], batch_inmats[:, 1, 1]], dim=-1)
        center_info = torch.stack([batch_inmats[:, 0, 2], batch_inmats[:, 1, 2]], dim=-1)

        # Flip the x/y axes to match the PyTorch3D camera convention.
        iden_mat = torch.eye(3)
        iden_mat[0, 0] = -1.0
        iden_mat[1, 1] = -1.0
        temp_Rmat = iden_mat.unsqueeze(0).expand(batch_size, -1, -1)
        temp_Vec = torch.zeros((batch_size, 3), dtype=cur_dtype)
        cameras = PerspectiveCameras(
            focal_length=focal_info,
            principal_point=center_info,
            R=temp_Rmat,
            T=temp_Vec,
            in_ndc=False,
            # BUGFIX: was [[self.img_h, self.img_w] * batch_size], which for
            # batch_size > 1 flattens into a single row of 2*batch_size
            # values instead of one (h, w) pair per camera. Identical for
            # batch_size == 1.
            image_size=[[self.img_h, self.img_w]] * batch_size,
            device=cur_device
        )

        # Light: purely ambient for the SH-color renderer.
        lights = PointLights(
            location=[[0.0, 0.0, 1e5]],
            ambient_color=[[1, 1, 1]],
            specular_color=[[0., 0., 0.]],
            diffuse_color=[[0., 0., 0.]], device=cur_device
        )

        raster_settings = RasterizationSettings(
            image_size=(self.img_h, self.img_w),
            blur_radius=0,
            faces_per_pixel=1,
        )
        blend_params = blending.BlendParams(background_color=[0, 0, 0])

        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                raster_settings=raster_settings,
                cameras=cameras
            ),
            shader=SoftSimpleShader(
                lights=lights,
                blend_params=blend_params,
                cameras=cameras
            ),
        ).to(cur_device)

        # A second renderer with actual ambient/diffuse/specular terms so
        # the Phong shading reveals the surface geometry.
        lights_phong = PointLights(
            location=[[0.0, 0.0, -1e5]],
            ambient_color=[[0.5, 0.5, 0.5]],
            specular_color=[[0.2, 0.2, 0.2]],
            diffuse_color=[[0.3, 0.3, 0.3]], device=cur_device
        )
        renderer_phong = MeshRenderer(
            rasterizer=MeshRasterizer(
                raster_settings=raster_settings,
                cameras=cameras
            ),
            shader=HardPhongShader(
                lights=lights_phong,
                blend_params=blend_params,
                cameras=cameras
            ),
        ).to(cur_device)
        return renderer, renderer_phong

    def render_img_for_eval(self,
                            batch_vps, batch_vcs, illu_sh,
                            batch_Rmats, batch_Tvecs, batch_inmats
                            ):
        """Render the mesh for evaluation, without the canonical-to-live
        transform, and additionally produce a Phong-shaded geometry view.

        Returns (rendered_img, mask_c3b, proj_lm2d, sh_vcs,
                 rendered_phong, phong_mask_c3b).
        """
        batch_size = batch_vps.size(0)
        cam_vps = torch.bmm(batch_vps, batch_Rmats.permute(0, 2, 1)) + batch_Tvecs.view(-1, 1, 3)

        vns = self.calc_normal(cam_vps)
        sh_vcs = self.build_color(batch_vcs, vns, illu_sh)
        face_color = TexturesVertex(sh_vcs)

        meshes = Meshes(cam_vps, self.tris.unsqueeze(0).expand(batch_size, -1, -1), face_color)
        cur_renderer, renderer_phong = self.generate_renderer_for_eval(batch_inmats)
        rendered_res = cur_renderer(meshes)
        rendered_res /= 255.0

        # Alpha channel > 0 marks rendered (foreground) pixels.
        mask_c3b = (rendered_res[:, :, :, 3:]).detach().expand(-1, -1, -1, 3) > 0.0001
        rendered_img = rendered_res[:, :, :, :3]
        rendered_img = torch.clamp(rendered_img, min=0.0, max=1.0)

        lm_3d_posi = self.extract_lm3d_func(cam_vps)
        proj_lm2d = self.calc_ProjUV(lm_3d_posi, batch_inmats)

        # Phong pass: uniform white vertex color so only shading varies.
        color_phong = torch.ones_like(cam_vps)
        color_phong = TexturesVertex(color_phong)
        meshes_phong = Meshes(cam_vps, self.tris.unsqueeze(0).expand(batch_size, -1, -1), color_phong)
        rendered_phong = renderer_phong(meshes_phong)
        phong_mask_c3b = (rendered_phong[:, :, :, 3:]).detach().expand(-1, -1, -1, 3) > 0.0001
        rendered_phong = rendered_phong[:, :, :, :3]

        return rendered_img, mask_c3b, proj_lm2d, sh_vcs, rendered_phong, phong_mask_c3b

    def forward(self,
                iden_codes, text_codes, expr_codes, cur_sh,
                batch_Rmats, batch_Tvecs, batch_inmats, eval = False, **kwargs
                ):
        """Decode latent codes and render.

        When eval is True, renders without the canonical-to-live transform
        (render_img_for_eval); otherwise kwargs must supply c2l_Scales,
        c2l_Rmats, and c2l_Tvecs for render_img.
        """
        batch_vps = self.decoder_nl3dmm_new(iden_codes, expr_codes, scale = 0.01)
        batch_vcs = self.decoder_3dmm(text_codes)

        if eval:
            return self.render_img_for_eval(batch_vps, batch_vcs, cur_sh,
                                            batch_Rmats, batch_Tvecs, batch_inmats)
        else:
            c2l_Scales, c2l_Rmats, c2l_Tvecs = kwargs["c2l_Scales"], kwargs["c2l_Rmats"], kwargs["c2l_Tvecs"]
            return self.render_img(batch_vps, batch_vcs, cur_sh,
                                   c2l_Scales, c2l_Rmats, c2l_Tvecs,
                                   batch_Rmats, batch_Tvecs, batch_inmats)
| [
"torch.as_tensor",
"numpy.sqrt",
"pytorch3d.renderer.PointLights",
"pytorch3d.renderer.TexturesVertex",
"pytorch3d.renderer.PerspectiveCameras",
"pytorch3d.renderer.HardPhongShader",
"torch.nn.functional.pad",
"torch.eye",
"FaceModels.NonLinear_3DMM",
"pytorch3d.renderer.blending.BlendParams",
"... | [((822, 854), 'FaceModels.Linear_3DMM', 'FaceModels.Linear_3DMM', (['self.opt'], {}), '(self.opt)\n', (844, 854), False, 'import FaceModels\n'), ((889, 924), 'FaceModels.NonLinear_3DMM', 'FaceModels.NonLinear_3DMM', (['self.opt'], {}), '(self.opt)\n', (914, 924), False, 'import FaceModels\n'), ((981, 1025), 'numpy.load', 'np.load', (['"""ConfigFiles/nl_3dmm_topo_info.npz"""'], {}), "('ConfigFiles/nl_3dmm_topo_info.npz')\n", (988, 1025), True, 'import numpy as np\n'), ((1653, 1678), 'RenderUtils.ExtractLandMarkPosition', 'ExtractLandMarkPosition', ([], {}), '()\n', (1676, 1678), False, 'from RenderUtils import ExtractLandMarkPosition, SoftSimpleShader\n'), ((2503, 2551), 'torch.cross', 'torch.cross', (['(vert_2 - vert_1)', '(vert_3 - vert_1)', '(2)'], {}), '(vert_2 - vert_1, vert_3 - vert_1, 2)\n', (2514, 2551), False, 'import torch\n'), ((2573, 2598), 'torch.nn.functional.normalize', 'F.normalize', (['nnorm'], {'dim': '(2)'}), '(nnorm, dim=2)\n', (2584, 2598), True, 'import torch.nn.functional as F\n'), ((2620, 2683), 'torch.nn.functional.pad', 'F.pad', (['tri_normal', '[0, 0, 0, 1, 0, 0]'], {'mode': '"""constant"""', 'value': '(0)'}), "(tri_normal, [0, 0, 0, 1, 0, 0], mode='constant', value=0)\n", (2625, 2683), True, 'import torch.nn.functional as F\n'), ((2786, 2813), 'torch.nn.functional.normalize', 'F.normalize', (['v_norm'], {'dim': '(-1)'}), '(v_norm, dim=-1)\n', (2797, 2813), True, 'import torch.nn.functional as F\n'), ((3817, 3837), 'torch.stack', 'torch.stack', (['arrH', '(1)'], {}), '(arrH, 1)\n', (3828, 3837), False, 'import torch\n'), ((4574, 4664), 'torch.stack', 'torch.stack', (['[batch_inmats[:, 0, 0] / half_w, batch_inmats[:, 1, 1] / half_w]'], {'dim': '(-1)'}), '([batch_inmats[:, 0, 0] / half_w, batch_inmats[:, 1, 1] / half_w\n ], dim=-1)\n', (4585, 4664), False, 'import torch\n'), ((4682, 4783), 'torch.stack', 'torch.stack', (['[batch_inmats[:, 0, 2] / half_w - 1.0, batch_inmats[:, 1, 2] / half_h - 1.0]'], {'dim': '(-1)'}), 
'([batch_inmats[:, 0, 2] / half_w - 1.0, batch_inmats[:, 1, 2] /\n half_h - 1.0], dim=-1)\n', (4693, 4783), False, 'import torch\n'), ((4800, 4812), 'torch.eye', 'torch.eye', (['(3)'], {}), '(3)\n', (4809, 4812), False, 'import torch\n'), ((4970, 5015), 'torch.zeros', 'torch.zeros', (['(batch_size, 3)'], {'dtype': 'cur_dtype'}), '((batch_size, 3), dtype=cur_dtype)\n', (4981, 5015), False, 'import torch\n'), ((5043, 5164), 'pytorch3d.renderer.PerspectiveCameras', 'PerspectiveCameras', ([], {'focal_length': 'focal_info', 'principal_point': 'center_info', 'R': 'temp_Rmat', 'T': 'temp_Vec', 'device': 'cur_device'}), '(focal_length=focal_info, principal_point=center_info, R=\n temp_Rmat, T=temp_Vec, device=cur_device)\n', (5061, 5164), False, 'from pytorch3d.renderer import PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader\n'), ((6134, 6299), 'pytorch3d.renderer.PointLights', 'PointLights', ([], {'location': '[[0.0, 0.0, 100000.0]]', 'ambient_color': '[[1, 1, 1]]', 'specular_color': '[[0.0, 0.0, 0.0]]', 'diffuse_color': '[[0.0, 0.0, 0.0]]', 'device': 'cur_device'}), '(location=[[0.0, 0.0, 100000.0]], ambient_color=[[1, 1, 1]],\n specular_color=[[0.0, 0.0, 0.0]], diffuse_color=[[0.0, 0.0, 0.0]],\n device=cur_device)\n', (6145, 6299), False, 'from pytorch3d.renderer import PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader\n'), ((6377, 6473), 'pytorch3d.renderer.RasterizationSettings', 'RasterizationSettings', ([], {'image_size': '(self.img_h, self.img_w)', 'blur_radius': '(0)', 'faces_per_pixel': '(1)'}), '(image_size=(self.img_h, self.img_w), blur_radius=0,\n faces_per_pixel=1)\n', (6398, 6473), False, 'from pytorch3d.renderer import PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader\n'), ((6611, 6659), 
'pytorch3d.renderer.blending.BlendParams', 'blending.BlendParams', ([], {'background_color': '[0, 0, 0]'}), '(background_color=[0, 0, 0])\n', (6631, 6659), False, 'from pytorch3d.renderer import PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader\n'), ((7651, 7673), 'pytorch3d.renderer.TexturesVertex', 'TexturesVertex', (['sh_vcs'], {}), '(sh_vcs)\n', (7665, 7673), False, 'from pytorch3d.renderer import PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader\n'), ((8073, 8116), 'torch.clamp', 'torch.clamp', (['rendered_img'], {'min': '(0.0)', 'max': '(1.0)'}), '(rendered_img, min=0.0, max=1.0)\n', (8084, 8116), False, 'import torch\n'), ((8604, 8671), 'torch.stack', 'torch.stack', (['[batch_inmats[:, 0, 0], batch_inmats[:, 1, 1]]'], {'dim': '(-1)'}), '([batch_inmats[:, 0, 0], batch_inmats[:, 1, 1]], dim=-1)\n', (8615, 8671), False, 'import torch\n'), ((8694, 8761), 'torch.stack', 'torch.stack', (['[batch_inmats[:, 0, 2], batch_inmats[:, 1, 2]]'], {'dim': '(-1)'}), '([batch_inmats[:, 0, 2], batch_inmats[:, 1, 2]], dim=-1)\n', (8705, 8761), False, 'import torch\n'), ((8782, 8794), 'torch.eye', 'torch.eye', (['(3)'], {}), '(3)\n', (8791, 8794), False, 'import torch\n'), ((8952, 8997), 'torch.zeros', 'torch.zeros', (['(batch_size, 3)'], {'dtype': 'cur_dtype'}), '((batch_size, 3), dtype=cur_dtype)\n', (8963, 8997), False, 'import torch\n'), ((9025, 9217), 'pytorch3d.renderer.PerspectiveCameras', 'PerspectiveCameras', ([], {'focal_length': 'focal_info', 'principal_point': 'center_info', 'R': 'temp_Rmat', 'T': 'temp_Vec', 'in_ndc': '(False)', 'image_size': '[[self.img_h, self.img_w] * batch_size]', 'device': 'cur_device'}), '(focal_length=focal_info, principal_point=center_info, R=\n temp_Rmat, T=temp_Vec, in_ndc=False, image_size=[[self.img_h, self.\n img_w] * batch_size], device=cur_device)\n', (9043, 9217), False, 'from 
pytorch3d.renderer import PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader\n'), ((9412, 9577), 'pytorch3d.renderer.PointLights', 'PointLights', ([], {'location': '[[0.0, 0.0, 100000.0]]', 'ambient_color': '[[1, 1, 1]]', 'specular_color': '[[0.0, 0.0, 0.0]]', 'diffuse_color': '[[0.0, 0.0, 0.0]]', 'device': 'cur_device'}), '(location=[[0.0, 0.0, 100000.0]], ambient_color=[[1, 1, 1]],\n specular_color=[[0.0, 0.0, 0.0]], diffuse_color=[[0.0, 0.0, 0.0]],\n device=cur_device)\n', (9423, 9577), False, 'from pytorch3d.renderer import PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader\n'), ((9655, 9751), 'pytorch3d.renderer.RasterizationSettings', 'RasterizationSettings', ([], {'image_size': '(self.img_h, self.img_w)', 'blur_radius': '(0)', 'faces_per_pixel': '(1)'}), '(image_size=(self.img_h, self.img_w), blur_radius=0,\n faces_per_pixel=1)\n', (9676, 9751), False, 'from pytorch3d.renderer import PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader\n'), ((9889, 9937), 'pytorch3d.renderer.blending.BlendParams', 'blending.BlendParams', ([], {'background_color': '[0, 0, 0]'}), '(background_color=[0, 0, 0])\n', (9909, 9937), False, 'from pytorch3d.renderer import PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader\n'), ((10316, 10489), 'pytorch3d.renderer.PointLights', 'PointLights', ([], {'location': '[[0.0, 0.0, -100000.0]]', 'ambient_color': '[[0.5, 0.5, 0.5]]', 'specular_color': '[[0.2, 0.2, 0.2]]', 'diffuse_color': '[[0.3, 0.3, 0.3]]', 'device': 'cur_device'}), '(location=[[0.0, 0.0, -100000.0]], ambient_color=[[0.5, 0.5, 0.5\n ]], specular_color=[[0.2, 0.2, 0.2]], diffuse_color=[[0.3, 0.3, 0.3]],\n device=cur_device)\n', (10327, 10489), False, 'from pytorch3d.renderer 
import PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader\n'), ((11421, 11443), 'pytorch3d.renderer.TexturesVertex', 'TexturesVertex', (['sh_vcs'], {}), '(sh_vcs)\n', (11435, 11443), False, 'from pytorch3d.renderer import PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader\n'), ((11868, 11911), 'torch.clamp', 'torch.clamp', (['rendered_img'], {'min': '(0.0)', 'max': '(1.0)'}), '(rendered_img, min=0.0, max=1.0)\n', (11879, 11911), False, 'import torch\n'), ((12052, 12076), 'torch.ones_like', 'torch.ones_like', (['cam_vps'], {}), '(cam_vps)\n', (12067, 12076), False, 'import torch\n'), ((12099, 12126), 'pytorch3d.renderer.TexturesVertex', 'TexturesVertex', (['color_phong'], {}), '(color_phong)\n', (12113, 12126), False, 'from pytorch3d.renderer import PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader\n'), ((1336, 1348), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (1343, 1348), True, 'import numpy as np\n'), ((1379, 1391), 'numpy.sqrt', 'np.sqrt', (['(8.0)'], {}), '(8.0)\n', (1386, 1391), True, 'import numpy as np\n'), ((1414, 1432), 'numpy.sqrt', 'np.sqrt', (['(4 * np.pi)'], {}), '(4 * np.pi)\n', (1421, 1432), True, 'import numpy as np\n'), ((1451, 1463), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (1458, 1463), True, 'import numpy as np\n'), ((1466, 1484), 'numpy.sqrt', 'np.sqrt', (['(4 * np.pi)'], {}), '(4 * np.pi)\n', (1473, 1484), True, 'import numpy as np\n'), ((1522, 1541), 'numpy.sqrt', 'np.sqrt', (['(12 * np.pi)'], {}), '(12 * np.pi)\n', (1529, 1541), True, 'import numpy as np\n'), ((1565, 1577), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (1572, 1577), True, 'import numpy as np\n'), ((1041, 1081), 'torch.as_tensor', 'torch.as_tensor', (["topo_info['fv_indices']"], {}), "(topo_info['fv_indices'])\n", 
(1056, 1081), False, 'import torch\n'), ((1109, 1154), 'torch.as_tensor', 'torch.as_tensor', (["topo_info['corr_vf_indices']"], {}), "(topo_info['corr_vf_indices'])\n", (1124, 1154), False, 'import torch\n'), ((1507, 1519), 'numpy.sqrt', 'np.sqrt', (['(5.0)'], {}), '(5.0)\n', (1514, 1519), True, 'import numpy as np\n'), ((3295, 3314), 'torch.ones_like', 'torch.ones_like', (['nx'], {}), '(nx)\n', (3310, 3314), False, 'import torch\n'), ((6717, 6781), 'pytorch3d.renderer.MeshRasterizer', 'MeshRasterizer', ([], {'raster_settings': 'raster_settings', 'cameras': 'cameras'}), '(raster_settings=raster_settings, cameras=cameras)\n', (6731, 6781), False, 'from pytorch3d.renderer import PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader\n'), ((6849, 6924), 'RenderUtils.SoftSimpleShader', 'SoftSimpleShader', ([], {'lights': 'lights', 'blend_params': 'blend_params', 'cameras': 'cameras'}), '(lights=lights, blend_params=blend_params, cameras=cameras)\n', (6865, 6924), False, 'from RenderUtils import ExtractLandMarkPosition, SoftSimpleShader\n'), ((9995, 10059), 'pytorch3d.renderer.MeshRasterizer', 'MeshRasterizer', ([], {'raster_settings': 'raster_settings', 'cameras': 'cameras'}), '(raster_settings=raster_settings, cameras=cameras)\n', (10009, 10059), False, 'from pytorch3d.renderer import PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader\n'), ((10127, 10202), 'RenderUtils.SoftSimpleShader', 'SoftSimpleShader', ([], {'lights': 'lights', 'blend_params': 'blend_params', 'cameras': 'cameras'}), '(lights=lights, blend_params=blend_params, cameras=cameras)\n', (10143, 10202), False, 'from RenderUtils import ExtractLandMarkPosition, SoftSimpleShader\n'), ((10600, 10664), 'pytorch3d.renderer.MeshRasterizer', 'MeshRasterizer', ([], {'raster_settings': 'raster_settings', 'cameras': 'cameras'}), '(raster_settings=raster_settings, 
cameras=cameras)\n', (10614, 10664), False, 'from pytorch3d.renderer import PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader\n'), ((10732, 10817), 'pytorch3d.renderer.HardPhongShader', 'HardPhongShader', ([], {'lights': 'lights_phong', 'blend_params': 'blend_params', 'cameras': 'cameras'}), '(lights=lights_phong, blend_params=blend_params, cameras=cameras\n )\n', (10747, 10817), False, 'from pytorch3d.renderer import PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader\n')] |
#
# This file is implemented based on the author code of
# Lee et al., "A simple unified framework for detecting out-of-distribution samples and adversarial attacks", in NeurIPS 2018.
#
import os
import torch
import numpy as np
def compute_confscores(model, test_loader, outdir, id_flag):
    """Write per-sample confidence scores (negated minimum distances) to a file.

    Arguments:
      model: callable mapping a batch (moved to CUDA) to per-class distances
      test_loader: iterable of (data, target) batches; targets are ignored
      outdir: directory for the output file
      id_flag: True writes 'confscores_id.txt' (in-distribution data),
        False writes 'confscores_ood.txt' (out-of-distribution data)
    """
    # Truthiness test instead of the non-idiomatic '== True'; the dead
    # 'total' counter of the original was removed (it was never read).
    fname = 'confscores_id.txt' if id_flag else 'confscores_ood.txt'
    outfile = os.path.join(outdir, fname)
    # 'with' guarantees the file is closed even if an iteration raises
    with open(outfile, 'w') as f:
        for data, _ in test_loader:
            dists = model(data.cuda())
            # Minimum over the class dimension; the negated value is used as
            # the confidence score downstream (see get_auroc_curve).
            confscores, _ = torch.min(dists, dim=1)
            for i in range(data.size(0)):
                f.write("{}\n".format(-confscores[i]))
def get_auroc_curve(indir):
    """Build ROC curve counts from the confidence score files in indir.

    Reads 'confscores_id.txt' (in-distribution, "known") and
    'confscores_ood.txt' (out-of-distribution, "novel"), one score per line,
    and merges the two sorted lists into cumulative counts.

    Returns (tp, fp, tnr_at_tpr85, tnr_at_tpr95), where tp[l]/fp[l] are the
    numbers of known/novel samples still above the l-th threshold.
    """
    # The original passed delimiter='\n', which NumPy >= 1.23 rejects
    # (newline delimiters are disallowed); default whitespace splitting
    # handles one-score-per-line files. ndmin=1 keeps a single-score file
    # from collapsing to a 0-d array, which would break .sort()/.shape[0].
    known = np.loadtxt(os.path.join(indir, 'confscores_id.txt'), ndmin=1)
    novel = np.loadtxt(os.path.join(indir, 'confscores_ood.txt'), ndmin=1)
    known.sort()
    novel.sort()
    num_k = known.shape[0]
    num_n = novel.shape[0]
    # -1 marks "not filled yet"; tp[0]/fp[0] start with every sample positive
    tp = -np.ones([num_k+num_n+1], dtype=int)
    fp = -np.ones([num_k+num_n+1], dtype=int)
    tp[0], fp[0] = num_k, num_n
    k, n = 0, 0
    for l in range(num_k+num_n):
        if k == num_k:
            # Only novel scores remain: tp stays, fp counts down to zero
            tp[l+1:] = tp[l]
            fp[l+1:] = np.arange(fp[l]-1, -1, -1)
            break
        elif n == num_n:
            # Only known scores remain: fp stays, tp counts down to zero
            tp[l+1:] = np.arange(tp[l]-1, -1, -1)
            fp[l+1:] = fp[l]
            break
        else:
            # Consume the smaller of the two next scores
            if novel[n] < known[k]:
                n += 1
                tp[l+1] = tp[l]
                fp[l+1] = fp[l] - 1
            else:
                k += 1
                tp[l+1] = tp[l] - 1
                fp[l+1] = fp[l]
    # Threshold index closest to 85% / 95% true positive rate
    tpr85_pos = np.abs(tp / num_k - .85).argmin()
    tpr95_pos = np.abs(tp / num_k - .95).argmin()
    tnr_at_tpr85 = 1. - fp[tpr85_pos] / num_n
    tnr_at_tpr95 = 1. - fp[tpr95_pos] / num_n
    return tp, fp, tnr_at_tpr85, tnr_at_tpr95
def compute_metrics(dir_name, verbose=False):
    """Compute OOD detection metrics from the score files in dir_name.

    Arguments:
      dir_name: directory containing confscores_id.txt / confscores_ood.txt
      verbose: print the metrics as a formatted table

    Returns a dict with keys TNR85, TNR95, AUROC, DTACC, AUIN and AUOUT.
    """
    tp, fp, tnr_at_tpr85, tnr_at_tpr95 = get_auroc_curve(dir_name)
    results = dict()
    mtypes = ['TNR85', 'TNR95', 'AUROC', 'DTACC', 'AUIN', 'AUOUT']
    if verbose:
        print('      ', end='')
        for mtype in mtypes:
            print(' {mtype:6s}'.format(mtype=mtype), end='')
        print('')
        # Bug fix: the original formatted an undefined name 'stype' here,
        # raising NameError whenever verbose=True; label the row with the
        # directory name instead.
        print('{stype:5s} '.format(stype=dir_name), end='')
    # TNR at 85% TPR
    mtype = 'TNR85'
    results[mtype] = tnr_at_tpr85
    if verbose:
        print(' {val:6.3f}'.format(val=100.*results[mtype]), end='')
    # TNR at 95% TPR
    mtype = 'TNR95'
    results[mtype] = tnr_at_tpr95
    if verbose:
        print(' {val:6.3f}'.format(val=100.*results[mtype]), end='')
    # AUROC: area under the (tpr, fpr) curve, extended to the (0,0)/(1,1) ends
    mtype = 'AUROC'
    tpr = np.concatenate([[1.], tp/tp[0], [0.]])
    fpr = np.concatenate([[1.], fp/fp[0], [0.]])
    results[mtype] = -np.trapz(1. - fpr, tpr)
    if verbose:
        print(' {val:6.3f}'.format(val=100.*results[mtype]), end='')
    # DTACC: best detection accuracy over all thresholds
    mtype = 'DTACC'
    results[mtype] = .5 * (tp/tp[0] + 1. - fp/fp[0]).max()
    if verbose:
        print(' {val:6.3f}'.format(val=100.*results[mtype]), end='')
    # AUIN: area under precision vs. TPR with in-distribution as positive
    mtype = 'AUIN'
    denom = tp + fp
    denom[denom == 0.] = -1.  # guard against division by zero
    pin_ind = np.concatenate([[True], denom > 0., [True]])
    pin = np.concatenate([[.5], tp/denom, [0.]])
    results[mtype] = -np.trapz(pin[pin_ind], tpr[pin_ind])
    if verbose:
        print(' {val:6.3f}'.format(val=100.*results[mtype]), end='')
    # AUOUT: same with OOD as the positive class; note this reuses the
    # 'fpr' array computed in the AUROC step above.
    mtype = 'AUOUT'
    denom = tp[0] - tp + fp[0] - fp
    denom[denom == 0.] = -1.
    pout_ind = np.concatenate([[True], denom > 0., [True]])
    pout = np.concatenate([[0.], (fp[0] - fp)/denom, [.5]])
    results[mtype] = np.trapz(pout[pout_ind], 1. - fpr[pout_ind])
    if verbose:
        print(' {val:6.3f}'.format(val=100.*results[mtype]), end='')
        print('')
    return results
def print_ood_results(ood_result):
    """Pretty-print one set of OOD detection metrics as percentages"""
    metric_keys = ['TNR85', 'TNR95', 'AUROC', 'DTACC', 'AUIN', 'AUOUT']
    # Header row: each name left-padded with one space in a 6-wide field
    header = ''.join(' {mtype:6s}'.format(mtype=key) for key in metric_keys)
    # Value row: 6.2f percentages separated by single spaces
    values = ' '.join(
        '{val:6.2f}'.format(val=100.*ood_result[key]) for key in metric_keys)
    print(header)
    print(values)
    print('')
def print_ood_results_total(ood_result_list):
    """Pretty-print mean and standard deviation of metrics over several runs"""
    metric_keys = ['TNR85', 'TNR95', 'AUROC', 'DTACC', 'AUIN', 'AUOUT']
    # Percentage-scaled values for every metric across all runs
    scores = {key: [100.*result[key] for result in ood_result_list]
              for key in metric_keys}
    header = ''.join(' {mtype:15s}'.format(mtype=key) for key in metric_keys)
    cells = ' '.join(
        '{mean:6.2f} ({std:6.3f})'.format(mean=np.mean(scores[key]),
                                          std=np.std(scores[key]))
        for key in metric_keys)
    print(header)
    print(cells)
    print('')
| [
"numpy.abs",
"numpy.mean",
"numpy.trapz",
"numpy.ones",
"numpy.std",
"os.path.join",
"torch.min",
"numpy.max",
"numpy.concatenate",
"numpy.min",
"numpy.arange"
] | [((2863, 2905), 'numpy.concatenate', 'np.concatenate', (['[[1.0], tp / tp[0], [0.0]]'], {}), '([[1.0], tp / tp[0], [0.0]])\n', (2877, 2905), True, 'import numpy as np\n'), ((2912, 2954), 'numpy.concatenate', 'np.concatenate', (['[[1.0], fp / fp[0], [0.0]]'], {}), '([[1.0], fp / fp[0], [0.0]])\n', (2926, 2954), True, 'import numpy as np\n'), ((3361, 3406), 'numpy.concatenate', 'np.concatenate', (['[[True], denom > 0.0, [True]]'], {}), '([[True], denom > 0.0, [True]])\n', (3375, 3406), True, 'import numpy as np\n'), ((3416, 3458), 'numpy.concatenate', 'np.concatenate', (['[[0.5], tp / denom, [0.0]]'], {}), '([[0.5], tp / denom, [0.0]])\n', (3430, 3458), True, 'import numpy as np\n'), ((3716, 3761), 'numpy.concatenate', 'np.concatenate', (['[[True], denom > 0.0, [True]]'], {}), '([[True], denom > 0.0, [True]])\n', (3730, 3761), True, 'import numpy as np\n'), ((3772, 3824), 'numpy.concatenate', 'np.concatenate', (['[[0.0], (fp[0] - fp) / denom, [0.5]]'], {}), '([[0.0], (fp[0] - fp) / denom, [0.5]])\n', (3786, 3824), True, 'import numpy as np\n'), ((3842, 3887), 'numpy.trapz', 'np.trapz', (['pout[pout_ind]', '(1.0 - fpr[pout_ind])'], {}), '(pout[pout_ind], 1.0 - fpr[pout_ind])\n', (3850, 3887), True, 'import numpy as np\n'), ((350, 391), 'os.path.join', 'os.path.join', (['outdir', '"""confscores_id.txt"""'], {}), "(outdir, 'confscores_id.txt')\n", (362, 391), False, 'import os\n'), ((420, 462), 'os.path.join', 'os.path.join', (['outdir', '"""confscores_ood.txt"""'], {}), "(outdir, 'confscores_ood.txt')\n", (432, 462), False, 'import os\n'), ((587, 610), 'torch.min', 'torch.min', (['dists'], {'dim': '(1)'}), '(dists, dim=1)\n', (596, 610), False, 'import torch\n'), ((802, 842), 'os.path.join', 'os.path.join', (['indir', '"""confscores_id.txt"""'], {}), "(indir, 'confscores_id.txt')\n", (814, 842), False, 'import os\n'), ((883, 924), 'os.path.join', 'os.path.join', (['indir', '"""confscores_ood.txt"""'], {}), "(indir, 'confscores_ood.txt')\n", (895, 924), False, 
'import os\n'), ((1154, 1193), 'numpy.ones', 'np.ones', (['[num_k + num_n + 1]'], {'dtype': 'int'}), '([num_k + num_n + 1], dtype=int)\n', (1161, 1193), True, 'import numpy as np\n'), ((1200, 1239), 'numpy.ones', 'np.ones', (['[num_k + num_n + 1]'], {'dtype': 'int'}), '([num_k + num_n + 1], dtype=int)\n', (1207, 1239), True, 'import numpy as np\n'), ((2973, 2997), 'numpy.trapz', 'np.trapz', (['(1.0 - fpr)', 'tpr'], {}), '(1.0 - fpr, tpr)\n', (2981, 2997), True, 'import numpy as np\n'), ((3477, 3513), 'numpy.trapz', 'np.trapz', (['pin[pin_ind]', 'tpr[pin_ind]'], {}), '(pin[pin_ind], tpr[pin_ind])\n', (3485, 3513), True, 'import numpy as np\n'), ((999, 1012), 'numpy.max', 'np.max', (['known'], {}), '(known)\n', (1005, 1012), True, 'import numpy as np\n'), ((1014, 1027), 'numpy.max', 'np.max', (['novel'], {}), '(novel)\n', (1020, 1027), True, 'import numpy as np\n'), ((1050, 1063), 'numpy.min', 'np.min', (['known'], {}), '(known)\n', (1056, 1063), True, 'import numpy as np\n'), ((1064, 1077), 'numpy.min', 'np.min', (['novel'], {}), '(novel)\n', (1070, 1077), True, 'import numpy as np\n'), ((1392, 1420), 'numpy.arange', 'np.arange', (['(fp[l] - 1)', '(-1)', '(-1)'], {}), '(fp[l] - 1, -1, -1)\n', (1401, 1420), True, 'import numpy as np\n'), ((1825, 1850), 'numpy.abs', 'np.abs', (['(tp / num_k - 0.85)'], {}), '(tp / num_k - 0.85)\n', (1831, 1850), True, 'import numpy as np\n'), ((1875, 1900), 'numpy.abs', 'np.abs', (['(tp / num_k - 0.95)'], {}), '(tp / num_k - 0.95)\n', (1881, 1900), True, 'import numpy as np\n'), ((1485, 1513), 'numpy.arange', 'np.arange', (['(tp[l] - 1)', '(-1)', '(-1)'], {}), '(tp[l] - 1, -1, -1)\n', (1494, 1513), True, 'import numpy as np\n'), ((5312, 5331), 'numpy.mean', 'np.mean', (['TNR85_list'], {}), '(TNR85_list)\n', (5319, 5331), True, 'import numpy as np\n'), ((5337, 5355), 'numpy.std', 'np.std', (['TNR85_list'], {}), '(TNR85_list)\n', (5343, 5355), True, 'import numpy as np\n'), ((5416, 5435), 'numpy.mean', 'np.mean', (['TNR95_list'], {}), 
'(TNR95_list)\n', (5423, 5435), True, 'import numpy as np\n'), ((5441, 5459), 'numpy.std', 'np.std', (['TNR95_list'], {}), '(TNR95_list)\n', (5447, 5459), True, 'import numpy as np\n'), ((5520, 5539), 'numpy.mean', 'np.mean', (['AUROC_list'], {}), '(AUROC_list)\n', (5527, 5539), True, 'import numpy as np\n'), ((5545, 5563), 'numpy.std', 'np.std', (['AUROC_list'], {}), '(AUROC_list)\n', (5551, 5563), True, 'import numpy as np\n'), ((5624, 5643), 'numpy.mean', 'np.mean', (['DTACC_list'], {}), '(DTACC_list)\n', (5631, 5643), True, 'import numpy as np\n'), ((5649, 5667), 'numpy.std', 'np.std', (['DTACC_list'], {}), '(DTACC_list)\n', (5655, 5667), True, 'import numpy as np\n'), ((5728, 5746), 'numpy.mean', 'np.mean', (['AUIN_list'], {}), '(AUIN_list)\n', (5735, 5746), True, 'import numpy as np\n'), ((5752, 5769), 'numpy.std', 'np.std', (['AUIN_list'], {}), '(AUIN_list)\n', (5758, 5769), True, 'import numpy as np\n'), ((5832, 5851), 'numpy.mean', 'np.mean', (['AUOUT_list'], {}), '(AUOUT_list)\n', (5839, 5851), True, 'import numpy as np\n'), ((5857, 5875), 'numpy.std', 'np.std', (['AUOUT_list'], {}), '(AUOUT_list)\n', (5863, 5875), True, 'import numpy as np\n')] |
"""Filter classifier"""
import json
import logging
import collections
import math
import scipy.optimize
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize
import sklearn.linear_model
from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score, log_loss
from opustools.util import file_open
from . import grouper
logger = logging.getLogger(__name__)
def load_dataframe(data_file):
    """Load normalized scores dataframe from a JSON lines file"""
    def _parse(line):
        # Log the offending line before re-raising so bad input is visible
        try:
            return json.loads(line)
        except json.decoder.JSONDecodeError as err:
            logger.error(line)
            raise err
    with file_open(data_file) as dfile:
        records = [_parse(line) for line in dfile]
    return pd.DataFrame(json_normalize(records))
def load_dataframe_in_chunks(data_file, chunksize):
    """Yield normalized scores dataframes from a chunked JSON lines file

    Use instead of load_dataframe if the data is too large to fit in memory.
    """
    with file_open(data_file) as dfile:
        # NOTE(review): 'grouper' is imported as a module (from . import
        # grouper) yet called directly here — confirm it is callable or
        # whether grouper.grouper was intended.
        for num, chunk in enumerate(grouper(dfile, chunksize)):
            records = []
            for line in chunk:
                # Log the offending line before re-raising
                try:
                    records.append(json.loads(line))
                except json.decoder.JSONDecodeError as err:
                    logger.error(line)
                    raise err
            logger.info("Processing chunk %s with %s lines", num, len(records))
            yield pd.DataFrame(json_normalize(records))
def standardize_dataframe_scores(df, features, means_stds=None):
    """Normalize, zero average, and set direction for scores in each column"""
    # An empty or missing means_stds triggers estimation from the data
    # (the original used the same truthiness test, so keep it).
    if not means_stds:
        means_stds = {}
        for column in df:
            values = df[column].to_numpy()
            # Features whose clean direction is 'low' get a flipped sign so
            # that higher always means cleaner after standardization.
            sign = -1 if features[column].get('clean-direction', 'high') == 'low' else 1
            means_stds[column] = (values.mean(), values.std(), sign)
    standardized = pd.DataFrame()
    for column in features:
        mean, std, sign = means_stds[column]
        if std == 0:
            # Constant column: map every value to zero
            standardized[column] = [0 for _ in range(len(df[column]))]
        else:
            standardized[column] = sign * (df[column].to_numpy() - mean) / std
    return standardized, means_stds
class Classifier:
    """Wrapper for sklearn classifiers (e.g. LogisticRegression)

    Includes feature selection and standardization from pandas
    dataframes.
    """

    def __init__(self, classname, params, features, standardize_params):
        self.classname = classname
        # Resolve the classifier class by name from sklearn.linear_model
        model_class = getattr(sklearn.linear_model, classname)
        self.classifier = model_class(**params)
        self.features = features
        self.standardize_params = standardize_params

    def standardize(self, df):
        """Standardize features in the data frame"""
        if self.standardize_params:
            return standardize_dataframe_scores(
                df, self.features, self.standardize_params)[0]
        logger.warning("Feature standardization parameters missing")
        return df[self.features]

    def train(self, df, labels, standardize=True):
        """Train logistic regression with training_data"""
        frame = self.standardize(df) if standardize else df
        self.classifier.fit(frame[self.features], labels)

    def write_preds(self, input_fname, output_fname, true_label=None,
                    standardize=True, chunksize=None):
        """Write predicted class labels to output file"""
        if chunksize:
            chunks = load_dataframe_in_chunks(input_fname, chunksize)
        else:
            chunks = [load_dataframe(input_fname)]
        logger.info("Classifier labels: %s", self.classifier.classes_)
        with file_open(output_fname, 'w') as output:
            for raw_chunk in chunks:
                frame = self.standardize(raw_chunk) if standardize else raw_chunk
                labels = self.classifier.predict(frame[self.features])
                if true_label:
                    # Evaluate against the gold labels when available
                    true_labels = raw_chunk[true_label]
                    logger.info('accuracy: %s',
                                accuracy_score(true_labels, labels))
                    logger.info('confusion matrix:\n%s',
                                confusion_matrix(true_labels, labels))
                for label in labels:
                    output.write('{}\n'.format(label))

    def write_probs(self, input_fname, output_fname, true_label=None,
                    standardize=True, chunksize=None):
        """Write classification probabilities to output file"""
        if chunksize:
            chunks = load_dataframe_in_chunks(input_fname, chunksize)
        else:
            chunks = [load_dataframe(input_fname)]
        logger.info("Classifier labels: %s", self.classifier.classes_)
        with file_open(output_fname, 'w') as output:
            for raw_chunk in chunks:
                frame = self.standardize(raw_chunk) if standardize else raw_chunk
                probas = self.classifier.predict_proba(frame[self.features])
                if true_label:
                    true_labels = raw_chunk[true_label]
                    logger.info('roc_auc: %s',
                                roc_auc_score(true_labels, probas[:, 1]))
                # Only the probability of the positive (second) class is written
                for proba in probas[:, 1]:
                    output.write('{0:.10f}\n'.format(proba))

    def weights(self):
        """Yield classifier weights"""
        if self.classname != "LogisticRegression":
            logger.warning("Method weights unsupported for %s", self.classname)
            return
        yield '(intercept)', self.classifier.intercept_[0]
        for name, value in zip(self.features, self.classifier.coef_[0]):
            yield name, value
class TrainClassifier:
    """Classify clean and noisy sentence pairs"""

    def __init__(self, training_scores=None, dev_scores=None, model_type=None,
                 model_parameters=None, features=None, **kwargs):
        """Load and standardize training (and optional development) data.

        Keyword arguments:
          training_scores: path to a JSON lines file of training scores
          dev_scores: optional path to a JSON lines file of development
            scores; must contain a 'label' column
          model_type: classifier class name from sklearn.linear_model
            (default: LogisticRegression)
          model_parameters: dict of keyword arguments for the classifier
          features: feature configuration dict; each key is matched as a
            prefix of the score column names
        """
        logger.info("Loading training data")
        self.df_training_data = load_dataframe(training_scores)
        self.group_config = features
        # Expand the feature configuration to the actual score columns:
        # a config entry applies to every column its key is a prefix of.
        self.feature_config = {}
        for t_key in self.df_training_data.keys():
            for f_key in features.keys():
                if t_key.startswith(f_key):
                    self.feature_config[t_key] = features[f_key]
        self.df_training_data = self.df_training_data[self.feature_config.keys()]
        self.df_training_data, self.means_stds = standardize_dataframe_scores(
            self.df_training_data, self.feature_config)
        if dev_scores:
            logger.info("Loading development data")
            self.dev_data = load_dataframe(dev_scores)
            self.dev_labels = self.dev_data.pop('label')
            self.dev_data = self.dev_data[self.feature_config.keys()]
            # Scale the dev data with the *training* means and stds so both
            # sets live in the same space.
            self.dev_data = standardize_dataframe_scores(
                self.dev_data, self.feature_config, self.means_stds)[0]
        else:
            self.dev_data = None
            self.dev_labels = None
        # Identity comparison with None (PEP 8) instead of '== None'
        if model_type is None:
            self.model_type = 'LogisticRegression'
        else:
            self.model_type = model_type
        if model_parameters is None:
            self.model_parameters = {}
        else:
            self.model_parameters = model_parameters

    def train_classifier(self, training_data, labels):
        """Train a classifier of self.model_type on training_data"""
        classifier = Classifier(self.model_type, self.model_parameters,
                                training_data.columns, self.means_stds)
        # Data is already standardized in __init__
        classifier.train(training_data, labels, standardize=False)
        return classifier

    def get_roc_auc(self, model, dev_data):
        """Calculate ROC AUC for a given model (requires dev_data)"""
        probs = model.classifier.predict_proba(dev_data)
        return roc_auc_score(self.dev_labels, probs[:, 1])

    def get_sse(self, model, training_data, labels):
        """Calculate the residual sum of squares"""
        y_hat = model.classifier.predict(training_data)
        resid = labels - y_hat
        # The small constant keeps the value strictly positive
        # (e.g. for log() in downstream criteria) on a perfect fit
        sse = sum(resid**2) + 0.01
        return sse

    def get_ce(self, model, training_data, labels):
        """Calculate cross entropy for a given model"""
        y_pred = model.classifier.predict_proba(training_data)
        return log_loss(labels, y_pred)

    def get_aic(self, model, training_data, labels):
        """Calculate AIC for a given model

        NOTE(review): this takes the log of the cross-entropy *loss*
        rather than the log-likelihood; kept as-is to preserve the
        existing model selection behavior — confirm against intent.
        """
        loss = self.get_ce(model, training_data, labels)
        k = training_data.shape[1]  # number of variables
        return 2*k - 2*math.log(loss)

    def get_bic(self, model, training_data, labels):
        """Calculate BIC for a given model (same caveat as get_aic)"""
        loss = self.get_ce(model, training_data, labels)
        k = training_data.shape[1]  # number of variables
        n = training_data.shape[0]  # number of observations
        return n*math.log(loss/n) + k*math.log(n)

    def get_labels(self, training_data, cutoffs):
        """Label rows 1 if every feature is >= its cutoff, else 0

        NOTE(review): rows are addressed positionally through to_dict(),
        which assumes a default RangeIndex starting at 0 — verify callers.
        """
        labels = []
        training_data_dict = training_data.copy().to_dict()
        for i in range(len(training_data.index)):
            label = 1
            for key in cutoffs.keys():
                if training_data_dict[key][i] < cutoffs[key]:
                    label = 0
            labels.append(label)
        return labels

    def get_cutoffs(self, training_data, quantiles, features):
        """Get cutoff values based on discard percentages"""
        return {key: training_data[key].quantile(quantiles[key])
                for key in features}

    @staticmethod
    def _load_feature_bounds_and_init(fdict):
        """Load feature boundaries and initial values from config dict

        Returns (feature names, [min, max] quantile bounds per feature,
        initial quantile vector as a numpy array).
        """
        features = []
        bounds = []
        initial = []
        for key, params in fdict.items():
            features.append(key)
            if 'quantiles' in params:
                min_ = params['quantiles'].get('min', 0)
                max_ = params['quantiles'].get('max', 1)
            else:
                min_, max_ = 0, 1
                logger.warning(
                    "No quantile bounds defined for %s, setting to [%s, %s]",
                    key, min_, max_)
            bounds.append([min_, max_])
            if 'initial' in params.get('quantiles', {}):
                init = params['quantiles']['initial']
            else:
                init = 0.1
                logger.warning(
                    "No initial quantile defined for %s, setting to %s",
                    key, init)
            initial.append(init)
        return features, bounds, np.array(initial)

    def find_best_model(self, criterion_name, algorithm='default', options=None):
        """Find the model with the best AIC / BIC / SSE / CE / ROC_AUC

        Optimizes the per-feature cutoff quantiles with the chosen
        algorithm ('none' keeps the initial values, 'default' runs the
        local search below, any other value is passed as a
        scipy.optimize.minimize method name) and returns
        (classifier, criterion value, best quantiles).
        """
        criteria = {'AIC':
                        {'func': self.get_aic, 'best': 'low', 'dev': False},
                    'BIC':
                        {'func': self.get_bic, 'best': 'low', 'dev': False},
                    'SSE':
                        {'func': self.get_sse, 'best': 'low', 'dev': False},
                    'CE':
                        {'func': self.get_ce, 'best': 'low', 'dev': False},
                    'ROC_AUC':
                        {'func': self.get_roc_auc, 'best': 'high', 'dev': True}}
        if criterion_name not in criteria.keys():
            raise ValueError('Invalid criterion. Expected one of: {}'.format(
                list(criteria.keys())))
        criterion = criteria[criterion_name]
        features, bounds, initial = self._load_feature_bounds_and_init(
            self.feature_config)

        def cost(qvector):
            """Objective: label data with the quantiles, train, evaluate"""
            best_quantiles = {key: value for key, value in zip(features, qvector)}
            logger.info('Training logistic regression model with quantiles:\n'
                        '{}'.format(
                            '\n'.join('* {}: {}'.format(*t)
                                      for t in best_quantiles.items())))
            if any(q == 0 for q in best_quantiles.values()):
                # Remove unused features (quantile 0 disables a feature)
                df_train_copy = self.df_training_data.copy()
                if self.dev_data is not None:
                    df_dev_copy = self.dev_data.copy()
                active = set(features)
                for key, value in best_quantiles.items():
                    if value == 0:
                        df_train_copy.pop(key)
                        if self.dev_data is not None:
                            df_dev_copy.pop(key)
                        active.remove(key)
            else:
                df_train_copy = self.df_training_data
                df_dev_copy = self.dev_data
                active = set(features)
            cutoffs = self.get_cutoffs(
                df_train_copy, best_quantiles, active)
            labels = self.get_labels(df_train_copy, cutoffs)
            counts = collections.Counter(labels)
            logger.info("Label counts in data: %s", counts)
            if len(counts) > 1:
                LR = self.train_classifier(df_train_copy, labels)
                if criterion['dev']:
                    crit_value = criterion['func'](LR, df_dev_copy)
                else:
                    crit_value = criterion['func'](LR, df_train_copy, labels)
            else:
                # Degenerate labeling (all 0 or all 1): cannot train, so
                # return the worst possible value for the criterion
                crit_value = np.inf if criterion['best'] == 'low' else -np.inf
            logger.info('Model {crit}: {value}'.format(
                crit=criterion_name, value=crit_value))
            return crit_value if criterion['best'] == 'low' else -crit_value

        if options is None:
            options = {}
        if algorithm == 'none':
            # Use initial values
            best_quantiles = {key: value for key, value in zip(features, initial)}
        elif algorithm == 'default':
            # Default local search with multiplicative updates
            res = self.default_search(cost, initial, bounds=bounds, **options)
            best_quantiles = {key: value for key, value in zip(features, res)}
        else:
            # Use optimization algorithm from scipy
            res = scipy.optimize.minimize(
                cost, initial, method=algorithm, bounds=bounds, options=options)
            best_quantiles = {key: value for key, value in zip(features, res.x)}
        # Retrain the final model with the best quantiles found
        df_train_copy = self.df_training_data.copy()
        if self.dev_data is not None:
            df_dev_copy = self.dev_data.copy()
        active = set(features)
        for key, value in best_quantiles.items():
            if value == 0:
                df_train_copy.pop(key)
                if self.dev_data is not None:
                    df_dev_copy.pop(key)
                active.remove(key)
        cutoffs = self.get_cutoffs(
            df_train_copy, best_quantiles, active)
        labels = self.get_labels(df_train_copy, cutoffs)
        LR = self.train_classifier(df_train_copy, labels)
        if criterion['dev']:
            crit_value = criterion['func'](LR, df_dev_copy)
        else:
            crit_value = criterion['func'](LR, df_train_copy, labels)
        return LR, crit_value, best_quantiles

    @staticmethod
    def default_search(costfunc, initial, bounds=None, step_coef=1.25):
        """Local search algorithm with multiplicative updates

        For each coordinate in turn, tries dividing and then multiplying
        its value by step_coef within the bounds, keeping any move that
        lowers the cost; stops when no coordinate can be improved.
        """
        if bounds is None:
            bounds = [(0, 1) for _ in range(len(initial))]
        cur_x = initial.copy()
        cur_cost = costfunc(cur_x)
        while True:
            no_change = 0
            for fidx in range(len(initial)):
                new_x = cur_x.copy()
                if new_x[fidx] / step_coef >= bounds[fidx][0]:
                    new_x[fidx] /= step_coef
                    cost = costfunc(new_x)
                    if cost < cur_cost:
                        cur_cost = cost
                        cur_x = new_x
                        continue
                new_x = cur_x.copy()
                if new_x[fidx] * step_coef <= bounds[fidx][1]:
                    new_x[fidx] *= step_coef
                    cost = costfunc(new_x)
                    if cost < cur_cost:
                        cur_cost = cost
                        cur_x = new_x
                        continue
                no_change += 1
            if no_change == len(initial):
                return cur_x
| [
"logging.getLogger",
"opustools.util.file_open",
"json.loads",
"sklearn.metrics.roc_auc_score",
"collections.Counter",
"numpy.array",
"math.log",
"sklearn.metrics.log_loss",
"pandas.DataFrame",
"sklearn.metrics.accuracy_score",
"pandas.io.json.json_normalize",
"sklearn.metrics.confusion_matrix... | [((372, 399), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (389, 399), False, 'import logging\n'), ((1666, 1680), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1678, 1680), True, 'import pandas as pd\n'), ((522, 542), 'opustools.util.file_open', 'file_open', (['data_file'], {}), '(data_file)\n', (531, 542), False, 'from opustools.util import file_open\n'), ((784, 804), 'pandas.io.json.json_normalize', 'json_normalize', (['data'], {}), '(data)\n', (798, 804), False, 'from pandas.io.json import json_normalize\n'), ((1029, 1049), 'opustools.util.file_open', 'file_open', (['data_file'], {}), '(data_file)\n', (1038, 1049), False, 'from opustools.util import file_open\n'), ((7975, 8018), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['self.dev_labels', 'probs[:, 1]'], {}), '(self.dev_labels, probs[:, 1])\n', (7988, 8018), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score, log_loss\n'), ((8450, 8474), 'sklearn.metrics.log_loss', 'log_loss', (['labels', 'y_pred'], {}), '(labels, y_pred)\n', (8458, 8474), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score, log_loss\n'), ((10864, 10881), 'numpy.array', 'np.array', (['initial'], {}), '(initial)\n', (10872, 10881), True, 'import numpy as np\n'), ((3754, 3782), 'opustools.util.file_open', 'file_open', (['output_fname', '"""w"""'], {}), "(output_fname, 'w')\n", (3763, 3782), False, 'from opustools.util import file_open\n'), ((4760, 4788), 'opustools.util.file_open', 'file_open', (['output_fname', '"""w"""'], {}), "(output_fname, 'w')\n", (4769, 4788), False, 'from opustools.util import file_open\n'), ((13222, 13249), 'collections.Counter', 'collections.Counter', (['labels'], {}), '(labels)\n', (13241, 13249), False, 'import collections\n'), ((8711, 8725), 'math.log', 'math.log', (['loss'], {}), '(loss)\n', (8719, 8725), False, 'import math\n'), ((9035, 9053), 'math.log', 
'math.log', (['(loss / n)'], {}), '(loss / n)\n', (9043, 9053), False, 'import math\n'), ((9056, 9067), 'math.log', 'math.log', (['n'], {}), '(n)\n', (9064, 9067), False, 'import math\n'), ((625, 641), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (635, 641), False, 'import json\n'), ((1485, 1505), 'pandas.io.json.json_normalize', 'json_normalize', (['data'], {}), '(data)\n', (1499, 1505), False, 'from pandas.io.json import json_normalize\n'), ((1230, 1246), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (1240, 1246), False, 'import json\n'), ((4102, 4137), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['true_labels', 'labels'], {}), '(true_labels, labels)\n', (4116, 4137), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score, log_loss\n'), ((4196, 4233), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['true_labels', 'labels'], {}), '(true_labels, labels)\n', (4212, 4233), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score, log_loss\n'), ((5113, 5153), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['true_labels', 'probas[:, 1]'], {}), '(true_labels, probas[:, 1])\n', (5126, 5153), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score, log_loss\n')] |
# Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import unittest
import os
import numpy as np
from Orange.data import io, ContinuousVariable, DiscreteVariable, Table
def get_dataset(name):
    """Return the path of an Excel test file shipped in ``xlsx_files`` next to this module."""
    base = os.path.dirname(__file__)
    return os.path.join(base, "xlsx_files", name)
def read_file(name):
    """Load the named Excel test file through Orange's ExcelReader and return the Table."""
    reader = io.ExcelReader(get_dataset(name))
    return reader.read()
class TestExcelHeader0(unittest.TestCase):
    def test_read(self):
        """A header-less sheet yields four auto-named continuous features and no class."""
        table = read_file("header_0.xlsx")
        domain = table.domain
        self.assertIsNone(domain.class_var)
        self.assertEqual(len(domain.metas), 0)
        self.assertEqual(len(domain.attributes), 4)
        for position, attribute in enumerate(domain.attributes, start=1):
            self.assertIsInstance(attribute, ContinuousVariable)
            self.assertEqual(attribute.name, "Feature {}".format(position))
        expected = np.array([[0.1, 0.5, 0.1, 21],
                             [0.2, 0.1, 2.5, 123],
                             [0, 0, 0, 0]])
        np.testing.assert_almost_equal(table.X, expected)
        self.assertEqual(table.name, "header_0")
class TextExcelSheets(unittest.TestCase):
    # NOTE(review): the class name looks like a typo for "TestExcelSheets";
    # unittest still collects it because discovery keys on TestCase
    # subclasses, not class names. Kept unchanged to avoid renaming a
    # public identifier.
    def setUp(self):
        self.reader = io.ExcelReader(get_dataset("header_0_sheet.xlsx"))
    def test_sheets(self):
        """The workbook exposes all three sheet names, in order."""
        self.assertSequenceEqual(
            self.reader.sheets, ["Sheet1", "my_sheet", "Sheet3"])
    def test_named_sheet(self):
        """Selecting a sheet by name reads that sheet's data."""
        self.reader.select_sheet("my_sheet")
        table = self.reader.read()
        self.assertEqual(len(table.domain.attributes), 4)
        self.assertEqual(table.name, "header_0_sheet-my_sheet")
    def test_named_sheet_table(self):
        """Table.from_file forwards the ``sheet`` argument to the reader."""
        table = Table.from_file(get_dataset("header_0_sheet.xlsx"), sheet="my_sheet")
        self.assertEqual(len(table.domain.attributes), 4)
        self.assertEqual(table.name, "header_0_sheet-my_sheet")
class TestExcelHeader1(unittest.TestCase):
    """Tests for one-row-header Excel files, with and without type/role flags."""
    def test_no_flags(self):
        """Without flags, column types are inferred and all columns are attributes."""
        table = read_file("header_1_no_flags.xlsx")
        domain = table.domain
        self.assertEqual(len(domain.metas), 0)
        self.assertEqual(len(domain.attributes), 4)
        self.assertIsInstance(domain[0], DiscreteVariable)
        self.assertIsInstance(domain[1], ContinuousVariable)
        self.assertIsInstance(domain[2], DiscreteVariable)
        self.assertIsInstance(domain[3], ContinuousVariable)
        # Column names come straight from the header row: "a", "b", "c", ...
        for i, var in enumerate(domain.variables):
            self.assertEqual(var.name, chr(97 + i))
        self.assertEqual(domain[0].values, ["green", "red"])
        np.testing.assert_almost_equal(
            table.X, np.array([[1, 0.5, 0, 21], [1, 0.1, 0, 123], [0, 0, np.nan, 0]])
        )
        # No class variable inferred, so Y is an empty (3, 0) array.
        np.testing.assert_equal(table.Y, np.array([]).reshape(3, 0))
    def test_flags(self):
        """Header flags assign columns to attribute/class/meta roles."""
        table = read_file("header_1_flags.xlsx")
        domain = table.domain
        self.assertEqual(len(domain.attributes), 1)
        attr = domain.attributes[0]
        self.assertEqual(attr.name, "d")
        self.assertIsInstance(attr, ContinuousVariable)
        np.testing.assert_almost_equal(table.X, np.arange(23).reshape(23, 1))
        self.assertEqual(len(domain.class_vars), 1)
        class_ = domain.class_var
        self.assertEqual(class_.name, "b")
        self.assertIsInstance(class_, ContinuousVariable)
        np.testing.assert_almost_equal(
            table.Y, np.array([0.5, 0.1, 0, 0] * 5 + [0.5, 0.1, 0])
        )
        # Three meta columns named "a", "c", "f".
        self.assertEqual(len(domain.metas), 3)
        for n, var in zip("acf", domain.metas):
            self.assertEqual(var.name, n)
        self.assertIsInstance(domain.metas[0], DiscreteVariable)
        self.assertEqual(domain.metas[0].values, ["green", "red"])
        self.assertIsInstance(domain.metas[1], ContinuousVariable)
        np.testing.assert_almost_equal(
            table.metas[:, 0], np.array([1, 1, 0] * 7 + [1, 1])
        )
        np.testing.assert_almost_equal(
            table.metas[:, 1], np.array([0, 1, 2, 3] * 5 + [0, 1, 2])
        )
class TestExcelHeader3(unittest.TestCase):
    """Tests for three-row-header Excel files (name, type, and flag rows)."""
    def test_read(self):
        """Columns are routed to attribute/class/meta roles per the flag row."""
        table = read_file("header_3.xlsx")
        domain = table.domain
        self.assertEqual(len(domain.attributes), 2)
        attr = domain.attributes[0]
        self.assertEqual(attr.name, "d")
        self.assertIsInstance(attr, ContinuousVariable)
        np.testing.assert_almost_equal(table.X[:, 0], np.arange(23))
        attr = domain.attributes[1]
        self.assertEqual(attr.name, "g")
        self.assertIsInstance(attr, DiscreteVariable)
        # Only the first two rows have a value for "g"; the rest are missing.
        np.testing.assert_almost_equal(
            table.X[:, 1], np.array([1, 0] + [float("nan")] * 21)
        )
        self.assertEqual(len(domain.class_vars), 1)
        class_ = domain.class_var
        self.assertEqual(class_.name, "b")
        self.assertIsInstance(class_, ContinuousVariable)
        np.testing.assert_almost_equal(
            table.Y, np.array([0.5, 0.1, 0, 0] * 5 + [0.5, 0.1, 0])
        )
        # Three meta columns named "a", "c", "f".
        self.assertEqual(len(domain.metas), 3)
        for n, var in zip("acf", domain.metas):
            self.assertEqual(var.name, n)
        self.assertIsInstance(domain.metas[0], DiscreteVariable)
        self.assertEqual(domain.metas[0].values, ["green", "red"])
        self.assertIsInstance(domain.metas[1], ContinuousVariable)
        np.testing.assert_almost_equal(
            table.metas[:, 0], np.array([1, 1, 0] * 7 + [1, 1])
        )
        np.testing.assert_almost_equal(
            table.metas[:, 1], np.array([0, 1, 2, 3] * 5 + [0, 1, 2])
        )
        # Third meta is a string column "a".."w".
        np.testing.assert_equal(
            table.metas[:, 2], np.array(list("abcdefghijklmnopqrstuvw"))
        )
# Allow running this test module directly (``python <file>``) as well as
# through a test runner.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"os.path.dirname",
"numpy.array",
"numpy.arange"
] | [((5559, 5574), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5572, 5574), False, 'import unittest\n'), ((268, 293), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (283, 293), False, 'import os\n'), ((915, 982), 'numpy.array', 'np.array', (['[[0.1, 0.5, 0.1, 21], [0.2, 0.1, 2.5, 123], [0, 0, 0, 0]]'], {}), '([[0.1, 0.5, 0.1, 21], [0.2, 0.1, 2.5, 123], [0, 0, 0, 0]])\n', (923, 982), True, 'import numpy as np\n'), ((2497, 2561), 'numpy.array', 'np.array', (['[[1, 0.5, 0, 21], [1, 0.1, 0, 123], [0, 0, np.nan, 0]]'], {}), '([[1, 0.5, 0, 21], [1, 0.1, 0, 123], [0, 0, np.nan, 0]])\n', (2505, 2561), True, 'import numpy as np\n'), ((3260, 3306), 'numpy.array', 'np.array', (['([0.5, 0.1, 0, 0] * 5 + [0.5, 0.1, 0])'], {}), '([0.5, 0.1, 0, 0] * 5 + [0.5, 0.1, 0])\n', (3268, 3306), True, 'import numpy as np\n'), ((3725, 3757), 'numpy.array', 'np.array', (['([1, 1, 0] * 7 + [1, 1])'], {}), '([1, 1, 0] * 7 + [1, 1])\n', (3733, 3757), True, 'import numpy as np\n'), ((3839, 3877), 'numpy.array', 'np.array', (['([0, 1, 2, 3] * 5 + [0, 1, 2])'], {}), '([0, 1, 2, 3] * 5 + [0, 1, 2])\n', (3847, 3877), True, 'import numpy as np\n'), ((4271, 4284), 'numpy.arange', 'np.arange', (['(23)'], {}), '(23)\n', (4280, 4284), True, 'import numpy as np\n'), ((4782, 4828), 'numpy.array', 'np.array', (['([0.5, 0.1, 0, 0] * 5 + [0.5, 0.1, 0])'], {}), '([0.5, 0.1, 0, 0] * 5 + [0.5, 0.1, 0])\n', (4790, 4828), True, 'import numpy as np\n'), ((5247, 5279), 'numpy.array', 'np.array', (['([1, 1, 0] * 7 + [1, 1])'], {}), '([1, 1, 0] * 7 + [1, 1])\n', (5255, 5279), True, 'import numpy as np\n'), ((5361, 5399), 'numpy.array', 'np.array', (['([0, 1, 2, 3] * 5 + [0, 1, 2])'], {}), '([0, 1, 2, 3] * 5 + [0, 1, 2])\n', (5369, 5399), True, 'import numpy as np\n'), ((2613, 2625), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2621, 2625), True, 'import numpy as np\n'), ((2981, 2994), 'numpy.arange', 'np.arange', (['(23)'], {}), '(23)\n', (2990, 2994), True, 'import numpy as 
np\n')] |
import unittest
import numpy as np
import numpy.testing as npt
import tensorflow as tf
from experiment.qa.model.helper.pooling_helper import non_zero_tokens, attention_softmax, soft_alignment, \
attentive_pooling_weights, weighted_pooling
class TestPoolingHelper(unittest.TestCase):
    """Tests for the QA pooling helpers (padding masks, masked softmax,
    soft alignment and attentive pooling), run through a TF1-style
    InteractiveSession."""
    def setUp(self):
        # Fresh session per test; closed again in tearDown.
        self.sess = tf.InteractiveSession()
    def tearDown(self):
        self.sess.close()
    def test_non_zero_tokens(self):
        """non_zero_tokens yields a binary mask flagging non-padding positions."""
        tokens = tf.constant([
            [24., 22., 11234., 0., 0.],
            [31., 0., 0., 0., 0.]
        ])
        result = self.sess.run(non_zero_tokens(tokens))
        reference_value = np.array([
            [1., 1., 1., 0., 0.],
            [1., 0., 0., 0., 0.]
        ])
        npt.assert_array_equal(result, reference_value)
    def test_attention_softmax(self):
        """attention_softmax normalizes only over unpadded positions."""
        vector_in = tf.constant([
            [1., 2., 1., 2.0],
            [.3, .2, .9, .3]
        ])
        padding = tf.constant([
            [1., 1., 1., 0.],
            [1., 1., 0., 0.]
        ])
        result = self.sess.run(attention_softmax(vector_in, padding))
        # Rows sum to 1 over the unpadded entries; padded entries are 0.
        reference_value = np.array([
            [0.21194156, 0.57611692, 0.21194156, 0.],
            [0.52497919, 0.47502081, 0., 0.]
        ])
        npt.assert_array_almost_equal(result, reference_value)
    def test_soft_alignment(self):
        """Tests the soft alignment function and its capability to handle minibatches with zero-padding"""
        U_AP = tf.constant(
            [
                [1., 1.],
                [1., 1.]
            ]
        )
        raw_question_rep = tf.constant(
            [[
                [.2, .7],
                [.4, .8],
                [.1, .9],
                [.7, .8]
            ]] * 2
        )
        raw_answer_rep = tf.constant(
            [[
                [.3, .9],
                [.5, .9],
                [.7, .6],
                [.9, .7]
            ]] * 2
        )
        # Question has 2 real tokens, answer has 3; the rest is padding.
        tokens_question_non_zero = tf.constant(
            [
                [1., 1., 0., 0.]
            ] * 2
        )
        tokens_answer_non_zero = tf.constant(
            [
                [1., 1., 1., 0.]
            ] * 2
        )
        result = self.sess.run(soft_alignment(
            U_AP, raw_question_rep, raw_answer_rep, tokens_question_non_zero, tokens_answer_non_zero
        ))
        # QU = [[0.9, 0.9], [1.2, 1.2]]
        # QU(A^T) = [[1.08, 1.26, 1.17], [1.44, 1.68, 1.56]]
        # tanh(...) = [[0.7931991, 0.85106411, 0.82427217], [0.89369773, 0.93286155, 0.91542046]]
        # Due to padding, the resulting tensor will have a different shape. We verify that the relevant part of the
        # result has the correct values, and the rest holds values less than -1
        reference_value = np.array(
            [[
                [0.7931991, 0.85106411, 0.82427217],
                [0.89369773, 0.93286155, 0.91542046]
            ]] * 2
        )
        npt.assert_array_almost_equal(result[:, 0:2, 0:3], reference_value)
        npt.assert_array_less(result, np.array(
            [[
                [1.01, 1.01, 1.01, -1.],
                [1.01, 1.01, 1.01, -1.],
                [-1., -1., -1., -1.],
                [-1., -1., -1., -1.]
            ]] * 2
        ))
    def test_attentive_pooling(self):
        """Test the full functionality with the same values as before"""
        U_AP = tf.constant(
            [
                [1., 1.],
                [1., 1.]
            ]
        )
        raw_question_rep = tf.constant(
            [[
                [.2, .7],
                [.4, .8],
                [.1, .9],
                [.7, .8]
            ]] * 2
        )
        raw_answer_rep = tf.constant(
            [[
                [.3, .9],
                [.5, .9],
                [.7, .6],
                [.9, .7]
            ]] * 2
        )
        tokens_question = tf.constant(
            [
                [123, 6, 0., 0.]
            ] * 2
        )
        tokens_answer = tf.constant(
            [
                [33, 1, 12, 0.]
            ] * 2
        )
        ap_weights_q, ap_weights_a = attentive_pooling_weights(
            U_AP, raw_question_rep, raw_answer_rep, tokens_question, tokens_answer
        )
        result_repr_q = self.sess.run(weighted_pooling(raw_question_rep, ap_weights_q, tokens_question))
        result_repr_a = self.sess.run(weighted_pooling(raw_answer_rep, ap_weights_a, tokens_answer))
        # tanh(...) = [[0.7931991, 0.85106411, 0.82427217], [0.89369773, 0.93286155, 0.91542046]]
        # max over rows = [[0.85106411, 0.93286155]]
        # max over colums = [[0.89369773, 0.93286155, 0.91542046]]
        # attention question = [ 0.47956203, 0.52043797]
        # attention answer = [ 0.32659447, 0.33963892, 0.33376661]
        # question-rep = [0.304088, 0.752044]
        # answer-rep = [0.501434, 0.79987]
        reference_value_repr_q = np.array(
            [
                [0.304088, 0.752044]
            ] * 2
        )
        reference_value_repr_a = np.array(
            [
                [0.501434, 0.79987]
            ] * 2
        )
        npt.assert_array_almost_equal(result_repr_q, reference_value_repr_q)
        npt.assert_array_almost_equal(result_repr_a, reference_value_repr_a)
| [
"experiment.qa.model.helper.pooling_helper.attentive_pooling_weights",
"tensorflow.InteractiveSession",
"numpy.testing.assert_array_almost_equal",
"experiment.qa.model.helper.pooling_helper.attention_softmax",
"numpy.array",
"tensorflow.constant",
"experiment.qa.model.helper.pooling_helper.soft_alignmen... | [((332, 355), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (353, 355), True, 'import tensorflow as tf\n'), ((461, 535), 'tensorflow.constant', 'tf.constant', (['[[24.0, 22.0, 11234.0, 0.0, 0.0], [31.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[24.0, 22.0, 11234.0, 0.0, 0.0], [31.0, 0.0, 0.0, 0.0, 0.0]])\n', (472, 535), True, 'import tensorflow as tf\n'), ((642, 706), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[1.0, 1.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0, 0.0]])\n', (650, 706), True, 'import numpy as np\n'), ((740, 787), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['result', 'reference_value'], {}), '(result, reference_value)\n', (762, 787), True, 'import numpy.testing as npt\n'), ((847, 904), 'tensorflow.constant', 'tf.constant', (['[[1.0, 2.0, 1.0, 2.0], [0.3, 0.2, 0.9, 0.3]]'], {}), '([[1.0, 2.0, 1.0, 2.0], [0.3, 0.2, 0.9, 0.3]])\n', (858, 904), True, 'import tensorflow as tf\n'), ((950, 1007), 'tensorflow.constant', 'tf.constant', (['[[1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0]]'], {}), '([[1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0]])\n', (961, 1007), True, 'import tensorflow as tf\n'), ((1131, 1225), 'numpy.array', 'np.array', (['[[0.21194156, 0.57611692, 0.21194156, 0.0], [0.52497919, 0.47502081, 0.0, 0.0]]'], {}), '([[0.21194156, 0.57611692, 0.21194156, 0.0], [0.52497919, \n 0.47502081, 0.0, 0.0]])\n', (1139, 1225), True, 'import numpy as np\n'), ((1261, 1315), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['result', 'reference_value'], {}), '(result, reference_value)\n', (1290, 1315), True, 'import numpy.testing as npt\n'), ((1474, 1511), 'tensorflow.constant', 'tf.constant', (['[[1.0, 1.0], [1.0, 1.0]]'], {}), '([[1.0, 1.0], [1.0, 1.0]])\n', (1485, 1511), True, 'import tensorflow as tf\n'), ((1603, 1670), 'tensorflow.constant', 'tf.constant', (['([[[0.2, 0.7], [0.4, 0.8], 
[0.1, 0.9], [0.7, 0.8]]] * 2)'], {}), '([[[0.2, 0.7], [0.4, 0.8], [0.1, 0.9], [0.7, 0.8]]] * 2)\n', (1614, 1670), True, 'import tensorflow as tf\n'), ((1788, 1855), 'tensorflow.constant', 'tf.constant', (['([[[0.3, 0.9], [0.5, 0.9], [0.7, 0.6], [0.9, 0.7]]] * 2)'], {}), '([[[0.3, 0.9], [0.5, 0.9], [0.7, 0.6], [0.9, 0.7]]] * 2)\n', (1799, 1855), True, 'import tensorflow as tf\n'), ((1983, 2022), 'tensorflow.constant', 'tf.constant', (['([[1.0, 1.0, 0.0, 0.0]] * 2)'], {}), '([[1.0, 1.0, 0.0, 0.0]] * 2)\n', (1994, 2022), True, 'import tensorflow as tf\n'), ((2104, 2143), 'tensorflow.constant', 'tf.constant', (['([[1.0, 1.0, 1.0, 0.0]] * 2)'], {}), '([[1.0, 1.0, 1.0, 0.0]] * 2)\n', (2115, 2143), True, 'import tensorflow as tf\n'), ((2776, 2872), 'numpy.array', 'np.array', (['([[[0.7931991, 0.85106411, 0.82427217], [0.89369773, 0.93286155, 0.91542046\n ]]] * 2)'], {}), '([[[0.7931991, 0.85106411, 0.82427217], [0.89369773, 0.93286155, \n 0.91542046]]] * 2)\n', (2784, 2872), True, 'import numpy as np\n'), ((2945, 3012), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['result[:, 0:2, 0:3]', 'reference_value'], {}), '(result[:, 0:2, 0:3], reference_value)\n', (2974, 3012), True, 'import numpy.testing as npt\n'), ((3390, 3427), 'tensorflow.constant', 'tf.constant', (['[[1.0, 1.0], [1.0, 1.0]]'], {}), '([[1.0, 1.0], [1.0, 1.0]])\n', (3401, 3427), True, 'import tensorflow as tf\n'), ((3519, 3586), 'tensorflow.constant', 'tf.constant', (['([[[0.2, 0.7], [0.4, 0.8], [0.1, 0.9], [0.7, 0.8]]] * 2)'], {}), '([[[0.2, 0.7], [0.4, 0.8], [0.1, 0.9], [0.7, 0.8]]] * 2)\n', (3530, 3586), True, 'import tensorflow as tf\n'), ((3704, 3771), 'tensorflow.constant', 'tf.constant', (['([[[0.3, 0.9], [0.5, 0.9], [0.7, 0.6], [0.9, 0.7]]] * 2)'], {}), '([[[0.3, 0.9], [0.5, 0.9], [0.7, 0.6], [0.9, 0.7]]] * 2)\n', (3715, 3771), True, 'import tensorflow as tf\n'), ((3890, 3927), 'tensorflow.constant', 'tf.constant', (['([[123, 6, 0.0, 0.0]] * 2)'], {}), '([[123, 6, 0.0, 
0.0]] * 2)\n', (3901, 3927), True, 'import tensorflow as tf\n'), ((4002, 4037), 'tensorflow.constant', 'tf.constant', (['([[33, 1, 12, 0.0]] * 2)'], {}), '([[33, 1, 12, 0.0]] * 2)\n', (4013, 4037), True, 'import tensorflow as tf\n'), ((4127, 4228), 'experiment.qa.model.helper.pooling_helper.attentive_pooling_weights', 'attentive_pooling_weights', (['U_AP', 'raw_question_rep', 'raw_answer_rep', 'tokens_question', 'tokens_answer'], {}), '(U_AP, raw_question_rep, raw_answer_rep,\n tokens_question, tokens_answer)\n', (4152, 4228), False, 'from experiment.qa.model.helper.pooling_helper import non_zero_tokens, attention_softmax, soft_alignment, attentive_pooling_weights, weighted_pooling\n'), ((4924, 4960), 'numpy.array', 'np.array', (['([[0.304088, 0.752044]] * 2)'], {}), '([[0.304088, 0.752044]] * 2)\n', (4932, 4960), True, 'import numpy as np\n'), ((5046, 5081), 'numpy.array', 'np.array', (['([[0.501434, 0.79987]] * 2)'], {}), '([[0.501434, 0.79987]] * 2)\n', (5054, 5081), True, 'import numpy as np\n'), ((5143, 5211), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['result_repr_q', 'reference_value_repr_q'], {}), '(result_repr_q, reference_value_repr_q)\n', (5172, 5211), True, 'import numpy.testing as npt\n'), ((5220, 5288), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['result_repr_a', 'reference_value_repr_a'], {}), '(result_repr_a, reference_value_repr_a)\n', (5249, 5288), True, 'import numpy.testing as npt\n'), ((591, 614), 'experiment.qa.model.helper.pooling_helper.non_zero_tokens', 'non_zero_tokens', (['tokens'], {}), '(tokens)\n', (606, 614), False, 'from experiment.qa.model.helper.pooling_helper import non_zero_tokens, attention_softmax, soft_alignment, attentive_pooling_weights, weighted_pooling\n'), ((1066, 1103), 'experiment.qa.model.helper.pooling_helper.attention_softmax', 'attention_softmax', (['vector_in', 'padding'], {}), '(vector_in, padding)\n', (1083, 1103), False, 'from 
experiment.qa.model.helper.pooling_helper import non_zero_tokens, attention_softmax, soft_alignment, attentive_pooling_weights, weighted_pooling\n'), ((2224, 2332), 'experiment.qa.model.helper.pooling_helper.soft_alignment', 'soft_alignment', (['U_AP', 'raw_question_rep', 'raw_answer_rep', 'tokens_question_non_zero', 'tokens_answer_non_zero'], {}), '(U_AP, raw_question_rep, raw_answer_rep,\n tokens_question_non_zero, tokens_answer_non_zero)\n', (2238, 2332), False, 'from experiment.qa.model.helper.pooling_helper import non_zero_tokens, attention_softmax, soft_alignment, attentive_pooling_weights, weighted_pooling\n'), ((3051, 3175), 'numpy.array', 'np.array', (['([[[1.01, 1.01, 1.01, -1.0], [1.01, 1.01, 1.01, -1.0], [-1.0, -1.0, -1.0, -\n 1.0], [-1.0, -1.0, -1.0, -1.0]]] * 2)'], {}), '([[[1.01, 1.01, 1.01, -1.0], [1.01, 1.01, 1.01, -1.0], [-1.0, -1.0,\n -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0]]] * 2)\n', (3059, 3175), True, 'import numpy as np\n'), ((4285, 4350), 'experiment.qa.model.helper.pooling_helper.weighted_pooling', 'weighted_pooling', (['raw_question_rep', 'ap_weights_q', 'tokens_question'], {}), '(raw_question_rep, ap_weights_q, tokens_question)\n', (4301, 4350), False, 'from experiment.qa.model.helper.pooling_helper import non_zero_tokens, attention_softmax, soft_alignment, attentive_pooling_weights, weighted_pooling\n'), ((4390, 4451), 'experiment.qa.model.helper.pooling_helper.weighted_pooling', 'weighted_pooling', (['raw_answer_rep', 'ap_weights_a', 'tokens_answer'], {}), '(raw_answer_rep, ap_weights_a, tokens_answer)\n', (4406, 4451), False, 'from experiment.qa.model.helper.pooling_helper import non_zero_tokens, attention_softmax, soft_alignment, attentive_pooling_weights, weighted_pooling\n')] |
import numpy as np
def orbit_to_poincare_polar(orbit):
r"""
Convert an array of 6D Cartesian positions to Poincaré
symplectic polar coordinates. These are similar to cylindrical
coordinates.
Parameters
----------
"""
if orbit.norbits > 1:
raise RuntimeError("Can only use with one orbit.")
R = np.sqrt(orbit.x.value**2 + orbit.y.value**2)
phi = np.arctan2(orbit.x.value, orbit.y.value) # TODO: is this right?
vR = ((orbit.x*orbit.v_x + orbit.y*orbit.v_y) / R).value
Theta = (orbit.x*orbit.v_y - orbit.y*orbit.v_x).value
# pg. 437, Papaphillipou & Laskar (1996)
# http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1996A%26A...307..427P&data_type=PDF_HIGH&whole_paper=YES&type=PRINTER&filetype=.pdf
sqrt_2THETA = np.sqrt(np.abs(2*Theta))
fs = [R+1j*vR,
sqrt_2THETA * (np.cos(phi) + 1j*np.sin(phi)),
orbit.z.value+1j*orbit.v_z.value]
return fs
| [
"numpy.abs",
"numpy.sqrt",
"numpy.arctan2",
"numpy.cos",
"numpy.sin"
] | [((343, 391), 'numpy.sqrt', 'np.sqrt', (['(orbit.x.value ** 2 + orbit.y.value ** 2)'], {}), '(orbit.x.value ** 2 + orbit.y.value ** 2)\n', (350, 391), True, 'import numpy as np\n'), ((398, 438), 'numpy.arctan2', 'np.arctan2', (['orbit.x.value', 'orbit.y.value'], {}), '(orbit.x.value, orbit.y.value)\n', (408, 438), True, 'import numpy as np\n'), ((822, 839), 'numpy.abs', 'np.abs', (['(2 * Theta)'], {}), '(2 * Theta)\n', (828, 839), True, 'import numpy as np\n'), ((884, 895), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (890, 895), True, 'import numpy as np\n'), ((901, 912), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (907, 912), True, 'import numpy as np\n')] |
from __future__ import print_function
import sys
import os
import numpy as np
import pandas as pd
from scipy.stats import norm
from sklearn.utils.extmath import cartesian
from nilmtk.feature_detectors.steady_states import cluster
def statesCombinations(meterlist):
    """Returns all possible levels of the aggregated signal, by finding all
    combinations of all possible appliance states

    Args:
        meterlist (list): List of paths to Tracebase directories of each meter

    Returns:
        list: all possible levels
    """
    max_states = 7
    # One clustered state set per meter, then every cross-meter combination.
    per_meter_states = [
        cluster(power_series(meter), max_num_clusters=max_states)
        for meter in meterlist
    ]
    return np.sum(cartesian(per_meter_states), axis=1)
def power_series(folder):
    """Create a power series DataFrame from a folder of Tracebase trace files.

    Reads every file in ``folder`` whose name starts with ``'dev'``, extracts
    the power value (third ``;``-separated column) from each line, and
    concatenates all readings into a single-column DataFrame.

    Args:
        folder (str): Path to the folder containing the trace files

    Returns:
        Pandas DataFrame: column ``'power'``, integer index ``0..N-1``
        (file order follows ``os.listdir``, which is platform-dependent).
    """
    files = [fn for fn in os.listdir(folder) if fn.startswith('dev')]
    # Collect per-file arrays and concatenate once at the end instead of
    # calling np.append in a loop, which re-copies the array each iteration
    # (accidental O(n^2)).
    chunks = [
        # atleast_1d guards against single-line files, for which loadtxt
        # returns a 0-d array.
        np.atleast_1d(np.loadtxt(os.path.join(folder, f), delimiter=';',
                                 usecols=(2,)))
        for f in files
    ]
    a = np.concatenate(chunks) if chunks else np.array([])
    return pd.DataFrame(data=a, index=range(len(a)), columns=['power'])
def compute(meterpaths):
    """Computes the power disaggregation complexity as described in
    https://arxiv.org/pdf/1501.02954.pdf

    Args:
        meterpaths (list of str): A list of paths to folders in the Tracebase dataset

    Returns:
        (float, float): (max, mean) disaggregation complexity of the given set of meters
    """
    std = 5
    print("Finding appliance states...")
    # Every aggregate power level the appliance set can produce.
    P = statesCombinations(meterpaths)
    Pm = np.max(P)
    print("Computing complexity for each state...")
    n_states = len(P)
    C = np.zeros(n_states)
    grid = np.linspace(0, Pm, 1000)
    # C_k accumulates the pairwise Gaussian overlap between state k and
    # every other state (index 0 is skipped; C[0] stays 0 and is excluded
    # from the final statistics).
    for k in range(1, n_states):
        print(" {} of {}".format(k + 1, n_states), end="\r")
        sys.stdout.flush()
        for j in range(1, n_states):
            overlap = np.minimum(norm.pdf(grid, P[k], std), norm.pdf(grid, P[j], std))
            C[k] = C[k] + np.trapz(overlap, grid)
    return np.max(C[1:]), np.mean(C[1:])
| [
"numpy.mean",
"os.listdir",
"numpy.trapz",
"sklearn.utils.extmath.cartesian",
"os.path.join",
"numpy.max",
"numpy.array",
"numpy.linspace",
"scipy.stats.norm.pdf",
"sys.stdout.flush"
] | [((1064, 1076), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1072, 1076), True, 'import numpy as np\n'), ((1758, 1767), 'numpy.max', 'np.max', (['P'], {}), '(P)\n', (1764, 1767), True, 'import numpy as np\n'), ((1887, 1911), 'numpy.linspace', 'np.linspace', (['(0)', 'Pm', '(1000)'], {}), '(0, Pm, 1000)\n', (1898, 1911), True, 'import numpy as np\n'), ((733, 750), 'sklearn.utils.extmath.cartesian', 'cartesian', (['states'], {}), '(states)\n', (742, 750), False, 'from sklearn.utils.extmath import cartesian\n'), ((2006, 2024), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2022, 2024), False, 'import sys\n'), ((2191, 2204), 'numpy.max', 'np.max', (['C[1:]'], {}), '(C[1:])\n', (2197, 2204), True, 'import numpy as np\n'), ((2206, 2220), 'numpy.mean', 'np.mean', (['C[1:]'], {}), '(C[1:])\n', (2213, 2220), True, 'import numpy as np\n'), ((1012, 1030), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (1022, 1030), False, 'import os\n'), ((1134, 1157), 'os.path.join', 'os.path.join', (['folder', 'f'], {}), '(folder, f)\n', (1146, 1157), False, 'import os\n'), ((2087, 2110), 'scipy.stats.norm.pdf', 'norm.pdf', (['x1', 'P[k]', 'std'], {}), '(x1, P[k], std)\n', (2095, 2110), False, 'from scipy.stats import norm\n'), ((2112, 2135), 'scipy.stats.norm.pdf', 'norm.pdf', (['x1', 'P[j]', 'std'], {}), '(x1, P[j], std)\n', (2120, 2135), False, 'from scipy.stats import norm\n'), ((2163, 2179), 'numpy.trapz', 'np.trapz', (['y1', 'x1'], {}), '(y1, x1)\n', (2171, 2179), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
blissops.numpyops
=================
Somewhat async image manipulation library
created for use within the bliss Discord bot.
Makes use of numpy and wand and takes BytesIO
as an input and as an output.
:copyright: (c) 2019 Liam (ir-3) H.
:license: MIT, see LICENSE for more details.
"""
from io import BytesIO
import skimage
import skimage.transform
import numpy as np
from skimage.exposure import rescale_intensity
from skimage.color.adapt_rgb import adapt_rgb, each_channel
import skimage.segmentation
import skimage.filters
import matplotlib.pyplot as plt
from skimage import io
async def bytes_to_np(img_bytes: BytesIO):
    """Decode a BytesIO holding an encoded image into a numpy ndarray
    using :func:`skimage.io.imread`."""
    return skimage.io.imread(img_bytes)
async def np_to_bytes(img_bytes: BytesIO):
    """Encode an image array into a BytesIO buffer via matplotlib.

    Note: despite the parameter name and annotation, ``img_bytes`` is the
    ``np.ndarray`` image produced by the other helpers in this module.
    """
    buffer = BytesIO()
    plt.imsave(buffer, img_bytes)
    buffer.seek(0)
    return buffer
def _sort(img: np.ndarray):
shape = img.shape
img = img.reshape((img.shape[0] * img.shape[1], img.shape[2]))
img.sort(0)
return img.reshape(shape)
# If you are seeing this and think that you can make
# the selection of characters better, please do.
# this is just some random one I found online and
# made into a dictionary.
_ascii_characters = {
0: " ", 1: ".", 2: "'", 3: "`",
4: "^", 5: "\"", 6: ",", 7: ":",
8: ";", 9: "I", 10: "1", 11: "!",
12: "i", 13: ">", 14: "<", 15: "~",
16: "+", 17: "?", 18: "]", 19: "[",
20: "}", 21: "{", 22: "]", 23: "[",
24: "|", 25: "/", 26: "\\", 27: "t",
28: "x", 29: "n", 30: "u", 31: "v",
32: "z", 33: "X", 34: "Y", 35: "U",
36: "J", 37: "C", 38: "L", 39: "Q",
40: "0", 41: "O", 42: "Z", 43: "#",
44: "M", 45: "W", 46: "&", 47: "8",
48: "%", 49: "B", 50: "@", 51: "@"
}
def _ascii_art(img: np.ndarray):
ascii_art = ""
for i_row in range(0, img.shape[0], 2):
row = img[i_row]
ascii_art += "\n"
for col in row:
avg = int(col[0]) + int(col[1]) + int(col[2])
avg = int(avg / 3)
ascii_art += _ascii_characters[int(avg / 5)]
return ascii_art
def _sobel(img: np.ndarray):
    """Apply a per-channel Sobel edge filter and return the inverted,
    intensity-rescaled result."""
    @adapt_rgb(each_channel)
    def _edge_channel(channel):
        return skimage.filters.sobel(channel)
    return rescale_intensity(255 - _edge_channel(img) * 255)
def _shuffle(img: np.ndarray):
shape = img.shape
img = img.reshape((img.shape[0] * img.shape[1], img.shape[2]))
np.random.shuffle(img)
return img.reshape(shape)
| [
"matplotlib.pyplot.imsave",
"skimage.filters.sobel",
"io.BytesIO",
"skimage.io.imread",
"skimage.color.adapt_rgb.adapt_rgb",
"numpy.random.shuffle"
] | [((782, 810), 'skimage.io.imread', 'skimage.io.imread', (['img_bytes'], {}), '(img_bytes)\n', (799, 810), False, 'import skimage\n'), ((994, 1003), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1001, 1003), False, 'from io import BytesIO\n'), ((1008, 1032), 'matplotlib.pyplot.imsave', 'plt.imsave', (['b', 'img_bytes'], {}), '(b, img_bytes)\n', (1018, 1032), True, 'import matplotlib.pyplot as plt\n'), ((2320, 2343), 'skimage.color.adapt_rgb.adapt_rgb', 'adapt_rgb', (['each_channel'], {}), '(each_channel)\n', (2329, 2343), False, 'from skimage.color.adapt_rgb import adapt_rgb, each_channel\n'), ((2602, 2624), 'numpy.random.shuffle', 'np.random.shuffle', (['img'], {}), '(img)\n', (2619, 2624), True, 'import numpy as np\n'), ((2387, 2415), 'skimage.filters.sobel', 'skimage.filters.sobel', (['image'], {}), '(image)\n', (2408, 2415), False, 'import skimage\n')] |
import numpy as np
class PointSourceParam(object):
    """Sampler-parameter handler for point source models.

    Converts between the flat argument list used by samplers and the list of
    keyword-argument dicts used by the point source model classes.  Supported
    model names are 'LENSED_POSITION', 'UNLENSED' and 'SOURCE_POSITION'.
    """
    def __init__(self, model_list, kwargs_fixed, num_point_source_list=None, linear_solver=True,
                 fixed_magnification_list=None, kwargs_lower=None, kwargs_upper=None):
        """
        :param model_list: list of point source model names
        :param kwargs_fixed: list of keyword arguments with parameters to be held fixed
        :param num_point_source_list: list of number of point sources per point source model class
        :param linear_solver: bool, if True, does not return linear parameters for the sampler
        (will be solved linearly instead)
        :param fixed_magnification_list: list of booleans, if entry is True, keeps one overall scaling among the
        point sources in this class
        :param kwargs_lower: list of dicts with per-parameter lower bounds (defaults generated if None)
        :param kwargs_upper: list of dicts with per-parameter upper bounds (defaults generated if None)
        """
        self.model_list = model_list
        if num_point_source_list is None:
            num_point_source_list = [0] * len(model_list)
        self._num_point_sources_list = num_point_source_list
        self.kwargs_fixed = kwargs_fixed
        if linear_solver is True:
            # amplitudes are solved for linearly, so fix them for the sampler
            self.kwargs_fixed = self.add_fix_linear(kwargs_fixed)
        self._linear_solver = linear_solver
        # bug fix: an explicitly passed fixed_magnification_list used to be
        # silently dropped (the attribute was only assigned in the None branch),
        # which made num_param_linear() raise AttributeError
        if fixed_magnification_list is None:
            fixed_magnification_list = [False] * len(model_list)
        self._fixed_magnification_list = fixed_magnification_list
        if kwargs_lower is None:
            kwargs_lower = []
            for k, model in enumerate(self.model_list):
                num = self._num_point_sources_list[k]
                if model in ['LENSED_POSITION', 'UNLENSED']:
                    fixed_low = {'ra_image': [-100] * num, 'dec_image': [-100] * num, 'point_amp': [0] * num}
                elif model in ['SOURCE_POSITION']:
                    fixed_low = {'ra_source': -100, 'dec_source': -100, 'point_amp': 0}
                else:
                    raise ValueError("%s not a valid point source model" % model)
                kwargs_lower.append(fixed_low)
        if kwargs_upper is None:
            kwargs_upper = []
            for k, model in enumerate(self.model_list):
                num = self._num_point_sources_list[k]
                if model in ['LENSED_POSITION', 'UNLENSED']:
                    fixed_high = {'ra_image': [100] * num, 'dec_image': [100] * num, 'point_amp': [100] * num}
                elif model in ['SOURCE_POSITION']:
                    fixed_high = {'ra_source': 100, 'dec_source': 100, 'point_amp': 100}
                else:
                    raise ValueError("%s not a valid point source model" % model)
                kwargs_upper.append(fixed_high)
        self.lower_limit = kwargs_lower
        self.upper_limit = kwargs_upper
    def getParams(self, args, i):
        """Read the point source parameters from a flat argument list.

        :param args: flat list/array of sampled parameter values
        :param i: index of the first entry in args belonging to the point sources
        :return: (list of kwargs dicts, index of the first unread argument)
        """
        kwargs_list = []
        for k, model in enumerate(self.model_list):
            kwargs = {}
            kwargs_fixed = self.kwargs_fixed[k]
            if model in ['LENSED_POSITION', 'UNLENSED']:
                # image-plane models carry one value per point source
                if not 'ra_image' in kwargs_fixed:
                    kwargs['ra_image'] = np.array(args[i:i + self._num_point_sources_list[k]])
                    i += self._num_point_sources_list[k]
                else:
                    kwargs['ra_image'] = kwargs_fixed['ra_image']
                if not 'dec_image' in kwargs_fixed:
                    kwargs['dec_image'] = np.array(args[i:i + self._num_point_sources_list[k]])
                    i += self._num_point_sources_list[k]
                else:
                    kwargs['dec_image'] = kwargs_fixed['dec_image']
                if not 'point_amp' in kwargs_fixed:
                    kwargs['point_amp'] = np.array(args[i:i + self._num_point_sources_list[k]])
                    i += self._num_point_sources_list[k]
                else:
                    kwargs['point_amp'] = kwargs_fixed['point_amp']
            if model in ['SOURCE_POSITION']:
                # source-plane models carry a single scalar per parameter
                if not 'ra_source' in kwargs_fixed:
                    kwargs['ra_source'] = args[i]
                    i += 1
                else:
                    kwargs['ra_source'] = kwargs_fixed['ra_source']
                if not 'dec_source' in kwargs_fixed:
                    kwargs['dec_source'] = args[i]
                    i += 1
                else:
                    kwargs['dec_source'] = kwargs_fixed['dec_source']
                if not 'point_amp' in kwargs_fixed:
                    kwargs['point_amp'] = args[i]
                    i += 1
                else:
                    kwargs['point_amp'] = kwargs_fixed['point_amp']
            kwargs_list.append(kwargs)
        return kwargs_list, i
    def setParams(self, kwargs_list):
        """Flatten the non-fixed point source parameters into an argument list.

        :param kwargs_list: list of kwargs dicts, one per point source model
        :return: flat list of parameter values (fixed parameters excluded)
        """
        args = []
        for k, model in enumerate(self.model_list):
            kwargs = kwargs_list[k]
            kwargs_fixed = self.kwargs_fixed[k]
            if model in ['LENSED_POSITION', 'UNLENSED']:
                if not 'ra_image' in kwargs_fixed:
                    x_pos = kwargs['ra_image'][0:self._num_point_sources_list[k]]
                    for x in x_pos:
                        args.append(x)
                if not 'dec_image' in kwargs_fixed:
                    y_pos = kwargs['dec_image'][0:self._num_point_sources_list[k]]
                    for y in y_pos:
                        args.append(y)
                if not 'point_amp' in kwargs_fixed:
                    amp = kwargs['point_amp'][0:self._num_point_sources_list[k]]
                    for a in amp:
                        args.append(a)
            if model in ['SOURCE_POSITION']:
                if not 'ra_source' in kwargs_fixed:
                    args.append(kwargs['ra_source'])
                if not 'dec_source' in kwargs_fixed:
                    args.append(kwargs['dec_source'])
                if not 'point_amp' in kwargs_fixed:
                    args.append(kwargs['point_amp'])
        return args
    def num_param(self):
        """Count the non-fixed sampler parameters.

        :return: (number of parameters, list of parameter names in order)
        """
        num = 0
        param_names = []  # renamed from 'list' to avoid shadowing the builtin
        for k, model in enumerate(self.model_list):
            kwargs_fixed = self.kwargs_fixed[k]
            if model in ['LENSED_POSITION', 'UNLENSED']:
                if not 'ra_image' in kwargs_fixed:
                    num += self._num_point_sources_list[k]
                    for i in range(self._num_point_sources_list[k]):
                        param_names.append('ra_image')
                if not 'dec_image' in kwargs_fixed:
                    num += self._num_point_sources_list[k]
                    for i in range(self._num_point_sources_list[k]):
                        param_names.append('dec_image')
                if not 'point_amp' in kwargs_fixed:
                    num += self._num_point_sources_list[k]
                    for i in range(self._num_point_sources_list[k]):
                        param_names.append('point_amp')
            if model in ['SOURCE_POSITION']:
                if not 'ra_source' in kwargs_fixed:
                    num += 1
                    param_names.append('ra_source')
                if not 'dec_source' in kwargs_fixed:
                    num += 1
                    param_names.append('dec_source')
                if not 'point_amp' in kwargs_fixed:
                    num += 1
                    param_names.append('point_amp')
        return num, param_names
    def add_fix_linear(self, kwargs_fixed):
        """Fix the amplitude of every model (it is solved for linearly).

        :param kwargs_fixed: list of fixed-kwargs dicts (modified in place)
        :return: the updated list of fixed-kwargs dicts
        """
        for k, model in enumerate(self.model_list):
            kwargs_fixed[k]['point_amp'] = 1
        return kwargs_fixed
    def num_param_linear(self):
        """
        :return: number of linear (amplitude) parameters
        """
        num = 0
        if self._linear_solver is True:
            for k, model in enumerate(self.model_list):
                if self._fixed_magnification_list[k] is True:
                    # one overall scaling shared by all point sources of this model
                    num += 1
                else:
                    num += self._num_point_sources_list[k]
        return num
| [
"numpy.array"
] | [((3078, 3131), 'numpy.array', 'np.array', (['args[i:i + self._num_point_sources_list[k]]'], {}), '(args[i:i + self._num_point_sources_list[k]])\n', (3086, 3131), True, 'import numpy as np\n'), ((3371, 3424), 'numpy.array', 'np.array', (['args[i:i + self._num_point_sources_list[k]]'], {}), '(args[i:i + self._num_point_sources_list[k]])\n', (3379, 3424), True, 'import numpy as np\n'), ((3666, 3719), 'numpy.array', 'np.array', (['args[i:i + self._num_point_sources_list[k]]'], {}), '(args[i:i + self._num_point_sources_list[k]])\n', (3674, 3719), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# ProDy: A Python Package for Protein Dynamics Analysis
#
# Copyright (C) 2010-2012 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""This module defines miscellaneous functions dealing with protein data."""
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2010-2012 <NAME>'
import numpy as np
from prody.atomic import Atomic, Atom, AtomGroup, Selection, HierView
from prody.utilities import openFile, showFigure
from prody import SETTINGS
__all__ = ['showProtein', 'writePQR', ]
def writePQR(filename, atoms):
    """Write *atoms* in PQR format to a file with name *filename*.  Only
    current coordinate set is written.  Returns *filename* upon success.  If
    *filename* ends with :file:`.gz`, a compressed file will be written."""
    if not isinstance(atoms, Atomic):
        raise TypeError('atoms does not have a valid type')
    if isinstance(atoms, Atom):
        # promote a single Atom to a one-atom Selection so the rest of the
        # code can use the array-returning accessors uniformly
        atoms = Selection(atoms.getAtomGroup(), [atoms.getIndex()],
                          atoms.getACSIndex(),
                          'index ' + str(atoms.getIndex()))
    stream = openFile(filename, 'w')
    n_atoms = atoms.numAtoms()
    atomnames = atoms.getNames()
    if atomnames is None:
        raise RuntimeError('atom names are not set')
    # pad/truncate atom names to the fixed 4-character PQR name field
    for i, an in enumerate(atomnames):
        lenan = len(an)
        if lenan < 4:
            atomnames[i] = ' ' + an
        elif lenan > 4:
            atomnames[i] = an[:4]
    # native string dtype character for this numpy build (e.g. 'U' on py3)
    s_or_u = np.array(['a']).dtype.char
    # fall back to sensible defaults for any per-atom data that is unset
    resnames = atoms._getResnames()
    if resnames is None:
        resnames = ['UNK'] * n_atoms
    resnums = atoms._getResnums()
    if resnums is None:
        resnums = np.ones(n_atoms, int)
    chainids = atoms._getChids()
    if chainids is None:
        chainids = np.zeros(n_atoms, s_or_u + '1')
    charges = atoms._getCharges()
    if charges is None:
        charges = np.zeros(n_atoms, float)
    radii = atoms._getRadii()
    if radii is None:
        radii = np.zeros(n_atoms, float)
    icodes = atoms._getIcodes()
    if icodes is None:
        icodes = np.zeros(n_atoms, s_or_u + '1')
    # record type: HETATM where the hetero flag is set, ATOM elsewhere
    hetero = ['ATOM'] * n_atoms
    heteroflags = atoms._getFlags('hetatm')
    if heteroflags is None:
        heteroflags = atoms._getFlags('hetero')
    if heteroflags is not None:
        hetero = np.array(hetero, s_or_u + '6')
        hetero[heteroflags] = 'HETATM'
    altlocs = atoms._getAltlocs()
    if altlocs is None:
        altlocs = np.zeros(n_atoms, s_or_u + '1')
    # one fixed-width PQR record per atom; bind .format once for the loop
    format = ('{0:6s}{1:5d} {2:4s}{3:1s}' +
              '{4:4s}{5:1s}{6:4d}{7:1s} ' +
              '{8:8.3f}{9:8.3f}{10:8.3f}' +
              '{11:8.4f}{12:7.4f}\n').format
    coords = atoms._getCoords()
    write = stream.write
    for i, xyz in enumerate(coords):
        write(format(hetero[i], i+1, atomnames[i], altlocs[i],
                     resnames[i], chainids[i], int(resnums[i]),
                     icodes[i], xyz[0], xyz[1], xyz[2], charges[i], radii[i]))
    write('TER\nEND')
    stream.close()
    return filename
def showProtein(*atoms, **kwargs):
    """Show protein representation using :meth:`~mpl_toolkits.mplot3d.Axes3D`.
    This function is designed for generating a quick view of the contents of a
    :class:`~.AtomGroup` or :class:`~.Selection`.
    Protein atoms matching ``"calpha"`` selection are displayed using solid
    lines by picking a random and unique color per chain.  Line width can
    be adjusted using *lw* argument, e.g. ``lw=12``. Default width is 4.
    Chain colors can be overwritten using chain identifier as in ``A='green'``.
    Water molecule oxygen atoms are represented by red colored circles. Color
    can be changed using *water* keyword argument, e.g. ``water='aqua'``.
    Water marker and size can be changed using *wmarker* and *wsize* keywords,
    defaults values are ``wmarker='.', wsize=6``.
    Hetero atoms matching ``"hetero and noh"`` selection are represented by
    circles and unique colors are picked at random on a per residue basis.
    Colors can be customized using residue name as in ``NAH='purple'``. Note
    that this will color all distinct residues with the same name in the same
    color. Hetero atom marker and size can be changed using *hmarker* and
    *hsize* keywords, default values are ``hmarker='o', hsize=6``.
    ProDy will set the size of axis so the representation is not distorted when
    the shape of figure window is close to a square. Colors are picked at
    random, except for water oxygens which will always be colored red."""
    # validate every positional argument before touching matplotlib
    alist = atoms
    for atoms in alist:
        if not isinstance(atoms, Atomic):
            raise TypeError('atoms must be an Atomic instance')
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    cf = plt.gcf()
    show = None
    # reuse an existing 3D axes in the current figure, if one exists
    for child in cf.get_children():
        if isinstance(child, Axes3D):
            show = child
            break
    if show is None:
        show = Axes3D(cf)
    from matplotlib import colors
    cnames = dict(colors.cnames)
    wcolor = kwargs.get('water', 'red').lower()
    avoid = np.array(colors.hex2color(cnames.pop(wcolor, cnames.pop('red'))))
    # drop colors that are too light or too close to the water color
    # NOTE(review): popping from cnames while iterating its items() raises
    # RuntimeError on Python 3 — verify against a supported interpreter
    for cn, val in cnames.items():  # PY3K: OK
        clr = np.array(colors.hex2color(val))
        if clr.sum() > 2.4:
            cnames.pop(cn)
        elif np.abs(avoid - clr).sum() <= 0.6:
            cnames.pop(cn)
    cnames = list(cnames)
    import random
    random.shuffle(cnames)
    min_ = list()
    max_ = list()
    for atoms in alist:
        if isinstance(atoms, AtomGroup):
            title = atoms.getTitle()
        else:
            title = atoms.getAtomGroup().getTitle()
        # backbone trace: one solid line per chain of C-alpha atoms
        calpha = atoms.select('calpha')
        if calpha:
            for ch in HierView(calpha, chain=True):
                xyz = ch._getCoords()
                chid = ch.getChid()
                show.plot(xyz[:, 0], xyz[:, 1], xyz[:, 2],
                          label=title + '_' + chid,
                          color=kwargs.get(chid, cnames.pop()).lower(),
                          lw=kwargs.get('lw', 4))
        # water oxygens as unconnected markers in the water color
        water = atoms.select('water and noh')
        if water:
            xyz = atoms.select('water')._getCoords()
            show.plot(xyz[:, 0], xyz[:, 1], xyz[:, 2], label=title + '_water',
                      color=wcolor,
                      ls='None', marker=kwargs.get('wmarker', '.'),
                      ms=kwargs.get('wsize', 6))
        # remaining hetero residues: markers, one color per residue name
        hetero = atoms.select('not protein and not nucleic and not water')
        if hetero:
            for res in HierView(hetero).iterResidues():
                xyz = res._getCoords()
                resname = res.getResname()
                resnum = str(res.getResnum())
                chid = res.getChid()
                show.plot(xyz[:, 0], xyz[:, 1], xyz[:, 2], ls='None',
                          color=kwargs.get(resname, cnames.pop()).lower(),
                          label=title + '_' + chid + '_' + resname + resnum,
                          marker=kwargs.get('hmarker', 'o'),
                          ms=kwargs.get('hsize', 6))
        xyz = atoms._getCoords()
        min_.append(xyz.min(0))
        max_.append(xyz.max(0))
    show.set_xlabel('x')
    show.set_ylabel('y')
    show.set_zlabel('z')
    # equalize the axis extents around the overall center so the structure
    # is not distorted by unequal axis scaling
    min_ = np.array(min_).min(0)
    max_ = np.array(max_).max(0)
    center = (max_ + min_) / 2
    half = (max_ - min_).max() / 2
    show.set_xlim3d(center[0]-half, center[0]+half)
    show.set_ylim3d(center[1]-half, center[1]+half)
    show.set_zlim3d(center[2]-half, center[2]+half)
    if kwargs.get('legend', False):
        show.legend(prop={'size': 10})
    if SETTINGS['auto_show']:
        showFigure()
    return show
| [
"prody.utilities.openFile",
"numpy.abs",
"prody.utilities.showFigure",
"numpy.ones",
"random.shuffle",
"matplotlib.pyplot.gcf",
"numpy.array",
"numpy.zeros",
"prody.atomic.HierView",
"matplotlib.colors.hex2color",
"mpl_toolkits.mplot3d.Axes3D"
] | [((1686, 1709), 'prody.utilities.openFile', 'openFile', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (1694, 1709), False, 'from prody.utilities import openFile, showFigure\n'), ((5346, 5355), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5353, 5355), True, 'import matplotlib.pyplot as plt\n'), ((5999, 6021), 'random.shuffle', 'random.shuffle', (['cnames'], {}), '(cnames)\n', (6013, 6021), False, 'import random\n'), ((2248, 2269), 'numpy.ones', 'np.ones', (['n_atoms', 'int'], {}), '(n_atoms, int)\n', (2255, 2269), True, 'import numpy as np\n'), ((2347, 2378), 'numpy.zeros', 'np.zeros', (['n_atoms', "(s_or_u + '1')"], {}), "(n_atoms, s_or_u + '1')\n", (2355, 2378), True, 'import numpy as np\n'), ((2455, 2479), 'numpy.zeros', 'np.zeros', (['n_atoms', 'float'], {}), '(n_atoms, float)\n', (2463, 2479), True, 'import numpy as np\n'), ((2548, 2572), 'numpy.zeros', 'np.zeros', (['n_atoms', 'float'], {}), '(n_atoms, float)\n', (2556, 2572), True, 'import numpy as np\n'), ((2645, 2676), 'numpy.zeros', 'np.zeros', (['n_atoms', "(s_or_u + '1')"], {}), "(n_atoms, s_or_u + '1')\n", (2653, 2676), True, 'import numpy as np\n'), ((2878, 2908), 'numpy.array', 'np.array', (['hetero', "(s_or_u + '6')"], {}), "(hetero, s_or_u + '6')\n", (2886, 2908), True, 'import numpy as np\n'), ((3024, 3055), 'numpy.zeros', 'np.zeros', (['n_atoms', "(s_or_u + '1')"], {}), "(n_atoms, s_or_u + '1')\n", (3032, 3055), True, 'import numpy as np\n'), ((5525, 5535), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['cf'], {}), '(cf)\n', (5531, 5535), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((8218, 8230), 'prody.utilities.showFigure', 'showFigure', ([], {}), '()\n', (8228, 8230), False, 'from prody.utilities import openFile, showFigure\n'), ((2046, 2061), 'numpy.array', 'np.array', (["['a']"], {}), "(['a'])\n", (2054, 2061), True, 'import numpy as np\n'), ((5799, 5820), 'matplotlib.colors.hex2color', 'colors.hex2color', (['val'], {}), '(val)\n', (5815, 5820), False, 'from 
matplotlib import colors\n'), ((6307, 6335), 'prody.atomic.HierView', 'HierView', (['calpha'], {'chain': '(True)'}), '(calpha, chain=True)\n', (6315, 6335), False, 'from prody.atomic import Atomic, Atom, AtomGroup, Selection, HierView\n'), ((7828, 7842), 'numpy.array', 'np.array', (['min_'], {}), '(min_)\n', (7836, 7842), True, 'import numpy as np\n'), ((7861, 7875), 'numpy.array', 'np.array', (['max_'], {}), '(max_)\n', (7869, 7875), True, 'import numpy as np\n'), ((7110, 7126), 'prody.atomic.HierView', 'HierView', (['hetero'], {}), '(hetero)\n', (7118, 7126), False, 'from prody.atomic import Atomic, Atom, AtomGroup, Selection, HierView\n'), ((5890, 5909), 'numpy.abs', 'np.abs', (['(avoid - clr)'], {}), '(avoid - clr)\n', (5896, 5909), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy as np
from scipy import interpolate
def interpolate_traj(x_queue, x, y, interpolate_kind):
    """Interpolate a trajectory or a command sequence at the query points.

    :param x_queue: query point(s); a float or a 1-D numpy array
    :param x: 1-D numpy array of timestamps, length n
    :param y: 2-D numpy array of values, shape (m, n)
    :param interpolate_kind: 'traj' for linear interpolation clamped to the
        boundary columns of y outside the range of x, or 'cmd' for zero-order
        hold where times before the first timestamp yield zero commands and
        times after the last timestamp yield the last command
    :return: 2-D numpy array of interpolated values; a float query with
        'traj' yields a single-column array (an int query is not reshaped)
    :raises Exception: if interpolate_kind is neither 'traj' nor 'cmd'
    """
    if interpolate_kind == 'traj':
        # clamp to the first/last trajectory point outside the time range
        boundary = (y[:, 0], y[:, -1])
        f = interpolate.interp1d(x, y, kind='linear', bounds_error=False, fill_value=boundary)
        y_raw = f(x_queue)
        if isinstance(x_queue, float):
            # scalar query returns a 1-D array; present it as one column
            y_queue = y_raw.reshape(y_raw.shape[0], -1)
        else:
            y_queue = y_raw
    elif interpolate_kind == 'cmd':
        # zero-order hold; zero commands before the first timestamp
        # (generalized from a hard-coded 4-row command vector to any m)
        boundary = (np.zeros(y.shape[0]), y[:, -1])
        f = interpolate.interp1d(x, y, kind='zero', bounds_error=False, fill_value=boundary)
        y_queue = f(x_queue)
    else:
        raise Exception("The interpolation type is wrong!")
    return y_queue
if __name__ == "__main__":
    # Quick demonstration: interpolate a 5-channel trajectory at t = 1.
    # Note: x_queue is an int here, so the 'traj' branch returns the raw
    # 1-D result (only float queries are reshaped into a column).
    x = np.array([0, 2, 4, 6, 8, 10])
    y = np.array([[0, 1, 2, 3, 4, 5], [0, -1, -2, -3, -4, -5], [10, 20, 30, 40, 50, 60], [-10, -20, -30, -40, -50, -60], [0.5, 1.5, 2.5, 3.5, 4.5, 5.5]])
    x_queue = 1
    print(x)
    print(y)
    y_queue = interpolate_traj(x_queue, x, y, 'traj')
    print(y_queue)
| [
"numpy.array",
"numpy.zeros",
"scipy.interpolate.interp1d"
] | [((1604, 1633), 'numpy.array', 'np.array', (['[0, 2, 4, 6, 8, 10]'], {}), '([0, 2, 4, 6, 8, 10])\n', (1612, 1633), True, 'import numpy as np\n'), ((1642, 1791), 'numpy.array', 'np.array', (['[[0, 1, 2, 3, 4, 5], [0, -1, -2, -3, -4, -5], [10, 20, 30, 40, 50, 60], [-\n 10, -20, -30, -40, -50, -60], [0.5, 1.5, 2.5, 3.5, 4.5, 5.5]]'], {}), '([[0, 1, 2, 3, 4, 5], [0, -1, -2, -3, -4, -5], [10, 20, 30, 40, 50,\n 60], [-10, -20, -30, -40, -50, -60], [0.5, 1.5, 2.5, 3.5, 4.5, 5.5]])\n', (1650, 1791), True, 'import numpy as np\n'), ((1006, 1093), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'y'], {'kind': '"""linear"""', 'bounds_error': '(False)', 'fill_value': 'boundary'}), "(x, y, kind='linear', bounds_error=False, fill_value=\n boundary)\n", (1026, 1093), False, 'from scipy import interpolate\n'), ((1345, 1430), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'y'], {'kind': '"""zero"""', 'bounds_error': '(False)', 'fill_value': 'boundary'}), "(x, y, kind='zero', bounds_error=False, fill_value=boundary\n )\n", (1365, 1430), False, 'from scipy import interpolate\n'), ((1310, 1321), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1318, 1321), True, 'import numpy as np\n')] |
import random
import numpy as np
import pandas as pd
from copulas import get_qualified_name
from rdt.transformers.positive_number import PositiveNumberTransformer
import exrex
# Fully-qualified class path used to recognize Gaussian copula models.
GAUSSIAN_COPULA = 'copulas.multivariate.gaussian.GaussianMultivariate'
# Error messages raised when sampling has no usable model, keyed by whether
# the table has parents (True: model parameters could not be recreated from
# a sampled parent row; False: the modeler was never fitted).
MODEL_ERROR_MESSAGES = {
    True: (
        'There was an error recreating models from parameters. '
        'Sampling could not continue.'
    ),
    False: (
        'Modeler hasn\'t been fitted. '
        'Please call Modeler.model_database() before sampling'
    )
}
class Sampler:
    """Class to sample data from a model."""

    def __init__(self, data_navigator, modeler):
        """Bind the sampler to a dataset navigator and a fitted modeler."""
        self.dn = data_navigator
        self.modeler = modeler
        # table_name -> [(primary_key, generated_row)]
        self.sampled = {}
        # table_name -> primary key value generator
        self.primary_key = {}
@staticmethod
def update_mapping_list(mapping, key, value):
"""Append value on mapping[key] if exists, create it otherwise."""
item = mapping.get(key)
if item:
item.append(value)
else:
mapping[key] = [value]
return mapping
@staticmethod
def _square_matrix(triangular_matrix):
"""Fill with zeros a triangular matrix to reshape it to a square one.
Args:
triangular_matrix (list[list[float]]): Array of arrays of
Returns:
list: Square matrix.
"""
length = len(triangular_matrix)
zero = [0.0]
for item in triangular_matrix:
item.extend(zero * (length - len(item)))
return triangular_matrix
def _get_table_meta(self, metadata, table_name):
"""Return metadata get table meta for a given table name.
Args:
metadata (dict): Metadata for dataset.
table_name (str): Name of table to get metadata from.
Returns:
dict: Metadata for given table.
"""
for table in metadata['tables']:
if table['name'] == table_name:
return table
return None
def _prepare_sampled_covariance(self, covariance):
"""
Args:
covariance (list): covariance after unflattening model parameters.
Result:
list[list]: symmetric Positive semi-definite matrix.
"""
covariance = np.array(self._square_matrix(covariance))
covariance = (covariance + covariance.T - (np.identity(covariance.shape[0]) * covariance))
return covariance
@staticmethod
def reset_indices_tables(sampled_tables):
"""Reset the indices of sampled tables.
Args:
sampled_tables (dict): All values are dataframes for sampled tables.
Returns:
dict: The same dict entered, just reindexed.
"""
for name, table in sampled_tables.items():
sampled_tables[name] = table.reset_index(drop=True)
return sampled_tables
    def transform_synthesized_rows(self, synthesized, table_name, num_rows):
        """Add primary key and reverse transform synthetized data.

        Args:
            synthesized (pandas.DataFrame): Generated data from model.
            table_name (str): Name of the table.
            num_rows (int): Number of rows sampled.

        Returns:
            pandas.DataFrame: Formatted synthesized data, restricted to the
            columns of the original table.

        Raises:
            ValueError: If the primary key regex cannot yield ``num_rows``
                distinct values.
        """
        # get primary key column name
        meta = self.dn.tables[table_name].meta
        orig_meta = self._get_table_meta(self.dn.meta, table_name)
        primary_key = meta.get('primary_key')
        if primary_key:
            node = meta['fields'][primary_key]
            regex = node['regex']
            # reuse the per-table exrex generator so keys stay unique
            # across successive sampling calls
            generator = self.primary_key.get(table_name)
            if not generator:
                generator = exrex.generate(regex)
            values = [x for i, x in zip(range(num_rows), generator)]
            self.primary_key[table_name] = generator
            if len(values) != num_rows:
                raise ValueError(
                    'Not enough unique values for primary key of table {} with regex {}'
                    ' to generate {} samples.'.format(table_name, regex, num_rows)
                )
            synthesized[primary_key] = pd.Series(values)
            if (node['type'] == 'number') and (node['subtype'] == 'integer'):
                synthesized[primary_key] = pd.to_numeric(synthesized[primary_key])
            # remember the sampled rows so children can look up parent keys
            sample_info = (primary_key, synthesized)
            self.sampled = self.update_mapping_list(self.sampled, table_name, sample_info)
        # filter out parameters
        labels = list(self.dn.tables[table_name].data)
        reverse_columns = [
            transformer[1] for transformer in self.dn.ht.transformers
            if table_name in transformer
        ]
        text_filled = self._fill_text_columns(synthesized, labels, table_name)
        # reverse transform data
        reversed_data = self.dn.ht.reverse_transform_table(text_filled[reverse_columns], orig_meta)
        synthesized.update(reversed_data)
        return synthesized[labels]
    def _get_parent_row(self, table_name):
        """Pick a random previously-sampled row from a random parent table.

        Args:
            table_name (str): Name of the child table.

        Returns:
            tuple or None: ``(parent_name, key_name, parent_row)`` where
            ``key_name`` is the key field stored alongside the sampled row
            (presumably the parent's primary key — see ``self.sampled``);
            None when the table has no parents.

        Raises:
            Exception: If any parent table has not been sampled yet.
        """
        parents = self.dn.get_parents(table_name)
        if not parents:
            return None
        parent_rows = dict()
        for parent in parents:
            if parent not in self.sampled:
                raise Exception('Parents must be synthesized first')
            parent_rows[parent] = self.sampled[parent]
        random_parent, parent_rows = random.choice(list(parent_rows.items()))
        foreign_key, parent_row = random.choice(parent_rows)
        return random_parent, foreign_key, parent_row
@staticmethod
def generate_keys(prefix=''):
def f(row):
parts = [str(row[key]) for key in row.keys() if row[key] is not None]
if prefix:
parts = [prefix] + parts
return '__'.join(parts)
return f
    @classmethod
    def _get_sorted_keys(cls, _dict):
        """Return the keys of ``_dict`` sorted by their '__'-separated parts.

        Keys are split on '__' and grouped by their first token; within each
        group the remaining tokens are sorted numerically where they parse
        as integers, lexicographically otherwise.

        Args:
            _dict (dict): Mapping whose keys are '__'-joined strings.

        Returns:
            list: Sorted key strings (empty list for an empty dict).
        """
        result = []
        keys = list(_dict.keys())
        if not keys:
            return []
        # one row per key, one column per '__'-separated token
        serie = pd.Series(keys)
        df = pd.DataFrame(serie.str.split('__').values.tolist())
        uniques = df[0].unique()
        for value in uniques:
            index = df[df[0] == value].index
            _slice = df.loc[index, range(1, df.shape[1])].copy()
            try:
                # sort numerically when every token column parses as int
                for column in _slice.columns:
                    _slice[column] = _slice[column].astype(int)
            except (ValueError, TypeError):
                pass
            df.drop(index, inplace=True)
            _slice = _slice.sort_values(list(range(1, df.shape[1])))
            result += _slice.apply(cls.generate_keys(value), axis=1).values.tolist()
        # any rows left after grouping (none expected: every row's first
        # token is covered by `uniques`) are appended in sorted order
        df = df.sort_values(list(range(df.shape[1])))
        result += df.apply(cls.generate_keys(), axis=1).values.tolist()
        return result
    def _unflatten_dict(self, flat, table_name=''):
        """Transform a flattened dict into its original form.

        Works in exact opposite way that `sdv.Modeler._flatten_dict`:
        '__'-joined keys are split into paths and rebuilt as nested dicts,
        with purely numeric path tokens producing lists.

        Args:
            flat (dict): Flattened dict.
            table_name (str): Name of the table the parameters belong to;
                used to keep child-table prefixes as single path tokens.

        Returns:
            dict: Nested structure reconstructed from the flat keys.
        """
        result = {}
        children = self.dn.get_children(table_name)
        keys = self._get_sorted_keys(flat)
        for key in keys:
            path = key.split('__')
            # keys that embed a child table name collapse the middle tokens
            # back into one token, so the child prefix survives the round trip
            if any(['__{}__'.format(child) in key for child in children]):
                path = [
                    path[0],
                    '__'.join(path[1: -1]),
                    path[-1]
                ]
            value = flat[key]
            walked = result
            # walk/construct the nested containers along the path
            for step, name in enumerate(path):
                if isinstance(walked, dict) and name in walked:
                    walked = walked[name]
                    continue
                elif isinstance(walked, list) and len(walked) and len(walked) - 1 >= int(name):
                    walked = walked[int(name)]
                    continue
                else:
                    if name.isdigit():
                        name = int(name)
                    if step == len(path) - 1:
                        # leaf: store the value in the current container
                        if isinstance(walked, list):
                            walked.append(value)
                        else:
                            walked[name] = value
                    else:
                        # intermediate: create a list if the next token is
                        # numeric, a dict otherwise, then descend into it
                        next_step = path[step + 1]
                        if next_step.isdigit():
                            if isinstance(name, int):
                                walked.append([])
                                while len(walked) < name + 1:
                                    walked.append([])
                            else:
                                walked[name] = []
                            walked = walked[name]
                        else:
                            if isinstance(name, int):
                                walked.append({})
                            else:
                                walked[name] = {}
                            walked = walked[name]
        return result
    def _make_positive_definite(self, matrix):
        """Find the nearest positive-definite matrix to input.

        Args:
            matrix (numpy.ndarray): Matrix to transform.

        Returns:
            numpy.ndarray: Closest symmetric positive-definite matrix
            (approximation via SVD symmetrization plus, if needed, a
            diagonal shift — resembles Higham-style nearest-SPD methods;
            TODO confirm the intended reference).
        """
        # symmetrize, then average with the symmetric polar factor
        symetric_matrix = (matrix + matrix.T) / 2
        _, s, V = np.linalg.svd(symetric_matrix)
        symmetric_polar = np.dot(V.T, np.dot(np.diag(s), V))
        A2 = (symetric_matrix + symmetric_polar) / 2
        A3 = (A2 + A2.T) / 2
        if self._check_matrix_symmetric_positive_definite(A3):
            return A3
        # not yet SPD: push the spectrum up by growing multiples of the
        # most negative eigenvalue until a Cholesky factorization succeeds
        spacing = np.spacing(np.linalg.norm(matrix))
        identity = np.eye(matrix.shape[0])
        iterations = 1
        while not self._check_matrix_symmetric_positive_definite(A3):
            min_eigenvals = np.min(np.real(np.linalg.eigvals(A3)))
            A3 += identity * (-min_eigenvals * iterations**2 + spacing)
            iterations += 1
        return A3
def _check_matrix_symmetric_positive_definite(self, matrix):
"""Checks if a matrix is symmetric positive-definite.
Args:
matrix (list or np.ndarray): Matrix to evaluate.
Returns:
bool
"""
try:
if len(matrix.shape) != 2 or matrix.shape[0] != matrix.shape[1]:
# Not 2-dimensional or square, so not simmetric.
return False
np.linalg.cholesky(matrix)
return True
except np.linalg.LinAlgError:
return False
    def _unflatten_gaussian_copula(self, model_parameters):
        """Prepare unflattened model params to recreate Gaussian Multivariate instance.

        The preparations consist basically in:

        - Transform sampled negative standard deviations from distributions
          into positive numbers
        - Ensure the covariance matrix is a valid symmetric
          positive-semidefinite matrix.
        - Add string parameters kept inside the class (as they can't be
          modelled), like `distribution_type`.

        Args:
            model_parameters (dict): Sampled and reestructured model parameters.

        Returns:
            dict: Model parameters ready to recreate the model.
        """
        distribution_name = self.modeler.model_kwargs['distribution']
        distribution_kwargs = {
            'fitted': True,
            'type': distribution_name
        }
        model_parameters['distribution'] = distribution_name
        distribs = model_parameters['distribs']
        metadata = {
            'name': 'std',
            'type': 'number'
        }
        # maps sampled (possibly negative) std values into positive numbers
        transformer = PositiveNumberTransformer(metadata)
        for distribution in distribs.values():
            distribution.update(distribution_kwargs)
            df = pd.DataFrame({'std': [distribution['std']]})
            distribution['std'] = transformer.transform(df).loc[0, 'std']
        # rebuild the covariance and repair it if it is not SPD
        covariance = model_parameters['covariance']
        covariance = self._prepare_sampled_covariance(covariance)
        if not self._check_matrix_symmetric_positive_definite(covariance):
            covariance = self._make_positive_definite(covariance)
        model_parameters['covariance'] = covariance.tolist()
        return model_parameters
    def unflatten_model(self, parent_row, table_name, parent_name):
        """Takes the params from a generated parent row and creates a model from it.

        Args:
            parent_row (pandas.DataFrame): A generated parent row.
            table_name (str): Name of the table to make the model for.
            parent_name (str): Name of the parent table.

        Returns:
            A fitted model instance recreated via ``from_dict``.
        """
        # parameters for this child table are stored in the parent row
        # under columns prefixed with '__<table_name>__'
        prefix = '__{}__'.format(table_name)
        columns = [column for column in parent_row.columns if column.startswith(prefix)]
        new_columns = {column: column.replace(prefix, '') for column in columns}
        flat_parameters = parent_row.loc[:, columns]
        flat_parameters = flat_parameters.rename(columns=new_columns).to_dict('records')[0]
        model_parameters = self._unflatten_dict(flat_parameters, table_name)
        model_name = get_qualified_name(self.modeler.model)
        model_parameters['fitted'] = True
        model_parameters['type'] = model_name
        # Gaussian copulas need extra repair (positive stds, SPD covariance)
        if model_name == GAUSSIAN_COPULA:
            model_parameters = self._unflatten_gaussian_copula(model_parameters)
        return self.modeler.model.from_dict(model_parameters)
def _get_missing_valid_rows(self, synthesized, drop_indices, valid_rows, num_rows):
"""
Args:
synthesized (pandas.DataFrame)
Returns:
tuple[int, pandas.DataFrame]: Amount of missing values and actual valid rows
"""
valid_rows = pd.concat([valid_rows, synthesized[~drop_indices].copy()])
valid_rows = valid_rows.reset_index(drop=True)
missing_rows = num_rows - valid_rows.shape[0]
return missing_rows, valid_rows
    def _sample_valid_rows(self, model, num_rows, table_name):
        """Sample using `model` and discard invalid values until having `num_rows`.

        Rows where any categorical column falls outside the [0, 1] range are
        dropped, and sampling is repeated until enough rows survive.

        Args:
            model (copula.multivariate.base): Fitted model.
            num_rows (int): Number of rows to sample.
            table_name (str): Name of table to synthesize.

        Returns:
            pandas.DataFrame: Sampled rows, shape (num_rows, n_columns).

        Raises:
            ValueError: If the model is missing or not fitted.
        """
        if model and model.fitted:
            synthesized = model.sample(num_rows)
            valid_rows = pd.DataFrame(columns=synthesized.columns)
            drop_indices = pd.Series(False, index=synthesized.index)
            # categorical columns are modelled in [0, 1]; anything outside
            # that range marks the whole row as invalid
            categorical_columns = []
            table_metadata = self._get_table_meta(self.dn.meta, table_name)
            for field in table_metadata['fields']:
                if field['type'] == 'categorical':
                    column_name = field['name']
                    categorical_columns.append(column_name)
                    column = synthesized[column_name]
                    filtered_values = ((column < 0) | (column > 1))
                    if filtered_values.any():
                        drop_indices |= filtered_values
            missing_rows, valid_rows = self._get_missing_valid_rows(
                synthesized, drop_indices, valid_rows, num_rows)
            # resample and re-filter until the requested row count is met
            while missing_rows:
                synthesized = model.sample(missing_rows)
                drop_indices = pd.Series(False, index=synthesized.index)
                for column_name in categorical_columns:
                    column = synthesized[column_name]
                    filtered_values = ((column < 0) | (column > 1))
                    if filtered_values.any():
                        drop_indices |= filtered_values
                missing_rows, valid_rows = self._get_missing_valid_rows(
                    synthesized, drop_indices, valid_rows, num_rows)
            return valid_rows
        else:
            # pick the error message matching whether the table has parents
            parents = bool(self.dn.get_parents(table_name))
            raise ValueError(MODEL_ERROR_MESSAGES[parents])
    def sample_rows(self, table_name, num_rows):
        """Sample specified number of rows for specified table.

        For a child table, a previously sampled parent row supplies both the
        model parameters and the foreign key value; a parentless table uses
        the modeler's fitted model directly.

        Args:
            table_name (str): Name of table to synthesize.
            num_rows (int): Number of rows to synthesize.

        Returns:
            pd.DataFrame: Synthesized rows.
        """
        parent_row = self._get_parent_row(table_name)
        if parent_row:
            random_parent, fk, parent_row = parent_row
            # Make sure only using one row
            parent_row = parent_row.loc[[0]]
            # get parameters from parent to make model
            model = self.unflatten_model(parent_row, table_name, random_parent)
            synthesized_rows = self._sample_valid_rows(model, num_rows, table_name)
            # add foreign key value to row
            fk_val = parent_row.loc[0, fk]
            # get foreign key name from current table
            foreign_key = self.dn.foreign_keys[(table_name, random_parent)][1]
            synthesized_rows[foreign_key] = fk_val
            return self.transform_synthesized_rows(synthesized_rows, table_name, num_rows)
        else:  # there is no parent
            model = self.modeler.models[table_name]
            synthesized_rows = self._sample_valid_rows(model, num_rows, table_name)
            return self.transform_synthesized_rows(synthesized_rows, table_name, num_rows)
def sample_table(self, table_name):
    """Sample a table equal to the size of the original.

    Args:
        table_name (str): name of table to synthesize

    Returns:
        pandas.DataFrame: Synthesized table.
    """
    # Match the row count of the real data for this table.
    original_data = self.dn.tables[table_name].data
    return self.sample_rows(table_name, original_data.shape[0])
def _sample_child_rows(self, parent_name, parent_row, sampled_data, num_rows=5):
    """Uses parameters from parent row to synthesize child rows.

    Args:
        parent_name (str): name of parent table
        parent_row (dataframe): synthesized parent row
        sampled_data (dict): maps table name to sampled data
        num_rows (int): number of rows to synthesize per parent row

    Returns:
        None. ``sampled_data`` is updated in place.
    """
    for child in self.dn.get_children(parent_name):
        rows = self.sample_rows(child, num_rows)
        if child not in sampled_data:
            sampled_data[child] = rows
        else:
            sampled_data[child] = pd.concat([sampled_data[child], rows])
        # NOTE(review): the recursion falls back to the default num_rows and
        # only descends from the first sampled row -- confirm this is intended.
        self._sample_child_rows(child, rows.iloc[0:1, :], sampled_data)
def sample_all(self, num_rows=5):
    """Samples the entire database.

    Args:
        num_rows (int): Number of rows to be sampled on the parent tables.

    Returns:
        dict: Tables sampled.

    `sample_all` returns a dictionary with all the tables of the dataset sampled.
    The amount of rows sampled will depend from table to table, and is only
    guaranteed to match `num_rows` on tables without parents.

    This is because the children tables are created modelling the relation
    they have with their parent tables, so their behavior may change from one
    table to another.
    """
    sampled_data = {}
    for table in self.dn.tables:
        # Only root tables are sampled directly; children follow recursively.
        if self.dn.get_parents(table):
            continue
        for _ in range(num_rows):
            row = self.sample_rows(table, 1)
            if table not in sampled_data:
                sampled_data[table] = row
            else:
                sampled_data[table] = pd.concat([sampled_data[table], row])
            self._sample_child_rows(table, row, sampled_data)
    return self.reset_indices_tables(sampled_data)
def _fill_text_columns(self, row, labels, table_name):
    """Fill in the column values for every non numeric column that isn't the primary key.

    Args:
        row (pandas.Series): row to fill text columns.
        labels (list): Column names.
        table_name (str): Name of the table.

    Returns:
        pd.Series: Series with text values filled.
    """
    fields = self.dn.tables[table_name].meta['fields']
    for label in labels:
        field = fields[label]
        field_type = field['type']
        name = field['name']
        if field_type == 'id' and name not in list(row):
            reference = field.get('ref')
            if reference:
                # Foreign key: sample a fresh parent row and copy its key value.
                parent_row = self.sample_rows(reference['table'], 1)
                row.loc[:, name] = parent_row[reference['field']]
            else:
                # Plain id: generate a fake one from the field's regex.
                row.loc[:, name] = exrex.getone(field['regex'])
        elif field_type == 'text':
            # Free-text column: generate fake text from the regex.
            row.loc[:, name] = exrex.getone(field['regex'])
    return row
| [
"pandas.Series",
"numpy.identity",
"numpy.eye",
"random.choice",
"copulas.get_qualified_name",
"exrex.generate",
"pandas.DataFrame",
"numpy.linalg.norm",
"numpy.diag",
"numpy.linalg.eigvals",
"pandas.to_numeric",
"pandas.concat",
"rdt.transformers.positive_number.PositiveNumberTransformer",
... | [((5584, 5610), 'random.choice', 'random.choice', (['parent_rows'], {}), '(parent_rows)\n', (5597, 5610), False, 'import random\n'), ((6111, 6126), 'pandas.Series', 'pd.Series', (['keys'], {}), '(keys)\n', (6120, 6126), True, 'import pandas as pd\n'), ((9428, 9458), 'numpy.linalg.svd', 'np.linalg.svd', (['symetric_matrix'], {}), '(symetric_matrix)\n', (9441, 9458), True, 'import numpy as np\n'), ((9761, 9784), 'numpy.eye', 'np.eye', (['matrix.shape[0]'], {}), '(matrix.shape[0])\n', (9767, 9784), True, 'import numpy as np\n'), ((11719, 11754), 'rdt.transformers.positive_number.PositiveNumberTransformer', 'PositiveNumberTransformer', (['metadata'], {}), '(metadata)\n', (11744, 11754), False, 'from rdt.transformers.positive_number import PositiveNumberTransformer\n'), ((13168, 13206), 'copulas.get_qualified_name', 'get_qualified_name', (['self.modeler.model'], {}), '(self.modeler.model)\n', (13186, 13206), False, 'from copulas import get_qualified_name\n'), ((4254, 4271), 'pandas.Series', 'pd.Series', (['values'], {}), '(values)\n', (4263, 4271), True, 'import pandas as pd\n'), ((9718, 9740), 'numpy.linalg.norm', 'np.linalg.norm', (['matrix'], {}), '(matrix)\n', (9732, 9740), True, 'import numpy as np\n'), ((10512, 10538), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['matrix'], {}), '(matrix)\n', (10530, 10538), True, 'import numpy as np\n'), ((11873, 11917), 'pandas.DataFrame', 'pd.DataFrame', (["{'std': [distribution['std']]}"], {}), "({'std': [distribution['std']]})\n", (11885, 11917), True, 'import pandas as pd\n'), ((14531, 14572), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'synthesized.columns'}), '(columns=synthesized.columns)\n', (14543, 14572), True, 'import pandas as pd\n'), ((14600, 14641), 'pandas.Series', 'pd.Series', (['(False)'], {'index': 'synthesized.index'}), '(False, index=synthesized.index)\n', (14609, 14641), True, 'import pandas as pd\n'), ((2451, 2483), 'numpy.identity', 'np.identity', (['covariance.shape[0]'], {}), 
'(covariance.shape[0])\n', (2462, 2483), True, 'import numpy as np\n'), ((3803, 3824), 'exrex.generate', 'exrex.generate', (['regex'], {}), '(regex)\n', (3817, 3824), False, 'import exrex\n'), ((4394, 4433), 'pandas.to_numeric', 'pd.to_numeric', (['synthesized[primary_key]'], {}), '(synthesized[primary_key])\n', (4407, 4433), True, 'import pandas as pd\n'), ((9504, 9514), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (9511, 9514), True, 'import numpy as np\n'), ((15448, 15489), 'pandas.Series', 'pd.Series', (['(False)'], {'index': 'synthesized.index'}), '(False, index=synthesized.index)\n', (15457, 15489), True, 'import pandas as pd\n'), ((18542, 18580), 'pandas.concat', 'pd.concat', (['[sampled_data[child], rows]'], {}), '([sampled_data[child], rows])\n', (18551, 18580), True, 'import pandas as pd\n'), ((9921, 9942), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['A3'], {}), '(A3)\n', (9938, 9942), True, 'import numpy as np\n'), ((21135, 21154), 'exrex.getone', 'exrex.getone', (['regex'], {}), '(regex)\n', (21147, 21154), False, 'import exrex\n'), ((21318, 21337), 'exrex.getone', 'exrex.getone', (['regex'], {}), '(regex)\n', (21330, 21337), False, 'import exrex\n'), ((19701, 19738), 'pandas.concat', 'pd.concat', (['[sampled_data[table], row]'], {}), '([sampled_data[table], row])\n', (19710, 19738), True, 'import pandas as pd\n')] |
"""Tools (notably `xpSpace`) for processing and presenting experiment data."""
import collections
import copy
import warnings
import colorama
import numpy as np
from mpl_tools import place
from patlib.std import nonchalance
from struct_tools import AlignedDict, complement, intersect, transps
from tabulate import tabulate
from dapper.dpr_config import rc
from dapper.stats import align_col, unpack_uqs
from dapper.tools.colors import color_text, stripe
from dapper.tools.rounding import UncertainQtty
from dapper.tools.viz import NoneDict, default_styles
from dapper.xp_launch import xpList
class SparseSpace(dict):
    """Subclass of `dict` that enforces key conformity to a given `namedtuple`.

    Like a normal `dict`, it can hold any type of objects.
    But, since the keys must conform, they effectively follow a coordinate system,
    so that the `dict` becomes a vector **space**.

    The coordinate system is specified by the `dims`:
    a list of keys defining the `namedtuple` of `self.Coord`.

    In intended usage, this space is highly sparse,
    meaning there are many coordinates with no entry.
    Indeed, as a data format for nd-arrays, it may be called
    "coordinate list representation", used e.g. by `scipy.sparse.coo_matrix`.

    Thus, operations across (potentially multiple) `dims`,
    such as optimization or averaging, should be carried out by iterating
    -- not over the `dims` -- but over the the list of items.

    The most important method is `nest`,
    which is used (by `xpSpace.table_tree`) to print and plot results.
    This is essentially a "groupby" operation, and indeed the case could
    be made that this class should be replaced by `pandas.DataFrame`.

    In addition, `__getitem__` is quite flexible, allowing accessing by:

    - The actual key, a `self.Coord` object, or a standard tuple. Returns single item.
    - A `slice` or `list`. Returns list.
      Can be used to get single item with `dct[[idx]][0]`.

    Of course, indexing by slice or list assumes that the dict is ordered,
    which we inherit from the builtin `dict` since Python 3.7.
    Moreover, it is a reflection of the fact that the internals of this class
    work by looping over items.

    Other convenience functions: `.subspace` (alias `.__call__`) and `.coords_matching`.

    Inspired by

    - https://stackoverflow.com/a/7728830
    - https://stackoverflow.com/q/3387691

    Example:
    >>> dct = xpSpace(["x", "y", "z"])
    >>> dct[(1, 2, 3)] = "point 1"
    >>> dct[1, 2, 3] == dct[(1, 2, 3)] == dct[dct.Coord(1, 2, 3)] == "point 1"
    True

    This dict only has three `dims`, so this fails:
    >>> dct[(1, 2, 3, 4)]
    Traceback (most recent call last):
    ...
    KeyError: (1, 2, 3, 4)

    Individual coordinates can be anything. For example `None`:
    >>> dct[(1, 2, None)] = "point 2"
    """

    @property
    def dims(self):
        # The dims are exactly the field names of the Coord namedtuple.
        return self.Coord._fields

    def __init__(self, dims):
        """Usually initialized through `xpSpace.from_list`.

        Parameters
        ----------
        dims: list or tuple
            The attributes defining the coordinate system.
        """
        # Define coordinate system
        self.Coord = collections.namedtuple("Coord", dims)

        # Attach a flexible repr to the Coord class:
        # "(v1, v2, ...)" or, with keys=True, "(k1=v1, k2=v2, ...)".
        # `str_or_repr` controls whether values get quoted.
        def repr2(c, keys=False, str_or_repr=repr):
            if keys:
                lst = [f"{k}={str_or_repr(v)}" for k, v in c._asdict().items()]
            else:
                lst = [str_or_repr(v) for v in c]
            return "(" + ", ".join(lst) + ")"

        self.Coord.repr2 = repr2

    def update(self, items):
        """Update dict, using the custom `__setitem__` to ensure key conformity.

        NB: the `kwargs` syntax is not supported because it only works for keys that
        consist of (a single) string, which is not very interesting for SparseSpace.
        """
        # See https://stackoverflow.com/a/2588648
        # and https://stackoverflow.com/a/2390997
        try:
            items = items.items()
        except AttributeError:
            # `items` is already an iterable of (key, value) pairs.
            pass
        for k, v in items:
            self[k] = v

    def __setitem__(self, key, val):
        """Setitem ensuring coordinate conforms."""
        try:
            key = self.Coord(*key)
        except TypeError:
            raise TypeError(
                f"The key {key!r} did not fit the coord. system "
                f"which has dims {self.dims}"
            )
        super().__setitem__(key, val)

    def __getitem__(self, key):
        """Also allows list-indexing by `list` and `slice`."""
        # List of items (from list of indices)
        if isinstance(key, list):
            lst = list(self.values())
            return [lst[k] for k in key]
        # List of items (from slice)
        elif isinstance(key, slice):
            return [*self.values()][key]
        # Single item (by Coord object, or tuple)
        else:
            # NB: Dont't use isinstance(key, self.Coord)
            # coz it fails when the namedtuple (Coord) has been
            # instantiated in different places (but with equal params).
            # Also see bugs.python.org/issue7796
            return super().__getitem__(key)

    def __call__(self, **kwargs):
        """Shortcut (syntactic sugar) for `SparseSpace.subspace`."""
        return self.subspace(**kwargs)

    def subspace(self, **kwargs):
        """Get an affine subspace.

        NB: If you're calling this repeatedly (for all values of the same `kwargs`)
        then you should consider using `SparseSpace.nest` instead.

        Example
        -------
        xp_dict.subspace(da_method="EnKF", infl=1, seed=3)
        """
        # Slow version
        # outer = self.nest(outer_dims=list(kwargs))  # make subspaceS
        # inner = outer[outer.Coord(**kwargs)]  # discard all but 1
        coords = self.coords_matching(**kwargs)
        # The subspace's dims are those NOT fixed by kwargs.
        inner = self.__class__(complement(self.dims, kwargs))
        for coord in coords:
            inner[inner.coord_from_attrs(coord)] = self[coord]
        return inner

    def coords_matching(self, **kwargs):
        """Get all `coord`s matching kwargs.

        Used by `SparseSpace.label_xSection` and `SparseSpace.subspace`. Unlike the
        latter, this function returns a *list* of *keys* of the *original subspace*.

        Note that the `missingval` shenanigans of `xpList.inds` are here unnecessary
        since each coordinate is complete.
        """
        def match(coord):
            # A coord matches if it agrees with kwargs on every given attribute.
            return all(getattr(coord, k) == kwargs[k] for k in kwargs)

        return [c for c in self if match(c)]

    def coord_from_attrs(self, obj):
        """Form a `coord` for this `xpSpace` by extracting attrs. from `obj`.

        Attributes absent from `obj` default to `None`.
        For instances of `self.Coord`, this is the identity opeartor, i.e.
        self.coord_from_attrs(coord) == coord
        """
        coord = (getattr(obj, a, None) for a in self.dims)
        return self.Coord(*coord)

    def __repr__(self):
        txt = f"<{self.__class__.__name__}>"
        txt += " with Coord/dims: "
        try:
            # `ticks` only exists if assigned (e.g. by `xpSpace.make_ticks`).
            txt += "(and ticks): " + str(AlignedDict(self.ticks))
        except AttributeError:
            txt += str(self.dims) + "\n"
        # Note: print(xpList(self)) produces a more human-readable table,
        # but requires prep_table(), which we don't really want to call again
        # (it's only called in from_list, not (necessarily) in any nested spaces)
        L = 2
        keys = [k.repr2() for k in self]
        if 2 * L < len(keys):
            # Abbreviate long listings: first L keys, ellipsis, last L keys.
            keys = keys[:L] + ["..."] + keys[-L:]
        keys = "[\n " + ",\n ".join(keys) + "\n]"
        return txt + f"populated by {len(self)} items with keys: {keys}"

    def nest(self, inner_dims=None, outer_dims=None):
        """Project along `inner_acces` to yield a new `xpSpace` with dims `outer_dims`

        The entries of this `xpSpace` are themselves `xpSpace`s, with dims `inner_dims`,
        each one regrouping the entries with the same (projected) coordinate.

        Note: this method could also be called `groupby`.
        Note: this method is also called by `__getitem__(key)` if `key` is dict.
        """
        # Default: a singleton outer space,
        # with everything contained in the inner (projection) space.
        if inner_dims is None and outer_dims is None:
            outer_dims = ()
        # Validate dims: exactly one of inner/outer may be given;
        # the other is its complement w.r.t. self.dims.
        if inner_dims is None:
            assert outer_dims is not None
            inner_dims = complement(self.dims, outer_dims)
        else:
            assert outer_dims is None
            outer_dims = complement(self.dims, inner_dims)
        # Fill spaces
        outer_space = self.__class__(outer_dims)
        for coord, entry in self.items():
            # Lookup subspace coord
            outer_coord = outer_space.coord_from_attrs(coord)
            try:
                # Get subspace
                inner_space = outer_space[outer_coord]
            except KeyError:
                # Create subspace, embed
                inner_space = self.__class__(inner_dims)
                outer_space[outer_coord] = inner_space
            # Add entry to subspace, similar to .fill()
            inner_space[inner_space.coord_from_attrs(coord)] = entry
        return outer_space

    def intersect_dims(self, attrs):
        """Rm those `a` in `attrs` that are not in `self.dims`.

        This enables sloppy `dims` allotment, for ease-of-use.
        """
        absent = complement(attrs, self.dims)
        if absent:
            # Warn (but proceed) about attrs that don't exist in this space.
            print(
                color_text("Warning:", colorama.Fore.RED),
                "The requested attributes",
                color_text(str(absent), colorama.Fore.RED),
                (
                    "were not found among the xpSpace dims"
                    " (attrs. used as coordinates for the set of experiments)."
                    " This may be no prob. if the attrs are redundant for the coord-sys."
                    " However, if due to confusion or mis-spelling, then it is likely"
                    " to cause mis-interpretation of the shown results."
                ),
            )
            attrs = complement(attrs, absent)
        return attrs

    def append_dim(self, dim):
        """Expand `self.Coord` by `dim`. For each item, insert `None` in new dim."""
        # Re-create the coordinate system with the new dim appended,
        # then re-key every existing entry.
        self.__init__(self.dims + (dim,))
        for coord in list(self):
            entry = self.pop(coord)
            self[coord + (None,)] = entry

    def label_xSection(self, label, *NoneAttrs, **sub_coord):
        """Insert duplicate entries for the given cross-section.

        Works by adding the attr. `xSection` to the dims of `SparseSpace`,
        and setting it to `label` for entries matching `sub_coord`,
        reflecting the "constance/constraint/fixation" this represents.
        This distinguishes the entries in this fixed-affine subspace,
        preventing them from being gobbled up by the operations of `nest`.

        If you wish, you can specify the `NoneAttrs`,
        which are consequently set to None for the duplicated entries,
        preventing them from being shown in plot labels and tuning panels.
        """
        if "xSect" not in self.dims:
            self.append_dim("xSect")
        for coord in self.coords_matching(**self.intersect_dims(sub_coord)):
            # Deep-copy so the duplicate is independent of the original entry.
            entry = copy.deepcopy(self[coord])
            coord = coord._replace(xSect=label)
            coord = coord._replace(**{a: None for a in NoneAttrs})
            self[coord] = entry
# Default allotment of xpSpace dims to the roles used by `xpSpace.table_tree`.
DIM_ROLES = dict.fromkeys(("outer", "inner", "mean", "optim"))
class xpSpace(SparseSpace):
"""Functionality to facilitate working with `xps` and their results."""
@classmethod
def from_list(cls, xps, tick_ordering=None):
    """Init. from a list of objects, typically experiments referred to as `xp`s.

    - Computes the relevant `dims` from the attributes, and
    - Fills the dict by `xp`s.
    - Computes and writes the attribute `ticks`.

    This creates a `SparseSpace` of `xp`s. However, the nested subspaces generated
    by `xpSpace.table_tree` (for printing and plotting) will hold objects of type
    `UncertainQtty`, because it calls `mean` which calls `get_stat(statkey)`.
    """
    # Distill the coordinate system (and the candidate ticks per dim)
    # from the attributes of the xps.
    tick_dct = xpList(xps).prep_table(nomerge=["xSect"])[0]
    space = cls(tick_dct.keys())
    space.fill(xps)
    space.make_ticks(tick_dct, tick_ordering)
    return space
def make_ticks(self, dct, ordering=None):
    """Unique & sort, for each individual "dim" in `dct`. Assign to `self.ticks`.

    NB: `self.ticks` will not "propagate" through `SparseSpace.nest` or the like.
    """
    self.ticks = dct
    ordering = ordering or {}
    for name, values in dct.items():
        spec = ordering.get(name, "as-found")
        # Resolve the sort key: a custom callable, first-appearance order,
        # or natural (identity) ordering.
        if callable(spec):
            sort_key = spec
        elif "as-found" in spec:
            sort_key = values.index
        else:  # "natural"
            def sort_key(x):
                return x
        # Unique-ify, sorting None's last.
        unique = sorted(set(values), key=lambda x: ((x is None), sort_key(x)))
        # Optionally reverse.
        if isinstance(spec, str) and "rev" in spec:
            unique = unique[::-1]
        dct[name] = unique
def fill(self, xps):
    """Mass insertion."""
    # Compute each xp's coordinate; insertion goes through the
    # conformity-checking `update`/`__setitem__`.
    self.update((self.coord_from_attrs(xp), xp) for xp in xps)
def squeeze(self):
    """Eliminate unnecessary dimensions."""
    # prep_table detects which attributes actually vary across the xps.
    varying_dims = xpList(self).prep_table()[0]
    squeezed = xpSpace(varying_dims)
    squeezed.fill(self)
    return squeezed
def get_stat(self, statkey):
    """Make `xpSpace` with same `Coord` as `self`, but values `xp.avrgs.statkey`."""
    # New space with an identical coordinate system, holding the stats.
    stats = self.__class__(self.dims)
    missing = set()
    for coord, xp in self.items():
        try:
            stats[coord] = getattr(xp.avrgs, statkey)
        except AttributeError:
            missing.add(coord)
    # Error out if *no* xp has the stat; warn if only some lack it.
    if len(missing) == len(self):
        raise AttributeError(
            f"The stat. '{statkey}' was not found among **any** of the xp's."
        )
    if missing:
        print(color_text("Warning:", "RED"), f"no stat. '{statkey}' found for")
        print(*missing, sep="\n")
    return stats
def mean(self, dims=None):
    """Compute mean over `dims` (a list). Returns `xpSpace` without those `dims`."""
    # Note: The case `dims=()` should work w/o special treatment.
    if dims is None:
        return self
    nested = self.nest(dims)
    for coord, space in nested.items():
        # Extract plain values (UncertainQtty entries contribute their .val).
        values = [
            uq.val if isinstance(uq, UncertainQtty) else uq
            for uq in space.values()
        ]
        N = len(values)
        # Don't use nanmean! It would give false impressions.
        mu = np.mean(values)
        with warnings.catch_warnings():
            # Silence the RuntimeWarning caused by N=1;
            # np.var already correctly yields nan in that case.
            warnings.simplefilter("ignore", category=RuntimeWarning)
            var = np.var(values, ddof=1)
        uq = UncertainQtty(mu, np.sqrt(var / N))
        uq.nTotal = N
        uq.nFail = N - np.isfinite(values).sum()
        uq.nSuccess = N - uq.nFail
        nested[coord] = uq
    return nested
def tune(self, dims=None, costfun=None):
    """Get (compile/tabulate) a stat. optimised wrt. tuning params (`dims`).

    Parameters
    ----------
    dims: list, optional
        The attributes to optimise over.
        If `None` (default), no tuning is done and `self` is returned as-is.
    costfun: str or function, optional
        Use `'increasing'` (default) or `'decreasing'` to indicate that the
        optimum is the lowest or highest stat value, or pass a custom callable
        mapping a stat value to a cost (lower cost = better).
    """
    # Define cost-function.
    # BUGFIX: `.lower()` used to be applied unconditionally, so passing a
    # custom *callable* costfun (as documented in `print`) raised
    # AttributeError before the `assert callable` branch was ever reached.
    if costfun is None or isinstance(costfun, str):
        direction = (costfun or "increasing").lower()
        if "increas" in direction:
            def costfun(x):
                return +x
        elif "decreas" in direction:
            def costfun(x):
                return -x
        else:
            # Unrecognized string ==> AssertionError, as before.
            assert callable(costfun), f"Invalid costfun: {costfun!r}"
    else:
        assert callable(costfun)  # custom
    # Note: The case `dims=()` should work w/o special treatment.
    if dims is None:
        return self
    nested = self.nest(dims)
    for coord, space in nested.items():
        # Find optimal value (and coord) within space
        MIN = np.inf
        found_any = False
        for inner_coord, uq in space.items():
            cost = costfun(uq.val)
            if cost <= MIN:  # `<=` so that later ties win (as before)
                found_any = True
                MIN = cost
                uq_opt = uq
                uq_opt.tuned_coord = inner_coord
        if not found_any:
            uq_opt = uq  # one is as good as another
            nDim = range(len(space.Coord._fields))
            uq_opt.tuned_coord = space.Coord(*(None for _ in nDim))
        nested[coord] = uq_opt
    return nested
def table_tree(self, statkey, dims, *, costfun=None):
    """Make hierarchy `outer > inner > mean > optim` using `SparseSpace.nest`.

    The dimension passed to `nest` (at each level) is specified by `dims`.
    The dimensions of `dims['mean']` and `dims['optim']` get eliminated
    by the mean/tune operations. The `dims['outer']` and `dims['inner']`
    become the keys for the output hierarchy.

    .. note::
        cannot support multiple `statkey`s because it's not (obviously) meaningful
        when optimizing over `dims['optim']`.
    """
    def validate_dims(dims):
        """Validate dims: normalize to tuples and check role assignments."""
        role_register = {}
        new = {}
        for role in set(dims) | set(DIM_ROLES):
            assert role in DIM_ROLES, f"Invalid role {role!r}"
            dd = dims.get(role, DIM_ROLES[role])
            if dd is None:
                # Don't convert None to (), allowing None to remain special.
                pass
            else:
                # Ensure iterable
                if isinstance(dd, str) or not hasattr(dd, "__iter__"):
                    dd = (dd,)
                # Keep relevant only
                dd = self.intersect_dims(dd)
                # Ensure each dim plays a single-role
                for dim in dd:
                    if dim in role_register:
                        raise TypeError(
                            f"A dim (here {dim!r}) cannot be assigned to 2"
                            f" roles (here {role!r} and {role_register[dim]!r})."
                        )
                    else:
                        role_register[dim] = role
            new[role] = dd
        return new

    def mean_tune(xp_dict):
        """Take mean, then tune.

        Note: the `SparseSpace` implementation should be sufficiently
        "uncluttered" that `mean_tune` (or a few of its code lines)
        could be called anywhere above/between/below
        the `nest`ing of `outer` or `inner`.
        These possibile call locations are commented in the code.
        """
        uq_dict = xp_dict.get_stat(statkey)
        uq_dict = uq_dict.mean(dims["mean"])
        uq_dict = uq_dict.tune(dims["optim"], costfun)
        return uq_dict

    dims = validate_dims(dims)
    self2 = mean_tune(self)
    # Prefer calling mean_tune() [also see its docstring]
    # before doing outer/inner nesting. This is because then the dims of
    # a row (xpSpace) should not include mean&optim, and thus:
    #  - Column header/coords may be had directly as row.keys(),
    #    without extraction by coord_from_attrs() from (e.g.) row[0].
    #  - Don't need to propagate mean&optim dims down to the row level.
    #    which would require defining rows by the nesting:
    #    rows = table.nest(outer_dims=complement(table.dims,
    #        *(dims['inner'] or ()),
    #        *(dims['mean'] or ()),
    #        *(dims['optim'] or ()) ))
    #  - Each level of the output from table_tree
    #    is a smaller (and more manageable) dict.
    tables = self2.nest(outer_dims=dims["outer"])
    for table_coord, table in tables.items():
        # table = mean_tune(table)
        # Should not be used (nesting as rows is more natural,
        # and is required for getting distinct/row_keys).
        # cols = table.nest(outer_dims=dims['inner'])
        rows = table.nest(inner_dims=dims["inner"] or ())
        # Overwrite table by its nesting as rows
        tables[table_coord] = rows
        # for row_coord, row in rows.items():
        #     rows[row_coord] = mean_tune(row)
    # Record the provenance of the tree (used e.g. for plot labelling).
    args = dict(statkey=statkey, xp_dict=self, dims=dims)
    tables.created_with = args
    return dims, tables
def tickz(self, dim_name):
    """Dimension (axis) ticks without None"""
    return list(filter(lambda tick: tick is not None, self.ticks[dim_name]))
def print(
    self,
    statkey,
    dims,  # noqa (shadowing builtin)
    subcols=True,
    decimals=None,
    costfun=None,
    squeeze_labels=True,
    colorize=True,
    title=None,
):
    """Print tables of results.

    Parameters
    ----------
    statkey: str
        The statistic to extract from the `xp.avrgs` for each `xp`.
        Examples: `"rmse.a"` (i.e. `"err.rms.a"`), `"rmse.ocean.a"`, `"duration"`.
    dims: dict
        Allots (maps) the dims of `xpSpace` to different roles in the tables.

        - The "role" `outer` should list the dims/attributes
          used to define the splitting of the results into *separate tables*:
          one table for each distinct combination of attributes.
        - Similarly , the role `inner` determines which attributes
          split a table into its columns.
        - `mean` lists the attributes over which the mean is taken
          (for that row & column)
        - `optim` lists the attributes used over which the optimum
          is searched for (after taking the mean).

        Example:

            dict(outer='da_method', inner='N', mean='seed',
                 optim=('infl','loc_rad'))

        Equivalently, use `mean=("seed",)`.
        It is acceptible to leave this empty: `mean=()` or `mean=None`.
    subcols: bool
        If `True`, then subcolumns are added to indicate

        - `1σ`: the confidence interval. If `mean=None` is used, this simply reports
          the value `.prec` of the `statkey`, providing this is an `UncertainQtty`.
          Otherwise, it is computed as `sqrt(var(xps)/N)`,
          where `xps` is the set of statistic gathered over the `mean` dimensions.
        - `*(optim)`: the optimal point (among all `optim` attributes),
          as defined by `costfun`.
        - `☠`: the number of failures (non-finite values) at that point.
        - `✓`: the number of successes that go into the value
    decimals: int
        Number of decimals to print.
        If `None`, this is determined for each statistic by its uncertainty.
    costfun: str or function
        Use `'increasing'` (default) or `'decreasing'` to indicate that the optimum
        is defined as the lowest or highest value of the `statkey` found.
    squeeze_labels: bool
        Don't include redundant attributes in the line labels.
        Caution: `get_style` will not be able to access the eliminated attrs.
    colorize: bool
        Add color to tables for readability.
    """
    # Title
    if title is not None:
        if colorize:
            clrs = colorama.Back.LIGHTBLUE_EX, colorama.Fore.BLACK
            title = color_text(str(title), *clrs)
        print(title)
    # Inform dims["mean"]
    if dims.get("mean", None):
        print(f"Averages (in time and) over {dims['mean']}.")
    else:
        print("Averages in time only" " (=> the 1σ estimates may be unreliable).")

    def make_cols(rows, cc, subcols, h2):
        """Subcolumns: align, justify, join."""
        # Define subcol formats
        if subcols:
            templ = "{val} ±{prec}"
            templ += "" if dims["optim"] is None else " *{tuned_coord}"
            templ += "" if dims["mean"] is None else " {nFail} {nSuccess}"
            aligns = dict(prec="<", tuned_coord="<")

        def align(column, idx):
            # Format one (transposed) column of UncertainQtty's into strings.
            # Only the first column (idx == 0) gets the statkey/optim headers.
            if idx == 0:
                headers = dict(val=statkey, prec="1σ", tuned_coord=dims["optim"])
            else:
                headers = dict(val="", prec="1σ", tuned_coord="")
            headers.update(nFail="☠", nSuccess="✓")
            col = unpack_uqs(column, decimals)
            if subcols:
                for key in list(col):
                    if key in templ:
                        subcolmn = [headers.get(key, key)] + col[key]
                        col[key] = align_col(subcolmn, just=aligns.get(key, ">"))
                    else:
                        # Sub-column not requested by the template: drop it.
                        del col[key]
                col = [templ.format(**row) for row in transps(col)]
            else:
                col = align_col([headers["val"]] + col["val"])
            return col

        def super_header(col_coord, idx, col):
            # Prepend a (centered, underscore-padded) line with the col coord.
            header, matter = col[0], col[1:]
            cc = col_coord.repr2(not idx, str).strip("()").replace(", ", ",")
            cc = cc.center(len(header), "_")  # +1 width for wide chars like ✔️
            return [cc + "\n" + header] + matter

        # Transpose
        columns = [list(x) for x in zip(*rows)]
        # Format column
        for j, (col_coord, column) in enumerate(zip(cc, columns)):
            col = align(column, j)
            if h2:
                col = super_header(col_coord, j, col)
            columns[j] = col
        # Un-transpose
        rows = [list(x) for x in zip(*columns)]
        return rows

    dims, tables = self.table_tree(statkey, dims, costfun=costfun)
    for table_coord, table in tables.items():
        # Get table's column coords/ticks (cc).
        # cc is really a set, but we use dict for ordering.
        # cc = self.ticks[dims["inner"]]  # may be > needed
        # cc = table[0].keys()  # may be < needed
        cc = {c: None for row in table.values() for c in row}
        # Could additionally do cc = table.squeeze() but is it worth it?

        # Convert table (rows) into rows (lists) of equal length
        rows = [[row.get(c, None) for c in cc] for row in table.values()]

        # Align cols
        h2 = "\n" if len(cc) > 1 else ""  # super-header?
        headers, *rows = make_cols(rows, cc, subcols, h2)

        # Prepend left-side (attr) table
        if squeeze_labels:
            table = table.squeeze()
        headers = [h2 + k for k in table.dims] + [h2 + "⑊"] + headers
        for i, (key, row) in enumerate(zip(table, rows)):
            rows[i] = [*key] + ["|"] + row

        print()
        if dims["outer"]:
            # Title
            table_title = "Table for " + table_coord.repr2(True).strip("()")
            if colorize:
                clrs = colorama.Back.YELLOW, colorama.Fore.BLACK
                table_title = color_text(table_title, *clrs)
            print(table_title)
        table = tabulate(rows, headers).replace("␣", " ")
        if colorize:
            # Stripe every other data row for readability.
            table = stripe(table, slice(2, None))
        print(table)
    return tables
def plot(
self,
statkey,
dims,
get_style=default_styles,
fignum=None,
figsize=None,
panels=None,
costfun=None,
title1=None,
title2=None,
unique_labels=True,
squeeze_labels=True,
):
"""Plot (tables of) results.
Analagously to `xpSpace.print`,
the averages are grouped by `dims["inner"]`,
which here plays the role of the x-axis.
The averages can also be grouped by `dims["outer"]`,
producing a figure with multiple (columns of) panels.
The optimal points/parameters/attributes are plotted in smaller panels
below the main plot. This can be turned off by providing the figure
dims through the `panels` argument.
The parameters `statkey`, `dims`, `costfun`, `sqeeze_labels`
are documented in `xpSpace.print`.
Parameters
----------
get_style: function
A function that takes an object, and returns a dict of line styles,
usually as a function of the object's attributes.
title1: anything
Figure title (in addition to the the defaults).
title2: anything
Figure title (in addition to the defaults). Goes on a new line.
unique_labels: bool
Only show a given line label once, even if it appears in several panels.
squeeze_labels:
Don't include redundant attributes in the labels.
"""
def plot1(panelcol, row, style):
"""Plot a given line (row) in the main panel and the optim panels.
Involves: Sort, insert None's, handle constant lines.
"""
# Make a full row (yy) of vals, whether is_constant or not.
# is_constant = (len(row)==1 and next(iter(row))==row.Coord(None))
is_constant = all(x == row.Coord(None) for x in row)
if is_constant:
yy = [
row[
None,
]
for _ in xticks
]
style.marker = None
else:
yy = [row.get(row.Coord(x), None) for x in xticks]
# Plot main
row.vals = [getattr(y, "val", None) for y in yy]
row.handles = {}
row.handles["main_panel"] = panelcol[0].plot(xticks, row.vals, **style)[0]
# Plot tuning params
row.tuned_coords = {} # Store ordered, "transposed" argmins
argmins = [getattr(y, "tuned_coord", None) for y in yy]
for a, panel in zip(dims["optim"] or (), panelcol[1:]):
yy = [getattr(coord, a, None) for coord in argmins]
row.tuned_coords[a] = yy
# Plotting all None's sets axes units (like any plotting call)
# which can cause trouble if the axes units were actually supposed
# to be categorical (eg upd_a), but this is only revealed later.
if not all(y == None for y in yy):
style["alpha"] = 0.2
row.handles[a] = panel.plot(xticks, yy, **style)
def label_management(table):
def pruner(style):
label = style.get("label", None)
if unique_labels:
if label in register:
del style["label"]
elif label:
register.add(style["label"])
pruner.has_labels = True
elif label:
pruner.has_labels = True
pruner.has_labels = False
def squeezer(coord):
return intersect(coord._asdict(), label_attrs)
if squeeze_labels:
label_attrs = xpList(table.keys()).prep_table()[0]
else:
label_attrs = table.dims
return pruner, squeezer
register = set()
def beautify(panels, title, has_labels):
panel0 = panels[0]
# panel0.set_title(title)
panel0.text(
0.5,
1,
title,
fontsize=12,
ha="center",
va="bottom",
transform=panel0.transAxes,
bbox=dict(
facecolor="lightyellow",
edgecolor="k",
alpha=0.99,
boxstyle="round,pad=0.25",
# NB: padding makes label spill into axes
),
)
if has_labels:
panel0.legend()
if panel0.is_first_col():
panel0.set_ylabel(statkey)
panels[-1].set_xlabel(dims["inner"][0])
# Tuning panels:
for a, panel in zip(dims["optim"] or (), panels[1:]):
if panel.is_first_col():
panel.set_ylabel(f"Optim.\n{a}")
# Nest dims through table_tree()
dims, tables = self.table_tree(statkey, dims, costfun=costfun)
assert len(dims["inner"]) == 1, "You must chose a valid attr. for the abscissa."
if not hasattr(self, "ticks"):
# TODO 6: this is probationary.
# In case self is actually a subspace, it may be that it does not contain
# all of the ticks of the original xpSpace. This may be fine,
# and we generate the ticks here again. However, this is costly-ish, so you
# should maybe simply (manually) assign them from the original xpSpace.
# And maybe you actually want the plotted lines to have holes where self
# has no values. Changes in the ticks are not obvious to the naked eye,
# unlike the case for printed tables (where column changes are quite clear).
print(
color_text("Warning:", colorama.Fore.RED),
"Making new x-ticks."
"\nConsider assigning them yourself from the original"
" xpSpace to this subspace.",
)
self.make_ticks(xpList(self).prep_table()[0])
xticks = self.tickz(dims["inner"][0])
# Create figure axes
if panels is None:
nrows = len(dims["optim"] or ()) + 1
ncols = len(tables)
maxW = 12.7 # my mac screen
figsize = figsize or (min(5 * ncols, maxW), 7)
gs = dict(
height_ratios=[6] + [1] * (nrows - 1),
hspace=0.05,
wspace=0.05,
# eyeballed:
left=0.15 / (1 + np.log(ncols)),
right=0.97,
bottom=0.06,
top=0.9,
)
# Create
_, panels = place.freshfig(
num=fignum,
figsize=figsize,
nrows=nrows,
sharex=True,
ncols=ncols,
sharey="row",
gridspec_kw=gs,
squeeze=False,
)
else:
panels = np.atleast_2d(panels)
# Fig. Title
fig = panels[0, 0].figure
fig_title = "Averages wrt. time"
if dims["mean"] is not None:
fig_title += " and " + ", ".join([repr(c) for c in dims["mean"]])
if title1 is not None:
fig_title += ". " + title1
if title2 is not None:
with nonchalance():
title2 = title2.relative_to(rc.dirs["data"])
fig_title += "\n" + str(title2)
fig.suptitle(fig_title)
# Loop outer
for ax_column, (table_coord, table) in zip(panels.T, tables.items()):
table.panels = ax_column
label_prune, label_squeeze = label_management(table)
for coord, row in table.items():
style = get_style(NoneDict(label_squeeze(coord)))
label_prune(style)
plot1(table.panels, row, style)
beautify(
table.panels,
title=(
"" if dims["outer"] is None else table_coord.repr2(True).strip("()")
),
has_labels=label_prune.has_labels,
)
tables.fig = fig # add reference to fig
return tables
| [
"dapper.stats.unpack_uqs",
"numpy.sqrt",
"struct_tools.complement",
"numpy.log",
"struct_tools.AlignedDict",
"numpy.isfinite",
"copy.deepcopy",
"dapper.stats.align_col",
"numpy.mean",
"mpl_tools.place.freshfig",
"numpy.atleast_2d",
"dapper.xp_launch.xpList",
"patlib.std.nonchalance",
"warn... | [((3208, 3245), 'collections.namedtuple', 'collections.namedtuple', (['"""Coord"""', 'dims'], {}), "('Coord', dims)\n", (3230, 3245), False, 'import collections\n'), ((9446, 9474), 'struct_tools.complement', 'complement', (['attrs', 'self.dims'], {}), '(attrs, self.dims)\n', (9456, 9474), False, 'from struct_tools import AlignedDict, complement, intersect, transps\n'), ((5889, 5918), 'struct_tools.complement', 'complement', (['self.dims', 'kwargs'], {}), '(self.dims, kwargs)\n', (5899, 5918), False, 'from struct_tools import AlignedDict, complement, intersect, transps\n'), ((8456, 8489), 'struct_tools.complement', 'complement', (['self.dims', 'outer_dims'], {}), '(self.dims, outer_dims)\n', (8466, 8489), False, 'from struct_tools import AlignedDict, complement, intersect, transps\n'), ((8567, 8600), 'struct_tools.complement', 'complement', (['self.dims', 'inner_dims'], {}), '(self.dims, inner_dims)\n', (8577, 8600), False, 'from struct_tools import AlignedDict, complement, intersect, transps\n'), ((10137, 10162), 'struct_tools.complement', 'complement', (['attrs', 'absent'], {}), '(attrs, absent)\n', (10147, 10162), False, 'from struct_tools import AlignedDict, complement, intersect, transps\n'), ((11328, 11354), 'copy.deepcopy', 'copy.deepcopy', (['self[coord]'], {}), '(self[coord])\n', (11341, 11354), False, 'import copy\n'), ((15121, 15134), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (15128, 15134), True, 'import numpy as np\n'), ((34695, 34827), 'mpl_tools.place.freshfig', 'place.freshfig', ([], {'num': 'fignum', 'figsize': 'figsize', 'nrows': 'nrows', 'sharex': '(True)', 'ncols': 'ncols', 'sharey': '"""row"""', 'gridspec_kw': 'gs', 'squeeze': '(False)'}), "(num=fignum, figsize=figsize, nrows=nrows, sharex=True, ncols\n =ncols, sharey='row', gridspec_kw=gs, squeeze=False)\n", (34709, 34827), False, 'from mpl_tools import place\n'), ((35001, 35022), 'numpy.atleast_2d', 'np.atleast_2d', (['panels'], {}), '(panels)\n', (35014, 35022), True, 
'import numpy as np\n'), ((9529, 9570), 'dapper.tools.colors.color_text', 'color_text', (['"""Warning:"""', 'colorama.Fore.RED'], {}), "('Warning:', colorama.Fore.RED)\n", (9539, 9570), False, 'from dapper.tools.colors import color_text, stripe\n'), ((15153, 15178), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (15176, 15178), False, 'import warnings\n'), ((15196, 15252), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (15217, 15252), False, 'import warnings\n'), ((15381, 15401), 'numpy.var', 'np.var', (['vals'], {'ddof': '(1)'}), '(vals, ddof=1)\n', (15387, 15401), True, 'import numpy as np\n'), ((15464, 15480), 'numpy.sqrt', 'np.sqrt', (['(var / N)'], {}), '(var / N)\n', (15471, 15480), True, 'import numpy as np\n'), ((24916, 24944), 'dapper.stats.unpack_uqs', 'unpack_uqs', (['column', 'decimals'], {}), '(column, decimals)\n', (24926, 24944), False, 'from dapper.stats import align_col, unpack_uqs\n'), ((33786, 33827), 'dapper.tools.colors.color_text', 'color_text', (['"""Warning:"""', 'colorama.Fore.RED'], {}), "('Warning:', colorama.Fore.RED)\n", (33796, 33827), False, 'from dapper.tools.colors import color_text, stripe\n'), ((35353, 35366), 'patlib.std.nonchalance', 'nonchalance', ([], {}), '()\n', (35364, 35366), False, 'from patlib.std import nonchalance\n'), ((7084, 7107), 'struct_tools.AlignedDict', 'AlignedDict', (['self.ticks'], {}), '(self.ticks)\n', (7095, 7107), False, 'from struct_tools import AlignedDict, complement, intersect, transps\n'), ((12299, 12310), 'dapper.xp_launch.xpList', 'xpList', (['xps'], {}), '(xps)\n', (12305, 12310), False, 'from dapper.xp_launch import xpList\n'), ((14433, 14462), 'dapper.tools.colors.color_text', 'color_text', (['"""Warning:"""', '"""RED"""'], {}), "('Warning:', 'RED')\n", (14443, 14462), False, 'from dapper.tools.colors import color_text, stripe\n'), ((25408, 25448), 'dapper.stats.align_col', 
'align_col', (["([headers['val']] + col['val'])"], {}), "([headers['val']] + col['val'])\n", (25417, 25448), False, 'from dapper.stats import align_col, unpack_uqs\n'), ((27628, 27658), 'dapper.tools.colors.color_text', 'color_text', (['table_title', '*clrs'], {}), '(table_title, *clrs)\n', (27638, 27658), False, 'from dapper.tools.colors import color_text, stripe\n'), ((27714, 27737), 'tabulate.tabulate', 'tabulate', (['rows', 'headers'], {}), '(rows, headers)\n', (27722, 27737), False, 'from tabulate import tabulate\n'), ((13719, 13731), 'dapper.xp_launch.xpList', 'xpList', (['self'], {}), '(self)\n', (13725, 13731), False, 'from dapper.xp_launch import xpList\n'), ((15535, 15552), 'numpy.isfinite', 'np.isfinite', (['vals'], {}), '(vals)\n', (15546, 15552), True, 'import numpy as np\n'), ((25346, 25358), 'struct_tools.transps', 'transps', (['col'], {}), '(col)\n', (25353, 25358), False, 'from struct_tools import AlignedDict, complement, intersect, transps\n'), ((34026, 34038), 'dapper.xp_launch.xpList', 'xpList', (['self'], {}), '(self)\n', (34032, 34038), False, 'from dapper.xp_launch import xpList\n'), ((34538, 34551), 'numpy.log', 'np.log', (['ncols'], {}), '(ncols)\n', (34544, 34551), True, 'import numpy as np\n')] |
import numpy as np
import multiprocessing
from contextlib import closing
import copy
import past
__author__ = '<NAME>'
__license__ = "BSD-2-Clause"
__email__ = "<EMAIL>"
class InitializationException(Exception):
    """Raised when a multi-run simulation is configured inconsistently,
    e.g. the number of supplied infection sets does not match the number
    of requested executions."""
def multi_runs(model, execution_number=1, iteration_number=50, infection_sets=None,
               nprocesses=multiprocessing.cpu_count()):
    """
    Multiple executions of a given model varying the initial set of infected nodes.

    :param model: a configured diffusion model
    :param execution_number: number of instantiations
    :param iteration_number: number of iterations per execution
    :param infection_sets: predefined set of infected nodes sets
    :param nprocesses: number of processes. Default value: cpu number.
    :return: resulting trends for all the executions
    :raises InitializationException: if ``infection_sets`` is given but its
        length does not match ``execution_number``.
    """
    # Never spawn more workers than physical cores; also guard against
    # nonsensical values such as 0 or negative numbers.
    nprocesses = max(1, min(nprocesses, multiprocessing.cpu_count()))
    if infection_sets is not None and len(infection_sets) != execution_number:
        raise InitializationException(
            {"message": "Number of infection sets provided does not match the number of executions required"})
    # One independent RNG seed per execution so parallel runs do not share state.
    seeds = np.around(np.random.rand(execution_number) * 2 ** 32).astype(int)
    executions = []
    for start in range(0, execution_number, nprocesses):
        end = min(start + nprocesses, execution_number)
        with closing(multiprocessing.Pool(processes=nprocesses, maxtasksperchild=10)) as pool:
            # Each task gets its own deep copy of the model, reset either to a
            # predefined infection set or to a fresh initial state.
            if infection_sets is not None:
                tasks = [(seeds[i], copy.deepcopy(model).reset(infection_sets[i]))
                         for i in range(start, end)]
            else:
                tasks = [(seeds[i], copy.deepcopy(model).reset())
                         for i in range(start, end)]
            results = [pool.apply_async(__execute, (*t, iteration_number)) for t in tasks]
            for result in results:
                executions.append(result.get())
    return executions
def __execute(seed, model, iteration_number):
    """
    Run a single simulation of the given model and return its trends.

    :param seed: RNG seed used to make the run reproducible
    :param model: a configured diffusion model
    :param iteration_number: number of iterations to simulate
    :return: computed trends
    """
    np.random.seed(seed)
    raw_iterations = model.iteration_bunch(iteration_number, False)
    trends = model.build_trends(raw_iterations)[0]
    # Free the (potentially large) simulation objects eagerly.
    del raw_iterations
    del model
    return trends
| [
"numpy.random.rand",
"past.builtins.xrange",
"multiprocessing.cpu_count",
"multiprocessing.Pool",
"numpy.random.seed",
"copy.deepcopy"
] | [((362, 389), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (387, 389), False, 'import multiprocessing\n'), ((2593, 2613), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2607, 2613), True, 'import numpy as np\n'), ((867, 894), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (892, 894), False, 'import multiprocessing\n'), ((917, 944), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (942, 944), False, 'import multiprocessing\n'), ((1304, 1357), 'past.builtins.xrange', 'past.builtins.xrange', (['(0)', 'execution_number', 'nprocesses'], {}), '(0, execution_number, nprocesses)\n', (1324, 1357), False, 'import past\n'), ((1846, 1899), 'past.builtins.xrange', 'past.builtins.xrange', (['(0)', 'execution_number', 'nprocesses'], {}), '(0, execution_number, nprocesses)\n', (1866, 1899), False, 'import past\n'), ((988, 1020), 'numpy.random.rand', 'np.random.rand', (['execution_number'], {}), '(execution_number)\n', (1002, 1020), True, 'import numpy as np\n'), ((1385, 1448), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'nprocesses', 'maxtasksperchild': '(10)'}), '(processes=nprocesses, maxtasksperchild=10)\n', (1405, 1448), False, 'import multiprocessing\n'), ((1926, 1989), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'nprocesses', 'maxtasksperchild': '(10)'}), '(processes=nprocesses, maxtasksperchild=10)\n', (1946, 1989), False, 'import multiprocessing\n'), ((1495, 1515), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (1508, 1515), False, 'import copy\n'), ((2037, 2057), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (2050, 2057), False, 'import copy\n')] |
import os
import lightkurve as lk
import matplotlib.pyplot as plt
import pytest
import numpy as np
from vetting import centroid_test
from vetting import PACKAGEDIR
# Repository root: assumed to live two path components above PACKAGEDIR.
testdir = "/".join(PACKAGEDIR.split("/")[:-2])
def is_action():
    """Return True when running under GitHub Actions.

    The CI environment defines the ``GITHUB_ACTIONS`` variable; a simple
    membership test replaces the previous try/except with an unused binding.
    """
    return "GITHUB_ACTIONS" in os.environ
def test_centroid_test():
    """Run centroid_test on a known Kepler target and sanity-check its output."""
    pixel_file = lk.KeplerTargetPixelFile(
        f"{testdir}/tests/data/kplr005562784-2011271113734_lpd-targ.fits.gz"
    )
    period, t0, dur = 25.3368592, 192.91552, 8.85 / 24
    # Single (real) signal without plotting: exactly one significant p-value.
    result = centroid_test(pixel_file, period, t0, dur, aperture_mask="pipeline", plot=False)
    assert result["pvalues"][0][0] < 0.01
    assert len(result["pvalues"][0]) == 1
    # Same call with plotting enabled should yield exactly one figure.
    result = centroid_test(pixel_file, period, t0, dur, aperture_mask="pipeline", plot=True)
    assert len(result["figs"]) == 1
    result["figs"][0].savefig(f"{PACKAGEDIR}/demo.png", dpi=200, bbox_inches="tight")
    # Two signals at once: the real transit stays significant,
    # the random one does not.
    result = centroid_test(
        pixel_file,
        [period, 22.4359873459],
        [t0, 0],
        [dur, dur],
        aperture_mask="pipeline",
        plot=False,
    )
    assert result["pvalues"][0][0] < 0.01
    assert result["pvalues"][0][1] > 0.2
    assert len(result["pvalues"][0]) == 2
    # Two target pixel files: one p-value list per TPF.
    result = centroid_test(
        [pixel_file, pixel_file],
        [period, 22.4359873459],
        [t0, 0],
        [dur, dur],
        aperture_mask="pipeline",
        plot=False,
    )
    assert len(result["pvalues"]) == 2
    assert len(result["pvalues"][0]) == 2
    # Supplying a transit depth enables the 1-sigma error estimate.
    result = centroid_test(
        pixel_file,
        period,
        t0,
        dur,
        aperture_mask="pipeline",
        plot=True,
        transit_depths=0.001499,
    )
    assert "1sigma_error" in result.keys()
    assert hasattr(result["1sigma_error"][0], "unit")
    assert result["1sigma_error"][0].value > 0
    assert result["1sigma_error"][0].value < 0.5
    # Custom labels are accepted without error.
    result = centroid_test(
        pixel_file,
        period,
        t0,
        dur,
        aperture_mask="pipeline",
        plot=True,
        transit_depths=0.001499,
        labels="c",
    )
@pytest.mark.skipif(
    is_action(),
    reason="Can not run on GitHub actions, because it's a pain to download.",
)
def test_FPs():
    """Produce some figures that show centroid offsets"""
    # Known false-positive targets from Kepler, K2 and TESS.
    names = [
        "KIC 5391911",
        "KIC 5866724",
        "EPIC 220256496",
        "EPIC 210957318",
        "TIC 293435336",
        "TIC 13023738",
    ]
    # Per-target search arguments (quarter / campaign / sector).
    search_kwargs = [
        {"quarter": 3},
        {"quarter": 3},
        {"campaign": 8},
        {"campaign": 4},
        {},
        {"sector": 2},
    ]
    periods = [0.782068488, 2.154911236, 0.669558, 4.098503000, 1.809886, 8.0635]
    # Epochs, converted from BJD to each mission's native time reference.
    t0s = [
        131.8940201,
        133.498613,
        2457393.81364 - 2454833,
        2457063.80710000 - 2454833,
        2456107.85507 - 2457000,
        2459104.87232 - 2457000,
    ]
    # Durations in days (input values are hours).
    durations = np.asarray([1.239, 3.1341, 1, 2.3208, 3.694, 3.396]) / 24
    # Fractional transit depths.
    depths = [
        0.01157 * 0.01,
        0.00850 * 0.01,
        0.015450 ** 2,
        1.603 * 0.01,
        1.189 * 0.01,
        5180 * 1e-6,
    ]
    for name, period, t0, dur, search_kwarg, depth in zip(
        names, periods, t0s, durations, search_kwargs, depths
    ):
        pixel_file = lk.search_targetpixelfile(name, **search_kwarg).download()
        result = centroid_test(
            pixel_file,
            period,
            t0,
            dur,
            aperture_mask="pipeline",
            plot=True,
            transit_depths=depth,
        )
        result["figs"][0].savefig(
            f"{testdir}/docs/{name.replace(' ','').lower()}.png",
            dpi=200,
            bbox_inches="tight",
        )
| [
"vetting.centroid_test",
"lightkurve.search_targetpixelfile",
"numpy.asarray",
"lightkurve.KeplerTargetPixelFile",
"vetting.PACKAGEDIR.split"
] | [((384, 483), 'lightkurve.KeplerTargetPixelFile', 'lk.KeplerTargetPixelFile', (['f"""{testdir}/tests/data/kplr005562784-2011271113734_lpd-targ.fits.gz"""'], {}), "(\n f'{testdir}/tests/data/kplr005562784-2011271113734_lpd-targ.fits.gz')\n", (408, 483), True, 'import lightkurve as lk\n'), ((556, 629), 'vetting.centroid_test', 'centroid_test', (['tpf', 'period', 't0', 'dur'], {'aperture_mask': '"""pipeline"""', 'plot': '(False)'}), "(tpf, period, t0, dur, aperture_mask='pipeline', plot=False)\n", (569, 629), False, 'from vetting import centroid_test\n'), ((713, 785), 'vetting.centroid_test', 'centroid_test', (['tpf', 'period', 't0', 'dur'], {'aperture_mask': '"""pipeline"""', 'plot': '(True)'}), "(tpf, period, t0, dur, aperture_mask='pipeline', plot=True)\n", (726, 785), False, 'from vetting import centroid_test\n'), ((907, 1013), 'vetting.centroid_test', 'centroid_test', (['tpf', '[period, 22.4359873459]', '[t0, 0]', '[dur, dur]'], {'aperture_mask': '"""pipeline"""', 'plot': '(False)'}), "(tpf, [period, 22.4359873459], [t0, 0], [dur, dur],\n aperture_mask='pipeline', plot=False)\n", (920, 1013), False, 'from vetting import centroid_test\n'), ((1271, 1384), 'vetting.centroid_test', 'centroid_test', (['[tpf, tpf]', '[period, 22.4359873459]', '[t0, 0]', '[dur, dur]'], {'aperture_mask': '"""pipeline"""', 'plot': '(False)'}), "([tpf, tpf], [period, 22.4359873459], [t0, 0], [dur, dur],\n aperture_mask='pipeline', plot=False)\n", (1284, 1384), False, 'from vetting import centroid_test\n'), ((1516, 1617), 'vetting.centroid_test', 'centroid_test', (['tpf', 'period', 't0', 'dur'], {'aperture_mask': '"""pipeline"""', 'plot': '(True)', 'transit_depths': '(0.001499)'}), "(tpf, period, t0, dur, aperture_mask='pipeline', plot=True,\n transit_depths=0.001499)\n", (1529, 1617), False, 'from vetting import centroid_test\n'), ((1859, 1972), 'vetting.centroid_test', 'centroid_test', (['tpf', 'period', 't0', 'dur'], {'aperture_mask': '"""pipeline"""', 'plot': '(True)', 
'transit_depths': '(0.001499)', 'labels': '"""c"""'}), "(tpf, period, t0, dur, aperture_mask='pipeline', plot=True,\n transit_depths=0.001499, labels='c')\n", (1872, 1972), False, 'from vetting import centroid_test\n'), ((186, 207), 'vetting.PACKAGEDIR.split', 'PACKAGEDIR.split', (['"""/"""'], {}), "('/')\n", (202, 207), False, 'from vetting import PACKAGEDIR\n'), ((2843, 2895), 'numpy.asarray', 'np.asarray', (['[1.239, 3.1341, 1, 2.3208, 3.694, 3.396]'], {}), '([1.239, 3.1341, 1, 2.3208, 3.694, 3.396])\n', (2853, 2895), True, 'import numpy as np\n'), ((3246, 3344), 'vetting.centroid_test', 'centroid_test', (['tpf', 'period', 't0', 'dur'], {'aperture_mask': '"""pipeline"""', 'plot': '(True)', 'transit_depths': 'depth'}), "(tpf, period, t0, dur, aperture_mask='pipeline', plot=True,\n transit_depths=depth)\n", (3259, 3344), False, 'from vetting import centroid_test\n'), ((3182, 3222), 'lightkurve.search_targetpixelfile', 'lk.search_targetpixelfile', (['name'], {}), '(name, **kwarg)\n', (3207, 3222), True, 'import lightkurve as lk\n')] |
from __future__ import print_function
from six.moves import range
from PIL import Image
import torch.backends.cudnn as cudnn
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import os
import time
import numpy as np
import torchfile
from miscc.config import cfg
from miscc.utilsv2 import mkdir_p
from miscc.utilsv2 import weights_init
from miscc.utilsv2 import save_img_results, save_model
from miscc.utilsv2 import compute_discriminator_loss, compute_generator_loss
from tensorboard import summary
from tensorboardX import FileWriter
class GANTrainer(object):
    """Trainer for a two-stage (StackGAN-style) text-to-image GAN.

    Builds or restores the stage-I / stage-II generator-discriminator pair,
    runs the adversarial training loop (:meth:`train`) and generates images
    from pre-computed text embeddings (:meth:`sample`).  All hyper-parameters
    are read from the global ``cfg`` configuration object.
    """

    def __init__(self, output_dir):
        """Prepare output directories and cache training settings from cfg.

        :param output_dir: root folder; ``Model``, ``Image`` and ``Log``
            sub-folders are created inside it (only in training mode).
        """
        if cfg.TRAIN.FLAG:
            self.model_dir = os.path.join(output_dir, 'Model')
            self.image_dir = os.path.join(output_dir, 'Image')
            self.log_dir = os.path.join(output_dir, 'Log')
            mkdir_p(self.model_dir)
            mkdir_p(self.image_dir)
            mkdir_p(self.log_dir)
            self.summary_writer = FileWriter(self.log_dir)
        self.max_epoch = cfg.TRAIN.MAX_EPOCH
        self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL
        # cfg.GPU_ID is a comma-separated list of device indices, e.g. "0,1".
        s_gpus = cfg.GPU_ID.split(',')
        self.gpus = [int(ix) for ix in s_gpus]
        self.num_gpus = len(self.gpus)
        # Effective batch size scales with the number of GPUs used.
        self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus
        #torch.cuda.set_device(self.gpus[0])
        cudnn.benchmark = True

    # ############# For training stageI GAN #############
    def load_network_stageI(self):
        """Build the stage-I generator and discriminator.

        Weights are optionally restored from the ``cfg.NET_G`` / ``cfg.NET_D``
        checkpoint paths; networks are moved to GPU when ``cfg.CUDA`` is set.

        :return: tuple ``(netG, netD)``
        """
        from modelv2 import STAGE1_G, STAGE1_D
        netG = STAGE1_G()
        netG.apply(weights_init)
        print(netG)
        netD = STAGE1_D()
        netD.apply(weights_init)
        print(netD)
        if cfg.NET_G != '':
            # map_location keeps tensors on CPU regardless of where they were saved.
            state_dict = \
                torch.load(cfg.NET_G,
                           map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load from: ', cfg.NET_G)
        if cfg.NET_D != '':
            state_dict = \
                torch.load(cfg.NET_D,
                           map_location=lambda storage, loc: storage)
            netD.load_state_dict(state_dict)
            print('Load from: ', cfg.NET_D)
        if cfg.CUDA:
            netG.cuda()
            netD.cuda()
        return netG, netD

    # ############# For training stageII GAN #############
    def load_network_stageII(self):
        """Build the stage-II GAN on top of a (pre-trained) stage-I generator.

        The stage-I weights come either from a full stage-II checkpoint
        (``cfg.NET_G``) or from a stage-I-only checkpoint (``cfg.STAGE1_G``);
        one of the two must be provided.

        :return: tuple ``(netG, netD)``, or ``None`` when no stage-I path
            was given (NOTE(review): the implicit ``return`` yields ``None``,
            which will break the caller's tuple unpacking — effectively fatal).
        """
        from modelv2 import STAGE1_G, STAGE2_G, STAGE2_D
        Stage1_G = STAGE1_G()
        netG = STAGE2_G(Stage1_G)
        netG.apply(weights_init)
        print(netG)
        if cfg.NET_G != '':
            state_dict = \
                torch.load(cfg.NET_G,
                           map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load from: ', cfg.NET_G)
        elif cfg.STAGE1_G != '':
            # Restore only the embedded stage-I generator's weights.
            state_dict = \
                torch.load(cfg.STAGE1_G,
                           map_location=lambda storage, loc: storage)
            netG.STAGE1_G.load_state_dict(state_dict)
            print('Load from: ', cfg.STAGE1_G)
        else:
            print("Please give the Stage1_G path")
            return
        netD = STAGE2_D()
        netD.apply(weights_init)
        if cfg.NET_D != '':
            state_dict = \
                torch.load(cfg.NET_D,
                           map_location=lambda storage, loc: storage)
            netD.load_state_dict(state_dict)
            print('Load from: ', cfg.NET_D)
        print(netD)
        if cfg.CUDA:
            netG.cuda()
            netD.cuda()
        return netG, netD

    def train(self, data_loader, stage=1):
        """Run the adversarial training loop.

        Trains from ``cfg.TRAIN.EPOCH_INIT`` up to ``cfg.TRAIN.MAX_EPOCH``,
        halving both learning rates every ``cfg.TRAIN.LR_DECAY_EPOCH`` epochs,
        logging every 10 iterations, saving sample images each epoch and a
        model snapshot every ``cfg.TRAIN.SNAPSHOT_INTERVAL`` epochs.

        :param data_loader: yields ``(real_images, context_embedding)`` batches
        :param stage: 1 or 2; selects which network pair to build and train
        """
        if stage == 1:
            netG, netD = self.load_network_stageI()
        else:
            netG, netD = self.load_network_stageII()
        nz = cfg.Z_DIM
        batch_size = self.batch_size
        noise = Variable(torch.FloatTensor(batch_size, nz))
        lr_decay_factor = 0.5
        with torch.no_grad():
            # Fixed noise is reused every epoch so the saved sample images
            # are comparable across epochs.
            fixed_noise = \
                Variable(torch.FloatTensor(batch_size, nz).normal_(0, 1))
            real_labels = Variable(torch.FloatTensor(batch_size).fill_(1))
            fake_labels = Variable(torch.FloatTensor(batch_size).fill_(0))
        if cfg.CUDA:
            noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
            real_labels, fake_labels = real_labels.cuda(), fake_labels.cuda()
        generator_lr = cfg.TRAIN.GENERATOR_LR
        discriminator_lr = cfg.TRAIN.DISCRIMINATOR_LR
        lr_decay_step = cfg.TRAIN.LR_DECAY_EPOCH
        optimizerD = \
            optim.Adam(netD.parameters(),
                       lr=cfg.TRAIN.DISCRIMINATOR_LR, betas=(0.5, 0.999))
        # Optimize only generator parameters that require gradients
        # (e.g. a frozen stage-I generator inside stage 2 is excluded).
        netG_para = []
        for p in netG.parameters():
            if p.requires_grad:
                netG_para.append(p)
        optimizerG = optim.Adam(netG_para,
                                lr=cfg.TRAIN.GENERATOR_LR,
                                betas=(0.5, 0.999))
        count = 0
        print("GPUs: " + str(self.gpus))
        epoch_init = cfg.TRAIN.EPOCH_INIT
        print ("Training from epoch {}".format(epoch_init))
        #Adjust learning rate for a loaded model
        if (epoch_init > 0):
            num_decays = (epoch_init // lr_decay_step) * 1.
            if epoch_init % lr_decay_step == 0: num_decays -= 1. #Guaranteed to decay on first step
            generator_lr = cfg.TRAIN.GENERATOR_LR * (lr_decay_factor**num_decays)
            discriminator_lr = cfg.TRAIN.DISCRIMINATOR_LR * (lr_decay_factor**num_decays)
            print("Adjusted G/D learning rates: {}, {}".format(generator_lr, discriminator_lr))
        else:
            print("Initial G/D learning rates: {}, {}".format(generator_lr, discriminator_lr))
        g_losses, d_losses, d_accs = [], [], []
        for epoch in range(epoch_init, self.max_epoch + 1):
            start_t = time.time()
            # Halve both learning rates every lr_decay_step epochs.
            if epoch % lr_decay_step == 0 and epoch > 0:
                generator_lr *= lr_decay_factor
                for param_group in optimizerG.param_groups:
                    param_group['lr'] = generator_lr
                discriminator_lr *= lr_decay_factor
                for param_group in optimizerD.param_groups:
                    param_group['lr'] = discriminator_lr
            for i, data in enumerate(data_loader, 0):
                ######################################################
                # (1) Prepare training data
                ######################################################
                real_img_cpu, txt_embedding = data #txt_embedding is actually context embedding vector
                real_imgs = Variable(real_img_cpu)
                txt_embedding = Variable(txt_embedding).float()
                if cfg.CUDA:
                    real_imgs = real_imgs.cuda()
                    txt_embedding = txt_embedding.cuda()
                #######################################################
                # (2) Generate fake images
                ######################################################
                noise.data.normal_(0, 1)
                inputs = (txt_embedding, noise)
                _, fake_imgs = \
                    nn.parallel.data_parallel(netG, inputs, self.gpus)
                if stage == 2:
                    # Down-sample so stage-2 images match the resolution
                    # the discriminator expects.
                    fake_imgs = nn.functional.avg_pool2d(fake_imgs, 2)
                    real_imgs = nn.functional.avg_pool2d(real_imgs, 2)
                #real_imgs and fake_imgs should be (64x64) if stage 1, (128x128) if stage 2
                # Note: Model saves non-downsampled (256x256) fakes at test time
                ############################
                # (3) Update D network
                ###########################
                netD.zero_grad()
                # Run discriminator on real and fake images to generate real and fake classpreds and reconstructions
                clspred_real, recon_real =\
                    nn.parallel.data_parallel(netD, (real_imgs), self.gpus)
                clspred_fake, recon_fake =\
                    nn.parallel.data_parallel(netD, (fake_imgs), self.gpus)
                errD = compute_discriminator_loss(real_imgs, fake_imgs, recon_real, clspred_fake, clspred_real, txt_embedding)
                # print(errD, errD.size())
                # retain_graph: the generator update below reuses the same
                # forward graph through fake_imgs.
                errD.backward(retain_graph=True)
                optimizerD.step()
                ############################
                # (2) Update G network
                ###########################
                netG.zero_grad()
                errG = compute_generator_loss(clspred_fake, fake_imgs, recon_fake, txt_embedding)
                # print(errG, errG.size())
                errG.backward()
                optimizerG.step()
                count = count + 1
                # Log losses and discriminator accuracy every 10 iterations.
                if i % 10 == 0:
                    print ('Epoch: ' + str(epoch) + ' iteration: ' + str(i), flush=True)
                    print ('D_loss: ' + str(errD.data.item()), flush=True)
                    print ('G_loss: ' + str(errG.data.item()), flush=True)
                    accuracy = np.mean(torch.argmax(clspred_real, 1).cpu().numpy() == torch.argmax(txt_embedding, 1).cpu().numpy())
                    print('Discriminator accuracy: {}'.format(accuracy))
                    g_losses.append(errG.data.item())
                    d_losses.append(errD.data.item())
                    d_accs.append(accuracy)
            end_t = time.time()
            print('''[%d/%d] Loss_D: %.4f Loss_G: %.4f
                     Total Time: %.2fsec
                  '''
                  % (epoch, self.max_epoch,
                     errD.data.item(), errG.data.item(),
                     (end_t - start_t)))
            # Save sample images generated from the fixed noise for this epoch.
            inputs = (txt_embedding, fixed_noise)
            lr_fake, fake = \
                nn.parallel.data_parallel(netG, inputs, self.gpus)
            save_img_results(real_img_cpu, fake, epoch, self.image_dir)
            if lr_fake is not None:
                print ("Saving generated images for epoch " + str(epoch))
                save_img_results(None, lr_fake, epoch, self.image_dir)
            if epoch % self.snapshot_interval == 0:
                save_model(netG, netD, epoch, self.model_dir)
        # NOTE(review): np.save returns None, so these assignments clobber the
        # collected histories; harmless only because they are not used again.
        g_losses = np.save("../../results/G_losses.npy", np.array(g_losses))
        d_losses = np.save("../../results/D_losses.npy", np.array(d_losses))
        d_accs = np.save("../../results/D_accs.npy", np.array(d_accs))
        save_model(netG, netD, self.max_epoch, self.model_dir)

    def sample(self, datapath, stage=1):
        """Generate images for pre-computed text embeddings and save them as PNGs.

        :param datapath: torchfile containing ``raw_txt`` (captions) and
            ``fea_txt`` (embedding arrays) produced by the text encoder
        :param stage: 1 or 2; selects which generator to load
        """
        if stage == 1:
            netG, _ = self.load_network_stageI()
        else:
            netG, _ = self.load_network_stageII()
        netG.eval()
        # Load text embeddings generated from the encoder
        t_file = torchfile.load(datapath)
        captions_list = t_file.raw_txt
        embeddings = np.concatenate(t_file.fea_txt, axis=0)
        num_embeddings = len(captions_list)
        print('Successfully load sentences from: ', datapath)
        print('Total number of sentences:', num_embeddings)
        print('num_embeddings:', num_embeddings, embeddings.shape)
        # path to save generated samples (derived from the checkpoint name)
        save_dir = cfg.NET_G[:cfg.NET_G.find('.pth')]
        mkdir_p(save_dir)
        batch_size = np.minimum(num_embeddings, self.batch_size)
        nz = cfg.Z_DIM
        noise = Variable(torch.FloatTensor(batch_size, nz))
        if cfg.CUDA:
            noise = noise.cuda()
        count = 0
        while count < num_embeddings:
            # Hard cap: stop after roughly 3000 generated samples.
            if count > 3000:
                break
            iend = count + batch_size
            if iend > num_embeddings:
                # Shift the last window back so every batch stays full-sized.
                iend = num_embeddings
                count = num_embeddings - batch_size
            embeddings_batch = embeddings[count:iend]
            txt_embedding = Variable(torch.FloatTensor(embeddings_batch))
            if cfg.CUDA:
                txt_embedding = txt_embedding.cuda()
            #######################################################
            # (2) Generate fake images
            ######################################################
            noise.data.normal_(0, 1)
            inputs = (txt_embedding, noise)
            _, fake_imgs = \
                nn.parallel.data_parallel(netG, inputs, self.gpus)
            for i in range(batch_size):
                save_name = '%s/%d.png' % (save_dir, count + i)
                im = fake_imgs[i].data.cpu().numpy()
                # Map generator output from [-1, 1] to [0, 255] for PNG export.
                im = (im + 1.0) * 127.5
                im = im.astype(np.uint8)
                # print('im', im.shape)
                im = np.transpose(im, (1, 2, 0))
                # print('im', im.shape)
                im = Image.fromarray(im)
                im.save(save_name)
            count += batch_size
| [
"numpy.array",
"miscc.config.cfg.NET_G.find",
"torch.nn.parallel.data_parallel",
"miscc.utilsv2.mkdir_p",
"torch.nn.functional.avg_pool2d",
"numpy.concatenate",
"tensorboardX.FileWriter",
"miscc.config.cfg.GPU_ID.split",
"torch.autograd.Variable",
"torch.argmax",
"modelv2.STAGE1_G",
"miscc.uti... | [((1154, 1175), 'miscc.config.cfg.GPU_ID.split', 'cfg.GPU_ID.split', (['""","""'], {}), "(',')\n", (1170, 1175), False, 'from miscc.config import cfg\n'), ((1557, 1567), 'modelv2.STAGE1_G', 'STAGE1_G', ([], {}), '()\n', (1565, 1567), False, 'from modelv2 import STAGE1_G, STAGE2_G, STAGE2_D\n'), ((1636, 1646), 'modelv2.STAGE1_D', 'STAGE1_D', ([], {}), '()\n', (1644, 1646), False, 'from modelv2 import STAGE1_G, STAGE1_D\n'), ((2474, 2484), 'modelv2.STAGE1_G', 'STAGE1_G', ([], {}), '()\n', (2482, 2484), False, 'from modelv2 import STAGE1_G, STAGE2_G, STAGE2_D\n'), ((2500, 2518), 'modelv2.STAGE2_G', 'STAGE2_G', (['Stage1_G'], {}), '(Stage1_G)\n', (2508, 2518), False, 'from modelv2 import STAGE1_G, STAGE2_G, STAGE2_D\n'), ((3196, 3206), 'modelv2.STAGE2_D', 'STAGE2_D', ([], {}), '()\n', (3204, 3206), False, 'from modelv2 import STAGE1_G, STAGE2_G, STAGE2_D\n'), ((4831, 4899), 'torch.optim.Adam', 'optim.Adam', (['netG_para'], {'lr': 'cfg.TRAIN.GENERATOR_LR', 'betas': '(0.5, 0.999)'}), '(netG_para, lr=cfg.TRAIN.GENERATOR_LR, betas=(0.5, 0.999))\n', (4841, 4899), True, 'import torch.optim as optim\n'), ((5859, 5896), 'six.moves.range', 'range', (['epoch_init', '(self.max_epoch + 1)'], {}), '(epoch_init, self.max_epoch + 1)\n', (5864, 5896), False, 'from six.moves import range\n'), ((10688, 10742), 'miscc.utilsv2.save_model', 'save_model', (['netG', 'netD', 'self.max_epoch', 'self.model_dir'], {}), '(netG, netD, self.max_epoch, self.model_dir)\n', (10698, 10742), False, 'from miscc.utilsv2 import save_img_results, save_model\n'), ((11026, 11050), 'torchfile.load', 'torchfile.load', (['datapath'], {}), '(datapath)\n', (11040, 11050), False, 'import torchfile\n'), ((11111, 11149), 'numpy.concatenate', 'np.concatenate', (['t_file.fea_txt'], {'axis': '(0)'}), '(t_file.fea_txt, axis=0)\n', (11125, 11149), True, 'import numpy as np\n'), ((11486, 11503), 'miscc.utilsv2.mkdir_p', 'mkdir_p', (['save_dir'], {}), '(save_dir)\n', (11493, 11503), False, 'from 
miscc.utilsv2 import mkdir_p\n'), ((11526, 11569), 'numpy.minimum', 'np.minimum', (['num_embeddings', 'self.batch_size'], {}), '(num_embeddings, self.batch_size)\n', (11536, 11569), True, 'import numpy as np\n'), ((708, 741), 'os.path.join', 'os.path.join', (['output_dir', '"""Model"""'], {}), "(output_dir, 'Model')\n", (720, 741), False, 'import os\n'), ((771, 804), 'os.path.join', 'os.path.join', (['output_dir', '"""Image"""'], {}), "(output_dir, 'Image')\n", (783, 804), False, 'import os\n'), ((832, 863), 'os.path.join', 'os.path.join', (['output_dir', '"""Log"""'], {}), "(output_dir, 'Log')\n", (844, 863), False, 'import os\n'), ((876, 899), 'miscc.utilsv2.mkdir_p', 'mkdir_p', (['self.model_dir'], {}), '(self.model_dir)\n', (883, 899), False, 'from miscc.utilsv2 import mkdir_p\n'), ((912, 935), 'miscc.utilsv2.mkdir_p', 'mkdir_p', (['self.image_dir'], {}), '(self.image_dir)\n', (919, 935), False, 'from miscc.utilsv2 import mkdir_p\n'), ((948, 969), 'miscc.utilsv2.mkdir_p', 'mkdir_p', (['self.log_dir'], {}), '(self.log_dir)\n', (955, 969), False, 'from miscc.utilsv2 import mkdir_p\n'), ((1004, 1028), 'tensorboardX.FileWriter', 'FileWriter', (['self.log_dir'], {}), '(self.log_dir)\n', (1014, 1028), False, 'from tensorboardX import FileWriter\n'), ((1772, 1836), 'torch.load', 'torch.load', (['cfg.NET_G'], {'map_location': '(lambda storage, loc: storage)'}), '(cfg.NET_G, map_location=lambda storage, loc: storage)\n', (1782, 1836), False, 'import torch\n'), ((2024, 2088), 'torch.load', 'torch.load', (['cfg.NET_D'], {'map_location': '(lambda storage, loc: storage)'}), '(cfg.NET_D, map_location=lambda storage, loc: storage)\n', (2034, 2088), False, 'import torch\n'), ((2643, 2707), 'torch.load', 'torch.load', (['cfg.NET_G'], {'map_location': '(lambda storage, loc: storage)'}), '(cfg.NET_G, map_location=lambda storage, loc: storage)\n', (2653, 2707), False, 'import torch\n'), ((3311, 3375), 'torch.load', 'torch.load', (['cfg.NET_D'], {'map_location': '(lambda storage, 
loc: storage)'}), '(cfg.NET_D, map_location=lambda storage, loc: storage)\n', (3321, 3375), False, 'import torch\n'), ((3880, 3913), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_size', 'nz'], {}), '(batch_size, nz)\n', (3897, 3913), False, 'import torch\n'), ((3967, 3982), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3980, 3982), False, 'import torch\n'), ((5920, 5931), 'time.time', 'time.time', ([], {}), '()\n', (5929, 5931), False, 'import time\n'), ((9617, 9628), 'time.time', 'time.time', ([], {}), '()\n', (9626, 9628), False, 'import time\n'), ((9998, 10048), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['netG', 'inputs', 'self.gpus'], {}), '(netG, inputs, self.gpus)\n', (10023, 10048), True, 'import torch.nn as nn\n'), ((10061, 10120), 'miscc.utilsv2.save_img_results', 'save_img_results', (['real_img_cpu', 'fake', 'epoch', 'self.image_dir'], {}), '(real_img_cpu, fake, epoch, self.image_dir)\n', (10077, 10120), False, 'from miscc.utilsv2 import save_img_results, save_model\n'), ((10512, 10530), 'numpy.array', 'np.array', (['g_losses'], {}), '(g_losses)\n', (10520, 10530), True, 'import numpy as np\n'), ((10589, 10607), 'numpy.array', 'np.array', (['d_losses'], {}), '(d_losses)\n', (10597, 10607), True, 'import numpy as np\n'), ((10662, 10678), 'numpy.array', 'np.array', (['d_accs'], {}), '(d_accs)\n', (10670, 10678), True, 'import numpy as np\n'), ((11618, 11651), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_size', 'nz'], {}), '(batch_size, nz)\n', (11635, 11651), False, 'import torch\n'), ((12487, 12537), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['netG', 'inputs', 'self.gpus'], {}), '(netG, inputs, self.gpus)\n', (12512, 12537), True, 'import torch.nn as nn\n'), ((12559, 12576), 'six.moves.range', 'range', (['batch_size'], {}), '(batch_size)\n', (12564, 12576), False, 'from six.moves import range\n'), ((2900, 2967), 'torch.load', 'torch.load', (['cfg.STAGE1_G'], {'map_location': '(lambda storage, 
loc: storage)'}), '(cfg.STAGE1_G, map_location=lambda storage, loc: storage)\n', (2910, 2967), False, 'import torch\n'), ((6692, 6714), 'torch.autograd.Variable', 'Variable', (['real_img_cpu'], {}), '(real_img_cpu)\n', (6700, 6714), False, 'from torch.autograd import Variable\n'), ((7243, 7293), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['netG', 'inputs', 'self.gpus'], {}), '(netG, inputs, self.gpus)\n', (7268, 7293), True, 'import torch.nn as nn\n'), ((8054, 8107), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['netD', 'real_imgs', 'self.gpus'], {}), '(netD, real_imgs, self.gpus)\n', (8079, 8107), True, 'import torch.nn as nn\n'), ((8174, 8227), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['netD', 'fake_imgs', 'self.gpus'], {}), '(netD, fake_imgs, self.gpus)\n', (8199, 8227), True, 'import torch.nn as nn\n'), ((8269, 8376), 'miscc.utilsv2.compute_discriminator_loss', 'compute_discriminator_loss', (['real_imgs', 'fake_imgs', 'recon_real', 'clspred_fake', 'clspred_real', 'txt_embedding'], {}), '(real_imgs, fake_imgs, recon_real, clspred_fake,\n clspred_real, txt_embedding)\n', (8295, 8376), False, 'from miscc.utilsv2 import compute_discriminator_loss, compute_generator_loss\n'), ((8718, 8792), 'miscc.utilsv2.compute_generator_loss', 'compute_generator_loss', (['clspred_fake', 'fake_imgs', 'recon_fake', 'txt_embedding'], {}), '(clspred_fake, fake_imgs, recon_fake, txt_embedding)\n', (8740, 8792), False, 'from miscc.utilsv2 import compute_discriminator_loss, compute_generator_loss\n'), ((10260, 10314), 'miscc.utilsv2.save_img_results', 'save_img_results', (['None', 'lr_fake', 'epoch', 'self.image_dir'], {}), '(None, lr_fake, epoch, self.image_dir)\n', (10276, 10314), False, 'from miscc.utilsv2 import save_img_results, save_model\n'), ((10400, 10445), 'miscc.utilsv2.save_model', 'save_model', (['netG', 'netD', 'epoch', 'self.model_dir'], {}), '(netG, netD, epoch, self.model_dir)\n', (10410, 10445), False, 
'from miscc.utilsv2 import save_img_results, save_model\n'), ((11454, 11476), 'miscc.config.cfg.NET_G.find', 'cfg.NET_G.find', (['""".pth"""'], {}), "('.pth')\n", (11468, 11476), False, 'from miscc.config import cfg\n'), ((12071, 12106), 'torch.FloatTensor', 'torch.FloatTensor', (['embeddings_batch'], {}), '(embeddings_batch)\n', (12088, 12106), False, 'import torch\n'), ((12837, 12864), 'numpy.transpose', 'np.transpose', (['im', '(1, 2, 0)'], {}), '(im, (1, 2, 0))\n', (12849, 12864), True, 'import numpy as np\n'), ((12926, 12945), 'PIL.Image.fromarray', 'Image.fromarray', (['im'], {}), '(im)\n', (12941, 12945), False, 'from PIL import Image\n'), ((4118, 4147), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_size'], {}), '(batch_size)\n', (4135, 4147), False, 'import torch\n'), ((4189, 4218), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_size'], {}), '(batch_size)\n', (4206, 4218), False, 'import torch\n'), ((7374, 7412), 'torch.nn.functional.avg_pool2d', 'nn.functional.avg_pool2d', (['fake_imgs', '(2)'], {}), '(fake_imgs, 2)\n', (7398, 7412), True, 'import torch.nn as nn\n'), ((7445, 7483), 'torch.nn.functional.avg_pool2d', 'nn.functional.avg_pool2d', (['real_imgs', '(2)'], {}), '(real_imgs, 2)\n', (7469, 7483), True, 'import torch.nn as nn\n'), ((4037, 4070), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_size', 'nz'], {}), '(batch_size, nz)\n', (4054, 4070), False, 'import torch\n'), ((6747, 6770), 'torch.autograd.Variable', 'Variable', (['txt_embedding'], {}), '(txt_embedding)\n', (6755, 6770), False, 'from torch.autograd import Variable\n'), ((9258, 9287), 'torch.argmax', 'torch.argmax', (['clspred_real', '(1)'], {}), '(clspred_real, 1)\n', (9270, 9287), False, 'import torch\n'), ((9305, 9335), 'torch.argmax', 'torch.argmax', (['txt_embedding', '(1)'], {}), '(txt_embedding, 1)\n', (9317, 9335), False, 'import torch\n')] |
####################################
# Author: <NAME>
# Date: September 2016
# Project: Document Summarization
# H2020 Summa Project
# v1.2 XNET
# author: <NAME>
####################################
"""
Question Answering Modules and Models
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append('../../common')
import numpy as np
import tensorflow as tf
import random
import os
import pdb
try:
import cPickle as pickle
except:
import pickle
from my_flags import FLAGS
from model_utils import convert_logits_to_softmax
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# Special IDs
PAD_ID = 0
UNK_ID = 1
#####################################################################
def saveObject(obj, name='model'):
    """Pickle *obj* to ``<name>.pickle`` using the highest protocol available."""
    target = name + '.pickle'
    with open(target, 'wb') as handle:
        pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
def uploadObject(obj_name):
    """Load and return the object pickled at ``<obj_name>.pickle``."""
    source = obj_name + '.pickle'
    with open(source, 'rb') as handle:
        loaded = pickle.load(handle)
    return loaded
def write_prediction_summaries(batch, pred_probs, modelname, data_type):
    """Dump the model's output probabilities to
    ``<FLAGS.train_dir>/<modelname>.<data_type>-prediction.npy``.

    *batch* is accepted for interface symmetry but not used here.
    """
    print("Writing predictions and final summaries ...")
    # Save Output Logits
    target = "%s/%s.%s-prediction" % (FLAGS.train_dir, modelname, data_type)
    np.save(target, pred_probs)
# Writing
#write_predictions(batch, modelname+"."+data_type, pred_logits)
def write_cos_sim(cos_sim, modelname, data_type):
    """Dump the cosine-similarity array to
    ``<FLAGS.train_dir>/<modelname>.<data_type>-cos_sim.npy``."""
    print("Writing cos sim ...")
    destination = "%s/%s.%s-cos_sim" % (FLAGS.train_dir, modelname, data_type)
    np.save(destination, cos_sim)
def load_prediction(modelname):
    """Load previously saved prediction logits from
    ``<FLAGS.train_dir>/<modelname>.npy``."""
    return np.load("%s/%s.npy" % (FLAGS.train_dir, modelname))
def write_predictions(batch, file_prefix, np_predictions):
    """Write one block per document to ``<train_dir>/<file_prefix>.predictions``:
    the document name, then one "<gold_label>\\t<prob_of_positive>" line per
    real (weight == 1) sentence, stopping at the first padded sentence.

    Args:
        batch: BatchData with ``docnames``, ``labels`` and ``weights`` set.
        file_prefix: output-file prefix under FLAGS.train_dir.
        np_predictions: [n_docs, max_doc_length, 2] predicted probabilities.
    """
    out_path = FLAGS.train_dir + "/" + file_prefix + ".predictions"
    # Keep only the first label channel: np_labels[i, j] is the gold label
    # of sentence j in document i.
    np_labels = batch.labels[:, :, 0]
    with open(out_path, "w") as foutput:  # BUG FIX: file was never closed on error
        for fileindex, filename in enumerate(batch.docnames):
            foutput.write(filename + "\n")
            sentcount = 0
            for sentpred, sentlabel in zip(np_predictions[fileindex], np_labels[fileindex]):
                one_prob = sentpred[0]  # probability of the positive class (label == 1)
                # BUG FIX: np_labels is already reduced to its first channel above,
                # so each entry is a scalar; the original `sentlabel[0]` would raise.
                label = sentlabel
                # BUG FIX: the original read `self.weights` from this module-level
                # function (NameError); the weights live on `batch`.
                if batch.weights[fileindex][sentcount] == 1:
                    foutput.write(str(int(label)) + "\t" + str(one_prob) + "\n")
                else:
                    # weight 0 marks padding; all remaining sentences are padding too
                    break
                sentcount += 1
            foutput.write("\n")
#####################################################################
class BatchData(object):
def __init__(self,docnames,docs,labels,weights,isf,isf_id,idf,locisf):
self.docnames = docnames
self.docs = docs
self.labels = labels
self.weights = weights
self.isf_score = isf
self.idf_score = idf
self.isf_score_ids = isf_id
self.locisf_score = locisf
self.logits = None
self.cos_sim = None
self.initial_extend = True # False once start expanding the batch
def extend(self,batch):
if self.initial_extend:
self.docnames = []
self.docs = []
self.labels = []
self.weights = []
self.isf_score = []
self.idf_score = []
self.isf_score_ids = []
self.locisf_score = []
self.cos_sim = []
self.logits = []
self.initial_extend = False
self.docnames.append(batch.docnames)
self.docs.append(batch.docs)
self.labels.append(batch.labels)
self.cos_sim.append(batch.cos_sim)
self.weights.append(batch.weights)
self.isf_score.append(batch.isf_score)
self.idf_score.append(batch.idf_score)
self.isf_score_ids.append(batch.isf_score_ids)
self.locisf_score.append(batch.locisf_score)
self.logits.append(batch.logits)
def concat_batches(self):
if self.logits[0] != None:
self.logits = np.vstack(self.logits)
if self.cos_sim[0]!=None:
self.cos_sim = np.vstack(self.cos_sim)
self.docs = np.vstack(self.docs)
self.labels = np.vstack(self.labels)
self.weights = np.vstack(self.weights)
self.isf_score = np.vstack(self.isf_score)
self.idf_score = np.vstack(self.idf_score)
self.isf_score_ids = np.vstack(self.isf_score_ids)
self.locisf_score = np.vstack(self.locisf_score)
class Data(object):
    """In-memory corpus for one data split (training / validation / test).

    Reads the preprocessed .doc/.question/.label/.scores files, keeps them as
    per-document Python lists, and serves fixed-size, padded numpy minibatches
    through ``get_batch``.  ``normalizer`` and ``pca_model`` are fitted on the
    training split and supplied pre-fitted for the other splits.
    """
    def __init__(self, vocab_dict, data_type, normalizer=None, pca_model=None):
        # Parallel per-document lists; every list is indexed by the same file index.
        self.filenames = []
        self.docs = []
        self.titles = []
        self.labels = []
        self.isf_scores = []
        self.idf_scores = []
        self.locisf_scores= []
        self.sorted_isf_score_indexes = []
        self.weights = []
        self.fileindices = []
        self.normalizer = normalizer
        self.pca_model = pca_model
        self.data_type = data_type
        # populate the data
        self.populate_data(vocab_dict, data_type)
    def get_batch(self, startidx, endidx):
        """Return a BatchData covering ``self.fileindices[startidx:endidx]``,
        with every tensor chopped/padded to the FLAGS-configured sizes."""
        # This is very fast if you keep everything in Numpy
        # Numpy dtype
        dtype = np.float16 if FLAGS.use_fp16 else np.float32
        # For train, (endidx-startidx)=FLAGS.batch_size, for others its as specified
        batch_docnames = np.empty((endidx-startidx), dtype="S40") # File ID of size 40
        batch_docs = np.empty(((endidx-startidx), (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length +
            FLAGS.max_firstsentences_length + FLAGS.max_randomsentences_length), FLAGS.max_sent_length), dtype="int32")
        batch_label = np.empty(((endidx-startidx), FLAGS.max_doc_length, FLAGS.target_label_size), dtype=dtype)
        batch_weight = np.empty(((endidx-startidx), FLAGS.max_doc_length), dtype=dtype)
        batch_isf_score_ids = np.empty(((endidx-startidx), FLAGS.topK), dtype=np.int32)
        batch_isf_score = np.empty(((endidx-startidx), FLAGS.max_doc_length), dtype=dtype)
        batch_idf_score = np.empty(((endidx-startidx), FLAGS.max_doc_length), dtype=dtype)
        batch_locisf_score = np.empty(((endidx-startidx), FLAGS.max_doc_length), dtype=dtype)
        batch_idx = 0
        for fileindex in self.fileindices[startidx:endidx]:
            # Document Names
            # NOTE(review): [67:-14] strips a fixed path prefix/suffix to obtain the
            # bare file id -- tied to the corpus directory layout; confirm if paths change.
            batch_docnames[batch_idx] = self.filenames[fileindex][67:-14]
            # Document
            doc_wordids = self.docs[fileindex][:] # [FLAGS.max_doc_length, FLAGS.max_sent_length]
            doc_wordids = [self.process_to_chop_pad(thissent, FLAGS.max_sent_length) for thissent in doc_wordids] # update sentence len
            doc_wordids = doc_wordids[:FLAGS.max_doc_length] # update doc len
            doc_wordids = doc_wordids + [self.process_to_chop_pad([], FLAGS.max_sent_length)]*(FLAGS.max_doc_length - len(doc_wordids))
            if (FLAGS.max_title_length > 0):
                # Append the chopped/padded question (title) sentences after the document.
                title_sents = [self.process_to_chop_pad(thissent, FLAGS.max_sent_length) for thissent in self.titles[fileindex]]
                title_sents = title_sents[:FLAGS.max_title_length]
                title_sents = title_sents + [self.process_to_chop_pad([], FLAGS.max_sent_length)]*(FLAGS.max_title_length - len(title_sents))
                doc_wordids = doc_wordids + title_sents # [FLAGS.max_title_length, FLAGS.max_sent_length]
            batch_docs[batch_idx] = np.array(doc_wordids[:], dtype="int32")
            # Labels
            labels = self.labels[fileindex][:FLAGS.max_doc_length]
            labels = labels + [0]*(FLAGS.max_doc_length - len(labels))
            # labels: (max_doc_length) --> labels_vecs: (max_doc_length, target_label_size)
            labels_vecs = [[1, 0] if (label==1) else [0, 1] for label in labels]
            batch_label[batch_idx] = np.array(labels_vecs[:], dtype=dtype)
            # Weights
            weights = self.weights[fileindex][:FLAGS.max_doc_length]
            weights = weights + [0]*(FLAGS.max_doc_length - len(weights))
            batch_weight[batch_idx] = np.array(weights[:], dtype=dtype)
            # ISF Score ids (top-K sentence indexes by ISF; -1 pads short documents)
            isf_score_ids = self.sorted_isf_score_indexes[fileindex][:FLAGS.topK]
            isf_score_ids = isf_score_ids + [-1]*(FLAGS.topK - len(isf_score_ids))
            batch_isf_score_ids[batch_idx] = np.array(isf_score_ids[:],dtype=np.int32)
            # ISF scores
            isf_sc = self.isf_scores[fileindex][:FLAGS.max_doc_length]
            isf_sc = isf_sc + [0]*(FLAGS.max_doc_length - len(isf_sc))
            batch_isf_score[batch_idx] = np.array(isf_sc[:],dtype=dtype)
            # IDF scores
            idf_sc = self.idf_scores[fileindex][:FLAGS.max_doc_length]
            idf_sc = idf_sc + [0]*(FLAGS.max_doc_length - len(idf_sc))
            batch_idf_score[batch_idx] = np.array(idf_sc[:],dtype=dtype)
            # Local ISF scores
            locisf_sc = self.locisf_scores[fileindex][:FLAGS.max_doc_length]
            locisf_sc = locisf_sc + [0]*(FLAGS.max_doc_length - len(locisf_sc))
            batch_locisf_score[batch_idx] = np.array(locisf_sc[:],dtype=dtype)
            # increase batch count
            batch_idx += 1
        #END-FOR-FILEIDX
        batch = BatchData( docnames= batch_docnames,
                           docs = batch_docs,
                           labels = batch_label,
                           weights = batch_weight,
                           isf = batch_isf_score,
                           isf_id = batch_isf_score_ids,
                           idf = batch_idf_score,
                           locisf = batch_locisf_score)
        return batch
    def shuffle_fileindices(self):
        """Shuffle the order in which documents are served (call once per epoch)."""
        random.shuffle(self.fileindices)
    def process_to_chop_pad(self, orgids, requiredsize):
        """Chop *orgids* to *requiredsize*, or right-pad it with PAD_ID."""
        if (len(orgids) >= requiredsize):
            return orgids[:requiredsize]
        else:
            padids = [PAD_ID] * (requiredsize - len(orgids))
            return (orgids + padids)
    def populate_data(self, vocab_dict, data_type):
        """Read the preprocessed corpus files for *data_type*, fill the
        per-document lists, and (optionally) z-score-normalize and
        PCA-decorrelate the extra sentence-score features."""
        full_data_file_prefix = ""
        label_prefix = ""
        scores_file_prefix = ""
        full_data_file_prefix = FLAGS.preprocessed_data_directory + "/" + FLAGS.data_mode + "/" + data_type + '.org_ent'
        scores_file_prefix = FLAGS.preprocessed_data_directory + "/" + FLAGS.data_mode + "/" + data_type
        print("Data file prefix (.doc, .question, .label, .score): %s" % full_data_file_prefix)
        # Process doc, title, image and label
        # Each file groups one document per blank-line-separated block.
        doc_data_list = open(full_data_file_prefix+".doc",'r').read().strip().split("\n\n")
        title_data_list = open(full_data_file_prefix+".question",'r').read().strip().split("\n\n")
        label_data_list = open(full_data_file_prefix+".label",'r').read().strip().split("\n\n") # Use collective oracle
        isf_scores_data_list = open(scores_file_prefix+".isf.scores",'r').read().strip().split("\n\n") # ISF scores for each sentence
        if FLAGS.use_idf:
            idf_scores_data_list = open(scores_file_prefix+".idf.scores",'r').read().strip().split("\n\n") # IDF scores for each sentence
        if FLAGS.use_locisf:
            locisf_scores_data_list = open(scores_file_prefix+".locisf.scores",'r').read().strip().split("\n\n") # Local ISF scores for each sentence
        #image_data_list = open(full_data_file_prefix+".paraphr").read().strip().split("\n\n")
        # insert here file init for query paraphrase data
        print("Data sizes: %d %d %d"%(len(doc_data_list), len(title_data_list), len(label_data_list) ))
        print("Preparing data based on model requirement ...")
        doccount = 0
        ndata = len(doc_data_list)
        iter_indexes = range(ndata)
        extra_features = [] # [total_sentences, #n_extra_feats (3 so far)]
        n_features = (FLAGS.use_locisf + FLAGS.use_isf + FLAGS.use_idf)
        if data_type != 'test' and FLAGS.use_subsampled_dataset:
            # Train/validation may iterate only a precomputed subset of documents.
            fn = FLAGS.preprocessed_data_directory + "/" + FLAGS.data_mode + "/"+data_type+".subsampling_indexes"
            iter_indexes = [int(x) for x in open(fn,'r').read().strip().split("\n")]
            print("Subsampled data size: ",len(iter_indexes))
        for doc_idx in iter_indexes:
            doc_data = doc_data_list[doc_idx]
            title_data = title_data_list[doc_idx]
            label_data = label_data_list[doc_idx]
            isf_data = isf_scores_data_list[doc_idx]
            if FLAGS.use_idf:
                idf_data = idf_scores_data_list[doc_idx]
                idf_lines = idf_data.strip().split("\n")
            if FLAGS.use_locisf:
                locisf_data = locisf_scores_data_list[doc_idx]
                locisf_lines = locisf_data.strip().split("\n")
            doc_lines = doc_data.strip().split("\n")
            title_lines = title_data.strip().split("\n")
            label_lines = label_data.strip().split("\n")
            isf_lines = isf_data.strip().split("\n")
            # Line 0 of every block carries the filename; it must agree across files.
            filename = doc_lines[0].strip()
            if ((filename == title_lines[0].strip()) and (filename == label_lines[0].strip())):
                # Put filename
                self.filenames.append(filename)
                # Doc & sent_lens
                thisdoc = []
                doc_len = min(len(doc_lines)-1,FLAGS.max_doc_length)
                for idx in range(doc_len):
                    thissent = [int(item) for item in doc_lines[idx+1].strip().split()]
                    thissent = thissent[:FLAGS.max_sent_length]
                    thisdoc.append(thissent)
                self.docs.append(thisdoc)
                # Title
                thistitle = []
                for idx in range(min(len(title_lines)-1,FLAGS.max_title_length)):
                    thissent = [int(item) for item in title_lines[idx+1].strip().split()]
                    thissent = thissent[:FLAGS.max_sent_length]
                    thistitle.append(thissent)
                self.titles.append(thistitle)
                # Labels 1/0, 1, 0 and 2 -> 0 || Weights
                thislabel = []
                thisweight = []
                # Scores
                this_isf = []
                this_locisf = []
                this_idf = []
                this_isf_ids = []
                doc_scores = np.zeros([doc_len,n_features],dtype=np.float32)
                for idx in range(doc_len):
                    thissent_label = int(label_lines[idx+1].strip())
                    thissent_weight = 1
                    isf = float(isf_lines[idx+1])
                    sort_idx = idx
                    if FLAGS.use_locisf:
                        locisf = float(locisf_lines[idx+1])
                        this_locisf.append(locisf)
                    if FLAGS.use_idf:
                        idf = float(idf_lines[idx+1])
                        this_idf.append(idf)
                    thislabel.append(thissent_label)
                    thisweight.append(thissent_weight)
                    this_isf.append(isf)
                    this_isf_ids.append( (sort_idx,isf) )
                self.labels.append(thislabel)
                self.weights.append(thisweight)
                # Sentence indexes ordered by descending ISF score.
                this_isf_ids.sort(reverse=True,key=lambda x: x[1])
                self.sorted_isf_score_indexes.append([x[0] for x in this_isf_ids])
                # fill in scores (column order: ISF, IDF, local ISF -- only enabled ones)
                idx = 0
                if FLAGS.use_isf:
                    doc_scores[:,idx] = this_isf
                    idx += 1
                if FLAGS.use_idf:
                    doc_scores[:,idx] = this_idf
                    idx += 1
                if FLAGS.use_locisf:
                    doc_scores[:,idx] = this_locisf
                extra_features.append(doc_scores)
            #END-IF
            else:
                print("Some problem with %s.* files. Exiting!" % full_data_file_prefix)
                exit(0)
            if doccount%10000==0:
                print("%d ..."%doccount)
            doccount += 1
        #END-FOR-DATA
        self.fileindices = list(range(len(self.filenames)))
        # Stack all per-document score matrices into one [total_sentences, n_features] array.
        extra_features = np.vstack(extra_features)
        if FLAGS.norm_extra_feats:
            print("Normalizing extra features (z-score)...")
            if data_type=='training':
                # define Standarizer (fit on training only; reused for other splits)
                self.normalizer = StandardScaler()
                self.normalizer.fit(extra_features)
            extra_features = self.normalizer.transform(extra_features)
            # only decorrelate if normalized
            if FLAGS.decorrelate_extra_feats:
                print("Decorrelating extra features (PCA)...")
                if data_type=='training':
                    # define PCA model
                    self.pca_model = PCA(n_components=n_features-1,whiten=True)
                    self.pca_model.fit(extra_features)
                extra_features = self.pca_model.transform(extra_features)
        #END-NORM-DECORR
        # fill in extra features in Data object
        # Walk the stacked matrix back into per-document score lists.
        index = 0
        doccount = 0
        for doc in self.docs:
            doc_len = len(doc)
            idx = 0
            if FLAGS.use_isf:
                this_isf = list(extra_features[index:index+doc_len,idx])
                self.isf_scores.append(this_isf)
                idx += 1
            if FLAGS.use_idf:
                this_idf = list(extra_features[index:index+doc_len,idx])
                self.idf_scores.append(this_idf)
                idx += 1
            if FLAGS.use_locisf:
                this_locisf = list(extra_features[index:index+doc_len,idx])
                self.locisf_scores.append(this_locisf)
            index += doc_len
            if doccount%10000==0:
                print("%d ....."%doccount)
            doccount += 1
        #END-2nd-FOR-DOCS
class DataProcessor:
    """Builds/loads Data objects and the vocabulary + embedding matrix,
    caching everything as pickles under FLAGS.train_dir."""
    def prepare_news_data(self, vocab_dict, data_type="training",normalizer=None,pca_model=None):
        """Return the Data object for *data_type*, reloading a cached pickle
        when present (unless FLAGS.force_reading) and building + caching it
        otherwise."""
        data_obj_fn = ''
        if data_type != 'test' and FLAGS.use_subsampled_dataset:
            data_obj_fn = os.path.join(FLAGS.train_dir,data_type+"_subsampled")
        else:
            data_obj_fn = os.path.join(FLAGS.train_dir,data_type)
        if os.path.exists(data_obj_fn+'.pickle') and not FLAGS.force_reading:
            data = uploadObject(data_obj_fn)
        else:
            data = Data(vocab_dict,data_type,normalizer=normalizer,pca_model=pca_model)
            saveObject(data,data_obj_fn)
        return data
    def prepare_vocab_embeddingdict(self):
        """Return ``(vocab_dict, word_embedding_array)``, parsing the
        pretrained-embedding text file on the first call and caching both
        structures as pickles for later runs."""
        vocab_fn = os.path.join(FLAGS.train_dir,'vocab-org')
        wde_fn = os.path.join(FLAGS.train_dir,'wde-org')
        if os.path.exists(vocab_fn+'.pickle'):
            # Cached: reload both pickles instead of reparsing the embedding file.
            vocab_dict = uploadObject(vocab_fn)
            word_embedding_array = uploadObject(wde_fn)
            return vocab_dict,word_embedding_array
        ####################################
        # Numpy dtype
        dtype = np.float16 if FLAGS.use_fp16 else np.float32
        vocab_dict = {}
        word_embedding_array = []
        # Add padding
        vocab_dict["_PAD"] = PAD_ID
        # Add UNK
        vocab_dict["_UNK"] = UNK_ID
        # Read word embedding file
        wordembed_filename = ""
        if FLAGS.anonymized_setting:
            wordembed_filename = FLAGS.pretrained_wordembedding_anonymdata
        else:
            wordembed_filename = FLAGS.pretrained_wordembedding_orgdata
        print("Reading pretrained word embeddings file: %s"%wordembed_filename)
        embed_line = ""
        linecount = 0
        # First line of the file holds the vocabulary size; following lines
        # are "word v1 v2 ..." entries.
        with open(wordembed_filename, "r") as fembedd:
            for line in fembedd:
                if linecount == 0:
                    vocabsize = int(line.split()[0])
                    # Initiate fixed size empty array
                    word_embedding_array = np.empty((vocabsize, FLAGS.wordembed_size), dtype=dtype)
                else:
                    linedata = line.split()
                    # NOTE(review): word ids start at 2 (after _PAD/_UNK) while the
                    # embedding row is linecount-1, i.e. row = id - 2; presumably the
                    # consumer of this pair accounts for the offset -- confirm.
                    vocab_dict[linedata[0]] = linecount + 1
                    embeddata = [float(item) for item in linedata[1:]][0:FLAGS.wordembed_size]
                    word_embedding_array[linecount-1] = np.array(embeddata, dtype=dtype)
                if linecount%10000 == 0:
                    print(str(linecount)+" ...")
                linecount += 1
        print("Read pretrained embeddings: %s"%str(word_embedding_array.shape))
        print("Size of vocab: %d (_PAD:0, _UNK:1)"%len(vocab_dict))
        vocabfilename = ""
        if FLAGS.anonymized_setting:
            vocabfilename = FLAGS.train_dir+"/vocab-anonym"
        else:
            vocabfilename = FLAGS.train_dir+"/vocab-org"
        print("Writing vocab file: %s"%vocabfilename)
        # Dump the vocabulary, one word per line, ordered by word id.
        foutput = open(vocabfilename,"w")
        vocab_list = [(vocab_dict[key], key) for key in vocab_dict.keys()]
        vocab_list.sort()
        vocab_list = [item[1] for item in vocab_list]
        foutput.write("\n".join(vocab_list)+"\n")
        foutput.close()
        # Cache both structures so later runs take the fast path above.
        saveObject(vocab_dict,vocab_fn)
        saveObject(word_embedding_array,wde_fn)
        return vocab_dict, word_embedding_array
| [
"os.path.exists",
"pickle.dump",
"random.shuffle",
"sklearn.decomposition.PCA",
"pickle.load",
"os.path.join",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"numpy.vstack",
"numpy.load",
"sys.path.append",
"numpy.save"
] | [((371, 402), 'sys.path.append', 'sys.path.append', (['"""../../common"""'], {}), "('../../common')\n", (386, 402), False, 'import sys\n'), ((1260, 1352), 'numpy.save', 'np.save', (["(FLAGS.train_dir + '/' + modelname + '.' + data_type + '-prediction')", 'pred_probs'], {}), "(FLAGS.train_dir + '/' + modelname + '.' + data_type + '-prediction',\n pred_probs)\n", (1267, 1352), True, 'import numpy as np\n'), ((1510, 1596), 'numpy.save', 'np.save', (["(FLAGS.train_dir + '/' + modelname + '.' + data_type + '-cos_sim')", 'cos_sim'], {}), "(FLAGS.train_dir + '/' + modelname + '.' + data_type + '-cos_sim',\n cos_sim)\n", (1517, 1596), True, 'import numpy as np\n'), ((1629, 1680), 'numpy.load', 'np.load', (["(FLAGS.train_dir + '/' + modelname + '.npy')"], {}), "(FLAGS.train_dir + '/' + modelname + '.npy')\n", (1636, 1680), True, 'import numpy as np\n'), ((903, 957), 'pickle.dump', 'pickle.dump', (['obj', 'fd'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(obj, fd, protocol=pickle.HIGHEST_PROTOCOL)\n', (914, 957), False, 'import pickle\n'), ((1069, 1084), 'pickle.load', 'pickle.load', (['fd'], {}), '(fd)\n', (1080, 1084), False, 'import pickle\n'), ((4092, 4112), 'numpy.vstack', 'np.vstack', (['self.docs'], {}), '(self.docs)\n', (4101, 4112), True, 'import numpy as np\n'), ((4135, 4157), 'numpy.vstack', 'np.vstack', (['self.labels'], {}), '(self.labels)\n', (4144, 4157), True, 'import numpy as np\n'), ((4181, 4204), 'numpy.vstack', 'np.vstack', (['self.weights'], {}), '(self.weights)\n', (4190, 4204), True, 'import numpy as np\n'), ((4230, 4255), 'numpy.vstack', 'np.vstack', (['self.isf_score'], {}), '(self.isf_score)\n', (4239, 4255), True, 'import numpy as np\n'), ((4281, 4306), 'numpy.vstack', 'np.vstack', (['self.idf_score'], {}), '(self.idf_score)\n', (4290, 4306), True, 'import numpy as np\n'), ((4336, 4365), 'numpy.vstack', 'np.vstack', (['self.isf_score_ids'], {}), '(self.isf_score_ids)\n', (4345, 4365), True, 'import numpy as np\n'), ((4394, 4422), 
'numpy.vstack', 'np.vstack', (['self.locisf_score'], {}), '(self.locisf_score)\n', (4403, 4422), True, 'import numpy as np\n'), ((5301, 5341), 'numpy.empty', 'np.empty', (['(endidx - startidx)'], {'dtype': '"""S40"""'}), "(endidx - startidx, dtype='S40')\n", (5309, 5341), True, 'import numpy as np\n'), ((5384, 5601), 'numpy.empty', 'np.empty', (['(endidx - startidx, FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.\n max_image_length + FLAGS.max_firstsentences_length + FLAGS.\n max_randomsentences_length, FLAGS.max_sent_length)'], {'dtype': '"""int32"""'}), "((endidx - startidx, FLAGS.max_doc_length + FLAGS.max_title_length +\n FLAGS.max_image_length + FLAGS.max_firstsentences_length + FLAGS.\n max_randomsentences_length, FLAGS.max_sent_length), dtype='int32')\n", (5392, 5601), True, 'import numpy as np\n'), ((5669, 5762), 'numpy.empty', 'np.empty', (['(endidx - startidx, FLAGS.max_doc_length, FLAGS.target_label_size)'], {'dtype': 'dtype'}), '((endidx - startidx, FLAGS.max_doc_length, FLAGS.target_label_size),\n dtype=dtype)\n', (5677, 5762), True, 'import numpy as np\n'), ((5782, 5846), 'numpy.empty', 'np.empty', (['(endidx - startidx, FLAGS.max_doc_length)'], {'dtype': 'dtype'}), '((endidx - startidx, FLAGS.max_doc_length), dtype=dtype)\n', (5790, 5846), True, 'import numpy as np\n'), ((5878, 5935), 'numpy.empty', 'np.empty', (['(endidx - startidx, FLAGS.topK)'], {'dtype': 'np.int32'}), '((endidx - startidx, FLAGS.topK), dtype=np.int32)\n', (5886, 5935), True, 'import numpy as np\n'), ((5972, 6036), 'numpy.empty', 'np.empty', (['(endidx - startidx, FLAGS.max_doc_length)'], {'dtype': 'dtype'}), '((endidx - startidx, FLAGS.max_doc_length), dtype=dtype)\n', (5980, 6036), True, 'import numpy as np\n'), ((6072, 6136), 'numpy.empty', 'np.empty', (['(endidx - startidx, FLAGS.max_doc_length)'], {'dtype': 'dtype'}), '((endidx - startidx, FLAGS.max_doc_length), dtype=dtype)\n', (6080, 6136), True, 'import numpy as np\n'), ((6167, 6231), 'numpy.empty', 'np.empty', 
(['(endidx - startidx, FLAGS.max_doc_length)'], {'dtype': 'dtype'}), '((endidx - startidx, FLAGS.max_doc_length), dtype=dtype)\n', (6175, 6231), True, 'import numpy as np\n'), ((9758, 9790), 'random.shuffle', 'random.shuffle', (['self.fileindices'], {}), '(self.fileindices)\n', (9772, 9790), False, 'import random\n'), ((16224, 16249), 'numpy.vstack', 'np.vstack', (['extra_features'], {}), '(extra_features)\n', (16233, 16249), True, 'import numpy as np\n'), ((18636, 18678), 'os.path.join', 'os.path.join', (['FLAGS.train_dir', '"""vocab-org"""'], {}), "(FLAGS.train_dir, 'vocab-org')\n", (18648, 18678), False, 'import os\n'), ((18695, 18735), 'os.path.join', 'os.path.join', (['FLAGS.train_dir', '"""wde-org"""'], {}), "(FLAGS.train_dir, 'wde-org')\n", (18707, 18735), False, 'import os\n'), ((18746, 18782), 'os.path.exists', 'os.path.exists', (["(vocab_fn + '.pickle')"], {}), "(vocab_fn + '.pickle')\n", (18760, 18782), False, 'import os\n'), ((3964, 3986), 'numpy.vstack', 'np.vstack', (['self.logits'], {}), '(self.logits)\n', (3973, 3986), True, 'import numpy as np\n'), ((4048, 4071), 'numpy.vstack', 'np.vstack', (['self.cos_sim'], {}), '(self.cos_sim)\n', (4057, 4071), True, 'import numpy as np\n'), ((7438, 7477), 'numpy.array', 'np.array', (['doc_wordids[:]'], {'dtype': '"""int32"""'}), "(doc_wordids[:], dtype='int32')\n", (7446, 7477), True, 'import numpy as np\n'), ((7848, 7885), 'numpy.array', 'np.array', (['labels_vecs[:]'], {'dtype': 'dtype'}), '(labels_vecs[:], dtype=dtype)\n', (7856, 7885), True, 'import numpy as np\n'), ((8090, 8123), 'numpy.array', 'np.array', (['weights[:]'], {'dtype': 'dtype'}), '(weights[:], dtype=dtype)\n', (8098, 8123), True, 'import numpy as np\n'), ((8363, 8405), 'numpy.array', 'np.array', (['isf_score_ids[:]'], {'dtype': 'np.int32'}), '(isf_score_ids[:], dtype=np.int32)\n', (8371, 8405), True, 'import numpy as np\n'), ((8627, 8659), 'numpy.array', 'np.array', (['isf_sc[:]'], {'dtype': 'dtype'}), '(isf_sc[:], dtype=dtype)\n', (8635, 
8659), True, 'import numpy as np\n'), ((8868, 8900), 'numpy.array', 'np.array', (['idf_sc[:]'], {'dtype': 'dtype'}), '(idf_sc[:], dtype=dtype)\n', (8876, 8900), True, 'import numpy as np\n'), ((9134, 9169), 'numpy.array', 'np.array', (['locisf_sc[:]'], {'dtype': 'dtype'}), '(locisf_sc[:], dtype=dtype)\n', (9142, 9169), True, 'import numpy as np\n'), ((18152, 18208), 'os.path.join', 'os.path.join', (['FLAGS.train_dir', "(data_type + '_subsampled')"], {}), "(FLAGS.train_dir, data_type + '_subsampled')\n", (18164, 18208), False, 'import os\n'), ((18246, 18286), 'os.path.join', 'os.path.join', (['FLAGS.train_dir', 'data_type'], {}), '(FLAGS.train_dir, data_type)\n', (18258, 18286), False, 'import os\n'), ((18298, 18337), 'os.path.exists', 'os.path.exists', (["(data_obj_fn + '.pickle')"], {}), "(data_obj_fn + '.pickle')\n", (18312, 18337), False, 'import os\n'), ((14403, 14452), 'numpy.zeros', 'np.zeros', (['[doc_len, n_features]'], {'dtype': 'np.float32'}), '([doc_len, n_features], dtype=np.float32)\n', (14411, 14452), True, 'import numpy as np\n'), ((16455, 16471), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (16469, 16471), False, 'from sklearn.preprocessing import StandardScaler\n'), ((16868, 16913), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(n_features - 1)', 'whiten': '(True)'}), '(n_components=n_features - 1, whiten=True)\n', (16871, 16913), False, 'from sklearn.decomposition import PCA\n'), ((19903, 19959), 'numpy.empty', 'np.empty', (['(vocabsize, FLAGS.wordembed_size)'], {'dtype': 'dtype'}), '((vocabsize, FLAGS.wordembed_size), dtype=dtype)\n', (19911, 19959), True, 'import numpy as np\n'), ((20237, 20269), 'numpy.array', 'np.array', (['embeddata'], {'dtype': 'dtype'}), '(embeddata, dtype=dtype)\n', (20245, 20269), True, 'import numpy as np\n')] |
import os
import numpy as np

# Run alu.py once for each step value 10, 20, ..., 300 (inclusive), then
# fire the notification script when the sweep has finished.
for step in np.arange(10, 301, 10):
    # BUG FIX: the original wrapped this call in `for j in range(1):`, a
    # single-iteration loop with no effect; it has been removed.
    os.system('python3 alu.py {}'.format(step))
os.system('python3 email_notification.py')
"os.system",
"numpy.arange"
] | [((39, 61), 'numpy.arange', 'np.arange', (['(10)', '(301)', '(10)'], {}), '(10, 301, 10)\n', (48, 61), True, 'import numpy as np\n'), ((126, 168), 'os.system', 'os.system', (['"""python3 email_notification.py"""'], {}), "('python3 email_notification.py')\n", (135, 168), False, 'import os\n')] |
# Webcam capture / naive template-matching "login" demo.
# Flow: show the live feed; pressing 's' saves the current frame to
# saved_img.jpg; while saved_img.jpg exists, each loop iteration matches the
# live frame against it and draws the matches; 'q' quits.
import cv2 , os, time
from PIL import Image
import numpy as np
key = cv2. waitKey(1)
webcam = cv2.VideoCapture(0)  # device 0 = default camera
while True:
    try:
        check, frame = webcam.read()
        print(check) #prints true as long as the webcam is running
        print(frame) #prints matrix values of each framecd
        cv2.imshow("Capturing", frame)
        key = cv2.waitKey(1)
        if os.path.isfile('saved_img.jpg'):
            # A reference image exists: compare the live frame against it.
            print("Exist..")
            print("-----------------------")
            print("Lets Check..")
            img_rgb = cv2.imread('saved_img.jpg')
            template = frame
            # NOTE(review): for a color frame, shape is (rows, cols, channels),
            # so shape[:-1] yields (rows, cols) and this assigns w=rows, h=cols --
            # the two look swapped; confirm the intended rectangle size.
            w, h = template.shape[:-1]
            res = cv2.matchTemplate(img_rgb,template,cv2.TM_CCOEFF)
            # NOTE(review): TM_CCOEFF scores are unnormalised, so a fixed 0.8
            # threshold is suspicious (TM_CCOEFF_NORMED is the usual choice for
            # thresholding); confirm intent.
            threshold = 0.8
            loc = np.where( res >= threshold)
            for pt in zip(*loc[::-1]):
                # loc comes back as (rows, cols); reversing gives (x, y) points.
                cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 3)
            cv2.imwrite('res.png',img_rgb)
            try:
                img = Image.open('res.png')
                img.show()
                time.sleep(5)
                img.close()
                print("Loged In")
            except:
                # NOTE(review): bare except swallows every error; narrowing it
                # would change behaviour, so it is only flagged here.
                print("AUTHENTICATION FAILED")
            print("--------------------------------------------------")
            break
        elif key == ord('s'):
            # Save the current frame as the reference image, preview it
            # briefly, then stop capturing.
            cv2.imwrite(filename='saved_img.jpg', img=frame)
            webcam.release()
            img_new = cv2.imread('saved_img.jpg')
            img_show = cv2.imshow("Captured Image", img_new)
            cv2.waitKey(1650)
            cv2.destroyAllWindows()
            print("Processing image...")
            img_ = cv2.imread('saved_img.jpg', cv2.IMREAD_ANYCOLOR)
            print("Image saved!")
            break
        elif key == ord('q'):
            print("Turning off camera.")
            webcam.release()
            print("Camera off.")
            print("Program ended.")
            cv2.destroyAllWindows()
            break
    except(KeyboardInterrupt):
        # Ctrl-C: release the camera and close windows before exiting.
        print("Turning off camera.")
        webcam.release()
        print("Camera off.")
        print("Program ended.")
        cv2.destroyAllWindows()
        break
"cv2.rectangle",
"cv2.imwrite",
"PIL.Image.open",
"numpy.where",
"time.sleep",
"cv2.imshow",
"os.path.isfile",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.matchTemplate",
"cv2.imread"
] | [((69, 83), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (80, 83), False, 'import cv2, os, time\n'), ((94, 113), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (110, 113), False, 'import cv2, os, time\n'), ((307, 337), 'cv2.imshow', 'cv2.imshow', (['"""Capturing"""', 'frame'], {}), "('Capturing', frame)\n", (317, 337), False, 'import cv2, os, time\n'), ((352, 366), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (363, 366), False, 'import cv2, os, time\n'), ((378, 409), 'os.path.isfile', 'os.path.isfile', (['"""saved_img.jpg"""'], {}), "('saved_img.jpg')\n", (392, 409), False, 'import cv2, os, time\n'), ((541, 568), 'cv2.imread', 'cv2.imread', (['"""saved_img.jpg"""'], {}), "('saved_img.jpg')\n", (551, 568), False, 'import cv2, os, time\n'), ((655, 706), 'cv2.matchTemplate', 'cv2.matchTemplate', (['img_rgb', 'template', 'cv2.TM_CCOEFF'], {}), '(img_rgb, template, cv2.TM_CCOEFF)\n', (672, 706), False, 'import cv2, os, time\n'), ((751, 777), 'numpy.where', 'np.where', (['(res >= threshold)'], {}), '(res >= threshold)\n', (759, 777), True, 'import numpy as np\n'), ((911, 942), 'cv2.imwrite', 'cv2.imwrite', (['"""res.png"""', 'img_rgb'], {}), "('res.png', img_rgb)\n", (922, 942), False, 'import cv2, os, time\n'), ((2136, 2159), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2157, 2159), False, 'import cv2, os, time\n'), ((834, 900), 'cv2.rectangle', 'cv2.rectangle', (['img_rgb', 'pt', '(pt[0] + w, pt[1] + h)', '(0, 0, 255)', '(3)'], {}), '(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 3)\n', (847, 900), False, 'import cv2, os, time\n'), ((981, 1002), 'PIL.Image.open', 'Image.open', (['"""res.png"""'], {}), "('res.png')\n", (991, 1002), False, 'from PIL import Image\n'), ((1046, 1059), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1056, 1059), False, 'import cv2, os, time\n'), ((1326, 1374), 'cv2.imwrite', 'cv2.imwrite', ([], {'filename': '"""saved_img.jpg"""', 'img': 'frame'}), 
"(filename='saved_img.jpg', img=frame)\n", (1337, 1374), False, 'import cv2, os, time\n'), ((1426, 1453), 'cv2.imread', 'cv2.imread', (['"""saved_img.jpg"""'], {}), "('saved_img.jpg')\n", (1436, 1453), False, 'import cv2, os, time\n'), ((1477, 1514), 'cv2.imshow', 'cv2.imshow', (['"""Captured Image"""', 'img_new'], {}), "('Captured Image', img_new)\n", (1487, 1514), False, 'import cv2, os, time\n'), ((1527, 1544), 'cv2.waitKey', 'cv2.waitKey', (['(1650)'], {}), '(1650)\n', (1538, 1544), False, 'import cv2, os, time\n'), ((1557, 1580), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1578, 1580), False, 'import cv2, os, time\n'), ((1641, 1689), 'cv2.imread', 'cv2.imread', (['"""saved_img.jpg"""', 'cv2.IMREAD_ANYCOLOR'], {}), "('saved_img.jpg', cv2.IMREAD_ANYCOLOR)\n", (1651, 1689), False, 'import cv2, os, time\n'), ((1923, 1946), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1944, 1946), False, 'import cv2, os, time\n')] |
import argparse
import numpy as np
from os import path
import struct
from internal import db_handling
def parse_args():
    """Parse the three required command-line paths for the import script."""
    parser = argparse.ArgumentParser()
    for flag in ('--sift_feature_dir', '--query_txt_file', '--database_file'):
        parser.add_argument(flag, required=True)
    return parser.parse_args()
def main():
    """Import cameras, images and Lowe-style SIFT features into a COLMAP DB.

    Reads one line per image from the query text file
    (``name _ h w fx fy cx cy``), registers a camera and image in the
    database, then parses the corresponding binary ``.sift`` file and
    stores its keypoints and descriptors.
    """
    args = parse_args()
    db = db_handling.COLMAPDatabase.connect(args.database_file)
    db.create_tables()
    with open(args.query_txt_file) as f:
        for line in f:
            name, _, h, w, fx, fy, cx, cy = line.split(' ')
            # Intrinsics packed as (fx, fy, cx, cy); camera model id 1.
            params = np.array([float(fx), float(fy), float(cx), float(cy)])
            camera_id = db.add_camera(1, int(h), int(w), params)
            image_id = db.add_image(path.join('images', name), camera_id)
            featurefile = path.join(args.sift_feature_dir,
                                     path.splitext(name)[0] + '.sift')
            with open(featurefile, 'rb') as f:
                data = f.read()
            # 20-byte header: 5 little-endian ints; fields 3-5 are the point
            # count, entries per keypoint record, and descriptor size.
            header = struct.unpack_from('iiiii', data, 0)
            _, _, num_points, num_entries, desc_size = header
            assert num_entries == 5 and desc_size == 128
            offset = 20
            keypoints = np.zeros((num_points, 2))
            # Each keypoint record is 5 floats (20 bytes); the stored order
            # is swapped to (y, x) -> (x, y) here.
            for i in range(num_points):
                point = struct.unpack_from('fffff', data, offset)
                offset += 20
                keypoints[i, :] = np.array((point[1], point[0]))
            descriptors = np.zeros((num_points, desc_size))
            # Descriptors follow as one 128-byte block per keypoint.
            for i in range(num_points):
                descriptor = struct.unpack_from('128B', data, offset)
                offset += desc_size
                descriptors[i, :] = np.asarray(descriptor)
            db.add_keypoints(image_id, keypoints)
            db.add_descriptors(image_id, descriptors)
    db.commit()
# Script entry point: import SIFT features for all images listed in the
# query file into the COLMAP database.
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"os.path.join",
"numpy.asarray",
"os.path.splitext",
"numpy.array",
"numpy.zeros",
"internal.db_handling.COLMAPDatabase.connect",
"struct.unpack_from"
] | [((136, 161), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (159, 161), False, 'import argparse\n'), ((436, 490), 'internal.db_handling.COLMAPDatabase.connect', 'db_handling.COLMAPDatabase.connect', (['args.database_file'], {}), '(args.database_file)\n', (470, 490), False, 'from internal import db_handling\n'), ((1086, 1122), 'struct.unpack_from', 'struct.unpack_from', (['"""iiiii"""', 'data', '(0)'], {}), "('iiiii', data, 0)\n", (1104, 1122), False, 'import struct\n'), ((1291, 1316), 'numpy.zeros', 'np.zeros', (['(num_points, 2)'], {}), '((num_points, 2))\n', (1299, 1316), True, 'import numpy as np\n'), ((1544, 1577), 'numpy.zeros', 'np.zeros', (['(num_points, desc_size)'], {}), '((num_points, desc_size))\n', (1552, 1577), True, 'import numpy as np\n'), ((817, 842), 'os.path.join', 'path.join', (['"""images"""', 'name'], {}), "('images', name)\n", (826, 842), False, 'from os import path\n'), ((1381, 1422), 'struct.unpack_from', 'struct.unpack_from', (['"""fffff"""', 'data', 'offset'], {}), "('fffff', data, offset)\n", (1399, 1422), False, 'import struct\n'), ((1486, 1516), 'numpy.array', 'np.array', (['(point[1], point[0])'], {}), '((point[1], point[0]))\n', (1494, 1516), True, 'import numpy as np\n'), ((1647, 1687), 'struct.unpack_from', 'struct.unpack_from', (['"""128B"""', 'data', 'offset'], {}), "('128B', data, offset)\n", (1665, 1687), False, 'import struct\n'), ((1760, 1782), 'numpy.asarray', 'np.asarray', (['descriptor'], {}), '(descriptor)\n', (1770, 1782), True, 'import numpy as np\n'), ((951, 970), 'os.path.splitext', 'path.splitext', (['name'], {}), '(name)\n', (964, 970), False, 'from os import path\n')] |
import numpy as np
import torch
import torch.nn.functional as F
import sys
import pandas as pd
from progressbar import ProgressBar, AnimatedMarker, Percentage
import math
from tqdm import trange
def Video_Cmc(features, ids, cams, query_idx,rank_size):
    """Compute the CMC curve and mAP for video re-id features.

    ``features`` is an (n, d) array; ``ids`` and ``cams`` are the matching
    identity and camera labels.  ``query_idx`` selects the query samples,
    while the full set acts as the gallery.
    """
    pool = {'feature': features, 'id': ids, 'cam': cams}
    gallery_idx = np.arange(len(ids))
    query = {key: val[query_idx] for key, val in pool.items()}
    gallery = {key: val[gallery_idx] for key, val in pool.items()}
    # The rank list cannot be longer than the gallery itself.
    effective_rank = min(rank_size, len(gallery_idx))
    CMC, mAP = Cmc(query, gallery, effective_rank)
    return CMC, mAP
def Image_Cmc(gallery_features, gallery_ids, gallery_cams, query_features, query_ids, query_cams, rank_size):
    """Compute the CMC curve and mAP for image re-id with separate query
    and gallery sets.
    """
    gallery_pool = {'feature': gallery_features, 'id': gallery_ids, 'cam': gallery_cams}
    g_sel = np.arange(len(gallery_ids))
    gallery = {key: val[g_sel] for key, val in gallery_pool.items()}
    # The rank list cannot be longer than the gallery itself.
    effective_rank = min(rank_size, len(g_sel))
    query_pool = {'feature': query_features, 'id': query_ids, 'cam': query_cams}
    q_sel = np.arange(len(query_ids))
    query = {key: val[q_sel] for key, val in query_pool.items()}
    CMC, mAP = Cmc(query, gallery, effective_rank)
    return CMC, mAP
def Cmc(q_data, g_data, rank_size):
    """Compute the mean CMC curve and mAP over all queries.

    ``q_data``/``g_data`` are dicts with 'feature' (n, d), 'id' (n,) and
    'cam' (n,) arrays.  A gallery sample is "good" for a query when it
    shares the id but not the camera; it is "junk" when it has id -1 or
    shares both id and camera (same-camera matches are excluded).
    """
    n_query = q_data['feature'].shape[0]
    n_gallery = g_data['feature'].shape[0]
    dist = np_cdist(q_data['feature'], g_data['feature']) # returns an (n_query, n_gallery) distance array
    cmc = np.zeros((n_query, rank_size))
    ap = np.zeros(n_query)
    # Console progress bar; purely cosmetic.
    widgets = ["I'm calculating cmc! ", AnimatedMarker(markers='←↖↑↗→↘↓↙'), ' (', Percentage(), ')']
    pbar = ProgressBar(widgets=widgets, max_value=n_query)
    for k in range(n_query):
        good_idx = np.where((q_data['id'][k]==g_data['id']) & (q_data['cam'][k]!=g_data['cam']))[0]
        junk_mask1 = (g_data['id'] == -1)
        junk_mask2 = (q_data['id'][k]==g_data['id']) & (q_data['cam'][k]==g_data['cam'])
        junk_idx = np.where(junk_mask1 | junk_mask2)[0]
        score = dist[k, :]
        # Rank the gallery by ascending distance, truncated to rank_size.
        sort_idx = np.argsort(score)
        sort_idx = sort_idx[:rank_size]
        ap[k], cmc[k, :] = Compute_AP(good_idx, junk_idx, sort_idx)
        pbar.update(k)
    pbar.finish()
    CMC = np.mean(cmc, axis=0)
    mAP = np.mean(ap)
    return CMC, mAP
def Compute_AP(good_image, junk_image, index):
    """Average precision and CMC vector for one ranked query.

    ``index`` is the ranked list of gallery indices; ``good_image`` holds
    the correct-match indices, ``junk_image`` those to be ignored (they do
    not count towards rank positions or precision).
    """
    cmc = np.zeros((len(index),))
    n_good = len(good_image)
    prev_recall = 0
    prev_precision = 1.
    ap = 0
    hits = 0      # good matches counted into precision so far
    rank = 0      # rank among non-junk entries only
    found = 0
    n_junk = 0
    for pos, gallery_idx in enumerate(index):
        is_good = np.any(good_image == gallery_idx)
        if is_good:
            # CMC is 1 from the first good hit onward (junk-adjusted rank).
            cmc[pos - n_junk:] = 1
            found += 1
        if np.any(junk_image == gallery_idx):
            n_junk += 1
            continue
        if is_good:
            hits += 1
            recall = hits / n_good
            precision = hits / (rank + 1)
            # Trapezoidal update of the precision-recall integral.
            ap += (recall - prev_recall) * (prev_precision + precision) / 2
            prev_recall = recall
            prev_precision = precision
        rank += 1
        if found == n_good:
            break
    return ap, cmc
def cdist(feat1, feat2):
    """Negated cosine-similarity matrix between two feature sets (on CPU)."""
    a = torch.nn.functional.normalize(torch.FloatTensor(feat1), dim=1)
    b = torch.nn.functional.normalize(torch.FloatTensor(feat2), dim=1)
    similarity = torch.mm(a, b.transpose(0, 1))
    return (-1 * similarity).cpu().numpy()
def np_cdist(feat1, feat2):
    """Negated cosine-similarity matrix, NumPy implementation."""
    unit1 = feat1 / np.linalg.norm(feat1, axis=1, keepdims=True)  # L2-normalise rows
    unit2 = feat2 / np.linalg.norm(feat2, axis=1, keepdims=True)
    return -1 * np.dot(unit1, unit2.T)
def np_norm_eudist(feat1,feat2):
    """Pairwise Euclidean distance between L2-normalised feature rows.

    Fixes the original implementation, which referenced the undefined
    names ``feat1_M``/``feat2_M`` (copied from the Mahalanobis variant in
    ``sqdist``) and would raise NameError on first call.

    Parameters
    ----------
    feat1 : ndarray of shape (n, d)
    feat2 : ndarray of shape (m, d)

    Returns
    -------
    ndarray of shape (n, m)
        sqrt(||a||^2 + ||b||^2 - 2 a.b) over the normalised rows; the
        1e-12 term guards against tiny negative values from rounding.
    """
    feat1_u = feat1 / np.linalg.norm(feat1, axis=1, keepdims=True) # n * d -> n
    feat2_u = feat2 / np.linalg.norm(feat2, axis=1, keepdims=True) # n * d -> n
    feat1_sq = np.sum(feat1_u * feat1_u, axis=1)
    feat2_sq = np.sum(feat2_u * feat2_u, axis=1)
    return np.sqrt(feat1_sq.reshape(-1,1) + feat2_sq.reshape(1,-1)
                   - 2*np.dot(feat1_u, feat2_u.T) + 1e-12)
def sqdist(feat1, feat2, M=None):
    """Pairwise squared Mahalanobis distance; plain squared Euclidean when
    ``M`` is None (identity metric).
    """
    if M is None:
        M = np.eye(feat1.shape[1])
    f1M = np.dot(feat1, M)
    f2M = np.dot(feat2, M)
    sq1 = np.sum(f1M * feat1, axis=1).reshape(-1, 1)
    sq2 = np.sum(f2M * feat2, axis=1).reshape(1, -1)
    # ||a||_M^2 + ||b||_M^2 - 2 a' M b, broadcast over all pairs.
    return sq1 + sq2 - 2 * np.dot(f1M, feat2.T)
# Usage: python <script> <query_mat> <query_db_txt> <gallery_mat> <gallery_db_txt>
if __name__ == '__main__':
    from scipy.io import loadmat
    # Each MATLAB file stores the stacked feature matrix under key 'ff'.
    q_feature = loadmat(sys.argv[1])['ff']
    q_db_txt = sys.argv[2]
    g_feature = loadmat(sys.argv[3])['ff']
    g_db_txt = sys.argv[4]
    #print(feature.shape)
    # NOTE(review): Self_Cmc is not defined in the visible portion of this
    # file — this line would raise NameError if executed; confirm the
    # intended entry point (possibly the commented-out Vanilla_Cmc below).
    CMC, mAP = Self_Cmc(g_feature, g_db_txt, 100)
    #CMC, mAP = Vanilla_Cmc(q_feature, q_db_txt, g_feature, g_db_txt)
    print('r1 precision = %f, mAP = %f' % (CMC[0], mAP))
| [
"numpy.mean",
"numpy.eye",
"numpy.where",
"scipy.io.loadmat",
"numpy.linalg.norm",
"numpy.any",
"torch.nn.functional.normalize",
"numpy.sum",
"numpy.zeros",
"numpy.dot",
"progressbar.Percentage",
"progressbar.AnimatedMarker",
"numpy.argsort",
"torch.mm",
"torch.FloatTensor",
"progressb... | [((1629, 1659), 'numpy.zeros', 'np.zeros', (['(n_query, rank_size)'], {}), '((n_query, rank_size))\n', (1637, 1659), True, 'import numpy as np\n'), ((1669, 1686), 'numpy.zeros', 'np.zeros', (['n_query'], {}), '(n_query)\n', (1677, 1686), True, 'import numpy as np\n'), ((1804, 1851), 'progressbar.ProgressBar', 'ProgressBar', ([], {'widgets': 'widgets', 'max_value': 'n_query'}), '(widgets=widgets, max_value=n_query)\n', (1815, 1851), False, 'from progressbar import ProgressBar, AnimatedMarker, Percentage\n'), ((2392, 2412), 'numpy.mean', 'np.mean', (['cmc'], {'axis': '(0)'}), '(cmc, axis=0)\n', (2399, 2412), True, 'import numpy as np\n'), ((2423, 2434), 'numpy.mean', 'np.mean', (['ap'], {}), '(ap)\n', (2430, 2434), True, 'import numpy as np\n'), ((3399, 3423), 'torch.FloatTensor', 'torch.FloatTensor', (['feat1'], {}), '(feat1)\n', (3416, 3423), False, 'import torch\n'), ((3444, 3468), 'torch.FloatTensor', 'torch.FloatTensor', (['feat2'], {}), '(feat2)\n', (3461, 3468), False, 'import torch\n'), ((3489, 3532), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['feat1'], {'dim': '(1)'}), '(feat1, dim=1)\n', (3518, 3532), False, 'import torch\n'), ((4141, 4172), 'numpy.sum', 'np.sum', (['(feat1_M * feat1)'], {'axis': '(1)'}), '(feat1_M * feat1, axis=1)\n', (4147, 4172), True, 'import numpy as np\n'), ((4188, 4219), 'numpy.sum', 'np.sum', (['(feat2_M * feat2)'], {'axis': '(1)'}), '(feat2_M * feat2, axis=1)\n', (4194, 4219), True, 'import numpy as np\n'), ((4464, 4480), 'numpy.dot', 'np.dot', (['feat1', 'M'], {}), '(feat1, M)\n', (4470, 4480), True, 'import numpy as np\n'), ((4495, 4511), 'numpy.dot', 'np.dot', (['feat2', 'M'], {}), '(feat2, M)\n', (4501, 4511), True, 'import numpy as np\n'), ((4527, 4558), 'numpy.sum', 'np.sum', (['(feat1_M * feat1)'], {'axis': '(1)'}), '(feat1_M * feat1, axis=1)\n', (4533, 4558), True, 'import numpy as np\n'), ((4574, 4605), 'numpy.sum', 'np.sum', (['(feat2_M * feat2)'], {'axis': '(1)'}), '(feat2_M * 
feat2, axis=1)\n', (4580, 4605), True, 'import numpy as np\n'), ((1732, 1766), 'progressbar.AnimatedMarker', 'AnimatedMarker', ([], {'markers': '"""←↖↑↗→↘↓↙"""'}), "(markers='←↖↑↗→↘↓↙')\n", (1746, 1766), False, 'from progressbar import ProgressBar, AnimatedMarker, Percentage\n'), ((1790, 1802), 'progressbar.Percentage', 'Percentage', ([], {}), '()\n', (1800, 1802), False, 'from progressbar import ProgressBar, AnimatedMarker, Percentage\n'), ((2214, 2231), 'numpy.argsort', 'np.argsort', (['score'], {}), '(score)\n', (2224, 2231), True, 'import numpy as np\n'), ((2743, 2773), 'numpy.any', 'np.any', (['(good_image == index[n])'], {}), '(good_image == index[n])\n', (2749, 2773), True, 'import numpy as np\n'), ((2876, 2906), 'numpy.any', 'np.any', (['(junk_image == index[n])'], {}), '(junk_image == index[n])\n', (2882, 2906), True, 'import numpy as np\n'), ((3621, 3643), 'torch.mm', 'torch.mm', (['feat1', 'feat2'], {}), '(feat1, feat2)\n', (3629, 3643), False, 'import torch\n'), ((3751, 3795), 'numpy.linalg.norm', 'np.linalg.norm', (['feat1'], {'axis': '(1)', 'keepdims': '(True)'}), '(feat1, axis=1, keepdims=True)\n', (3765, 3795), True, 'import numpy as np\n'), ((3831, 3875), 'numpy.linalg.norm', 'np.linalg.norm', (['feat2'], {'axis': '(1)', 'keepdims': '(True)'}), '(feat2, axis=1, keepdims=True)\n', (3845, 3875), True, 'import numpy as np\n'), ((3905, 3931), 'numpy.dot', 'np.dot', (['feat1_u', 'feat2_u.T'], {}), '(feat1_u, feat2_u.T)\n', (3911, 3931), True, 'import numpy as np\n'), ((3988, 4032), 'numpy.linalg.norm', 'np.linalg.norm', (['feat1'], {'axis': '(1)', 'keepdims': '(True)'}), '(feat1, axis=1, keepdims=True)\n', (4002, 4032), True, 'import numpy as np\n'), ((4068, 4112), 'numpy.linalg.norm', 'np.linalg.norm', (['feat2'], {'axis': '(1)', 'keepdims': '(True)'}), '(feat2, axis=1, keepdims=True)\n', (4082, 4112), True, 'import numpy as np\n'), ((4427, 4449), 'numpy.eye', 'np.eye', (['feat1.shape[1]'], {}), '(feat1.shape[1])\n', (4433, 4449), True, 'import numpy 
as np\n'), ((4771, 4791), 'scipy.io.loadmat', 'loadmat', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (4778, 4791), False, 'from scipy.io import loadmat\n'), ((4841, 4861), 'scipy.io.loadmat', 'loadmat', (['sys.argv[3]'], {}), '(sys.argv[3])\n', (4848, 4861), False, 'from scipy.io import loadmat\n'), ((1900, 1986), 'numpy.where', 'np.where', (["((q_data['id'][k] == g_data['id']) & (q_data['cam'][k] != g_data['cam']))"], {}), "((q_data['id'][k] == g_data['id']) & (q_data['cam'][k] != g_data[\n 'cam']))\n", (1908, 1986), True, 'import numpy as np\n'), ((2131, 2164), 'numpy.where', 'np.where', (['(junk_mask1 | junk_mask2)'], {}), '(junk_mask1 | junk_mask2)\n', (2139, 2164), True, 'import numpy as np\n'), ((3545, 3588), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['feat2'], {'dim': '(1)'}), '(feat2, dim=1)\n', (3574, 3588), False, 'import torch\n'), ((4669, 4693), 'numpy.dot', 'np.dot', (['feat1_M', 'feat2.T'], {}), '(feat1_M, feat2.T)\n', (4675, 4693), True, 'import numpy as np\n'), ((4291, 4315), 'numpy.dot', 'np.dot', (['feat1_M', 'feat2.T'], {}), '(feat1_M, feat2.T)\n', (4297, 4315), True, 'import numpy as np\n')] |
""" ImageNet: VGGNet, ResNet, Inception, and Xception with Keras """
import os
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.applications.inception_v3 import preprocess_input
from tensorflow.keras.applications import imagenet_utils
import numpy as np
# Silence TensorFlow's native logging (3 = errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Load InceptionV3 with ImageNet weights once at import time; reused by
# classify_image() for every call.
MODEL = InceptionV3(weights="imagenet")
def classify_image(image_file):
    """Classify an image file with Inception V3.

    Returns ``[label, probability]`` for the top ImageNet prediction.
    """
    target_size = (299, 299)  # Inception V3's expected input resolution
    pixels = image.img_to_array(image.load_img(image_file, target_size=target_size))
    batch = preprocess_input(np.expand_dims(pixels, axis=0))
    decoded = imagenet_utils.decode_predictions(MODEL.predict(batch))
    (_, label, prob) = decoded[0][0]
    return [label, prob]
| [
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.applications.InceptionV3",
"tensorflow.keras.applications.imagenet_utils.decode_predictions",
"numpy.expand_dims",
"tensorflow.keras.applications.inception_v3.preprocess_input",
"tensorflow.keras.preprocessing.image.img_to_array"
] | [((381, 412), 'tensorflow.keras.applications.InceptionV3', 'InceptionV3', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (392, 412), False, 'from tensorflow.keras.applications import InceptionV3\n'), ((532, 583), 'tensorflow.keras.preprocessing.image.load_img', 'image.load_img', (['image_file'], {'target_size': 'input_shape'}), '(image_file, target_size=input_shape)\n', (546, 583), False, 'from tensorflow.keras.preprocessing import image\n'), ((594, 617), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (612, 617), False, 'from tensorflow.keras.preprocessing import image\n'), ((628, 655), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (642, 655), True, 'import numpy as np\n'), ((666, 687), 'tensorflow.keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['img'], {}), '(img)\n', (682, 687), False, 'from tensorflow.keras.applications.inception_v3 import preprocess_input\n'), ((736, 776), 'tensorflow.keras.applications.imagenet_utils.decode_predictions', 'imagenet_utils.decode_predictions', (['preds'], {}), '(preds)\n', (769, 776), False, 'from tensorflow.keras.applications import imagenet_utils\n')] |
import gym
import numpy as np
import matplotlib.pyplot as plt
# Discount factor for future rewards.
ɣ = 0.99
# Learning rate of the TD(0) semi-gradient step.
𝛼 = 0.005
MAX_EPISODES = 1000
N_STEPS = 50000  # NOTE(review): declared but never used in this script
SEED = 2020
# Fix environment and NumPy randomness for reproducibility.
env = gym.make("CartPole-v1")
env.seed(SEED)
np.random.seed(SEED)
def get_action_from_policy(policy_weights, state):
    """Greedy action: index of the largest linear Q-value ``w.T @ state``."""
    q_values = np.matmul(policy_weights.T, state)
    return np.argmax(q_values)
def compute_gradient(w, action, state_current, state_next, reward, done):
    """One TD(0) semi-gradient update for the linear Q-approximation.

    Returns a (4, 2) matrix to be added to the weight matrix ``w``.
    The module-level constants ɣ (discount) and 𝛼 (learning rate) are used.
    """
    q_current = np.matmul(w.T, state_current)
    td_target = q_current.copy()
    if done:
        # Terminal transition: no bootstrapped future value.
        td_target[action] = reward
    else:
        td_target[action] = reward + ɣ * np.max(np.matmul(w.T, state_next))
    td_error = np.reshape(td_target - q_current, (1, 2))
    update = 𝛼 * np.matmul(np.reshape(state_current, (4, 1)), td_error)
    # Clip the update's global L2 norm at 10 to keep each step bounded.
    magnitude = np.linalg.norm(update)
    if magnitude > 10:
        update *= 10 / magnitude
    return update
# Training loop: linear function approximation (LFA) Q-learning on CartPole.
if __name__ == "__main__":
    # Random initial weight matrix, one column of weights per action.
    w = np.random.uniform(0, 1, (4,2))
    # w = np.array([[ 0.91264621, 2.32775906],[ 8.36576436, -7.8797786 ], [ 4.15530658, -3.73058892], [-2.16779329, 2.8840473 ]])
    t = 0  # total environment steps across all episodes
    plot_episodes = []
    plot_rewards = []
    for episode in range(MAX_EPISODES):
        state_current = env.reset()
        total_reward = 0
        done = False
        while not done:
            t += 1
            # Greedy action from the current linear Q-function.
            action = get_action_from_policy(w, state_current)
            state_next, reward, done, info = env.step(action)
            # In-place TD(0) update of the weights.
            w += compute_gradient(w, action, state_current, state_next, reward, done)
            state_current = state_next
            total_reward += reward
            # env.render()
        print("Episode number: " + str(episode) + "; Total Reward: " + str(total_reward) + "; t: " + str(t))
        plot_rewards.append(total_reward)
        plot_episodes.append(episode)
    env.close()
    print("Weights of network", w)
    print("Average reward: ", np.mean(plot_rewards))
    # Learning curve: per-episode total reward.
    plt.plot(plot_episodes, plot_rewards)
    plt.title("LFA: Total Reward During Training")
    plt.xlabel("Episodes")
    plt.ylabel("Reward")
    plt.show()
| [
"numpy.mean",
"numpy.reshape",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.random.uniform",
"numpy.matmul",
"numpy.random.seed",
"numpy.linalg.norm",
"matplotlib.pyplot.title",
"gym.make",
"matplotlib.pyplot.show"
] | [((137, 160), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (145, 160), False, 'import gym\n'), ((176, 196), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (190, 196), True, 'import numpy as np\n'), ((398, 427), 'numpy.matmul', 'np.matmul', (['w.T', 'state_current'], {}), '(w.T, state_current)\n', (407, 427), True, 'import numpy as np\n'), ((628, 652), 'numpy.reshape', 'np.reshape', (['loss', '(1, 2)'], {}), '(loss, (1, 2))\n', (638, 652), True, 'import numpy as np\n'), ((666, 699), 'numpy.reshape', 'np.reshape', (['state_current', '(4, 1)'], {}), '(state_current, (4, 1))\n', (676, 699), True, 'import numpy as np\n'), ((755, 779), 'numpy.linalg.norm', 'np.linalg.norm', (['gradient'], {}), '(gradient)\n', (769, 779), True, 'import numpy as np\n'), ((888, 919), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(4, 2)'], {}), '(0, 1, (4, 2))\n', (905, 919), True, 'import numpy as np\n'), ((1885, 1922), 'matplotlib.pyplot.plot', 'plt.plot', (['plot_episodes', 'plot_rewards'], {}), '(plot_episodes, plot_rewards)\n', (1893, 1922), True, 'import matplotlib.pyplot as plt\n'), ((1927, 1973), 'matplotlib.pyplot.title', 'plt.title', (['"""LFA: Total Reward During Training"""'], {}), "('LFA: Total Reward During Training')\n", (1936, 1973), True, 'import matplotlib.pyplot as plt\n'), ((1978, 2000), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episodes"""'], {}), "('Episodes')\n", (1988, 2000), True, 'import matplotlib.pyplot as plt\n'), ((2005, 2025), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reward"""'], {}), "('Reward')\n", (2015, 2025), True, 'import matplotlib.pyplot as plt\n'), ((2030, 2040), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2038, 2040), True, 'import matplotlib.pyplot as plt\n'), ((271, 305), 'numpy.matmul', 'np.matmul', (['policy_weights.T', 'state'], {}), '(policy_weights.T, state)\n', (280, 305), True, 'import numpy as np\n'), ((722, 745), 'numpy.matmul', 'np.matmul', 
(['grad_Q', 'loss'], {}), '(grad_Q, loss)\n', (731, 745), True, 'import numpy as np\n'), ((1858, 1879), 'numpy.mean', 'np.mean', (['plot_rewards'], {}), '(plot_rewards)\n', (1865, 1879), True, 'import numpy as np\n'), ((554, 580), 'numpy.matmul', 'np.matmul', (['w.T', 'state_next'], {}), '(w.T, state_next)\n', (563, 580), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from numpy import abs, newaxis, array
def get_cell_area(self, indices=None):
    """
    Return the area of the cells on the outer surface.

    #TODO address multiple cell type issue, i.e. distracted indices

    Parameters
    ----------
    self : MeshMat
        a MeshMat object
    indices : list
        list of the points to extract (optional)

    Returns
    -------
    areas: ndarray
        Area of the cells
    """
    logger = self.get_logger()
    area = []
    vertices_dict = self.get_vertice(indices=indices)
    for key, vertices in vertices_dict.items():
        # Skip cell types with no vertices for the requested indices.
        if len(vertices) != 0:
            try:
                A = self.cell[key].interpolation.ref_cell.get_cell_area(vertices)
                A = A.tolist()
            # Narrowed from a bare ``except:`` so KeyboardInterrupt /
            # SystemExit still propagate; a missing reference cell only
            # degrades the result for that cell type.
            except Exception:
                logger.warning(
                    f'MeshMat: Reference Cell for "{key}" not found. '
                    + "Respective area set to zero."
                )
                A = [0 for i in range(vertices.shape[0])]
            area.extend(A)
    return array(area)
| [
"numpy.array"
] | [((1055, 1066), 'numpy.array', 'array', (['area'], {}), '(area)\n', (1060, 1066), False, 'from numpy import abs, newaxis, array\n')] |
import os
import numpy as np
import copy
from PIL import Image, ImageDraw
from collections.abc import Sequence
from paddle.io import Dataset
from data.operators import *
from eval_model import get_categories, get_infer_results
class ImageFolder(Dataset):
    """Dataset over a directory (or explicit list) of image files.

    Produces one record per image with 'im_id' and 'im_file' fields; mixup /
    cutmix / mosaic augmentation pair additional random samples with each
    item while the current epoch is below the corresponding cut-off epoch.
    """
    def __init__(self,
                 dataset_dir=None,
                 image_dir=None,
                 anno_path=None,
                 data_fields=['image'],
                 sample_num=-1,
                 use_default_label=None,
                 **kwargs):
        # NOTE(review): data_fields=['image'] is a mutable default argument,
        # shared across instances — consider a None sentinel.
        super(ImageFolder, self).__init__()
        self.dataset_dir = dataset_dir if dataset_dir is not None else ''
        self.anno_path = anno_path
        self.image_dir = image_dir if image_dir is not None else ''
        self.data_fields = data_fields
        self.sample_num = sample_num
        self.use_default_label = use_default_label
        self._epoch = 0
        self._curr_iter = 0
        self._imid2path = {}
        self.roidbs = None
        # NOTE(review): duplicate assignment — sample_num was already set above.
        self.sample_num = sample_num
    def __len__(self, ):
        return len(self.roidbs)
    def __getitem__(self, idx):
        # data batch
        roidb = copy.deepcopy(self.roidbs[idx])
        # An epoch cut-off of 0 means the augmentation is always active;
        # otherwise it is active only while _epoch < cut-off.
        if self.mixup_epoch == 0 or self._epoch < self.mixup_epoch:
            # Mixup: pair with one random sample.
            n = len(self.roidbs)
            idx = np.random.randint(n)
            roidb = [roidb, copy.deepcopy(self.roidbs[idx])]
        elif self.cutmix_epoch == 0 or self._epoch < self.cutmix_epoch:
            # CutMix: pair with one random sample.
            n = len(self.roidbs)
            idx = np.random.randint(n)
            roidb = [roidb, copy.deepcopy(self.roidbs[idx])]
        elif self.mosaic_epoch == 0 or self._epoch < self.mosaic_epoch:
            # Mosaic: group with three random samples (four tiles total).
            n = len(self.roidbs)
            roidb = [roidb, ] + [
                copy.deepcopy(self.roidbs[np.random.randint(n)])
                for _ in range(3)
            ]
        # Stamp the current iteration counter on every record in the batch.
        if isinstance(roidb, Sequence):
            for r in roidb:
                r['curr_iter'] = self._curr_iter
        else:
            roidb['curr_iter'] = self._curr_iter
        self._curr_iter += 1
        return self.transform(roidb)
    def check_or_download_dataset(self):
        # Local folder dataset: nothing to download.
        return
    def set_kwargs(self, **kwargs):
        # -1 disables an augmentation; see __getitem__ for the semantics.
        self.mixup_epoch = kwargs.get('mixup_epoch', -1)
        self.cutmix_epoch = kwargs.get('cutmix_epoch', -1)
        self.mosaic_epoch = kwargs.get('mosaic_epoch', -1)
    def set_transform(self, transform):
        self.transform = transform
    def set_epoch(self, epoch_id):
        self._epoch = epoch_id
    def parse_dataset(self, ):
        # Lazily build the record list on first use.
        if not self.roidbs:
            self.roidbs = self._load_images()
    def get_anno(self):
        """Return the absolute annotation path, or None when unset."""
        if self.anno_path is None:
            return
        return os.path.join(self.dataset_dir, self.anno_path)
    def _parse(self):
        """Collect image paths from image_dir (dirs are walked, files kept)."""
        image_dir = self.image_dir
        if not isinstance(image_dir, Sequence):
            image_dir = [image_dir]
        images = []
        for im_dir in image_dir:
            if os.path.isdir(im_dir):
                im_dir = os.path.join(self.dataset_dir, im_dir)
                images.extend(_make_dataset(im_dir))
            elif os.path.isfile(im_dir) and _is_valid_file(im_dir):
                images.append(im_dir)
        return images
    def _load_images(self):
        """Build one {'im_id', 'im_file'} record per image, capped at sample_num."""
        images = self._parse()
        ct = 0
        records = []
        for image in images:
            assert image != '' and os.path.isfile(image), \
                    "Image {} not found".format(image)
            if self.sample_num > 0 and ct >= self.sample_num:
                break
            rec = {'im_id': np.array([ct]), 'im_file': image}
            self._imid2path[ct] = image
            ct += 1
            records.append(rec)
        assert len(records) > 0, "No image file found"
        return records
    def get_imid2path(self):
        return self._imid2path
    def set_images(self, images):
        # Replace the image source and rebuild the records immediately.
        self.image_dir = images
        self.roidbs = self._load_images()
def _is_valid_file(f, extensions=('.jpg', '.jpeg', '.png', '.bmp')):
return f.lower().endswith(extensions)
def _make_dataset(dir):
dir = os.path.expanduser(dir)
if not os.path.isdir(dir):
raise ('{} should be a dir'.format(dir))
images = []
for root, _, fnames in sorted(os.walk(dir, followlinks=True)):
for fname in sorted(fnames):
path = os.path.join(root, fname)
if _is_valid_file(path):
images.append(path)
return images
def draw_bbox(image, bbox_res, im_id, catid2name, threshold=0.5):
    """
    Draw bbox on image

    Draws every detection for ``im_id`` whose score clears ``threshold``
    onto the PIL ``image`` (in place via ImageDraw) and returns it.  Each
    detection dict provides 'image_id', 'category_id', 'bbox' as
    [x, y, w, h] and 'score'; one colour is assigned per category.
    """
    draw = ImageDraw.Draw(image)
    catid2color = {}
    color_list = colormap(rgb=True)[:40]
    for dt in np.array(bbox_res):
        # Skip detections belonging to other images in the batch.
        if im_id != dt['image_id']:
            continue
        catid, bbox, score = dt['category_id'], dt['bbox'], dt['score']
        if score < threshold:
            continue
        # Assign a random palette colour the first time a category appears.
        if catid not in catid2color:
            idx = np.random.randint(len(color_list))
            catid2color[catid] = color_list[idx]
        color = tuple(catid2color[catid])
        # draw bbox (convert [x, y, w, h] to corner coordinates)
        xmin, ymin, w, h = bbox
        xmax = xmin + w
        ymax = ymin + h
        draw.line(
            [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
             (xmin, ymin)],
            width=2,
            fill=color)
        # draw label (filled background sized to the text, above the box)
        text = "{} {:.2f}".format(catid2name[catid], score)
        tw, th = draw.textsize(text)
        draw.rectangle(
            [(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill=color)
        draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))
    return image
def colormap(rgb=False):
    """Return an (N, 3) float32 palette of detection colours scaled to 0-255.

    Parameters
    ----------
    rgb : bool
        When True the channel order is RGB; otherwise BGR (OpenCV order).
    """
    palette = np.array([
        0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494,
        0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078,
        0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000,
        1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000,
        0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667,
        0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000,
        0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000,
        1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000,
        0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500,
        0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667,
        0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333,
        0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000,
        0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333,
        0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000,
        1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000,
        1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.167,
        0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000,
        0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000,
        0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000,
        0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000,
        0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833,
        0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.286,
        0.286, 0.286, 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714,
        0.714, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000
    ]).astype(np.float32).reshape((-1, 3)) * 255
    return palette if rgb else palette[:, ::-1]
def predict(images,
            model,
            draw_threshold=0.5,
            output_dir='output',
            anno_path=None):
    """
    Run inference on a list of images and save visualized detections.

    Args:
        images (list[str]): image file paths to run inference on.
        model: detection model exposing ``eval()`` and ``__call__(data)``.
        draw_threshold (float): minimum score for a box to be drawn.
        output_dir (str): directory where annotated images are written
            (created if missing).
        anno_path (str | None): optional annotation file used to derive
            category ids/names.
    """
    status = {}
    dataset = ImageFolder(anno_path=anno_path)
    dataset.set_images(images)
    # Eval-time preprocessing pipeline: decode, resize, normalize, HWC->CHW.
    sample_transforms = [
        {Decode: {}},
        {Resize: {'target_size': [800, 1333], 'keep_ratio': True}},
        {NormalizeImage: {'is_scale': True,
                          'mean': [0.485, 0.456, 0.406],
                          'std': [0.229, 0.224, 0.225]}},
        {Permute: {}},
    ]
    batch_transforms = [{PadMaskBatch: {'pad_to_stride': -1, 'return_pad_mask': True}}]
    loader = BaseDataLoader(sample_transforms, batch_transforms,
                            batch_size=1, shuffle=False, drop_last=False)(dataset, 0)
    imid2path = dataset.get_imid2path()
    anno_file = dataset.get_anno()
    clsid2catid, catid2name = get_categories('COCO', anno_file=anno_file)
    # Run inference.
    status['mode'] = 'test'
    model.eval()
    for step_id, data in enumerate(loader):
        status['step_id'] = step_id
        # forward pass
        outs = model(data)
        for key in ['im_shape', 'scale_factor', 'im_id']:
            outs[key] = data[key]
        # Convert any tensor-like outputs to numpy arrays.
        for key, value in outs.items():
            if hasattr(value, 'numpy'):
                outs[key] = value.numpy()
        batch_res = get_infer_results(outs, clsid2catid)
        bbox_num = outs['bbox_num']
        start = 0
        for i, im_id in enumerate(outs['im_id']):
            image_path = imid2path[int(im_id)]
            image = Image.open(image_path).convert('RGB')
            status['original_image'] = np.array(image.copy())
            end = start + bbox_num[i]
            # Slice this image's boxes out of the flattened batch results.
            bbox_res = batch_res['bbox'][start:end] if 'bbox' in batch_res else None
            if bbox_res is not None:
                image = draw_bbox(image, bbox_res, int(im_id), catid2name,
                                  draw_threshold)
            status['result_image'] = np.array(image.copy())
            # Save image with detections drawn; exist_ok avoids a race
            # between the existence check and directory creation.
            os.makedirs(output_dir, exist_ok=True)
            image_name = os.path.split(image_path)[-1]
            name, ext = os.path.splitext(image_name)
            save_name = os.path.join(output_dir, name) + ext
            print("Detection bbox results save in {}".format(save_name))
            image.save(save_name, quality=95)
            start = end
def get_test_images(infer_img, infer_dir=None):
    """
    Get image path list in TEST mode

    ``infer_img`` takes precedence over ``infer_dir``: if it names an
    existing file, only that file is returned. Otherwise every file in
    ``infer_dir`` with a known image extension (jpg/jpeg/png/bmp, either
    case) is collected.
    """
    assert infer_img is not None or infer_dir is not None, \
        "--infer_img or --infer_dir should be set"
    assert infer_img is None or os.path.isfile(infer_img), \
        "{} is not a file".format(infer_img)
    assert infer_dir is None or os.path.isdir(infer_dir), \
        "{} is not a directory".format(infer_dir)
    # infer_img has a higher priority
    if infer_img and os.path.isfile(infer_img):
        return [infer_img]
    infer_dir = os.path.abspath(infer_dir)
    assert os.path.isdir(infer_dir), \
        "infer_dir {} is not a directory".format(infer_dir)
    base_exts = ['jpg', 'jpeg', 'png', 'bmp']
    found = set()
    # Match both lower- and upper-case variants of each extension.
    for ext in base_exts + [e.upper() for e in base_exts]:
        found.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))
    images = list(found)
    assert len(images) > 0, "no image found in {}".format(infer_dir)
    print("Found {} inference images in total.".format(len(images)))
    return images
"os.path.exists",
"PIL.Image.open",
"eval_model.get_categories",
"os.makedirs",
"eval_model.get_infer_results",
"os.walk",
"os.path.join",
"os.path.splitext",
"os.path.split",
"os.path.isfile",
"numpy.array",
"PIL.ImageDraw.Draw",
"os.path.isdir",
"numpy.random.randint",
"copy.deepcopy",... | [((4092, 4115), 'os.path.expanduser', 'os.path.expanduser', (['dir'], {}), '(dir)\n', (4110, 4115), False, 'import os\n'), ((4569, 4590), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (4583, 4590), False, 'from PIL import Image, ImageDraw\n'), ((4668, 4686), 'numpy.array', 'np.array', (['bbox_res'], {}), '(bbox_res)\n', (4676, 4686), True, 'import numpy as np\n'), ((8489, 8532), 'eval_model.get_categories', 'get_categories', (['"""COCO"""'], {'anno_file': 'anno_file'}), "('COCO', anno_file=anno_file)\n", (8503, 8532), False, 'from eval_model import get_categories, get_infer_results\n'), ((10617, 10643), 'os.path.abspath', 'os.path.abspath', (['infer_dir'], {}), '(infer_dir)\n', (10632, 10643), False, 'import os\n'), ((10655, 10679), 'os.path.isdir', 'os.path.isdir', (['infer_dir'], {}), '(infer_dir)\n', (10668, 10679), False, 'import os\n'), ((1144, 1175), 'copy.deepcopy', 'copy.deepcopy', (['self.roidbs[idx]'], {}), '(self.roidbs[idx])\n', (1157, 1175), False, 'import copy\n'), ((2694, 2740), 'os.path.join', 'os.path.join', (['self.dataset_dir', 'self.anno_path'], {}), '(self.dataset_dir, self.anno_path)\n', (2706, 2740), False, 'import os\n'), ((4127, 4145), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (4140, 4145), False, 'import os\n'), ((4246, 4276), 'os.walk', 'os.walk', (['dir'], {'followlinks': '(True)'}), '(dir, followlinks=True)\n', (4253, 4276), False, 'import os\n'), ((8958, 8994), 'eval_model.get_infer_results', 'get_infer_results', (['outs', 'clsid2catid'], {}), '(outs, clsid2catid)\n', (8975, 8994), False, 'from eval_model import get_categories, get_infer_results\n'), ((10275, 10300), 'os.path.isfile', 'os.path.isfile', (['infer_img'], {}), '(infer_img)\n', (10289, 10300), False, 'import os\n'), ((10385, 10409), 'os.path.isdir', 'os.path.isdir', (['infer_dir'], {}), '(infer_dir)\n', (10398, 10409), False, 'import os\n'), ((10527, 10552), 'os.path.isfile', 'os.path.isfile', (['infer_img'], 
{}), '(infer_img)\n', (10541, 10552), False, 'import os\n'), ((1295, 1315), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (1312, 1315), True, 'import numpy as np\n'), ((2951, 2972), 'os.path.isdir', 'os.path.isdir', (['im_dir'], {}), '(im_dir)\n', (2964, 2972), False, 'import os\n'), ((4335, 4360), 'os.path.join', 'os.path.join', (['root', 'fname'], {}), '(root, fname)\n', (4347, 4360), False, 'import os\n'), ((5692, 7167), 'numpy.array', 'np.array', (['[0.0, 0.447, 0.741, 0.85, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, 0.184, \n 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, 0.184, \n 0.3, 0.3, 0.3, 0.6, 0.6, 0.6, 1.0, 0.0, 0.0, 1.0, 0.5, 0.0, 0.749, \n 0.749, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.667, 0.0, 1.0, 0.333, 0.333,\n 0.0, 0.333, 0.667, 0.0, 0.333, 1.0, 0.0, 0.667, 0.333, 0.0, 0.667, \n 0.667, 0.0, 0.667, 1.0, 0.0, 1.0, 0.333, 0.0, 1.0, 0.667, 0.0, 1.0, 1.0,\n 0.0, 0.0, 0.333, 0.5, 0.0, 0.667, 0.5, 0.0, 1.0, 0.5, 0.333, 0.0, 0.5, \n 0.333, 0.333, 0.5, 0.333, 0.667, 0.5, 0.333, 1.0, 0.5, 0.667, 0.0, 0.5,\n 0.667, 0.333, 0.5, 0.667, 0.667, 0.5, 0.667, 1.0, 0.5, 1.0, 0.0, 0.5, \n 1.0, 0.333, 0.5, 1.0, 0.667, 0.5, 1.0, 1.0, 0.5, 0.0, 0.333, 1.0, 0.0, \n 0.667, 1.0, 0.0, 1.0, 1.0, 0.333, 0.0, 1.0, 0.333, 0.333, 1.0, 0.333, \n 0.667, 1.0, 0.333, 1.0, 1.0, 0.667, 0.0, 1.0, 0.667, 0.333, 1.0, 0.667,\n 0.667, 1.0, 0.667, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.333, 1.0, 1.0, 0.667,\n 1.0, 0.167, 0.0, 0.0, 0.333, 0.0, 0.0, 0.5, 0.0, 0.0, 0.667, 0.0, 0.0, \n 0.833, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.167, 0.0, 0.0, 0.333, 0.0, 0.0, \n 0.5, 0.0, 0.0, 0.667, 0.0, 0.0, 0.833, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, \n 0.167, 0.0, 0.0, 0.333, 0.0, 0.0, 0.5, 0.0, 0.0, 0.667, 0.0, 0.0, 0.833,\n 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.143, 0.143, 0.143, 0.286, 0.286, 0.286,\n 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714, 0.714, 0.857, \n 0.857, 0.857, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.447, 0.741, 0.85, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494,\n 0.184, 
0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, \n 0.184, 0.3, 0.3, 0.3, 0.6, 0.6, 0.6, 1.0, 0.0, 0.0, 1.0, 0.5, 0.0, \n 0.749, 0.749, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.667, 0.0, 1.0, 0.333,\n 0.333, 0.0, 0.333, 0.667, 0.0, 0.333, 1.0, 0.0, 0.667, 0.333, 0.0, \n 0.667, 0.667, 0.0, 0.667, 1.0, 0.0, 1.0, 0.333, 0.0, 1.0, 0.667, 0.0, \n 1.0, 1.0, 0.0, 0.0, 0.333, 0.5, 0.0, 0.667, 0.5, 0.0, 1.0, 0.5, 0.333, \n 0.0, 0.5, 0.333, 0.333, 0.5, 0.333, 0.667, 0.5, 0.333, 1.0, 0.5, 0.667,\n 0.0, 0.5, 0.667, 0.333, 0.5, 0.667, 0.667, 0.5, 0.667, 1.0, 0.5, 1.0, \n 0.0, 0.5, 1.0, 0.333, 0.5, 1.0, 0.667, 0.5, 1.0, 1.0, 0.5, 0.0, 0.333, \n 1.0, 0.0, 0.667, 1.0, 0.0, 1.0, 1.0, 0.333, 0.0, 1.0, 0.333, 0.333, 1.0,\n 0.333, 0.667, 1.0, 0.333, 1.0, 1.0, 0.667, 0.0, 1.0, 0.667, 0.333, 1.0,\n 0.667, 0.667, 1.0, 0.667, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.333, 1.0, 1.0,\n 0.667, 1.0, 0.167, 0.0, 0.0, 0.333, 0.0, 0.0, 0.5, 0.0, 0.0, 0.667, 0.0,\n 0.0, 0.833, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.167, 0.0, 0.0, 0.333, 0.0, \n 0.0, 0.5, 0.0, 0.0, 0.667, 0.0, 0.0, 0.833, 0.0, 0.0, 1.0, 0.0, 0.0, \n 0.0, 0.167, 0.0, 0.0, 0.333, 0.0, 0.0, 0.5, 0.0, 0.0, 0.667, 0.0, 0.0, \n 0.833, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.143, 0.143, 0.143, 0.286, 0.286,\n 0.286, 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714, 0.714, \n 0.857, 0.857, 0.857, 1.0, 1.0, 1.0])\n', (5700, 7167), True, 'import numpy as np\n'), ((9785, 9813), 'os.path.splitext', 'os.path.splitext', (['image_name'], {}), '(image_name)\n', (9801, 9813), False, 'import os\n'), ((1344, 1375), 'copy.deepcopy', 'copy.deepcopy', (['self.roidbs[idx]'], {}), '(self.roidbs[idx])\n', (1357, 1375), False, 'import copy\n'), ((1500, 1520), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (1517, 1520), True, 'import numpy as np\n'), ((2999, 3037), 'os.path.join', 'os.path.join', (['self.dataset_dir', 'im_dir'], {}), '(self.dataset_dir, im_dir)\n', (3011, 3037), False, 'import os\n'), ((3379, 3400), 'os.path.isfile', 
'os.path.isfile', (['image'], {}), '(image)\n', (3393, 3400), False, 'import os\n'), ((3571, 3585), 'numpy.array', 'np.array', (['[ct]'], {}), '([ct])\n', (3579, 3585), True, 'import numpy as np\n'), ((9638, 9664), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (9652, 9664), False, 'import os\n'), ((9682, 9705), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (9693, 9705), False, 'import os\n'), ((9731, 9756), 'os.path.split', 'os.path.split', (['image_path'], {}), '(image_path)\n', (9744, 9756), False, 'import os\n'), ((1549, 1580), 'copy.deepcopy', 'copy.deepcopy', (['self.roidbs[idx]'], {}), '(self.roidbs[idx])\n', (1562, 1580), False, 'import copy\n'), ((3108, 3130), 'os.path.isfile', 'os.path.isfile', (['im_dir'], {}), '(im_dir)\n', (3122, 3130), False, 'import os\n'), ((9167, 9189), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (9177, 9189), False, 'from PIL import Image, ImageDraw\n'), ((1763, 1783), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (1780, 1783), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import numpy as np
from pathlib import Path
import pdb
import torch
from mseg.utils.names_utils import (
load_class_names,
get_universal_class_names,
get_classname_to_dataloaderid_map
)
from mseg.utils.tsv_utils import read_tsv_column_vals
from mseg.taxonomy.taxonomy_converter import (
parse_entry,
parse_uentry,
parse_test_entry,
TaxonomyConverter,
populate_linear_mapping,
RELABELED_TRAIN_DATASETS,
UNRELABELED_TRAIN_DATASETS
)
_ROOT = Path(__file__).resolve().parent.parent
def entries_equal(dname, tsv_fpath, is_train_dataset):
"""
Compare classnames in *_names.txt file against tsv column entries.
For training datasets, these must be *exactly* the same.
"""
tsv_classnames = read_tsv_column_vals(tsv_fpath, col_name=dname, convert_val_to_int=False)
nonempty_classnames = [name for name in tsv_classnames if name != '']
tsv_classnames = []
for entry in nonempty_classnames:
tsv_classnames.extend(parse_entry(entry))
txt_classnames = load_class_names(dname)
if set(txt_classnames) != set(tsv_classnames):
pdb.set_trace()
if is_train_dataset:
assert len(txt_classnames) == len(tsv_classnames)
# ensure no duplicates among training dataset classnames
assert len(list(tsv_classnames)) == len(set(tsv_classnames))
return set(txt_classnames) == set(tsv_classnames)
def test_names_complete():
"""
Test on dataset_config and on TaxonomyConverter
Make sure tsv entries in a single column match EXACTLY
to _names.txt file.
"""
tsv_fpath = f'{_ROOT}/mseg/class_remapping_files/MSeg_master.tsv'
train_dnames = UNRELABELED_TRAIN_DATASETS + RELABELED_TRAIN_DATASETS
for dname in train_dnames:
print(f'On {dname}...')
assert entries_equal(dname, tsv_fpath, is_train_dataset=True)
print(f'{dname} passed.')
print()
test_dnames = [
'camvid-11',
'kitti-19',
#'pascal-context-60', # {'flower', 'wood'} missing
'scannet-20',
'voc2012',
'wilddash-19'
]
for dname in test_dnames:
print(f'On {dname}')
assert entries_equal(dname, tsv_fpath, is_train_dataset=False)
def test_parse_entry_blank():
""" """
entry = ''
classes = parse_entry(entry)
assert classes == []
def test_parse_entry_brackets1():
"""
"""
entry = '{house,building, skyscraper, booth, hovel, tower, grandstand}'
classes = parse_entry(entry)
gt_classes = [
'house',
'building',
'skyscraper',
'booth',
'hovel',
'tower',
'grandstand'
]
assert classes == gt_classes
def test_parse_entry_space_sep():
"""
Note: ADE20K class "conveyer" is typo of "conveyor"
"""
entry = 'conveyer belt'
classes = parse_entry(entry)
assert classes == ['conveyer belt']
def test_parse_uentry():
""" """
uentry = 'animal_other'
fullname = parse_uentry(uentry)
assert fullname == 'animal_other'
def test_label_transform():
"""
Bring label from training taxonomy (mapillary-public65)
to the universal taxonomy.
21 is the motorcyclist class in mapillary-public65
"""
dname = 'mapillary-public65'
txt_classnames = load_class_names(dname)
train_idx = txt_classnames.index('Motorcyclist')
tc = TaxonomyConverter()
# training dataset label
traind_label = torch.ones(4,4)*train_idx
traind_label = traind_label.type(torch.LongTensor)
# Get back the universal label
u_label = tc.transform_label(traind_label, dname)
u_idx = get_universal_class_names().index('motorcyclist')
gt_u_label = np.ones((4,4)).astype(np.int64) * u_idx
assert np.allclose(u_label.numpy(), gt_u_label)
def test_label_transform_unlabeled():
"""
Make sure 255 stays mapped to 255 at each level (to be ignored in cross-entropy loss).
"""
IGNORE_LABEL = 255
dname = 'mapillary-public65'
txt_classnames = load_class_names(dname)
name2id = get_classname_to_dataloaderid_map(dname, include_ignore_idx_cls = True)
train_idx = name2id['unlabeled']
tc = TaxonomyConverter()
# training dataset label
traind_label = torch.ones(4,4)*train_idx
traind_label = traind_label.type(torch.LongTensor)
# Get back the universal label
u_label = tc.transform_label(traind_label, dname)
u_idx = IGNORE_LABEL
gt_u_label = np.ones((4,4)).astype(np.int64) * u_idx
assert np.allclose(u_label.numpy(), gt_u_label)
def test_transform_predictions_test():
"""
Consider predictions made within the universal taxonomy
over a tiny 2x3 image. We use a linear mapping to bring
these predictions into a test dataset's taxonomy
(summing the probabilities where necessary).
For Camvid, universal probabilities for `person',`bicycle'
should both go into the 'Bicyclist' class.
"""
u_classnames = get_universal_class_names()
person_uidx = u_classnames.index('person')
bicycle_uidx = u_classnames.index('bicycle')
sky_uidx = u_classnames.index('sky')
tc = TaxonomyConverter()
input = np.zeros((194,2,3))
input[sky_uidx,0,:] = 1.0 # top row is sky
input[person_uidx,1,:] = 0.5 # bottom row is 50/50 person or bicyclist
input[bicycle_uidx,1,:] = 0.5 # bottom row is 50/50 person or bicyclist
input = torch.from_numpy(input)
input = input.unsqueeze(0).float() # CHW -> NCHW
assert input.shape == (1,194,2,3)
test_dname = 'camvid-11'
output = tc.transform_predictions_test(input, test_dname)
output = output.squeeze() # NCHW -> CHW
prediction = torch.argmax(output, dim=0).numpy()
camvid_classnames = load_class_names(test_dname)
# Camvid should have predictions across 11 classes.
prediction_gt = np.zeros((2,3))
prediction_gt[0,:] = camvid_classnames.index('Sky')
prediction_gt[1,:] = camvid_classnames.index('Bicyclist')
assert np.allclose(prediction, prediction_gt)
def test_populate_linear_mapping1():
"""
Implement simple matrix multiplication as 1x1 convolutions in PyTorch.
[0] [1 0 1 0] [0]
[2] = [0 1 0 1] [1]
[2] [1 1 1 1] [0]
[1]
"""
# (j,i) tuples
inid2outid = [
(0,0),
(2,0),
(1,1),
(3,1),
(0,2),
(1,2),
(2,2),
(3,2)
]
in_channel = 4
out_channel = 3
conv = populate_linear_mapping(in_channel, out_channel, inid2outid)
x = np.array([0,1,0,1]).reshape(1,4,1,1).astype(np.float32)
x = torch.from_numpy(x)
y = conv(x)
y_gt = np.array([0,2,2]).reshape(1,3,1,1).astype(np.float32)
y_gt = torch.from_numpy(y_gt)
assert torch.allclose(y, y_gt)
def test_populate_linear_mapping2():
"""
Implement simple matrix multiplication as 1x1 convolutions in PyTorch.
[2] [1 0 1 0] [1]
[2] = [0 1 0 1] [1]
[4] [1 1 1 1] [1]
[1]
"""
# (j,i) tuples
inid2outid = [
(0,0),
(2,0),
(1,1),
(3,1),
(0,2),
(1,2),
(2,2),
(3,2)
]
in_channel = 4
out_channel = 3
conv = populate_linear_mapping(in_channel, out_channel, inid2outid)
x = torch.ones(1,4,1,1).type(torch.FloatTensor)
y = conv(x)
y_gt = np.array([2,2,4]).reshape(1,3,1,1).astype(np.float32)
y_gt = torch.from_numpy(y_gt)
assert torch.allclose(y, y_gt)
def test_populate_linear_mapping3():
"""
Implement simple matrix multiplication as 1x1 convolutions in PyTorch.
Consider the following example with universal predictions at a single px:
armchair, swivel chair -> sum up to chair
motorcycle -> motorcycle
bicyclist, motorcyclist -> sum up to rider
chair [0.3] [1 1 0 0 0] [0.0] armchair
motorcycle [0.1] = [0 0 1 0 0] [0.3] swivel_chair
rider [0.6] [0 0 0 1 1] [0.1] motorcycle
[0.1] bicyclist
[0.5] motorcyclist
"""
# (j,i) tuples
inid2outid = [
(0,0), # armchair -> chair
(1,0), # swivel_chair -> chair
(2,1), # motorcycle -> motorcycle
(3,2), # bicyclist -> rider
(4,2) # motorcyclist -> rider
]
in_channel = 5
out_channel = 3
conv = populate_linear_mapping(in_channel, out_channel, inid2outid)
x = np.array([0.0,0.3,0.1,0.1,0.5])
x = torch.from_numpy(x)
x = x.reshape(1,5,1,1).type(torch.FloatTensor)
y = conv(x)
y_gt = np.array([0.3, 0.1, 0.6]).reshape(1,3,1,1).astype(np.float32)
y_gt = torch.from_numpy(y_gt)
assert torch.allclose(y, y_gt)
def test_constructor_types():
""" """
tc = TaxonomyConverter()
for dname, conv in tc.convs.items():
assert isinstance(conv, torch.nn.Module)
def test_label_mapping_arrs():
""" """
tc = TaxonomyConverter()
train_idx = load_class_names('ade20k-150').index('minibike')
u_idx = get_universal_class_names().index('motorcycle')
assert tc.label_mapping_arr_dict['ade20k-150'][train_idx] == u_idx
train_idx = load_class_names('mapillary-public65').index('Bird')
u_idx = get_universal_class_names().index('bird')
assert tc.label_mapping_arr_dict['mapillary-public65'][train_idx] == u_idx
if __name__ == '__main__':
test_names_complete()
test_parse_entry_blank()
test_parse_entry_brackets1()
test_parse_entry_space_sep()
test_parse_uentry()
test_label_transform()
test_label_transform_unlabeled()
test_label_transform_unlabeled()
test_transform_predictions_test()
test_populate_linear_mapping1()
test_populate_linear_mapping2()
test_populate_linear_mapping3()
test_constructor_types()
test_label_mapping_arrs()
| [
"numpy.allclose",
"numpy.ones",
"pathlib.Path",
"mseg.utils.names_utils.load_class_names",
"mseg.taxonomy.taxonomy_converter.TaxonomyConverter",
"mseg.utils.tsv_utils.read_tsv_column_vals",
"mseg.utils.names_utils.get_universal_class_names",
"torch.from_numpy",
"mseg.taxonomy.taxonomy_converter.popu... | [((722, 795), 'mseg.utils.tsv_utils.read_tsv_column_vals', 'read_tsv_column_vals', (['tsv_fpath'], {'col_name': 'dname', 'convert_val_to_int': '(False)'}), '(tsv_fpath, col_name=dname, convert_val_to_int=False)\n', (742, 795), False, 'from mseg.utils.tsv_utils import read_tsv_column_vals\n'), ((985, 1008), 'mseg.utils.names_utils.load_class_names', 'load_class_names', (['dname'], {}), '(dname)\n', (1001, 1008), False, 'from mseg.utils.names_utils import load_class_names, get_universal_class_names, get_classname_to_dataloaderid_map\n'), ((2110, 2128), 'mseg.taxonomy.taxonomy_converter.parse_entry', 'parse_entry', (['entry'], {}), '(entry)\n', (2121, 2128), False, 'from mseg.taxonomy.taxonomy_converter import parse_entry, parse_uentry, parse_test_entry, TaxonomyConverter, populate_linear_mapping, RELABELED_TRAIN_DATASETS, UNRELABELED_TRAIN_DATASETS\n'), ((2284, 2302), 'mseg.taxonomy.taxonomy_converter.parse_entry', 'parse_entry', (['entry'], {}), '(entry)\n', (2295, 2302), False, 'from mseg.taxonomy.taxonomy_converter import parse_entry, parse_uentry, parse_test_entry, TaxonomyConverter, populate_linear_mapping, RELABELED_TRAIN_DATASETS, UNRELABELED_TRAIN_DATASETS\n'), ((2576, 2594), 'mseg.taxonomy.taxonomy_converter.parse_entry', 'parse_entry', (['entry'], {}), '(entry)\n', (2587, 2594), False, 'from mseg.taxonomy.taxonomy_converter import parse_entry, parse_uentry, parse_test_entry, TaxonomyConverter, populate_linear_mapping, RELABELED_TRAIN_DATASETS, UNRELABELED_TRAIN_DATASETS\n'), ((2705, 2725), 'mseg.taxonomy.taxonomy_converter.parse_uentry', 'parse_uentry', (['uentry'], {}), '(uentry)\n', (2717, 2725), False, 'from mseg.taxonomy.taxonomy_converter import parse_entry, parse_uentry, parse_test_entry, TaxonomyConverter, populate_linear_mapping, RELABELED_TRAIN_DATASETS, UNRELABELED_TRAIN_DATASETS\n'), ((2988, 3011), 'mseg.utils.names_utils.load_class_names', 'load_class_names', (['dname'], {}), '(dname)\n', (3004, 3011), 
False, 'from mseg.utils.names_utils import load_class_names, get_universal_class_names, get_classname_to_dataloaderid_map\n'), ((3068, 3087), 'mseg.taxonomy.taxonomy_converter.TaxonomyConverter', 'TaxonomyConverter', ([], {}), '()\n', (3085, 3087), False, 'from mseg.taxonomy.taxonomy_converter import parse_entry, parse_uentry, parse_test_entry, TaxonomyConverter, populate_linear_mapping, RELABELED_TRAIN_DATASETS, UNRELABELED_TRAIN_DATASETS\n'), ((3661, 3684), 'mseg.utils.names_utils.load_class_names', 'load_class_names', (['dname'], {}), '(dname)\n', (3677, 3684), False, 'from mseg.utils.names_utils import load_class_names, get_universal_class_names, get_classname_to_dataloaderid_map\n'), ((3696, 3765), 'mseg.utils.names_utils.get_classname_to_dataloaderid_map', 'get_classname_to_dataloaderid_map', (['dname'], {'include_ignore_idx_cls': '(True)'}), '(dname, include_ignore_idx_cls=True)\n', (3729, 3765), False, 'from mseg.utils.names_utils import load_class_names, get_universal_class_names, get_classname_to_dataloaderid_map\n'), ((3809, 3828), 'mseg.taxonomy.taxonomy_converter.TaxonomyConverter', 'TaxonomyConverter', ([], {}), '()\n', (3826, 3828), False, 'from mseg.taxonomy.taxonomy_converter import parse_entry, parse_uentry, parse_test_entry, TaxonomyConverter, populate_linear_mapping, RELABELED_TRAIN_DATASETS, UNRELABELED_TRAIN_DATASETS\n'), ((4547, 4574), 'mseg.utils.names_utils.get_universal_class_names', 'get_universal_class_names', ([], {}), '()\n', (4572, 4574), False, 'from mseg.utils.names_utils import load_class_names, get_universal_class_names, get_classname_to_dataloaderid_map\n'), ((4710, 4729), 'mseg.taxonomy.taxonomy_converter.TaxonomyConverter', 'TaxonomyConverter', ([], {}), '()\n', (4727, 4729), False, 'from mseg.taxonomy.taxonomy_converter import parse_entry, parse_uentry, parse_test_entry, TaxonomyConverter, populate_linear_mapping, RELABELED_TRAIN_DATASETS, UNRELABELED_TRAIN_DATASETS\n'), ((4739, 4760), 'numpy.zeros', 'np.zeros', (['(194, 2, 
3)'], {}), '((194, 2, 3))\n', (4747, 4760), True, 'import numpy as np\n'), ((4957, 4980), 'torch.from_numpy', 'torch.from_numpy', (['input'], {}), '(input)\n', (4973, 4980), False, 'import torch\n'), ((5265, 5293), 'mseg.utils.names_utils.load_class_names', 'load_class_names', (['test_dname'], {}), '(test_dname)\n', (5281, 5293), False, 'from mseg.utils.names_utils import load_class_names, get_universal_class_names, get_classname_to_dataloaderid_map\n'), ((5364, 5380), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (5372, 5380), True, 'import numpy as np\n'), ((5500, 5538), 'numpy.allclose', 'np.allclose', (['prediction', 'prediction_gt'], {}), '(prediction, prediction_gt)\n', (5511, 5538), True, 'import numpy as np\n'), ((5893, 5953), 'mseg.taxonomy.taxonomy_converter.populate_linear_mapping', 'populate_linear_mapping', (['in_channel', 'out_channel', 'inid2outid'], {}), '(in_channel, out_channel, inid2outid)\n', (5916, 5953), False, 'from mseg.taxonomy.taxonomy_converter import parse_entry, parse_uentry, parse_test_entry, TaxonomyConverter, populate_linear_mapping, RELABELED_TRAIN_DATASETS, UNRELABELED_TRAIN_DATASETS\n'), ((6021, 6040), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (6037, 6040), False, 'import torch\n'), ((6124, 6146), 'torch.from_numpy', 'torch.from_numpy', (['y_gt'], {}), '(y_gt)\n', (6140, 6146), False, 'import torch\n'), ((6155, 6178), 'torch.allclose', 'torch.allclose', (['y', 'y_gt'], {}), '(y, y_gt)\n', (6169, 6178), False, 'import torch\n'), ((6533, 6593), 'mseg.taxonomy.taxonomy_converter.populate_linear_mapping', 'populate_linear_mapping', (['in_channel', 'out_channel', 'inid2outid'], {}), '(in_channel, out_channel, inid2outid)\n', (6556, 6593), False, 'from mseg.taxonomy.taxonomy_converter import parse_entry, parse_uentry, parse_test_entry, TaxonomyConverter, populate_linear_mapping, RELABELED_TRAIN_DATASETS, UNRELABELED_TRAIN_DATASETS\n'), ((6727, 6749), 'torch.from_numpy', 'torch.from_numpy', (['y_gt'], 
{}), '(y_gt)\n', (6743, 6749), False, 'import torch\n'), ((6758, 6781), 'torch.allclose', 'torch.allclose', (['y', 'y_gt'], {}), '(y, y_gt)\n', (6772, 6781), False, 'import torch\n'), ((7580, 7640), 'mseg.taxonomy.taxonomy_converter.populate_linear_mapping', 'populate_linear_mapping', (['in_channel', 'out_channel', 'inid2outid'], {}), '(in_channel, out_channel, inid2outid)\n', (7603, 7640), False, 'from mseg.taxonomy.taxonomy_converter import parse_entry, parse_uentry, parse_test_entry, TaxonomyConverter, populate_linear_mapping, RELABELED_TRAIN_DATASETS, UNRELABELED_TRAIN_DATASETS\n'), ((7647, 7682), 'numpy.array', 'np.array', (['[0.0, 0.3, 0.1, 0.1, 0.5]'], {}), '([0.0, 0.3, 0.1, 0.1, 0.5])\n', (7655, 7682), True, 'import numpy as np\n'), ((7684, 7703), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (7700, 7703), False, 'import torch\n'), ((7843, 7865), 'torch.from_numpy', 'torch.from_numpy', (['y_gt'], {}), '(y_gt)\n', (7859, 7865), False, 'import torch\n'), ((7874, 7897), 'torch.allclose', 'torch.allclose', (['y', 'y_gt'], {}), '(y, y_gt)\n', (7888, 7897), False, 'import torch\n'), ((7945, 7964), 'mseg.taxonomy.taxonomy_converter.TaxonomyConverter', 'TaxonomyConverter', ([], {}), '()\n', (7962, 7964), False, 'from mseg.taxonomy.taxonomy_converter import parse_entry, parse_uentry, parse_test_entry, TaxonomyConverter, populate_linear_mapping, RELABELED_TRAIN_DATASETS, UNRELABELED_TRAIN_DATASETS\n'), ((8093, 8112), 'mseg.taxonomy.taxonomy_converter.TaxonomyConverter', 'TaxonomyConverter', ([], {}), '()\n', (8110, 8112), False, 'from mseg.taxonomy.taxonomy_converter import parse_entry, parse_uentry, parse_test_entry, TaxonomyConverter, populate_linear_mapping, RELABELED_TRAIN_DATASETS, UNRELABELED_TRAIN_DATASETS\n'), ((1059, 1074), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1072, 1074), False, 'import pdb\n'), ((3130, 3146), 'torch.ones', 'torch.ones', (['(4)', '(4)'], {}), '(4, 4)\n', (3140, 3146), False, 'import torch\n'), ((3871, 3887), 
'torch.ones', 'torch.ones', (['(4)', '(4)'], {}), '(4, 4)\n', (3881, 3887), False, 'import torch\n'), ((947, 965), 'mseg.taxonomy.taxonomy_converter.parse_entry', 'parse_entry', (['entry'], {}), '(entry)\n', (958, 965), False, 'from mseg.taxonomy.taxonomy_converter import parse_entry, parse_uentry, parse_test_entry, TaxonomyConverter, populate_linear_mapping, RELABELED_TRAIN_DATASETS, UNRELABELED_TRAIN_DATASETS\n'), ((3301, 3328), 'mseg.utils.names_utils.get_universal_class_names', 'get_universal_class_names', ([], {}), '()\n', (3326, 3328), False, 'from mseg.utils.names_utils import load_class_names, get_universal_class_names, get_classname_to_dataloaderid_map\n'), ((5207, 5234), 'torch.argmax', 'torch.argmax', (['output'], {'dim': '(0)'}), '(output, dim=0)\n', (5219, 5234), False, 'import torch\n'), ((6600, 6622), 'torch.ones', 'torch.ones', (['(1)', '(4)', '(1)', '(1)'], {}), '(1, 4, 1, 1)\n', (6610, 6622), False, 'import torch\n'), ((8126, 8156), 'mseg.utils.names_utils.load_class_names', 'load_class_names', (['"""ade20k-150"""'], {}), "('ade20k-150')\n", (8142, 8156), False, 'from mseg.utils.names_utils import load_class_names, get_universal_class_names, get_classname_to_dataloaderid_map\n'), ((8184, 8211), 'mseg.utils.names_utils.get_universal_class_names', 'get_universal_class_names', ([], {}), '()\n', (8209, 8211), False, 'from mseg.utils.names_utils import load_class_names, get_universal_class_names, get_classname_to_dataloaderid_map\n'), ((8314, 8352), 'mseg.utils.names_utils.load_class_names', 'load_class_names', (['"""mapillary-public65"""'], {}), "('mapillary-public65')\n", (8330, 8352), False, 'from mseg.utils.names_utils import load_class_names, get_universal_class_names, get_classname_to_dataloaderid_map\n'), ((8376, 8403), 'mseg.utils.names_utils.get_universal_class_names', 'get_universal_class_names', ([], {}), '()\n', (8401, 8403), False, 'from mseg.utils.names_utils import load_class_names, get_universal_class_names, 
get_classname_to_dataloaderid_map\n'), ((472, 486), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (476, 486), False, 'from pathlib import Path\n'), ((3365, 3380), 'numpy.ones', 'np.ones', (['(4, 4)'], {}), '((4, 4))\n', (3372, 3380), True, 'import numpy as np\n'), ((4069, 4084), 'numpy.ones', 'np.ones', (['(4, 4)'], {}), '((4, 4))\n', (4076, 4084), True, 'import numpy as np\n'), ((5960, 5982), 'numpy.array', 'np.array', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (5968, 5982), True, 'import numpy as np\n'), ((6062, 6081), 'numpy.array', 'np.array', (['[0, 2, 2]'], {}), '([0, 2, 2])\n', (6070, 6081), True, 'import numpy as np\n'), ((6665, 6684), 'numpy.array', 'np.array', (['[2, 2, 4]'], {}), '([2, 2, 4])\n', (6673, 6684), True, 'import numpy as np\n'), ((7773, 7798), 'numpy.array', 'np.array', (['[0.3, 0.1, 0.6]'], {}), '([0.3, 0.1, 0.6])\n', (7781, 7798), True, 'import numpy as np\n')] |
from unittest import TestCase
import numpy as np
from numpy.testing import assert_allclose
from statsmodels.regression.linear_model import WLS
from statsmodels.regression._tools import _MinimalWLS
class TestMinimalWLS(TestCase):
    """Verify that _MinimalWLS reproduces WLS parameters and residuals."""

    @classmethod
    def setUpClass(cls):
        # Deterministic fixtures: two regression problems, each with an
        # optional non-trivial weight vector.
        rs = np.random.RandomState(1234)
        cls.exog1 = rs.randn(200, 5)
        cls.endog1 = cls.exog1.sum(1) + rs.randn(200)
        cls.weights1 = 1.0 + np.sin(np.arange(200.0) / 100.0 * np.pi)
        cls.exog2 = rs.randn(50, 1)
        cls.endog2 = 0.3 * cls.exog2.ravel() + rs.randn(50)
        cls.weights2 = 1.0 + np.log(np.arange(1.0, 51.0))

    def test_equivalence_with_wls(self):
        # One parametrized loop instead of four copy-pasted blocks:
        # (endog, exog, weights) with weights=None meaning unweighted.
        cases = [
            (self.endog1, self.exog1, None),
            (self.endog2, self.exog2, None),
            (self.endog1, self.exog1, self.weights1),
            (self.endog2, self.exog2, self.weights2),
        ]
        for endog, exog, weights in cases:
            # Omit the kwarg entirely when unweighted so WLS keeps its
            # default weights instead of receiving None.
            kwargs = {} if weights is None else {'weights': weights}
            res = WLS(endog, exog, **kwargs).fit()
            minres = _MinimalWLS(endog, exog, **kwargs).fit()
            assert_allclose(res.params, minres.params)
            assert_allclose(res.resid, minres.resid)
| [
"statsmodels.regression.linear_model.WLS",
"numpy.arange",
"numpy.testing.assert_allclose",
"statsmodels.regression._tools._MinimalWLS",
"numpy.random.RandomState"
] | [((287, 314), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (308, 314), True, 'import numpy as np\n'), ((782, 824), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.params', 'minres.params'], {}), '(res.params, minres.params)\n', (797, 824), False, 'from numpy.testing import assert_allclose\n'), ((833, 873), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.resid', 'minres.resid'], {}), '(res.resid, minres.resid)\n', (848, 873), False, 'from numpy.testing import assert_allclose\n'), ((992, 1034), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.params', 'minres.params'], {}), '(res.params, minres.params)\n', (1007, 1034), False, 'from numpy.testing import assert_allclose\n'), ((1043, 1083), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.resid', 'minres.resid'], {}), '(res.resid, minres.resid)\n', (1058, 1083), False, 'from numpy.testing import assert_allclose\n'), ((1248, 1290), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.params', 'minres.params'], {}), '(res.params, minres.params)\n', (1263, 1290), False, 'from numpy.testing import assert_allclose\n'), ((1299, 1339), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.resid', 'minres.resid'], {}), '(res.resid, minres.resid)\n', (1314, 1339), False, 'from numpy.testing import assert_allclose\n'), ((1504, 1546), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.params', 'minres.params'], {}), '(res.params, minres.params)\n', (1519, 1546), False, 'from numpy.testing import assert_allclose\n'), ((1555, 1595), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.resid', 'minres.resid'], {}), '(res.resid, minres.resid)\n', (1570, 1595), False, 'from numpy.testing import assert_allclose\n'), ((602, 622), 'numpy.arange', 'np.arange', (['(1.0)', '(51.0)'], {}), '(1.0, 51.0)\n', (611, 622), True, 'import numpy as np\n'), ((679, 707), 'statsmodels.regression.linear_model.WLS', 'WLS', (['self.endog1', 
'self.exog1'], {}), '(self.endog1, self.exog1)\n', (682, 707), False, 'from statsmodels.regression.linear_model import WLS\n'), ((731, 767), 'statsmodels.regression._tools._MinimalWLS', '_MinimalWLS', (['self.endog1', 'self.exog1'], {}), '(self.endog1, self.exog1)\n', (742, 767), False, 'from statsmodels.regression._tools import _MinimalWLS\n'), ((889, 917), 'statsmodels.regression.linear_model.WLS', 'WLS', (['self.endog2', 'self.exog2'], {}), '(self.endog2, self.exog2)\n', (892, 917), False, 'from statsmodels.regression.linear_model import WLS\n'), ((941, 977), 'statsmodels.regression._tools._MinimalWLS', '_MinimalWLS', (['self.endog2', 'self.exog2'], {}), '(self.endog2, self.exog2)\n', (952, 977), False, 'from statsmodels.regression._tools import _MinimalWLS\n'), ((1099, 1150), 'statsmodels.regression.linear_model.WLS', 'WLS', (['self.endog1', 'self.exog1'], {'weights': 'self.weights1'}), '(self.endog1, self.exog1, weights=self.weights1)\n', (1102, 1150), False, 'from statsmodels.regression.linear_model import WLS\n'), ((1174, 1233), 'statsmodels.regression._tools._MinimalWLS', '_MinimalWLS', (['self.endog1', 'self.exog1'], {'weights': 'self.weights1'}), '(self.endog1, self.exog1, weights=self.weights1)\n', (1185, 1233), False, 'from statsmodels.regression._tools import _MinimalWLS\n'), ((1355, 1406), 'statsmodels.regression.linear_model.WLS', 'WLS', (['self.endog2', 'self.exog2'], {'weights': 'self.weights2'}), '(self.endog2, self.exog2, weights=self.weights2)\n', (1358, 1406), False, 'from statsmodels.regression.linear_model import WLS\n'), ((1430, 1489), 'statsmodels.regression._tools._MinimalWLS', '_MinimalWLS', (['self.endog2', 'self.exog2'], {'weights': 'self.weights2'}), '(self.endog2, self.exog2, weights=self.weights2)\n', (1441, 1489), False, 'from statsmodels.regression._tools import _MinimalWLS\n'), ((441, 457), 'numpy.arange', 'np.arange', (['(200.0)'], {}), '(200.0)\n', (450, 457), True, 'import numpy as np\n')] |
import torch
from torch.nn import functional as F
from torch.autograd import Variable
import torch.nn as nn
import math
import numpy as np
from ..config import eps
class ConvBlock4(torch.nn.Module):
    """Conv2d -> BatchNorm2d -> LeakyReLU building block.

    The convolution weight is re-initialized with Xavier-uniform scaled for
    leaky-ReLU activations.
    """

    def __init__(self, inpt_kernel, output_kernel, kernel_size=4, stride=1, padding=0):
        super().__init__()
        # Module creation order is kept stable so RNG-dependent weight
        # initialization reproduces the original exactly.
        self.conv = nn.Conv2d(
            in_channels=inpt_kernel,
            out_channels=output_kernel,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
        )
        self.bn = nn.BatchNorm2d(output_kernel)
        self.act = nn.LeakyReLU(inplace=True)
        nn.init.xavier_uniform_(
            self.conv.weight, gain=nn.init.calculate_gain('leaky_relu')
        )

    def forward(self, x):
        """Apply convolution, batch normalization, then leaky-ReLU."""
        return self.act(self.bn(self.conv(x)))
class DeconvBlock4(torch.nn.Module):
    """ConvTranspose2d -> BatchNorm2d -> LeakyReLU building block.

    Mirror of ConvBlock4 for the decoder path; the transposed-convolution
    weight is re-initialized with Xavier-uniform scaled for leaky-ReLU.
    """

    def __init__(self, inpt_kernel, output_kernel, kernel_size=4, stride=1, padding=0):
        super().__init__()
        # Module creation order is kept stable so RNG-dependent weight
        # initialization reproduces the original exactly.
        self.deconv = nn.ConvTranspose2d(
            in_channels=inpt_kernel,
            out_channels=output_kernel,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
        )
        self.bn = nn.BatchNorm2d(output_kernel)
        self.act = nn.LeakyReLU(inplace=True)
        nn.init.xavier_uniform_(
            self.deconv.weight, gain=nn.init.calculate_gain('leaky_relu')
        )

    def forward(self, x):
        """Apply transposed convolution, batch normalization, then leaky-ReLU."""
        return self.act(self.bn(self.deconv(x)))
class VAE5(nn.Module):
    """
    VAE. Vector Quantised Variational Auto-Encoder.
    Refs:
    - https://github.com/nakosung/VQ-VAE/blob/master/model.py
    - https://github.com/JunhongXu/world-models-pytorch/blob/master/vae.py
    """
    def __init__(self, image_size=64, z_dim=32, conv_dim=64, code_dim=16, k_dim=256, channels=3):
        """
        Args:
        - image_size (int) height and weight of image
        - conv_dim (int) the amound of output channels in the first conv layer (all others are multiples)
        - z_dim (int) the channels in the encoded output
        - code_dim (int) the height and width in the encoded output
        - k_dim (int) dimensions of the latent vector
        """
        super().__init__()
        self.k_dim = k_dim
        self.z_dim = z_dim
        self.code_dim = code_dim
        hidden_size = z_dim * code_dim * code_dim
        latent_vector_dim = k_dim
        # Heads mapping the flattened encoder output to the latent distribution
        # parameters, and back from a latent sample to the decoder input.
        self.logvar = nn.Linear(hidden_size, latent_vector_dim)
        self.mu = nn.Linear(hidden_size, latent_vector_dim)
        self.z = nn.Linear(latent_vector_dim, hidden_size)
        nn.init.xavier_uniform_(self.logvar.weight)
        nn.init.xavier_uniform_(self.mu.weight)
        nn.init.xavier_uniform_(self.z.weight)
        # Encoder (increasing #filter linearly): halve the spatial size
        # log2(image_size / code_dim) times.
        layers = []
        layers.append(ConvBlock4(channels, conv_dim, kernel_size=3, padding=1))
        repeat_num = int(math.log2(image_size / code_dim))
        curr_dim = conv_dim
        for i in range(repeat_num):
            layers.append(ConvBlock4(curr_dim, conv_dim * (i + 2), kernel_size=4, stride=2, padding=1))
            curr_dim = conv_dim * (i + 2)
        # Now we have (code_dim,code_dim,curr_dim); project to z_dim channels.
        layers.append(nn.Conv2d(curr_dim, z_dim, kernel_size=1))
        # (code_dim,code_dim,z_dim)
        self.encoder = nn.Sequential(*layers)
        # Decoder mirrors the encoder, shrinking the channel count back down.
        layers = []
        layers.append(DeconvBlock4(z_dim, curr_dim, kernel_size=1))
        for i in reversed(range(repeat_num)):
            layers.append(DeconvBlock4(curr_dim, conv_dim * (i + 1), kernel_size=4, stride=2, padding=1))
            curr_dim = conv_dim * (i + 1)
        layers.append(nn.Conv2d(curr_dim, channels, kernel_size=3, padding=1))
        self.decoder = nn.Sequential(*layers)
        self.sigmoid = nn.Sigmoid()
    def forward(self, x):
        """Returns reconstructed image, mean, and log variance."""
        mu, logvar = self.encode(x)
        z = self.sample(mu, logvar)
        x = self.decode(z)
        return x, mu, logvar
    def encode(self, x):
        """Returns mean and log variance, which describe the distributions of Z"""
        x = self.encoder(x)
        x = x.view(x.size()[0], -1)
        # Clamp logvar to [log(eps), -log(eps)] for numerical stability.
        return self.mu(x), self.logvar(x).clamp(np.log(eps), -np.log(eps))
    def decode(self, z):
        """Reconstruct image X using z sampled from Z."""
        z = self.z(z)
        n, d = z.size()
        z = z.view(n, -1, self.code_dim, self.code_dim)
        reconstruction = self.decoder(z)
        reconstruction = self.sigmoid(reconstruction)
        return reconstruction
    def sample(self, mu, logvar):
        """Sample z from Z via the reparameterization trick (training only)."""
        if self.training:
            # BUGFIX: `logvar` is the log *variance* (loss_function_vae uses
            # logvar.exp() as sigma^2), so std = exp(0.5 * logvar), not
            # exp(logvar). torch.randn_like replaces the legacy Variable noise.
            std = (0.5 * logvar).exp()
            return mu + std * torch.randn_like(std)
        else:
            return mu
    def loss(self, *args, **kwargs):
        # Delegates to the module-level loss_function_vae defined below.
        return loss_function_vae(*args, **kwargs)
def loss_function_vae(recon_x, x, mu, logvar):
    """Per-sample reconstruction and KL-divergence losses for a VAE.

    Reconstruction + KL divergence losses summed over all elements, per batch
    item (not reduced over the batch).
    https://github.com/pytorch/examples/blob/master/vae/main.py

    Args:
        recon_x: reconstructed batch, shape (n, c, h, w)
        x: target batch, same shape as recon_x
        mu: latent means, shape (n, k)
        logvar: latent log variances, shape (n, k)

    Returns:
        (loss_recon, loss_KLD): two tensors of shape (n,), one value per sample.
    """
    n = recon_x.size(0)
    recon_x = recon_x.view(n, -1)
    x = x.view(n, -1)
    # L2 distance per sample. `reduction='none'` replaces the deprecated
    # `reduce=False` argument (same element-wise behavior, no warning).
    loss_recon = F.mse_loss(x, recon_x, reduction='none').sum(1)
    # see Appendix B from VAE paper:
    # Kingma & Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    loss_KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), 1)
    return loss_recon, loss_KLD
| [
"torch.nn.BatchNorm2d",
"torch.nn.Sigmoid",
"torch.nn.functional.mse_loss",
"torch.nn.LeakyReLU",
"torch.nn.init.xavier_uniform_",
"torch.nn.Sequential",
"numpy.log",
"math.log2",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.init.calculate_gain",
"torch.nn.ConvTranspose2d"
] | [((336, 460), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'inpt_kernel', 'out_channels': 'output_kernel', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=inpt_kernel, out_channels=output_kernel, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (345, 460), True, 'import torch.nn as nn\n'), ((474, 503), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['output_kernel'], {}), '(output_kernel)\n', (488, 503), True, 'import torch.nn as nn\n'), ((523, 549), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (535, 549), True, 'import torch.nn as nn\n'), ((605, 641), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""leaky_relu"""'], {}), "('leaky_relu')\n", (627, 641), True, 'import torch.nn as nn\n'), ((650, 702), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.conv.weight'], {'gain': 'gain'}), '(self.conv.weight, gain=gain)\n', (673, 702), True, 'import torch.nn as nn\n'), ((1021, 1153), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': 'inpt_kernel', 'out_channels': 'output_kernel', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=inpt_kernel, out_channels=output_kernel,\n kernel_size=kernel_size, stride=stride, padding=padding)\n', (1039, 1153), True, 'import torch.nn as nn\n'), ((1168, 1197), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['output_kernel'], {}), '(output_kernel)\n', (1182, 1197), True, 'import torch.nn as nn\n'), ((1217, 1243), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1229, 1243), True, 'import torch.nn as nn\n'), ((1299, 1335), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""leaky_relu"""'], {}), "('leaky_relu')\n", (1321, 1335), True, 'import torch.nn as nn\n'), ((1344, 1398), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.deconv.weight'], {'gain': 'gain'}), '(self.deconv.weight, 
gain=gain)\n', (1367, 1398), True, 'import torch.nn as nn\n'), ((2482, 2523), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'latent_vector_dim'], {}), '(hidden_size, latent_vector_dim)\n', (2491, 2523), True, 'import torch.nn as nn\n'), ((2542, 2583), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'latent_vector_dim'], {}), '(hidden_size, latent_vector_dim)\n', (2551, 2583), True, 'import torch.nn as nn\n'), ((2601, 2642), 'torch.nn.Linear', 'nn.Linear', (['latent_vector_dim', 'hidden_size'], {}), '(latent_vector_dim, hidden_size)\n', (2610, 2642), True, 'import torch.nn as nn\n'), ((2652, 2695), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.logvar.weight'], {}), '(self.logvar.weight)\n', (2675, 2695), True, 'import torch.nn as nn\n'), ((2704, 2743), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.mu.weight'], {}), '(self.mu.weight)\n', (2727, 2743), True, 'import torch.nn as nn\n'), ((2752, 2790), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.z.weight'], {}), '(self.z.weight)\n', (2775, 2790), True, 'import torch.nn as nn\n'), ((3387, 3409), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (3400, 3409), True, 'import torch.nn as nn\n'), ((3845, 3867), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (3858, 3867), True, 'import torch.nn as nn\n'), ((3892, 3904), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (3902, 3904), True, 'import torch.nn as nn\n'), ((2966, 2998), 'math.log2', 'math.log2', (['(image_size / code_dim)'], {}), '(image_size / code_dim)\n', (2975, 2998), False, 'import math\n'), ((3284, 3325), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', 'z_dim'], {'kernel_size': '(1)'}), '(curr_dim, z_dim, kernel_size=1)\n', (3293, 3325), True, 'import torch.nn as nn\n'), ((3765, 3820), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', 'channels'], {'kernel_size': '(3)', 'padding': '(1)'}), '(curr_dim, channels, kernel_size=3, padding=1)\n', 
(3774, 3820), True, 'import torch.nn as nn\n'), ((5349, 5385), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['x', 'recon_x'], {'reduce': '(False)'}), '(x, recon_x, reduce=False)\n', (5359, 5385), True, 'from torch.nn import functional as F\n'), ((4348, 4359), 'numpy.log', 'np.log', (['eps'], {}), '(eps)\n', (4354, 4359), True, 'import numpy as np\n'), ((4362, 4373), 'numpy.log', 'np.log', (['eps'], {}), '(eps)\n', (4368, 4373), True, 'import numpy as np\n')] |
import math
from typing import Optional, Sequence, Tuple, Union

import numpy as np
import pandas as pd
from sklearn.metrics import explained_variance_score, r2_score
from sklearn.metrics._classification import _check_targets
from sklearn.metrics._regression import _check_reg_targets
# Regression
def adjusted_r2_score(
    y_true: Union[Sequence[float], np.ndarray, pd.Series],
    y_pred: Union[Sequence[float], np.ndarray, pd.Series],
    features_vector: Optional[Union[Sequence[str], np.ndarray, pd.Series]] = None,
    num_features: Optional[int] = None,
) -> float:
    """Calculates an adjusted R-squared that penalizes models that use too many superfluous features

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem
    features_vector: list or array like, default=None
        A list of all features used for our model.
    num_features: int, default=None
        The number of features used for our model. Used if features_vector is None

    Returns
    -------
    Our adjusted R-squared.
    """
    y_problem, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput="raw_values"
    )
    # BUGFIX: compare against None explicitly -- `if features_vector:` raises
    # "truth value is ambiguous" for ndarray/Series inputs, which the
    # annotation explicitly allows.
    if features_vector is not None:
        p = len(features_vector)
    elif num_features is not None:
        p = num_features
    else:
        raise Exception("No features available to calculate adjusted score")
    n = len(y_true)
    if p >= n - 1:
        raise Exception(
            "Number of features is greater than number of rows and 1 degree of freedom"
        )
    if p < 1:
        raise Exception("Cannot have less than one feature")
    # Floor negative R^2 at zero so the adjustment cannot reward a useless
    # model; compute r2_score once instead of twice.
    r2 = max(r2_score(y_true, y_pred), 0)
    return 1 - (1 - r2) * (n - 1) / (n - p - 1)
def adjusted_explained_variance_score(
    y_true: Union[Sequence[float], np.ndarray, pd.Series],
    y_pred: Union[Sequence[float], np.ndarray, pd.Series],
    features_vector: Optional[Union[Sequence[str], np.ndarray, pd.Series]] = None,
    num_features: Optional[int] = None,
) -> float:
    """Calculates an adjusted explained_variance_score that penalizes models that use too many superfluous features

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem
    features_vector: list or array like, default=None
        A list of all features used for our model.
    num_features: int, default=None
        The number of features used for our model. Used if features_vector is None

    Returns
    -------
    Our adjusted explained_variance_score.
    """
    y_problem, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput="raw_values"
    )
    # BUGFIX: compare against None explicitly -- `if features_vector:` raises
    # "truth value is ambiguous" for ndarray/Series inputs, which the
    # annotation explicitly allows.
    if features_vector is not None:
        p = len(features_vector)
    elif num_features is not None:
        p = num_features
    else:
        raise Exception("No features available to calculate adjusted score")
    n = len(y_true)
    if p >= n - 1:
        raise Exception(
            "Number of features is greater than number of rows and 1 degree of freedom"
        )
    if p < 1:
        raise Exception("Cannot have less than one feature")
    # Floor negative scores at zero so the adjustment cannot reward a useless
    # model; compute explained_variance_score once instead of twice.
    evs = max(explained_variance_score(y_true, y_pred), 0)
    return 1 - (1 - evs) * (n - 1) / (n - p - 1)
def mape_score(
    y_true: Union[Sequence[float], np.ndarray, pd.Series],
    y_pred: Union[Sequence[float], np.ndarray, pd.Series],
) -> float:
    """Calculates the Mean Absolute Percentage Error, a common metric used for Time Series Problems

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem

    Returns
    -------
    Our MAPE score
    """
    _, true_vals, pred_vals, _ = _check_reg_targets(
        y_true, y_pred, multioutput="raw_values"
    )
    # MAPE is undefined when any true value is exactly zero.
    if 0 in true_vals:
        raise Exception("Cannot divide by zero")
    pct_errors = np.abs((true_vals - pred_vals) / true_vals)
    return np.mean(pct_errors) * 100
def smape_score(
    y_true: Union[Sequence[float], np.ndarray, pd.Series],
    y_pred: Union[Sequence[float], np.ndarray, pd.Series],
) -> float:
    """Calculates the Symmetric Mean Absolute Percentage Error. Used when there are zeros in our y_true that would cause
    MAPE to be undefined.

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem

    Returns
    -------
    Our SMAPE score
    """
    _, true_vals, pred_vals, _ = _check_reg_targets(
        y_true, y_pred, multioutput="raw_values"
    )
    # Symmetric denominator: |y| + |y_hat| per observation.
    numerator = np.abs(true_vals - pred_vals)
    denominator = np.abs(true_vals) + np.abs(pred_vals)
    return 100 * np.sum(numerator / denominator) / len(numerator)
def root_mean_squared_error(
    y_true: Union[Sequence[float], np.ndarray, pd.Series],
    y_pred: Union[Sequence[float], np.ndarray, pd.Series],
) -> float:
    """Calculates the Root Mean Squared Error for regression problems.

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem

    Returns
    -------
    Our RMSE score
    """
    _, true_vals, pred_vals, _ = _check_reg_targets(
        y_true, y_pred, multioutput="raw_values"
    )
    squared_errors = (true_vals - pred_vals) ** 2
    return math.sqrt(np.mean(squared_errors))
def group_mean_log_mae(
    y_true: Union[pd.DataFrame, pd.Series, Sequence, np.ndarray],
    y_pred: Union[pd.DataFrame, pd.Series, Sequence, np.ndarray],
    groups: Union[Sequence, np.ndarray, pd.Series],
    floor: float = 1e-9,
) -> float:
    """Calculates the Group Mean Log Mean Absolute Error. Used in a Kaggle competition.

    Parameters
    ----------
    y_true: list or array-like
        The true, or the expected, values of our problem; along with the group attached
    y_pred: list or array-like
        The predicted values of our problem; along with the group attached
    groups: list or array like
        What our data is being grouped by.
    floor: float, default=1e-9
        The minimum value our Group Mean Log MAE can be (as 0 is undefined for log transformations).

    Returns
    -------
    Our Group Mean Log MAE score
    """
    _, true_arr, pred_arr, _ = _check_reg_targets(
        y_true, y_pred, multioutput="raw_values"
    )
    # _check_reg_targets returns column vectors; flatten them into Series so
    # we can group with pandas.
    true_series = pd.Series([row[0] for row in true_arr])
    pred_series = pd.Series([row[0] for row in pred_arr])
    group_maes = (true_series - pred_series).abs().groupby(groups).mean()
    # Clip at `floor` before the log so a perfect group does not yield -inf.
    return np.log(group_maes.map(lambda mae: max(mae, floor))).mean()
# Classification
def get_classification_labels(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> Tuple[int, int, int, int]:
    """Calculates the true positive, false positive, false negative and true negative values for a classification
    problem.

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem

    Returns
    -------
    The true positive, false positive, false negative and true negative values for our classification problem
    """
    # NOTE: the return annotation must be Tuple[...] -- subscripting
    # typing.Sequence with four parameters raises TypeError at import time.
    problem_true, y_true, y_pred = _check_targets(y_true, y_pred)
    if len(np.unique(y_true)) > 2:
        raise Exception("We have more than two classes for a Binary problem")
    if len(np.unique(y_pred)) > 2:
        raise Exception("We have more than two classes for a Binary problem")
    labels = sorted(np.unique(y_true))
    label_1 = labels[1]
    label_0 = labels[0]
    # Use `&` consistently for the element-wise conjunctions (the original
    # mixed in a `*`, which relies on bool multiplication).
    true_positive = len(np.where((y_true == label_1) & (y_pred == label_1))[0])
    false_positive = len(np.where((y_true == label_0) & (y_pred == label_1))[0])
    false_negative = len(np.where((y_true == label_1) & (y_pred == label_0))[0])
    true_negative = len(np.where((y_true == label_0) & (y_pred == label_0))[0])
    return true_positive, false_positive, false_negative, true_negative
def specificity_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
    problem: str = "Binary",
    positive_class: Union[str, int] = None,
) -> float:
    """Calculates the specificity of a classification problem

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem
    problem: {'binary', 'multiclass'}
        Whether our problem is a binary classification or a multiclassification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'succcess' or 'positive' (i.e., the one marked as a 1).

    Returns
    -------
    The specificity score: tn / (tn + fp)
    """
    problem_true, y_true, y_pred = _check_targets(y_true, y_pred)
    if problem.casefold() == "binary":
        tp, fp, fn, tn = get_classification_labels(y_true, y_pred)
    elif problem.casefold() == "multiclass":
        # BUGFIX: test against None explicitly -- a class label of 0 is falsy
        # but perfectly valid (average_specificity_score iterates all labels).
        if positive_class is None:
            raise ValueError("Cannot calculate specificity score with None")
        if not isinstance(positive_class, (str, int)):
            raise TypeError("Cannot discern positive class for multiclass problem")
        # One-vs-rest encoding: mark the positive class as 1, all others 0.
        new_y_true = np.where(y_true == positive_class, 1, 0)
        new_y_pred = np.where(y_pred == positive_class, 1, 0)
        tp, fp, fn, tn = get_classification_labels(new_y_true, new_y_pred)
    else:
        raise ValueError("Cannot determine problem type")
    return tn / (tn + fp)
def average_specificity_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
    """Calculates the average specificty score. Used for when we have more than 2 classes and want our models' average
    performance for each class

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem

    Returns
    -------
    The average of each specificity score for each group/class
    """
    unique_classes = np.unique(y_true)
    # Two or fewer classes: the plain binary score already applies.
    if unique_classes.size < 3:
        return specificity_score(y_true, y_pred)
    per_class = [
        specificity_score(y_true, y_pred, problem="multiclass", positive_class=cls)
        for cls in unique_classes
    ]
    return sum(per_class) / len(per_class)
def sensitivity_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
    problem: str = "Binary",
    positive_class: Union[str, int] = None,
) -> float:
    """This is exactly the same as recall

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclassification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'succcess' or 'positive' (i.e., the one marked as a 1).

    Returns
    -------
    The sensitivity score: tp / (tp + fn)
    """
    problem_true, y_true, y_pred = _check_targets(y_true, y_pred)
    if problem.casefold() == "binary":
        tp, fp, fn, tn = get_classification_labels(y_true, y_pred)
    elif problem.casefold() == "multiclass":
        # BUGFIX: test against None explicitly -- a class label of 0 is falsy
        # but perfectly valid (average_sensitivity_score iterates all labels).
        # ValueError/TypeError keep the sibling functions' exception style and
        # remain catchable as Exception.
        if positive_class is None:
            raise ValueError("Cannot calculate sensitivity score with None")
        if not isinstance(positive_class, (str, int)):
            raise TypeError("Cannot discern positive class for multiclass problem")
        # One-vs-rest encoding: mark the positive class as 1, all others 0.
        new_y_true = np.where(y_true == positive_class, 1, 0)
        new_y_pred = np.where(y_pred == positive_class, 1, 0)
        tp, fp, fn, tn = get_classification_labels(new_y_true, new_y_pred)
    else:
        raise ValueError("Cannot determine problem type")
    return tp / (tp + fn)
def average_sensitivity_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
    """Calculates the average sensitivity score. Used for when we have more than 2 classes and want our models' average
    performance for each class

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem

    Returns
    -------
    The average of each sensitivity score for each group/class
    """
    unique_classes = np.unique(y_true)
    # Two or fewer classes: the plain binary score already applies.
    if unique_classes.size < 3:
        return sensitivity_score(y_true, y_pred)
    per_class = [
        sensitivity_score(y_true, y_pred, problem="multiclass", positive_class=cls)
        for cls in unique_classes
    ]
    return sum(per_class) / len(per_class)
def power_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
    problem: str = "Binary",
    positive_class: Union[str, int] = None,
) -> float:
    """Alias for sensitivity: "power" is the statistical name for the same quantity.

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclassification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'succcess' or 'positive' (i.e., the one marked as a 1).

    Returns
    -------
    The sensitivity score
    """
    # Pure delegation: power == sensitivity.
    return sensitivity_score(y_true, y_pred, problem=problem, positive_class=positive_class)
def average_power_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
    """Alias for average_sensitivity_score ("power" is the statistical name).

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem

    Returns
    -------
    The average of each sensitivity score for each group/class
    """
    # Pure delegation: power == sensitivity.
    return average_sensitivity_score(y_true, y_pred)
def negative_predictive_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
    problem: str = "Binary",
    positive_class: Union[str, int] = None,
) -> float:
    """Calculates the percentage of true negatives we correctly identified compared to
    the number of true negative and false negatives.

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclassification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'succcess' or 'positive' (i.e., the one marked as a 1).

    Returns
    -------
    The negative predictive score: tn / (tn + fn)
    """
    problem_true, y_true, y_pred = _check_targets(y_true, y_pred)
    if problem.casefold() == "binary":
        tp, fp, fn, tn = get_classification_labels(y_true, y_pred)
    elif problem.casefold() == "multiclass":
        # BUGFIX: test against None explicitly -- a class label of 0 is falsy
        # but perfectly valid (the average_* helper iterates all labels).
        # ValueError/TypeError keep the sibling functions' exception style and
        # remain catchable as Exception.
        if positive_class is None:
            raise ValueError("Cannot calculate negative predictive score with None")
        if not isinstance(positive_class, (str, int)):
            raise TypeError("Cannot discern positive class for multiclass problem")
        # One-vs-rest encoding: mark the positive class as 1, all others 0.
        new_y_true = np.where(y_true == positive_class, 1, 0)
        new_y_pred = np.where(y_pred == positive_class, 1, 0)
        tp, fp, fn, tn = get_classification_labels(new_y_true, new_y_pred)
    else:
        raise ValueError("Cannot determine problem type")
    return tn / (tn + fn)
def average_negative_predictive_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
    """Calculates the average negative predictive score. Used for when we have more than 2 classes and want our models'
    average performance for each class

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem

    Returns
    -------
    The average of each negative predictive score for each group/class
    """
    unique_classes = np.unique(y_true)
    # Two or fewer classes: the plain binary score already applies.
    if unique_classes.size < 3:
        return negative_predictive_score(y_true, y_pred)
    per_class = [
        negative_predictive_score(y_true, y_pred, problem="multiclass", positive_class=cls)
        for cls in unique_classes
    ]
    return sum(per_class) / len(per_class)
def false_negative_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
    problem: str = "Binary",
    positive_class: Union[str, int] = None,
) -> float:
    """The inverse of our false positive score, calculates the number of false negatives compared to the number of
    false negatives and true positives.

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclassification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'succcess' or 'positive' (i.e., the one marked as a 1).

    Returns
    -------
    The false negative score: fn / (fn + tp)
    """
    problem_true, y_true, y_pred = _check_targets(y_true, y_pred)
    if problem.casefold() == "binary":
        tp, fp, fn, tn = get_classification_labels(y_true, y_pred)
    elif problem.casefold() == "multiclass":
        # BUGFIX: test against None explicitly -- a class label of 0 is falsy
        # but perfectly valid (the average_* helper iterates all labels).
        # ValueError/TypeError keep the sibling functions' exception style and
        # remain catchable as Exception.
        if positive_class is None:
            raise ValueError("Cannot calculate false negative score with None")
        if not isinstance(positive_class, (str, int)):
            raise TypeError("Cannot discern positive class for multiclass problem")
        # One-vs-rest encoding: mark the positive class as 1, all others 0.
        new_y_true = np.where(y_true == positive_class, 1, 0)
        new_y_pred = np.where(y_pred == positive_class, 1, 0)
        tp, fp, fn, tn = get_classification_labels(new_y_true, new_y_pred)
    else:
        raise ValueError("Cannot determine problem type")
    return fn / (fn + tp)
def average_false_negative_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
    """Calculates the average false negative score. Used for when we have more than 2 classes and want our models'
    average performance for each class

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem

    Returns
    -------
    The average of each false negative score for each group/class
    """
    unique_classes = np.unique(y_true)
    # Two or fewer classes: the plain binary score already applies.
    if unique_classes.size < 3:
        return false_negative_score(y_true, y_pred)
    per_class = [
        false_negative_score(y_true, y_pred, problem="multiclass", positive_class=cls)
        for cls in unique_classes
    ]
    return sum(per_class) / len(per_class)
def problem_two_error_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
    problem: str = "Binary",
    positive_class: Union[str, int] = None,
) -> float:
    """Alias for false_negative_score (the Type II error rate).

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclassification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'succcess' or 'positive' (i.e., the one marked as a 1).

    Returns
    -------
    The problem II error score
    """
    # Pure delegation: a Type II error is a false negative.
    return false_negative_score(y_true, y_pred, problem=problem, positive_class=positive_class)
def average_problem_two_error_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
    """Alias for average_false_negative_score (the Type II error rate).

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem

    Returns
    -------
    The average of each problem II error score for each group/class
    """
    # Pure delegation: a Type II error is a false negative.
    return average_false_negative_score(y_true, y_pred)
def false_positive_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
    problem: str = "Binary",
    positive_class: Union[str, int] = None,
) -> float:
    """Calculates the ratio of false positives to false positives and true negatives.

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclassification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'succcess' or 'positive' (i.e., the one marked as a 1).

    Returns
    -------
    The false positive score: fp / (fp + tn)
    """
    problem_true, y_true, y_pred = _check_targets(y_true, y_pred)
    if problem.casefold() == "binary":
        tp, fp, fn, tn = get_classification_labels(y_true, y_pred)
    elif problem.casefold() == "multiclass":
        # BUGFIX: test against None explicitly -- a class label of 0 is falsy
        # but perfectly valid (the average_* helper iterates all labels).
        # ValueError/TypeError keep the sibling functions' exception style and
        # remain catchable as Exception.
        if positive_class is None:
            raise ValueError("Cannot calculate false positive score with None")
        if not isinstance(positive_class, (str, int)):
            raise TypeError("Cannot discern positive class for multiclass problem")
        # One-vs-rest encoding: mark the positive class as 1, all others 0.
        new_y_true = np.where(y_true == positive_class, 1, 0)
        new_y_pred = np.where(y_pred == positive_class, 1, 0)
        tp, fp, fn, tn = get_classification_labels(new_y_true, new_y_pred)
    else:
        raise ValueError("Cannot determine problem type")
    return fp / (fp + tn)
def average_false_positive_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
    """Calculates the average false positive score. Used for when we have more than 2 classes and want our models'
    average performance for each class

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem

    Returns
    -------
    The average of each false positive score for each group/class
    """
    unique_classes = np.unique(y_true)
    # Two or fewer classes: the plain binary score already applies.
    if unique_classes.size < 3:
        return false_positive_score(y_true, y_pred)
    per_class = [
        false_positive_score(y_true, y_pred, problem="multiclass", positive_class=cls)
        for cls in unique_classes
    ]
    return sum(per_class) / len(per_class)
def problem_one_error_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
    problem: str = "Binary",
    positive_class: Union[str, int] = None,
) -> float:
    """Alias for the false positive score (the type I error rate).

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclassification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'success' or 'positive' (i.e., the one marked as a 1).

    Returns
    -------
    The problem I error score
    """
    # Type I error is, by definition, the false positive rate.
    score = false_positive_score(
        y_true, y_pred, problem=problem, positive_class=positive_class
    )
    return score
def average_problem_one_error_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
    """Alias for the macro-averaged false positive score.

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem

    Returns
    -------
    The average problem one error score
    """
    # Delegates directly; see average_false_positive_score for details.
    result = average_false_positive_score(y_true, y_pred)
    return result
def false_discovery_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
    problem: str = "Binary",
    positive_class: Union[str, int] = None,
) -> float:
    """Calculates the ratio of false positives to false positives and true positives

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclassification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'success' or 'positive' (i.e., the one marked as a 1).

    Returns
    -------
    The false discovery score, i.e. fp / (fp + tp)

    Raises
    ------
    Exception
        If problem=='multiclass' and positive_class is None or not a str/int
    ValueError
        If problem is neither 'binary' nor 'multiclass'
    """
    problem_true, y_true, y_pred = _check_targets(y_true, y_pred)
    if problem.casefold() == "binary":
        tp, fp, fn, tn = get_classification_labels(y_true, y_pred)
    elif problem.casefold() == "multiclass":
        # BUGFIX: compare against None explicitly so that a legitimate class
        # label of 0 (falsy) is not mistaken for "no positive class given".
        if positive_class is not None:
            if isinstance(positive_class, (str, int)):
                # Collapse the multiclass labels into a one-vs-rest binary view.
                new_y_true = np.where(y_true == positive_class, 1, 0)
                new_y_pred = np.where(y_pred == positive_class, 1, 0)
                tp, fp, fn, tn = get_classification_labels(new_y_true, new_y_pred)
            else:
                raise Exception("Cannot discern positive class for multiclass problem")
        else:
            raise Exception("Cannot calculate false discovery score with None")
    else:
        raise ValueError("Cannot determine problem type")
    return fp / (fp + tp)
def average_false_discovery_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
    """Macro-average the false discovery score over the classes in ``y_true``.

    Fewer than three distinct classes yields the plain binary score; otherwise
    each class is treated in turn as the positive label and the resulting
    scores are averaged.

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem

    Returns
    -------
    The average false discovery score
    """
    labels = np.unique(y_true)
    if len(labels) < 3:
        return false_discovery_score(y_true, y_pred)
    running_total = 0
    for label in labels:
        running_total += false_discovery_score(
            y_true, y_pred, problem="multiclass", positive_class=label
        )
    return running_total / len(labels)
def false_omission_rate(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
    problem: str = "Binary",
    positive_class: Union[str, int] = None,
) -> float:
    """Calculates the ratio of false negatives to false negatives and true negatives

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclassification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'success' or 'positive' (i.e., the one marked as a 1).

    Returns
    -------
    The false omission rate, i.e. fn / (fn + tn)

    Raises
    ------
    Exception
        If problem=='multiclass' and positive_class is None or not a str/int
    ValueError
        If problem is neither 'binary' nor 'multiclass'
    """
    problem_true, y_true, y_pred = _check_targets(y_true, y_pred)
    if problem.casefold() == "binary":
        tp, fp, fn, tn = get_classification_labels(y_true, y_pred)
    elif problem.casefold() == "multiclass":
        # BUGFIX: compare against None explicitly so that a legitimate class
        # label of 0 (falsy) is not mistaken for "no positive class given".
        if positive_class is not None:
            if isinstance(positive_class, (str, int)):
                # Collapse the multiclass labels into a one-vs-rest binary view.
                new_y_true = np.where(y_true == positive_class, 1, 0)
                new_y_pred = np.where(y_pred == positive_class, 1, 0)
                tp, fp, fn, tn = get_classification_labels(new_y_true, new_y_pred)
            else:
                raise Exception("Cannot discern positive class for multiclass problem")
        else:
            raise Exception("Cannot calculate false omission score with None")
    else:
        raise ValueError("Cannot determine problem type")
    return fn / (fn + tn)
def average_false_omission_rate(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
    """Macro-average the false omission rate over the classes in ``y_true``.

    Binary targets fall straight through to the plain false omission rate;
    with three or more classes each label becomes the positive class in turn
    and the per-class rates are averaged.

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem

    Returns
    -------
    The average false omission rate
    """
    distinct = np.unique(y_true)
    if len(distinct) < 3:
        return false_omission_rate(y_true, y_pred)
    accumulated = sum(
        false_omission_rate(
            y_true, y_pred, problem="multiclass", positive_class=label
        )
        for label in distinct
    )
    return accumulated / len(distinct)
def j_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
    problem: str = "Binary",
    positive_class: Union[str, int] = None,
) -> float:
    """Calculate Youden's J statistic: sensitivity + specificity - 1.

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclassification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'success' or 'positive' (i.e., the one marked as a 1).

    Returns
    -------
    The j score
    """
    sensitivity = sensitivity_score(
        y_true, y_pred, problem=problem, positive_class=positive_class
    )
    specificity = specificity_score(
        y_true, y_pred, problem=problem, positive_class=positive_class
    )
    return sensitivity + specificity - 1
def markedness_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
    problem: str = "Binary",
    positive_class: Union[str, int] = None,
) -> float:
    """Calculates the markedness score, or the precision + negative predictive score - 1

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclassification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'success' or 'positive' (i.e., the one marked as a 1).

    Returns
    -------
    The markedness score
    """

    def precision_score(
        y_true: Union[Sequence[int], np.ndarray, pd.Series],
        y_pred: Union[Sequence[int], np.ndarray, pd.Series],
        problem: str = "Binary",
        positive_class: Union[str, int] = None,
    ) -> float:
        """Local helper: precision, i.e. tp / (tp + fp)."""
        problem_true, y_true, y_pred = _check_targets(y_true, y_pred)
        if problem.casefold() == "binary":
            tp, fp, fn, tn = get_classification_labels(y_true, y_pred)
        elif problem.casefold() == "multiclass":
            # BUGFIX: compare against None explicitly so that a legitimate
            # class label of 0 (falsy) is not mistaken for "no class given".
            if positive_class is not None:
                if isinstance(positive_class, (str, int)):
                    new_y_true = np.where(y_true == positive_class, 1, 0)
                    new_y_pred = np.where(y_pred == positive_class, 1, 0)
                    tp, fp, fn, tn = get_classification_labels(new_y_true, new_y_pred)
                else:
                    raise Exception(
                        "Cannot discern positive class for multiclass problem"
                    )
            else:
                raise Exception("Cannot calculate precision score with None")
        else:
            raise ValueError("Cannot determine problem type")
        return tp / (tp + fp)

    return (
        precision_score(y_true, y_pred, problem=problem, positive_class=positive_class)
        + negative_predictive_score(y_true, y_pred, problem=problem, positive_class=positive_class)
        - 1
    )
def likelihood_ratio_positive(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
    problem: str = "Binary",
    positive_class: Union[str, int] = None,
) -> float:
    """Calculates the likelihood ratio positive, or sensitivity / (1 - specificity)

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclassification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'success' or 'positive' (i.e., the one marked as a 1).

    Returns
    -------
    The positive likelihood ratio
    """
    sensitivity = sensitivity_score(
        y_true, y_pred, problem=problem, positive_class=positive_class
    )
    specificity = specificity_score(
        y_true, y_pred, problem=problem, positive_class=positive_class
    )
    return sensitivity / (1 - specificity)
def likelihood_ratio_negative(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
    problem: str = "Binary",
    positive_class: Union[str, int] = None,
) -> float:
    """Calculates the likelihood ratio negative, or specificity / (1 - sensitivity)

    Parameters
    ----------
    y_true: list or array like
        The true, or the expected, values of our problem
    y_pred: list or array like
        The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclassification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'success' or 'positive' (i.e., the one marked as a 1).

    Returns
    -------
    The negative likelihood ratio
    """
    specificity = specificity_score(
        y_true, y_pred, problem=problem, positive_class=positive_class
    )
    sensitivity = sensitivity_score(
        y_true, y_pred, problem=problem, positive_class=positive_class
    )
    return specificity / (1 - sensitivity)
| [
"pandas.Series",
"numpy.abs",
"numpy.unique",
"sklearn.metrics._regression._check_reg_targets",
"sklearn.metrics._classification._check_targets",
"numpy.where",
"numpy.sum",
"sklearn.metrics.r2_score",
"sklearn.metrics.explained_variance_score"
] | [((1198, 1258), 'sklearn.metrics._regression._check_reg_targets', '_check_reg_targets', (['y_true', 'y_pred'], {'multioutput': '"""raw_values"""'}), "(y_true, y_pred, multioutput='raw_values')\n", (1216, 1258), False, 'from sklearn.metrics._regression import _check_reg_targets\n'), ((3145, 3205), 'sklearn.metrics._regression._check_reg_targets', '_check_reg_targets', (['y_true', 'y_pred'], {'multioutput': '"""raw_values"""'}), "(y_true, y_pred, multioutput='raw_values')\n", (3163, 3205), False, 'from sklearn.metrics._regression import _check_reg_targets\n'), ((4748, 4808), 'sklearn.metrics._regression._check_reg_targets', '_check_reg_targets', (['y_true', 'y_pred'], {'multioutput': '"""raw_values"""'}), "(y_true, y_pred, multioutput='raw_values')\n", (4766, 4808), False, 'from sklearn.metrics._regression import _check_reg_targets\n'), ((5541, 5601), 'sklearn.metrics._regression._check_reg_targets', '_check_reg_targets', (['y_true', 'y_pred'], {'multioutput': '"""raw_values"""'}), "(y_true, y_pred, multioutput='raw_values')\n", (5559, 5601), False, 'from sklearn.metrics._regression import _check_reg_targets\n'), ((5628, 5651), 'numpy.abs', 'np.abs', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (5634, 5651), True, 'import numpy as np\n'), ((6271, 6331), 'sklearn.metrics._regression._check_reg_targets', '_check_reg_targets', (['y_true', 'y_pred'], {'multioutput': '"""raw_values"""'}), "(y_true, y_pred, multioutput='raw_values')\n", (6289, 6331), False, 'from sklearn.metrics._regression import _check_reg_targets\n'), ((7331, 7391), 'sklearn.metrics._regression._check_reg_targets', '_check_reg_targets', (['y_true', 'y_pred'], {'multioutput': '"""raw_values"""'}), "(y_true, y_pred, multioutput='raw_values')\n", (7349, 7391), False, 'from sklearn.metrics._regression import _check_reg_targets\n'), ((7419, 7452), 'pandas.Series', 'pd.Series', (['[i[0] for i in y_true]'], {}), '([i[0] for i in y_true])\n', (7428, 7452), True, 'import pandas as pd\n'), ((7466, 
7499), 'pandas.Series', 'pd.Series', (['[i[0] for i in y_pred]'], {}), '([i[0] for i in y_pred])\n', (7475, 7499), True, 'import pandas as pd\n'), ((8318, 8348), 'sklearn.metrics._classification._check_targets', '_check_targets', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (8332, 8348), False, 'from sklearn.metrics._classification import _check_targets\n'), ((9923, 9953), 'sklearn.metrics._classification._check_targets', '_check_targets', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (9937, 9953), False, 'from sklearn.metrics._classification import _check_targets\n'), ((12595, 12625), 'sklearn.metrics._classification._check_targets', '_check_targets', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (12609, 12625), False, 'from sklearn.metrics._classification import _check_targets\n'), ((16927, 16957), 'sklearn.metrics._classification._check_targets', '_check_targets', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (16941, 16957), False, 'from sklearn.metrics._classification import _check_targets\n'), ((19766, 19796), 'sklearn.metrics._classification._check_targets', '_check_targets', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (19780, 19796), False, 'from sklearn.metrics._classification import _check_targets\n'), ((24055, 24085), 'sklearn.metrics._classification._check_targets', '_check_targets', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (24069, 24085), False, 'from sklearn.metrics._classification import _check_targets\n'), ((28314, 28344), 'sklearn.metrics._classification._check_targets', '_check_targets', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (28328, 28344), False, 'from sklearn.metrics._classification import _check_targets\n'), ((31026, 31056), 'sklearn.metrics._classification._check_targets', '_check_targets', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (31040, 31056), False, 'from sklearn.metrics._classification import _check_targets\n'), ((2084, 2108), 'sklearn.metrics.r2_score', 'r2_score', (['y_true', 
'y_pred'], {}), '(y_true, y_pred)\n', (2092, 2108), False, 'from sklearn.metrics import r2_score, explained_variance_score\n'), ((4042, 4082), 'sklearn.metrics.explained_variance_score', 'explained_variance_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (4066, 4082), False, 'from sklearn.metrics import r2_score, explained_variance_score\n'), ((5664, 5678), 'numpy.abs', 'np.abs', (['y_true'], {}), '(y_true)\n', (5670, 5678), True, 'import numpy as np\n'), ((5681, 5695), 'numpy.abs', 'np.abs', (['y_pred'], {}), '(y_pred)\n', (5687, 5695), True, 'import numpy as np\n'), ((11473, 11490), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (11482, 11490), True, 'import numpy as np\n'), ((14145, 14162), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (14154, 14162), True, 'import numpy as np\n'), ((18517, 18534), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (18526, 18534), True, 'import numpy as np\n'), ((21331, 21348), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (21340, 21348), True, 'import numpy as np\n'), ((25620, 25637), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (25629, 25637), True, 'import numpy as np\n'), ((29855, 29872), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (29864, 29872), True, 'import numpy as np\n'), ((32558, 32575), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (32567, 32575), True, 'import numpy as np\n'), ((35074, 35104), 'sklearn.metrics._classification._check_targets', '_check_targets', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (35088, 35104), False, 'from sklearn.metrics._classification import _check_targets\n'), ((2112, 2136), 'sklearn.metrics.r2_score', 'r2_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2120, 2136), False, 'from sklearn.metrics import r2_score, explained_variance_score\n'), ((4094, 4134), 'sklearn.metrics.explained_variance_score', 'explained_variance_score', (['y_true', 'y_pred'], {}), 
'(y_true, y_pred)\n', (4118, 4134), False, 'from sklearn.metrics import r2_score, explained_variance_score\n'), ((4911, 4945), 'numpy.abs', 'np.abs', (['((y_true - y_pred) / y_true)'], {}), '((y_true - y_pred) / y_true)\n', (4917, 4945), True, 'import numpy as np\n'), ((5713, 5734), 'numpy.sum', 'np.sum', (['(error / total)'], {}), '(error / total)\n', (5719, 5734), True, 'import numpy as np\n'), ((6387, 6417), 'numpy.sum', 'np.sum', (['((y_true - y_pred) ** 2)'], {}), '((y_true - y_pred) ** 2)\n', (6393, 6417), True, 'import numpy as np\n'), ((8360, 8377), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (8369, 8377), True, 'import numpy as np\n'), ((8473, 8490), 'numpy.unique', 'np.unique', (['y_pred'], {}), '(y_pred)\n', (8482, 8490), True, 'import numpy as np\n'), ((8596, 8613), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (8605, 8613), True, 'import numpy as np\n'), ((8639, 8656), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (8648, 8656), True, 'import numpy as np\n'), ((8685, 8736), 'numpy.where', 'np.where', (['((y_true == label_1) & (y_pred == label_1))'], {}), '((y_true == label_1) & (y_pred == label_1))\n', (8693, 8736), True, 'import numpy as np\n'), ((8766, 8817), 'numpy.where', 'np.where', (['((y_true == label_0) & (y_pred == label_1))'], {}), '((y_true == label_0) & (y_pred == label_1))\n', (8774, 8817), True, 'import numpy as np\n'), ((8847, 8898), 'numpy.where', 'np.where', (['((y_true == label_1) * (y_pred == label_0))'], {}), '((y_true == label_1) * (y_pred == label_0))\n', (8855, 8898), True, 'import numpy as np\n'), ((8927, 8978), 'numpy.where', 'np.where', (['((y_true == label_0) & (y_pred == label_0))'], {}), '((y_true == label_0) & (y_pred == label_0))\n', (8935, 8978), True, 'import numpy as np\n'), ((11339, 11356), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (11348, 11356), True, 'import numpy as np\n'), ((14011, 14028), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', 
(14020, 14028), True, 'import numpy as np\n'), ((18375, 18392), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (18384, 18392), True, 'import numpy as np\n'), ((21194, 21211), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (21203, 21211), True, 'import numpy as np\n'), ((25483, 25500), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (25492, 25500), True, 'import numpy as np\n'), ((29717, 29734), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (29726, 29734), True, 'import numpy as np\n'), ((32422, 32439), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (32431, 32439), True, 'import numpy as np\n'), ((10244, 10284), 'numpy.where', 'np.where', (['(y_true == positive_class)', '(1)', '(0)'], {}), '(y_true == positive_class, 1, 0)\n', (10252, 10284), True, 'import numpy as np\n'), ((10314, 10354), 'numpy.where', 'np.where', (['(y_pred == positive_class)', '(1)', '(0)'], {}), '(y_pred == positive_class, 1, 0)\n', (10322, 10354), True, 'import numpy as np\n'), ((12916, 12956), 'numpy.where', 'np.where', (['(y_true == positive_class)', '(1)', '(0)'], {}), '(y_true == positive_class, 1, 0)\n', (12924, 12956), True, 'import numpy as np\n'), ((12986, 13026), 'numpy.where', 'np.where', (['(y_pred == positive_class)', '(1)', '(0)'], {}), '(y_pred == positive_class, 1, 0)\n', (12994, 13026), True, 'import numpy as np\n'), ((17248, 17288), 'numpy.where', 'np.where', (['(y_true == positive_class)', '(1)', '(0)'], {}), '(y_true == positive_class, 1, 0)\n', (17256, 17288), True, 'import numpy as np\n'), ((17318, 17358), 'numpy.where', 'np.where', (['(y_pred == positive_class)', '(1)', '(0)'], {}), '(y_pred == positive_class, 1, 0)\n', (17326, 17358), True, 'import numpy as np\n'), ((20087, 20127), 'numpy.where', 'np.where', (['(y_true == positive_class)', '(1)', '(0)'], {}), '(y_true == positive_class, 1, 0)\n', (20095, 20127), True, 'import numpy as np\n'), ((20157, 20197), 'numpy.where', 'np.where', (['(y_pred == 
positive_class)', '(1)', '(0)'], {}), '(y_pred == positive_class, 1, 0)\n', (20165, 20197), True, 'import numpy as np\n'), ((24376, 24416), 'numpy.where', 'np.where', (['(y_true == positive_class)', '(1)', '(0)'], {}), '(y_true == positive_class, 1, 0)\n', (24384, 24416), True, 'import numpy as np\n'), ((24446, 24486), 'numpy.where', 'np.where', (['(y_pred == positive_class)', '(1)', '(0)'], {}), '(y_pred == positive_class, 1, 0)\n', (24454, 24486), True, 'import numpy as np\n'), ((28635, 28675), 'numpy.where', 'np.where', (['(y_true == positive_class)', '(1)', '(0)'], {}), '(y_true == positive_class, 1, 0)\n', (28643, 28675), True, 'import numpy as np\n'), ((28705, 28745), 'numpy.where', 'np.where', (['(y_pred == positive_class)', '(1)', '(0)'], {}), '(y_pred == positive_class, 1, 0)\n', (28713, 28745), True, 'import numpy as np\n'), ((31347, 31387), 'numpy.where', 'np.where', (['(y_true == positive_class)', '(1)', '(0)'], {}), '(y_true == positive_class, 1, 0)\n', (31355, 31387), True, 'import numpy as np\n'), ((31417, 31457), 'numpy.where', 'np.where', (['(y_pred == positive_class)', '(1)', '(0)'], {}), '(y_pred == positive_class, 1, 0)\n', (31425, 31457), True, 'import numpy as np\n'), ((35419, 35459), 'numpy.where', 'np.where', (['(y_true == positive_class)', '(1)', '(0)'], {}), '(y_true == positive_class, 1, 0)\n', (35427, 35459), True, 'import numpy as np\n'), ((35493, 35533), 'numpy.where', 'np.where', (['(y_pred == positive_class)', '(1)', '(0)'], {}), '(y_pred == positive_class, 1, 0)\n', (35501, 35533), True, 'import numpy as np\n')] |
import os
import glob
import setuptools
from Cython.Distutils import build_ext
class NumpyBuildExtCommand(build_ext):
    """
    build_ext command for use when numpy headers are needed.
    from https://stackoverflow.com/questions/2379898/
    and https://stackoverflow.com/questions/48283503/
    """
    def run(self):
        # Deferring the numpy import into run() lets setup.py itself be parsed
        # before numpy is installed.
        # NOTE(review): numpy is imported *before* fetch_build_eggs runs, so if
        # numpy is missing this import fails first; the referenced SO answers
        # fetch the egg before importing — confirm the intended order.
        import numpy
        self.distribution.fetch_build_eggs(["numpy"])
        # Expose numpy's C headers to the Cython extension compilation.
        self.include_dirs.append(numpy.get_include())
        # Delegate the actual build to the stock Cython build_ext.
        build_ext.run(self)
def extract_version(CYTHON_FNAME):
    """Return the ``__version__`` string declared in *CYTHON_FNAME*.

    The first line starting with ``__version__`` wins; the value is returned
    with its surrounding quote characters stripped. Returns ``None`` when no
    such line exists.
    """
    found = None
    with open(CYTHON_FNAME) as source:
        for raw_line in source:
            if not raw_line.startswith("__version__"):
                continue
            _, found = raw_line.split("=")
            found = found.strip()[1:-1]  # Drop the surrounding quotes.
            break
    return found
def package_files(directory):
    """Collect every file beneath *directory*, each prefixed with ``..``.

    The ``..``-prefixed relative paths are the form setuptools expects for
    ``package_data`` entries that live outside the package directory.
    """
    return [
        os.path.join("..", dirpath, fname)
        for dirpath, _subdirs, filenames in os.walk(directory)
        for fname in filenames
    ]
# Bundle the eccodes runtime files into the package only when building a
# self-contained wheel (signalled via the PYGRIB_WHEEL environment variable).
package_data = {}
if os.environ.get("PYGRIB_WHEEL") is not None:
    package_data[""] = package_files("eccodes")
cmdclass = {"build_ext": NumpyBuildExtCommand}
# Candidate installation prefixes to probe for the eccodes/grib_api library.
# Explicit environment overrides take priority over the common system paths.
searchdirs = []
if os.environ.get("GRIBAPI_DIR"):
    searchdirs.append(os.environ["GRIBAPI_DIR"])
if os.environ.get("ECCODES_DIR"):
    searchdirs.append(os.environ["ECCODES_DIR"])
if os.environ.get("CONDA_PREFIX"):
    searchdirs.append(os.environ["CONDA_PREFIX"])
searchdirs += [
    os.path.expanduser("~"),
    "/usr",
    "/usr/local",
    "/opt/local",
    "/opt",
    "/sw",
]
# look for grib_api.h in searchdirs; the first prefix whose include/
# subdirectory contains the header wins.
eccdir = None
for d in searchdirs:
    try:
        incpath = os.path.join(os.path.join(d, "include"), "grib_api.h")
        # NOTE(review): this probe handle is opened only to test existence and
        # is never explicitly closed — confirm whether that is intentional.
        f = open(incpath)
        eccdir = d
        print("eccodes found in %s" % eccdir)
        break
    except IOError:
        continue
if eccdir is not None:
    incdirs = [os.path.join(eccdir, "include")]
    libdirs = [os.path.join(eccdir, "lib"), os.path.join(eccdir, "lib64")]
else:
    # Fall back to empty search paths and hope the compiler/linker defaults
    # can resolve eccodes on their own.
    print("eccodes not found, build may fail...")
    incdirs = []
    libdirs = []
ext_modules = [
    setuptools.Extension(
        "pygrib._pygrib",
        ["pygrib/_pygrib.pyx"],
        include_dirs=incdirs,
        library_dirs=libdirs,
        runtime_library_dirs=libdirs,
        libraries=["eccodes"],
    )
]
# Import README.md as PyPi long_description
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, "README.md")) as f:
    long_description = f.read()
# man pages installed in MAN_DIR/man1
if os.environ.get("MAN_DIR"):
    man_dir = os.environ.get("MAN_DIR")
    manpages = glob.glob(os.path.join("man", "*.1"))
    data_files = [(os.path.join(man_dir, "man1"), manpages)]
# if MAN_DIR not set, man pages not installed
else:
    data_files = None
setuptools.setup(
    name="pygrib",
    # The version is parsed out of the Cython source so it is declared in
    # exactly one place.
    version=extract_version("pygrib/_pygrib.pyx"),
    description="Python module for reading/writing GRIB files",
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/jswhit/pygrib",
    download_url="http://python.org/pypi/pygrib",
    license="License :: OSI Approved :: MIT License",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    # cmdclass wires in NumpyBuildExtCommand so numpy headers are available
    # when the Cython extension is compiled.
    cmdclass=cmdclass,
    long_description=long_description,
    long_description_content_type="text/markdown",
    scripts=[
        "utils/grib_list",
        "utils/grib_repack",
        "utils/cnvgrib1to2",
        "utils/cnvgrib2to1",
    ],
    ext_modules=ext_modules,
    # data_files is None unless MAN_DIR was set above (man-page install).
    data_files=data_files,
    packages=["pygrib"],
    package_data=package_data,
    setup_requires=["setuptools", "cython"],
    install_requires=[
        "pyproj",
        "numpy",
    ],
)
| [
"Cython.Distutils.build_ext.run",
"os.walk",
"os.environ.get",
"os.path.join",
"setuptools.Extension",
"os.path.dirname",
"numpy.get_include",
"os.path.expanduser"
] | [((1216, 1245), 'os.environ.get', 'os.environ.get', (['"""GRIBAPI_DIR"""'], {}), "('GRIBAPI_DIR')\n", (1230, 1245), False, 'import os\n'), ((1299, 1328), 'os.environ.get', 'os.environ.get', (['"""ECCODES_DIR"""'], {}), "('ECCODES_DIR')\n", (1313, 1328), False, 'import os\n'), ((1382, 1412), 'os.environ.get', 'os.environ.get', (['"""CONDA_PREFIX"""'], {}), "('CONDA_PREFIX')\n", (1396, 1412), False, 'import os\n'), ((2588, 2613), 'os.environ.get', 'os.environ.get', (['"""MAN_DIR"""'], {}), "('MAN_DIR')\n", (2602, 2613), False, 'import os\n'), ((900, 918), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (907, 918), False, 'import os\n'), ((1056, 1086), 'os.environ.get', 'os.environ.get', (['"""PYGRIB_WHEEL"""'], {}), "('PYGRIB_WHEEL')\n", (1070, 1086), False, 'import os\n'), ((1484, 1507), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1502, 1507), False, 'import os\n'), ((2133, 2302), 'setuptools.Extension', 'setuptools.Extension', (['"""pygrib._pygrib"""', "['pygrib/_pygrib.pyx']"], {'include_dirs': 'incdirs', 'library_dirs': 'libdirs', 'runtime_library_dirs': 'libdirs', 'libraries': "['eccodes']"}), "('pygrib._pygrib', ['pygrib/_pygrib.pyx'], include_dirs\n =incdirs, library_dirs=libdirs, runtime_library_dirs=libdirs, libraries\n =['eccodes'])\n", (2153, 2302), False, 'import setuptools\n'), ((2428, 2453), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2443, 2453), False, 'import os\n'), ((2629, 2654), 'os.environ.get', 'os.environ.get', (['"""MAN_DIR"""'], {}), "('MAN_DIR')\n", (2643, 2654), False, 'import os\n'), ((463, 482), 'Cython.Distutils.build_ext.run', 'build_ext.run', (['self'], {}), '(self)\n', (476, 482), False, 'from Cython.Distutils import build_ext\n'), ((1915, 1946), 'os.path.join', 'os.path.join', (['eccdir', '"""include"""'], {}), "(eccdir, 'include')\n", (1927, 1946), False, 'import os\n'), ((1963, 1990), 'os.path.join', 'os.path.join', (['eccdir', '"""lib"""'], {}), 
"(eccdir, 'lib')\n", (1975, 1990), False, 'import os\n'), ((1992, 2021), 'os.path.join', 'os.path.join', (['eccdir', '"""lib64"""'], {}), "(eccdir, 'lib64')\n", (2004, 2021), False, 'import os\n'), ((2465, 2506), 'os.path.join', 'os.path.join', (['this_directory', '"""README.md"""'], {}), "(this_directory, 'README.md')\n", (2477, 2506), False, 'import os\n'), ((2680, 2706), 'os.path.join', 'os.path.join', (['"""man"""', '"""*.1"""'], {}), "('man', '*.1')\n", (2692, 2706), False, 'import os\n'), ((434, 453), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (451, 453), False, 'import numpy\n'), ((1693, 1719), 'os.path.join', 'os.path.join', (['d', '"""include"""'], {}), "(d, 'include')\n", (1705, 1719), False, 'import os\n'), ((2727, 2756), 'os.path.join', 'os.path.join', (['man_dir', '"""man1"""'], {}), "(man_dir, 'man1')\n", (2739, 2756), False, 'import os\n'), ((980, 1014), 'os.path.join', 'os.path.join', (['""".."""', 'path', 'filename'], {}), "('..', path, filename)\n", (992, 1014), False, 'import os\n')] |
import json
import plantpredict
from plantpredict.enumerations import WeatherDataProviderEnum, LibraryStatusEnum, WeatherDataTypeEnum, \
WeatherPLevelEnum
import numpy as np
# Authenticate against the PlantPredict API. The placeholder strings must be
# replaced with real credentials before this script will run.
api = plantpredict.Api(
    username="insert username here",
    password="<PASSWORD>",
    client_id="insert client_id here",
    client_secret="insert client_secret here"
)
# Load the JSON file containing the weather time series (a list of dicts,
# one entry per time step, as used by the sums/means below).
with open('weather_details.json', 'rb') as json_file:
    weather_details = json.load(json_file)
# Look up location metadata for the hard-coded site coordinates.
latitude = 35.0
longitude = -119.0
geo = api.geo(latitude=latitude, longitude=longitude)
location_info = geo.get_location_info()
# Initialize the weather object and populate the REQUIRED weather fields.
weather = api.weather()
weather.name = "Python SDK Test Weather"
weather.latitude = 35.0
weather.longitude = -119.0
weather.country = location_info['country']
weather.country_code = location_info['country_code']
weather.data_provider = WeatherDataProviderEnum.METEONORM
weather.weather_details = weather_details
# Populate additional weather metadata from the geo lookups.
weather.elevation = round(geo.get_elevation()["elevation"], 2)
weather.locality = location_info['locality']
weather.region = location_info['region']
weather.state_province = location_info['state_province']
weather.state_province_code = location_info['state_province_code']
weather.time_zone = geo.get_time_zone()['time_zone']
weather.status = LibraryStatusEnum.DRAFT_PRIVATE
weather.data_type = WeatherDataTypeEnum.MEASURED
weather.p_level = WeatherPLevelEnum.P95
weather.time_interval = 60 # minutes
# Annual irradiance sums; the /1000 presumably converts Wh to kWh —
# TODO confirm the provider's units.
weather.global_horizontal_irradiance_sum = round(
    sum([w['global_horizontal_irradiance'] for w in weather_details])/1000, 2
)
weather.diffuse_horizontal_irradiance_sum = round(
    sum([w['diffuse_horizontal_irradiance'] for w in weather_details])/1000, 2
)
weather.direct_normal_irradiance_sum = round(
    sum([w['direct_normal_irradiance'] for w in weather_details])/1000, 2
)
# Simple summary statistics over the whole time series.
weather.average_air_temperature = np.round(np.mean([w['temperature'] for w in weather_details]), 2)
weather.average_relative_humidity = np.round(np.mean([w['relative_humidity'] for w in weather_details]), 2)
weather.average_wind_speed = np.round(np.mean([w['windspeed'] for w in weather_details]), 2)
weather.max_air_temperature = np.round(max([w['temperature'] for w in weather_details]), 2)
# Create the weather file in PlantPredict (remote side effect).
weather.create()
| [
"json.load",
"numpy.mean",
"plantpredict.Api"
] | [((223, 382), 'plantpredict.Api', 'plantpredict.Api', ([], {'username': '"""insert username here"""', 'password': '"""<PASSWORD>"""', 'client_id': '"""insert client_id here"""', 'client_secret': '"""insert client_secret here"""'}), "(username='insert username here', password='<PASSWORD>',\n client_id='insert client_id here', client_secret=\n 'insert client_secret here')\n", (239, 382), False, 'import plantpredict\n'), ((517, 537), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (526, 537), False, 'import json\n'), ((2062, 2114), 'numpy.mean', 'np.mean', (["[w['temperature'] for w in weather_details]"], {}), "([w['temperature'] for w in weather_details])\n", (2069, 2114), True, 'import numpy as np\n'), ((2164, 2222), 'numpy.mean', 'np.mean', (["[w['relative_humidity'] for w in weather_details]"], {}), "([w['relative_humidity'] for w in weather_details])\n", (2171, 2222), True, 'import numpy as np\n'), ((2265, 2315), 'numpy.mean', 'np.mean', (["[w['windspeed'] for w in weather_details]"], {}), "([w['windspeed'] for w in weather_details])\n", (2272, 2315), True, 'import numpy as np\n')] |
###################################################################################################
#
# EventTypeIdentification.py
#
# Copyright (C) by <NAME>, <NAME> & <NAME>.
# All rights reserved.
#
# Please see the file License.txt in the main repository for the copyright-notice.
#
###################################################################################################
# TODO: Train and test all multiplicities
# TODO: Test performance as a function of energy
# TODO: Test performance as a function of zenith angle
# TODO: Test deep neural Networks
# TODO: Test different libraries
###################################################################################################
import ROOT
import array
import os
import sys
import random
import time
import collections
import numpy as np
import math, datetime
from voxnet import *
#from volumetric_data import ShapeNet40Vox30
###################################################################################################
class EventTypeIdentification:
    """
    This class performs event-type identification training (Compton vs. pair
    production, per detector) on simulated events using a TensorFlow VoxNet
    model. A typical usage would look like this:
    AI = EventTypeIdentification("Ling2.seq3.quality.root", "Results", "TF:VOXNET", 1000000)
    AI.train()
    AI.test()
    """
    ###################################################################################################
    def __init__(self, FileName, Output, Algorithm, MaxEvents):
        """
        The default constructor for class EventTypeIdentification
        Attributes
        ----------
        FileName : string
            Data file name (something like: X.maxhits2.eventclusterizer.root)
        Output : string
            Output filename prefix as well as output directory name
        Algorithm : string
            The algorithms used during training. Separate multiples by comma (e.g. "MLP,DNNCPU")
        MaxEvents : integer
            The maximum amount of events to use
        """
        self.FileName = FileName
        self.OutputPrefix = Output
        self.Algorithms = Algorithm
        self.MaxEvents = MaxEvents
        # Filled by loadData(): per-event labels and per-event hit arrays
        self.EventTypes = []
        self.EventHits = []
        # Round-robin cursor into EventHits/EventTypes, used by get_batch()
        self.LastEventIndex = 0
        self.BatchSize = 20
        # Voxelisation grid (x, y, z bins) for the point-cloud input
        self.XBins = 110
        self.YBins = 110
        self.ZBins = 48
        # Highest label value + 1 seen so far; defines the one-hot width
        self.MaxLabel = 0
    ###################################################################################################
    def train(self):
        """
        Switch between the various machine-learning libraries based on self.Algorithms
        """
        if self.Algorithms.startswith("TF:"):
            self.trainTFMethods()
        #elif self.Algorithms.startswith("TMVA:"):
        #    self.trainTMVAMethods()
        #elif self.Algorithms.startswith("SKL:"):
        #    self.trainSKLMethods()
        else:
            print("ERROR: Unknown algorithm: {}".format(self.Algorithms))
            return
    ###################################################################################################
    def loadData(self):
        """
        Read events from the simulation file and store them on this instance:
        self.EventTypes -- event types in numerical form: 0x: Compton event,
            1x: pair event, with x the detector type of the second interaction
        self.EventHits -- one numpy array per event with rows (x, y, z, energy)
        Also updates self.MaxLabel to (largest type + 1).
        """
        print("{}: Load data from sim file".format(time.time()))
        import ROOT as M
        # Load MEGAlib into ROOT
        M.gSystem.Load("$(MEGALIB)/lib/libMEGAlib.so")
        # Initialize MEGAlib
        G = M.MGlobal()
        G.Initialize()
        # Fixed for the time being
        GeometryName = "$(MEGALIB)/resource/examples/geomega/GRIPS/GRIPS.geo.setup"
        # Load geometry:
        Geometry = M.MDGeometryQuest()
        if Geometry.ScanSetupFile(M.MString(GeometryName)) == True:
            print("Geometry " + GeometryName + " loaded!")
        else:
            print("Unable to load geometry " + GeometryName + " - Aborting!")
            quit()
        Reader = M.MFileEventsSim(Geometry)
        if Reader.Open(M.MString(self.FileName)) == False:
            # NOTE(review): 'FileName' is undefined in this scope -- self.FileName
            # was presumably intended; as written this line would raise NameError.
            print("Unable to open file " + FileName + ". Aborting!")
            quit()
        #Hist = M.TH2D("Energy", "Energy", 100, 0, 600, 100, 0, 600)
        #Hist.SetXTitle("Input energy [keV]")
        #Hist.SetYTitle("Measured energy [keV]")
        EventTypes = []
        EventHits = []
        NEvents = 0
        while True:
            Event = Reader.GetNextEvent()
            if not Event:
                break
            # Encode the type from the second interaction (index 1):
            # Compton -> 0 + detector type, pair -> 10 + detector type
            Type = 0
            if Event.GetNIAs() > 0:
                if Event.GetIAAt(1).GetProcess() == M.MString("COMP"):
                    Type += 0 + Event.GetIAAt(1).GetDetectorType()
                elif Event.GetIAAt(1).GetProcess() == M.MString("PAIR"):
                    Type += 10 + Event.GetIAAt(1).GetDetectorType()
            else:
                # no interactions at all -> stop reading entirely
                break
            if Type+1 > self.MaxLabel:
                self.MaxLabel = Type +1
            # Collect all hits of this event as rows of (x, y, z, energy)
            Hits = np.zeros((Event.GetNHTs(), 4))
            for i in range(0, Event.GetNHTs()):
                Hits[i, 0] = Event.GetHTAt(i).GetPosition().X()
                Hits[i, 1] = Event.GetHTAt(i).GetPosition().Y()
                Hits[i, 2] = Event.GetHTAt(i).GetPosition().Z()
                Hits[i, 3] = Event.GetHTAt(i).GetEnergy()
            NEvents += 1
            EventTypes.append(Type)
            EventHits.append(Hits)
            if NEvents >= self.MaxEvents:
                break
        print("Occurances of different event types:")
        print(collections.Counter(EventTypes))
        self.LastEventIndex = 0
        self.EventHits = EventHits
        self.EventTypes = EventTypes
        return
    ###################################################################################################
    def trainTFMethods(self):
        """
        Train the VoxNet 3D CNN on the loaded events with TensorFlow.
        Builds the graph (loss, accuracy, optimizers), then runs an open-ended
        training loop that periodically logs the loss, evaluates train/test
        accuracy and saves .npz checkpoints under ./checkpoints.
        NOTE(review): 'tf' and 'VoxNet' are not imported at the top of this
        file explicitly; presumably they come via 'from voxnet import *' -- confirm.
        """
        # Load the data
        #eventtypes: what we want to train {21:11, }
        #EventHits: what to conver to the point cloud
        #numpy array
        self.loadData()
        # Add VoxNet here
        voxnet = VoxNet(self.BatchSize, self.XBins, self.YBins, self.ZBins, self.MaxLabel)
        batch_size = 1
        p = dict() # placeholders
        p['labels'] = tf.placeholder(tf.float32, [None, self.MaxLabel])
        p['loss'] = tf.nn.softmax_cross_entropy_with_logits(logits=voxnet[-2], labels=p['labels'])
        p['loss'] = tf.reduce_mean(p['loss'])
        # L2 weight penalty, applied separately via 'weights_decay' below
        p['l2_loss'] = tf.add_n([tf.nn.l2_loss(w) for w in voxnet.kernels])
        p['correct_prediction'] = tf.equal(tf.argmax(voxnet[-1], 1), tf.argmax(p['labels'], 1))
        p['accuracy'] = tf.reduce_mean(tf.cast(p['correct_prediction'], tf.float32))
        p['learning_rate'] = tf.placeholder(tf.float32)
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            p['train'] = tf.train.AdamOptimizer(p['learning_rate'], epsilon=1e-3).minimize(p['loss'])
        p['weights_decay'] = tf.train.GradientDescentOptimizer(p['learning_rate']).minimize(p['l2_loss'])
        # Hyperparameters
        num_batches = 2147483647
        #batch_size = 50
        initial_learning_rate = 0.001
        min_learning_rate = 0.000001
        learning_rate_decay_limit = 0.0001
        #TODO://
        #not sure what supposed to go inside len
        num_batches_per_epoch = len(self.EventTypes) / float(batch_size)
        learning_decay = 10 * num_batches_per_epoch
        weights_decay_after = 5 * num_batches_per_epoch
        checkpoint_num = 0
        learning_step = 0
        min_loss = 1e308
        if not os.path.isdir('checkpoints'):
            os.mkdir('checkpoints')
        # truncate the accuracy log at the start of each training session
        with open('checkpoints/accuracies.txt', 'w') as f:
            f.write('')
        with tf.Session() as session:
            session.run(tf.global_variables_initializer())
            for batch_index in range(num_batches):
                print("Iteration {0}".format(batch_index+1))
                # exponentially decayed learning rate, floored at min_learning_rate
                learning_rate = max(min_learning_rate, initial_learning_rate * 0.5**(learning_step / learning_decay))
                learning_step += 1
                if batch_index > weights_decay_after and batch_index % 256 == 0:
                    # NOTE(review): reuses feed_dict from the previous iteration;
                    # would raise NameError if this branch ever ran on the first pass.
                    session.run(p['weights_decay'], feed_dict=feed_dict)
                voxs, labels = self.get_batch(self.BatchSize)
                tf.logging.set_verbosity(tf.logging.DEBUG)
                print("Starting training run")
                start = time.time()
                feed_dict = {voxnet[0]: voxs, p['labels']: labels, p['learning_rate']: learning_rate, voxnet.training: True}
                session.run(p['train'], feed_dict=feed_dict)
                print("Done with training run after {0} seconds".format(round(time.time() - start, 2)))
                # every 8 batches: report the loss and adapt the decay schedule
                if batch_index and batch_index % 8 == 0:
                    print("{} batch: {}".format(datetime.datetime.now(), batch_index))
                    print('learning rate: {}'.format(learning_rate))
                    feed_dict[voxnet.training] = False
                    loss = session.run(p['loss'], feed_dict=feed_dict)
                    print('loss: {}'.format(loss))
                    if (batch_index and loss > 1.5 * min_loss and learning_rate > learning_rate_decay_limit):
                        min_loss = loss
                        learning_step *= 1.2
                        print("decreasing learning rate...")
                    min_loss = min(loss, min_loss)
                # every 2048 batches: evaluate accuracies and write a checkpoint
                if batch_index and batch_index % 2048 == 0:
                    num_accuracy_batches = 30
                    total_accuracy = 0
                    for x in range(num_accuracy_batches):
                        #TODO://
                        #replace with actual data
                        voxs, labels = self.get_batch(batch_size)
                        feed_dict = {voxnet[0]: voxs, p['labels']: labels, voxnet.training: False}
                        total_accuracy += session.run(p['accuracy'], feed_dict=feed_dict)
                    training_accuracy = total_accuracy / num_accuracy_batches
                    print('training accuracy: {}'.format(training_accuracy))
                    num_accuracy_batches = 90
                    total_accuracy = 0
                    for x in range(num_accuracy_batches):
                        voxs, labels = self.get_batch(batch_size)
                        feed_dict = {voxnet[0]: voxs, p['labels']: labels, voxnet.training: False}
                        total_accuracy += session.run(p['accuracy'], feed_dict=feed_dict)
                    test_accuracy = total_accuracy / num_accuracy_batches
                    print('test accuracy: {}'.format(test_accuracy))
                    print('saving checkpoint {}...'.format(checkpoint_num))
                    voxnet.npz_saver.save(session, 'checkpoints/c-{}.npz'.format(checkpoint_num))
                    with open('checkpoints/accuracies.txt', 'a') as f:
                        f.write(' '.join(map(str, (checkpoint_num, training_accuracy, test_accuracy)))+'\n')
                    print('checkpoint saved!')
                    checkpoint_num += 1
        return
    ###################################################################################################
    def get_batch(self, batch_size):
        """
        Assemble one training batch by voxelising events round-robin.
        Arguments
        ---------
        batch_size : int
            Number of events in the batch.
        Returns
        -------
        voxs : np.ndarray of shape (batch_size, XBins, YBins, ZBins, 1)
            Energy-weighted occupancy grids (hit energies summed per voxel).
        one_hots : np.ndarray of shape (batch_size, MaxLabel)
            One-hot encoded event-type labels.
        """
        rn = random.randint
        # NOTE(review): 'rn' is unused -- presumably a leftover.
        bs = batch_size
        # world extent mapped onto the voxel grid; transaxial -55..55,
        # axial 0..48 (units per the GRIPS geometry -- confirm)
        xmin = -55
        ymin = -55
        zmin = 0
        xmax = 55
        ymax = 55
        zmax = 48
        voxs = np.zeros([bs, self.XBins, self.YBins, self.ZBins, 1], dtype=np.float32)
        one_hots = np.zeros([bs, self.MaxLabel], dtype=np.float32)
        #fill event hits
        for bi in range(bs):
            # advance the round-robin cursor, wrapping at the end
            self.LastEventIndex += 1
            if self.LastEventIndex == len(self.EventHits):
                self.LastEventIndex = 0
            # skip events without hits
            while len(self.EventHits[self.LastEventIndex]) == 0:
                self.LastEventIndex += 1
                if self.LastEventIndex == len(self.EventHits):
                    self.LastEventIndex = 0
            # accumulate each hit's energy into its voxel
            # NOTE(review): a hit exactly at the upper edge (e.g. x == xmax)
            # would produce an out-of-range bin index.
            for i in self.EventHits[self.LastEventIndex]:
                xbin = (int) (((i[0] - xmin) / (xmax - xmin)) * self.XBins)
                ybin = (int) (((i[1] - ymin) / (ymax - ymin)) * self.YBins)
                zbin = (int) (((i[2] - zmin) / (zmax - zmin)) * self.ZBins)
                #print(bi, xbin, ybin, zbin)
                voxs[bi, xbin, ybin, zbin] += i[3]
            #fills event types
            one_hots[bi][self.EventTypes[self.LastEventIndex]] = 1
        return voxs, one_hots
    ###################################################################################################
    def test(self):
        """
        Main test function
        Returns
        -------
        bool
            True is everything went well, False in case of an error
        """
        return True
# END
###################################################################################################
| [
"ROOT.MGlobal",
"ROOT.MFileEventsSim",
"ROOT.MString",
"ROOT.gSystem.Load",
"collections.Counter",
"datetime.datetime.now",
"numpy.zeros",
"os.path.isdir",
"os.mkdir",
"ROOT.MDGeometryQuest",
"time.time"
] | [((3308, 3354), 'ROOT.gSystem.Load', 'M.gSystem.Load', (['"""$(MEGALIB)/lib/libMEGAlib.so"""'], {}), "('$(MEGALIB)/lib/libMEGAlib.so')\n", (3322, 3354), True, 'import ROOT as M\n'), ((3389, 3400), 'ROOT.MGlobal', 'M.MGlobal', ([], {}), '()\n', (3398, 3400), True, 'import ROOT as M\n'), ((3569, 3588), 'ROOT.MDGeometryQuest', 'M.MDGeometryQuest', ([], {}), '()\n', (3586, 3588), True, 'import ROOT as M\n'), ((3816, 3842), 'ROOT.MFileEventsSim', 'M.MFileEventsSim', (['Geometry'], {}), '(Geometry)\n', (3832, 3842), True, 'import ROOT as M\n'), ((10469, 10540), 'numpy.zeros', 'np.zeros', (['[bs, self.XBins, self.YBins, self.ZBins, 1]'], {'dtype': 'np.float32'}), '([bs, self.XBins, self.YBins, self.ZBins, 1], dtype=np.float32)\n', (10477, 10540), True, 'import numpy as np\n'), ((10556, 10603), 'numpy.zeros', 'np.zeros', (['[bs, self.MaxLabel]'], {'dtype': 'np.float32'}), '([bs, self.MaxLabel], dtype=np.float32)\n', (10564, 10603), True, 'import numpy as np\n'), ((5148, 5179), 'collections.Counter', 'collections.Counter', (['EventTypes'], {}), '(EventTypes)\n', (5167, 5179), False, 'import collections\n'), ((7002, 7030), 'os.path.isdir', 'os.path.isdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (7015, 7030), False, 'import os\n'), ((7038, 7061), 'os.mkdir', 'os.mkdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (7046, 7061), False, 'import os\n'), ((3237, 3248), 'time.time', 'time.time', ([], {}), '()\n', (3246, 3248), False, 'import time\n'), ((3619, 3642), 'ROOT.MString', 'M.MString', (['GeometryName'], {}), '(GeometryName)\n', (3628, 3642), True, 'import ROOT as M\n'), ((3862, 3886), 'ROOT.MString', 'M.MString', (['self.FileName'], {}), '(self.FileName)\n', (3871, 3886), True, 'import ROOT as M\n'), ((7761, 7772), 'time.time', 'time.time', ([], {}), '()\n', (7770, 7772), False, 'import time\n'), ((4361, 4378), 'ROOT.MString', 'M.MString', (['"""COMP"""'], {}), "('COMP')\n", (4370, 4378), True, 'import ROOT as M\n'), ((4483, 4500), 'ROOT.MString', 
'M.MString', (['"""PAIR"""'], {}), "('PAIR')\n", (4492, 4500), True, 'import ROOT as M\n'), ((8127, 8150), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8148, 8150), False, 'import math, datetime\n'), ((8013, 8024), 'time.time', 'time.time', ([], {}), '()\n', (8022, 8024), False, 'import time\n')] |
import json
import os
import keras
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
from keras.layers import Dropout, Dense, LSTM, TimeDistributed
from keras.models import Sequential
from sklearn.preprocessing import normalize, MinMaxScaler
"""
Created by <NAME> on 7/25/18.
Email : <EMAIL> or <EMAIL>
Website: http://ce.sharif.edu/~naghipourfar
Github: https://github.com/naghipourfar
Skype: mn7697np
"""
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Reframe a time series as a supervised-learning matrix.

    Each output row concatenates the lagged observations
    var*(t-n_in) ... var*(t-1) with the forecast observations
    var*(t) ... var*(t+n_out-1).

    Arguments:
        data: list (treated as a single variable) or 2-D array-like of observations.
        n_in: number of lag observations (input columns) per variable.
        n_out: number of forecast observations (output columns) per variable.
        dropnan: whether to drop rows containing NaN introduced by shifting.

    Returns:
        numpy.ndarray of shape (n_rows, n_vars * (n_in + n_out)).
    """
    n_vars = 1 if isinstance(data, list) else data.shape[1]
    df = pd.DataFrame(data)
    cols, names = [], []
    # input sequence (t-n, ... t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += ['var%d(t-%d)' % (j + 1, i) for j in range(n_vars)]
    # forecast sequence (t, t+1, ..., t+n)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += ['var%d(t)' % (j + 1) for j in range(n_vars)]
        else:
            names += ['var%d(t+%d)' % (j + 1, i) for j in range(n_vars)]
    # put it all together
    agg = pd.concat(cols, axis=1)
    agg.columns = names
    # drop rows with NaN values
    if dropnan:
        agg.dropna(inplace=True)
    # BUG FIX: DataFrame.as_matrix() was removed in pandas 1.0;
    # .values is the supported equivalent and returns the same ndarray.
    return agg.values
def normalize_data(data):
    """Rescale each row of `data` by scikit-learn's 'max' norm (row-wise
    division by the largest absolute value); the input itself is not modified."""
    scaled = normalize(data, norm='max', axis=1, copy=True)
    return scaled
def create_model(X, layers):
    """Build and compile a stacked-LSTM regression network.

    `layers[:-1]` gives the widths of the LSTM layers (middle layers are
    followed by Dropout(0.5)); `layers[-1]` is the width of the final
    time-distributed linear Dense output. `X` supplies the input shape
    (timesteps, features) for the first layer. Compiled with MAE loss and
    the Adam optimizer; the summary is printed before returning.
    """
    model = Sequential()
    last_hidden = len(layers) - 2
    for idx, width in enumerate(layers[:-1]):
        if idx == 0:
            # first layer needs the explicit input shape
            model.add(LSTM(width, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
        elif idx == last_hidden:
            # last hidden LSTM: no dropout after it
            model.add(LSTM(width, return_sequences=True))
        else:
            model.add(LSTM(width, return_sequences=True))
            model.add(Dropout(0.5))
    model.add(TimeDistributed(Dense(layers[-1], activation='linear')))
    model.compile(loss=keras.losses.mae, optimizer="adam")
    model.summary()
    return model
def download_data(coin):
    """Fetch up to 2000 days of daily price history for `coin` (vs. USD)
    from the CryptoCompare 'histoday' endpoint and return it as a DataFrame
    indexed by datetime.

    NOTE(review): assumes the response JSON contains a 'Data' list with a
    'time' field (Unix seconds); there is no handling of HTTP/JSON errors.
    """
    endpoint = 'https://min-api.cryptocompare.com/data/histoday'
    res = requests.get(endpoint + '?fsym=' + coin + '&tsym=USD&limit=2000')
    hist = pd.DataFrame(json.loads(res.content)['Data'])
    # use the Unix timestamp column as a proper datetime index
    hist = hist.set_index('time')
    hist.index = pd.to_datetime(hist.index, unit='s')
    return hist
def load_data(filename, sequence_len=100, n_lag=3, n_seq=3, target_idx=5, output_cols=None, test_split=0.1):
    """Load the CSV, keep only the target column, reframe it as a supervised
    set with `n_lag` inputs and `n_seq` outputs, scale to (-1, 1), plot the
    train/test split, and return the reshaped (samples, 1, features) splits
    plus the fitted scaler.

    NOTE(review): `sequence_len` and `output_cols` are accepted but never
    used. The scaler is fitted on the training portion only, which is correct.
    """
    raw_data = pd.read_csv(filename, header=None)
    # NOTE(review): with header=None the columns are integers, so the 'Date'
    # test below can never be true; also it checks 'Date' but drops 'date'
    # (case mismatch) -- the else-branch is the one that actually runs.
    if raw_data.columns.__contains__('Date'):
        raw_data = raw_data.drop(['date'], axis=1)
    else:
        # keep only the target column, drop everything else
        deleted_cols = [i for i in range(raw_data.shape[1]) if i != target_idx]
        # for col in raw_data.columns:
        #     if isinstance(raw_data.iloc[0, col], str): # is either symbol or date
        #         deleted_cols.append(col)
        raw_data = raw_data.drop(deleted_cols, axis=1)
    raw_data_values = series_to_supervised(raw_data.values, n_in=n_lag, n_out=n_seq, dropnan=True)
    print(pd.DataFrame(raw_data_values).head())
    scalar = MinMaxScaler((-1, 1))
    # chronological split: last `test_split` fraction becomes the test set
    test_size = int(test_split * raw_data_values.shape[0])
    train_data = np.array(raw_data_values[:-test_size, :])
    test_data = np.array(raw_data_values[-test_size:, :])
    # fit the scaler on training data only; reuse it for the test data
    train_data = scalar.fit_transform(train_data)
    test_data = scalar.transform(test_data)
    plt.figure(figsize=(20, 10))
    x = np.arange(0, raw_data_values.shape[0])
    plt.plot(x[:train_data.shape[0]], train_data[:, 0], label="Training Data")
    plt.plot(x[train_data.shape[0]:], test_data[:, 0], label="Test Data")
    plt.legend(loc="best")
    plt.show()
    data = np.concatenate([train_data, test_data], axis=0)
    # first n_lag columns are inputs, the rest are targets
    x_data = np.array(data[:, :n_lag])
    y_data = np.array(data[:, n_lag:])
    x_train = train_data[:, :n_lag]
    y_train = train_data[:, n_lag:]
    x_test = test_data[:, :n_lag]
    y_test = test_data[:, n_lag:]
    # reshape to (samples, timesteps=1, features) as expected by the LSTM
    x_train = x_train.reshape((x_train.shape[0], 1, x_train.shape[1]))
    y_train = y_train.reshape((y_train.shape[0], 1, y_train.shape[1]))
    x_test = x_test.reshape((x_test.shape[0], 1, x_test.shape[1]))
    y_test = y_test.reshape((y_test.shape[0], 1, y_test.shape[1]))
    print("x_data has shape\t:\t", x_data.shape)
    print("y_data has shape\t:\t", y_data.shape)
    print("x_train has shape\t:\t", x_train.shape)
    print("y_train has shape\t:\t", y_train.shape)
    print("x_test has shape\t:\t", x_test.shape)
    print("y_test has shape\t:\t", y_test.shape)
    return x_train, y_train, x_test, y_test, x_data, y_data, scalar
def plot(data, predictions):
    """Draw the ground-truth series (blue) and the predictions (red) and show the figure."""
    for series, colour, tag in ((data, "blue", "data"), (predictions, "red", "prediction")):
        plt.plot(series, color=colour, label=tag)
    plt.show()
def plot_seq(data, predictions, n_seq, start_idx=1606, step=1):
    """Overlay per-sample predicted sequences on top of the full data series.

    Each prediction i (of length n_seq) is drawn starting at x-position
    start_idx + i; for n_seq == 1 predictions are drawn as red dots.

    NOTE(review): when n_seq == 1 the predictions are plotted twice (once as
    a line before the loop, once per-point inside it), 'ro' already encodes
    the red colour so color="red" is redundant, and the repeated label=
    "prediction" produces duplicate legend entries.
    """
    predictions = np.array(predictions)
    plt.figure(figsize=(20, 10))
    plt.plot(data, color="blue", label="data")
    if n_seq == 1:
        x = np.arange(start_idx, start_idx + predictions.shape[0])
        plt.plot(x, predictions, color='red', label="prediction")
    for i in range(predictions.shape[0]):
        # x-range covered by the i-th predicted sequence
        x = np.arange(start_idx + i, start_idx + i + n_seq)
        if n_seq != 1:
            plt.plot(x, predictions[i], color="red", label="prediction")
        else:
            plt.plot(x, predictions[i], 'ro', color="red", label="prediction")
        if step != 1:
            start_idx += step
    # plt.legend(handles=[data_plot, prediction_line], labels=['data', 'prediction'], loc="best")
    plt.ylabel("Scaled Close Price")
    plt.xlabel("Time")
    plt.show()
def main():
    """Driver: download/cache BTC daily data, train (or load) the LSTM
    predictor, and plot its predictions on the test set."""
    filename = "../Data/data.csv"
    n_lag = 5 # past data to be used
    n_seq = 1 # number of future days to predict
    model_path = "./predictor%d_%d.h5" % (n_seq, n_lag)
    target_idx = 5
    epochs = 5
    batch_size = 64
    tensorboard = keras.callbacks.TensorBoard(log_dir='./Graph/', histogram_freq=0,
                                              write_graph=True, write_images=True)
    # fetch and cache the raw data on first run, otherwise read the cached CSV
    if not os.path.exists(filename):
        coin = 'BTC'
        data = download_data(coin)
        data.to_csv(filename)
        x_train, y_train, x_test, y_test, x_data, y_data, scalar = load_data(filename, sequence_len=50,
                                                                             n_lag=n_lag,
                                                                             n_seq=n_seq,
                                                                             target_idx=target_idx,
                                                                             output_cols=None)
    else:
        x_train, y_train, x_test, y_test, x_data, y_data, scalar = load_data(filename,
                                                                             sequence_len=50,
                                                                             n_lag=n_lag,
                                                                             n_seq=n_seq,
                                                                             test_split=0.1,
                                                                             target_idx=target_idx,
                                                                             output_cols=None)
    # train a fresh model unless a saved one already exists on disk
    if not os.path.exists(model_path):
        model = create_model(x_train, layers=[1024, 512, 256, 128, n_seq])
        model.fit(x=x_train,
                  y=y_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  verbose=2,
                  # validation_split=0.2,
                  validation_data=(x_test, y_test),
                  callbacks=[tensorboard],
                  shuffle=True)
        model.save(model_path)
        print("The network has been saved!")
    else:
        print("The network exists. Trying to load ...")
        model = keras.models.load_model(model_path)
        print("The network has been loaded!")
    # candidate_data = x_data[[i for i in range(0, x_data.shape[0], 50)], :]
    # candidate_data = candidate_data.reshape((candidate_data.shape[0], 1, candidate_data.shape[1]))
    candidate_data = x_test
    predictions = model.predict(candidate_data)
    # print(pd.DataFrame(predictions).head())
    plot_seq(y_data[:, 0], predictions.reshape((predictions.shape[0], 1)), n_seq, start_idx=x_train.shape[0], step=1)
# Entry point when executed as a script (does nothing on import).
if __name__ == '__main__':
    main()
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.array",
"keras.layers.Dense",
"numpy.arange",
"pandas.to_datetime",
"os.path.exists",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"keras.layers.LSTM",
"numpy.concatenate",
"pandas.DataFrame",
"sklearn.preprocessing.MinMaxScaler"... | [((596, 614), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (608, 614), True, 'import pandas as pd\n'), ((1141, 1164), 'pandas.concat', 'pd.concat', (['cols'], {'axis': '(1)'}), '(cols, axis=1)\n', (1150, 1164), True, 'import pandas as pd\n'), ((1337, 1383), 'sklearn.preprocessing.normalize', 'normalize', (['data'], {'norm': '"""max"""', 'axis': '(1)', 'copy': '(True)'}), "(data, norm='max', axis=1, copy=True)\n", (1346, 1383), False, 'from sklearn.preprocessing import normalize, MinMaxScaler\n'), ((1427, 1439), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1437, 1439), False, 'from keras.models import Sequential\n'), ((2070, 2135), 'requests.get', 'requests.get', (["(endpoint + '?fsym=' + coin + '&tsym=USD&limit=2000')"], {}), "(endpoint + '?fsym=' + coin + '&tsym=USD&limit=2000')\n", (2082, 2135), False, 'import requests\n'), ((2244, 2280), 'pandas.to_datetime', 'pd.to_datetime', (['hist.index'], {'unit': '"""s"""'}), "(hist.index, unit='s')\n", (2258, 2280), True, 'import pandas as pd\n'), ((2423, 2457), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'header': 'None'}), '(filename, header=None)\n', (2434, 2457), True, 'import pandas as pd\n'), ((3031, 3052), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', (['(-1, 1)'], {}), '((-1, 1))\n', (3043, 3052), False, 'from sklearn.preprocessing import normalize, MinMaxScaler\n'), ((3131, 3172), 'numpy.array', 'np.array', (['raw_data_values[:-test_size, :]'], {}), '(raw_data_values[:-test_size, :])\n', (3139, 3172), True, 'import numpy as np\n'), ((3189, 3230), 'numpy.array', 'np.array', (['raw_data_values[-test_size:, :]'], {}), '(raw_data_values[-test_size:, :])\n', (3197, 3230), True, 'import numpy as np\n'), ((3331, 3359), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (3341, 3359), True, 'import matplotlib.pyplot as plt\n'), ((3368, 3406), 'numpy.arange', 'np.arange', (['(0)', 
'raw_data_values.shape[0]'], {}), '(0, raw_data_values.shape[0])\n', (3377, 3406), True, 'import numpy as np\n'), ((3411, 3485), 'matplotlib.pyplot.plot', 'plt.plot', (['x[:train_data.shape[0]]', 'train_data[:, 0]'], {'label': '"""Training Data"""'}), "(x[:train_data.shape[0]], train_data[:, 0], label='Training Data')\n", (3419, 3485), True, 'import matplotlib.pyplot as plt\n'), ((3490, 3559), 'matplotlib.pyplot.plot', 'plt.plot', (['x[train_data.shape[0]:]', 'test_data[:, 0]'], {'label': '"""Test Data"""'}), "(x[train_data.shape[0]:], test_data[:, 0], label='Test Data')\n", (3498, 3559), True, 'import matplotlib.pyplot as plt\n'), ((3564, 3586), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (3574, 3586), True, 'import matplotlib.pyplot as plt\n'), ((3591, 3601), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3599, 3601), True, 'import matplotlib.pyplot as plt\n'), ((3614, 3661), 'numpy.concatenate', 'np.concatenate', (['[train_data, test_data]'], {'axis': '(0)'}), '([train_data, test_data], axis=0)\n', (3628, 3661), True, 'import numpy as np\n'), ((3675, 3700), 'numpy.array', 'np.array', (['data[:, :n_lag]'], {}), '(data[:, :n_lag])\n', (3683, 3700), True, 'import numpy as np\n'), ((3714, 3739), 'numpy.array', 'np.array', (['data[:, n_lag:]'], {}), '(data[:, n_lag:])\n', (3722, 3739), True, 'import numpy as np\n'), ((4564, 4606), 'matplotlib.pyplot.plot', 'plt.plot', (['data'], {'color': '"""blue"""', 'label': '"""data"""'}), "(data, color='blue', label='data')\n", (4572, 4606), True, 'import matplotlib.pyplot as plt\n'), ((4611, 4665), 'matplotlib.pyplot.plot', 'plt.plot', (['predictions'], {'color': '"""red"""', 'label': '"""prediction"""'}), "(predictions, color='red', label='prediction')\n", (4619, 4665), True, 'import matplotlib.pyplot as plt\n'), ((4670, 4680), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4678, 4680), True, 'import matplotlib.pyplot as plt\n'), ((4765, 4786), 'numpy.array', 
'np.array', (['predictions'], {}), '(predictions)\n', (4773, 4786), True, 'import numpy as np\n'), ((4791, 4819), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (4801, 4819), True, 'import matplotlib.pyplot as plt\n'), ((4824, 4866), 'matplotlib.pyplot.plot', 'plt.plot', (['data'], {'color': '"""blue"""', 'label': '"""data"""'}), "(data, color='blue', label='data')\n", (4832, 4866), True, 'import matplotlib.pyplot as plt\n'), ((5464, 5496), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Scaled Close Price"""'], {}), "('Scaled Close Price')\n", (5474, 5496), True, 'import matplotlib.pyplot as plt\n'), ((5501, 5519), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (5511, 5519), True, 'import matplotlib.pyplot as plt\n'), ((5524, 5534), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5532, 5534), True, 'import matplotlib.pyplot as plt\n'), ((5800, 5906), 'keras.callbacks.TensorBoard', 'keras.callbacks.TensorBoard', ([], {'log_dir': '"""./Graph/"""', 'histogram_freq': '(0)', 'write_graph': '(True)', 'write_images': '(True)'}), "(log_dir='./Graph/', histogram_freq=0,\n write_graph=True, write_images=True)\n", (5827, 5906), False, 'import keras\n'), ((4898, 4952), 'numpy.arange', 'np.arange', (['start_idx', '(start_idx + predictions.shape[0])'], {}), '(start_idx, start_idx + predictions.shape[0])\n', (4907, 4952), True, 'import numpy as np\n'), ((4961, 5018), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'predictions'], {'color': '"""red"""', 'label': '"""prediction"""'}), "(x, predictions, color='red', label='prediction')\n", (4969, 5018), True, 'import matplotlib.pyplot as plt\n'), ((5073, 5120), 'numpy.arange', 'np.arange', (['(start_idx + i)', '(start_idx + i + n_seq)'], {}), '(start_idx + i, start_idx + i + n_seq)\n', (5082, 5120), True, 'import numpy as np\n'), ((5960, 5984), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (5974, 5984), False, 'import 
os\n'), ((7222, 7248), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (7236, 7248), False, 'import os\n'), ((7813, 7848), 'keras.models.load_model', 'keras.models.load_model', (['model_path'], {}), '(model_path)\n', (7836, 7848), False, 'import keras\n'), ((1787, 1799), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1794, 1799), False, 'from keras.layers import Dropout, Dense, LSTM, TimeDistributed\n'), ((1831, 1869), 'keras.layers.Dense', 'Dense', (['layers[-1]'], {'activation': '"""linear"""'}), "(layers[-1], activation='linear')\n", (1836, 1869), False, 'from keras.layers import Dropout, Dense, LSTM, TimeDistributed\n'), ((2160, 2183), 'json.loads', 'json.loads', (['res.content'], {}), '(res.content)\n', (2170, 2183), False, 'import json\n'), ((5156, 5216), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'predictions[i]'], {'color': '"""red"""', 'label': '"""prediction"""'}), "(x, predictions[i], color='red', label='prediction')\n", (5164, 5216), True, 'import matplotlib.pyplot as plt\n'), ((5243, 5309), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'predictions[i]', '"""ro"""'], {'color': '"""red"""', 'label': '"""prediction"""'}), "(x, predictions[i], 'ro', color='red', label='prediction')\n", (5251, 5309), True, 'import matplotlib.pyplot as plt\n'), ((1518, 1594), 'keras.layers.LSTM', 'LSTM', (['layers[0]'], {'input_shape': '(X.shape[1], X.shape[2])', 'return_sequences': '(True)'}), '(layers[0], input_shape=(X.shape[1], X.shape[2]), return_sequences=True)\n', (1522, 1594), False, 'from keras.layers import Dropout, Dense, LSTM, TimeDistributed\n'), ((2979, 3008), 'pandas.DataFrame', 'pd.DataFrame', (['raw_data_values'], {}), '(raw_data_values)\n', (2991, 3008), True, 'import pandas as pd\n'), ((1653, 1691), 'keras.layers.LSTM', 'LSTM', (['layers[i]'], {'return_sequences': '(True)'}), '(layers[i], return_sequences=True)\n', (1657, 1691), False, 'from keras.layers import Dropout, Dense, LSTM, TimeDistributed\n'), ((1729, 
1767), 'keras.layers.LSTM', 'LSTM', (['layers[i]'], {'return_sequences': '(True)'}), '(layers[i], return_sequences=True)\n', (1733, 1767), False, 'from keras.layers import Dropout, Dense, LSTM, TimeDistributed\n')] |
"""Forward and back projector for PET data reconstruction"""
import logging
import cuvec as cu
import numpy as np
from .. import mmraux
from ..img import mmrimg
from . import petprj
# module-level logger for this projector module
log = logging.getLogger(__name__)
# default subset-index array: a single negative entry is interpreted by the
# projectors as "use all transaxial bins" (i.e. no subsets)
ISUB_DEFAULT = np.array([-1], dtype=np.int32)
# ========================================================================
# transaxial (one-slice) projector
# ------------------------------------------------------------------------
def trnx_prj(scanner_params, sino=None, im=None):
    """Run the transaxial (one-slice) projector.

    At most one of `sino` or `im` may be supplied; whichever is missing is
    replaced with a zero-filled buffer of the appropriate size. The
    low-level projector `petprj.tprj` is then invoked, and its two per-bin
    work buffers are returned in a dict under keys 'tv' and 'tt'
    (presumably voxel-index and intersection data -- see the projector
    source for their exact layout).
    """
    Cnt = scanner_params['Cnt']
    txLUT = scanner_params['txLUT']
    if sino is not None and im is not None:
        raise ValueError('Only one input should be given: sinogram or image.')
    # substitute zero-filled arrays for whichever input is absent
    if sino is None:
        sino = np.zeros((txLUT['Naw'],), dtype=np.float32)
    if im is None:
        im = np.zeros((Cnt['SO_IMY'], Cnt['SO_IMX']), dtype=np.float32)
    # output buffers sized per active transaxial bin
    naw = txLUT['Naw']
    tv = np.zeros(Cnt['NTV'] * naw, dtype=np.uint8)
    tt = np.zeros(Cnt['NTT'] * naw, dtype=np.float32)
    petprj.tprj(sino, im, tv, tt, txLUT, Cnt)
    return {'tv': tv, 'tt': tt}
# ========================================================================
# forward projector
# ------------------------------------------------------------------------
def frwd_prj(im, scanner_params, isub=ISUB_DEFAULT, dev_out=False, attenuation=False,
             fullsino_out=True, output=None):
    """
    Calculate forward projection (a set of sinograms) for the provided input image.
    Arguments:
        im -- input image (can be emission or mu-map image).
        scanner_params -- dictionary of all scanner parameters, containing scanner constants,
            transaxial and axial look up tables (LUT).
        isub -- array of transaxial indices of all sinograms (angles x bins) used for subsets.
            when the first element is negative, all transaxial bins are used (as in pure EM-ML).
        dev_out -- if True, output sinogram is in the device form, i.e., with two dimensions
            (# bins/angles, # sinograms) instead of default three (# sinograms, # bins, # angles).
        attenuation -- controls whether emission or LOR attenuation probability sinogram
            is calculated; the default is False, meaning emission sinogram; for attenuation
            calculations (attenuation=True), the exponential of the negative of the integrated
            mu-values along LOR path is taken at the end.
        fullsino_out -- if True and subsets are in use, scatter the subset bins back into
            a full-size sinogram (non-subset bins remain zero).
        output(CuVec, optional) -- output sinogram.
    Returns the forward-projected sinogram (gaps restored unless dev_out=True).
    """
    # Get particular scanner parameters: Constants, transaxial and axial LUTs
    Cnt = scanner_params['Cnt']
    txLUT = scanner_params['txLUT']
    axLUT = scanner_params['axLUT']
    # >choose between attenuation forward projection (mu-map is the input)
    # >or the default for emission image forward projection
    if attenuation:
        att = 1
    else:
        att = 0
    # > number of sinograms depends on the axial compression (span)
    # NOTE(review): no else-branch -- an unsupported Cnt['SPN'] value would
    # leave `nsinos` undefined and raise NameError below.
    if Cnt['SPN'] == 1:
        # number of rings calculated for the given ring range
        # (optionally we can use only part of the axial FOV)
        NRNG_c = Cnt['RNG_END'] - Cnt['RNG_STRT']
        # number of sinos in span-1
        nsinos = NRNG_c**2
        # correct for the max. ring difference in the full axial extent
        # (don't use ring range (1,63) as for this case no correction)
        if NRNG_c == 64:
            nsinos -= 12
    elif Cnt['SPN'] == 11:
        nsinos = Cnt['NSN11']
    elif Cnt['SPN'] == 0:
        nsinos = Cnt['NSEG0']
    # > accept the image in Siemens (z,y,x) layout (converted to device layout)
    # > or already in device (y,x,z) layout; 'r'-prefixed constants allow a
    # > reduced axial FOV variant
    if im.shape[0] == Cnt['SO_IMZ'] and im.shape[1] == Cnt['SO_IMY'] and im.shape[2] == Cnt[
        'SO_IMX']:
        ims = mmrimg.convert2dev(im, Cnt)
    elif im.shape[0] == Cnt['SZ_IMX'] and im.shape[1] == Cnt['SZ_IMY'] and im.shape[2] == Cnt[
        'SZ_IMZ']:
        ims = im
    elif im.shape[0] == Cnt['rSO_IMZ'] and im.shape[1] == Cnt['SO_IMY'] and im.shape[2] == Cnt[
        'SO_IMX']:
        ims = mmrimg.convert2dev(im, Cnt)
    elif im.shape[0] == Cnt['SZ_IMX'] and im.shape[1] == Cnt['SZ_IMY'] and im.shape[2] == Cnt[
        'rSZ_IMZ']:
        ims = im
    else:
        raise ValueError('wrong image size;'
                         ' it has to be one of these: (z,y,x) = (127,344,344)'
                         ' or (y,x,z) = (320,320,128)')
    log.debug('number of sinos: %d', nsinos)
    # predefine the sinogram.
    # if subsets are used then only preallocate those bins which will be used.
    if isub[0] < 0:
        out_shape = txLUT['Naw'], nsinos
    else:
        out_shape = len(isub), nsinos
    # > allocate a new device array unless a caller-provided output is given
    if output is None:
        sinog = cu.zeros(out_shape, dtype=np.float32)
    else:
        sinog = cu.asarray(output)
        assert sinog.shape == out_shape
        assert sinog.dtype == np.dtype('float32')
    # --------------------
    petprj.fprj(sinog.cuvec, cu.asarray(ims).cuvec, txLUT, axLUT, isub, Cnt, att)
    # --------------------
    # get the sinogram bins in a full sinogram if requested
    if fullsino_out and isub[0] >= 0:
        sino = cu.zeros((txLUT['Naw'], nsinos), dtype=np.float32)
        sino[isub, :] = sinog
    else:
        sino = sinog
    # put the gaps back to form displayable sinogram
    if not dev_out and fullsino_out:
        sino = mmraux.putgaps(sino, txLUT, Cnt)
    return sino
# ========================================================================
# back projector
# ------------------------------------------------------------------------
def back_prj(sino, scanner_params, isub=ISUB_DEFAULT, dev_out=False):
    '''
    Calculate the back projection of the provided input sinogram into image space.
    Arguments:
        sino -- input emission sinogram to be back projected to the image space.
        scanner_params -- dictionary of all scanner parameters, containing scanner constants,
            transaxial and axial look up tables (LUT).
        isub -- array of transaxial indices of all sinograms (angles x bins) used for subsets;
            when the first element is negative, all transaxial bins are used (as in pure EM-ML).
        dev_out -- if True, return the image in the GPU-optimised (device) layout;
            otherwise convert it to the standard Siemens image shape.
    Returns:
        bimg -- the back-projected image (device layout or Siemens layout, see dev_out).
    '''
    # Get particular scanner parameters: Constants, transaxial and axial LUTs
    Cnt = scanner_params['Cnt']
    txLUT = scanner_params['txLUT']
    axLUT = scanner_params['axLUT']
    # Determine the expected number of sinograms for the configured span.
    # NOTE(review): if Cnt['SPN'] is none of {1, 11, 0}, `nsinos` stays undefined
    # and the shape checks below raise NameError — presumably SPN is always one
    # of these values; confirm against the caller.
    if Cnt['SPN'] == 1:
        # number of rings calculated for the given ring range
        # (optionally we can use only part of the axial FOV)
        NRNG_c = Cnt['RNG_END'] - Cnt['RNG_STRT']
        # number of sinos in span-1
        nsinos = NRNG_c**2
        # correct for the max. ring difference in the full axial extent
        # (don't use ring range (1,63) as for this case no correction)
        if NRNG_c == 64:
            nsinos -= 12
    elif Cnt['SPN'] == 11:
        nsinos = Cnt['NSN11']
    elif Cnt['SPN'] == 0:
        nsinos = Cnt['NSEG0']
    # > check first the Siemens default sinogram;
    # > for this default shape only full sinograms are expected--no subsets.
    if len(sino.shape) == 3:
        if sino.shape[0] != nsinos or sino.shape[1] != Cnt['NSANGLES'] or sino.shape[2] != Cnt[
            'NSBINS']:
            raise ValueError('Unexpected sinogram array dimensions/shape for Siemens defaults.')
        # remove the detector gaps to get the compact (gap-free) transaxial layout
        sinog = mmraux.remgaps(sino, txLUT, Cnt)
    elif len(sino.shape) == 2:
        # 2D input: already gap-free; validate the transaxial size against
        # either the full bin count or the subset size.
        if isub[0] < 0 and sino.shape[0] != txLUT["Naw"]:
            raise ValueError('Unexpected number of transaxial elements in the full sinogram.')
        elif isub[0] >= 0 and sino.shape[0] != len(isub):
            raise ValueError('Unexpected number of transaxial elements in the subset sinogram.')
        # > check if the number of sinograms is correct
        if sino.shape[1] != nsinos:
            raise ValueError('Inconsistent number of sinograms in the array.')
        # > when found the dimensions/shape are fine:
        sinog = sino
    else:
        raise ValueError('Unexpected shape of the input sinogram.')
    # predefine the output image depending on the number of rings used
    if Cnt['SPN'] == 1 and 'rSZ_IMZ' in Cnt:
        nvz = Cnt['rSZ_IMZ']
    else:
        nvz = Cnt['SZ_IMZ']
    bimg = cu.zeros((Cnt['SZ_IMX'], Cnt['SZ_IMY'], nvz), dtype=np.float32)
    # > run back-projection
    petprj.bprj(bimg.cuvec, cu.asarray(sinog).cuvec, txLUT, axLUT, isub, Cnt)
    if not dev_out:
        # > change from GPU optimised image dimensions to the standard Siemens shape
        bimg = mmrimg.convert2e7(bimg, Cnt)
    return bimg
| [
"logging.getLogger",
"numpy.array",
"numpy.zeros",
"cuvec.asarray",
"numpy.dtype",
"cuvec.zeros"
] | [((191, 218), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (208, 218), False, 'import logging\n'), ((234, 264), 'numpy.array', 'np.array', (['[-1]'], {'dtype': 'np.int32'}), '([-1], dtype=np.int32)\n', (242, 264), True, 'import numpy as np\n'), ((982, 1031), 'numpy.zeros', 'np.zeros', (["(Cnt['NTV'] * Cnt['Naw'])"], {'dtype': 'np.uint8'}), "(Cnt['NTV'] * Cnt['Naw'], dtype=np.uint8)\n", (990, 1031), True, 'import numpy as np\n'), ((1041, 1092), 'numpy.zeros', 'np.zeros', (["(Cnt['NTT'] * Cnt['Naw'])"], {'dtype': 'np.float32'}), "(Cnt['NTT'] * Cnt['Naw'], dtype=np.float32)\n", (1049, 1092), True, 'import numpy as np\n'), ((8089, 8152), 'cuvec.zeros', 'cu.zeros', (["(Cnt['SZ_IMX'], Cnt['SZ_IMY'], nvz)"], {'dtype': 'np.float32'}), "((Cnt['SZ_IMX'], Cnt['SZ_IMY'], nvz), dtype=np.float32)\n", (8097, 8152), True, 'import cuvec as cu\n'), ((837, 880), 'numpy.zeros', 'np.zeros', (["(txLUT['Naw'],)"], {'dtype': 'np.float32'}), "((txLUT['Naw'],), dtype=np.float32)\n", (845, 880), True, 'import numpy as np\n'), ((913, 971), 'numpy.zeros', 'np.zeros', (["(Cnt['SO_IMY'], Cnt['SO_IMX'])"], {'dtype': 'np.float32'}), "((Cnt['SO_IMY'], Cnt['SO_IMX']), dtype=np.float32)\n", (921, 971), True, 'import numpy as np\n'), ((4607, 4644), 'cuvec.zeros', 'cu.zeros', (['out_shape'], {'dtype': 'np.float32'}), '(out_shape, dtype=np.float32)\n', (4615, 4644), True, 'import cuvec as cu\n'), ((4671, 4689), 'cuvec.asarray', 'cu.asarray', (['output'], {}), '(output)\n', (4681, 4689), True, 'import cuvec as cu\n'), ((5030, 5080), 'cuvec.zeros', 'cu.zeros', (["(txLUT['Naw'], nsinos)"], {'dtype': 'np.float32'}), "((txLUT['Naw'], nsinos), dtype=np.float32)\n", (5038, 5080), True, 'import cuvec as cu\n'), ((4760, 4779), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (4768, 4779), True, 'import numpy as np\n'), ((4836, 4851), 'cuvec.asarray', 'cu.asarray', (['ims'], {}), '(ims)\n', (4846, 4851), True, 'import cuvec as cu\n'), ((8210, 8227), 
'cuvec.asarray', 'cu.asarray', (['sinog'], {}), '(sinog)\n', (8220, 8227), True, 'import cuvec as cu\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""tests for eig"""
import unittest
import numpy as np
from sknetwork.linalg import LanczosEig, HalkoEig, SparseLR
from sknetwork.data import miserables, karate_club
def eigenvector_err(matrix, eigenvectors, eigenvalues):
    """Norm of the eigen-equation residual ``A v - v * lambda`` for all pairs."""
    transformed = matrix.dot(eigenvectors)
    rescaled = eigenvectors * eigenvalues
    return np.linalg.norm(transformed - rescaled)
# noinspection DuplicatedCode
class TestSolvers(unittest.TestCase):

    def setUp(self):
        """Build the Miserables adjacency and a sparse-low-rank variant of it."""
        self.adjacency = miserables()
        self.random_state = np.random.RandomState(123)
        n_nodes = self.adjacency.shape[0]
        perturbation = np.random.random(n_nodes)
        self.slr = SparseLR(self.adjacency, [(perturbation, perturbation)])

    def test_lanczos(self):
        """Lanczos solver returns two valid eigenpairs on dense and SLR inputs."""
        solver = LanczosEig('LM')
        for matrix in (self.adjacency, self.slr):
            solver.fit(matrix, 2)
            self.assertEqual(len(solver.eigenvalues_), 2)
            self.assertAlmostEqual(eigenvector_err(matrix, solver.eigenvectors_, solver.eigenvalues_), 0)
        small_graph = karate_club()
        solver = LanczosEig('SM')
        solver.fit(small_graph, 2)
        self.assertEqual(len(solver.eigenvalues_), 2)
        self.assertAlmostEqual(eigenvector_err(small_graph, solver.eigenvectors_, solver.eigenvalues_), 0)

    def test_halko(self):
        """Halko solver returns two valid eigenpairs on dense and SLR inputs."""
        solver = HalkoEig('LM', random_state=self.random_state)
        for matrix in (self.adjacency, self.slr):
            solver.fit(matrix, 2)
            self.assertEqual(len(solver.eigenvalues_), 2)
            self.assertAlmostEqual(eigenvector_err(matrix, solver.eigenvectors_, solver.eigenvalues_), 0)
        solver = HalkoEig('SM', random_state=self.random_state)
        solver.fit(self.adjacency, 2)
        self.assertEqual(len(solver.eigenvalues_), 2)
        # The eigenvector residual for the 'SM' variant is deliberately not asserted.

    def test_compare_solvers(self):
        """Lanczos and Halko agree on the leading eigenvalues."""
        lanczos = LanczosEig('LM')
        halko = HalkoEig('LM', random_state=self.random_state)
        for matrix in (self.adjacency, self.slr):
            lanczos.fit(matrix, 2)
            halko.fit(matrix, 2)
            self.assertAlmostEqual(np.linalg.norm(lanczos.eigenvalues_ - halko.eigenvalues_), 0.)
| [
"sknetwork.linalg.LanczosEig",
"numpy.random.random",
"sknetwork.linalg.SparseLR",
"numpy.linalg.norm",
"sknetwork.linalg.HalkoEig",
"sknetwork.data.miserables",
"sknetwork.data.karate_club",
"numpy.random.RandomState"
] | [((397, 416), 'numpy.linalg.norm', 'np.linalg.norm', (['err'], {}), '(err)\n', (411, 416), True, 'import numpy as np\n'), ((599, 611), 'sknetwork.data.miserables', 'miserables', ([], {}), '()\n', (609, 611), False, 'from sknetwork.data import miserables, karate_club\n'), ((640, 666), 'numpy.random.RandomState', 'np.random.RandomState', (['(123)'], {}), '(123)\n', (661, 666), True, 'import numpy as np\n'), ((715, 734), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (731, 734), True, 'import numpy as np\n'), ((754, 788), 'sknetwork.linalg.SparseLR', 'SparseLR', (['self.adjacency', '[(x, x)]'], {}), '(self.adjacency, [(x, x)])\n', (762, 788), False, 'from sknetwork.linalg import LanczosEig, HalkoEig, SparseLR\n'), ((835, 851), 'sknetwork.linalg.LanczosEig', 'LanczosEig', (['"""LM"""'], {}), "('LM')\n", (845, 851), False, 'from sknetwork.linalg import LanczosEig, HalkoEig, SparseLR\n'), ((1266, 1279), 'sknetwork.data.karate_club', 'karate_club', ([], {}), '()\n', (1277, 1279), False, 'from sknetwork.data import miserables, karate_club\n'), ((1297, 1313), 'sknetwork.linalg.LanczosEig', 'LanczosEig', (['"""SM"""'], {}), "('SM')\n", (1307, 1313), False, 'from sknetwork.linalg import LanczosEig, HalkoEig, SparseLR\n'), ((1550, 1596), 'sknetwork.linalg.HalkoEig', 'HalkoEig', (['"""LM"""'], {'random_state': 'self.random_state'}), "('LM', random_state=self.random_state)\n", (1558, 1596), False, 'from sknetwork.linalg import LanczosEig, HalkoEig, SparseLR\n'), ((2008, 2054), 'sknetwork.linalg.HalkoEig', 'HalkoEig', (['"""SM"""'], {'random_state': 'self.random_state'}), "('SM', random_state=self.random_state)\n", (2016, 2054), False, 'from sknetwork.linalg import LanczosEig, HalkoEig, SparseLR\n'), ((2314, 2330), 'sknetwork.linalg.LanczosEig', 'LanczosEig', (['"""LM"""'], {}), "('LM')\n", (2324, 2330), False, 'from sknetwork.linalg import LanczosEig, HalkoEig, SparseLR\n'), ((2347, 2393), 'sknetwork.linalg.HalkoEig', 'HalkoEig', (['"""LM"""'], 
{'random_state': 'self.random_state'}), "('LM', random_state=self.random_state)\n", (2355, 2393), False, 'from sknetwork.linalg import LanczosEig, HalkoEig, SparseLR\n'), ((2502, 2559), 'numpy.linalg.norm', 'np.linalg.norm', (['(lanczos.eigenvalues_ - halko.eigenvalues_)'], {}), '(lanczos.eigenvalues_ - halko.eigenvalues_)\n', (2516, 2559), True, 'import numpy as np\n'), ((2661, 2718), 'numpy.linalg.norm', 'np.linalg.norm', (['(lanczos.eigenvalues_ - halko.eigenvalues_)'], {}), '(lanczos.eigenvalues_ - halko.eigenvalues_)\n', (2675, 2718), True, 'import numpy as np\n')] |
import logging
import string
import numpy as np
from ...tools.constants import DISEASE_PLACEHOLDER
from ...tools.constants import GENE_PLACEHOLDER
module_logger = logging.getLogger(__name__)
def generate_embedding_matrix(sentences_tokenized, word_vectors, word_to_index, max_length, min_words_mapped=0,
                              replace_disease_gene_tokens=True):
    """
    Build a word-vector feature tensor for a set of tokenized sentences.
    :param sentences_tokenized: list of list of strings - the tokenized sentences
    :param word_vectors: dict mapping integer word indices to their word vectors
    :param word_to_index: dict mapping words to their integer index in word_vectors
    :param max_length: the maximal number of words to be converted to word vectors in each sentence
    :param min_words_mapped: the minimal number of words (not counting tokens representing tagged diseases and genes)
        that need to be mapped to word vectors in each sentence. If fewer words are mapped in a given sentence,
        the matrix corresponding to the sentence is filled with numpy NaNs.
    :param replace_disease_gene_tokens: boolean indicating if tokens representing tagged diseases and genes
        are mapped to the word vectors for 'disease' and 'gene', respectively. If False, the tokens are ignored.
    :return: a three dimensional numpy array, first dimension is sentence, second is word, third is word vector
    """
    n_samples = len(sentences_tokenized)
    vector_dim = len(word_vectors[0])
    embedding_matrix = np.zeros((n_samples, max_length, vector_dim))
    disease_token = DISEASE_PLACEHOLDER.lower()
    gene_token = GENE_PLACEHOLDER.lower()
    for row, tokens in enumerate(sentences_tokenized):
        mapped = 0
        # Only the first `max_length` tokens contribute to the matrix.
        for col, token in enumerate(tokens[:max_length]):
            if token in word_to_index:
                # Placeholder tokens present in the vocabulary use their own vector
                # and, like any in-vocabulary word, count towards the threshold.
                mapped += 1
                embedding_matrix[row, col] = word_vectors[word_to_index[token]]
            elif replace_disease_gene_tokens and token == disease_token:
                embedding_matrix[row, col] = word_vectors[word_to_index['disease']]
            elif replace_disease_gene_tokens and token == gene_token:
                embedding_matrix[row, col] = word_vectors[word_to_index['gene']]
        if mapped < min_words_mapped:
            # Not enough real words mapped: mark the whole sentence as invalid.
            embedding_matrix[row, :, :] = np.full((max_length, vector_dim), np.nan)
    return embedding_matrix
def get_sentence_vector_array(sentences_tokenized, word_vectors, word_to_index, min_vector_count, remove_punctuation,
                              replace_disease_gene_tokens=True):
    """
    Represent each tokenized sentence as the average of the word vectors of its words.
    :param sentences_tokenized: list of list of strings - the tokenized sentences
    :param word_vectors: dict mapping integer word indices to their word vectors
    :param word_to_index: dict mapping words to their integer index in word_vectors
    :param min_vector_count: minimal number of words that must be mapped to vectors in a sentence;
        sentences below this threshold are represented by a row of numpy NaNs and a warning is logged.
    :param remove_punctuation: if True, tokens that are pure punctuation are skipped
    :param replace_disease_gene_tokens: boolean indicating if tokens representing tagged diseases and genes
        are mapped to the word vectors for 'disease' and 'gene', respectively. If False, the tokens are ignored.
    :return: a two dimensional numpy array with one sentence vector per row
    """
    vector_dim = len(word_vectors[0])
    sentence_array = np.zeros((len(sentences_tokenized), vector_dim))
    for sentence_index, sentence in enumerate(sentences_tokenized):
        vector_count = 0
        sentence_vec = np.zeros(vector_dim)
        for word in sentence:
            if remove_punctuation and word.strip() in string.punctuation:
                continue
            # Resolve the vocabulary key for this token (the word itself, or the
            # generic 'disease'/'gene' entry for tagged placeholder tokens).
            if word in word_to_index:
                key = word
            elif replace_disease_gene_tokens and word == DISEASE_PLACEHOLDER.lower():
                key = 'disease'
            elif replace_disease_gene_tokens and word == GENE_PLACEHOLDER.lower():
                key = 'gene'
            else:
                continue
            vector_count += 1
            sentence_vec += word_vectors[word_to_index[key]]
        if vector_count < min_vector_count:
            module_logger.warning('Following sentence could not be represented as a vector since only {:d} words were '
                                  'mapped to vectors: {}'.format(vector_count, ' '.join(sentence)))
        if vector_count == 0 or vector_count < min_vector_count:
            # Assign the NaN marker directly instead of dividing a NaN (or zero)
            # vector by a possibly-zero count, which raised a numpy RuntimeWarning.
            sentence_array[sentence_index] = np.full(vector_dim, np.nan)
        else:
            sentence_array[sentence_index] = sentence_vec / vector_count
    return sentence_array
| [
"logging.getLogger",
"numpy.full",
"numpy.zeros"
] | [((166, 193), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (183, 193), False, 'import logging\n'), ((1581, 1626), 'numpy.zeros', 'np.zeros', (['(n_samples, max_length, vector_dim)'], {}), '((n_samples, max_length, vector_dim))\n', (1589, 1626), True, 'import numpy as np\n'), ((3208, 3228), 'numpy.zeros', 'np.zeros', (['vector_dim'], {}), '(vector_dim)\n', (3216, 3228), True, 'import numpy as np\n'), ((2422, 2463), 'numpy.full', 'np.full', (['(max_length, vector_dim)', 'np.nan'], {}), '((max_length, vector_dim), np.nan)\n', (2429, 2463), True, 'import numpy as np\n'), ((4003, 4030), 'numpy.full', 'np.full', (['vector_dim', 'np.nan'], {}), '(vector_dim, np.nan)\n', (4010, 4030), True, 'import numpy as np\n')] |
# coding=utf-8
"""Dimensionality reduction Principal Component Analysis algorithm implementation using Numpy."""
import numpy as np
def normalize(X):
    """Normalize the given dataset X
    Args:
        X: ndarray, dataset
    Returns:
        (Xbar, mean, std): tuple of ndarray, Xbar is the normalized dataset
        with mean 0 and standard deviation 1; mean and std are the
        mean and standard deviation respectively.
    """
    mean = np.mean(X, axis=0)
    std = np.std(X, axis=0)
    # Guard against zero-variance columns: divide by 1 instead of 0 there,
    # while still returning the raw (unmodified) std to the caller.
    safe_std = np.where(std == 0, 1., std)
    return (X - mean) / safe_std, mean, std
def eig(S):
    """Compute the eigenvalues and corresponding eigenvectors
    for the covariance matrix S.
    Args:
        S: ndarray, covariance matrix
    Returns:
        (eigvals, eigvecs): ndarray, the eigenvalues and eigenvectors,
        sorted in descending order of the eigenvalues.
    """
    eigvals, eigvecs = np.linalg.eig(S)
    # The docstring contract requires descending order, but np.linalg.eig
    # gives no ordering guarantee -- sort explicitly.
    order = np.argsort(-eigvals)
    return eigvals[order], eigvecs[:, order]
def projection_matrix(B):
    """Compute the projection matrix onto the space spanned by `B`
    Args:
        B: ndarray of dimension (D, M), the basis for the subspace
    Returns:
        P: the projection matrix
    """
    return np.matmul(B, B.T)
def PCA(X, num_components):
    """
    Args:
        X: ndarray of size (N, D), where D is the dimension of the data,
           and N is the number of datapoints
        num_components: the number of principal components to use.
    Returns:
        X_reconstruct: ndarray of the reconstruction
        of X from the first `num_components` principal components.
    """
    # Scatter matrix of the data, scaled by the number of points.
    S = projection_matrix(X) / len(X)
    eigenvalues, eigenvectors = eig(S)
    # Keep the eigenvectors of the `num_components` largest eigenvalues.
    order = np.argsort(-eigenvalues)
    principal_basis = eigenvectors[:, order][:, :num_components]
    # Project the data onto the principal subspace.
    return projection_matrix(principal_basis) @ X
| [
"numpy.argsort",
"numpy.mean",
"numpy.linalg.eig",
"numpy.std"
] | [((452, 470), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (459, 470), True, 'import numpy as np\n'), ((481, 498), 'numpy.std', 'np.std', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (487, 498), True, 'import numpy as np\n'), ((991, 1007), 'numpy.linalg.eig', 'np.linalg.eig', (['S'], {}), '(S)\n', (1004, 1007), True, 'import numpy as np\n'), ((1702, 1718), 'numpy.argsort', 'np.argsort', (['(-lam)'], {}), '(-lam)\n', (1712, 1718), True, 'import numpy as np\n')] |
# unittest file for sampling of rotation space SO(3)
import sys
sys.path.append('../')
import pyEMsoft
import numpy as np
import unittest
from random import randint
class Test_SO3(unittest.TestCase):
    """Tests for sampling of rotation space SO(3) via the pyEMsoft bindings.

    NOTE(review): test_01 calls ``input()`` and so requires an interactive
    terminal -- it will block under a non-interactive test runner; confirm
    this is intended.
    """
    def setUp(self):
        # No shared fixtures are needed; each test builds its own data.
        pass
    def test_01_IsinsideFZ(self):
        """Seed the RNG, draw a random rotation and check fundamental-zone membership."""
        # default integer seed vector
        seed =pyEMsoft.rng.rng_t()
        print('The default seed vector:', seed,'\n')
        # seeds the RNG with a single random integer and a default seed vector
        seed_rand_int = randint(1, 100000000)
        pyEMsoft.rng.rng_seed(seed, seed_rand_int)
        print('The new seed vector:', seed, '\n')
        # draw a uniformly random unit quaternion (Marsaglia method)
        q_rand = pyEMsoft.quaternions.quat_marsaglia(seed)
        print('Random quaternion using the Marsaglia approach', q_rand, '\n', '\n')
        # quaternion to Rodrigues coordinates conversion
        rod = pyEMsoft.rotations.qu2ro(q_rand)
        # now pick the point group (interactive prompt)
        pyEMsoft.symmetry.listpointgroups()
        point_group_number = input('\nSelect a point group:')
        print('Point group selected is', point_group_number, '\n')
        # now get the FZ type and order
        fztype, fzorder = pyEMsoft.so3.getfztypeandorder(point_group_number)
        print('FZ type and order for the selected point group ', point_group_number, 'is', fztype,'and', fzorder,'\n')
        # is it inside the FZ? return a boolean
        insideFZ = pyEMsoft.so3.isinsidefz(rod, fztype, fzorder)
        print('Does Rodrigues point', rod, 'lie in the FZ? \nAnswer: %s' % bool(insideFZ), '\n')
    def test_02_CubochoricNeighbors(self):
        """Compute cubochoric coordinates of the neighbors of a fixed rotation."""
        # define an arbitrary quaternion
        q = np.asarray([1, 2, 3, 4], dtype=np.float64)
        # normalization of the quaternion to unit length
        q = q / pyEMsoft.quaternions.cabs(q)
        # convert to cubochoric coordinates
        cub = pyEMsoft.rotations.qu2cu(q)
        # number of nearest neighbor in each direction (should be an odd number for symmetric meshing)
        nn = 1
        # define the cubneighbor with fortran ordering (required by the Fortran binding)
        cubneighbor = np.asfortranarray(np.zeros([3, (2*nn+1)**3]), dtype=np.float64)
        # get the cubochoric coordinates of the neighbors
        pyEMsoft.so3.cubochoricneighbors(cubneighbor, nn, cub, 0.1)
        print('Cubochoric coordinates of the neighbors:\n', cubneighbor,'\n')
print('Cubochoric coordinates of the neighbors:\n', cubneighbor,'\n')
if __name__ == '__main__':
unittest.main() | [
"pyEMsoft.quaternions.quat_marsaglia",
"pyEMsoft.so3.getfztypeandorder",
"pyEMsoft.so3.isinsidefz",
"pyEMsoft.rng.rng_t",
"pyEMsoft.so3.cubochoricneighbors",
"pyEMsoft.rotations.qu2cu",
"pyEMsoft.rng.rng_seed",
"numpy.asarray",
"pyEMsoft.quaternions.cabs",
"numpy.zeros",
"unittest.main",
"pyEM... | [((64, 86), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (79, 86), False, 'import sys\n'), ((2339, 2354), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2352, 2354), False, 'import unittest\n'), ((334, 354), 'pyEMsoft.rng.rng_t', 'pyEMsoft.rng.rng_t', ([], {}), '()\n', (352, 354), False, 'import pyEMsoft\n'), ((511, 532), 'random.randint', 'randint', (['(1)', '(100000000)'], {}), '(1, 100000000)\n', (518, 532), False, 'from random import randint\n'), ((541, 583), 'pyEMsoft.rng.rng_seed', 'pyEMsoft.rng.rng_seed', (['seed', 'seed_rand_int'], {}), '(seed, seed_rand_int)\n', (562, 583), False, 'import pyEMsoft\n'), ((651, 692), 'pyEMsoft.quaternions.quat_marsaglia', 'pyEMsoft.quaternions.quat_marsaglia', (['seed'], {}), '(seed)\n', (686, 692), False, 'import pyEMsoft\n'), ((848, 880), 'pyEMsoft.rotations.qu2ro', 'pyEMsoft.rotations.qu2ro', (['q_rand'], {}), '(q_rand)\n', (872, 880), False, 'import pyEMsoft\n'), ((924, 959), 'pyEMsoft.symmetry.listpointgroups', 'pyEMsoft.symmetry.listpointgroups', ([], {}), '()\n', (957, 959), False, 'import pyEMsoft\n'), ((1155, 1205), 'pyEMsoft.so3.getfztypeandorder', 'pyEMsoft.so3.getfztypeandorder', (['point_group_number'], {}), '(point_group_number)\n', (1185, 1205), False, 'import pyEMsoft\n'), ((1392, 1437), 'pyEMsoft.so3.isinsidefz', 'pyEMsoft.so3.isinsidefz', (['rod', 'fztype', 'fzorder'], {}), '(rod, fztype, fzorder)\n', (1415, 1437), False, 'import pyEMsoft\n'), ((1632, 1674), 'numpy.asarray', 'np.asarray', (['[1, 2, 3, 4]'], {'dtype': 'np.float64'}), '([1, 2, 3, 4], dtype=np.float64)\n', (1642, 1674), True, 'import numpy as np\n'), ((1816, 1843), 'pyEMsoft.rotations.qu2cu', 'pyEMsoft.rotations.qu2cu', (['q'], {}), '(q)\n', (1840, 1843), False, 'import pyEMsoft\n'), ((2169, 2228), 'pyEMsoft.so3.cubochoricneighbors', 'pyEMsoft.so3.cubochoricneighbors', (['cubneighbor', 'nn', 'cub', '(0.1)'], {}), '(cubneighbor, nn, cub, 0.1)\n', (2201, 2228), False, 'import pyEMsoft\n'), ((1729, 
1757), 'pyEMsoft.quaternions.cabs', 'pyEMsoft.quaternions.cabs', (['q'], {}), '(q)\n', (1754, 1757), False, 'import pyEMsoft\n'), ((2057, 2089), 'numpy.zeros', 'np.zeros', (['[3, (2 * nn + 1) ** 3]'], {}), '([3, (2 * nn + 1) ** 3])\n', (2065, 2089), True, 'import numpy as np\n')] |
import numpy as np
class ExpansionProcedure:
    """Callable which takes a node::PartitionNode and returns a set of nodes obtained
    by partitioning the cell associated to the node.

    Subclasses implement ``__call__``; this base class only provides the
    longest-side selection helper.
    """
    def _get_side_to_split(self, node):
        """Return the index of the cell side with the largest extent."""
        side_lengths = np.abs(node.partition[:, 1] - node.partition[:, 0])
        # (fixed: the original also computed np.max(side_lengths) and never used it)
        return np.argmax(side_lengths)

    def __call__(self, node):
        """Split the node's cell into children; to be implemented by subclasses."""
        pass
class GreedyExpansion(ExpansionProcedure):
    """Expansion procedure which consists in splitting cells along their longest side.

    Parameters
    ----------
    seed : int
        integer used to initialize a random number generator
    random_dim : bool
        if True the side to split is randomly chosen, otherwise the longest side is chosen
    random_split : bool
        if True the partition sizes are randomly chosen, otherwise the split is uniform
    """
    def __init__(self, seed = 4242, random_dim = False, random_split = False):
        self.random_state = np.random.RandomState(seed)
        self.random_dim = random_dim
        self.random_split = random_split

    def _get_side_to_split(self, node):
        """Pick the side to cut: a random dimension when requested, else the longest one."""
        side_lengths = np.abs(node.partition[:, 1] - node.partition[:, 0])
        # (fixed: the original also computed np.max(side_lengths) and never used it)
        if self.random_dim:
            return self.random_state.choice(range(0, node.partition.shape[0]), 1)
        return np.argmax(side_lengths)

    def _standard_split(self, id, node):
        """Cut side `id` into node.N equal sub-intervals and return the children."""
        step = float(np.abs(node.partition[:, 1][id] - node.partition[:, 0][id])/node.N)
        children = []
        for i in range(node.N):
            new_partition = node.partition.copy()
            lb = new_partition[:, 0]
            ub = new_partition[:, 1]
            lb[id] = float(node.partition[:, 0][id] + step * float(i))
            ub[id] = float(node.partition[:, 0][id] + step * float(i + 1))
            children.append((new_partition, node.index*node.N + i))
        return children

    def _random_split(self, id, node):
        """Cut side `id` at node.N randomly drawn pivots and return the children."""
        m, M = node.partition[:, 0][id], node.partition[:, 1][id]
        pivots = (M - m) * self.random_state.random(node.N) + m
        pivots.sort()
        lenghts = [piv - m for piv in pivots]
        children = []
        last_lb = m
        for i in range(node.N):
            new_partition = node.partition.copy()
            lb, ub = new_partition[:, 0], new_partition[:, 1]
            lb[id] = last_lb
            ub[id] = pivots[i]
            children.append((new_partition, node.index*node.N + i))
            last_lb = ub[id]
        return children

    def __call__(self, node):
        """Split the node's cell along the selected side and return the children."""
        id = self._get_side_to_split(node)
        if self.random_split:
            return self._random_split(id, node)
        return self._standard_split(id, node)
"numpy.abs",
"numpy.argmax",
"numpy.random.RandomState",
"numpy.max"
] | [((251, 302), 'numpy.abs', 'np.abs', (['(node.partition[:, 1] - node.partition[:, 0])'], {}), '(node.partition[:, 1] - node.partition[:, 0])\n', (257, 302), True, 'import numpy as np\n'), ((321, 341), 'numpy.max', 'np.max', (['side_lengths'], {}), '(side_lengths)\n', (327, 341), True, 'import numpy as np\n'), ((357, 380), 'numpy.argmax', 'np.argmax', (['side_lengths'], {}), '(side_lengths)\n', (366, 380), True, 'import numpy as np\n'), ((1002, 1029), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (1023, 1029), True, 'import numpy as np\n'), ((1172, 1223), 'numpy.abs', 'np.abs', (['(node.partition[:, 1] - node.partition[:, 0])'], {}), '(node.partition[:, 1] - node.partition[:, 0])\n', (1178, 1223), True, 'import numpy as np\n'), ((1242, 1262), 'numpy.max', 'np.max', (['side_lengths'], {}), '(side_lengths)\n', (1248, 1262), True, 'import numpy as np\n'), ((1388, 1411), 'numpy.argmax', 'np.argmax', (['side_lengths'], {}), '(side_lengths)\n', (1397, 1411), True, 'import numpy as np\n'), ((1479, 1538), 'numpy.abs', 'np.abs', (['(node.partition[:, 1][id] - node.partition[:, 0][id])'], {}), '(node.partition[:, 1][id] - node.partition[:, 0][id])\n', (1485, 1538), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.decomposition import RandomizedPCA, MiniBatchSparsePCA
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE
from sklearn.preprocessing import normalize, MinMaxScaler
import clumpy
class Cluster(object):
    """Fit a dimensionality-reduced clustering/embedding pipeline on a dataframe.

    NOTE(review): this class looks like work-in-progress -- the clustering step
    itself and the rule extraction are commented out in ``find_clusters``, so
    after fitting only ``embedding_`` is populated while ``clusterer_`` and
    ``importances_`` stay None.
    """
    # NOTE(review): mutable default arguments ([]) are shared across calls;
    # they are only assigned here, never mutated, so this is latent rather
    # than active -- but None defaults would be safer.
    def __init__(self, numeric_columns=[], categorical_columns=[]):
        self.numeric_columns = numeric_columns
        self.categorical_columns = categorical_columns
        self.clusterer_ = None
        self.importances_ = None
    #@property
    #def feature_names(self):
    #    return self.numeric_columns + self.categorical_columns
    @property
    def n_clusters(self):
        # Raises AttributeError while clusterer_ is still None (before fitting).
        return self.clusterer_.n_clusters
    def find_clusters(self, df):
        """Preprocess df, reduce to 50 SVD components and compute a 2D t-SNE embedding."""
        # Stack all non-None feature blocks produced by the project preprocessor.
        X = np.hstack([X for X in clumpy.preprocessing.process_data(df) if X is not None])
        # reduction using pca
        #pca = RandomizedPCA(n_components=50, random_state=123, iterated_power=7)
        pca = TruncatedSVD(n_components=50, random_state=123)
        scaled_X = pca.fit_transform(X)
        scaled_X = MinMaxScaler().fit_transform(scaled_X)
        #pca = MiniBatchSparsePCA(n_components=50, alpha=0.8, n_iter=100, random_state=123)
        #scaled_X = np.hstack((X[:, :len(num_columns)], pca_X))
        #scaled_X = scaled_X - np.mean(scaled_X, axis=0)
        #max_x = np.max(np.abs(scaled_X), axis=0)
        #max_x[max_x == 0] = 1.
        #scaled_X = scaled_X / max_x
        #ptp_scale = np.ptp(scaled_X, axis=0)
        #ptp_scale[ptp_scale == 0] = 1.
        #scaled_X /= ptp_scale
        #scaled_X = normalize(scaled_X, norm='l2', axis=1, copy=False)
        #self.clusterer_ = clumpy.cluster.auto_kmeans(scaled_X, n_clusters=[2, 3, 4])
        #self.find_rules(X)
        #self.rules_ = clumpy.rules.prim_descriptions(
        #    data[self.numeric_columns + self.categorical_columns], self.clusterer_.labels_, feature_names=self.importances_)
        ##self.rules_ = clumpy.rules.tree_descriptions(
        #    data[self.feature_names], self.clusterer_.labels_,
        #    categorical_columns=self.categorical_columns,
        #    feature_names=self.importances_)
        tsne = TSNE(n_components=2, random_state=1234, verbose=True)
        self.embedding_ = tsne.fit_transform(scaled_X)
        # Center the embedding at the origin.
        self.embedding_ -= np.mean(self.embedding_, axis=0)
        #self.clusterer_ = clumpy.cluster.auto_kmeans(self.embedding_, n_clusters=[2, 3, 4])
    def find_rules(self, X):
        """Compute per-cluster feature importances via ANOVA.

        NOTE(review): references ``self.feature_names``, whose @property is
        commented out above -- calling this method raises AttributeError as-is.
        """
        self.importances_ = clumpy.importance.anova_importance(
            X,
            self.clusterer_.labels_,
            feature_names=self.feature_names,
            n_features=5)
def cluster(X, numeric_columns=None, categorical_columns=None):
    """Construct a Cluster helper, fit it on X and return the fitted instance."""
    fitted = Cluster(numeric_columns=numeric_columns, categorical_columns=categorical_columns)
    fitted.find_clusters(X)
    return fitted
def plot(clusterer, data, cluster_id):
    """Plot per-cluster statistics for `cluster_id` using its most important features."""
    important_features = clusterer.importances_[cluster_id]
    # Partition the important features into categorical and numeric ones.
    qualitative = [name for name in important_features if name in clusterer.categorical_columns]
    quantitative = [name for name in important_features if name in clusterer.numeric_columns]
    return clumpy.plots.plot_cluster_statistics(
        cluster_labels=clusterer.clusterer_.labels_,
        cluster_id=cluster_id,
        data=data,
        scale=True,
        quant_var=quantitative,
        qual_var=qualitative,
        figsize=(15,15))
| [
"numpy.mean",
"sklearn.manifold.TSNE",
"sklearn.decomposition.TruncatedSVD",
"clumpy.plots.plot_cluster_statistics",
"clumpy.preprocessing.process_data",
"clumpy.importance.anova_importance",
"sklearn.preprocessing.MinMaxScaler"
] | [((3199, 3393), 'clumpy.plots.plot_cluster_statistics', 'clumpy.plots.plot_cluster_statistics', ([], {'cluster_labels': 'clusterer.clusterer_.labels_', 'cluster_id': 'cluster_id', 'data': 'data', 'scale': '(True)', 'quant_var': 'num_vars', 'qual_var': 'cat_vars', 'figsize': '(15, 15)'}), '(cluster_labels=clusterer.clusterer_.\n labels_, cluster_id=cluster_id, data=data, scale=True, quant_var=\n num_vars, qual_var=cat_vars, figsize=(15, 15))\n', (3235, 3393), False, 'import clumpy\n'), ((945, 992), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(50)', 'random_state': '(123)'}), '(n_components=50, random_state=123)\n', (957, 992), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((2163, 2216), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'random_state': '(1234)', 'verbose': '(True)'}), '(n_components=2, random_state=1234, verbose=True)\n', (2167, 2216), False, 'from sklearn.manifold import TSNE\n'), ((2299, 2331), 'numpy.mean', 'np.mean', (['self.embedding_'], {'axis': '(0)'}), '(self.embedding_, axis=0)\n', (2306, 2331), True, 'import numpy as np\n'), ((2485, 2599), 'clumpy.importance.anova_importance', 'clumpy.importance.anova_importance', (['X', 'self.clusterer_.labels_'], {'feature_names': 'self.feature_names', 'n_features': '(5)'}), '(X, self.clusterer_.labels_,\n feature_names=self.feature_names, n_features=5)\n', (2519, 2599), False, 'import clumpy\n'), ((1052, 1066), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (1064, 1066), False, 'from sklearn.preprocessing import normalize, MinMaxScaler\n'), ((761, 798), 'clumpy.preprocessing.process_data', 'clumpy.preprocessing.process_data', (['df'], {}), '(df)\n', (794, 798), False, 'import clumpy\n')] |
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from kernpart import Kernpart
import numpy as np
class Fixed(Kernpart):
    """Kernel part wrapping a precomputed (fixed) covariance matrix scaled by a variance."""
    def __init__(self, input_dim, K, variance=1.):
        """
        :param input_dim: the number of input dimensions
        :type input_dim: int
        :param K: the precomputed covariance matrix
        :param variance: the variance of the kernel
        :type variance: float
        """
        self.input_dim = input_dim
        self.fixed_K = K
        self.name = 'fixed'
        self.num_params = 1
        self._set_params(np.array([variance]).reshape(-1))

    def _get_params(self):
        # The variance is the only free parameter.
        return self.variance

    def _set_params(self, x):
        assert x.shape == (1,)
        self.variance = x

    def _get_param_names(self):
        return ['variance']

    def K(self, X, X2, target):
        # Accumulate the scaled fixed covariance into `target` in place.
        target += self.variance * self.fixed_K

    def dK_dtheta(self, partial, X, X2, target):
        # Gradient w.r.t. the variance parameter, accumulated in place.
        target += np.sum(partial * self.fixed_K)

    def dK_dX(self, partial, X, X2, target):
        # The fixed kernel does not depend on X.
        pass

    def dKdiag_dX(self, partial, X, target):
        # The fixed kernel does not depend on X.
        pass
| [
"numpy.array"
] | [((571, 591), 'numpy.array', 'np.array', (['[variance]'], {}), '([variance])\n', (579, 591), True, 'import numpy as np\n')] |
import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
app = Flask(__name__)
# prediction function
def ValuePredictor(to_predict_list):
    """Predict fraud for a single transaction described by 7 feature values.

    :param to_predict_list: sequence of 7 numeric feature values
    :return: the model's prediction for the single sample
    """
    features = np.array(to_predict_list).reshape(1, 7)
    # Use a context manager so the model file handle is closed deterministically
    # (the original pickle.load(open(...)) leaked the handle).
    with open("model.pkl", "rb") as model_file:
        loaded_model = pickle.load(model_file)
    return loaded_model.predict(features)[0]
@app.route('/')
def home():
    # Serve the landing page containing the prediction input form.
    return render_template("index.html")
@app.route('/predict',methods=['POST','GET'])
def predict():
    """Run the fraud model on the POSTed form values and render the result.

    On a GET request (e.g. visiting /predict directly) the original function
    returned None, which made Flask raise a 500 error; fall back to the input
    form instead.
    """
    if request.method == 'POST':
        # Form values arrive as strings in submission order; convert to floats.
        to_predict_list = request.form.to_dict()
        to_predict_list = list(to_predict_list.values())
        to_predict_list = list(map(float, to_predict_list))
        result = ValuePredictor(to_predict_list)
        if int(result)== 1:
            prediction ='Given transaction is fradulent'
        else:
            prediction ='Given transaction is NOT fradulent'
        return render_template("result.html", prediction = prediction)
    # GET request: show the input form again instead of returning None.
    return render_template("index.html")
if __name__ == "__main__":
    # Start the Flask development server (debug=True is for local use only).
    app.run(debug=True)
| [
"flask.render_template",
"numpy.array",
"flask.request.form.to_dict",
"flask.Flask"
] | [((103, 118), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (108, 118), False, 'from flask import Flask, request, jsonify, render_template\n'), ((421, 450), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (436, 450), False, 'from flask import Flask, request, jsonify, render_template\n'), ((943, 996), 'flask.render_template', 'render_template', (['"""result.html"""'], {'prediction': 'prediction'}), "('result.html', prediction=prediction)\n", (958, 996), False, 'from flask import Flask, request, jsonify, render_template\n'), ((577, 599), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {}), '()\n', (597, 599), False, 'from flask import Flask, request, jsonify, render_template\n'), ((199, 224), 'numpy.array', 'np.array', (['to_predict_list'], {}), '(to_predict_list)\n', (207, 224), True, 'import numpy as np\n')] |
import sys
import pandas as pd
import numpy as np
from os.path import isfile, splitext
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
# Template plotting script: every '<...>' placeholder is substituted by the
# caller before execution (file paths, subset labels, colouring options).
dat = '<DAT>'
cur_raref = '<CUR_RAREF>'
tab_pd = pd.read_table('<TAB_FP>', index_col=0)
if tab_pd.shape[0] > 1000:
    # Too many features for a readable heatmap; skip plotting entirely.
    print('No plotting all %s features...' % tab_pd.shape[0])
else:
    meta_pd = pd.read_table('<META_FP>', dtype={'sample_name': str})
    colors_sample = '<COLORS_SAMPLE>'
    colors_feature = '<COLORS_FEATURE>'
    stats_tax_dat = '<STATS_TAX_DAT>'
    split_taxa_fp = '<SPLIT_TAXA_FP>'
    level = '<LEVEL>'
    collapsed = '<COLLAPSED>'
    nestedness_raref = '<NESTEDNESS_RAREF>'
    if colors_sample:
        # First metadata column is assumed to be the sample identifier.
        meta_pd = meta_pd.rename(columns={meta_pd.columns[0]: 'SAMPLE_ID'})
        meta_pd = meta_pd.set_index('SAMPLE_ID')
        if set(colors_sample).issubset(meta_pd.columns):
            colors_in = list(set(colors_sample) & set(meta_pd.columns))
    # Long format: one row per (feature, sample) with log10(count + 1).
    tab_pd = np.log10(tab_pd + 1).stack().reset_index().rename(
        columns={'level_1': 'SAMPLE_ID', 0: 'log10_reads'})
    tab_pd = tab_pd.rename(columns={tab_pd.columns[0]: 'OBSERVATION_ID'})
    tax_cols = []
    if colors_feature and split_taxa_fp:
        tax_pd = pd.read_table(split_taxa_fp, index_col=0)
        if level != 'feature':
            if level not in collapsed[stats_tax_dat]:
                sys.exit(0)
            # Collapse the split-taxonomy table down to the requested level
            # and rebuild a ';'-joined lineage as the observation id.
            tax_pd = tax_pd.iloc[
                :, :collapsed[stats_tax_dat][level]
            ].drop_duplicates()
            tax_pd['OBSERVATION_ID'] = tax_pd.apply(func=lambda x: ';'.join([y for y in x if str(y) != 'nan']), axis=1)
        else:
            tax_pd = tax_pd.reset_index()
            tax_pd = tax_pd.rename(columns={tax_pd.columns[0]: 'OBSERVATION_ID'})
        tax_pd = tax_pd.set_index('OBSERVATION_ID')
        # colors_feature entries may be 1-based column positions (ints) or
        # column names; resolve positions to names, then drop unknowns.
        tax_cols = [tax_pd.columns.tolist()[( x -1)] if isinstance(x, int)
                    and x not in tax_pd.columns and tax_pd.shape[1] >= x
                    else x for x in colors_feature]
        tax_cols = [x for x in tax_cols if x in tax_pd.columns]
        if tax_cols:
            tax_pd = tax_pd[tax_cols]
    # One pair of PDFs per (features subset, samples subset) combination.
    for (group, case), res in nestedness_raref.items():
        # fields_fp = res['fields']
        graphs_fp = res['graph']
        # if not isfile(fields_fp) or not isfile(graphs_fp):
        if not isfile(graphs_fp):
            continue
        graphs = pd.read_csv(graphs_fp, header=0, sep=',', dtype={'SAMPLE_ID': str})
        # Order samples by richness rank and features by prevalence rank.
        samples_order = [y for x, y in sorted(dict(graphs[['SAMPLE_RANK', 'SAMPLE_ID']].values).items())][::-1]
        features_order = [y for x, y in sorted(dict(graphs[['OBSERVATION_RANK', 'OBSERVATION_ID']].values).items())][::-1]
        graphs = graphs.merge(tab_pd, on=['SAMPLE_ID', 'OBSERVATION_ID'], how='left')
        matrix = graphs[['OBSERVATION_ID', 'SAMPLE_ID', 'log10_reads']].pivot_table(
            values='log10_reads', index='OBSERVATION_ID', columns='SAMPLE_ID')
        matrix = matrix.loc[features_order, samples_order]
        # fields = [x.strip() for x in open(fields_fp).readlines()]
        cur_meta_pd = meta_pd.loc[matrix.columns.astype(str).tolist(), colors_in].copy()
        # new_meta = 'metadata'
        # if new_meta in cur_meta_pd.columns:
        #     new_meta = 'passed_metadata'
        # if len(fields) > 1:
        #     cur_meta_pd[new_meta] = cur_meta_pd[fields].apply(func=lambda x: '-'.join(x), axis=1)
        # Per-column value -> colour maps, in hex (for clustermap) and RGB
        # (for the matplotlib legend patches).
        cur_meta_leg_hex = cur_meta_pd.apply(func=lambda x: dict(
            zip(x.unique(), sns.color_palette(palette='Set1', n_colors=x.unique().size).as_hex())
        )).to_dict()
        cur_meta_leg_rgb = cur_meta_pd.apply(func=lambda x: dict(
            zip(x.unique(), sns.color_palette(palette='Set1', n_colors=x.unique().size))
        )).to_dict()
        cur_meta_pd_hex = cur_meta_pd.apply(func=lambda x: x.map(dict(
            zip(x.unique(), sns.color_palette(palette='Set1', n_colors=x.unique().size).as_hex())
        )))
        cur_meta_pd_rgb = cur_meta_pd.apply(func=lambda x: x.map(dict(
            zip(x.unique(), sns.color_palette(palette='Set1', n_colors=x.unique().size))
        )))
        if tax_cols:
            # print()
            # print(tax_cols)
            # print()
            # print(tax_pd)
            # print(tax_pd.index)
            # print()
            # print(matrix)
            # print(matrix.index)
            # Same colour maps for the feature (row) annotations.
            cur_tax_pd = tax_pd.loc[matrix.index.astype(str).tolist(), :].copy()
            cur_tax_leg_hex = cur_tax_pd.apply(func=lambda x: dict(
                zip(x.unique(), sns.color_palette(palette='Paired', n_colors=x.unique().size).as_hex())
            )).to_dict()
            cur_tax_leg_rgb = cur_tax_pd.apply(func=lambda x: dict(
                zip(x.unique(), sns.color_palette(palette='Paired', n_colors=x.unique().size))
            )).to_dict()
            cur_tax_pd_hex = cur_tax_pd.apply(func=lambda x: x.map(dict(
                zip(x.unique(), sns.color_palette(palette='Paired', n_colors=x.unique().size).as_hex())
            )))
            cur_tax_pd_rgb = cur_tax_pd.apply(func=lambda x: x.map(dict(
                zip(x.unique(), sns.color_palette(palette='Paired', n_colors=x.unique().size))
            )))
        X, Y = matrix.shape
        graphs_pdf = res['graph_pdf']
        # "Complex" figure: annotated clustermap of the whole matrix.
        graphs_pdf_complex = '%s_complex.pdf' % splitext(graphs_pdf)[0]
        with PdfPages(graphs_pdf_complex) as pdf:
            fig, ax = plt.subplots(figsize=(20, (20 * Y/X)))
            if tax_cols:
                g = sns.clustermap(
                    matrix, col_cluster=False, row_cluster=False,
                    linewidths=0.1, cmap='coolwarm',
                    row_colors=cur_tax_pd_hex, col_colors=cur_meta_pd_hex,
                    yticklabels=False, xticklabels=False
                )
                simples = [('feature', cur_tax_pd_rgb, cur_tax_leg_rgb),
                           ('sample', cur_meta_pd_rgb, cur_meta_leg_rgb)]
            else:
                g = sns.clustermap(
                    matrix, col_cluster=False, row_cluster=False,
                    linewidths=0.1, cmap='coolwarm',
                    col_colors=cur_meta_pd_hex,
                    yticklabels=False, xticklabels=False
                )
                simples = [('sample', cur_meta_pd_rgb, cur_meta_leg_rgb)]
            # One legend per annotation column, shifted right by 1/ncols
            # each time so they do not overlap.
            n = 0
            N = 1 / cur_meta_pd_hex.columns.size
            for cdx, col in enumerate(cur_meta_pd_hex.columns):
                n += N
                if not cdx:
                    first_leg = g.ax_col_dendrogram.legend(handles=[
                        mpatches.Patch(label=x, color=y) for x, y in cur_meta_leg_hex[col].items()
                    ], loc="upper left")
                    g.ax_col_dendrogram.add_artist(first_leg)
                else:
                    g.ax_col_dendrogram.legend(handles=[
                        mpatches.Patch(label=x, color=y) for x, y in cur_meta_leg_hex[col].items()
                    ], loc="upper left", bbox_to_anchor=(n, 1))
            if tax_cols:
                n = 0
                N = 1 / cur_tax_pd_hex.columns.size
                for cdx, col in enumerate(cur_tax_pd_hex.columns):
                    n += N
                    if not cdx:
                        first_leg = g.ax_row_dendrogram.legend(handles=[
                            mpatches.Patch(label=x, color=y) for x, y in cur_tax_leg_hex[col].items()
                        ], loc="lower left")
                        g.ax_row_dendrogram.add_artist(first_leg)
                    else:
                        g.ax_row_dendrogram.legend(handles=[
                            mpatches.Patch(label=x, color=y) for x, y in cur_tax_leg_hex[col].items()
                        ], loc="lower left", bbox_to_anchor=(0, n))
            g.ax_heatmap.set_xlabel('Samples (sorted by richness)')
            g.ax_heatmap.set_ylabel('Taxon (sorted by prevalence)')
            suptitle = '%s%s' % (dat, cur_raref)
            if case != 'ALL':
                suptitle = suptitle + '\nsamples subset: %s' % case
            if group != '':
                suptitle = suptitle + '\nfeatures subset: %s' % group
            plt.suptitle(suptitle, fontsize=20)
            plt.subplots_adjust(top=.95, hspace=0.3)
            pdf.savefig(bbox_inches='tight', dpi=300)
            plt.close()
        print('[Nestedness] Written:', graphs_pdf_complex)
        # "Simples" figure: one RGBA presence map per annotation column.
        graphs_pdf_simples = '%s_simples.pdf' % splitext(graphs_pdf)[0]
        with PdfPages(graphs_pdf_simples) as pdf:
            for (typ, tab, leg) in simples:
                num = tab.columns.size
                fig, axes = plt.subplots(num, 1, figsize=(9, (tab.columns.size * 4)))
                for cdx, col in enumerate(tab.columns):
                    leg_col2rgb = dict((x, leg[col][x]) for idx, x in enumerate(leg[col]))
                    cols_d = tab[col].to_dict()
                    # Build an RGBA image: transparent where the matrix has
                    # no reads, otherwise the group colour (alpha = 1).
                    if typ == 'sample':
                        mat = [
                            [
                                [np.nan, np.nan, np.nan, 0.] if str(x[1]) == 'nan' else
                                ([y for y in cols_d[x[0]]] + [1.]) for x in row.reset_index().values
                            ] for r, row in matrix.iterrows()
                        ]
                    if typ == 'feature':
                        mat = [
                            [
                                [np.nan, np.nan, np.nan, 0.] if str(x) == 'nan' else
                                ([y for y in cols_d[r]] + [1.]) for x in row
                            ] for r, row in matrix.iterrows()
                        ]
                    mat = np.array(mat)
                    hands = [mpatches.Patch(label=x, color=y) for x, y in leg_col2rgb.items()]
                    # plt.subplots returns a bare Axes (not an array) when
                    # there is a single subplot, hence the two branches.
                    if num == 1:
                        axes.imshow(mat, aspect="auto")
                        axes.legend(handles=hands, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
                        axes.set_title(col)
                        axes.set_xlabel('Samples (sorted by richness)')
                        axes.set_ylabel('%s (sorted by prevalence)' % level)
                    else:
                        axes[cdx].imshow(mat, aspect="auto")
                        axes[cdx].legend(handles=hands, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
                        axes[cdx].set_title(col)
                        axes[cdx].set_xlabel('Samples (sorted by richness)')
                        axes[cdx].set_ylabel('%s (sorted by prevalence)' % level)
                suptitle = '%s%s (%s coloring)' % (dat, cur_raref, typ)
                # Leave extra headroom for each extra title line.
                top = .93
                if case != 'ALL':
                    top -= 0.03
                    suptitle = suptitle + '\nsamples subset: %s' % case
                if group != '':
                    top -= 0.03
                    suptitle = suptitle + '\nfeatures subset: %s' % group
                plt.suptitle(suptitle, fontsize=15)
                plt.subplots_adjust(top=top, hspace=0.3)
                pdf.savefig(bbox_inches='tight', dpi=300)
                plt.close()
        print('[Nestedness] Written:', graphs_pdf_simples)
| [
"numpy.log10",
"pandas.read_csv",
"seaborn.clustermap",
"os.path.splitext",
"os.path.isfile",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.patches.Patch",
"pandas.read_table",
"sys.exit",
"matplotlib.backends.backend_pdf.PdfPages",
"matplotlib.pyplot.su... | [((284, 322), 'pandas.read_table', 'pd.read_table', (['"""<TAB_FP>"""'], {'index_col': '(0)'}), "('<TAB_FP>', index_col=0)\n", (297, 322), True, 'import pandas as pd\n'), ((432, 486), 'pandas.read_table', 'pd.read_table', (['"""<META_FP>"""'], {'dtype': "{'sample_name': str}"}), "('<META_FP>', dtype={'sample_name': str})\n", (445, 486), True, 'import pandas as pd\n'), ((1290, 1331), 'pandas.read_table', 'pd.read_table', (['split_taxa_fp'], {'index_col': '(0)'}), '(split_taxa_fp, index_col=0)\n', (1303, 1331), True, 'import pandas as pd\n'), ((2506, 2573), 'pandas.read_csv', 'pd.read_csv', (['graphs_fp'], {'header': '(0)', 'sep': '""","""', 'dtype': "{'SAMPLE_ID': str}"}), "(graphs_fp, header=0, sep=',', dtype={'SAMPLE_ID': str})\n", (2517, 2573), True, 'import pandas as pd\n'), ((2448, 2465), 'os.path.isfile', 'isfile', (['graphs_fp'], {}), '(graphs_fp)\n', (2454, 2465), False, 'from os.path import isfile, splitext\n'), ((5482, 5510), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['graphs_pdf_complex'], {}), '(graphs_pdf_complex)\n', (5490, 5510), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((5541, 5579), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 20 * Y / X)'}), '(figsize=(20, 20 * Y / X))\n', (5553, 5579), True, 'import matplotlib.pyplot as plt\n'), ((8287, 8322), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['suptitle'], {'fontsize': '(20)'}), '(suptitle, fontsize=20)\n', (8299, 8322), True, 'import matplotlib.pyplot as plt\n'), ((8335, 8376), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.95)', 'hspace': '(0.3)'}), '(top=0.95, hspace=0.3)\n', (8354, 8376), True, 'import matplotlib.pyplot as plt\n'), ((8442, 8453), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8451, 8453), True, 'import matplotlib.pyplot as plt\n'), ((8600, 8628), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['graphs_pdf_simples'], {}), 
'(graphs_pdf_simples)\n', (8608, 8628), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((1433, 1444), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1441, 1444), False, 'import sys\n'), ((5445, 5465), 'os.path.splitext', 'splitext', (['graphs_pdf'], {}), '(graphs_pdf)\n', (5453, 5465), False, 'from os.path import isfile, splitext\n'), ((5625, 5819), 'seaborn.clustermap', 'sns.clustermap', (['matrix'], {'col_cluster': '(False)', 'row_cluster': '(False)', 'linewidths': '(0.1)', 'cmap': '"""coolwarm"""', 'row_colors': 'cur_tax_pd_hex', 'col_colors': 'cur_meta_pd_hex', 'yticklabels': '(False)', 'xticklabels': '(False)'}), "(matrix, col_cluster=False, row_cluster=False, linewidths=0.1,\n cmap='coolwarm', row_colors=cur_tax_pd_hex, col_colors=cur_meta_pd_hex,\n yticklabels=False, xticklabels=False)\n", (5639, 5819), True, 'import seaborn as sns\n'), ((6095, 6262), 'seaborn.clustermap', 'sns.clustermap', (['matrix'], {'col_cluster': '(False)', 'row_cluster': '(False)', 'linewidths': '(0.1)', 'cmap': '"""coolwarm"""', 'col_colors': 'cur_meta_pd_hex', 'yticklabels': '(False)', 'xticklabels': '(False)'}), "(matrix, col_cluster=False, row_cluster=False, linewidths=0.1,\n cmap='coolwarm', col_colors=cur_meta_pd_hex, yticklabels=False,\n xticklabels=False)\n", (6109, 6262), True, 'import seaborn as sns\n'), ((8563, 8583), 'os.path.splitext', 'splitext', (['graphs_pdf'], {}), '(graphs_pdf)\n', (8571, 8583), False, 'from os.path import isfile, splitext\n'), ((8748, 8803), 'matplotlib.pyplot.subplots', 'plt.subplots', (['num', '(1)'], {'figsize': '(9, tab.columns.size * 4)'}), '(num, 1, figsize=(9, tab.columns.size * 4))\n', (8760, 8803), True, 'import matplotlib.pyplot as plt\n'), ((11044, 11079), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['suptitle'], {'fontsize': '(15)'}), '(suptitle, fontsize=15)\n', (11056, 11079), True, 'import matplotlib.pyplot as plt\n'), ((11096, 11136), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': 
'top', 'hspace': '(0.3)'}), '(top=top, hspace=0.3)\n', (11115, 11136), True, 'import matplotlib.pyplot as plt\n'), ((11211, 11222), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11220, 11222), True, 'import matplotlib.pyplot as plt\n'), ((9759, 9772), 'numpy.array', 'np.array', (['mat'], {}), '(mat)\n', (9767, 9772), True, 'import numpy as np\n'), ((9802, 9834), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'label': 'x', 'color': 'y'}), '(label=x, color=y)\n', (9816, 9834), True, 'import matplotlib.patches as mpatches\n'), ((1028, 1048), 'numpy.log10', 'np.log10', (['(tab_pd + 1)'], {}), '(tab_pd + 1)\n', (1036, 1048), True, 'import numpy as np\n'), ((6703, 6735), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'label': 'x', 'color': 'y'}), '(label=x, color=y)\n', (6717, 6735), True, 'import matplotlib.patches as mpatches\n'), ((6984, 7016), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'label': 'x', 'color': 'y'}), '(label=x, color=y)\n', (6998, 7016), True, 'import matplotlib.patches as mpatches\n'), ((7450, 7482), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'label': 'x', 'color': 'y'}), '(label=x, color=y)\n', (7464, 7482), True, 'import matplotlib.patches as mpatches\n'), ((7750, 7782), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'label': 'x', 'color': 'y'}), '(label=x, color=y)\n', (7764, 7782), True, 'import matplotlib.patches as mpatches\n')] |
import os
import numpy as np
import cv2
import time
from tqdm import tqdm
import torch
from torch.autograd import Variable
def w1bs_extract_descs_and_save(input_img_fname, model, desc_name, mean_img=0.443728476019,
                                std_img=0.20197947209, cuda = False, out_dir = None):
    """Extract descriptors for every patch of a W1BS patch strip and save
    them as a whitespace-separated text file.

    The input image is a vertical strip of square patches (height equals
    width * n_patches).  Each patch is resized to 32x32, normalised with
    the given mean/std, pushed through ``model`` in batches of 128, and
    the concatenated descriptors are written with ``np.savetxt``.

    Args:
        input_img_fname: path to the W1BS ``.bmp`` patch strip.
        model: torch module mapping (B, 1, 32, 32) float batches to
            descriptors reshapeable to (-1, 128).
        desc_name: descriptor name, used as the output file extension.
        mean_img, std_img: normalisation statistics of the training data.
        cuda: move batches to the GPU when True.
        out_dir: optional output root; derived from the input path if None.
    """
    # Mirror the input path layout under the output root.
    if out_dir is None:
        out_fname = input_img_fname.replace("data/W1BS", "data/out_descriptors").replace(".bmp", "." + desc_name)
        out_dir = os.path.dirname(out_fname)
    else:
        out_fname = out_dir + input_img_fname[input_img_fname.find('data/W1BS'):].replace("data/W1BS", "").replace(".bmp", "." + desc_name)
        out_fname = out_fname.replace('//', '/')
        out_dir = os.path.dirname(out_fname)
    if len(out_dir) > 0 and not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    image = cv2.imread(input_img_fname, 0)
    h, w = image.shape
    # The strip stores n_patches square patches of side w, stacked vertically.
    n_patches = int(h / w)
    patches_for_net = np.zeros((n_patches, 1, 32, 32))
    for i in range(n_patches):
        # Resize each patch straight to the 32x32 network input.  The
        # original additionally re-cropped with patch[0:w, 0:w], which is a
        # no-op for w >= 32 but raised a shape error for w < 32.
        patches_for_net[i, 0, :, :] = cv2.resize(image[i * w:(i + 1) * w, 0:w], (32, 32))
    # Normalise to the training statistics.
    patches_for_net = patches_for_net / 255
    patches_for_net -= mean_img
    patches_for_net /= std_img
    model.eval()
    outs = []
    batch_size = 128
    # torch.no_grad() replaces the deprecated Variable(..., volatile=True);
    # stepping by batch_size also avoids the original's empty trailing batch
    # when n_patches is an exact multiple of batch_size.
    with torch.no_grad():
        for start in range(0, n_patches, batch_size):
            end = min(start + batch_size, n_patches)
            data_a = torch.from_numpy(patches_for_net[start:end].astype(np.float32))
            if cuda:
                data_a = data_a.cuda()
            out_a = model(data_a)
            outs.append(out_a.data.cpu().numpy().reshape(-1, 128))
    res_desc = np.concatenate(outs)
    print(res_desc.shape, n_patches)
    res_desc = np.reshape(res_desc, (n_patches, -1))
    np.savetxt(out_fname, res_desc, delimiter=' ', fmt='%10.7f')
    return
# end of the W1BS.py file | [
"numpy.reshape",
"os.makedirs",
"torch.from_numpy",
"os.path.dirname",
"numpy.zeros",
"os.path.isdir",
"numpy.concatenate",
"time.time",
"numpy.savetxt",
"cv2.resize",
"torch.autograd.Variable",
"cv2.imread"
] | [((839, 869), 'cv2.imread', 'cv2.imread', (['input_img_fname', '(0)'], {}), '(input_img_fname, 0)\n', (849, 869), False, 'import cv2\n'), ((992, 1024), 'numpy.zeros', 'np.zeros', (['(n_patches, 1, 32, 32)'], {}), '((n_patches, 1, 32, 32))\n', (1000, 1024), True, 'import numpy as np\n'), ((1387, 1398), 'time.time', 'time.time', ([], {}), '()\n', (1396, 1398), False, 'import time\n'), ((2233, 2253), 'numpy.concatenate', 'np.concatenate', (['outs'], {}), '(outs)\n', (2247, 2253), True, 'import numpy as np\n'), ((2330, 2367), 'numpy.reshape', 'np.reshape', (['res_desc', '(n_patches, -1)'], {}), '(res_desc, (n_patches, -1))\n', (2340, 2367), True, 'import numpy as np\n'), ((2372, 2432), 'numpy.savetxt', 'np.savetxt', (['out_fname', 'res_desc'], {'delimiter': '""" """', 'fmt': '"""%10.7f"""'}), "(out_fname, res_desc, delimiter=' ', fmt='%10.7f')\n", (2382, 2432), True, 'import numpy as np\n'), ((458, 484), 'os.path.dirname', 'os.path.dirname', (['out_fname'], {}), '(out_fname)\n', (473, 484), False, 'import os\n'), ((702, 728), 'os.path.dirname', 'os.path.dirname', (['out_fname'], {}), '(out_fname)\n', (717, 728), False, 'import os\n'), ((1073, 1124), 'cv2.resize', 'cv2.resize', (['image[i * w:(i + 1) * w, 0:w]', '(32, 32)'], {}), '(image[i * w:(i + 1) * w, 0:w], (32, 32))\n', (1083, 1124), False, 'import cv2\n'), ((1990, 2014), 'torch.from_numpy', 'torch.from_numpy', (['data_a'], {}), '(data_a)\n', (2006, 2014), False, 'import torch\n'), ((2084, 2115), 'torch.autograd.Variable', 'Variable', (['data_a'], {'volatile': '(True)'}), '(data_a, volatile=True)\n', (2092, 2115), False, 'from torch.autograd import Variable\n'), ((770, 792), 'os.path.isdir', 'os.path.isdir', (['out_dir'], {}), '(out_dir)\n', (783, 792), False, 'import os\n'), ((806, 826), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (817, 826), False, 'import os\n')] |
import numpy as np
from csv import reader
def get_one_hot_rep(id):
    """Return a length-25 one-hot float vector with position ``id - 1`` set.

    Args:
        id: 1-based class index in [1, 25].
    """
    base = np.zeros(25)
    base[id - 1] = 1
    return base

def load_dataset(index=None):
    r"""Load samples from .\dataset\train.csv and split them 70/30.

    Each CSV row is: an id column, feature groups of 7 values, then a
    trailing class-index column.  A group whose first cell is the string
    'NaN' becomes a zero vector.  When ``index`` is given, only rows of
    that class are kept.

    Returns:
        (X_train, y_train, X_test, y_test) numpy arrays; labels are
        25-way one-hot vectors from get_one_hot_rep().
    """
    file_path = r'.\dataset\train.csv'
    X_train = list()
    y_train = list()
    # Context manager fixes the original's leaked file handle
    # (``reader(open(...))`` never closed the file).
    with open(file_path, 'rt') as handle:
        csv_reader = reader(handle)
        first_row = True
        for row in csv_reader:
            if first_row:
                # Skip the header line.
                first_row = False
                continue
            class_idx = int(row[len(row) - 1])
            if index is not None and index != class_idx:
                continue
            sample = list()
            for cell_index in range(1, len(row) - 3, 7):
                feature_set = row[cell_index:cell_index + 7]
                if feature_set[0] == 'NaN':
                    sample.append(np.zeros(7))
                else:
                    sample.append(list(map(float, feature_set)))
            X_train.append(sample)
            y_train.append(get_one_hot_rep(class_idx))
    # NOTE(review): the split reproduces the original behaviour exactly,
    # including skipping element 0 of the collected lists.
    cut = int(len(X_train) * 0.7)
    return np.array(X_train[1:cut]), np.array(y_train[1:cut]), \
        np.array(X_train[cut:]), np.array(y_train[cut:])
| [
"numpy.zeros"
] | [((87, 99), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (95, 99), True, 'import numpy as np\n'), ((761, 772), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (769, 772), True, 'import numpy as np\n')] |
# Aalto University, School of Science
# T-61.5140 Machine Learning: Advanced probabilistic Methods
# Author: <EMAIL>, 2016
import copy
import numpy as np
class EM_algo():
    """
    A superclass for different EM-fitted models.

    Subclasses implement ``reset``, ``draw``, ``logl`` and ``EM_iter``;
    this base class provides the generic EM fitting loop plus small
    logging and debugging helpers.
    """
    def __init__(self, hyperparams, X=None, Y=None, ndata=0, pdata=0):
        """
        Initialize model based either on given data (X, Y) or
        on given data dimensionality (ndata, pdata).
        """
        # ``is not None`` instead of ``!= None``: with numpy arrays the
        # original elementwise comparison produced an array whose truth
        # value is ambiguous and raised a ValueError.
        if X is not None and Y is not None:
            self.X = X
            self.Y = Y
            self.ndata = len(self.X)
            self.pdata = len(self.X[0])
        if ndata and pdata:
            self.X = None
            self.Y = None
            self.ndata = ndata
            self.pdata = pdata
        self.h = hyperparams
        self.p = dict()  # model parameters
        self.loglVals = []
        self.reset()
        if X is not None and Y is not None:
            self.current_logl, self.cll = self.logl()

    def reset(self):
        """
        Reset priors and draw parameter estimates from prior.
        """
        raise NotImplementedError("Subclass implements")

    def draw(self, item):
        """
        Draw a data sample from the current predictive distribution.
        Returns the drawn y and z-values.
        """
        raise NotImplementedError("Subclass implements")

    def logl(self):
        """
        Calculates the full log likelihood for this model.
        Returns the logl (and the values of each term for debugging purposes)
        """
        raise NotImplementedError("Subclass implements")

    def EM_iter(self):
        """
        Executes a single round of EM updates for this model.
        """
        raise NotImplementedError("Subclass implements")

    def EM_fit(self, alim=1e-10, maxit=1e4):
        """
        Calls the EM_iter repeatedly until the log likelihood
        of the model increases less than 'alim' in absolute
        value or after 'maxit' iterations have been done.
        Returns the number of EM-iterations, final log likelihood
        value and a string that explains the end condition.
        """
        self.loglVals = []
        logl, ll = self.logl()
        for i in range(int(maxit)):
            self.EM_iter()
            logl2, ll2 = self.logl()
            self.loglVals.append(logl2)
            adiff = abs(logl2 - logl)
            if adiff < alim:
                return i + 1, logl2, "alim"
            logl = logl2
        return maxit, logl2, "maxit"

    def assert_logl_increased(self, event):
        """
        Checks that the log likelihood increased since model
        initialization or the time this function was last called.

        Raises:
            ValueError: if the log likelihood decreased by more than 1e-3.
        """
        newlogl, ll = self.logl()
        if self.current_logl - newlogl > 1e-3:
            self.debug_logl(self.cll, ll)
            raise ValueError("logl increased after %s" % (event))
        self.current_logl, self.cll = newlogl, ll

    def get_p(self):
        """
        Returns a deep copy of the model parameters, so callers cannot
        mutate the model's internal state through the returned dict.
        """
        return copy.deepcopy(self.p)

    def get_loglVals(self):
        """Return the per-iteration log likelihood trace of the last fit."""
        return self.loglVals

    def set_p(self, p):
        """
        Sets the model parameters (shallow copy of the given dict).
        """
        self.p = p.copy()

    def print_p(self):
        """
        Prints the model parameters, one at each line.
        """
        for k, v in self.p.items():
            print("%s = %s" % (k, v))

    def pretty_vector(self, x):
        """
        Returns a formatted version of a vector, e.g. "(1.00, 2.00)".
        """
        s = ["("]
        s.extend(["%.2f, " % (xi) for xi in x[:-1]])
        s.append("%.2f)" % (x[-1]))
        return "".join(s)

    def debug_logl(self, ll1, ll2):
        """
        Prints an analysis of the per-term change in
        log likelihood from ll1 to ll2.
        """
        print("Logl before after")
        for v1, v2, i in zip(ll1, ll2, range(len(ll1))):
            if v1 > v2:
                d = ">"
            elif v2 > v1:
                d = "<"
            else:
                d = "="
            print("Term %02d: %7.3f %s %7.3f" % (i, v1, d, v2))
        print("Total %7.3f %7.3f" % (sum(ll1), sum(ll2)))

    def get_model_type(self):
        """
        Returns linear or Mixture Model
        """
        return None

    def mse(self, a, b):
        """Mean squared error between two equal-length sequences."""
        a = np.asarray(a)
        b = np.asarray(b)
        return np.mean((a - b) ** 2)
| [
"numpy.mean",
"numpy.asarray",
"copy.deepcopy"
] | [((3116, 3137), 'copy.deepcopy', 'copy.deepcopy', (['self.p'], {}), '(self.p)\n', (3129, 3137), False, 'import copy\n'), ((4419, 4432), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (4429, 4432), True, 'import numpy as np\n'), ((4445, 4458), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (4455, 4458), True, 'import numpy as np\n'), ((4531, 4552), 'numpy.mean', 'np.mean', (['((a - b) ** 2)'], {}), '((a - b) ** 2)\n', (4538, 4552), True, 'import numpy as np\n')] |
#!/usr/bin/env python
####################
# Required Modules #
####################
# Generic/Built-in
import concurrent.futures
import os
from pathlib import Path
from string import Template
from typing import Dict, List, Tuple
# Libs
import numpy as np
import pandas as pd
from PIL import Image
from sklearn.preprocessing import LabelEncoder
# Custom
from rest_rpc import app
from rest_rpc.core.pipelines.base import BasePipe
from rest_rpc.core.pipelines.dataset import PipeData
##################
# Configurations #
##################
# Absolute path of this module, attached to log records for traceability.
SOURCE_FILE = os.path.abspath(__file__)
# Project-wide structured logger injected via the Flask app config.
logging = app.config['NODE_LOGGER'].synlog
logging.debug("imagepipe.py logged", Description="No Changes")
########################################
# Data Preprocessing Class - ImagePipe #
########################################
class ImagePipe(BasePipe):
    """
    The ImagePipe class implement preprocessing tasks generalised for handling
    image data. The general workflow is as follows:
    1) Converts images into .csv format, with each pixel arranged in a single
    row, alongside annotated target labels
    2) Downscales images to lowest common denominator of all parties in the
    federated grid.
    3) Augment each image to bring out best local features (Automl: DeepAugment)
    4) Convert numpy to torch tensors for dataloading
    Prerequisite: Data MUST have its labels headered as 'target'
    Attributes:
        des_dir (str): Destination directory to save data in
        data (list(str)): Loaded data to be processed
        output (pd.DataFrame): Processed data (with interpolations applied)
        use_grayscale (bool): Load images as single-channel "L" when True
        use_alpha (bool): Keep the alpha channel when True
        use_deepaugment (bool): Reserved flag; augmentation search is a no-op
    """
    def __init__(
        self,
        data: List[str],
        des_dir: str,
        use_grayscale: bool = True,
        use_alpha: bool = False,
        use_deepaugment: bool = True,
    ):
        super().__init__(datatype="image", data=data, des_dir=des_dir)
        self.use_grayscale = use_grayscale
        self.use_alpha = use_alpha
        self.use_deepaugment = use_deepaugment

    ###########
    # Helpers #
    ###########

    def parse_output(self) -> Tuple[List[str], np.ndarray, Dict[str, str]]:
        """ In order for the pipelines to be symmetrical, images which are
            higher dimensional objects are reshaped to (n, height * width, -1)
            ndarrays, and stored as a dataframe, where each column represents
            all values in a pixel. This function ensures that this adapted
            structure can be properly transformed back into a numpy array when
            necessary.
        Returns:
            headers (list(str))
            Image array (np.ndarray)
            schema (dict(str, str))
        """
        headers, values, schema = super().parse_output()
        # Each dataframe cell holds a per-pixel list; tolist() + np.array
        # rebuilds the full (n, height * width, channels) array.
        formatted_values = np.array(values.tolist())
        return headers, formatted_values, schema

    def load_image(self, img_class: str, img_path: str) -> pd.DataFrame:
        """ Loads in a single image and retrieves its pixel values
        Args:
            img_class (str): Classification label of image
            img_path (str): Path to image
        Returns:
            Pixel Map (pd.DataFrame)
        """
        with Image.open(img_path) as img:
            # PIL mode string, e.g. "L", "LA", "RGB" or "RGBA".
            img_format = Template("$color$alpha")
            color = "L" if self.use_grayscale else "RGB"
            alpha = "A" if self.use_alpha else ""
            img_format = img_format.substitute(color=color, alpha=alpha)
            # Generate column names according to dimensions of image. This will
            # allow for auto-padding during feature alignment, both locally
            # (between declared image datasets), and across the grid (between
            # datasets amongst workers)
            width, height = img.size
            pix_col_names = [
                f"{img_format}x{h_idx}x{w_idx}"
                for h_idx in range(height)
                for w_idx in range(width)
            ]
            # Originally need to cast to [Batch x Channels x Height x Width]
            # But for the sake of alignment, flatten first. Allow Preprocessor
            # to handle the necessary operations for formatting the images for
            # use in WebsocketServerWorker
            # np.asarray(pd.concat([df1, df2]).values.tolist()).reshape(2, -1, 28, 28)
            formatted_img = img.convert(img_format)
            pix_vals = np.asarray(formatted_img).reshape(
                (1, height * width, -1)
            ).tolist()
        # One row per image; each column is that pixel's channel values.
        pix_map = pd.DataFrame(data=pix_vals, columns=pix_col_names)
        pix_map['target'] = img_class
        logging.debug(
            f"Image on {img_path} has been loaded.",
            img_path=img_path,
            ID_path=SOURCE_FILE,
            ID_class=ImagePipe.__name__,
            ID_function=ImagePipe.load_image.__name__
        )
        return pix_map

    def load_images(self) -> pd.DataFrame:
        """ Loads in all images found in the declared path sets
        Returns
            Output (pd.DataFrame)
        """
        all_images = []
        # self.data is an iterable of (class label, list of image paths);
        # images of each class are loaded concurrently.
        for img_class, img_paths in self.data:
            with concurrent.futures.ThreadPoolExecutor() as executor:
                class_images = list(executor.map(
                    lambda x: self.load_image(img_class, img_path=x),
                    img_paths
                ))
            all_images += class_images
        aggregated_df = pd.concat(all_images)
        aggregated_df['target'] = aggregated_df['target'].astype('category')
        return aggregated_df

    def apply_deepaugment(self, df):
        """ Apply AutoML methods to search for the appropriate preprocessing
            operations to use for transforming the images

        NOTE(review): currently a passthrough -- augmentation search is not
        implemented yet; the input dataframe is returned unchanged.
        """
        return df

    ##################
    # Core Functions #
    ##################

    def run(self) -> pd.DataFrame:
        """ Wrapper function that automates the image-specific preprocessing of
            the declared datasets
        Returns
            Output (pd.DataFrame)
        """
        # is_processed()/output are managed by BasePipe; results are cached.
        if not self.is_processed():
            aggregated_df = self.load_images()
            self.output = self.apply_deepaugment(aggregated_df)
        return self.output
| [
"PIL.Image.open",
"string.Template",
"pandas.DataFrame",
"numpy.asarray",
"os.path.abspath",
"pandas.concat"
] | [((559, 584), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (574, 584), False, 'import os\n'), ((4508, 4558), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'pix_vals', 'columns': 'pix_col_names'}), '(data=pix_vals, columns=pix_col_names)\n', (4520, 4558), True, 'import pandas as pd\n'), ((5417, 5438), 'pandas.concat', 'pd.concat', (['all_images'], {}), '(all_images)\n', (5426, 5438), True, 'import pandas as pd\n'), ((3186, 3206), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (3196, 3206), False, 'from PIL import Image\n'), ((3242, 3266), 'string.Template', 'Template', (['"""$color$alpha"""'], {}), "('$color$alpha')\n", (3250, 3266), False, 'from string import Template\n'), ((4379, 4404), 'numpy.asarray', 'np.asarray', (['formatted_img'], {}), '(formatted_img)\n', (4389, 4404), True, 'import numpy as np\n')] |
import os
import torch
import csv
import random
import numpy
import json
from math import pi
from torchvision import transforms
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# Image Processing
def shuffle(ts, dim=0, inv=False):
    """Permute ``ts`` along ``dim``.

    With ``inv=True`` the order is simply reversed (deterministic);
    otherwise a random permutation drawn on the tensor's device is used.
    """
    n = ts.size(dim)
    order = (torch.arange(n - 1, -1, step=-1, device=ts.device) if inv
             else torch.randperm(n).to(ts.device))
    return ts.index_select(index=order.long(), dim=dim)

def flip(ts, dim=-1):
    """Reverse ``ts`` along ``dim`` (a shuffle with fixed inverted order)."""
    return shuffle(ts, inv=True, dim=dim)
def one_hot(y, dim=7):
    """Convert integer class labels ``y`` to one-hot rows of width ``dim``."""
    targets = y.view(-1, 1)
    encoded = torch.zeros(targets.size(0), dim, device=y.device)
    encoded.scatter_(1, targets, 1)
    return encoded
def ts2pil(ts):
    """Convert a (1, C, H, W) or (C, H, W) tensor to a PIL image.

    Tensors containing negative values are assumed to live in [-1, 1]
    and are mapped back to [0, 1] before conversion.
    """
    if ts.dim() == 4:
        assert ts.size(0) == 1
        ts = ts.squeeze(0)
    if ts.min() < 0:
        ts = ts.mul(0.5).add(0.5)
    return transforms.ToPILImage()(ts)
def pil2ts(img):
    """Convert a PIL image to a float tensor scaled to [0, 1]."""
    to_tensor = transforms.ToTensor()
    return to_tensor(img)
# Log
def save_log(log, config, print_items=[], summary_writer=None):
    """Append one row of metrics to <log_path>/log.csv and optionally mirror
    entries to a TensorBoard ``summary_writer`` and to stdout.

    NOTE(review): the mutable default ``print_items=[]`` is only iterated
    here, never mutated, so it is harmless.
    """
    log_path = os.path.join(config.log_path, "log.csv")
    # Emit the CSV header only when the file is first created.
    write_header = not os.path.exists(log_path)
    with open(log_path, "a+") as f:
        f_csv = csv.DictWriter(f, sorted(log.keys()))
        if write_header:
            f_csv.writeheader()
        f_csv.writerows([log])
    if summary_writer is not None:
        for key in log:
            # Keys containing "step" are bookkeeping, not metrics.
            if not ("step" in key):
                if key == 'vector/lmk_weight':
                    # log[key] is (names, values): draw them as a labelled
                    # line plot and log the figure instead of a scalar.
                    fig = plt.figure()
                    x = list(range(len(log[key][0])))
                    plt.xticks(x, log[key][0], rotation=270, fontsize=3)
                    plt.plot(x, log[key][1])
                    plt.grid()
                    #plt.tight_layout()
                    #plt.savefig('lmk_weight.pdf')
                    #assert 0
                    summary_writer.add_figure(key, fig, log["step"])
                    #lmk_dict = dict(zip(log[key][0], log[key][1]))
                    #summary_writer.add_scalars(key, lmk_dict, log["step"])
                else:
                    summary_writer.add_scalar(key, log[key], log["step"])
    if config.print_log:
        logg = ""
        logg += "[{}/{}] time:{:.3f} ".format(
            log["step"], log["nsteps"], log["time_elapse"]
        )
        if print_items:
            for items in print_items:
                for item in items:
                    logg += "{}:{:.3f} ".format(item, log[item])
        print("\r%s" % logg, end="\n")
def save_config(config):
    """Dump all attributes of ``config`` as JSON to <log_path>/config.js.

    Opening with mode "w" already truncates an existing file, so the
    original's explicit ``os.remove`` was redundant and introduced a
    check-then-remove race; it has been dropped.
    """
    path = os.path.join(config.log_path, "config.js")
    with open(path, "w") as f:
        json.dump(vars(config), f)
# Model
def save_checkpoint(models, optimizors, epoch, save_path, device=torch.device("cuda:0")):
    """Serialise optimizer and model state dicts under ``save_path``.

    Each state is written to ``{opt,model}-<key>-<epoch>.cpkt`` and a
    ``...-latest.cpkt`` symlink is re-pointed at the new file.

    NOTE(review): ``device`` is currently unused (the ``model.to(device)``
    call below is commented out) and its default is evaluated once at
    import time.
    """
    for key, opt in optimizors.items():
        data = opt.state_dict()
        data_save_path = os.path.join(save_path, "opt-%s-%s.cpkt" % (key, epoch))
        torch.save(data, data_save_path)
        # Re-point the "latest" symlink at this epoch's file.
        latest_link = os.path.join(save_path, "opt-%s-latest.cpkt" % key)
        if os.path.islink(latest_link):
            os.remove(latest_link)
        os.symlink(data_save_path, latest_link)
    for key, model in models.items():
        # NOTE(review): "image"/"text" entries are skipped -- presumably
        # they are not torch modules; confirm against the caller.
        if key in ("image", "text"):
            continue
        model = model.module if hasattr(model, "module") else model
        if not hasattr(model, "state_dict"):
            continue
        data = model.state_dict()
        data_save_path = os.path.join(save_path, "model-%s-%s.cpkt" % (key, epoch))
        torch.save(data, data_save_path)
        latest_link = os.path.join(save_path, "model-%s-latest.cpkt" % key)
        if os.path.islink(latest_link):
            os.remove(latest_link)
        os.symlink(data_save_path, latest_link)
        # model.to(device)
    print("Save checkpoint, epoch: %s" % epoch)
def load_checkpoint(models, save_path, optimizors={}, epoch=0):
    """Restore model and optimizer state dicts from *save_path*.

    epoch <= 0 means "load the latest checkpoint" via the *-latest symlinks;
    the numeric epoch is then recovered from the symlink target name and
    returned.  NOTE(review): mutable default ``{}`` for optimizors is only
    iterated, never mutated, so it is harmless here.
    """
    model_marker = epoch if epoch > 0 else "latest"
    for key, model in models.items():
        # "image"/"text" entries are intentionally skipped (mirrors save_checkpoint).
        if key in ("image", "text"):
            continue
        data_save_path = os.path.join(save_path, f"model-{key}-{model_marker}.cpkt")
        # Unwrap a .module attribute (e.g. DataParallel wrappers) if present.
        model = model.module if hasattr(model, "module") else model
        if not hasattr(model, "state_dict"):
            continue
        pretrained_dict = torch.load(data_save_path)
        model_dict = model.state_dict()
        #pretrained_dict = {k: v for k, v in pretrained_dict.items() if 'gridstn' not in k}
        # Merge so that keys missing from the checkpoint keep their current values.
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
    for key, opt in optimizors.items():
        data_save_path = os.path.join(save_path, "opt-%s-%s.cpkt" % (key, model_marker))
        opt.load_state_dict(torch.load(data_save_path))
    if model_marker == "latest":
        # Recover the numeric epoch from the symlink target "...-<epoch>.cpkt".
        # NOTE(review): relies on data_save_path left over from the loops above;
        # it is undefined if both models and optimizors were empty/skipped, and
        # eval() on a filename fragment is fragile -- int() would be safer.
        path = os.readlink(data_save_path)
        epoch = eval(path[:-5].rsplit("-", 1)[1])
    print("Load checkpoint, epoch: %s" % epoch)
    return epoch
# Help functions
def makedir(path):
    """Create *path* (and any missing parents) if it does not already exist.

    Uses exist_ok=True instead of the old exists()/makedirs() pair, which
    could race with a concurrent creator and raise FileExistsError.
    """
    os.makedirs(path, exist_ok=True)
def set_random_seed(seed):
    """Seed every RNG in use: python, numpy, torch CPU, and all CUDA devices."""
    seeders = (
        random.seed,
        numpy.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
    )
    for seeder in seeders:
        seeder(seed)
def get_data(data_iter, data_loader):
    """Fetch the next batch, restarting the iterator when it is exhausted.

    Returns (batch, iterator); the returned iterator is either the one
    passed in or a fresh ``iter(data_loader)`` after a wrap-around.
    The old bare ``except`` also masked real errors raised while
    producing a batch; only StopIteration triggers the restart now.
    """
    try:
        phos = next(data_iter)
    except StopIteration:
        # Exhausted: restart from the loader and take its first batch.
        data_iter = iter(data_loader)
        phos = next(data_iter)
    return phos, data_iter
def merge_list(lst):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return [element for sub in lst for element in sub]
def get_GPU_info():
    """Return the free memory (MiB) of each visible GPU via nvidia-smi.

    Shells out to nvidia-smi and parses the "Free" lines from a scratch
    file named "tmp" in the current directory (kept for backward
    compatibility).  The file handle is now closed deterministically
    instead of being leaked by an anonymous open().
    """
    os.system("nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp")
    with open("tmp", "r") as fh:
        memory_gpu = [int(line.split()[2]) for line in fh.readlines()]
    return memory_gpu
def pose2label(poses):
    """Quantize pose angles (radians, torch tensor) into 5 integer bins 0..4."""
    labels = poses.clone()
    labels[poses < -pi / 3] = 0
    # Later thresholds overwrite earlier ones, so each pose ends up in the
    # highest bin whose lower bound it reaches.
    for lower, bin_id in ((-pi / 3, 1), (-pi / 6, 2), (pi / 6, 3), (pi / 3, 4)):
        labels[poses >= lower] = bin_id
    return labels.long()
| [
"matplotlib.pyplot.grid",
"torchvision.transforms.ToPILImage",
"os.path.islink",
"os.remove",
"os.path.exists",
"os.readlink",
"matplotlib.pyplot.plot",
"numpy.random.seed",
"torchvision.transforms.ToTensor",
"matplotlib.pyplot.xticks",
"matplotlib.use",
"torch.save",
"torch.device",
"torc... | [((146, 167), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (160, 167), False, 'import matplotlib\n'), ((1000, 1040), 'os.path.join', 'os.path.join', (['config.log_path', '"""log.csv"""'], {}), "(config.log_path, 'log.csv')\n", (1012, 1040), False, 'import os\n'), ((2494, 2536), 'os.path.join', 'os.path.join', (['config.log_path', '"""config.js"""'], {}), "(config.log_path, 'config.js')\n", (2506, 2536), False, 'import os\n'), ((2544, 2564), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2558, 2564), False, 'import os\n'), ((2731, 2753), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (2743, 2753), False, 'import torch\n'), ((5027, 5044), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (5038, 5044), False, 'import random\n'), ((5049, 5072), 'numpy.random.seed', 'numpy.random.seed', (['seed'], {}), '(seed)\n', (5066, 5072), False, 'import numpy\n'), ((5077, 5100), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (5094, 5100), False, 'import torch\n'), ((5105, 5133), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (5127, 5133), False, 'import torch\n'), ((5138, 5170), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (5164, 5170), False, 'import torch\n'), ((5477, 5542), 'os.system', 'os.system', (['"""nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp"""'], {}), "('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n", (5486, 5542), False, 'import os\n'), ((818, 841), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (839, 841), False, 'from torchvision import transforms\n'), ((876, 897), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (895, 897), False, 'from torchvision import transforms\n'), ((1064, 1088), 'os.path.exists', 'os.path.exists', (['log_path'], {}), '(log_path)\n', (1078, 1088), False, 'import os\n'), ((2574, 
2589), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (2583, 2589), False, 'import os\n'), ((2853, 2909), 'os.path.join', 'os.path.join', (['save_path', "('opt-%s-%s.cpkt' % (key, epoch))"], {}), "(save_path, 'opt-%s-%s.cpkt' % (key, epoch))\n", (2865, 2909), False, 'import os\n'), ((2918, 2950), 'torch.save', 'torch.save', (['data', 'data_save_path'], {}), '(data, data_save_path)\n', (2928, 2950), False, 'import torch\n'), ((2973, 3024), 'os.path.join', 'os.path.join', (['save_path', "('opt-%s-latest.cpkt' % key)"], {}), "(save_path, 'opt-%s-latest.cpkt' % key)\n", (2985, 3024), False, 'import os\n'), ((3036, 3063), 'os.path.islink', 'os.path.islink', (['latest_link'], {}), '(latest_link)\n', (3050, 3063), False, 'import os\n'), ((3108, 3147), 'os.symlink', 'os.symlink', (['data_save_path', 'latest_link'], {}), '(data_save_path, latest_link)\n', (3118, 3147), False, 'import os\n'), ((3438, 3496), 'os.path.join', 'os.path.join', (['save_path', "('model-%s-%s.cpkt' % (key, epoch))"], {}), "(save_path, 'model-%s-%s.cpkt' % (key, epoch))\n", (3450, 3496), False, 'import os\n'), ((3505, 3537), 'torch.save', 'torch.save', (['data', 'data_save_path'], {}), '(data, data_save_path)\n', (3515, 3537), False, 'import torch\n'), ((3560, 3613), 'os.path.join', 'os.path.join', (['save_path', "('model-%s-latest.cpkt' % key)"], {}), "(save_path, 'model-%s-latest.cpkt' % key)\n", (3572, 3613), False, 'import os\n'), ((3625, 3652), 'os.path.islink', 'os.path.islink', (['latest_link'], {}), '(latest_link)\n', (3639, 3652), False, 'import os\n'), ((3697, 3736), 'os.symlink', 'os.symlink', (['data_save_path', 'latest_link'], {}), '(data_save_path, latest_link)\n', (3707, 3736), False, 'import os\n'), ((4052, 4111), 'os.path.join', 'os.path.join', (['save_path', 'f"""model-{key}-{model_marker}.cpkt"""'], {}), "(save_path, f'model-{key}-{model_marker}.cpkt')\n", (4064, 4111), False, 'import os\n'), ((4273, 4299), 'torch.load', 'torch.load', (['data_save_path'], {}), 
'(data_save_path)\n', (4283, 4299), False, 'import torch\n'), ((4583, 4646), 'os.path.join', 'os.path.join', (['save_path', "('opt-%s-%s.cpkt' % (key, model_marker))"], {}), "(save_path, 'opt-%s-%s.cpkt' % (key, model_marker))\n", (4595, 4646), False, 'import os\n'), ((4753, 4780), 'os.readlink', 'os.readlink', (['data_save_path'], {}), '(data_save_path)\n', (4764, 4780), False, 'import os\n'), ((4946, 4966), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4960, 4966), False, 'import os\n'), ((4976, 4993), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (4987, 4993), False, 'import os\n'), ((3077, 3099), 'os.remove', 'os.remove', (['latest_link'], {}), '(latest_link)\n', (3086, 3099), False, 'import os\n'), ((3666, 3688), 'os.remove', 'os.remove', (['latest_link'], {}), '(latest_link)\n', (3675, 3688), False, 'import os\n'), ((4676, 4702), 'torch.load', 'torch.load', (['data_save_path'], {}), '(data_save_path)\n', (4686, 4702), False, 'import torch\n'), ((1437, 1449), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1447, 1449), True, 'import matplotlib.pyplot as plt\n'), ((1524, 1576), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', 'log[key][0]'], {'rotation': '(270)', 'fontsize': '(3)'}), '(x, log[key][0], rotation=270, fontsize=3)\n', (1534, 1576), True, 'import matplotlib.pyplot as plt\n'), ((1597, 1621), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'log[key][1]'], {}), '(x, log[key][1])\n', (1605, 1621), True, 'import matplotlib.pyplot as plt\n'), ((1642, 1652), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1650, 1652), True, 'import matplotlib.pyplot as plt\n')] |
import math
import random
import matplotlib.pyplot as plt
import numpy as np
from math import exp
#Returns intersections between 2 circles given the centers and radii
def get_intersections(a, r1, b, r2):
    """Intersect two circles given centers (numpy arrays) and radii.

    Returns
    -------
    -1   : circles too far apart (no intersection)
    -2   : one circle strictly inside the other (no intersection)
     0   : coincident circles (infinitely many intersections)
    dict : {'x1','y1','x2','y2'} -- the two intersection points rounded
           to 5 decimals (identical points when the circles are tangent)
    """
    distance = np.linalg.norm(a-b)
    #Case 1: they don't intersect since they are too far
    if distance > r1 + r2:
        return -1
    #Case 2: they don't intersect since one circle is contained inside the other
    if distance < abs(r1 - r2):
        return -2
    #Case 3: they intersect in infinite points since they are coincident
    if distance == 0 and r1 == r2:
        return 0
    #Case 4: they intersect in two points, which we will calculate
    # x = distance from a to the chord midpoint, y = half the chord length.
    x = (r1 ** 2 - r2 ** 2 + distance ** 2) / (distance * 2)
    y = math.sqrt(r1 ** 2 - x ** 2)
    # Chord midpoint, on the line a -> b.
    x3 = a[0] + x * (b[0] - a[0]) / distance
    y3 = a[1] + x * (b[1] - a[1]) / distance  # fixed: was (b[1] - b[1]), always 0
    # Offset perpendicular to a -> b in both directions.
    x4 = x3 + y * (b[1] - a[1]) / distance
    y4 = y3 - y * (b[0] - a[0]) / distance
    x5 = x3 - y * (b[1] - a[1]) / distance  # fixed: was (b[1] - b[0])
    y5 = y3 + y * (b[0] - a[0]) / distance
    return ({'x1': round(x4,5),'y1': round(y4,5),'x2':round(x5,5),'y2':round(y5,5)})
#Returns an approximated intersection between 2 circles given the centers and radii
def approximate(p1, r1, p2, r2):
    """Approximate a circle intersection point.

    For circles that do not intersect, returns the midpoint of the segment
    joining the two nearest points of the circles along the center line
    (numpy array).  Returns None for (near-)coincident centers, where no
    direction is defined.
    """
    d = np.linalg.norm(p1-p2)
    # Degenerate: centers coincide.  Fixed: the old guard compared d to
    # exp(1e-5) ~= 1.00001, wrongly rejecting any circles closer than ~1 unit.
    if abs(d) < 1e-5:
        return None
    dr1, dr2 = r1 / d, r2 / d
    p = p2-p1
    dp1, dp2 = dr1*p, dr2*p
    # The four points where each circle crosses the center line.
    p11, p12, p21, p22 = p1 + dp1, p1-dp1, p2 + dp2, p2-dp2
    # Find nearest pair of intersection point belonging to different circles.
    n1, n2 = p11, p21
    d1, dt = np.linalg.norm(p11-p21), np.linalg.norm(p11-p22)
    if dt < d1: d1, n2 = dt, p22
    dt = np.linalg.norm(p12-p21)
    if dt < d1: d1, n1, n2 = dt, p12, p21
    dt = np.linalg.norm(p12-p22)
    if dt < d1: n1, n2 = p12, p22
    # return middle of line between two nearest points as result
    return n1 / 2 + n2 / 2
#Remove duplicate from intersection in order to filter data
def filterdata(circles, distances, intersections):
    """Drop duplicate intersection entries, keeping first-seen order.

    *circles* and *distances* are accepted for interface compatibility
    but are not used by the filter.
    """
    unique = []
    for entry in intersections:
        if not any(entry == kept for kept in unique):
            unique.append(entry)
    return unique
#Returns the centroid using different weights for intersections and approximated ones
def centroid(intersection):
    """Weighted centroid of intersection points, as {'x': ..., 'y': ...}.

    Each entry carries its point under 'p' and its weight under 'w';
    raises ZeroDivisionError when the total weight is zero (e.g. empty input).
    """
    total_w = sum(entry['w'] for entry in intersection)
    sum_x = sum(entry['p'][0] * entry['w'] for entry in intersection)
    sum_y = sum(entry['p'][1] * entry['w'] for entry in intersection)
    return {'x': sum_x / total_w, 'y': sum_y / total_w}
#Core of clustering that returns the cluster point using previous functions
def core(positions, distances):
    """Estimate a position from anchor positions and measured distances.

    Every pair of circles (anchor position, measured distance as radius) is
    intersected; true intersections contribute points with weight 3, while
    approximated points from non-intersecting circles get weight 1.
    Duplicates are filtered out and the weighted centroid is returned as
    [x, y].  (Unused locals x, y and centroidcond have been removed.)
    """
    inter = []
    for k in range(len(positions)):
        for j in range(len(positions)):
            i = get_intersections(positions[k], distances[k], positions[j], distances[j])
            if i == -1 or i == -2:
                # No true intersection: fall back to the approximation.
                i = approximate(positions[k], distances[k], positions[j], distances[j])
                if i is not None:
                    inter.append({'p': [i[0], i[1]], 'w': 1})
            elif i == 0:
                # Coincident circles (e.g. k == j): nothing usable.
                pass
            else:
                inter.append({'p': [i['x1'], i['y1']], 'w': 3})
                inter.append({'p': [i['x2'], i['y2']], 'w': 3})
    inter_filtered = filterdata(positions, distances, inter)
    centre = centroid(inter_filtered)
    return [centre['x'], centre['y']]
| [
"math.exp",
"math.sqrt",
"numpy.linalg.norm"
] | [((220, 241), 'numpy.linalg.norm', 'np.linalg.norm', (['(a - b)'], {}), '(a - b)\n', (234, 241), True, 'import numpy as np\n'), ((750, 777), 'math.sqrt', 'math.sqrt', (['(r1 ** 2 - x ** 2)'], {}), '(r1 ** 2 - x ** 2)\n', (759, 777), False, 'import math\n'), ((1310, 1333), 'numpy.linalg.norm', 'np.linalg.norm', (['(p1 - p2)'], {}), '(p1 - p2)\n', (1324, 1333), True, 'import numpy as np\n'), ((1712, 1737), 'numpy.linalg.norm', 'np.linalg.norm', (['(p12 - p21)'], {}), '(p12 - p21)\n', (1726, 1737), True, 'import numpy as np\n'), ((1787, 1812), 'numpy.linalg.norm', 'np.linalg.norm', (['(p12 - p22)'], {}), '(p12 - p22)\n', (1801, 1812), True, 'import numpy as np\n'), ((1346, 1356), 'math.exp', 'exp', (['(1e-05)'], {}), '(1e-05)\n', (1349, 1356), False, 'from math import exp\n'), ((1621, 1646), 'numpy.linalg.norm', 'np.linalg.norm', (['(p11 - p21)'], {}), '(p11 - p21)\n', (1635, 1646), True, 'import numpy as np\n'), ((1646, 1671), 'numpy.linalg.norm', 'np.linalg.norm', (['(p11 - p22)'], {}), '(p11 - p22)\n', (1660, 1671), True, 'import numpy as np\n')] |
""" Converter for Faceswap """
import logging
import cv2
import numpy as np
from plugins.plugin_loader import PluginLoader
# Module-level logger shared by the Converter class below.
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Converter():
    """ The converter is responsible for swapping the original face(s) in a frame with the output
    of a trained Faceswap model.
    Parameters
    ----------
    output_size: int
        The size of the face, in pixels, that is output from the Faceswap model
    coverage_ratio: float
        The ratio of the training image that was used for training the Faceswap model
    centering: str
        The extracted face centering that the model was trained on (`"face"` or "`legacy`")
    draw_transparent: bool
        Whether the final output should be drawn onto a transparent layer rather than the original
        frame. Only available with certain writer plugins.
    pre_encode: python function
        Some writer plugins support the pre-encoding of images prior to saving out. As patching is
        done in multiple threads, but writing is done in a single thread, it can speed up the
        process to do any pre-encoding as part of the converter process.
    arguments: :class:`argparse.Namespace`
        The arguments that were passed to the convert process as generated from Faceswap's command
        line arguments
    configfile: str, optional
        Optional location of custom configuration ``ini`` file. If ``None`` then use the default
        config location. Default: ``None``
    """
    def __init__(self, output_size, coverage_ratio, centering, draw_transparent, pre_encode,
                 arguments, configfile=None):
        logger.debug("Initializing %s: (output_size: %s, coverage_ratio: %s, centering: %s, "
                     "draw_transparent: %s, pre_encode: %s, arguments: %s, configfile: %s)",
                     self.__class__.__name__, output_size, coverage_ratio, centering,
                     draw_transparent, pre_encode, arguments, configfile)
        self._output_size = output_size
        self._coverage_ratio = coverage_ratio
        self._centering = centering
        self._draw_transparent = draw_transparent
        self._writer_pre_encode = pre_encode
        self._args = arguments
        self._configfile = configfile
        # output_scale is a percentage on the command line; store as a factor.
        self._scale = arguments.output_scale / 100
        self._adjustments = dict(box=None, mask=None, color=None, seamless=None, sharpening=None)
        self._load_plugins()
        logger.debug("Initialized %s", self.__class__.__name__)
    @property
    def cli_arguments(self):
        """:class:`argparse.Namespace`: The command line arguments passed to the convert
        process """
        return self._args
    def reinitialize(self, config):
        """ Reinitialize this :class:`Converter`.
        Called as part of the :mod:`~tools.preview` tool. Resets all adjustments then loads the
        plugins as specified in the given config.
        Parameters
        ----------
        config: :class:`lib.config.FaceswapConfig`
            Pre-loaded :class:`lib.config.FaceswapConfig`. used over any configuration on disk.
        """
        logger.debug("Reinitializing converter")
        self._adjustments = dict(box=None, mask=None, color=None, seamless=None, sharpening=None)
        self._load_plugins(config=config, disable_logging=True)
        logger.debug("Reinitialized converter")
    def _load_plugins(self, config=None, disable_logging=False):
        """ Load the requested adjustment plugins.
        Loads the :mod:`plugins.converter` plugins that have been requested for this conversion
        session.
        Parameters
        ----------
        config: :class:`lib.config.FaceswapConfig`, optional
            Optional pre-loaded :class:`lib.config.FaceswapConfig`. If passed, then this will be
            used over any configuration on disk. If ``None`` then it is ignored. Default: ``None``
        disable_logging: bool, optional
            Plugin loader outputs logging info every time a plugin is loaded. Set to ``True`` to
            suppress these messages otherwise ``False``. Default: ``False``
        """
        logger.debug("Loading plugins. config: %s", config)
        self._adjustments["box"] = PluginLoader.get_converter(
            "mask",
            "box_blend",
            disable_logging=disable_logging)(self._output_size,
                                              configfile=self._configfile,
                                              config=config)
        self._adjustments["mask"] = PluginLoader.get_converter(
            "mask",
            "mask_blend",
            disable_logging=disable_logging)(self._args.mask_type,
                                              self._output_size,
                                              self._coverage_ratio,
                                              configfile=self._configfile,
                                              config=config)
        if self._args.color_adjustment != "none" and self._args.color_adjustment is not None:
            self._adjustments["color"] = PluginLoader.get_converter(
                "color",
                self._args.color_adjustment,
                disable_logging=disable_logging)(configfile=self._configfile, config=config)
        sharpening = PluginLoader.get_converter(
            "scaling",
            "sharpen",
            disable_logging=disable_logging)(configfile=self._configfile, config=config)
        # Only keep the sharpening plugin if its config actually enables a method.
        if sharpening.config.get("method", None) is not None:
            self._adjustments["sharpening"] = sharpening
        logger.debug("Loaded plugins: %s", self._adjustments)
    def process(self, in_queue, out_queue):
        """ Main convert process.
        Takes items from the in queue, runs the relevant adjustments, patches faces to final frame
        and outputs patched frame to the out queue.
        Parameters
        ----------
        in_queue: :class:`queue.Queue`
            The output from :class:`scripts.convert.Predictor`. Contains detected faces from the
            Faceswap model as well as the frame to be patched.
        out_queue: :class:`queue.Queue`
            The queue to place patched frames into for writing by one of Faceswap's
            :mod:`plugins.convert.writer` plugins.
        """
        logger.debug("Starting convert process. (in_queue: %s, out_queue: %s)",
                     in_queue, out_queue)
        log_once = False
        while True:
            items = in_queue.get()
            if items == "EOF":
                logger.debug("EOF Received")
                logger.debug("Patch queue finished")
                # Signal EOF to other processes in pool
                logger.debug("Putting EOF back to in_queue")
                in_queue.put(items)
                break
            if isinstance(items, dict):
                items = [items]
            for item in items:
                logger.trace("Patch queue got: '%s'", item["filename"])
                try:
                    image = self._patch_image(item)
                except Exception as err:  # pylint: disable=broad-except
                    # Log error and output original frame
                    logger.error("Failed to convert image: '%s'. Reason: %s",
                                 item["filename"], str(err))
                    image = item["image"]
                    # First failure is logged as a warning; subsequent ones at trace level.
                    loglevel = logger.trace if log_once else logger.warning
                    loglevel("Convert error traceback:", exc_info=True)
                    log_once = True
                    # UNCOMMENT THIS CODE BLOCK TO PRINT TRACEBACK ERRORS
                    # import sys ; import traceback
                    # exc_info = sys.exc_info() ; traceback.print_exception(*exc_info)
                logger.trace("Out queue put: %s", item["filename"])
                out_queue.put((item["filename"], image))
        logger.debug("Completed convert process")
    def _patch_image(self, predicted):
        """ Patch a swapped face onto a frame.
        Run selected adjustments and swap the faces in a frame.
        Parameters
        ----------
        predicted: dict
            The output from :class:`scripts.convert.Predictor`.
        Returns
        -------
        :class: `numpy.ndarray` or pre-encoded image output
            The final frame ready for writing by a :mod:`plugins.convert.writer` plugin.
            Frame is either an array, or the pre-encoded output from the writer's pre-encode
            function (if it has one)
        """
        logger.trace("Patching image: '%s'", predicted["filename"])
        frame_size = (predicted["image"].shape[1], predicted["image"].shape[0])
        new_image, background = self._get_new_image(predicted, frame_size)
        patched_face = self._post_warp_adjustments(background, new_image)
        patched_face = self._scale_image(patched_face)
        # Convert from float [0, 1] back to uint8 [0, 255] for the writer.
        patched_face *= 255.0
        patched_face = np.rint(patched_face,
                                out=np.empty(patched_face.shape, dtype="uint8"),
                                casting='unsafe')
        if self._writer_pre_encode is not None:
            patched_face = self._writer_pre_encode(patched_face)
        logger.trace("Patched image: '%s'", predicted["filename"])
        return patched_face
    def _get_new_image(self, predicted, frame_size):
        """ Get the new face from the predictor and apply pre-warp manipulations.
        Applies any requested adjustments to the raw output of the Faceswap model
        before transforming the image into the target frame.
        Parameters
        ----------
        predicted: dict
            The output from :class:`scripts.convert.Predictor`.
        frame_size: tuple
            The (`width`, `height`) of the final frame in pixels
        Returns
        -------
        placeholder: :class: `numpy.ndarray`
            The original frame with the swapped faces patched onto it
        background: :class: `numpy.ndarray`
            The original frame
        """
        logger.trace("Getting: (filename: '%s', faces: %s)",
                     predicted["filename"], len(predicted["swapped_faces"]))
        # 4-channel (BGR + alpha) float canvas the warped faces are composited onto.
        placeholder = np.zeros((frame_size[1], frame_size[0], 4), dtype="float32")
        background = predicted["image"] / np.array(255.0, dtype="float32")
        placeholder[:, :, :3] = background
        for new_face, detected_face, reference_face in zip(predicted["swapped_faces"],
                                                           predicted["detected_faces"],
                                                           predicted["reference_faces"]):
            # A 4th channel on the model output is its learned mask, if any.
            predicted_mask = new_face[:, :, -1] if new_face.shape[2] == 4 else None
            new_face = new_face[:, :, :3]
            interpolator = reference_face.interpolators[1]
            new_face = self._pre_warp_adjustments(new_face,
                                                  detected_face,
                                                  reference_face,
                                                  predicted_mask)
            # Warp face with the mask
            cv2.warpAffine(new_face,
                           reference_face.adjusted_matrix,
                           frame_size,
                           placeholder,
                           flags=cv2.WARP_INVERSE_MAP | interpolator,
                           borderMode=cv2.BORDER_TRANSPARENT)
        logger.trace("Got filename: '%s'. (placeholders: %s)",
                     predicted["filename"], placeholder.shape)
        return placeholder, background
    def _pre_warp_adjustments(self, new_face, detected_face, reference_face, predicted_mask):
        """ Run any requested adjustments that can be performed on the raw output from the Faceswap
        model.
        Any adjustments that can be performed before warping the face into the final frame are
        performed here.
        Parameters
        ----------
        new_face: :class:`numpy.ndarray`
            The swapped face received from the faceswap model.
        detected_face: :class:`~lib.align.DetectedFace`
            The detected_face object as defined in :class:`scripts.convert.Predictor`
        reference_face: :class:`~lib.align.AlignedFace`
            The aligned face object sized to the model output of the original face for reference
        predicted_mask: :class:`numpy.ndarray` or ``None``
            The predicted mask output from the Faceswap model. ``None`` if the model
            did not learn a mask
        Returns
        -------
        :class:`numpy.ndarray`
            The face output from the Faceswap Model with any requested pre-warp adjustments
            performed.
        """
        logger.trace("new_face shape: %s, predicted_mask shape: %s", new_face.shape,
                     predicted_mask.shape if predicted_mask is not None else None)
        old_face = reference_face.face[..., :3] / 255.0
        new_face = self._adjustments["box"].run(new_face)
        new_face, raw_mask = self._get_image_mask(new_face,
                                                  detected_face,
                                                  predicted_mask,
                                                  reference_face)
        if self._adjustments["color"] is not None:
            new_face = self._adjustments["color"].run(old_face, new_face, raw_mask)
        if self._adjustments["seamless"] is not None:
            new_face = self._adjustments["seamless"].run(old_face, new_face, raw_mask)
        logger.trace("returning: new_face shape %s", new_face.shape)
        return new_face
    def _get_image_mask(self, new_face, detected_face, predicted_mask, reference_face):
        """ Return any selected image mask and intersect with any box mask.
        Places the requested mask into the new face's Alpha channel, intersecting with any box
        mask that has already been applied.
        Parameters
        ----------
        new_face: :class:`numpy.ndarray`
            The swapped face received from the faceswap model, with any box mask applied
        detected_face: :class:`~lib.DetectedFace`
            The detected_face object as defined in :class:`scripts.convert.Predictor`
        predicted_mask: :class:`numpy.ndarray` or ``None``
            The predicted mask output from the Faceswap model. ``None`` if the model
            did not learn a mask
        reference_face: :class:`~lib.align.AlignedFace`
            The aligned face object sized to the model output of the original face for reference
        Returns
        -------
        :class:`numpy.ndarray`
            The swapped face with the requested mask added to the Alpha channel
        """
        logger.trace("Getting mask. Image shape: %s", new_face.shape)
        if self._args.mask_type != "none":
            mask_centering = detected_face.mask[self._args.mask_type].stored_centering
        else:
            mask_centering = "face"  # Unused but requires a valid value
        crop_offset = (reference_face.pose.offset[self._centering] -
                       reference_face.pose.offset[mask_centering])
        mask, raw_mask = self._adjustments["mask"].run(detected_face, crop_offset, self._centering,
                                                       predicted_mask=predicted_mask)
        if new_face.shape[2] == 4:
            logger.trace("Combining mask with alpha channel box mask")
            # Intersect: keep the more restrictive of box mask and face mask.
            new_face[:, :, -1] = np.minimum(new_face[:, :, -1], mask.squeeze())
        else:
            logger.trace("Adding mask to alpha channel")
            new_face = np.concatenate((new_face, mask), -1)
        logger.trace("Got mask. Image shape: %s", new_face.shape)
        return new_face, raw_mask
    def _post_warp_adjustments(self, background, new_image):
        """ Perform any requested adjustments to the swapped faces after they have been transformed
        into the final frame.
        Parameters
        ----------
        background: :class:`numpy.ndarray`
            The original frame
        new_image: :class:`numpy.ndarray`
            A blank frame of original frame size with the faces warped onto it
        Returns
        -------
        :class:`numpy.ndarray`
            The final merged and swapped frame with any requested post-warp adjustments applied
        """
        if self._adjustments["sharpening"] is not None:
            new_image = self._adjustments["sharpening"].run(new_image)
        if self._draw_transparent:
            frame = new_image
        else:
            # Standard alpha compositing: foreground*mask + background*(1-mask).
            foreground, mask = np.split(new_image,  # pylint:disable=unbalanced-tuple-unpacking
                                         (3, ),
                                         axis=-1)
            foreground *= mask
            background *= (1.0 - mask)
            background += foreground
            frame = background
        np.clip(frame, 0.0, 1.0, out=frame)
        return frame
    def _scale_image(self, frame):
        """ Scale the final image if requested.
        If output scale has been requested in command line arguments, scale the output
        otherwise return the final frame.
        Parameters
        ----------
        frame: :class:`numpy.ndarray`
            The final frame with faces swapped
        Returns
        -------
        :class:`numpy.ndarray`
            The final frame scaled by the requested scaling factor
        """
        if self._scale == 1:
            return frame
        logger.trace("source frame: %s", frame.shape)
        interp = cv2.INTER_CUBIC if self._scale > 1 else cv2.INTER_AREA
        # Round to even dimensions (required by some video writers).
        dims = (round((frame.shape[1] / 2 * self._scale) * 2),
                round((frame.shape[0] / 2 * self._scale) * 2))
        frame = cv2.resize(frame, dims, interpolation=interp)
        logger.trace("resized frame: %s", frame.shape)
        np.clip(frame, 0.0, 1.0, out=frame)
return frame | [
"logging.getLogger",
"numpy.clip",
"cv2.warpAffine",
"plugins.plugin_loader.PluginLoader.get_converter",
"numpy.zeros",
"numpy.array",
"numpy.split",
"numpy.empty",
"numpy.concatenate",
"cv2.resize"
] | [((137, 164), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (154, 164), False, 'import logging\n'), ((10217, 10277), 'numpy.zeros', 'np.zeros', (['(frame_size[1], frame_size[0], 4)'], {'dtype': '"""float32"""'}), "((frame_size[1], frame_size[0], 4), dtype='float32')\n", (10225, 10277), True, 'import numpy as np\n'), ((16919, 16954), 'numpy.clip', 'np.clip', (['frame', '(0.0)', '(1.0)'], {'out': 'frame'}), '(frame, 0.0, 1.0, out=frame)\n', (16926, 16954), True, 'import numpy as np\n'), ((17776, 17821), 'cv2.resize', 'cv2.resize', (['frame', 'dims'], {'interpolation': 'interp'}), '(frame, dims, interpolation=interp)\n', (17786, 17821), False, 'import cv2\n'), ((17885, 17920), 'numpy.clip', 'np.clip', (['frame', '(0.0)', '(1.0)'], {'out': 'frame'}), '(frame, 0.0, 1.0, out=frame)\n', (17892, 17920), True, 'import numpy as np\n'), ((4259, 4344), 'plugins.plugin_loader.PluginLoader.get_converter', 'PluginLoader.get_converter', (['"""mask"""', '"""box_blend"""'], {'disable_logging': 'disable_logging'}), "('mask', 'box_blend', disable_logging=disable_logging\n )\n", (4285, 4344), False, 'from plugins.plugin_loader import PluginLoader\n'), ((4567, 4653), 'plugins.plugin_loader.PluginLoader.get_converter', 'PluginLoader.get_converter', (['"""mask"""', '"""mask_blend"""'], {'disable_logging': 'disable_logging'}), "('mask', 'mask_blend', disable_logging=\n disable_logging)\n", (4593, 4653), False, 'from plugins.plugin_loader import PluginLoader\n'), ((5322, 5408), 'plugins.plugin_loader.PluginLoader.get_converter', 'PluginLoader.get_converter', (['"""scaling"""', '"""sharpen"""'], {'disable_logging': 'disable_logging'}), "('scaling', 'sharpen', disable_logging=\n disable_logging)\n", (5348, 5408), False, 'from plugins.plugin_loader import PluginLoader\n'), ((10320, 10352), 'numpy.array', 'np.array', (['(255.0)'], {'dtype': '"""float32"""'}), "(255.0, dtype='float32')\n", (10328, 10352), True, 'import numpy as np\n'), ((11156, 11324), 
'cv2.warpAffine', 'cv2.warpAffine', (['new_face', 'reference_face.adjusted_matrix', 'frame_size', 'placeholder'], {'flags': '(cv2.WARP_INVERSE_MAP | interpolator)', 'borderMode': 'cv2.BORDER_TRANSPARENT'}), '(new_face, reference_face.adjusted_matrix, frame_size,\n placeholder, flags=cv2.WARP_INVERSE_MAP | interpolator, borderMode=cv2.\n BORDER_TRANSPARENT)\n', (11170, 11324), False, 'import cv2\n'), ((15641, 15677), 'numpy.concatenate', 'np.concatenate', (['(new_face, mask)', '(-1)'], {}), '((new_face, mask), -1)\n', (15655, 15677), True, 'import numpy as np\n'), ((16612, 16646), 'numpy.split', 'np.split', (['new_image', '(3,)'], {'axis': '(-1)'}), '(new_image, (3,), axis=-1)\n', (16620, 16646), True, 'import numpy as np\n'), ((5109, 5210), 'plugins.plugin_loader.PluginLoader.get_converter', 'PluginLoader.get_converter', (['"""color"""', 'self._args.color_adjustment'], {'disable_logging': 'disable_logging'}), "('color', self._args.color_adjustment,\n disable_logging=disable_logging)\n", (5135, 5210), False, 'from plugins.plugin_loader import PluginLoader\n'), ((9022, 9065), 'numpy.empty', 'np.empty', (['patched_face.shape'], {'dtype': '"""uint8"""'}), "(patched_face.shape, dtype='uint8')\n", (9030, 9065), True, 'import numpy as np\n')] |
import numpy as np
from ..core import proposals, densities, DefaultMetropolis, MixingMarkovUpdate,\
MultiChannelMC, MultiChannel
def get_sampler(target, ndim, initial, centers, widths, beta,
                nintegral=1000, local_width=.1):
    """Build a mixed Markov sampler: importance updates plus a local Gaussian walk.

    Returns the mixing sampler and the (broadcast) initial point.
    """
    if np.isscalar(initial):
        initial = np.full(ndim, initial)

    # Multi-channel importance density: one Gaussian per (center, width) pair.
    gaussians = [densities.Gaussian(ndim, mu=mu, scale=sig)
                 for mu, sig in zip(centers, widths)]
    channels = MultiChannel(gaussians)
    mc_importance = MultiChannelMC(channels)
    integration_sample = mc_importance(target, [], [nintegral], [])

    # Global (importance) Metropolis update and local Gaussian random-walk update.
    is_sampler = DefaultMetropolis(ndim, target, channels)
    local_sampler = DefaultMetropolis(
        ndim, target, proposals.Gaussian(ndim, scale=local_width))

    # Mix the two updates with probabilities beta and 1 - beta.
    sampler = MixingMarkovUpdate(ndim, [is_sampler, local_sampler], [beta, 1 - beta])
    return sampler, initial
| [
"numpy.full",
"numpy.isscalar"
] | [((253, 273), 'numpy.isscalar', 'np.isscalar', (['initial'], {}), '(initial)\n', (264, 273), True, 'import numpy as np\n'), ((293, 315), 'numpy.full', 'np.full', (['ndim', 'initial'], {}), '(ndim, initial)\n', (300, 315), True, 'import numpy as np\n')] |
import numpy as np
import scipy.special as ss
from scipy.optimize import root_scalar
from scipy import integrate
from csr2d.core2 import psi_x0, psi_s, Es_case_B, Fx_case_B_Chris, Es_case_A, Fx_case_A, Es_case_C, Fx_case_C, Es_case_D, Fx_case_D, psi_s_case_E, Es_case_E
from csr2d.core2 import alpha_exact_case_B_brentq, alpha_exact_case_D_brentq
from numba import njit
from quantecon.optimize.root_finding import newton
from scipy import optimize
from scipy.signal import find_peaks
def symmetric_vec(n, d):
    """
    Returns a symmetric vector about 0 of length 2*n with spacing d.
    The center = 0 is at [n-1]
    """
    offsets = np.arange(2 * n) - (n - 1)
    return offsets * d
def green_mesh(density_shape, deltas, rho=None, gamma=None, offset=(0,0,0),
               component='psi_s', map_f=map, phi=None, phi_m=None, lamb=None,
               include_break_points=True, debug=False):
    """
    Computes Green function meshes for a particular component.
    These meshes are in real space (not scaled space).

    Parameters
    ----------
    density_shape : tuple(int, int)
        Shape of the charge mesh (nz, nx)
    deltas : tuple(float, float)
        mesh spacing corresponding to dz, dx
    rho : float
        bending radius, used to change to internal (scaled) coordinates
    gamma : float
        relativistic gamma
    offset : tuple
        per-axis mesh offsets (currently unused by the symmetric mesh build)
    component : str
        Green function component to compute, e.g. 'psi_s', 'Es_case_A',
        'Fx_case_C', 'Es_case_B_IGF', ...
    map_f : map function for creating potential grids.
        Examples:
            map (default)
            executor.map
    phi : float
        entrance angle, required by case A components
    phi_m : float
        magnet angle, required by case C components
    lamb : float
        exit distance over rho, required by case C and D components
    include_break_points : bool
        if True, locate integrand spikes and pass them to the IGF
        integrator as break points (more accurate near singularities)
    debug : bool
        print progress information

    Returns
    -------
    Double-sized array for the Green function with the specified component

    Raises
    ------
    ValueError
        if `component` is not recognized.
    """
    nz, nx = tuple(density_shape)
    dz, dx = tuple(deltas) # Convenience
    if debug:
        print('component:', component)
    # Change to internal coordinates (case E components stay in real space)
    if (component != 'psi_s_case_E') and (component != 'Es_case_E_IGF'):
        if debug:
            print('Change to internal coordinates...')
        # handle negative rho
        #rho_sign = np.sign(rho)
        #rho = abs(rho)
        dx = dx/rho
        dz = dz/(2*rho)
    # Make an offset grid
    vecs = [symmetric_vec(n, delta) for n, delta, o in zip(density_shape, [dz,dx], offset)]
    #vecs[0] = rho_sign*vecs[0] # Flip sign of x
    meshes = np.meshgrid(*vecs, indexing='ij') # this gives zm2 and xm2
    # Only case B has a potential form of psi_s
    if component == 'psi_s':
        green = psi_s(*meshes, gamma)
    # psi_x is incorrect
    #elif component == 'psi_x':
    #    green = rho_sign*psi_x0(*meshes, gamma, dz, dx)
    elif component == 'psi_s_case_E':
        green = psi_s_case_E(*meshes, gamma)
    #elif component == 'Es_case_E':
    #    green = Es_case_E(*meshes, gamma)
    elif component == 'Es_case_D':
        assert lamb>=0 , "lamb (exit distance over rho) must be positive for case D !"
        green = Es_case_D(*meshes, gamma, lamb)
    # Case A fields
    elif component =='Es_case_A':
        assert phi>=0 , "phi (entrance angle) must be positive for case A !"
        green = Es_case_A(*meshes, gamma, phi/2)
    elif component =='Fx_case_A':
        assert phi>=0 , "phi (entrance angle) must be positive for case A !"
        green = Fx_case_A(*meshes, gamma, phi/2)
    # Case C fields
    elif component =='Es_case_C':
        assert phi_m>=0 , "phi_m must be positive for case C !"
        assert lamb>=0 , "lamb (exit distance over rho) must be positive for case C !"
        # BUGFIX: zm2 and xm2 were undefined names (NameError at runtime);
        # the coordinate meshes built above are in `meshes`.
        green = Es_case_C(*meshes, gamma, phi_m/2, lamb)
    elif component =='Fx_case_C':
        assert phi_m>=0 , "phi_m must be positive for case C !"
        assert lamb>=0 , "lamb (exit distance over rho) must be positive for case C !"
        # BUGFIX: zm2 and xm2 were undefined names (NameError at runtime);
        # the coordinate meshes built above are in `meshes`.
        green = Fx_case_C(*meshes, gamma, phi_m/2, lamb)
    # ===================================================
    # Case B fields IGF
    elif component in ['Fx_case_B_IGF', 'Es_case_B_IGF','Es_case_E_IGF']:
        if component == 'Es_case_B_IGF':
            F = Es_case_B
        elif component == 'Fx_case_B_IGF':
            F = Fx_case_B_Chris
        else:
            F = Es_case_E
        # Flat meshes
        Z = meshes[0].flatten()
        X = meshes[1].flatten()
        # Select special points near z = 0 for the IGF treatment
        ix_for_IGF = np.where(abs(Z) < dz*2.5)
        # ix_for_IGF = np.where(np.logical_and( abs(Z)<dz*2, abs(X)<dx*2 ))
        if debug:
            print(f'Finding IGF for {len(ix_for_IGF[0])} points...')
        Z_special = Z[ix_for_IGF]
        X_special = X[ix_for_IGF]
        if include_break_points:
            xvec2 = vecs[1]
            # The spike_list can not be a numpy array since its elements have potentially different sizes
            def find_case_B_spike_x(x):
                return find_Es_or_Fx_case_B_spike(F, x, gamma)
            spike_list = list(map(find_case_B_spike_x, xvec2))
            fzx = lambda z, x: IGF_z_case_B(F, z, x, dz, dx, gamma, xvec2=xvec2, spike_list=spike_list)/dz # evaluate special
        else:
            fzx = lambda z, x: IGF_z_case_B(F, z, x, dz, dx, gamma)/dz # evaluate special
        res = map(fzx, Z_special, X_special)
        G_short = np.array(list(res))
        if debug:
            print(f'Done. Starting midpoint method...')
        G = F(Z, X, gamma) # Simple midpoint evaluation
        G[ix_for_IGF] = G_short # Replace at special points with calculated IGF
        green = G.reshape(meshes[0].shape) # reshape
    # ===================================================
    # Case D fields IGF
    elif component in ['Fx_case_D_IGF', 'Es_case_D_IGF']:
        assert lamb>=0 , "lamb (exit distance over rho) must be positive for case D !"
        if component == 'Es_case_D_IGF':
            F = Es_case_D
        else:
            F = Fx_case_D
        # Flat meshes
        Z = meshes[0].flatten()
        X = meshes[1].flatten()
        # Select special points near z = 0 for the IGF treatment
        ix_for_IGF = np.where(abs(Z) < dz*3.5)
        # ix_for_IGF = np.where(np.logical_and( abs(Z)<dz*2, abs(X)<dx*2 ))
        if debug:
            print(f'Finding IGF for {len(ix_for_IGF[0])} points...')
        Z_special = Z[ix_for_IGF]
        X_special = X[ix_for_IGF]
        if include_break_points:
            xvec2 = vecs[1]
            # The spike_list can not be a numpy array since its elements have potentially different sizes
            def find_case_D_spike_x(x):
                return find_Es_or_Fx_case_D_spike(F, x, gamma, lamb)
            spike_list = list(map(find_case_D_spike_x, xvec2))
            fzx = lambda z, x: IGF_z_case_D(F, z, x, dz, dx, gamma, lamb, xvec2=xvec2, spike_list=spike_list)/dz # evaluate special
        else:
            fzx = lambda z, x: IGF_z_case_D(F, z, x, dz, dx, gamma, lamb)/dz # evaluate special
        res = map(fzx, Z_special, X_special)
        G_short = np.array(list(res))
        # Gated behind debug for consistency with the case B branch
        if debug:
            print(f'Done. Starting midpoint method...')
        G = F(Z, X, gamma, lamb) # Simple midpoint evaluation
        G[ix_for_IGF] = G_short # Replace at special points with calculated IGF
        green = G.reshape(meshes[0].shape) # reshape
    else:
        raise ValueError(f'Unknown component: {component}')
    return green
def IGF_z_case_B(func, z, x, dz, dx, gamma, xvec2=None, spike_list=None):
    """
    Special Integrated Green Function (IGF) in the z direction only.

    Integrates func(z', x, gamma) over one z cell [z - dz/2, z + dz/2].
    The caller divides the result by dz to obtain a cell average.

    Parameters
    ----------
    func : callable
        field function with signature func(z, x, gamma)
    z, x : float
        cell center (internal coordinates)
    dz, dx : float
        mesh spacings (dx is unused here; kept for a uniform IGF API)
    gamma : float
        relativistic gamma
    xvec2 : array-like, optional
        x grid positions indexing spike_list
    spike_list : list of lists, optional
        spike z positions per entry of xvec2; spikes inside the cell are
        passed to the integrator as break points

    Returns
    -------
    float
        the integral over the cell.
    """
    func_z = lambda zp: func(zp, x, gamma)
    points = [z]
    # Use `is not None`: `!=` would be an elementwise comparison if an
    # array were ever passed, and identity is the intended test anyway.
    if spike_list is not None:
        x_index = np.argmin(np.abs(xvec2 - x))
        spikes = spike_list[x_index]  # z positions of the spikes at xvec2[x_index]
        spikes_in_dz = [zp for zp in spikes if z - dz/2 < zp < z + dz/2]
        # A rare situation in which too many break points are found
        # (oscillatory curve, typically at x = 0). The integrator cannot
        # take more than ~100 of them, so neglect them in that case.
        if len(spikes_in_dz) <= 10:
            points = [z] + spikes_in_dz
    return integrate.quad(func_z, z-dz/2, z+dz/2, points=points, epsrel=1e-6, limit=100)[0]
def IGF_z_case_D(func, z, x, dz, dx, gamma, lamb, xvec2=None, spike_list=None):
    """
    Special Integrated Green Function (IGF) in the z direction only.

    Integrates func(z', x, gamma, lamb) over one z cell [z - dz/2, z + dz/2].
    The caller divides the result by dz to obtain a cell average.

    Parameters
    ----------
    func : callable
        field function with signature func(z, x, gamma, lamb)
    z, x : float
        cell center (internal coordinates)
    dz, dx : float
        mesh spacings (dx is unused here; kept for a uniform IGF API)
    gamma : float
        relativistic gamma
    lamb : float
        exit distance over rho
    xvec2 : array-like, optional
        x grid positions indexing spike_list
    spike_list : list of lists, optional
        spike z positions per entry of xvec2; spikes inside the cell are
        passed to the integrator as break points

    Returns
    -------
    float
        the integral over the cell.
    """
    func_z = lambda zp: func(zp, x, gamma, lamb)
    points = [z]
    # Use `is not None`: `!=` would be an elementwise comparison if an
    # array were ever passed, and identity is the intended test anyway.
    if spike_list is not None:
        x_index = np.argmin(np.abs(xvec2 - x))
        spikes = spike_list[x_index]  # z positions of the spikes at xvec2[x_index]
        spikes_in_dz = [zp for zp in spikes if z - dz/2 < zp < z + dz/2]
        # A rare situation in which too many break points are found
        # (oscillatory curve, typically at x = 0). The integrator cannot
        # take more than ~100 of them, so neglect them in that case.
        if len(spikes_in_dz) <= 10:
            points = [z] + spikes_in_dz
    return integrate.quad(func_z, z-dz/2, z+dz/2, points=points, epsrel=1e-6, limit=100)[0]
def IGF_z_case_E(func, z, x, dz, dx, gamma):
    """
    Special Integrated Green Function (IGF) in the z direction only.

    Integrates func(z', x, gamma) over [z - dz/2, z + dz/2], returning 0
    at the origin where the integrand is singular.
    """
    if abs(z) < 1e-14 and abs(x) < 1e-14:
        return 0
    integrand = lambda zp: func(zp, x, gamma)
    result, _ = integrate.quad(integrand, z - dz/2, z + dz/2,
                              points=[z],
                              epsrel=1e-6,  # Coarse
                              limit=100)
    return result
def case_B_denom(z, x, gamma):
    """
    The second numerator of Es_case_B and Fx_case_B.
    """
    beta = np.sqrt(1 - 1/gamma**2)
    alp = alpha_exact_case_B_brentq(z, x, beta)
    kap = 2*(alp - z)/beta  # kappa for case B
    return kap - beta*(1 + x)*np.sin(2*alp)
def find_Es_or_Fx_case_B_spike(func, xval, gamma):
    """
    Return a list of z values at which Es_case_B(z,xval) has spikes.
    func has to be either "Es_case_B" or "Fx_case_B"
    """
    # First locate where the denominator ~ 0: a good reference close to the spike(s)
    op = optimize.root(lambda z: case_B_denom(z, xval, gamma), 0, tol=1E-6)
    if not op.success:
        # no root found for denom!! Might be due to small gamma
        return [0]
    z_ref = op.x[0]
    # The scan range and resolution are subjected to changes...
    zv = np.linspace(z_ref - 2E-11, z_ref + 2E-11, 2001)
    fv = func(zv, xval, gamma)
    peak_ix = np.union1d(find_peaks(fv)[0], find_peaks(-fv)[0])
    return list(zv[peak_ix])
def case_D_denom(z, x, gamma, lamb):
    """Denominator expression of the case D field formulas."""
    beta = np.sqrt(1 - 1/gamma**2)
    alp = alpha_exact_case_D_brentq(z, x, beta, lamb)
    kap = (2*(alp - z) + lamb)/beta  # kappa for case D
    return kap - beta*(lamb*np.cos(2*alp) + (1 + x)*np.sin(2*alp))
def find_Es_or_Fx_case_D_spike(func, xval, gamma, lamb):
    """
    Return a list of z values at which Es_case_D(z,xval) has spikes.
    func has to be either "Es_case_D" or "Fx_case_D"
    """
    def case_D_denom_z(z):
        return case_D_denom(z, xval, gamma, lamb)
    # First find where denom ~ 0, and we are close to spike
    op = optimize.root(case_D_denom_z, 0, tol=1E-6)
    if not op.success:
        # no root found for denom!! Might be due to small gamma
        # CHANGED: return a plain list (was np.array([0])) for consistency
        # with find_Es_or_Fx_case_B_spike and the documented "list" contract;
        # callers only iterate the result, so this is backward-compatible.
        return [0]
    root = op.x[0]
    def func_z(z):
        return func(z, xval, gamma, lamb)
    # The range and resolution are subjected to changes...
    zv = np.linspace( root - 2E-11, root + 2E-11, 2001 )
    peak_ix = np.union1d(find_peaks( func_z(zv))[0], find_peaks( -func_z(zv))[0])
    return list(zv[peak_ix])
## ============== Below are higher level functions ===================================
@njit
def my_2d_convolve2(g1, g2, ix1, ix2):
    """
    Convolution for a specific observation point only, at (ix1, ix2).
    Assumption: g2 is a double-sized grid of g1.

    Parameters
    ----------
    g1 : 2D array of size (nz, nx)
    g2 : 2D array of size (2*nz, 2*nx)
    ix1, ix2 : int

    Returns:
    A single value, the convolution result at (ix1, ix2)
    """
    n1, n2 = g1.shape
    g2_flipped = np.flip(g2)
    # Window of the flipped kernel aligned with the observation point
    window = g2_flipped[n1 - ix1:2*n1 - ix1, n2 - ix2:2*n2 - ix2]
    total = 0
    for row in range(n1):
        for col in range(n2):
            total += g1[row, col] * window[row, col]
    return total
@njit
def boundary_convolve(case, z_observe, x_observe, zvec, xvec, dz, dx, lambda_grid_filtered, Green, gamma=None, rho=None, phi=None):
    # Convolve the (boundary-masked) charge grid with the Green function at a
    # single observation point (z_observe, x_observe).
    # `case` selects which boundary mask is applied (1 or 2); grid points
    # excluded by the mask have their charge zeroed before the convolution.
    # NOTE(review): the exact geometric meaning of zi_vec/zo_vec (entrance/exit
    # boundaries, presumably) should be confirmed against the CSR derivation.
    beta2 = 1-1/gamma**2
    beta = np.sqrt(beta2)
    # Indices of the grid points closest to the observation coordinates
    x_observe_index = np.argmin(np.abs(xvec - x_observe))
    z_observe_index = np.argmin(np.abs(zvec - z_observe))
    nz = len(zvec)
    nx = len(xvec)
    cond = np.zeros( (nz,nx) ) # To be filled with True and False
    # Boundary condition
    temp = (x_observe - xvec)/rho
    if case == 1:
        # Mask out grid points beyond the case-1 boundary curve
        zi_vec = rho*( phi - beta*np.sqrt(temp**2 + 4*(1 + temp)*np.sin(phi/2)**2))
        for i in range(nx):
            cond[:,i] = (zvec > z_observe - zi_vec[i])
    elif case == 2:
        # Case 2 keeps only the band between the two boundary curves
        zi_vec = rho*( phi - beta*np.sqrt(temp**2 + 4*(1 + temp)*np.sin(phi/2)**2))
        zo_vec = -beta*np.abs(x_observe - xvec)
        for i in range(nx):
            cond[:,i] = (zvec > z_observe - zo_vec[i]) | (zvec < z_observe - zi_vec[i])
    else:
        # No exception inside the njit-compiled function; fall through with an
        # all-False mask so the full grid is used.
        print('Unknown case !!!')
        #raise ValueError(f'Unknown case: {case} !!!')
    # Zero the charge on masked points, then convolve at the observation point
    lambda_grid_filtered_bounded = np.where(cond, 0, lambda_grid_filtered)
    conv = my_2d_convolve2(lambda_grid_filtered_bounded, Green, z_observe_index, x_observe_index)
return conv | [
"csr2d.core2.psi_s",
"csr2d.core2.Fx_case_C",
"numpy.sqrt",
"csr2d.core2.Es_case_A",
"numpy.array",
"numpy.sin",
"csr2d.core2.alpha_exact_case_B_brentq",
"numpy.arange",
"numpy.flip",
"csr2d.core2.Fx_case_A",
"numpy.where",
"numpy.linspace",
"csr2d.core2.Es_case_C",
"numpy.meshgrid",
"cs... | [((2135, 2168), 'numpy.meshgrid', 'np.meshgrid', (['*vecs'], {'indexing': '"""ij"""'}), "(*vecs, indexing='ij')\n", (2146, 2168), True, 'import numpy as np\n'), ((10170, 10184), 'numpy.sqrt', 'np.sqrt', (['beta2'], {}), '(beta2)\n', (10177, 10184), True, 'import numpy as np\n'), ((10196, 10233), 'csr2d.core2.alpha_exact_case_B_brentq', 'alpha_exact_case_B_brentq', (['z', 'x', 'beta'], {}), '(z, x, beta)\n', (10221, 10233), False, 'from csr2d.core2 import alpha_exact_case_B_brentq, alpha_exact_case_D_brentq\n'), ((10246, 10261), 'numpy.sin', 'np.sin', (['(2 * alp)'], {}), '(2 * alp)\n', (10252, 10261), True, 'import numpy as np\n'), ((10697, 10740), 'scipy.optimize.root', 'optimize.root', (['case_B_denom_z', '(0)'], {'tol': '(1e-06)'}), '(case_B_denom_z, 0, tol=1e-06)\n', (10710, 10740), False, 'from scipy import optimize\n'), ((11012, 11057), 'numpy.linspace', 'np.linspace', (['(root - 2e-11)', '(root + 2e-11)', '(2001)'], {}), '(root - 2e-11, root + 2e-11, 2001)\n', (11023, 11057), True, 'import numpy as np\n'), ((11257, 11271), 'numpy.sqrt', 'np.sqrt', (['beta2'], {}), '(beta2)\n', (11264, 11271), True, 'import numpy as np\n'), ((11283, 11326), 'csr2d.core2.alpha_exact_case_D_brentq', 'alpha_exact_case_D_brentq', (['z', 'x', 'beta', 'lamb'], {}), '(z, x, beta, lamb)\n', (11308, 11326), False, 'from csr2d.core2 import alpha_exact_case_B_brentq, alpha_exact_case_D_brentq\n'), ((11344, 11359), 'numpy.sin', 'np.sin', (['(2 * alp)'], {}), '(2 * alp)\n', (11350, 11359), True, 'import numpy as np\n'), ((11370, 11385), 'numpy.cos', 'np.cos', (['(2 * alp)'], {}), '(2 * alp)\n', (11376, 11385), True, 'import numpy as np\n'), ((11843, 11886), 'scipy.optimize.root', 'optimize.root', (['case_D_denom_z', '(0)'], {'tol': '(1e-06)'}), '(case_D_denom_z, 0, tol=1e-06)\n', (11856, 11886), False, 'from scipy import optimize\n'), ((12115, 12160), 'numpy.linspace', 'np.linspace', (['(root - 2e-11)', '(root + 2e-11)', '(2001)'], {}), '(root - 2e-11, root + 2e-11, 2001)\n', 
(12126, 12160), True, 'import numpy as np\n'), ((12809, 12820), 'numpy.flip', 'np.flip', (['g2'], {}), '(g2)\n', (12816, 12820), True, 'import numpy as np\n'), ((13178, 13192), 'numpy.sqrt', 'np.sqrt', (['beta2'], {}), '(beta2)\n', (13185, 13192), True, 'import numpy as np\n'), ((13364, 13382), 'numpy.zeros', 'np.zeros', (['(nz, nx)'], {}), '((nz, nx))\n', (13372, 13382), True, 'import numpy as np\n'), ((14112, 14151), 'numpy.where', 'np.where', (['cond', '(0)', 'lambda_grid_filtered'], {}), '(cond, 0, lambda_grid_filtered)\n', (14120, 14151), True, 'import numpy as np\n'), ((652, 679), 'numpy.arange', 'np.arange', (['(-n + 1)', '(n + 1)', '(1)'], {}), '(-n + 1, n + 1, 1)\n', (661, 679), True, 'import numpy as np\n'), ((2306, 2327), 'csr2d.core2.psi_s', 'psi_s', (['*meshes', 'gamma'], {}), '(*meshes, gamma)\n', (2311, 2327), False, 'from csr2d.core2 import psi_x0, psi_s, Es_case_B, Fx_case_B_Chris, Es_case_A, Fx_case_A, Es_case_C, Fx_case_C, Es_case_D, Fx_case_D, psi_s_case_E, Es_case_E\n'), ((8364, 8454), 'scipy.integrate.quad', 'integrate.quad', (['func_z', '(z - dz / 2)', '(z + dz / 2)'], {'points': 'points', 'epsrel': '(1e-06)', 'limit': '(100)'}), '(func_z, z - dz / 2, z + dz / 2, points=points, epsrel=1e-06,\n limit=100)\n', (8378, 8454), False, 'from scipy import integrate\n'), ((9459, 9549), 'scipy.integrate.quad', 'integrate.quad', (['func_z', '(z - dz / 2)', '(z + dz / 2)'], {'points': 'points', 'epsrel': '(1e-06)', 'limit': '(100)'}), '(func_z, z - dz / 2, z + dz / 2, points=points, epsrel=1e-06,\n limit=100)\n', (9473, 9549), False, 'from scipy import integrate\n'), ((9858, 9945), 'scipy.integrate.quad', 'integrate.quad', (['func_z', '(z - dz / 2)', '(z + dz / 2)'], {'points': '[z]', 'epsrel': '(1e-06)', 'limit': '(100)'}), '(func_z, z - dz / 2, z + dz / 2, points=[z], epsrel=1e-06,\n limit=100)\n', (9872, 9945), False, 'from scipy import integrate\n'), ((12001, 12014), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (12009, 12014), True, 'import 
numpy as np\n'), ((13230, 13254), 'numpy.abs', 'np.abs', (['(xvec - x_observe)'], {}), '(xvec - x_observe)\n', (13236, 13254), True, 'import numpy as np\n'), ((13288, 13312), 'numpy.abs', 'np.abs', (['(zvec - z_observe)'], {}), '(zvec - z_observe)\n', (13294, 13312), True, 'import numpy as np\n'), ((2500, 2528), 'csr2d.core2.psi_s_case_E', 'psi_s_case_E', (['*meshes', 'gamma'], {}), '(*meshes, gamma)\n', (2512, 2528), False, 'from csr2d.core2 import psi_x0, psi_s, Es_case_B, Fx_case_B_Chris, Es_case_A, Fx_case_A, Es_case_C, Fx_case_C, Es_case_D, Fx_case_D, psi_s_case_E, Es_case_E\n'), ((7776, 7793), 'numpy.abs', 'np.abs', (['(xvec2 - x)'], {}), '(xvec2 - x)\n', (7782, 7793), True, 'import numpy as np\n'), ((8871, 8888), 'numpy.abs', 'np.abs', (['(xvec2 - x)'], {}), '(xvec2 - x)\n', (8877, 8888), True, 'import numpy as np\n'), ((2751, 2782), 'csr2d.core2.Es_case_D', 'Es_case_D', (['*meshes', 'gamma', 'lamb'], {}), '(*meshes, gamma, lamb)\n', (2760, 2782), False, 'from csr2d.core2 import psi_x0, psi_s, Es_case_B, Fx_case_B_Chris, Es_case_A, Fx_case_A, Es_case_C, Fx_case_C, Es_case_D, Fx_case_D, psi_s_case_E, Es_case_E\n'), ((13818, 13842), 'numpy.abs', 'np.abs', (['(x_observe - xvec)'], {}), '(x_observe - xvec)\n', (13824, 13842), True, 'import numpy as np\n'), ((2940, 2974), 'csr2d.core2.Es_case_A', 'Es_case_A', (['*meshes', 'gamma', '(phi / 2)'], {}), '(*meshes, gamma, phi / 2)\n', (2949, 2974), False, 'from csr2d.core2 import psi_x0, psi_s, Es_case_B, Fx_case_B_Chris, Es_case_A, Fx_case_A, Es_case_C, Fx_case_C, Es_case_D, Fx_case_D, psi_s_case_E, Es_case_E\n'), ((3101, 3135), 'csr2d.core2.Fx_case_A', 'Fx_case_A', (['*meshes', 'gamma', '(phi / 2)'], {}), '(*meshes, gamma, phi / 2)\n', (3110, 3135), False, 'from csr2d.core2 import psi_x0, psi_s, Es_case_B, Fx_case_B_Chris, Es_case_A, Fx_case_A, Es_case_C, Fx_case_C, Es_case_D, Fx_case_D, psi_s_case_E, Es_case_E\n'), ((3370, 3413), 'csr2d.core2.Es_case_C', 'Es_case_C', (['zm2', 'xm2', 'gamma', '(phi_m / 2)', 'lamb'], 
{}), '(zm2, xm2, gamma, phi_m / 2, lamb)\n', (3379, 3413), False, 'from csr2d.core2 import psi_x0, psi_s, Es_case_B, Fx_case_B_Chris, Es_case_A, Fx_case_A, Es_case_C, Fx_case_C, Es_case_D, Fx_case_D, psi_s_case_E, Es_case_E\n'), ((3614, 3657), 'csr2d.core2.Fx_case_C', 'Fx_case_C', (['zm2', 'xm2', 'gamma', '(phi_m / 2)', 'lamb'], {}), '(zm2, xm2, gamma, phi_m / 2, lamb)\n', (3623, 3657), False, 'from csr2d.core2 import psi_x0, psi_s, Es_case_B, Fx_case_B_Chris, Es_case_A, Fx_case_A, Es_case_C, Fx_case_C, Es_case_D, Fx_case_D, psi_s_case_E, Es_case_E\n'), ((13572, 13587), 'numpy.sin', 'np.sin', (['(phi / 2)'], {}), '(phi / 2)\n', (13578, 13587), True, 'import numpy as np\n'), ((13776, 13791), 'numpy.sin', 'np.sin', (['(phi / 2)'], {}), '(phi / 2)\n', (13782, 13791), True, 'import numpy as np\n')] |
import numpy as np
import datetime
import matplotlib.pyplot as plt
def createnosignaldata(n, d):
    """
    Data points are random Gaussian vectors.
    Class labels are random and uniform
    """
    datasets = []
    for _ in range(3):  # train, holdout, test -- drawn in this order
        X = np.random.normal(0, 1, (n, d + 1))
        X[:, d] = np.sign(X[:, d])  # last column holds the label
        datasets.append(X)
    X_train, X_holdout, X_test = datasets
    return X_train, X_holdout, X_test
def createhighsignaldata(n, d):
    """
    Data points are random Gaussian vectors.
    Class labels are random and uniform.
    The first nbiased features are biased towards the class label.

    Parameters
    ----------
    n : int
        number of samples in each of the three datasets
    d : int
        number of features; the label is stored in column d (must be >= 20)

    Returns
    -------
    (X_train, X_holdout, X_test) : three (n, d + 1) float arrays
    """
    X_train = np.random.normal(0, 1, (n, d + 1))
    X_train[:, d] = np.sign(X_train[:, d])
    X_holdout = np.random.normal(0, 1, (n, d + 1))
    X_holdout[:, d] = np.sign(X_holdout[:, d])
    X_test = np.random.normal(0, 1, (n, d + 1))
    X_test[:, d] = np.sign(X_test[:, d])
    # Add correlation with the sign.
    # Vectorized over rows (was an O(n) Python loop): broadcasting adds
    # bias * label to the first nbiased columns of every row at once,
    # performing the same multiply-then-add per element as before.
    nbiased = 20
    bias = 6.0 / np.sqrt(n)
    for X in (X_train, X_holdout, X_test):
        X[:, :nbiased] += bias * X[:, d:d + 1]
    return X_train, X_holdout, X_test
def runClassifier(n, d, krange, X_train, X_holdout, X_test):
    """
    Variable selection and basic boosting on synthetic data. Variables
    with largest correlation with target are selected first.

    For each k in krange, selects the k variables most correlated with the
    label (column d) and classifies by the sign of a +/-1-weighted vote.
    Runs the procedure twice: once against the raw holdout set, and once
    against a Thresholdout-protected holdout (noisy answers are returned
    only when train and holdout disagree beyond a noisy threshold).

    NOTE: the order of np.random.normal calls defines the Thresholdout
    mechanism and must not be changed.

    Returns
    -------
    (vals, noisy_vals) : two (len(krange), 3) arrays of
        [train, holdout, test] accuracies.
    """
    # Compute values on the standard holdout
    tolerance = 1.0 / np.sqrt(n)
    threshold = 4.0 / np.sqrt(n)
    vals = []
    # Per-feature correlation with the label, on train and holdout
    trainanswers = np.dot(X_train[:, :d].T, X_train[:, d]) / n
    holdoutanswers = np.dot(X_holdout[:, :d].T, X_holdout[:, d]) / n
    # Keep only features whose correlation sign agrees on both sets
    trainpos = trainanswers > 1.0 / np.sqrt(n)
    holdopos = holdoutanswers > 1.0 / np.sqrt(n)
    trainneg = trainanswers < -1.0 / np.sqrt(n)
    holdoneg = holdoutanswers < -1.0 / np.sqrt(n)
    selected = (trainpos & holdopos) | (trainneg & holdoneg)
    trainanswers[~selected] = 0
    sortanswers = np.abs(trainanswers).argsort()
    for k in krange:
        # Vote with the signs of the k strongest selected correlations
        weights = np.zeros(d + 1)
        topk = sortanswers[-k:]
        weights[topk] = np.sign(trainanswers[topk])
        ftrain = 1.0 * np.count_nonzero(np.sign(np.dot(X_train, weights)) == X_train[:, d]) / n
        fholdout = 1.0 * np.count_nonzero(np.sign(np.dot(X_holdout, weights)) == X_holdout[:, d]) / n
        ftest = 1.0 * np.count_nonzero(np.sign(np.dot(X_test, weights)) == X_test[:, d]) / n
        if k == 0:
            # Empty classifier: chance accuracy by convention
            vals.append([0.5, 0.5, 0.5])
        else:
            vals.append([ftrain, fholdout, ftest])
    # Compute values using Thresholdout
    noisy_vals = []
    trainanswers = np.dot(X_train[:, :d].T, X_train[:, d]) / n
    holdoutanswers = np.dot(X_holdout[:, :d].T, X_holdout[:, d]) / n
    # Thresholdout on the per-feature correlations: reveal the (noisy)
    # holdout answer only where it differs notably from the train answer
    diffs = np.abs(trainanswers - holdoutanswers)
    noise = np.random.normal(0, tolerance, d)
    abovethr = diffs > threshold + noise
    holdoutanswers[~abovethr] = trainanswers[~abovethr]
    holdoutanswers[abovethr] = (holdoutanswers + np.random.normal(0, tolerance, d))[abovethr]
    trainpos = trainanswers > 1.0 / np.sqrt(n)
    holdopos = holdoutanswers > 1.0 / np.sqrt(n)
    trainneg = trainanswers < -1.0 / np.sqrt(n)
    holdoneg = holdoutanswers < -1.0 / np.sqrt(n)
    selected = (trainpos & holdopos) | (trainneg & holdoneg)
    trainanswers[~selected] = 0
    sortanswers = np.abs(trainanswers).argsort()
    for k in krange:
        weights = np.zeros(d + 1)
        topk = sortanswers[-k:]
        weights[topk] = np.sign(trainanswers[topk])
        ftrain = 1.0 * np.count_nonzero(np.sign(np.dot(X_train, weights)) == X_train[:, d]) / n
        fholdout = 1.0 * np.count_nonzero(np.sign(np.dot(X_holdout, weights)) == X_holdout[:, d]) / n
        # Thresholdout on the accuracy query itself
        if abs(ftrain - fholdout) < threshold + np.random.normal(0, tolerance):
            fholdout = ftrain
        else:
            fholdout += np.random.normal(0, tolerance)
        ftest = 1.0 * np.count_nonzero(np.sign(np.dot(X_test, weights)) == X_test[:, d]) / n
        if k == 0:
            noisy_vals.append([0.5, 0.5, 0.5])
        else:
            noisy_vals.append([ftrain, fholdout, ftest])
    vals = np.array(vals)
    noisy_vals = np.array(noisy_vals)
    return vals, noisy_vals
def repeatexp(n, d, krange, reps, datafn):
    """ Repeat experiment specified by fn for reps steps """
    collected = []
    collected_noisy = []
    for _ in range(reps):
        #print("Repetition: {}".format(r))
        X_train, X_holdout, X_test = datafn(n, d)
        v, v_noisy = runClassifier(n, d, krange, X_train, X_holdout, X_test)
        collected.append(v)
        collected_noisy.append(v_noisy)
    # Stack repetitions along a third axis: shape (len(krange), 3, reps)
    return np.dstack(collected), np.dstack(collected_noisy)
def runandplotsummary(n, d, krange, reps, datafn, condName):
    """Run the repeated experiment and plot mean/std accuracy for both
    the standard holdout and the Thresholdout mechanism."""
    vals_normal, vals_tout = repeatexp(n, d, krange, reps, datafn)
    stats = [(np.mean(v, axis=2), np.std(v, axis=2)) for v in (vals_normal, vals_tout)]
    ts = datetime.datetime.now().strftime('%Y%m%d%H%M')
    plotname = "plot-{ts}-{n}-{d}-{reps}-{condition}".format(
        ts=ts, n=n, d=d, reps=reps, condition=condName)
    f, ax = plt.subplots(2, 1, sharex=True)
    plot1(ax[0], krange, stats[0][0], stats[0][1], plotname + "-std", "Standard holdout")
    plot1(ax[1], krange, stats[1][0], stats[1][1], plotname + "-thr", "Thresholdout")
    ax[1].set_xlabel('Number of variables', fontsize='8')
    ax[1].legend(loc=2, prop={'size': 6})
def plot1(a, x, m, sd, plotname, plottitle):
    # Draw mean accuracy curves with +/- one-std shaded bands on axis `a`.
    # a: matplotlib axis; x: k values; m, sd: arrays with one column per
    # series (training, holdout, fresh). `plotname` is currently unused.
    a.set_title(plottitle, fontsize='8')
    a.set_ylabel('Accuracy', fontsize='8')
    a.axis([x[0], x[-1], 0.45, 0.75])
    colorList = ['#B2B2F5', '#CCFFCC', '#FF9848']  # training / holdout / fresh
    label = ['training', 'holdout', 'fresh']
    for i, color in enumerate(colorList):
        a.plot(x, m[:, i], c=color, marker='^', label=label[i])
        a.fill_between(x, m[:, i] - sd[:, i], m[:, i] + sd[:, i],
alpha=0.5, edgecolor=color, facecolor=color, linestyle='dashdot') | [
"numpy.random.normal",
"numpy.abs",
"numpy.dstack",
"numpy.mean",
"numpy.sqrt",
"numpy.add",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"datetime.datetime.now",
"numpy.sign",
"numpy.std",
"matplotlib.pyplot.subplots"
] | [((218, 252), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(n, d + 1)'], {}), '(0, 1, (n, d + 1))\n', (234, 252), True, 'import numpy as np\n'), ((273, 295), 'numpy.sign', 'np.sign', (['X_train[:, d]'], {}), '(X_train[:, d])\n', (280, 295), True, 'import numpy as np\n'), ((312, 346), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(n, d + 1)'], {}), '(0, 1, (n, d + 1))\n', (328, 346), True, 'import numpy as np\n'), ((369, 393), 'numpy.sign', 'np.sign', (['X_holdout[:, d]'], {}), '(X_holdout[:, d])\n', (376, 393), True, 'import numpy as np\n'), ((407, 441), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(n, d + 1)'], {}), '(0, 1, (n, d + 1))\n', (423, 441), True, 'import numpy as np\n'), ((461, 482), 'numpy.sign', 'np.sign', (['X_test[:, d]'], {}), '(X_test[:, d])\n', (468, 482), True, 'import numpy as np\n'), ((743, 777), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(n, d + 1)'], {}), '(0, 1, (n, d + 1))\n', (759, 777), True, 'import numpy as np\n'), ((798, 820), 'numpy.sign', 'np.sign', (['X_train[:, d]'], {}), '(X_train[:, d])\n', (805, 820), True, 'import numpy as np\n'), ((837, 871), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(n, d + 1)'], {}), '(0, 1, (n, d + 1))\n', (853, 871), True, 'import numpy as np\n'), ((894, 918), 'numpy.sign', 'np.sign', (['X_holdout[:, d]'], {}), '(X_holdout[:, d])\n', (901, 918), True, 'import numpy as np\n'), ((932, 966), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(n, d + 1)'], {}), '(0, 1, (n, d + 1))\n', (948, 966), True, 'import numpy as np\n'), ((986, 1007), 'numpy.sign', 'np.sign', (['X_test[:, d]'], {}), '(X_test[:, d])\n', (993, 1007), True, 'import numpy as np\n'), ((1098, 1115), 'numpy.zeros', 'np.zeros', (['nbiased'], {}), '(nbiased)\n', (1106, 1115), True, 'import numpy as np\n'), ((3073, 3110), 'numpy.abs', 'np.abs', (['(trainanswers - holdoutanswers)'], {}), '(trainanswers - holdoutanswers)\n', (3079, 3110), True, 'import numpy 
as np\n'), ((3123, 3156), 'numpy.random.normal', 'np.random.normal', (['(0)', 'tolerance', 'd'], {}), '(0, tolerance, d)\n', (3139, 3156), True, 'import numpy as np\n'), ((4442, 4456), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (4450, 4456), True, 'import numpy as np\n'), ((4474, 4494), 'numpy.array', 'np.array', (['noisy_vals'], {}), '(noisy_vals)\n', (4482, 4494), True, 'import numpy as np\n'), ((4959, 4977), 'numpy.dstack', 'np.dstack', (['vallist'], {}), '(vallist)\n', (4968, 4977), True, 'import numpy as np\n'), ((4998, 5022), 'numpy.dstack', 'np.dstack', (['vallist_noisy'], {}), '(vallist_noisy)\n', (5007, 5022), True, 'import numpy as np\n'), ((5226, 5257), 'numpy.mean', 'np.mean', (['vallist_normal'], {'axis': '(2)'}), '(vallist_normal, axis=2)\n', (5233, 5257), True, 'import numpy as np\n'), ((5276, 5306), 'numpy.std', 'np.std', (['vallist_normal'], {'axis': '(2)'}), '(vallist_normal, axis=2)\n', (5282, 5306), True, 'import numpy as np\n'), ((5328, 5357), 'numpy.mean', 'np.mean', (['vallist_tout'], {'axis': '(2)'}), '(vallist_tout, axis=2)\n', (5335, 5357), True, 'import numpy as np\n'), ((5374, 5402), 'numpy.std', 'np.std', (['vallist_tout'], {'axis': '(2)'}), '(vallist_tout, axis=2)\n', (5380, 5402), True, 'import numpy as np\n'), ((5606, 5637), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)'}), '(2, 1, sharex=True)\n', (5618, 5637), True, 'import matplotlib.pyplot as plt\n'), ((1079, 1089), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (1086, 1089), True, 'import numpy as np\n'), ((1217, 1250), 'numpy.add', 'np.add', (['X_holdout[i, :nbiased]', 'b'], {}), '(X_holdout[i, :nbiased], b)\n', (1223, 1250), True, 'import numpy as np\n'), ((1325, 1356), 'numpy.add', 'np.add', (['X_train[i, :nbiased]', 'b'], {}), '(X_train[i, :nbiased], b)\n', (1331, 1356), True, 'import numpy as np\n'), ((1429, 1459), 'numpy.add', 'np.add', (['X_test[i, :nbiased]', 'b'], {}), '(X_test[i, :nbiased], b)\n', (1435, 1459), True, 
'import numpy as np\n'), ((1786, 1796), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (1793, 1796), True, 'import numpy as np\n'), ((1819, 1829), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (1826, 1829), True, 'import numpy as np\n'), ((1864, 1903), 'numpy.dot', 'np.dot', (['X_train[:, :d].T', 'X_train[:, d]'], {}), '(X_train[:, :d].T, X_train[:, d])\n', (1870, 1903), True, 'import numpy as np\n'), ((1929, 1972), 'numpy.dot', 'np.dot', (['X_holdout[:, :d].T', 'X_holdout[:, d]'], {}), '(X_holdout[:, :d].T, X_holdout[:, d])\n', (1935, 1972), True, 'import numpy as np\n'), ((2352, 2367), 'numpy.zeros', 'np.zeros', (['(d + 1)'], {}), '(d + 1)\n', (2360, 2367), True, 'import numpy as np\n'), ((2424, 2451), 'numpy.sign', 'np.sign', (['trainanswers[topk]'], {}), '(trainanswers[topk])\n', (2431, 2451), True, 'import numpy as np\n'), ((2948, 2987), 'numpy.dot', 'np.dot', (['X_train[:, :d].T', 'X_train[:, d]'], {}), '(X_train[:, :d].T, X_train[:, d])\n', (2954, 2987), True, 'import numpy as np\n'), ((3013, 3056), 'numpy.dot', 'np.dot', (['X_holdout[:, :d].T', 'X_holdout[:, d]'], {}), '(X_holdout[:, :d].T, X_holdout[:, d])\n', (3019, 3056), True, 'import numpy as np\n'), ((3723, 3738), 'numpy.zeros', 'np.zeros', (['(d + 1)'], {}), '(d + 1)\n', (3731, 3738), True, 'import numpy as np\n'), ((3795, 3822), 'numpy.sign', 'np.sign', (['trainanswers[topk]'], {}), '(trainanswers[topk])\n', (3802, 3822), True, 'import numpy as np\n'), ((2013, 2023), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2020, 2023), True, 'import numpy as np\n'), ((2062, 2072), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2069, 2072), True, 'import numpy as np\n'), ((2110, 2120), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2117, 2120), True, 'import numpy as np\n'), ((2160, 2170), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2167, 2170), True, 'import numpy as np\n'), ((2282, 2302), 'numpy.abs', 'np.abs', (['trainanswers'], {}), '(trainanswers)\n', (2288, 2302), True, 'import numpy as np\n'), 
((3303, 3336), 'numpy.random.normal', 'np.random.normal', (['(0)', 'tolerance', 'd'], {}), '(0, tolerance, d)\n', (3319, 3336), True, 'import numpy as np\n'), ((3384, 3394), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (3391, 3394), True, 'import numpy as np\n'), ((3433, 3443), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (3440, 3443), True, 'import numpy as np\n'), ((3481, 3491), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (3488, 3491), True, 'import numpy as np\n'), ((3531, 3541), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (3538, 3541), True, 'import numpy as np\n'), ((3653, 3673), 'numpy.abs', 'np.abs', (['trainanswers'], {}), '(trainanswers)\n', (3659, 3673), True, 'import numpy as np\n'), ((4169, 4199), 'numpy.random.normal', 'np.random.normal', (['(0)', 'tolerance'], {}), '(0, tolerance)\n', (4185, 4199), True, 'import numpy as np\n'), ((5413, 5436), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5434, 5436), False, 'import datetime\n'), ((4069, 4099), 'numpy.random.normal', 'np.random.normal', (['(0)', 'tolerance'], {}), '(0, tolerance)\n', (4085, 4099), True, 'import numpy as np\n'), ((2500, 2524), 'numpy.dot', 'np.dot', (['X_train', 'weights'], {}), '(X_train, weights)\n', (2506, 2524), True, 'import numpy as np\n'), ((2598, 2624), 'numpy.dot', 'np.dot', (['X_holdout', 'weights'], {}), '(X_holdout, weights)\n', (2604, 2624), True, 'import numpy as np\n'), ((2697, 2720), 'numpy.dot', 'np.dot', (['X_test', 'weights'], {}), '(X_test, weights)\n', (2703, 2720), True, 'import numpy as np\n'), ((3871, 3895), 'numpy.dot', 'np.dot', (['X_train', 'weights'], {}), '(X_train, weights)\n', (3877, 3895), True, 'import numpy as np\n'), ((3969, 3995), 'numpy.dot', 'np.dot', (['X_holdout', 'weights'], {}), '(X_holdout, weights)\n', (3975, 3995), True, 'import numpy as np\n'), ((4247, 4270), 'numpy.dot', 'np.dot', (['X_test', 'weights'], {}), '(X_test, weights)\n', (4253, 4270), True, 'import numpy as np\n')] |
"""
This script demonstrates how to read holding current from ABF files.
You may need to install pyABF using the command "pip install pyabf"
"""
import pyabf
import numpy as np
# the R before the string tells Python to ignore backslashes
abfFilePath = R"C:\Users\scott\Documents\GitHub\pyABF\data\abfs\2019_05_02_DIC2_0011.abf"
abf = pyabf.ABF(abfFilePath)
# Let's calculate the mean current for a portion of every sweep (2-5 seconds)
for i in range(abf.sweepCount):
abf.setSweep(i)
pointsPerSecond = abf.dataRate
index1 = pointsPerSecond * 2
index2 = pointsPerSecond * 5
segment = abf.sweepY[index1:index2]
segmentMean = np.mean(segment)
print(f"sweep {i} mean current = {segmentMean} pA")
| [
"numpy.mean",
"pyabf.ABF"
] | [((337, 359), 'pyabf.ABF', 'pyabf.ABF', (['abfFilePath'], {}), '(abfFilePath)\n', (346, 359), False, 'import pyabf\n'), ((650, 666), 'numpy.mean', 'np.mean', (['segment'], {}), '(segment)\n', (657, 666), True, 'import numpy as np\n')] |
import random
import heapq as hp
import numpy as np
import time
def generate_instance(graph_type, graph_generator_inputs, demand_generator_inputs):
    """Generate a random instance with a guaranteed-feasible commodity set.

    Steps:
      - build a (reversed) graph of the requested type,
      - carve commodities out of its capacities so a solution exists,
      - un-reverse the graph before returning it.
    """
    generator_by_type = {
        "grid": generate_grid_reverse_graph,
        "random": generate_random_reverse_graph,
        "random_connected": generate_random_connected_reverse_graph,
    }
    assert graph_type in generator_by_type, "No generator for this type of graph is implemented, check your spelling or contribute"
    reverse_graph_generator = generator_by_type[graph_type]

    # graph generation (the generators return the graph with reversed arcs)
    reverse_graph, is_origin_list = reverse_graph_generator(*graph_generator_inputs)
    origin_list = [node for node, flag in enumerate(is_origin_list) if flag]

    # commodities generation
    commodity_list, path_list = generate_demand(is_origin_list, reverse_graph, **demand_generator_inputs)

    # the created graph was reversed so we reverse it back
    nb_nodes = len(reverse_graph)
    graph = [
        {neighbor: reverse_graph[neighbor][node]
         for neighbor in range(nb_nodes) if node in reverse_graph[neighbor]}
        for node in range(nb_nodes)
    ]

    return graph, commodity_list, path_list, origin_list
def generate_grid_reverse_graph(nb_origins, nb_row_grid, nb_column_grid, nb_origin_connections, grid_link_capacity=15000, other_link_capacity=10000, local_connection_of_origin = False):
    # generates a grid graph with additional nodes conected to the grid and uniform capacities
    # the graph is reversed
    #
    # The grid wraps around in both directions (indices are taken modulo
    # nb_row_grid / nb_column_grid), so every grid node gets 4 neighbors.
    # Nodes 0 .. nb_row_grid*nb_column_grid-1 are grid nodes; the last
    # nb_origins nodes are the origins (gateways/pops) attached to the grid.
    #
    # Args:
    #   nb_origins: number of extra origin nodes appended after the grid
    #   nb_row_grid, nb_column_grid: grid dimensions
    #   nb_origin_connections: number of arcs connecting each origin to the grid
    #   grid_link_capacity: capacity of every grid arc
    #   other_link_capacity: capacity of every origin arc
    #   local_connection_of_origin: if True, each origin is attached to a small
    #       square patch of the grid; otherwise its connections are spread
    #       uniformly at random over the grid
    # Returns:
    #   (reverse_graph, is_origin_list): reverse_graph[node] maps each reversed
    #   neighbor to the arc capacity; is_origin_list[node] is 1 for origins
    reverse_graph = []
    for i in range(nb_row_grid * nb_column_grid + nb_origins):
        reverse_graph.append({})

    # generates the grid (node (i, j) is stored at index i + nb_row_grid * j)
    for i in range(nb_row_grid):
        for j in range(nb_column_grid):
            reverse_graph[i + nb_row_grid * j][(i+1)%nb_row_grid + nb_row_grid * j] = grid_link_capacity
            reverse_graph[i + nb_row_grid * j][(i-1)%nb_row_grid + nb_row_grid * j] = grid_link_capacity
            reverse_graph[i + nb_row_grid * j][i + nb_row_grid * ((j+1)%nb_column_grid)] = grid_link_capacity
            reverse_graph[i + nb_row_grid * j][i + nb_row_grid * ((j-1)%nb_column_grid)] = grid_link_capacity

    # adding the additional nodes (the origins, i.e. the gateways/pops)
    if local_connection_of_origin:
        for d in range(nb_origins):
            origin = d + nb_row_grid * nb_column_grid
            # smallest square patch large enough for nb_origin_connections arcs
            square_size = int(np.ceil(np.sqrt(nb_origin_connections)))
            i = random.randint(0, nb_row_grid-1)
            j = random.randint(0, nb_column_grid-1)
            count = 0
            for k in range(square_size):
                for l in range(square_size):
                    if count < nb_origin_connections:
                        reverse_graph[(i+k)%nb_row_grid + nb_row_grid * ((j+l)%nb_column_grid)][origin] = other_link_capacity
                        count += 1

    else:
        for d in range(nb_origins):
            origin = d + nb_row_grid * nb_column_grid
            # NOTE: a grid node may be drawn twice, in which case the origin
            # ends up with fewer than nb_origin_connections distinct arcs
            for k in range(nb_origin_connections):
                i = random.randint(0, nb_row_grid-1)
                j = random.randint(0, nb_column_grid-1)
                reverse_graph[i + nb_row_grid * j][origin] = other_link_capacity

    is_origin_list = [0] * nb_row_grid * nb_column_grid + [1] * nb_origins

    return reverse_graph, is_origin_list
def generate_random_reverse_graph(nb_nodes, edge_proba, nb_origins, arc_capacity):
    """Create a reversed Erdos-Renyi style graph with uniform arc capacities.

    Each ordered pair of distinct nodes gets an arc with probability
    edge_proba; nb_origins distinct nodes are flagged as origins.
    Returns (reverse_graph, is_origin_list).
    """
    reverse_graph = [{} for _ in range(nb_nodes)]

    is_origin_list = [0] * nb_nodes
    for origin in np.random.choice(nb_nodes, nb_origins, replace=False):
        is_origin_list[origin] = 1

    # independently draw each possible arc (no self-loops)
    for tail in range(nb_nodes):
        for head in range(nb_nodes):
            if tail != head and random.random() < edge_proba:
                reverse_graph[tail][head] = arc_capacity

    return reverse_graph, is_origin_list
def generate_random_connected_reverse_graph(nb_nodes, edge_proba, nb_origins, arc_capacity):
    # generates a random graph with uniform capacities
    # the graph is reversed
    # the returned graph is always strongly connected
    #
    # Strategy: first add just enough arcs so that every node can reach every
    # other node (each node is made a "root" that reaches the whole graph),
    # then top up with random arcs so the overall edge density approaches
    # edge_proba.
    reverse_graph = [{} for i in range(nb_nodes)]

    # flag nb_origins distinct nodes as origins
    is_origin_list = [0]*nb_nodes
    for node in np.random.choice(nb_nodes, nb_origins, replace=False):
        is_origin_list[node] = 1

    # repeat until every node reaches all others: pick a candidate root, do a
    # DFS from it, and if some node is unreachable add an arc toward one such
    # node and retry
    not_root_set = set(range(nb_nodes))
    while len(not_root_set) > 0:
        initial_node = random.choice(tuple(not_root_set))
        reachable = [False]*nb_nodes
        reachable[initial_node] = True
        pile = [initial_node]

        # iterative DFS from initial_node
        while pile:
            current_node = pile.pop()

            for neighbor in reverse_graph[current_node]:
                if not reachable[neighbor]:
                    reachable[neighbor] = True
                    pile.append(neighbor)

        unreachable_nodes = [node for node in range(nb_nodes) if not reachable[node]]

        if len(unreachable_nodes) == 0:
            # initial_node reaches everything; it never needs to be retried
            not_root_set.remove(initial_node)

        else:
            # connect toward one unreachable node and keep initial_node queued
            chosen_node = random.choice(unreachable_nodes)
            reverse_graph[initial_node][chosen_node] = arc_capacity

    # lower the remaining edge probability to account for the arcs already
    # added by the connectivity phase
    current_nb_edge = sum([len(d) for d in reverse_graph])
    edge_proba -= current_nb_edge / (nb_nodes**2 - nb_nodes)

    # top up with independent random arcs (no self-loops)
    for node in range(nb_nodes):
        for neighbor in range(nb_nodes):
            if node != neighbor and random.random() < edge_proba:
                reverse_graph[node][neighbor] = arc_capacity

    return reverse_graph, is_origin_list
def generate_demand(is_origin_list, reverse_graph, random_filling_of_origins=True, random_paths=True, max_demand=1500, delete_resuidal_capacity=False,
                        smaller_commodities=False, verbose=0):
    # generates the commodities so that there exist a solution
    # To create one commodity :
    # a random node is chosen, all the origins attainable from the node are computed
    # one is randomly chosen with a random path to it, create a commodity demand that can fit on the path
    #
    # The loop only terminates through the `return` below, once no destination
    # node can reach any origin through the residual capacities.
    #
    # Args:
    #   is_origin_list: is_origin_list[node] is 1 iff node is an origin
    #   reverse_graph: reversed adjacency dicts, node -> {neighbor: capacity}
    #   random_filling_of_origins: pick the origin at random instead of the
    #       closest one (by path length)
    #   random_paths: explore paths in random order (see get_availables_origins)
    #   max_demand: upper bound for a single commodity's demand
    #   delete_resuidal_capacity: shrink the graph capacities so they exactly
    #       fit the generated solution
    #   smaller_commodities: draw the demand uniformly below the path's
    #       bottleneck instead of capping a uniform draw at the bottleneck
    # Returns:
    #   (commodity_list, path_list): commodities as (origin, destination,
    #   demand) triplets and, for each, the path used to route it

    # residual capacities, consumed as commodities are routed
    residual_graph = [{neighbor : reverse_graph[node][neighbor] for neighbor in reverse_graph[node]} for node in range(len(reverse_graph))]
    commodity_list = []
    path_list = []
    # non-origin nodes still able to reach an origin (used as sampling weights)
    possible_destination_nodes = 1 - np.array(is_origin_list)

    i = 0
    while True:
        if i%100 == 0: print(i, end='\r')
        i+=1

        # choosing a random none origin node
        destination = np.random.choice(len(is_origin_list), p=possible_destination_nodes / sum(possible_destination_nodes))

        # getting all attainable origins
        origin_list = get_availables_origins(residual_graph, destination, is_origin_list, random_paths)

        # raising the failure when no origin is attainable
        if origin_list == []:
            # this destination is exhausted; stop once all of them are
            possible_destination_nodes[destination] = 0
            if sum(possible_destination_nodes) == 0:
                if verbose:
                    print()
                    print("residual value is ",sum([sum(dct.values()) for dct in residual_graph]))
                    print("full value is ",sum([sum(dct.values()) for dct in reverse_graph]))
                if delete_resuidal_capacity:
                    # keep only the capacity actually consumed by the solution
                    for node, neighbor_dict in enumerate(reverse_graph):
                        reverse_graph[node] = {neighbor : neighbor_dict[neighbor] - residual_graph[node][neighbor] for neighbor in neighbor_dict}
                return commodity_list, path_list
            else:
                continue

        # choosing an origin
        if random_filling_of_origins:
            origin, path = origin_list[random.randint(0, len(origin_list)-1)]
        else:
            origin, path = min(origin_list, key=lambda x:x[0])

        # allocating the commodity in the graph
        # (demand is bounded by the bottleneck capacity of the chosen path)
        min_remaining_capacity = min([residual_graph[path[node_index]][path[node_index+1]] for node_index in range(len(path)-1)])
        if smaller_commodities:
            used_capacity = random.randint(1, min(min_remaining_capacity, max_demand))
        else:
            used_capacity = min(min_remaining_capacity, random.randint(1, max_demand))
        for node_index in range(len(path)-1):
            residual_graph[path[node_index]][path[node_index+1]] -= used_capacity
        commodity_list.append((origin, destination, used_capacity))
        # the path was built on the reversed graph; store it forward
        path.reverse()
        path_list.append(path)
def get_availables_origins(residual_graph, initial_node, is_origin_list, random_paths):
    """Return every origin reachable from initial_node through arcs with
    positive residual capacity, paired with one path leading to it.

    When random_paths is true the search frontier is expanded in random
    order; otherwise a plain FIFO (breadth-first) order is used.
    """
    frontier = [(initial_node, [initial_node])]
    seen = [False] * len(residual_graph)
    seen[initial_node] = True
    reachable_origins = []

    while frontier:
        pick = random.randint(0, len(frontier) - 1) if random_paths else 0
        current_node, path = frontier.pop(pick)

        for neighbor, capacity in residual_graph[current_node].items():
            if capacity > 0 and not seen[neighbor]:
                seen[neighbor] = True
                if is_origin_list[neighbor]:
                    reachable_origins.append((neighbor, path + [neighbor]))
                else:
                    frontier.append((neighbor, path + [neighbor]))

    return reachable_origins
def mutate_instance(graph, commodity_list, origin_list, mutation_rate=0.03):
    """Randomly perturb an instance in place.

    With probability mutation_rate per commodity, its destination is moved to
    a random non-origin neighbor of the current destination; with probability
    mutation_rate per origin arc, the arc is re-attached to another compatible
    non-origin node.

    Args:
        graph: adjacency list, graph[node] maps neighbor -> capacity
            (mutated in place)
        commodity_list: list of (origin, destination, demand) triplets
            (mutated in place)
        origin_list: nodes flagged as origins
        mutation_rate: per-element mutation probability

    Returns:
        None (both graph and commodity_list are modified in place).
    """
    nb_nodes = len(graph)
    is_origin_list = [0] * nb_nodes
    for node in origin_list:
        is_origin_list[node] = 1

    # mutate commodity destinations
    for commodity_index, (origin, destination, demand) in enumerate(commodity_list):
        if random.random() < mutation_rate:
            neighbor_list = [neighbor for neighbor in graph[destination] if not is_origin_list[neighbor]]
            if neighbor_list:
                destination = np.random.choice(neighbor_list)
                commodity_list[commodity_index] = (origin, destination, demand)

    # mutate the connections of the origins
    for origin in origin_list:
        # iterate over a copy: the dict may be modified inside the loop
        for neighbor in list(graph[origin].keys()):
            if random.random() < mutation_rate:
                # candidate nodes: not already connected to the origin, able to
                # reach the old neighbor, and not origins themselves
                possible_neighbor_list = [node for node in range(nb_nodes)
                                          if node not in graph[origin] and neighbor in graph[node]
                                          and not is_origin_list[node]]
                if possible_neighbor_list:
                    new_neighbor = np.random.choice(possible_neighbor_list)
                    graph[origin][new_neighbor] = graph[origin].pop(neighbor)
    # NOTE: the original ended with a nb_origin_neighbor computation whose
    # result was never used; that dead code has been removed.
if __name__ == "__main__":
pass
| [
"random.choice",
"numpy.sqrt",
"numpy.random.choice",
"numpy.array",
"random.random",
"random.randint"
] | [((3898, 3951), 'numpy.random.choice', 'np.random.choice', (['nb_nodes', 'nb_origins'], {'replace': '(False)'}), '(nb_nodes, nb_origins, replace=False)\n', (3914, 3951), True, 'import numpy as np\n'), ((4562, 4615), 'numpy.random.choice', 'np.random.choice', (['nb_nodes', 'nb_origins'], {'replace': '(False)'}), '(nb_nodes, nb_origins, replace=False)\n', (4578, 4615), True, 'import numpy as np\n'), ((6534, 6558), 'numpy.array', 'np.array', (['is_origin_list'], {}), '(is_origin_list)\n', (6542, 6558), True, 'import numpy as np\n'), ((2761, 2795), 'random.randint', 'random.randint', (['(0)', '(nb_row_grid - 1)'], {}), '(0, nb_row_grid - 1)\n', (2775, 2795), False, 'import random\n'), ((2810, 2847), 'random.randint', 'random.randint', (['(0)', '(nb_column_grid - 1)'], {}), '(0, nb_column_grid - 1)\n', (2824, 2847), False, 'import random\n'), ((5351, 5383), 'random.choice', 'random.choice', (['unreachable_nodes'], {}), '(unreachable_nodes)\n', (5364, 5383), False, 'import random\n'), ((9977, 9992), 'random.random', 'random.random', ([], {}), '()\n', (9990, 9992), False, 'import random\n'), ((3342, 3376), 'random.randint', 'random.randint', (['(0)', '(nb_row_grid - 1)'], {}), '(0, nb_row_grid - 1)\n', (3356, 3376), False, 'import random\n'), ((3395, 3432), 'random.randint', 'random.randint', (['(0)', '(nb_column_grid - 1)'], {}), '(0, nb_column_grid - 1)\n', (3409, 3432), False, 'import random\n'), ((8355, 8384), 'random.randint', 'random.randint', (['(1)', 'max_demand'], {}), '(1, max_demand)\n', (8369, 8384), False, 'import random\n'), ((10183, 10214), 'numpy.random.choice', 'np.random.choice', (['neighbor_list'], {}), '(neighbor_list)\n', (10199, 10214), True, 'import numpy as np\n'), ((10387, 10402), 'random.random', 'random.random', ([], {}), '()\n', (10400, 10402), False, 'import random\n'), ((2712, 2742), 'numpy.sqrt', 'np.sqrt', (['nb_origin_connections'], {}), '(nb_origin_connections)\n', (2719, 2742), True, 'import numpy as np\n'), ((4097, 4112), 
'random.random', 'random.random', ([], {}), '()\n', (4110, 4112), False, 'import random\n'), ((5683, 5698), 'random.random', 'random.random', ([], {}), '()\n', (5696, 5698), False, 'import random\n'), ((10869, 10909), 'numpy.random.choice', 'np.random.choice', (['possible_neighbor_list'], {}), '(possible_neighbor_list)\n', (10885, 10909), True, 'import numpy as np\n')] |
import os
import sys
import xesmf as xe
import xarray as xr
import numpy as np
import pandas as pd
import math
from dask.diagnostics import ProgressBar
class color:
    """ANSI escape sequences used to colour and format terminal output."""
    # foreground colours ("L" prefix = light/bright variant)
    PURPLE = '\033[35m'
    CYAN = '\033[36m'
    BLUE = '\033[34m'
    LBLUE='\033[94m'
    GREEN = '\033[32m'
    LGREEN='\033[92m'
    YELLOW = '\033[33m'
    RED = '\033[31m'
    LRED='\033[91m'
    # text attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    # reset all attributes
    END = '\033[0m'
class HidePrint:
    """Context manager that silences print() for the duration of the block
    by redirecting sys.stdout to os.devnull."""

    def __enter__(self):
        # remember the real stream so it can be restored on exit
        self._stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')

    def __exit__(self, exc_type, exc_val, exc_tb):
        muted = sys.stdout
        sys.stdout = self._stdout
        muted.close()
def _check_list(item):
    """Normalise a string that encodes a list into a canonical
    comma-separated form, e.g. "['a','b']" -> "a, b".

    Non-string inputs are returned unchanged (the original relied on a
    blanket try/except around an always-true `type(...) == list` check to
    obtain this behaviour; an explicit isinstance guard is equivalent and
    does not swallow unrelated errors).
    """
    if not isinstance(item, str):
        return item
    # drop quotes/brackets, split on commas and strip stray whitespace
    parts = [part.strip() for part in item.replace("'", '').strip('[]').split(',')]
    return str(parts).replace("'", '').strip('[]')
def _regrid(dr, ds_in=None,_var=None,regrid='rect'):
    """Regrid *dr* onto a 1x1 degree global grid with xESMF.

    Args:
        dr: DataArray/Dataset to regrid.
        ds_in: optional dataset describing the source grid; defaults to dr.
        _var: unused, kept for backward compatibility.
        regrid: 'rect' builds a plain 1-degree rectilinear target grid,
            anything else uses xESMF's global 1x1 helper grid.

    Returns:
        The regridded data.
    """
    def _normalise_coords(ds):
        # xESMF expects the horizontal coordinates to be named lon/lat
        try:
            return ds.rename({'longitude': 'lon', 'latitude': 'lat'})
        except Exception:
            pass
        try:
            return ds.rename({'nav_lon': 'lon', 'nav_lat': 'lat'})
        except Exception:
            return ds

    # BUG FIX: the original used `ds_in == None`, which performs an
    # element-wise comparison on xarray objects; an identity check is the
    # intended behaviour.
    ds_in = _normalise_coords(dr if ds_in is None else ds_in)

    if regrid != 'rect':
        ds_out = xe.util.grid_global(1, 1)
    else:
        ds_out = xr.Dataset({'lat': (['lat'], np.arange(-89.5, 90.0, 1.0)),
                            'lon': (['lon'], np.arange(0, 360, 1.0)),})
    try:
        regridder = xe.Regridder(ds_in, ds_out, 'bilinear', periodic=True,
                                 reuse_weights=True)
    except Exception:
        # some grids contain degenerate cells; retry while tolerating them
        regridder = xe.Regridder(ds_in, ds_out, 'bilinear', periodic=True,
                                 reuse_weights=True, ignore_degenerate=True)
    ds = regridder(dr)
    return ds
class concat_data(object):
    # Concatenate two datasets (file paths or already-open xarray objects)
    # along the time dimension, optionally writing the result to NetCDF.
    def __init__(self,fname1,fname2,**kwargs):
        # fname1/fname2: file paths (globs accepted) or xarray DataArrays
        self.fname1 = fname1
        self.fname2 = fname2
        # var: variable name to extract; if None it is inferred from the
        # file name (first '_'-separated token)
        self._var = kwargs.get('var', None)
        # nc: 'yes' to write a NetCDF file, anything else to return the data
        self.nc = kwargs.get('nc', 'yes')
        # init/end: year labels used only to build the default output name
        self.init = kwargs.get('init', None)
        self.end = kwargs.get('end', None)
        # out: explicit output path (overrides the generated name)
        self.out = kwargs.get('out', None)

    def _cExp(self):
        # Load both inputs (when they are paths), concatenate along time and
        # either save to NetCDF or return the concatenated DataArray.
        if type(self.fname1) is str:
            if self._var == None:
                # infer the variable name from the file name, e.g. "tas_..."
                var = self.fname1.split('/')[-1].split('_')[0]
            else:
                var = self._var
            data1 = xr.open_mfdataset(self.fname1)[var]
            data2 = xr.open_mfdataset(self.fname2)[var]
        else:
            # inputs are already xarray objects
            data1 = self.fname1
            data2 = self.fname2
        data = xr.concat([data1,data2],dim='time')
        print('\nConcat shape:', data.shape)
        if self.nc == 'yes':
            with ProgressBar():
                if self.out !=None:
                    data.load().to_netcdf(self.out)
                else:
                    # build the output name from the first four '_' tokens of
                    # the input file name
                    if (self.init != None) or (self.end != None):
                        data.load().to_netcdf(('_').join(self.fname1.split('/')[-1].split('_')[:4])+'_'+str(self.init)+'-'+str(self.end)+'_combined.nc')
                    else:
                        data.load().to_netcdf(('_').join(self.fname1.split('/')[-1].split('_')[:4])+'_combined.nc')
        else:
            return data
class aave(object):
    # Area-weighted spatial average of a lat/lon DataArray over a
    # rectangular lat/lon box (defaults to the whole globe).
    def __init__(self, val, lat_i=-90, lat_e=90, lon_i=-180, lon_e=180):
        # val: 2-D DataArray with 'lat' and 'lon' coordinates
        #      (assumed ordered (lat, lon) — see the positional slicing below)
        # lat_i/lat_e, lon_i/lon_e: bounds of the averaging box (degrees)
        self.val = val
        self.lat_i = lat_i
        self.lat_e = lat_e
        self.lon_i = lon_i
        self.lon_e = lon_e

    def get_lat_lon(self):
        # Convert longitudes to [-180, 180), sort by longitude, and slice the
        # array down to the requested lat/lon box (inclusive bounds).
        lat = self.val.lat.values
        self.val.coords['lon']=(self.val.coords['lon'] + 180) % 360 - 180
        sorted_val = self.val.sortby(self.val.lon)
        lon = sorted_val.lon.values
        # positional indices of the first/last coordinates inside the box
        late=list(lat).index(lat[lat<=self.lat_e][len(lat[lat<=self.lat_e])-1])
        lati=list(lat).index(lat[lat>=self.lat_i][0])
        lone=list(lon).index(lon[lon<=self.lon_e][len(lon[lon<=self.lon_e])-1])
        loni=list(lon).index(lon[lon>=self.lon_i][0])
        new_val = sorted_val[lati:late+1,loni:lone+1]
        return new_val

    def get_weight(self):
        # Build latitude weights proportional to the area of each grid row
        # (dx shrinks with cos(lat); dy from the latitude spacing).
        # NOTE(review): assumes a regular longitude spacing — dlon is taken
        # from a single pair of grid points.
        lat=self.get_lat_lon().lat.values
        lon=self.get_lat_lon().lon.values
        jlat = lat.shape[0]
        rad = 4.0*math.atan(1.0)/180.0   # degrees -> radians (pi/180)
        re = 6371220.0                    # Earth radius in metres
        rr = re*rad
        dlon = abs(lon[2]-lon[1])*rr
        dx = dlon*np.cos(lat*rad)
        # dy: centred differences in the interior, one-sided at the edges
        dy = np.zeros(jlat)
        dy[0] = abs(lat[2]-lat[1])*rr
        dy[1:jlat-1] = abs(lat[2:jlat]-lat[0:jlat-2])*rr*0.5
        dy[jlat-1] = abs(lat[jlat-1]-lat[jlat-2])*rr
        multi = dx*dy
        area=xr.DataArray(multi)
        area=area.rename({'dim_0':'lat'})
        return area

    @staticmethod
    def average_da(var, dim=None, weights=None):
        # Weighted mean of var along dim; weights of all-NaN slices are
        # excluded from the normalisation.
        if weights is None:
            return var.mean(dim)
        else:
            if not isinstance(weights, xr.DataArray):
                raise ValueError("weights must be a DataArray")
            if var.notnull().any():
                total_weights = weights.where(var.notnull()).sum(dim=dim)
            else:
                total_weights = weights.sum(dim)
            return (var * weights).sum(dim) / total_weights

    def spatial_avg(self):
        # Mean over lon first (equal weights), then an area-weighted mean
        # over lat. Returns a plain numpy value/array.
        new_val = self.get_lat_lon()
        lon_avg = new_val.mean(dim='lon')
        area = self.get_weight()
        avg = self.average_da(lon_avg, dim='lat',weights=area)
        return avg.values
def get_area(val, area):
    """Spatially average *val* over a named region.

    Supported region names: global, tropic, NH, SH, NA, EU, NH-mid, SPG,
    azores, icelandic. An unknown name raises UnboundLocalError, matching
    the original behaviour.
    """
    region_bounds = {
        'global':    {},
        'tropic':    dict(lat_i=-30, lat_e=30),
        'NH':        dict(lat_i=0),
        'SH':        dict(lat_e=0),
        'NA':        dict(lat_i=0, lat_e=65, lon_i=-60, lon_e=-10),
        'EU':        dict(lat_i=30, lat_e=45, lon_i=0, lon_e=30),
        'NH-mid':    dict(lat_i=30, lat_e=60),
        'SPG':       dict(lat_i=45, lat_e=65, lon_i=-60, lon_e=-10),
        'azores':    dict(lat_i=25, lat_e=35, lon_i=-35, lon_e=5),
        'icelandic': dict(lat_i=50, lat_e=60, lon_i=-35, lon_e=5),
    }
    if area in region_bounds:
        out = aave(val, **region_bounds[area]).spatial_avg()
    return out
def get_rm_ts(path,models,variables,experiments,region='global',area_file=None):
    # For every (experiment, variable) pair: load the running-mean files of
    # each model, compute a spatial-average time series per model, and write
    # both anomaly and raw time series to CSV files under `path`.
    #
    # Args:
    #   path: directory containing the '<var>_rm_<model>_<exp>_RM_*.nc' files
    #   models, variables, experiments: lists of names to iterate over
    #   region: named region passed to get_area (only used when area_file is None)
    #   area_file: if not None, use per-model 'areacella' files for the
    #       weighting instead of get_area's latitude weights
    for exp in experiments:
        for var in variables:
            all_models=[]
            av_models=[]
            area_files=[]
            for model in models:
                print('\nFor variable / model / experiment: ', var,'/',model,'/',exp)
                try:
                    if area_file!=None:
                        area=xr.open_mfdataset(path+'/areacell*'+model+'*'+exp+'*.nc')['areacella']
                        area_files.append(area)
                    data=xr.open_mfdataset(path+'/'+var+'_rm_'+model+'_'+exp+'_RM_*.nc')[var]
                    all_models.append(data)
                    av_models.append(model)
                    print(model)
                except:
                    # NOTE(review): bare except silently skips models whose
                    # files are missing or unreadable
                    pass
            n=0
            mdata=pd.DataFrame()          # per-model anomaly time series
            mdata_actual=pd.DataFrame()   # per-model raw time series
            print('\nCalculating Spatial averages . . .')
            for avg in all_models:
                try:
                    avg=avg.rename_dims({'longitude':'lon','latitude':'lat'})
                except:
                    pass
                ts=[]
                if area_file!=None:
                    # area-weighted average using the model's cell areas
                    ts=(avg*area_files[n]).sum(['lat','lon'])/area_files[n].sum(['lat','lon'])
                else:
                    # regional average of each time step
                    for i in range(len(avg.time)):
                        ts.append(get_area(avg[i,:,:],region))
                mdata_actual[av_models[n]]=pd.Series(ts)
                # anomaly relative to the model's own time mean
                ser=(mdata_actual[av_models[n]]-mdata_actual[av_models[n]].mean())
                mdata[av_models[n]]=ser
                # CSVs are rewritten after each model (last write wins)
                mdata.to_csv(path+'/'+var+'_'+exp+'_AllModels_'+region+'.csv', index=False)
                mdata_actual.to_csv(path+'/'+var+'_'+exp+'_AllModels_actual_'+region+'.csv', index=False)
                n=n+1
| [
"pandas.Series",
"xesmf.util.grid_global",
"dask.diagnostics.ProgressBar",
"xarray.concat",
"xesmf.Regridder",
"numpy.zeros",
"sys.stdout.close",
"numpy.cos",
"xarray.DataArray",
"math.atan",
"pandas.DataFrame",
"xarray.open_mfdataset",
"numpy.arange"
] | [((607, 625), 'sys.stdout.close', 'sys.stdout.close', ([], {}), '()\n', (623, 625), False, 'import sys\n'), ((1571, 1596), 'xesmf.util.grid_global', 'xe.util.grid_global', (['(1)', '(1)'], {}), '(1, 1)\n', (1590, 1596), True, 'import xesmf as xe\n'), ((1786, 1860), 'xesmf.Regridder', 'xe.Regridder', (['ds_in', 'ds_out', '"""bilinear"""'], {'periodic': '(True)', 'reuse_weights': '(True)'}), "(ds_in, ds_out, 'bilinear', periodic=True, reuse_weights=True)\n", (1798, 1860), True, 'import xesmf as xe\n'), ((2862, 2899), 'xarray.concat', 'xr.concat', (['[data1, data2]'], {'dim': '"""time"""'}), "([data1, data2], dim='time')\n", (2871, 2899), True, 'import xarray as xr\n'), ((4646, 4660), 'numpy.zeros', 'np.zeros', (['jlat'], {}), '(jlat)\n', (4654, 4660), True, 'import numpy as np\n'), ((4858, 4877), 'xarray.DataArray', 'xr.DataArray', (['multi'], {}), '(multi)\n', (4870, 4877), True, 'import xarray as xr\n'), ((1927, 2029), 'xesmf.Regridder', 'xe.Regridder', (['ds_in', 'ds_out', '"""bilinear"""'], {'periodic': '(True)', 'reuse_weights': '(True)', 'ignore_degenerate': '(True)'}), "(ds_in, ds_out, 'bilinear', periodic=True, reuse_weights=True,\n ignore_degenerate=True)\n", (1939, 2029), True, 'import xesmf as xe\n'), ((4617, 4634), 'numpy.cos', 'np.cos', (['(lat * rad)'], {}), '(lat * rad)\n', (4623, 4634), True, 'import numpy as np\n'), ((7377, 7391), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7389, 7391), True, 'import pandas as pd\n'), ((7417, 7431), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7429, 7431), True, 'import pandas as pd\n'), ((2677, 2707), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['self.fname1'], {}), '(self.fname1)\n', (2694, 2707), True, 'import xarray as xr\n'), ((2733, 2763), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['self.fname2'], {}), '(self.fname2)\n', (2750, 2763), True, 'import xarray as xr\n'), ((2989, 3002), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (3000, 3002), False, 'from 
dask.diagnostics import ProgressBar\n'), ((4498, 4512), 'math.atan', 'math.atan', (['(1.0)'], {}), '(1.0)\n', (4507, 4512), False, 'import math\n'), ((8022, 8035), 'pandas.Series', 'pd.Series', (['ts'], {}), '(ts)\n', (8031, 8035), True, 'import pandas as pd\n'), ((1651, 1678), 'numpy.arange', 'np.arange', (['(-89.5)', '(90.0)', '(1.0)'], {}), '(-89.5, 90.0, 1.0)\n', (1660, 1678), True, 'import numpy as np\n'), ((1730, 1752), 'numpy.arange', 'np.arange', (['(0)', '(360)', '(1.0)'], {}), '(0, 360, 1.0)\n', (1739, 1752), True, 'import numpy as np\n'), ((7104, 7181), 'xarray.open_mfdataset', 'xr.open_mfdataset', (["(path + '/' + var + '_rm_' + model + '_' + exp + '_RM_*.nc')"], {}), "(path + '/' + var + '_rm_' + model + '_' + exp + '_RM_*.nc')\n", (7121, 7181), True, 'import xarray as xr\n'), ((6960, 7027), 'xarray.open_mfdataset', 'xr.open_mfdataset', (["(path + '/areacell*' + model + '*' + exp + '*.nc')"], {}), "(path + '/areacell*' + model + '*' + exp + '*.nc')\n", (6977, 7027), True, 'import xarray as xr\n')] |
#! python
import csv
import glob
import random
import math
import operator
from functools import reduce
import os
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
from os import system, name
import numpy as np
def clear():
    '''
    Clear the console, choosing the command appropriate for the host OS.
    '''
    # os.name == 'nt' on windows, 'posix' on mac/linux
    command = 'cls' if name == 'nt' else 'clear'
    _ = system(command)
def GetInputDataFile():
    '''
    get user input for which data file to run algo on
    also get number of centroids to compute and whether to
    save scatter plot images or not

    Returns:
        (dataFile, k, saveScatterPlots, sigma)
    '''
    #clear()
    dataFile = None
    csvList = glob.glob("data/*.csv")
    print("select a data file to run spectral clustering")
    for idx, filePath in enumerate(csvList):
        print(f'({idx}) {filePath}')
    dataFileIndex = int(input("select option "))
    if 0 <= dataFileIndex < len(csvList):
        dataFile = csvList[dataFileIndex]
    else:
        # BUG FIX: the original called GetInputDataFile() recursively but
        # discarded its result and fell through with dataFile = None;
        # the retry's answers must be returned to the caller.
        return GetInputDataFile()
    sigma = float(input("enter sigma value for gaussian similarity function "))
    k = int(input("enter number of clusters to compute "))
    YES_VALUES = {'y', 'yes', 'Y'}
    saveScatterPlots = input("save scatter plot for each iteration ? (y,N) ").lower() in YES_VALUES
    if(saveScatterPlots):
        print('scatter plots will be saved in ./images/ folder')
    print('output csv files will be store in ./output/ folder')
    return (dataFile, k, saveScatterPlots, sigma)
def GetDistance(x, y):
    '''
    Euclidean distance between two n-dimensional points.
    '''
    squared_diffs = ((a - b) ** 2 for a, b in zip(x, y))
    return math.sqrt(sum(squared_diffs))
def Assign(centroides, data):
    '''
    Assign each point to the nearest of the k centroids (Euclidean distance).

    Returns a list with one dict per point, holding the keys
    'closestDistance', 'closestCentroid' and 'point'.
    '''
    mapping = []
    for point in data:
        best_distance = None
        best_centroid = None
        for centroid in centroides:
            distance = GetDistance(point, centroid)
            if best_distance is None or best_distance > distance:
                best_distance = distance
                best_centroid = centroid
        mapping.append({
            'closestDistance': best_distance,
            'closestCentroid': best_centroid,
            'point': point,
        })
    return mapping
def sumPoints(x1, x2):
    """Element-wise sum of two points.

    Generalized from the original 2-D-only implementation to points of any
    dimension; for 2-D inputs the result is identical.
    """
    return [a + b for a, b in zip(x1, x2)]
def Update(centroides, previousIterationData):
    '''
    Recompute each centroid as the mean of its assigned points.

    Returns (new centroid list, list of distances each centroid moved).
    '''
    differenceVector = []
    newCentroides = []
    for centroid in centroides:
        # collect the points currently assigned to this centroid
        cdata = [m['point'] for m in previousIterationData if m['closestCentroid'] == centroid]
        totalVector = reduce(sumPoints, cdata)
        mean = [coord / len(cdata) for coord in totalVector]
        print(f'number of data points for {centroid} are {len(cdata)} with mean {mean}')
        newCentroides.append(mean)
        differenceVector.append(GetDistance(mean, centroid))
    return (newCentroides, differenceVector)
def CalculatePartitions(data, k, epsilon, maxIterations):
    '''
    cluster data in k centroids based on lloyds algorithm

    Args:
        data: list of points
        k: number of clusters
        epsilon: stop once the total centroid movement falls below this
        maxIterations: hard cap on the number of update rounds
    '''
    ## ramdomly select K starting points to start
    centroides = random.sample(data, k)
    print(f'assigning {len(data)} number of data points to {k} clusters')
    mapped = Assign(centroides, data)
    significientDifference = True
    itr=1
    ## repeat unit no change in clusters
    while significientDifference:
        print('iteration', itr)
        itr += 1
        newCentroides, diffVector = Update(centroides, mapped)
        # converged when the centroids barely moved (or iteration cap hit)
        if sum(diffVector) < epsilon or itr > maxIterations:
            significientDifference = False
        if significientDifference:
            centroides = newCentroides
            mapped = Assign(centroides, data)
    # 1-based cluster index per point
    C = [centroides.index(x['closestCentroid'])+1 for x in mapped]
    # OUTPUT
    # (i) - centroides matrix
    # (ii) - cluster index vector C ∈{ 1,2,3…K }^N, Where C(i)=j indicates that the ith row of X belongs to cluster j
    return (centroides, C)
colors = list(mcolors.CSS4_COLORS.keys())
def plotCluster(originalData, C):
    """Scatter-plot the 2-D points coloured by cluster index C, save the
    figure under images/ and display it."""
    title = 'clusters computed with spectral clustring'
    fig, ax = plt.subplots(1, figsize=(10, 6))
    ax.set(title=title)
    for idx, point in enumerate(originalData):
        # +10 shifts into the palette (presumably to skip hard-to-see colours)
        ax.scatter(point[0], point[1], color=colors[C[idx] + 10], s=50)
    ax.grid(True)
    fig.tight_layout()
    plt.savefig(f'images/{title}.png')
    plt.show()
    plt.close()
def GaussianSimilarityFunction(p1, p2, sigma):
    """Gaussian (RBF) similarity: exp(-||p1 - p2||^2 / (2 * sigma^2))."""
    squared_distance = sum([(a - b) ** 2 for a, b in zip(p1, p2)])
    exponent = squared_distance / (2 * pow(sigma, 2))
    return math.exp(-1 * exponent)
def DiagMatrixHelper(x, y, AdjacencyMatrix):
    """Entry (x, y) of the degree matrix built from AdjacencyMatrix:
    the sum of row x on the diagonal, zero elsewhere."""
    return sum(AdjacencyMatrix[x]) if x == y else 0
def GetEigneVectorsForSmallEigenValues(a, k):
    """Return the eigenvectors of *a* belonging to its k smallest eigenvalues,
    one eigenvector per ROW of the returned array.

    BUG FIX: np.linalg.eig returns eigenvectors as the COLUMNS of its second
    output; the original indexed rows (eigenVectors[idx]), which selected
    meaningless row slices. We now select columns and transpose, so the
    caller's subsequent np.transpose(...) still yields a matrix with the
    eigenvectors as columns.
    """
    eigenValues, eigenVectors = np.linalg.eig(a)
    idx = eigenValues.argsort()[:k]  # indices of the k smallest eigenvalues
    print(eigenValues[idx])
    # column i of eigenVectors pairs with eigenValues[i]; take k columns,
    # return them as rows (same shape as the original's output)
    return eigenVectors[:, idx].T
def ComputeClustering(data, k=2, sigma=0.3, epsilon=10**-5, maxIterations=50):
    # Unnormalized spectral clustering:
    #   similarity graph -> graph Laplacian -> spectral embedding -> k-means.
    #
    # Args:
    #   data: list of points
    #   k: number of clusters
    #   sigma: bandwidth of the Gaussian similarity
    #   epsilon / maxIterations: k-means stopping criteria
    # Returns:
    #   (C, AdjacencyMatrix, U): 1-based cluster indices, the similarity
    #   matrix, and the spectral embedding rows fed to k-means.
    dataCount = len(data)
    # Compute adjacency matrix based on Gaussian Similarity function
    AdjacencyMatrix = [[GaussianSimilarityFunction(data[x], data[y], sigma) for x in range(dataCount)] for y in range(dataCount)]
    # Compute diagonal matrix from adjacency matrix
    DiagonalMatrix = [[DiagMatrixHelper(x,y, AdjacencyMatrix) for x in range(dataCount)] for y in range(dataCount)]
    # Compute laplacian matrix L = D - A
    LaplacianMatrix = [[ DiagonalMatrix[x][y]-AdjacencyMatrix[x][y] for x in range(dataCount)] for y in range(dataCount)] # DiagonalMatrix - AdjacencyMatrix
    # Compute eigen vectors corrosponding to k smallest eigen values
    # NOTE(review): np.linalg.eig stores eigenvectors as columns; verify that
    # GetEigneVectorsForSmallEigenValues returns them in the orientation this
    # transpose expects.
    eigen = GetEigneVectorsForSmallEigenValues(LaplacianMatrix, k)
    # Arrange egien vectors as column in a new U matrix
    U = np.transpose(eigen).tolist()
    # Perform K mean clustering on U
    _, C = CalculatePartitions(U, k, epsilon, maxIterations)
    return (C, AdjacencyMatrix, U)
if __name__ == "__main__":
print("Spectral clustering")
dataFile, k, savePlot, sigma = GetInputDataFile()
print(f"reading file from {dataFile}")
data = []
with open(dataFile, 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
for row in spamreader:
dataRow = [float(row[0]),float(row[1])]
data.append(dataRow)
C,_,_ = ComputeClustering(data, k, sigma)
if savePlot:
plotCluster(data, C)
| [
"random.sample",
"matplotlib.pyplot.savefig",
"numpy.linalg.eig",
"functools.reduce",
"matplotlib.pyplot.close",
"matplotlib.colors.CSS4_COLORS.keys",
"os.system",
"math.exp",
"numpy.transpose",
"csv.reader",
"matplotlib.pyplot.subplots",
"glob.glob",
"matplotlib.pyplot.show"
] | [((710, 733), 'glob.glob', 'glob.glob', (['"""data/*.csv"""'], {}), "('data/*.csv')\n", (719, 733), False, 'import glob\n'), ((3332, 3354), 'random.sample', 'random.sample', (['data', 'k'], {}), '(data, k)\n', (3345, 3354), False, 'import random\n'), ((4202, 4228), 'matplotlib.colors.CSS4_COLORS.keys', 'mcolors.CSS4_COLORS.keys', ([], {}), '()\n', (4226, 4228), True, 'from matplotlib import colors as mcolors\n'), ((4336, 4368), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(10, 6)'}), '(1, figsize=(10, 6))\n', (4348, 4368), True, 'import matplotlib.pyplot as plt\n'), ((4549, 4583), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""images/{title}.png"""'], {}), "(f'images/{title}.png')\n", (4560, 4583), True, 'import matplotlib.pyplot as plt\n'), ((4588, 4598), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4596, 4598), True, 'import matplotlib.pyplot as plt\n'), ((4603, 4614), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4612, 4614), True, 'import matplotlib.pyplot as plt\n'), ((4763, 4787), 'math.exp', 'math.exp', (['(-1 * disSquare)'], {}), '(-1 * disSquare)\n', (4771, 4787), False, 'import math\n'), ((5020, 5036), 'numpy.linalg.eig', 'np.linalg.eig', (['a'], {}), '(a)\n', (5033, 5036), True, 'import numpy as np\n'), ((354, 367), 'os.system', 'system', (['"""cls"""'], {}), "('cls')\n", (360, 367), False, 'from os import system, name\n'), ((443, 458), 'os.system', 'system', (['"""clear"""'], {}), "('clear')\n", (449, 458), False, 'from os import system, name\n'), ((2795, 2819), 'functools.reduce', 'reduce', (['sumPoints', 'cdata'], {}), '(sumPoints, cdata)\n', (2801, 2819), False, 'from functools import reduce\n'), ((6619, 6653), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (6629, 6653), False, 'import csv\n'), ((6214, 6233), 'numpy.transpose', 'np.transpose', (['eigen'], {}), '(eigen)\n', (6226, 6233), True, 'import numpy as np\n')] |
import numpy as np
import os
import xml.etree.ElementTree as ET
from typing import List, Tuple, Union
from fvcore.common.file_io import PathManager
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
from detectron2.config import get_cfg
from detectron2 import model_zoo
from detectron2.engine import DefaultTrainer
from detectron2.evaluation import PascalVOCDetectionEvaluator
__all__ = ["load_noisy_voc_instances", "register_pascal_voc"]

# The 20 object categories of the Pascal VOC detection benchmark; a class's
# index in this tuple is used as its category_id in the loaded annotations.
CLASS_NAMES = (
    "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
    "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
    "pottedplant", "sheep", "sofa", "train", "tvmonitor"
)
def load_noisy_voc_instances(
    dirname: str,
    split: str,
    class_names: Union[List[str], Tuple[str, ...]],
    add_noise=False
):
    """
    Load Pascal VOC detection annotations to Detectron2 format.

    In addition to the standard VOC fields, each instance carries a
    "mislabeled" flag, set when its XML object node contains a <biased>
    element (injected label noise).

    Args:
        dirname: Contain "Annotations", "ImageSets", "JPEGImages"
        split (str): one of "train", "test", "val", "trainval"
        class_names: list or tuple of class names
        add_noise: unused here — noise is read from the <biased> XML tags;
            TODO confirm whether this flag was meant to inject noise

    Returns:
        list of dicts in Detectron2's standard dataset format
    """
    # image ids for this split, one per line
    with PathManager.open(
        os.path.join(dirname, "ImageSets", "Main", split + ".txt")
    ) as f:
        fileids = np.loadtxt(f, dtype=np.str)

    # Needs to read many small annotation files. Makes sense at local
    annotation_dirname = PathManager.get_local_path(
        os.path.join(dirname, "Annotations/")
    )
    dicts = []
    for fileid in fileids:
        anno_file = os.path.join(annotation_dirname, fileid + ".xml")
        jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")

        with PathManager.open(anno_file) as f:
            tree = ET.parse(f)

        r = {
            "file_name": jpeg_file,
            "image_id": fileid,
            "height": int(tree.findall("./size/height")[0].text),
            "width": int(tree.findall("./size/width")[0].text),
        }
        instances = []

        for obj in tree.findall("object"):
            cls = obj.find("name").text
            #process annotation bias
            # presence of a <biased> tag marks the instance as label noise
            mislabeled = obj.find("biased")
            isMislabeled = False
            if mislabeled is not None:
                isMislabeled = True
            # We include "difficult" samples in training.
            # Based on limited experiments, they don't hurt accuracy.
            # difficult = int(obj.find("difficult").text)
            # if difficult == 1:
            # continue
            bbox = obj.find("bndbox")
            bbox = [
                float(bbox.find(x).text)
                for x in ["xmin", "ymin", "xmax", "ymax"]
            ]
            # Original annotations are integers in the range [1, W or H]
            # Assuming they mean 1-based pixel indices (inclusive),
            # a box with annotation (xmin=1, xmax=W) covers the whole image.
            # In coordinate space this is represented by (xmin=0, xmax=W)
            bbox[0] -= 1.0
            bbox[1] -= 1.0

            instances.append(
                {
                    "category_id": class_names.index(cls),
                    "bbox": bbox,
                    "bbox_mode": BoxMode.XYXY_ABS,
                    "mislabeled": isMislabeled
                }
            )
        r["annotations"] = instances
        dicts.append(r)
    return dicts
def register_pascal_voc(name, dirname, split, year = 2007, class_names=CLASS_NAMES):
    """Register a (possibly noisy) Pascal VOC split under ``name``.

    The loader is registered lazily with Detectron2's DatasetCatalog;
    class list, root directory, year and split are attached as metadata
    so downstream evaluators can look them up.
    """
    loader = lambda: load_noisy_voc_instances(dirname, split, class_names)
    DatasetCatalog.register(name, loader)

    metadata = MetadataCatalog.get(name)
    metadata.set(
        thing_classes=list(class_names),
        dirname=dirname,
        year=year,
        split=split,
    )
def split_pascal_voc(voc_root):
    """Randomly split the VOC image ids into two halves.

    Reads the file ids from ``<voc_root>/Annotations`` and writes them,
    shuffled, to ``ImageSets/Main/PART1.txt`` and ``PART2.txt``.
    """
    anno_dir = os.path.join(voc_root, "Annotations")
    fileids = np.array([name.split(".")[0] for name in os.listdir(anno_dir)])
    np.random.shuffle(fileids)

    midpoint = round(len(fileids) / 2)
    halves = (fileids[:midpoint], fileids[midpoint:])
    for part_name, ids in zip(("PART1", "PART2"), halves):
        out_path = os.path.join(voc_root, "ImageSets", "Main", "{}.txt".format(part_name))
        np.savetxt(out_path, ids, delimiter=" ", fmt="%s")
class PascalVOCTrainer(DefaultTrainer):
    """Trainer specialization that plugs a Pascal VOC evaluator into the
    standard training workflow provided by Detectron2's ``DefaultTrainer``.
    """

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """Return a :class:`PascalVOCDetectionEvaluator` for ``dataset_name``."""
        return PascalVOCDetectionEvaluator(dataset_name)
def train_fastrcnn_on_noisy_data_split(train_dataset_name, test_dataset_name, batch_size, num_iter, output_dir = "."):
    """Train a Faster R-CNN R50-FPN detector on one dataset split.

    Trains on ``train_dataset_name``, evaluates on ``test_dataset_name``,
    and writes checkpoints/logs below ``output_dir``.

    Returns:
        The detectron2 config used for training.
    """
    config = get_cfg()
    config.merge_from_file(
        model_zoo.get_config_file("PascalVOC-Detection/faster_rcnn_R_50_FPN.yaml")
    )
    config.DATASETS.TRAIN = (train_dataset_name,)
    config.DATASETS.TEST = (test_dataset_name,)
    config.SOLVER.IMS_PER_BATCH = batch_size
    config.SOLVER.MAX_ITER = num_iter
    # Single learning-rate decay step at 3/4 of the schedule.
    config.SOLVER.STEPS = [round(num_iter * 3 / 4)]
    config.OUTPUT_DIR = os.path.join(output_dir, "OUTPUT_{}".format(train_dataset_name))
    os.makedirs(config.OUTPUT_DIR, exist_ok=True)

    trainer = PascalVOCTrainer(config)
    trainer.resume_or_load(resume=False)
    trainer.train()
    return config
def train_fastrcnn_on_noisy_dataset(dataset_root, batch_size, num_iter, output_dir = "."):
    """Split a (noisy) VOC dataset in half and cross-train two detectors.

    Each half is used once for training and once for evaluation, which is
    the usual setup for cross-validated noise detection.

    Returns:
        list: the two detectron2 configs, one per training run.
    """
    #split voc into two parts
    splits = ["PART1", "PART2"]
    split_pascal_voc(dataset_root)
    #register splits and define configs
    part1_dataset_name = "VOC2007_{}".format(splits[0])
    register_pascal_voc(part1_dataset_name, dataset_root, splits[0])
    part2_dataset_name = "VOC2007_{}".format(splits[1])
    register_pascal_voc(part2_dataset_name, dataset_root, splits[1])
    #set fastrcnn training
    # BUGFIX: output_dir was accepted but silently ignored; forward it so
    # both runs actually write under the requested directory.
    cfg1 = train_fastrcnn_on_noisy_data_split(
        part1_dataset_name, part2_dataset_name, batch_size, num_iter, output_dir)
    cfg2 = train_fastrcnn_on_noisy_data_split(
        part2_dataset_name, part1_dataset_name, batch_size, num_iter, output_dir)
    return [cfg1, cfg2]
| [
"detectron2.evaluation.PascalVOCDetectionEvaluator",
"xml.etree.ElementTree.parse",
"detectron2.config.get_cfg",
"os.makedirs",
"fvcore.common.file_io.PathManager.open",
"os.path.join",
"detectron2.model_zoo.get_config_file",
"numpy.savetxt",
"detectron2.data.MetadataCatalog.get",
"numpy.loadtxt",... | [((3929, 3955), 'numpy.random.shuffle', 'np.random.shuffle', (['fileids'], {}), '(fileids)\n', (3946, 3955), True, 'import numpy as np\n'), ((4088, 4156), 'numpy.savetxt', 'np.savetxt', (['first_half', 'fileids[:split_bar]'], {'delimiter': '""" """', 'fmt': '"""%s"""'}), "(first_half, fileids[:split_bar], delimiter=' ', fmt='%s')\n", (4098, 4156), True, 'import numpy as np\n'), ((4252, 4321), 'numpy.savetxt', 'np.savetxt', (['second_half', 'fileids[split_bar:]'], {'delimiter': '""" """', 'fmt': '"""%s"""'}), "(second_half, fileids[split_bar:], delimiter=' ', fmt='%s')\n", (4262, 4321), True, 'import numpy as np\n'), ((4861, 4870), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (4868, 4870), False, 'from detectron2.config import get_cfg\n'), ((5289, 5331), 'os.makedirs', 'os.makedirs', (['cfg.OUTPUT_DIR'], {'exist_ok': '(True)'}), '(cfg.OUTPUT_DIR, exist_ok=True)\n', (5300, 5331), False, 'import os\n'), ((1251, 1278), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'dtype': 'np.str'}), '(f, dtype=np.str)\n', (1261, 1278), True, 'import numpy as np\n'), ((1411, 1448), 'os.path.join', 'os.path.join', (['dirname', '"""Annotations/"""'], {}), "(dirname, 'Annotations/')\n", (1423, 1448), False, 'import os\n'), ((1522, 1571), 'os.path.join', 'os.path.join', (['annotation_dirname', "(fileid + '.xml')"], {}), "(annotation_dirname, fileid + '.xml')\n", (1534, 1571), False, 'import os\n'), ((1592, 1644), 'os.path.join', 'os.path.join', (['dirname', '"""JPEGImages"""', "(fileid + '.jpg')"], {}), "(dirname, 'JPEGImages', fileid + '.jpg')\n", (1604, 1644), False, 'import os\n'), ((4667, 4708), 'detectron2.evaluation.PascalVOCDetectionEvaluator', 'PascalVOCDetectionEvaluator', (['dataset_name'], {}), '(dataset_name)\n', (4694, 4708), False, 'from detectron2.evaluation import PascalVOCDetectionEvaluator\n'), ((4895, 4969), 'detectron2.model_zoo.get_config_file', 'model_zoo.get_config_file', (['"""PascalVOC-Detection/faster_rcnn_R_50_FPN.yaml"""'], {}), 
"('PascalVOC-Detection/faster_rcnn_R_50_FPN.yaml')\n", (4920, 4969), False, 'from detectron2 import model_zoo\n'), ((1162, 1220), 'os.path.join', 'os.path.join', (['dirname', '"""ImageSets"""', '"""Main"""', "(split + '.txt')"], {}), "(dirname, 'ImageSets', 'Main', split + '.txt')\n", (1174, 1220), False, 'import os\n'), ((1660, 1687), 'fvcore.common.file_io.PathManager.open', 'PathManager.open', (['anno_file'], {}), '(anno_file)\n', (1676, 1687), False, 'from fvcore.common.file_io import PathManager\n'), ((1713, 1724), 'xml.etree.ElementTree.parse', 'ET.parse', (['f'], {}), '(f)\n', (1721, 1724), True, 'import xml.etree.ElementTree as ET\n'), ((3597, 3622), 'detectron2.data.MetadataCatalog.get', 'MetadataCatalog.get', (['name'], {}), '(name)\n', (3616, 3622), False, 'from detectron2.data import DatasetCatalog, MetadataCatalog\n'), ((3884, 3921), 'os.path.join', 'os.path.join', (['voc_root', '"""Annotations"""'], {}), "(voc_root, 'Annotations')\n", (3896, 3921), False, 'import os\n')] |
from Module import AbstractModule
class Module(AbstractModule):
    """Betsy module: fill missing values in a signal matrix with the
    per-row median of the non-missing entries."""

    def __init__(self):
        AbstractModule.__init__(self)

    def run(
        self, network, antecedents, out_attributes, user_options, num_cores,
        outfile):
        """Read the antecedent matrix, median-fill missing (None) cells,
        and write the result to ``outfile`` in tab-delimited format."""
        import numpy
        import arrayio
        from genomicode import filelib
        from Betsy import module_utils
        in_data = antecedents
        assert module_utils.is_missing(in_data.identifier), 'no missing values'
        M = arrayio.read(in_data.identifier)
        # BUGFIX: the Python 2 builtin ``file`` no longer exists; ``open``
        # is the correct way to create the output handle.
        f_out = open(outfile, 'w')
        X = M.slice()
        for i in range(M.dim()[0]):
            # BUGFIX: the old ``if j`` filter also discarded legitimate
            # zero values; only missing cells (None) should be excluded
            # when computing the row median.
            med = numpy.median([j for j in X[i] if j is not None])
            for j in range(M.dim()[1]):
                if M._X[i][j] is None:
                    M._X[i][j] = med
        arrayio.tab_delimited_format.write(M, f_out)
        f_out.close()
        assert filelib.exists_nz(outfile), (
            'the output file %s for median_fill_if_missing does not exist' % outfile
        )

    def name_outfile(self, antecedents, user_options):
        """Return the output filename derived from the input identifier."""
        from Betsy import module_utils
        original_file = module_utils.get_inputid(antecedents.identifier)
        filename = 'signal_median_fill_' + original_file + '.tdf'
        return filename
| [
"Betsy.module_utils.get_inputid",
"numpy.median",
"Betsy.module_utils.is_missing",
"arrayio.tab_delimited_format.write",
"arrayio.read",
"genomicode.filelib.exists_nz",
"Module.AbstractModule.__init__"
] | [((97, 126), 'Module.AbstractModule.__init__', 'AbstractModule.__init__', (['self'], {}), '(self)\n', (120, 126), False, 'from Module import AbstractModule\n'), ((403, 446), 'Betsy.module_utils.is_missing', 'module_utils.is_missing', (['in_data.identifier'], {}), '(in_data.identifier)\n', (426, 446), False, 'from Betsy import module_utils\n'), ((480, 512), 'arrayio.read', 'arrayio.read', (['in_data.identifier'], {}), '(in_data.identifier)\n', (492, 512), False, 'import arrayio\n'), ((799, 843), 'arrayio.tab_delimited_format.write', 'arrayio.tab_delimited_format.write', (['M', 'f_out'], {}), '(M, f_out)\n', (833, 843), False, 'import arrayio\n'), ((881, 907), 'genomicode.filelib.exists_nz', 'filelib.exists_nz', (['outfile'], {}), '(outfile)\n', (898, 907), False, 'from genomicode import filelib\n'), ((1127, 1175), 'Betsy.module_utils.get_inputid', 'module_utils.get_inputid', (['antecedents.identifier'], {}), '(antecedents.identifier)\n', (1151, 1175), False, 'from Betsy import module_utils\n'), ((624, 660), 'numpy.median', 'numpy.median', (['[j for j in X[i] if j]'], {}), '([j for j in X[i] if j])\n', (636, 660), False, 'import numpy\n')] |
#!/usr/bin/env python
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import glob
import os
import sys
try:
sys.path.append(glob.glob('**/*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
import logging
import random
try:
import pygame
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
try:
import numpy as np
except ImportError:
raise RuntimeError('cannot import numpy, make sure numpy package is installed')
try:
import queue
except ImportError:
import Queue as queue
def draw_image(surface, image):
    """Blit a CARLA BGRA camera frame onto a pygame surface."""
    raw = np.frombuffer(image.raw_data, dtype=np.uint8)
    bgra = np.reshape(raw, (image.height, image.width, 4))
    # Drop the alpha channel and reverse BGR -> RGB in one slice.
    rgb = bgra[:, :, 2::-1]
    frame_surface = pygame.surfarray.make_surface(rgb.swapaxes(0, 1))
    surface.blit(frame_surface, (0, 0))
def get_font():
    """Return a 14pt pygame font, preferring 'ubuntumono' when available."""
    available = pygame.font.get_fonts()
    preferred = 'ubuntumono'
    chosen = preferred if preferred in available else available[0]
    return pygame.font.Font(pygame.font.match_font(chosen), 14)
def should_quit():
    """Return True if the user closed the window or released ESC."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            return True
        if event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
            return True
    return False
def main():
    """Run a short CARLA synchronous-mode demo.

    Spawns a vehicle with an attached RGB camera, ticks the world manually,
    drains the camera queue to keep images in lock-step with the simulation,
    and teleports the vehicle along map waypoints. Synchronous mode is always
    disabled again and actors destroyed in the ``finally`` block.
    """
    actor_list = []
    pygame.init()

    client = carla.Client('localhost', 2000)
    client.set_timeout(2.0)

    world = client.get_world()

    print('enabling synchronous mode.')
    settings = world.get_settings()
    settings.synchronous_mode = True
    world.apply_settings(settings)

    try:
        m = world.get_map()
        start_pose = random.choice(m.get_spawn_points())
        waypoint = m.get_waypoint(start_pose.location)

        blueprint_library = world.get_blueprint_library()

        vehicle = world.spawn_actor(
            random.choice(blueprint_library.filter('vehicle.*')),
            start_pose)
        actor_list.append(vehicle)
        # The vehicle is moved by set_transform, so physics is unnecessary.
        vehicle.set_simulate_physics(False)

        camera = world.spawn_actor(
            blueprint_library.find('sensor.camera.rgb'),
            carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)),
            attach_to=vehicle)
        actor_list.append(camera)

        # Make sync queue for sensor data.
        image_queue = queue.Queue()
        camera.listen(image_queue.put)

        frame = None

        # display = pygame.display.set_mode(
        #     (800, 600),
        #     pygame.HWSURFACE | pygame.DOUBLEBUF)
        font = get_font()

        clock = pygame.time.Clock()

        while True:
            # if should_quit():
            #     return

            clock.tick()
            world.tick()
            ts = world.wait_for_tick()

            # Warn when the simulator skipped frames between our ticks.
            if frame is not None:
                if ts.frame_count != frame + 1:
                    print('frame skip!')

            frame = ts.frame_count

            # Drain the queue until the image matching this tick arrives.
            while True:
                image = image_queue.get()
                if image.frame_number == ts.frame_count:
                    break
                # BUGFIX: the message was printed with unformatted
                # %-placeholders (the values were passed to print, not to
                # the % operator); format the string properly.
                print(
                    'wrong image timestamp: frame=%d, image.frame=%d' %
                    (ts.frame_count, image.frame_number))

            waypoint = random.choice(waypoint.next(2))
            vehicle.set_transform(waypoint.transform)

            # draw_image(display, image)
            text_surface = font.render('% 5d FPS' % clock.get_fps(), True, (255, 255, 255))
            # display.blit(text_surface, (8, 10))
            # pygame.display.flip()

    finally:
        print('\ndisabling synchronous mode.')
        settings = world.get_settings()
        settings.synchronous_mode = False
        world.apply_settings(settings)

        print('destroying actors.')
        for actor in actor_list:
            actor.destroy()

        pygame.quit()
        print('done.')
# Run the synchronous-mode demo only when executed as a script.
if __name__ == '__main__':
    main()
| [
"numpy.dtype",
"pygame.font.get_fonts",
"numpy.reshape",
"pygame.init",
"pygame.quit",
"pygame.event.get",
"carla.Location",
"pygame.font.match_font",
"carla.Client",
"pygame.time.Clock",
"pygame.font.Font",
"Queue.Queue",
"carla.Rotation",
"glob.glob"
] | [((981, 1030), 'numpy.reshape', 'np.reshape', (['array', '(image.height, image.width, 4)'], {}), '(array, (image.height, image.width, 4))\n', (991, 1030), True, 'import numpy as np\n'), ((1374, 1402), 'pygame.font.match_font', 'pygame.font.match_font', (['font'], {}), '(font)\n', (1396, 1402), False, 'import pygame\n'), ((1414, 1440), 'pygame.font.Font', 'pygame.font.Font', (['font', '(14)'], {}), '(font, 14)\n', (1430, 1440), False, 'import pygame\n'), ((1479, 1497), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1495, 1497), False, 'import pygame\n'), ((1730, 1743), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1741, 1743), False, 'import pygame\n'), ((1758, 1789), 'carla.Client', 'carla.Client', (['"""localhost"""', '(2000)'], {}), "('localhost', 2000)\n", (1770, 1789), False, 'import carla\n'), ((2727, 2740), 'Queue.Queue', 'queue.Queue', ([], {}), '()\n', (2738, 2740), True, 'import Queue as queue\n'), ((2968, 2987), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (2985, 2987), False, 'import pygame\n'), ((4264, 4277), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (4275, 4277), False, 'import pygame\n'), ((302, 439), 'glob.glob', 'glob.glob', (["('**/*%d.%d-%s.egg' % (sys.version_info.major, sys.version_info.minor, \n 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))"], {}), "('**/*%d.%d-%s.egg' % (sys.version_info.major, sys.version_info.\n minor, 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))\n", (311, 439), False, 'import glob\n'), ((950, 967), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (958, 967), True, 'import numpy as np\n'), ((1243, 1266), 'pygame.font.get_fonts', 'pygame.font.get_fonts', ([], {}), '()\n', (1264, 1266), False, 'import pygame\n'), ((2537, 2566), 'carla.Location', 'carla.Location', ([], {'x': '(-5.5)', 'z': '(2.8)'}), '(x=-5.5, z=2.8)\n', (2551, 2566), False, 'import carla\n'), ((2568, 2593), 'carla.Rotation', 'carla.Rotation', ([], {'pitch': '(-15)'}), '(pitch=-15)\n', 
(2582, 2593), False, 'import carla\n')] |
from sklearn.model_selection import train_test_split
import pandas as pd
import re
import os
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.sequence import pad_sequences
import bert
def bert_encode(texts, tokenizer, max_len=510):
    """Tokenize ``texts`` for BERT and build the three fixed-width inputs.

    Args:
        texts: iterable of raw strings.
        tokenizer: object exposing ``tokenize`` and ``convert_tokens_to_ids``.
        max_len: total sequence length including [CLS] and [SEP].

    Returns:
        (token_ids, masks, segment_ids): three int arrays, each of shape
        ``(len(texts), max_len)``.
    """
    all_tokens = []
    all_masks = []
    all_segments = []

    for text in texts:
        tokens = tokenizer.tokenize(text)
        # BUGFIX: truncate so [CLS] + tokens + [SEP] never exceeds max_len;
        # previously longer texts produced a negative pad_len and rows
        # longer than max_len, breaking the rectangular output arrays.
        tokens = tokens[:max_len - 2]
        input_sequence = ["[CLS]"] + tokens + ["[SEP]"]
        pad_len = max_len - len(input_sequence)

        token_ids = tokenizer.convert_tokens_to_ids(input_sequence) + [0] * pad_len
        pad_masks = [1] * len(input_sequence) + [0] * pad_len
        segment_ids = [0] * max_len  # single-sentence input: one segment

        all_tokens.append(token_ids)
        all_masks.append(pad_masks)
        all_segments.append(segment_ids)

    return np.array(all_tokens), np.array(all_masks), np.array(all_segments)
def build_model(bert_layer, output_classes, max_len=512):
    """Build a small classification head on top of a (frozen) BERT layer.

    Args:
        bert_layer: TF-Hub KerasLayer producing (pooled_output, sequence_output).
        output_classes: number of target classes; 2 selects binary
            crossentropy, anything else categorical crossentropy.
        max_len: input sequence length.

    Returns:
        A compiled tf.keras Model taking (word_ids, mask, segment_ids).
    """
    input_word_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
    input_mask = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
    segment_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="segment_ids")

    pooled_output, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
    # Use the [CLS] token embedding as the sentence representation.
    clf_output = sequence_output[:, 0, :]
    net = tf.keras.layers.Dense(64, activation='relu')(clf_output)
    net = tf.keras.layers.Dropout(0.2)(net)
    net = tf.keras.layers.Dense(32, activation='relu')(net)
    net = tf.keras.layers.Dropout(0.2)(net)
    out = tf.keras.layers.Dense(output_classes, activation='softmax')(net)

    if output_classes == 2:
        loss = 'binary_crossentropy'
    else:
        loss = 'categorical_crossentropy'

    model = tf.keras.models.Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out)
    # BUGFIX: the `lr` keyword is deprecated (and removed in newer Keras);
    # `learning_rate` is the supported spelling with identical behavior.
    model.compile(tf.keras.optimizers.Adam(learning_rate=1e-5), loss=loss, metrics=['accuracy'])
    return model
def preprocess_txt(text):
    """Normalize ``text``: replace non-letters with spaces, drop isolated
    single letters, and collapse runs of whitespace to a single space."""
    cleanup_steps = (
        ('[^a-zA-Z]', ' '),         # punctuation and digits -> space
        (r"\s+[a-zA-Z]\s+", ' '),   # remove stray single characters
        (r'\s+', ' '),              # squeeze repeated whitespace
    )
    cleaned = text
    for pattern, replacement in cleanup_steps:
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned
# import data here as pandas df from csv
def load_data(f_path, preprocess=True):
    """
    Loads and preprocesses data from a CSV file into a pandas dataframe. Returns split test and train
    data as well as labels.
    f_path: file path to .csv file
    preprocess: Whether to preprocess data to remove special characters, spaces, punctuation, and numbers.
    """
    frame = pd.read_csv(f_path)
    frame = frame.drop(["Check"], axis=1).reset_index(drop=True)
    if frame.isnull().values.any():
        # Warn about null values rather than failing outright.
        print("W: Null values in data!")

    sentences = np.array(frame['Sentence'])
    if preprocess:
        for idx, raw in enumerate(sentences):
            sentences[idx] = preprocess_txt(raw)

    # Map the textual labels onto {left: 0, right: 1}.
    frame['Label'] = frame['Label'].replace(['left', 'right'], [0, 1])
    labels = np.array(frame['Label'])
    return train_test_split(sentences, labels, test_size=0.1, shuffle=True)
def padded_batch_dataset(sentences, labels, batch_size):
    # Wrap (sentences, labels) in a tf.data pipeline and pad each batch of
    # sentences up to the longest sequence in that batch.
    # NOTE(review): from_generator expects a callable returning an iterator
    # of examples, but this lambda returns a single (sentences, labels)
    # tuple -- and it closes over the local name ``sentences`` that is
    # rebound to the Dataset on the next line. Confirm this function is
    # actually exercised / behaves as intended before relying on it.
    sentences = tf.data.Dataset.from_generator(lambda: (sentences, labels), output_types=(tf.int32, tf.int32))
    batched_dataset = sentences.padded_batch(batch_size, padded_shapes=((None,), ()))
    return batched_dataset
# --- Script body: load data, encode with BERT, train the classifier ---
# Tokenizer class and frozen BERT encoder downloaded from TF Hub.
BertTokenizer = bert.bert_tokenization.FullTokenizer
bert_layer = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3", trainable=False)
# Build the WordPiece tokenizer from the layer's own vocabulary and casing.
vocabulary_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
to_lower = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = BertTokenizer(vocabulary_file, to_lower)
# fname = os.path.join(os.getcwd(), "\\commands_dataset.csv")
fname = "F:\\Documents\\Python Scripts\\RobotVoiceCommand\\commands_dataset.csv"
train_seq, test_seq, train_lab, test_lab = load_data(fname)
# make custom model
# define hyperparameters
# PAD_LENGTH: length (in words) of the longest training sentence.
PAD_LENGTH = len(sorted(train_seq, key=len)[-1])
OUTPUT_CLASSES = 2
NB_EPOCHS = 30
BATCH_SIZE = 8
train_input = bert_encode(train_seq, tokenizer=tokenizer, max_len=PAD_LENGTH)
test_input = bert_encode(test_seq, tokenizer=tokenizer, max_len=PAD_LENGTH)
model = build_model(bert_layer=bert_layer, output_classes=OUTPUT_CLASSES, max_len=PAD_LENGTH)
model.summary()
# Hold out 20% of the training data for validation during fit.
history = model.fit(train_input,
                    train_lab,
                    batch_size=BATCH_SIZE,
                    epochs=NB_EPOCHS,
                    validation_split=0.2)
| [
"pandas.read_csv",
"tensorflow.keras.layers.Dropout",
"tensorflow.data.Dataset.from_generator",
"tensorflow.keras.models.Model",
"tensorflow.keras.optimizers.Adam",
"numpy.array",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Input",
"tensorflow_hub.KerasLayer",
"re.sub"
] | [((3819, 3921), 'tensorflow_hub.KerasLayer', 'hub.KerasLayer', (['"""https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3"""'], {'trainable': '(False)'}), "('https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3'\n , trainable=False)\n", (3833, 3921), True, 'import tensorflow_hub as hub\n'), ((1122, 1193), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(max_len,)', 'dtype': 'tf.int32', 'name': '"""input_word_ids"""'}), "(shape=(max_len,), dtype=tf.int32, name='input_word_ids')\n", (1136, 1193), True, 'import tensorflow as tf\n'), ((1212, 1279), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(max_len,)', 'dtype': 'tf.int32', 'name': '"""input_mask"""'}), "(shape=(max_len,), dtype=tf.int32, name='input_mask')\n", (1226, 1279), True, 'import tensorflow as tf\n'), ((1299, 1367), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(max_len,)', 'dtype': 'tf.int32', 'name': '"""segment_ids"""'}), "(shape=(max_len,), dtype=tf.int32, name='segment_ids')\n", (1313, 1367), True, 'import tensorflow as tf\n'), ((1938, 2026), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': '[input_word_ids, input_mask, segment_ids]', 'outputs': 'out'}), '(inputs=[input_word_ids, input_mask, segment_ids],\n outputs=out)\n', (1959, 2026), True, 'import tensorflow as tf\n'), ((2237, 2271), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'sentence'], {}), "('[^a-zA-Z]', ' ', sentence)\n", (2243, 2271), False, 'import re\n'), ((2320, 2361), 're.sub', 're.sub', (['"""\\\\s+[a-zA-Z]\\\\s+"""', '""" """', 'sentence'], {}), "('\\\\s+[a-zA-Z]\\\\s+', ' ', sentence)\n", (2326, 2361), False, 'import re\n'), ((2409, 2438), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'sentence'], {}), "('\\\\s+', ' ', sentence)\n", (2415, 2438), False, 'import re\n'), ((2855, 2874), 'pandas.read_csv', 'pd.read_csv', (['f_path'], {}), '(f_path)\n', (2866, 2874), True, 'import pandas as pd\n'), ((3071, 3097), 'numpy.array', 'np.array', 
(["data['Sentence']"], {}), "(data['Sentence'])\n", (3079, 3097), True, 'import numpy as np\n'), ((3537, 3637), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['(lambda : (sentences, labels))'], {'output_types': '(tf.int32, tf.int32)'}), '(lambda : (sentences, labels), output_types=(\n tf.int32, tf.int32))\n', (3567, 3637), True, 'import tensorflow as tf\n'), ((971, 991), 'numpy.array', 'np.array', (['all_tokens'], {}), '(all_tokens)\n', (979, 991), True, 'import numpy as np\n'), ((993, 1012), 'numpy.array', 'np.array', (['all_masks'], {}), '(all_masks)\n', (1001, 1012), True, 'import numpy as np\n'), ((1014, 1036), 'numpy.array', 'np.array', (['all_segments'], {}), '(all_segments)\n', (1022, 1036), True, 'import numpy as np\n'), ((1516, 1560), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (1537, 1560), True, 'import tensorflow as tf\n'), ((1584, 1612), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (1607, 1612), True, 'import tensorflow as tf\n'), ((1629, 1673), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (1650, 1673), True, 'import tensorflow as tf\n'), ((1690, 1718), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (1713, 1718), True, 'import tensorflow as tf\n'), ((1735, 1794), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['output_classes'], {'activation': '"""softmax"""'}), "(output_classes, activation='softmax')\n", (1756, 1794), True, 'import tensorflow as tf\n'), ((2042, 2076), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': '(1e-05)'}), '(lr=1e-05)\n', (2066, 2076), True, 'import tensorflow as tf\n'), ((3359, 3382), 'numpy.array', 'np.array', (["data['Label']"], {}), "(data['Label'])\n", (3367, 3382), True, 'import numpy as np\n')] |
import numpy as np
from context import arkouda as ak
from base_test import ArkoudaTest
# Length/value range of the random test array and number of extreme
# values (k) requested from the mink/maxk reductions.
SIZE = 10
K = 5
def make_array():
    """Return a random arkouda array of length SIZE with values in [0, SIZE)."""
    return ak.randint(0, SIZE, SIZE)
def compare_results(akres, sortedres) -> int:
    '''
    Compares the arkouda result against the numpy reference via
    numpy.array_equal, returning 0 if the arrays match element-wise and 1
    otherwise. On mismatch, the symmetric differences are printed to help
    with debugging.

    :return: 0 (identical) or 1 (dissimilar)
    :rtype: int
    '''
    akres = akres.to_ndarray()
    if np.array_equal(akres, sortedres):
        return 0

    akres = ak.array(akres)
    sortedres = ak.array(sortedres)
    missing_from_ak = sortedres[ak.in1d(ak.array(sortedres), ak.array(akres), True)]
    missing_from_np = akres[ak.in1d(ak.array(akres), ak.array(sortedres), True)]
    print(f"(values in np but not ak: {missing_from_ak}) (values in ak but not np: {missing_from_np})")
    return 1
def run_test(runMin=True, isInd=True, verbose=True):
    '''
    Runs a mink/maxk/argmink/argmaxk reduction on a random array and
    compares the arkouda result against a sorted numpy reference.

    :param runMin: select the minimum (True) or maximum (False) reduction
    :param isInd: select the index-returning (argmink/argmaxk) variant
    :param verbose: print arkouda errors when they occur
    :return: number of failures (0 or 1)
    '''
    aka = make_array()
    try:
        if isInd:
            reducer = ak.argmink if runMin else ak.argmaxk
            akres = aka[reducer(aka, K)]
        else:
            reducer = ak.mink if runMin else ak.maxk
            akres = reducer(aka, K)
        # Reference: the K smallest (or largest) values from a full sort.
        ordered = np.sort(aka.to_ndarray())
        npres = ordered[:K] if runMin else ordered[-K:]
    except RuntimeError as E:
        if verbose: print("Arkouda error: ", E)
        return 1

    return compare_results(akres, npres)
class MinKTest(ArkoudaTest):

    def test_mink(self):
        '''
        Runs run_test and asserts that the arkouda result matches the
        numpy reference (zero failures).
        NOTE(review): run_test defaults to isInd=True, so this exercises
        the argmink path rather than mink -- confirm that is intended.
        '''
        self.assertEqual(0, run_test())

    def test_error_handling(self):
        '''Verifies ak.mink rejects bad argument types and values.'''
        testArray = ak.randint(0, 100, 100)

        with self.assertRaises(TypeError) as err:
            ak.mink(list(range(0,10)), 1)
        self.assertEqual('type of argument "pda" must be arkouda.pdarrayclass.pdarray; got list instead',
                         err.exception.args[0])

        with self.assertRaises(TypeError) as err:
            ak.mink(testArray, '1')
        self.assertEqual('type of argument "k" must be one of (int, int64); got str instead',
                         err.exception.args[0])

        with self.assertRaises(ValueError) as err:
            ak.mink(testArray, -1)
        self.assertEqual("k must be 1 or greater",
                         err.exception.args[0])

        with self.assertRaises(ValueError) as err:
            ak.mink(ak.array([]), 1)
        self.assertEqual("must be a non-empty pdarray of type int or float",
                         err.exception.args[0])
class MaxKTest(ArkoudaTest):

    def test_maxk(self):
        '''
        Runs run_test with runMin=False and asserts that the arkouda result
        matches the numpy reference (zero failures).
        NOTE(review): run_test defaults to isInd=True, so this exercises
        the argmaxk path rather than maxk -- confirm that is intended.
        '''
        self.assertEqual(0, run_test(runMin=False))

    def test_error_handling(self):
        '''Verifies ak.maxk rejects bad argument types and values.'''
        testArray = ak.randint(0, 100, 100)

        with self.assertRaises(TypeError) as err:
            ak.maxk(list(range(0,10)), 1)
        self.assertEqual('type of argument "pda" must be arkouda.pdarrayclass.pdarray; got list instead',
                         err.exception.args[0])

        with self.assertRaises(TypeError) as err:
            ak.maxk(testArray, '1')
        self.assertEqual('type of argument "k" must be one of (int, int64); got str instead',
                         err.exception.args[0])

        with self.assertRaises(ValueError) as err:
            ak.maxk(testArray, -1)
        self.assertEqual("k must be 1 or greater",
                         err.exception.args[0])

        with self.assertRaises(ValueError) as err:
            ak.maxk(ak.array([]), 1)
        self.assertEqual("must be a non-empty pdarray of type int or float",
                         err.exception.args[0])
class ArgMinKTest(ArkoudaTest):

    def test_argmink(self):
        '''
        Runs run_test on the index-returning argmink path and asserts that
        the arkouda result matches the numpy reference (zero failures).
        '''
        self.assertEqual(0, run_test(isInd=True))

    def test_error_handling(self):
        '''Verifies ak.argmink rejects bad argument types and values.'''
        testArray = ak.randint(0, 100, 100)

        with self.assertRaises(TypeError) as err:
            ak.argmink(list(range(0,10)), 1)
        self.assertEqual('type of argument "pda" must be arkouda.pdarrayclass.pdarray; got list instead',
                         err.exception.args[0])

        with self.assertRaises(TypeError) as err:
            ak.argmink(testArray, '1')
        self.assertEqual('type of argument "k" must be one of (int, int64); got str instead',
                         err.exception.args[0])

        with self.assertRaises(ValueError) as err:
            ak.argmink(testArray, -1)
        self.assertEqual("k must be 1 or greater",
                         err.exception.args[0])

        with self.assertRaises(ValueError) as err:
            ak.argmink(ak.array([]), 1)
        self.assertEqual("must be a non-empty pdarray of type int or float",
                         err.exception.args[0])
class ArgMaxKTest(ArkoudaTest):

    def test_argmaxk(self):
        '''
        Runs run_test on the index-returning argmaxk path and asserts that
        the arkouda result matches the numpy reference (zero failures).
        '''
        self.assertEqual(0, run_test(runMin=False, isInd=True))

    def test_error_handling(self):
        '''Verifies ak.argmaxk rejects bad argument types and values.'''
        testArray = ak.randint(0, 100, 100)

        with self.assertRaises(TypeError) as err:
            ak.argmaxk(list(range(0,10)), 1)
        self.assertEqual('type of argument "pda" must be arkouda.pdarrayclass.pdarray; got list instead',
                         err.exception.args[0])

        with self.assertRaises(TypeError) as err:
            ak.argmaxk(testArray, '1')
        self.assertEqual('type of argument "k" must be one of (int, int64); got str instead',
                         err.exception.args[0])

        with self.assertRaises(ValueError) as err:
            ak.argmaxk(testArray, -1)
        self.assertEqual("k must be 1 or greater",
                         err.exception.args[0])

        with self.assertRaises(ValueError) as err:
            ak.argmaxk(ak.array([]), 1)
        self.assertEqual("must be a non-empty pdarray of type int or float",
                         err.exception.args[0])
| [
"context.arkouda.argmink",
"context.arkouda.array",
"context.arkouda.mink",
"context.arkouda.maxk",
"numpy.array_equal",
"context.arkouda.argmaxk",
"context.arkouda.randint"
] | [((131, 156), 'context.arkouda.randint', 'ak.randint', (['(0)', 'SIZE', 'SIZE'], {}), '(0, SIZE, SIZE)\n', (141, 156), True, 'from context import arkouda as ak\n'), ((584, 616), 'numpy.array_equal', 'np.array_equal', (['akres', 'sortedres'], {}), '(akres, sortedres)\n', (598, 616), True, 'import numpy as np\n'), ((634, 649), 'context.arkouda.array', 'ak.array', (['akres'], {}), '(akres)\n', (642, 649), True, 'from context import arkouda as ak\n'), ((670, 689), 'context.arkouda.array', 'ak.array', (['sortedres'], {}), '(sortedres)\n', (678, 689), True, 'from context import arkouda as ak\n'), ((2466, 2489), 'context.arkouda.randint', 'ak.randint', (['(0)', '(100)', '(100)'], {}), '(0, 100, 100)\n', (2476, 2489), True, 'from context import arkouda as ak\n'), ((3783, 3806), 'context.arkouda.randint', 'ak.randint', (['(0)', '(100)', '(100)'], {}), '(0, 100, 100)\n', (3793, 3806), True, 'from context import arkouda as ak\n'), ((5104, 5127), 'context.arkouda.randint', 'ak.randint', (['(0)', '(100)', '(100)'], {}), '(0, 100, 100)\n', (5114, 5127), True, 'from context import arkouda as ak\n'), ((6447, 6470), 'context.arkouda.randint', 'ak.randint', (['(0)', '(100)', '(100)'], {}), '(0, 100, 100)\n', (6457, 6470), True, 'from context import arkouda as ak\n'), ((2800, 2823), 'context.arkouda.mink', 'ak.mink', (['testArray', '"""1"""'], {}), "(testArray, '1')\n", (2807, 2823), True, 'from context import arkouda as ak\n'), ((3043, 3065), 'context.arkouda.mink', 'ak.mink', (['testArray', '(-1)'], {}), '(testArray, -1)\n', (3050, 3065), True, 'from context import arkouda as ak\n'), ((4117, 4140), 'context.arkouda.maxk', 'ak.maxk', (['testArray', '"""1"""'], {}), "(testArray, '1')\n", (4124, 4140), True, 'from context import arkouda as ak\n'), ((4360, 4382), 'context.arkouda.maxk', 'ak.maxk', (['testArray', '(-1)'], {}), '(testArray, -1)\n', (4367, 4382), True, 'from context import arkouda as ak\n'), ((5441, 5467), 'context.arkouda.argmink', 'ak.argmink', (['testArray', 
'"""1"""'], {}), "(testArray, '1')\n", (5451, 5467), True, 'from context import arkouda as ak\n'), ((5687, 5712), 'context.arkouda.argmink', 'ak.argmink', (['testArray', '(-1)'], {}), '(testArray, -1)\n', (5697, 5712), True, 'from context import arkouda as ak\n'), ((6792, 6818), 'context.arkouda.argmaxk', 'ak.argmaxk', (['testArray', '"""1"""'], {}), "(testArray, '1')\n", (6802, 6818), True, 'from context import arkouda as ak\n'), ((7038, 7063), 'context.arkouda.argmaxk', 'ak.argmaxk', (['testArray', '(-1)'], {}), '(testArray, -1)\n', (7048, 7063), True, 'from context import arkouda as ak\n'), ((723, 742), 'context.arkouda.array', 'ak.array', (['sortedres'], {}), '(sortedres)\n', (731, 742), True, 'from context import arkouda as ak\n'), ((744, 759), 'context.arkouda.array', 'ak.array', (['akres'], {}), '(akres)\n', (752, 759), True, 'from context import arkouda as ak\n'), ((836, 851), 'context.arkouda.array', 'ak.array', (['akres'], {}), '(akres)\n', (844, 851), True, 'from context import arkouda as ak\n'), ((853, 872), 'context.arkouda.array', 'ak.array', (['sortedres'], {}), '(sortedres)\n', (861, 872), True, 'from context import arkouda as ak\n'), ((1331, 1346), 'context.arkouda.mink', 'ak.mink', (['aka', 'K'], {}), '(aka, K)\n', (1338, 1346), True, 'from context import arkouda as ak\n'), ((1480, 1495), 'context.arkouda.maxk', 'ak.maxk', (['aka', 'K'], {}), '(aka, K)\n', (1487, 1495), True, 'from context import arkouda as ak\n'), ((3247, 3259), 'context.arkouda.array', 'ak.array', (['[]'], {}), '([])\n', (3255, 3259), True, 'from context import arkouda as ak\n'), ((4564, 4576), 'context.arkouda.array', 'ak.array', (['[]'], {}), '([])\n', (4572, 4576), True, 'from context import arkouda as ak\n'), ((5897, 5909), 'context.arkouda.array', 'ak.array', (['[]'], {}), '([])\n', (5905, 5909), True, 'from context import arkouda as ak\n'), ((7251, 7263), 'context.arkouda.array', 'ak.array', (['[]'], {}), '([])\n', (7259, 7263), True, 'from context import arkouda as 
ak\n'), ((1652, 1670), 'context.arkouda.argmink', 'ak.argmink', (['aka', 'K'], {}), '(aka, K)\n', (1662, 1670), True, 'from context import arkouda as ak\n'), ((1809, 1827), 'context.arkouda.argmaxk', 'ak.argmaxk', (['aka', 'K'], {}), '(aka, K)\n', (1819, 1827), True, 'from context import arkouda as ak\n')] |
# coding=utf-8
import sys
import utils.utils
import numpy as np
class Dataset():
    """Load the words of a text file and slice them into fixed-length windows.

    Each window is a pair ``(X_unit, Y_unit)`` where ``X_unit`` holds
    ``dataset_len`` consecutive words starting at some position and
    ``Y_unit`` is derived from the word immediately following the window
    (``[0]`` padding is used once the window runs off the end of the text).
    """

    def __init__(self, filename, dataset_len=100, read_step=3):
        # Source file plus the windowing parameters (window length and the
        # stride between consecutive window start positions).
        self.fn = filename
        self.dataset_len = dataset_len
        self.read_step = read_step
        self.words = []
        self.read_words()

    def read_words(self):
        """Populate ``self.words`` from the article at ``self.fn``."""
        assert self.fn is not None, "[ERROR]filename is empty!"
        self.words = utils.utils.get_all_words_in_article(self.fn)

    def get_data(self, data_len=None):
        """Return ``(X, Y)`` lists of windows over ``self.words``.

        ``data_len`` overrides ``self.dataset_len`` for this call when given.
        """
        assert len(self.words) > 0, "[ERROR]input words is empty!"
        window = self.dataset_len if data_len is None else data_len
        xs, ys = [], []
        # Slide a window across the text, advancing read_step words each time.
        for start in range(0, len(self.words) - 1, self.read_step):
            x_unit, y_unit = self.__get_str_unit(start, window)
            xs.append(x_unit)
            ys.append(y_unit)
        return xs, ys

    def __get_str_unit(self, pos, dataset_len):
        """Build one ``(X_unit, Y_unit)`` window starting at ``pos``."""
        assert 0 <= pos < len(
            self.words), "[ERROR]__get_str_unit() position input error."
        end = pos + dataset_len
        if end < len(self.words):
            # Full window available; the target is list() of the next word
            # (NOTE(review): list() of a multi-character word splits it into
            # characters — preserved from the original, confirm intent).
            return list(self.words[pos:end]), list(self.words[end])
        # Window runs past the end: zero-pad X and use [0] as the target.
        tail = list(self.words[pos:])
        return tail + [0] * (dataset_len - len(tail)), [0]
class TrainDataset(Dataset):
    """Dataset that maps word windows to index arrays via ``wdb.words2idx``.

    ``wdb`` is presumably a word-to-index database; its only use here is the
    ``words2idx`` call — confirm its contract against the caller.
    """

    def __init__(self, filename, wdb, dataset_len=100, read_step=3):
        super(TrainDataset, self).__init__(filename, dataset_len, read_step)
        self.wdb = wdb

    def get_data(self, data_len=None):
        """Return ``(X, Y)`` numpy arrays of word indices, printing progress.

        Converts every window produced by ``Dataset.get_data`` through
        ``self.wdb.words2idx`` and emits a percentage tick roughly every
        1% of the work to stdout.
        """
        X, Y = super(TrainDataset, self).get_data(data_len)
        X_out = []
        Y_out = []
        data_cnt = len(X)
        # BUGFIX: int(data_cnt/100) is 0 whenever data_cnt < 100, so the
        # `i % out_cnt` below raised ZeroDivisionError on small datasets.
        # Clamp the progress interval to at least 1.
        out_cnt = max(1, data_cnt // 100)
        for i in range(data_cnt):
            X_out.append(self.wdb.words2idx(X[i]))
            Y_out.append(self.wdb.words2idx(Y[i]))
            if i % out_cnt == 0:
                print("%d%%." % (100 * i / data_cnt), end="")
                sys.stdout.flush()
        return np.array(X_out), np.array(Y_out)
| [
"numpy.array",
"sys.stdout.flush"
] | [((2333, 2348), 'numpy.array', 'np.array', (['X_out'], {}), '(X_out)\n', (2341, 2348), True, 'import numpy as np\n'), ((2350, 2365), 'numpy.array', 'np.array', (['Y_out'], {}), '(Y_out)\n', (2358, 2365), True, 'import numpy as np\n'), ((2299, 2317), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2315, 2317), False, 'import sys\n')] |
import numpy
import math
from timeit import default_timer as timer
import willump.evaluation.willump_executor
@willump.evaluation.willump_executor.willump_execute()
def process_row(input_numpy_array):
return_numpy_array = numpy.zeros(516)
return_numpy_array[0] = input_numpy_array[68] / math.sqrt(input_numpy_array[68])
return_numpy_array[1] = input_numpy_array[41] / math.sqrt(input_numpy_array[71])
return_numpy_array[2] = input_numpy_array[50] / math.sqrt(input_numpy_array[55])
return_numpy_array[3] = input_numpy_array[78] / math.sqrt(input_numpy_array[82])
return_numpy_array[4] = input_numpy_array[6] / math.sqrt(input_numpy_array[76])
return_numpy_array[5] = input_numpy_array[21] / math.sqrt(input_numpy_array[99])
return_numpy_array[6] = input_numpy_array[15] / math.sqrt(input_numpy_array[73])
return_numpy_array[7] = input_numpy_array[45] / math.sqrt(input_numpy_array[39])
return_numpy_array[8] = input_numpy_array[89] / math.sqrt(input_numpy_array[80])
return_numpy_array[9] = input_numpy_array[21] / math.sqrt(input_numpy_array[27])
return_numpy_array[10] = input_numpy_array[64] / math.sqrt(input_numpy_array[82])
return_numpy_array[11] = input_numpy_array[53] / math.sqrt(input_numpy_array[15])
return_numpy_array[12] = input_numpy_array[15] / math.sqrt(input_numpy_array[95])
return_numpy_array[13] = input_numpy_array[52] / math.sqrt(input_numpy_array[93])
return_numpy_array[14] = input_numpy_array[65] / math.sqrt(input_numpy_array[100])
return_numpy_array[15] = input_numpy_array[47] / math.sqrt(input_numpy_array[19])
return_numpy_array[16] = input_numpy_array[43] / math.sqrt(input_numpy_array[17])
return_numpy_array[17] = input_numpy_array[37] / math.sqrt(input_numpy_array[49])
return_numpy_array[18] = input_numpy_array[17] / math.sqrt(input_numpy_array[85])
return_numpy_array[19] = input_numpy_array[33] / math.sqrt(input_numpy_array[85])
return_numpy_array[20] = input_numpy_array[53] / math.sqrt(input_numpy_array[10])
return_numpy_array[21] = input_numpy_array[50] / math.sqrt(input_numpy_array[12])
return_numpy_array[22] = input_numpy_array[46] / math.sqrt(input_numpy_array[13])
return_numpy_array[23] = input_numpy_array[85] / math.sqrt(input_numpy_array[0])
return_numpy_array[24] = input_numpy_array[31] / math.sqrt(input_numpy_array[84])
return_numpy_array[25] = input_numpy_array[77] / math.sqrt(input_numpy_array[93])
return_numpy_array[26] = input_numpy_array[7] / math.sqrt(input_numpy_array[89])
return_numpy_array[27] = input_numpy_array[62] / math.sqrt(input_numpy_array[31])
return_numpy_array[28] = input_numpy_array[17] / math.sqrt(input_numpy_array[57])
return_numpy_array[29] = input_numpy_array[100] / math.sqrt(input_numpy_array[64])
return_numpy_array[30] = input_numpy_array[74] / math.sqrt(input_numpy_array[83])
return_numpy_array[31] = input_numpy_array[24] / math.sqrt(input_numpy_array[32])
return_numpy_array[32] = input_numpy_array[33] / math.sqrt(input_numpy_array[34])
return_numpy_array[33] = input_numpy_array[25] / math.sqrt(input_numpy_array[9])
return_numpy_array[34] = input_numpy_array[59] / math.sqrt(input_numpy_array[40])
return_numpy_array[35] = input_numpy_array[42] / math.sqrt(input_numpy_array[63])
return_numpy_array[36] = input_numpy_array[41] / math.sqrt(input_numpy_array[78])
return_numpy_array[37] = input_numpy_array[56] / math.sqrt(input_numpy_array[40])
return_numpy_array[38] = input_numpy_array[42] / math.sqrt(input_numpy_array[47])
return_numpy_array[39] = input_numpy_array[33] / math.sqrt(input_numpy_array[8])
return_numpy_array[40] = input_numpy_array[16] / math.sqrt(input_numpy_array[15])
return_numpy_array[41] = input_numpy_array[93] / math.sqrt(input_numpy_array[52])
return_numpy_array[42] = input_numpy_array[44] / math.sqrt(input_numpy_array[57])
return_numpy_array[43] = input_numpy_array[16] / math.sqrt(input_numpy_array[39])
return_numpy_array[44] = input_numpy_array[19] / math.sqrt(input_numpy_array[65])
return_numpy_array[45] = input_numpy_array[59] / math.sqrt(input_numpy_array[40])
return_numpy_array[46] = input_numpy_array[32] / math.sqrt(input_numpy_array[59])
return_numpy_array[47] = input_numpy_array[76] / math.sqrt(input_numpy_array[71])
return_numpy_array[48] = input_numpy_array[34] / math.sqrt(input_numpy_array[56])
return_numpy_array[49] = input_numpy_array[67] / math.sqrt(input_numpy_array[10])
return_numpy_array[50] = input_numpy_array[32] / math.sqrt(input_numpy_array[45])
return_numpy_array[51] = input_numpy_array[29] / math.sqrt(input_numpy_array[83])
return_numpy_array[52] = input_numpy_array[37] / math.sqrt(input_numpy_array[56])
return_numpy_array[53] = input_numpy_array[94] / math.sqrt(input_numpy_array[67])
return_numpy_array[54] = input_numpy_array[64] / math.sqrt(input_numpy_array[78])
return_numpy_array[55] = input_numpy_array[20] / math.sqrt(input_numpy_array[68])
return_numpy_array[56] = input_numpy_array[10] / math.sqrt(input_numpy_array[10])
return_numpy_array[57] = input_numpy_array[93] / math.sqrt(input_numpy_array[12])
return_numpy_array[58] = input_numpy_array[56] / math.sqrt(input_numpy_array[7])
return_numpy_array[59] = input_numpy_array[93] / math.sqrt(input_numpy_array[100])
return_numpy_array[60] = input_numpy_array[78] / math.sqrt(input_numpy_array[83])
return_numpy_array[61] = input_numpy_array[37] / math.sqrt(input_numpy_array[61])
return_numpy_array[62] = input_numpy_array[61] / math.sqrt(input_numpy_array[58])
return_numpy_array[63] = input_numpy_array[72] / math.sqrt(input_numpy_array[61])
return_numpy_array[64] = input_numpy_array[67] / math.sqrt(input_numpy_array[70])
return_numpy_array[65] = input_numpy_array[59] / math.sqrt(input_numpy_array[75])
return_numpy_array[66] = input_numpy_array[45] / math.sqrt(input_numpy_array[38])
return_numpy_array[67] = input_numpy_array[56] / math.sqrt(input_numpy_array[71])
return_numpy_array[68] = input_numpy_array[3] / math.sqrt(input_numpy_array[69])
return_numpy_array[69] = input_numpy_array[5] / math.sqrt(input_numpy_array[38])
return_numpy_array[70] = input_numpy_array[91] / math.sqrt(input_numpy_array[84])
return_numpy_array[71] = input_numpy_array[0] / math.sqrt(input_numpy_array[87])
return_numpy_array[72] = input_numpy_array[99] / math.sqrt(input_numpy_array[98])
return_numpy_array[73] = input_numpy_array[58] / math.sqrt(input_numpy_array[37])
return_numpy_array[74] = input_numpy_array[70] / math.sqrt(input_numpy_array[63])
return_numpy_array[75] = input_numpy_array[85] / math.sqrt(input_numpy_array[74])
return_numpy_array[76] = input_numpy_array[91] / math.sqrt(input_numpy_array[64])
return_numpy_array[77] = input_numpy_array[94] / math.sqrt(input_numpy_array[81])
return_numpy_array[78] = input_numpy_array[73] / math.sqrt(input_numpy_array[86])
return_numpy_array[79] = input_numpy_array[25] / math.sqrt(input_numpy_array[98])
return_numpy_array[80] = input_numpy_array[65] / math.sqrt(input_numpy_array[37])
return_numpy_array[81] = input_numpy_array[20] / math.sqrt(input_numpy_array[87])
return_numpy_array[82] = input_numpy_array[70] / math.sqrt(input_numpy_array[76])
return_numpy_array[83] = input_numpy_array[34] / math.sqrt(input_numpy_array[28])
return_numpy_array[84] = input_numpy_array[50] / math.sqrt(input_numpy_array[57])
return_numpy_array[85] = input_numpy_array[41] / math.sqrt(input_numpy_array[4])
return_numpy_array[86] = input_numpy_array[83] / math.sqrt(input_numpy_array[29])
return_numpy_array[87] = input_numpy_array[67] / math.sqrt(input_numpy_array[14])
return_numpy_array[88] = input_numpy_array[73] / math.sqrt(input_numpy_array[17])
return_numpy_array[89] = input_numpy_array[3] / math.sqrt(input_numpy_array[31])
return_numpy_array[90] = input_numpy_array[49] / math.sqrt(input_numpy_array[24])
return_numpy_array[91] = input_numpy_array[96] / math.sqrt(input_numpy_array[29])
return_numpy_array[92] = input_numpy_array[12] / math.sqrt(input_numpy_array[30])
return_numpy_array[93] = input_numpy_array[29] / math.sqrt(input_numpy_array[55])
return_numpy_array[94] = input_numpy_array[13] / math.sqrt(input_numpy_array[51])
return_numpy_array[95] = input_numpy_array[9] / math.sqrt(input_numpy_array[18])
return_numpy_array[96] = input_numpy_array[80] / math.sqrt(input_numpy_array[64])
return_numpy_array[97] = input_numpy_array[67] / math.sqrt(input_numpy_array[30])
return_numpy_array[98] = input_numpy_array[53] / math.sqrt(input_numpy_array[61])
return_numpy_array[99] = input_numpy_array[65] / math.sqrt(input_numpy_array[50])
return_numpy_array[100] = input_numpy_array[21] / math.sqrt(input_numpy_array[67])
return_numpy_array[101] = input_numpy_array[68] / math.sqrt(input_numpy_array[30])
return_numpy_array[102] = input_numpy_array[2] / math.sqrt(input_numpy_array[85])
return_numpy_array[103] = input_numpy_array[41] / math.sqrt(input_numpy_array[81])
return_numpy_array[104] = input_numpy_array[86] / math.sqrt(input_numpy_array[43])
return_numpy_array[105] = input_numpy_array[33] / math.sqrt(input_numpy_array[70])
return_numpy_array[106] = input_numpy_array[29] / math.sqrt(input_numpy_array[30])
return_numpy_array[107] = input_numpy_array[6] / math.sqrt(input_numpy_array[80])
return_numpy_array[108] = input_numpy_array[3] / math.sqrt(input_numpy_array[62])
return_numpy_array[109] = input_numpy_array[72] / math.sqrt(input_numpy_array[68])
return_numpy_array[110] = input_numpy_array[15] / math.sqrt(input_numpy_array[25])
return_numpy_array[111] = input_numpy_array[43] / math.sqrt(input_numpy_array[22])
return_numpy_array[112] = input_numpy_array[20] / math.sqrt(input_numpy_array[71])
return_numpy_array[113] = input_numpy_array[33] / math.sqrt(input_numpy_array[32])
return_numpy_array[114] = input_numpy_array[74] / math.sqrt(input_numpy_array[10])
return_numpy_array[115] = input_numpy_array[37] / math.sqrt(input_numpy_array[95])
return_numpy_array[116] = input_numpy_array[43] / math.sqrt(input_numpy_array[34])
return_numpy_array[117] = input_numpy_array[21] / math.sqrt(input_numpy_array[75])
return_numpy_array[118] = input_numpy_array[31] / math.sqrt(input_numpy_array[90])
return_numpy_array[119] = input_numpy_array[23] / math.sqrt(input_numpy_array[78])
return_numpy_array[120] = input_numpy_array[22] / math.sqrt(input_numpy_array[28])
return_numpy_array[121] = input_numpy_array[44] / math.sqrt(input_numpy_array[71])
return_numpy_array[122] = input_numpy_array[12] / math.sqrt(input_numpy_array[30])
return_numpy_array[123] = input_numpy_array[78] / math.sqrt(input_numpy_array[87])
return_numpy_array[124] = input_numpy_array[56] / math.sqrt(input_numpy_array[37])
return_numpy_array[125] = input_numpy_array[31] / math.sqrt(input_numpy_array[77])
return_numpy_array[126] = input_numpy_array[89] / math.sqrt(input_numpy_array[88])
return_numpy_array[127] = input_numpy_array[83] / math.sqrt(input_numpy_array[87])
return_numpy_array[128] = input_numpy_array[41] / math.sqrt(input_numpy_array[96])
return_numpy_array[129] = input_numpy_array[2] / math.sqrt(input_numpy_array[2])
return_numpy_array[130] = input_numpy_array[18] / math.sqrt(input_numpy_array[96])
return_numpy_array[131] = input_numpy_array[42] / math.sqrt(input_numpy_array[93])
return_numpy_array[132] = input_numpy_array[71] / math.sqrt(input_numpy_array[3])
return_numpy_array[133] = input_numpy_array[27] / math.sqrt(input_numpy_array[88])
return_numpy_array[134] = input_numpy_array[45] / math.sqrt(input_numpy_array[74])
return_numpy_array[135] = input_numpy_array[88] / math.sqrt(input_numpy_array[24])
return_numpy_array[136] = input_numpy_array[98] / math.sqrt(input_numpy_array[47])
return_numpy_array[137] = input_numpy_array[86] / math.sqrt(input_numpy_array[48])
return_numpy_array[138] = input_numpy_array[70] / math.sqrt(input_numpy_array[74])
return_numpy_array[139] = input_numpy_array[19] / math.sqrt(input_numpy_array[20])
return_numpy_array[140] = input_numpy_array[75] / math.sqrt(input_numpy_array[92])
return_numpy_array[141] = input_numpy_array[78] / math.sqrt(input_numpy_array[5])
return_numpy_array[142] = input_numpy_array[5] / math.sqrt(input_numpy_array[6])
return_numpy_array[143] = input_numpy_array[67] / math.sqrt(input_numpy_array[48])
return_numpy_array[144] = input_numpy_array[26] / math.sqrt(input_numpy_array[22])
return_numpy_array[145] = input_numpy_array[85] / math.sqrt(input_numpy_array[42])
return_numpy_array[146] = input_numpy_array[16] / math.sqrt(input_numpy_array[47])
return_numpy_array[147] = input_numpy_array[56] / math.sqrt(input_numpy_array[79])
return_numpy_array[148] = input_numpy_array[52] / math.sqrt(input_numpy_array[90])
return_numpy_array[149] = input_numpy_array[67] / math.sqrt(input_numpy_array[94])
return_numpy_array[150] = input_numpy_array[24] / math.sqrt(input_numpy_array[87])
return_numpy_array[151] = input_numpy_array[76] / math.sqrt(input_numpy_array[73])
return_numpy_array[152] = input_numpy_array[23] / math.sqrt(input_numpy_array[78])
return_numpy_array[153] = input_numpy_array[5] / math.sqrt(input_numpy_array[16])
return_numpy_array[154] = input_numpy_array[86] / math.sqrt(input_numpy_array[59])
return_numpy_array[155] = input_numpy_array[35] / math.sqrt(input_numpy_array[65])
return_numpy_array[156] = input_numpy_array[3] / math.sqrt(input_numpy_array[64])
return_numpy_array[157] = input_numpy_array[51] / math.sqrt(input_numpy_array[62])
return_numpy_array[158] = input_numpy_array[6] / math.sqrt(input_numpy_array[88])
return_numpy_array[159] = input_numpy_array[89] / math.sqrt(input_numpy_array[10])
return_numpy_array[160] = input_numpy_array[59] / math.sqrt(input_numpy_array[94])
return_numpy_array[161] = input_numpy_array[16] / math.sqrt(input_numpy_array[0])
return_numpy_array[162] = input_numpy_array[49] / math.sqrt(input_numpy_array[87])
return_numpy_array[163] = input_numpy_array[0] / math.sqrt(input_numpy_array[64])
return_numpy_array[164] = input_numpy_array[31] / math.sqrt(input_numpy_array[76])
return_numpy_array[165] = input_numpy_array[93] / math.sqrt(input_numpy_array[86])
return_numpy_array[166] = input_numpy_array[31] / math.sqrt(input_numpy_array[54])
return_numpy_array[167] = input_numpy_array[60] / math.sqrt(input_numpy_array[35])
return_numpy_array[168] = input_numpy_array[80] / math.sqrt(input_numpy_array[5])
return_numpy_array[169] = input_numpy_array[5] / math.sqrt(input_numpy_array[88])
return_numpy_array[170] = input_numpy_array[70] / math.sqrt(input_numpy_array[92])
return_numpy_array[171] = input_numpy_array[100] / math.sqrt(input_numpy_array[53])
return_numpy_array[172] = input_numpy_array[30] / math.sqrt(input_numpy_array[61])
return_numpy_array[173] = input_numpy_array[50] / math.sqrt(input_numpy_array[40])
return_numpy_array[174] = input_numpy_array[13] / math.sqrt(input_numpy_array[62])
return_numpy_array[175] = input_numpy_array[81] / math.sqrt(input_numpy_array[5])
return_numpy_array[176] = input_numpy_array[21] / math.sqrt(input_numpy_array[18])
return_numpy_array[177] = input_numpy_array[68] / math.sqrt(input_numpy_array[16])
return_numpy_array[178] = input_numpy_array[76] / math.sqrt(input_numpy_array[97])
return_numpy_array[179] = input_numpy_array[30] / math.sqrt(input_numpy_array[8])
return_numpy_array[180] = input_numpy_array[69] / math.sqrt(input_numpy_array[72])
return_numpy_array[181] = input_numpy_array[79] / math.sqrt(input_numpy_array[72])
return_numpy_array[182] = input_numpy_array[84] / math.sqrt(input_numpy_array[36])
return_numpy_array[183] = input_numpy_array[74] / math.sqrt(input_numpy_array[28])
return_numpy_array[184] = input_numpy_array[97] / math.sqrt(input_numpy_array[78])
return_numpy_array[185] = input_numpy_array[56] / math.sqrt(input_numpy_array[18])
return_numpy_array[186] = input_numpy_array[63] / math.sqrt(input_numpy_array[50])
return_numpy_array[187] = input_numpy_array[57] / math.sqrt(input_numpy_array[29])
return_numpy_array[188] = input_numpy_array[26] / math.sqrt(input_numpy_array[41])
return_numpy_array[189] = input_numpy_array[24] / math.sqrt(input_numpy_array[53])
return_numpy_array[190] = input_numpy_array[93] / math.sqrt(input_numpy_array[53])
return_numpy_array[191] = input_numpy_array[23] / math.sqrt(input_numpy_array[7])
return_numpy_array[192] = input_numpy_array[27] / math.sqrt(input_numpy_array[96])
return_numpy_array[193] = input_numpy_array[67] / math.sqrt(input_numpy_array[81])
return_numpy_array[194] = input_numpy_array[27] / math.sqrt(input_numpy_array[54])
return_numpy_array[195] = input_numpy_array[81] / math.sqrt(input_numpy_array[27])
return_numpy_array[196] = input_numpy_array[99] / math.sqrt(input_numpy_array[34])
return_numpy_array[197] = input_numpy_array[86] / math.sqrt(input_numpy_array[96])
return_numpy_array[198] = input_numpy_array[1] / math.sqrt(input_numpy_array[77])
return_numpy_array[199] = input_numpy_array[32] / math.sqrt(input_numpy_array[71])
return_numpy_array[200] = input_numpy_array[31] / math.sqrt(input_numpy_array[59])
return_numpy_array[201] = input_numpy_array[22] / math.sqrt(input_numpy_array[90])
return_numpy_array[202] = input_numpy_array[47] / math.sqrt(input_numpy_array[90])
return_numpy_array[203] = input_numpy_array[22] / math.sqrt(input_numpy_array[78])
return_numpy_array[204] = input_numpy_array[69] / math.sqrt(input_numpy_array[59])
return_numpy_array[205] = input_numpy_array[55] / math.sqrt(input_numpy_array[2])
return_numpy_array[206] = input_numpy_array[38] / math.sqrt(input_numpy_array[40])
return_numpy_array[207] = input_numpy_array[85] / math.sqrt(input_numpy_array[57])
return_numpy_array[208] = input_numpy_array[91] / math.sqrt(input_numpy_array[49])
return_numpy_array[209] = input_numpy_array[81] / math.sqrt(input_numpy_array[19])
return_numpy_array[210] = input_numpy_array[91] / math.sqrt(input_numpy_array[53])
return_numpy_array[211] = input_numpy_array[90] / math.sqrt(input_numpy_array[38])
return_numpy_array[212] = input_numpy_array[87] / math.sqrt(input_numpy_array[18])
return_numpy_array[213] = input_numpy_array[75] / math.sqrt(input_numpy_array[29])
return_numpy_array[214] = input_numpy_array[57] / math.sqrt(input_numpy_array[52])
return_numpy_array[215] = input_numpy_array[84] / math.sqrt(input_numpy_array[40])
return_numpy_array[216] = input_numpy_array[63] / math.sqrt(input_numpy_array[12])
return_numpy_array[217] = input_numpy_array[10] / math.sqrt(input_numpy_array[50])
return_numpy_array[218] = input_numpy_array[70] / math.sqrt(input_numpy_array[12])
return_numpy_array[219] = input_numpy_array[78] / math.sqrt(input_numpy_array[1])
return_numpy_array[220] = input_numpy_array[84] / math.sqrt(input_numpy_array[13])
return_numpy_array[221] = input_numpy_array[92] / math.sqrt(input_numpy_array[58])
return_numpy_array[222] = input_numpy_array[36] / math.sqrt(input_numpy_array[99])
return_numpy_array[223] = input_numpy_array[2] / math.sqrt(input_numpy_array[50])
return_numpy_array[224] = input_numpy_array[64] / math.sqrt(input_numpy_array[63])
return_numpy_array[225] = input_numpy_array[52] / math.sqrt(input_numpy_array[97])
return_numpy_array[226] = input_numpy_array[50] / math.sqrt(input_numpy_array[82])
return_numpy_array[227] = input_numpy_array[68] / math.sqrt(input_numpy_array[26])
return_numpy_array[228] = input_numpy_array[40] / math.sqrt(input_numpy_array[69])
return_numpy_array[229] = input_numpy_array[89] / math.sqrt(input_numpy_array[71])
return_numpy_array[230] = input_numpy_array[66] / math.sqrt(input_numpy_array[96])
return_numpy_array[231] = input_numpy_array[95] / math.sqrt(input_numpy_array[24])
return_numpy_array[232] = input_numpy_array[41] / math.sqrt(input_numpy_array[20])
return_numpy_array[233] = input_numpy_array[13] / math.sqrt(input_numpy_array[3])
return_numpy_array[234] = input_numpy_array[30] / math.sqrt(input_numpy_array[57])
return_numpy_array[235] = input_numpy_array[42] / math.sqrt(input_numpy_array[86])
return_numpy_array[236] = input_numpy_array[7] / math.sqrt(input_numpy_array[31])
return_numpy_array[237] = input_numpy_array[55] / math.sqrt(input_numpy_array[19])
return_numpy_array[238] = input_numpy_array[82] / math.sqrt(input_numpy_array[18])
return_numpy_array[239] = input_numpy_array[75] / math.sqrt(input_numpy_array[50])
return_numpy_array[240] = input_numpy_array[14] / math.sqrt(input_numpy_array[58])
return_numpy_array[241] = input_numpy_array[32] / math.sqrt(input_numpy_array[51])
return_numpy_array[242] = input_numpy_array[68] / math.sqrt(input_numpy_array[80])
return_numpy_array[243] = input_numpy_array[11] / math.sqrt(input_numpy_array[53])
return_numpy_array[244] = input_numpy_array[47] / math.sqrt(input_numpy_array[1])
return_numpy_array[245] = input_numpy_array[6] / math.sqrt(input_numpy_array[9])
return_numpy_array[246] = input_numpy_array[25] / math.sqrt(input_numpy_array[75])
return_numpy_array[247] = input_numpy_array[5] / math.sqrt(input_numpy_array[73])
return_numpy_array[248] = input_numpy_array[47] / math.sqrt(input_numpy_array[79])
return_numpy_array[249] = input_numpy_array[73] / math.sqrt(input_numpy_array[4])
return_numpy_array[250] = input_numpy_array[51] / math.sqrt(input_numpy_array[40])
return_numpy_array[251] = input_numpy_array[75] / math.sqrt(input_numpy_array[63])
return_numpy_array[252] = input_numpy_array[68] / math.sqrt(input_numpy_array[61])
return_numpy_array[253] = input_numpy_array[5] / math.sqrt(input_numpy_array[76])
return_numpy_array[254] = input_numpy_array[97] / math.sqrt(input_numpy_array[45])
return_numpy_array[255] = input_numpy_array[34] / math.sqrt(input_numpy_array[60])
return_numpy_array[256] = input_numpy_array[42] / math.sqrt(input_numpy_array[55])
return_numpy_array[257] = input_numpy_array[37] / math.sqrt(input_numpy_array[47])
return_numpy_array[258] = input_numpy_array[19] / math.sqrt(input_numpy_array[79])
return_numpy_array[259] = input_numpy_array[8] / math.sqrt(input_numpy_array[14])
return_numpy_array[260] = input_numpy_array[5] / math.sqrt(input_numpy_array[83])
return_numpy_array[261] = input_numpy_array[92] / math.sqrt(input_numpy_array[38])
return_numpy_array[262] = input_numpy_array[94] / math.sqrt(input_numpy_array[13])
return_numpy_array[263] = input_numpy_array[30] / math.sqrt(input_numpy_array[39])
return_numpy_array[264] = input_numpy_array[18] / math.sqrt(input_numpy_array[11])
return_numpy_array[265] = input_numpy_array[91] / math.sqrt(input_numpy_array[36])
return_numpy_array[266] = input_numpy_array[23] / math.sqrt(input_numpy_array[57])
return_numpy_array[267] = input_numpy_array[79] / math.sqrt(input_numpy_array[29])
return_numpy_array[268] = input_numpy_array[44] / math.sqrt(input_numpy_array[87])
return_numpy_array[269] = input_numpy_array[10] / math.sqrt(input_numpy_array[71])
return_numpy_array[270] = input_numpy_array[85] / math.sqrt(input_numpy_array[60])
return_numpy_array[271] = input_numpy_array[29] / math.sqrt(input_numpy_array[82])
return_numpy_array[272] = input_numpy_array[17] / math.sqrt(input_numpy_array[44])
return_numpy_array[273] = input_numpy_array[27] / math.sqrt(input_numpy_array[40])
return_numpy_array[274] = input_numpy_array[60] / math.sqrt(input_numpy_array[57])
return_numpy_array[275] = input_numpy_array[32] / math.sqrt(input_numpy_array[55])
return_numpy_array[276] = input_numpy_array[32] / math.sqrt(input_numpy_array[2])
return_numpy_array[277] = input_numpy_array[22] / math.sqrt(input_numpy_array[41])
return_numpy_array[278] = input_numpy_array[45] / math.sqrt(input_numpy_array[57])
return_numpy_array[279] = input_numpy_array[58] / math.sqrt(input_numpy_array[10])
return_numpy_array[280] = input_numpy_array[5] / math.sqrt(input_numpy_array[11])
return_numpy_array[281] = input_numpy_array[49] / math.sqrt(input_numpy_array[53])
return_numpy_array[282] = input_numpy_array[26] / math.sqrt(input_numpy_array[67])
return_numpy_array[283] = input_numpy_array[16] / math.sqrt(input_numpy_array[40])
return_numpy_array[284] = input_numpy_array[80] / math.sqrt(input_numpy_array[45])
return_numpy_array[285] = input_numpy_array[7] / math.sqrt(input_numpy_array[87])
return_numpy_array[286] = input_numpy_array[20] / math.sqrt(input_numpy_array[22])
return_numpy_array[287] = input_numpy_array[97] / math.sqrt(input_numpy_array[31])
return_numpy_array[288] = input_numpy_array[27] / math.sqrt(input_numpy_array[63])
return_numpy_array[289] = input_numpy_array[75] / math.sqrt(input_numpy_array[41])
return_numpy_array[290] = input_numpy_array[72] / math.sqrt(input_numpy_array[31])
return_numpy_array[291] = input_numpy_array[65] / math.sqrt(input_numpy_array[44])
return_numpy_array[292] = input_numpy_array[21] / math.sqrt(input_numpy_array[81])
return_numpy_array[293] = input_numpy_array[51] / math.sqrt(input_numpy_array[22])
return_numpy_array[294] = input_numpy_array[79] / math.sqrt(input_numpy_array[62])
return_numpy_array[295] = input_numpy_array[56] / math.sqrt(input_numpy_array[75])
return_numpy_array[296] = input_numpy_array[84] / math.sqrt(input_numpy_array[68])
return_numpy_array[297] = input_numpy_array[87] / math.sqrt(input_numpy_array[98])
return_numpy_array[298] = input_numpy_array[12] / math.sqrt(input_numpy_array[12])
return_numpy_array[299] = input_numpy_array[35] / math.sqrt(input_numpy_array[45])
return_numpy_array[300] = input_numpy_array[16] / math.sqrt(input_numpy_array[10])
return_numpy_array[301] = input_numpy_array[44] / math.sqrt(input_numpy_array[7])
return_numpy_array[302] = input_numpy_array[97] / math.sqrt(input_numpy_array[64])
return_numpy_array[303] = input_numpy_array[54] / math.sqrt(input_numpy_array[5])
return_numpy_array[304] = input_numpy_array[32] / math.sqrt(input_numpy_array[37])
return_numpy_array[305] = input_numpy_array[3] / math.sqrt(input_numpy_array[38])
return_numpy_array[306] = input_numpy_array[77] / math.sqrt(input_numpy_array[34])
return_numpy_array[307] = input_numpy_array[33] / math.sqrt(input_numpy_array[16])
return_numpy_array[308] = input_numpy_array[34] / math.sqrt(input_numpy_array[33])
return_numpy_array[309] = input_numpy_array[23] / math.sqrt(input_numpy_array[48])
return_numpy_array[310] = input_numpy_array[44] / math.sqrt(input_numpy_array[9])
return_numpy_array[311] = input_numpy_array[11] / math.sqrt(input_numpy_array[27])
return_numpy_array[312] = input_numpy_array[73] / math.sqrt(input_numpy_array[99])
return_numpy_array[313] = input_numpy_array[62] / math.sqrt(input_numpy_array[8])
return_numpy_array[314] = input_numpy_array[85] / math.sqrt(input_numpy_array[33])
return_numpy_array[315] = input_numpy_array[92] / math.sqrt(input_numpy_array[19])
return_numpy_array[316] = input_numpy_array[80] / math.sqrt(input_numpy_array[72])
return_numpy_array[317] = input_numpy_array[85] / math.sqrt(input_numpy_array[88])
return_numpy_array[318] = input_numpy_array[89] / math.sqrt(input_numpy_array[12])
return_numpy_array[319] = input_numpy_array[19] / math.sqrt(input_numpy_array[73])
return_numpy_array[320] = input_numpy_array[66] / math.sqrt(input_numpy_array[22])
return_numpy_array[321] = input_numpy_array[79] / math.sqrt(input_numpy_array[1])
return_numpy_array[322] = input_numpy_array[56] / math.sqrt(input_numpy_array[23])
return_numpy_array[323] = input_numpy_array[71] / math.sqrt(input_numpy_array[37])
return_numpy_array[324] = input_numpy_array[64] / math.sqrt(input_numpy_array[98])
return_numpy_array[325] = input_numpy_array[79] / math.sqrt(input_numpy_array[39])
return_numpy_array[326] = input_numpy_array[52] / math.sqrt(input_numpy_array[37])
return_numpy_array[327] = input_numpy_array[33] / math.sqrt(input_numpy_array[11])
return_numpy_array[328] = input_numpy_array[85] / math.sqrt(input_numpy_array[57])
return_numpy_array[329] = input_numpy_array[48] / math.sqrt(input_numpy_array[34])
return_numpy_array[330] = input_numpy_array[97] / math.sqrt(input_numpy_array[63])
return_numpy_array[331] = input_numpy_array[54] / math.sqrt(input_numpy_array[39])
return_numpy_array[332] = input_numpy_array[3] / math.sqrt(input_numpy_array[61])
return_numpy_array[333] = input_numpy_array[13] / math.sqrt(input_numpy_array[100])
return_numpy_array[334] = input_numpy_array[31] / math.sqrt(input_numpy_array[94])
return_numpy_array[335] = input_numpy_array[73] / math.sqrt(input_numpy_array[35])
return_numpy_array[336] = input_numpy_array[91] / math.sqrt(input_numpy_array[36])
return_numpy_array[337] = input_numpy_array[84] / math.sqrt(input_numpy_array[67])
return_numpy_array[338] = input_numpy_array[87] / math.sqrt(input_numpy_array[84])
return_numpy_array[339] = input_numpy_array[21] / math.sqrt(input_numpy_array[50])
return_numpy_array[340] = input_numpy_array[33] / math.sqrt(input_numpy_array[88])
return_numpy_array[341] = input_numpy_array[89] / math.sqrt(input_numpy_array[3])
return_numpy_array[342] = input_numpy_array[43] / math.sqrt(input_numpy_array[33])
return_numpy_array[343] = input_numpy_array[37] / math.sqrt(input_numpy_array[15])
return_numpy_array[344] = input_numpy_array[0] / math.sqrt(input_numpy_array[77])
return_numpy_array[345] = input_numpy_array[18] / math.sqrt(input_numpy_array[37])
return_numpy_array[346] = input_numpy_array[99] / math.sqrt(input_numpy_array[13])
return_numpy_array[347] = input_numpy_array[90] / math.sqrt(input_numpy_array[14])
return_numpy_array[348] = input_numpy_array[88] / math.sqrt(input_numpy_array[62])
return_numpy_array[349] = input_numpy_array[1] / math.sqrt(input_numpy_array[35])
return_numpy_array[350] = input_numpy_array[79] / math.sqrt(input_numpy_array[10])
return_numpy_array[351] = input_numpy_array[60] / math.sqrt(input_numpy_array[63])
return_numpy_array[352] = input_numpy_array[65] / math.sqrt(input_numpy_array[12])
return_numpy_array[353] = input_numpy_array[35] / math.sqrt(input_numpy_array[69])
return_numpy_array[354] = input_numpy_array[46] / math.sqrt(input_numpy_array[30])
return_numpy_array[355] = input_numpy_array[54] / math.sqrt(input_numpy_array[13])
return_numpy_array[356] = input_numpy_array[87] / math.sqrt(input_numpy_array[64])
return_numpy_array[357] = input_numpy_array[74] / math.sqrt(input_numpy_array[91])
return_numpy_array[358] = input_numpy_array[78] / math.sqrt(input_numpy_array[95])
return_numpy_array[359] = input_numpy_array[46] / math.sqrt(input_numpy_array[39])
return_numpy_array[360] = input_numpy_array[55] / math.sqrt(input_numpy_array[31])
return_numpy_array[361] = input_numpy_array[81] / math.sqrt(input_numpy_array[87])
return_numpy_array[362] = input_numpy_array[42] / math.sqrt(input_numpy_array[93])
return_numpy_array[363] = input_numpy_array[66] / math.sqrt(input_numpy_array[67])
return_numpy_array[364] = input_numpy_array[52] / math.sqrt(input_numpy_array[30])
return_numpy_array[365] = input_numpy_array[56] / math.sqrt(input_numpy_array[53])
return_numpy_array[366] = input_numpy_array[85] / math.sqrt(input_numpy_array[9])
return_numpy_array[367] = input_numpy_array[31] / math.sqrt(input_numpy_array[59])
return_numpy_array[368] = input_numpy_array[86] / math.sqrt(input_numpy_array[77])
return_numpy_array[369] = input_numpy_array[39] / math.sqrt(input_numpy_array[41])
return_numpy_array[370] = input_numpy_array[35] / math.sqrt(input_numpy_array[39])
return_numpy_array[371] = input_numpy_array[22] / math.sqrt(input_numpy_array[89])
return_numpy_array[372] = input_numpy_array[45] / math.sqrt(input_numpy_array[56])
return_numpy_array[373] = input_numpy_array[7] / math.sqrt(input_numpy_array[42])
return_numpy_array[374] = input_numpy_array[5] / math.sqrt(input_numpy_array[92])
return_numpy_array[375] = input_numpy_array[93] / math.sqrt(input_numpy_array[23])
return_numpy_array[376] = input_numpy_array[21] / math.sqrt(input_numpy_array[83])
return_numpy_array[377] = input_numpy_array[90] / math.sqrt(input_numpy_array[60])
return_numpy_array[378] = input_numpy_array[74] / math.sqrt(input_numpy_array[29])
return_numpy_array[379] = input_numpy_array[40] / math.sqrt(input_numpy_array[9])
return_numpy_array[380] = input_numpy_array[70] / math.sqrt(input_numpy_array[71])
return_numpy_array[381] = input_numpy_array[16] / math.sqrt(input_numpy_array[73])
return_numpy_array[382] = input_numpy_array[61] / math.sqrt(input_numpy_array[100])
return_numpy_array[383] = input_numpy_array[18] / math.sqrt(input_numpy_array[56])
return_numpy_array[384] = input_numpy_array[18] / math.sqrt(input_numpy_array[94])
return_numpy_array[385] = input_numpy_array[41] / math.sqrt(input_numpy_array[43])
return_numpy_array[386] = input_numpy_array[8] / math.sqrt(input_numpy_array[87])
return_numpy_array[387] = input_numpy_array[93] / math.sqrt(input_numpy_array[65])
return_numpy_array[388] = input_numpy_array[31] / math.sqrt(input_numpy_array[75])
return_numpy_array[389] = input_numpy_array[54] / math.sqrt(input_numpy_array[46])
return_numpy_array[390] = input_numpy_array[56] / math.sqrt(input_numpy_array[50])
return_numpy_array[391] = input_numpy_array[68] / math.sqrt(input_numpy_array[92])
return_numpy_array[392] = input_numpy_array[7] / math.sqrt(input_numpy_array[42])
return_numpy_array[393] = input_numpy_array[7] / math.sqrt(input_numpy_array[84])
return_numpy_array[394] = input_numpy_array[46] / math.sqrt(input_numpy_array[50])
return_numpy_array[395] = input_numpy_array[47] / math.sqrt(input_numpy_array[65])
return_numpy_array[396] = input_numpy_array[43] / math.sqrt(input_numpy_array[82])
return_numpy_array[397] = input_numpy_array[46] / math.sqrt(input_numpy_array[32])
return_numpy_array[398] = input_numpy_array[51] / math.sqrt(input_numpy_array[17])
return_numpy_array[399] = input_numpy_array[14] / math.sqrt(input_numpy_array[5])
return_numpy_array[400] = input_numpy_array[17] / math.sqrt(input_numpy_array[33])
return_numpy_array[401] = input_numpy_array[54] / math.sqrt(input_numpy_array[86])
return_numpy_array[402] = input_numpy_array[7] / math.sqrt(input_numpy_array[41])
return_numpy_array[403] = input_numpy_array[59] / math.sqrt(input_numpy_array[16])
return_numpy_array[404] = input_numpy_array[68] / math.sqrt(input_numpy_array[36])
return_numpy_array[405] = input_numpy_array[20] / math.sqrt(input_numpy_array[4])
return_numpy_array[406] = input_numpy_array[43] / math.sqrt(input_numpy_array[64])
return_numpy_array[407] = input_numpy_array[4] / math.sqrt(input_numpy_array[1])
return_numpy_array[408] = input_numpy_array[13] / math.sqrt(input_numpy_array[93])
return_numpy_array[409] = input_numpy_array[65] / math.sqrt(input_numpy_array[50])
return_numpy_array[410] = input_numpy_array[50] / math.sqrt(input_numpy_array[41])
return_numpy_array[411] = input_numpy_array[66] / math.sqrt(input_numpy_array[72])
return_numpy_array[412] = input_numpy_array[17] / math.sqrt(input_numpy_array[10])
return_numpy_array[413] = input_numpy_array[75] / math.sqrt(input_numpy_array[6])
return_numpy_array[414] = input_numpy_array[51] / math.sqrt(input_numpy_array[83])
return_numpy_array[415] = input_numpy_array[46] / math.sqrt(input_numpy_array[17])
return_numpy_array[416] = input_numpy_array[77] / math.sqrt(input_numpy_array[57])
return_numpy_array[417] = input_numpy_array[53] / math.sqrt(input_numpy_array[33])
return_numpy_array[418] = input_numpy_array[47] / math.sqrt(input_numpy_array[39])
return_numpy_array[419] = input_numpy_array[94] / math.sqrt(input_numpy_array[15])
return_numpy_array[420] = input_numpy_array[93] / math.sqrt(input_numpy_array[85])
return_numpy_array[421] = input_numpy_array[73] / math.sqrt(input_numpy_array[94])
return_numpy_array[422] = input_numpy_array[84] / math.sqrt(input_numpy_array[95])
return_numpy_array[423] = input_numpy_array[32] / math.sqrt(input_numpy_array[56])
return_numpy_array[424] = input_numpy_array[17] / math.sqrt(input_numpy_array[90])
return_numpy_array[425] = input_numpy_array[76] / math.sqrt(input_numpy_array[68])
return_numpy_array[426] = input_numpy_array[25] / math.sqrt(input_numpy_array[94])
return_numpy_array[427] = input_numpy_array[64] / math.sqrt(input_numpy_array[29])
return_numpy_array[428] = input_numpy_array[37] / math.sqrt(input_numpy_array[89])
return_numpy_array[429] = input_numpy_array[50] / math.sqrt(input_numpy_array[99])
return_numpy_array[430] = input_numpy_array[40] / math.sqrt(input_numpy_array[78])
return_numpy_array[431] = input_numpy_array[46] / math.sqrt(input_numpy_array[63])
return_numpy_array[432] = input_numpy_array[19] / math.sqrt(input_numpy_array[83])
return_numpy_array[433] = input_numpy_array[49] / math.sqrt(input_numpy_array[42])
return_numpy_array[434] = input_numpy_array[65] / math.sqrt(input_numpy_array[80])
return_numpy_array[435] = input_numpy_array[83] / math.sqrt(input_numpy_array[97])
return_numpy_array[436] = input_numpy_array[93] / math.sqrt(input_numpy_array[17])
return_numpy_array[437] = input_numpy_array[54] / math.sqrt(input_numpy_array[40])
return_numpy_array[438] = input_numpy_array[90] / math.sqrt(input_numpy_array[100])
return_numpy_array[439] = input_numpy_array[18] / math.sqrt(input_numpy_array[35])
return_numpy_array[440] = input_numpy_array[77] / math.sqrt(input_numpy_array[28])
return_numpy_array[441] = input_numpy_array[68] / math.sqrt(input_numpy_array[23])
return_numpy_array[442] = input_numpy_array[63] / math.sqrt(input_numpy_array[47])
return_numpy_array[443] = input_numpy_array[93] / math.sqrt(input_numpy_array[6])
return_numpy_array[444] = input_numpy_array[85] / math.sqrt(input_numpy_array[88])
return_numpy_array[445] = input_numpy_array[100] / math.sqrt(input_numpy_array[60])
return_numpy_array[446] = input_numpy_array[26] / math.sqrt(input_numpy_array[64])
return_numpy_array[447] = input_numpy_array[98] / math.sqrt(input_numpy_array[96])
return_numpy_array[448] = input_numpy_array[29] / math.sqrt(input_numpy_array[75])
return_numpy_array[449] = input_numpy_array[99] / math.sqrt(input_numpy_array[30])
return_numpy_array[450] = input_numpy_array[74] / math.sqrt(input_numpy_array[86])
return_numpy_array[451] = input_numpy_array[69] / math.sqrt(input_numpy_array[11])
return_numpy_array[452] = input_numpy_array[75] / math.sqrt(input_numpy_array[64])
return_numpy_array[453] = input_numpy_array[23] / math.sqrt(input_numpy_array[41])
return_numpy_array[454] = input_numpy_array[49] / math.sqrt(input_numpy_array[3])
return_numpy_array[455] = input_numpy_array[70] / math.sqrt(input_numpy_array[55])
return_numpy_array[456] = input_numpy_array[21] / math.sqrt(input_numpy_array[38])
return_numpy_array[457] = input_numpy_array[65] / math.sqrt(input_numpy_array[81])
return_numpy_array[458] = input_numpy_array[9] / math.sqrt(input_numpy_array[47])
return_numpy_array[459] = input_numpy_array[99] / math.sqrt(input_numpy_array[15])
return_numpy_array[460] = input_numpy_array[61] / math.sqrt(input_numpy_array[85])
return_numpy_array[461] = input_numpy_array[33] / math.sqrt(input_numpy_array[54])
return_numpy_array[462] = input_numpy_array[66] / math.sqrt(input_numpy_array[19])
return_numpy_array[463] = input_numpy_array[50] / math.sqrt(input_numpy_array[39])
return_numpy_array[464] = input_numpy_array[40] / math.sqrt(input_numpy_array[54])
return_numpy_array[465] = input_numpy_array[11] / math.sqrt(input_numpy_array[23])
return_numpy_array[466] = input_numpy_array[24] / math.sqrt(input_numpy_array[22])
return_numpy_array[467] = input_numpy_array[86] / math.sqrt(input_numpy_array[17])
return_numpy_array[468] = input_numpy_array[83] / math.sqrt(input_numpy_array[2])
return_numpy_array[469] = input_numpy_array[44] / math.sqrt(input_numpy_array[69])
return_numpy_array[470] = input_numpy_array[53] / math.sqrt(input_numpy_array[25])
return_numpy_array[471] = input_numpy_array[83] / math.sqrt(input_numpy_array[67])
return_numpy_array[472] = input_numpy_array[3] / math.sqrt(input_numpy_array[42])
return_numpy_array[473] = input_numpy_array[43] / math.sqrt(input_numpy_array[33])
return_numpy_array[474] = input_numpy_array[73] / math.sqrt(input_numpy_array[97])
return_numpy_array[475] = input_numpy_array[39] / math.sqrt(input_numpy_array[20])
return_numpy_array[476] = input_numpy_array[98] / math.sqrt(input_numpy_array[58])
return_numpy_array[477] = input_numpy_array[21] / math.sqrt(input_numpy_array[38])
return_numpy_array[478] = input_numpy_array[88] / math.sqrt(input_numpy_array[47])
return_numpy_array[479] = input_numpy_array[6] / math.sqrt(input_numpy_array[93])
return_numpy_array[480] = input_numpy_array[37] / math.sqrt(input_numpy_array[96])
return_numpy_array[481] = input_numpy_array[23] / math.sqrt(input_numpy_array[60])
return_numpy_array[482] = input_numpy_array[68] / math.sqrt(input_numpy_array[2])
return_numpy_array[483] = input_numpy_array[66] / math.sqrt(input_numpy_array[64])
return_numpy_array[484] = input_numpy_array[49] / math.sqrt(input_numpy_array[26])
return_numpy_array[485] = input_numpy_array[18] / math.sqrt(input_numpy_array[26])
return_numpy_array[486] = input_numpy_array[48] / math.sqrt(input_numpy_array[7])
return_numpy_array[487] = input_numpy_array[89] / math.sqrt(input_numpy_array[94])
return_numpy_array[488] = input_numpy_array[5] / math.sqrt(input_numpy_array[54])
return_numpy_array[489] = input_numpy_array[20] / math.sqrt(input_numpy_array[91])
return_numpy_array[490] = input_numpy_array[86] / math.sqrt(input_numpy_array[35])
return_numpy_array[491] = input_numpy_array[68] / math.sqrt(input_numpy_array[12])
return_numpy_array[492] = input_numpy_array[54] / math.sqrt(input_numpy_array[60])
return_numpy_array[493] = input_numpy_array[35] / math.sqrt(input_numpy_array[45])
return_numpy_array[494] = input_numpy_array[44] / math.sqrt(input_numpy_array[8])
return_numpy_array[495] = input_numpy_array[82] / math.sqrt(input_numpy_array[29])
return_numpy_array[496] = input_numpy_array[39] / math.sqrt(input_numpy_array[43])
return_numpy_array[497] = input_numpy_array[39] / math.sqrt(input_numpy_array[88])
return_numpy_array[498] = input_numpy_array[1] / math.sqrt(input_numpy_array[18])
return_numpy_array[499] = input_numpy_array[73] / math.sqrt(input_numpy_array[71])
return_numpy_array[500] = input_numpy_array[55] / math.sqrt(input_numpy_array[34])
return_numpy_array[501] = input_numpy_array[46] / math.sqrt(input_numpy_array[70])
return_numpy_array[502] = input_numpy_array[33] / math.sqrt(input_numpy_array[48])
return_numpy_array[503] = input_numpy_array[2] / math.sqrt(input_numpy_array[36])
return_numpy_array[504] = input_numpy_array[92] / math.sqrt(input_numpy_array[89])
return_numpy_array[505] = input_numpy_array[47] / math.sqrt(input_numpy_array[67])
return_numpy_array[506] = input_numpy_array[86] / math.sqrt(input_numpy_array[90])
return_numpy_array[507] = input_numpy_array[98] / math.sqrt(input_numpy_array[45])
return_numpy_array[508] = input_numpy_array[91] / math.sqrt(input_numpy_array[53])
return_numpy_array[509] = input_numpy_array[69] / math.sqrt(input_numpy_array[80])
return_numpy_array[510] = input_numpy_array[34] / math.sqrt(input_numpy_array[61])
return_numpy_array[511] = input_numpy_array[20] / math.sqrt(input_numpy_array[49])
return_numpy_array[512] = input_numpy_array[11] / math.sqrt(input_numpy_array[27])
return_numpy_array[513] = input_numpy_array[86] / math.sqrt(input_numpy_array[9])
return_numpy_array[514] = input_numpy_array[8] / math.sqrt(input_numpy_array[99])
return_numpy_array[515] = input_numpy_array[25] / math.sqrt(input_numpy_array[8])
return return_numpy_array
def main():
    """Micro-benchmark process_row and print the mean seconds per call."""
    iterations = 10000
    row = numpy.ones(101, dtype=numpy.float64)
    # A few non-unit entries so the arithmetic is not trivially uniform.
    row[10] = 5
    row[100] = 6
    row[11] = 8
    # Two warm-up calls so any JIT compilation happens before timing.
    for _ in range(2):
        process_row(row)
    t0 = timer()
    for _ in range(iterations):
        process_row(row)
    elapsed = timer() - t0
    print(elapsed / iterations)
if __name__ == '__main__':
    main()
| [
"timeit.default_timer",
"numpy.zeros",
"math.sqrt",
"numpy.ones"
] | [((228, 244), 'numpy.zeros', 'numpy.zeros', (['(516)'], {}), '(516)\n', (239, 244), False, 'import numpy\n'), ((45010, 45046), 'numpy.ones', 'numpy.ones', (['(101)'], {'dtype': 'numpy.float64'}), '(101, dtype=numpy.float64)\n', (45020, 45046), False, 'import numpy\n'), ((45230, 45237), 'timeit.default_timer', 'timer', ([], {}), '()\n', (45235, 45237), True, 'from timeit import default_timer as timer\n'), ((45309, 45316), 'timeit.default_timer', 'timer', ([], {}), '()\n', (45314, 45316), True, 'from timeit import default_timer as timer\n'), ((297, 329), 'math.sqrt', 'math.sqrt', (['input_numpy_array[68]'], {}), '(input_numpy_array[68])\n', (306, 329), False, 'import math\n'), ((382, 414), 'math.sqrt', 'math.sqrt', (['input_numpy_array[71]'], {}), '(input_numpy_array[71])\n', (391, 414), False, 'import math\n'), ((467, 499), 'math.sqrt', 'math.sqrt', (['input_numpy_array[55]'], {}), '(input_numpy_array[55])\n', (476, 499), False, 'import math\n'), ((552, 584), 'math.sqrt', 'math.sqrt', (['input_numpy_array[82]'], {}), '(input_numpy_array[82])\n', (561, 584), False, 'import math\n'), ((636, 668), 'math.sqrt', 'math.sqrt', (['input_numpy_array[76]'], {}), '(input_numpy_array[76])\n', (645, 668), False, 'import math\n'), ((721, 753), 'math.sqrt', 'math.sqrt', (['input_numpy_array[99]'], {}), '(input_numpy_array[99])\n', (730, 753), False, 'import math\n'), ((806, 838), 'math.sqrt', 'math.sqrt', (['input_numpy_array[73]'], {}), '(input_numpy_array[73])\n', (815, 838), False, 'import math\n'), ((891, 923), 'math.sqrt', 'math.sqrt', (['input_numpy_array[39]'], {}), '(input_numpy_array[39])\n', (900, 923), False, 'import math\n'), ((976, 1008), 'math.sqrt', 'math.sqrt', (['input_numpy_array[80]'], {}), '(input_numpy_array[80])\n', (985, 1008), False, 'import math\n'), ((1061, 1093), 'math.sqrt', 'math.sqrt', (['input_numpy_array[27]'], {}), '(input_numpy_array[27])\n', (1070, 1093), False, 'import math\n'), ((1147, 1179), 'math.sqrt', 'math.sqrt', 
(['input_numpy_array[82]'], {}), '(input_numpy_array[82])\n', (1156, 1179), False, 'import math\n'), ((1233, 1265), 'math.sqrt', 'math.sqrt', (['input_numpy_array[15]'], {}), '(input_numpy_array[15])\n', (1242, 1265), False, 'import math\n'), ((1319, 1351), 'math.sqrt', 'math.sqrt', (['input_numpy_array[95]'], {}), '(input_numpy_array[95])\n', (1328, 1351), False, 'import math\n'), ((1405, 1437), 'math.sqrt', 'math.sqrt', (['input_numpy_array[93]'], {}), '(input_numpy_array[93])\n', (1414, 1437), False, 'import math\n'), ((1491, 1524), 'math.sqrt', 'math.sqrt', (['input_numpy_array[100]'], {}), '(input_numpy_array[100])\n', (1500, 1524), False, 'import math\n'), ((1578, 1610), 'math.sqrt', 'math.sqrt', (['input_numpy_array[19]'], {}), '(input_numpy_array[19])\n', (1587, 1610), False, 'import math\n'), ((1664, 1696), 'math.sqrt', 'math.sqrt', (['input_numpy_array[17]'], {}), '(input_numpy_array[17])\n', (1673, 1696), False, 'import math\n'), ((1750, 1782), 'math.sqrt', 'math.sqrt', (['input_numpy_array[49]'], {}), '(input_numpy_array[49])\n', (1759, 1782), False, 'import math\n'), ((1836, 1868), 'math.sqrt', 'math.sqrt', (['input_numpy_array[85]'], {}), '(input_numpy_array[85])\n', (1845, 1868), False, 'import math\n'), ((1922, 1954), 'math.sqrt', 'math.sqrt', (['input_numpy_array[85]'], {}), '(input_numpy_array[85])\n', (1931, 1954), False, 'import math\n'), ((2008, 2040), 'math.sqrt', 'math.sqrt', (['input_numpy_array[10]'], {}), '(input_numpy_array[10])\n', (2017, 2040), False, 'import math\n'), ((2094, 2126), 'math.sqrt', 'math.sqrt', (['input_numpy_array[12]'], {}), '(input_numpy_array[12])\n', (2103, 2126), False, 'import math\n'), ((2180, 2212), 'math.sqrt', 'math.sqrt', (['input_numpy_array[13]'], {}), '(input_numpy_array[13])\n', (2189, 2212), False, 'import math\n'), ((2266, 2297), 'math.sqrt', 'math.sqrt', (['input_numpy_array[0]'], {}), '(input_numpy_array[0])\n', (2275, 2297), False, 'import math\n'), ((2351, 2383), 'math.sqrt', 'math.sqrt', 
(['input_numpy_array[84]'], {}), '(input_numpy_array[84])\n', (2360, 2383), False, 'import math\n'), ((2437, 2469), 'math.sqrt', 'math.sqrt', (['input_numpy_array[93]'], {}), '(input_numpy_array[93])\n', (2446, 2469), False, 'import math\n'), ((2522, 2554), 'math.sqrt', 'math.sqrt', (['input_numpy_array[89]'], {}), '(input_numpy_array[89])\n', (2531, 2554), False, 'import math\n'), ((2608, 2640), 'math.sqrt', 'math.sqrt', (['input_numpy_array[31]'], {}), '(input_numpy_array[31])\n', (2617, 2640), False, 'import math\n'), ((2694, 2726), 'math.sqrt', 'math.sqrt', (['input_numpy_array[57]'], {}), '(input_numpy_array[57])\n', (2703, 2726), False, 'import math\n'), ((2781, 2813), 'math.sqrt', 'math.sqrt', (['input_numpy_array[64]'], {}), '(input_numpy_array[64])\n', (2790, 2813), False, 'import math\n'), ((2867, 2899), 'math.sqrt', 'math.sqrt', (['input_numpy_array[83]'], {}), '(input_numpy_array[83])\n', (2876, 2899), False, 'import math\n'), ((2953, 2985), 'math.sqrt', 'math.sqrt', (['input_numpy_array[32]'], {}), '(input_numpy_array[32])\n', (2962, 2985), False, 'import math\n'), ((3039, 3071), 'math.sqrt', 'math.sqrt', (['input_numpy_array[34]'], {}), '(input_numpy_array[34])\n', (3048, 3071), False, 'import math\n'), ((3125, 3156), 'math.sqrt', 'math.sqrt', (['input_numpy_array[9]'], {}), '(input_numpy_array[9])\n', (3134, 3156), False, 'import math\n'), ((3210, 3242), 'math.sqrt', 'math.sqrt', (['input_numpy_array[40]'], {}), '(input_numpy_array[40])\n', (3219, 3242), False, 'import math\n'), ((3296, 3328), 'math.sqrt', 'math.sqrt', (['input_numpy_array[63]'], {}), '(input_numpy_array[63])\n', (3305, 3328), False, 'import math\n'), ((3382, 3414), 'math.sqrt', 'math.sqrt', (['input_numpy_array[78]'], {}), '(input_numpy_array[78])\n', (3391, 3414), False, 'import math\n'), ((3468, 3500), 'math.sqrt', 'math.sqrt', (['input_numpy_array[40]'], {}), '(input_numpy_array[40])\n', (3477, 3500), False, 'import math\n'), ((3554, 3586), 'math.sqrt', 'math.sqrt', 
(['input_numpy_array[47]'], {}), '(input_numpy_array[47])\n', (3563, 3586), False, 'import math\n'), ((3640, 3671), 'math.sqrt', 'math.sqrt', (['input_numpy_array[8]'], {}), '(input_numpy_array[8])\n', (3649, 3671), False, 'import math\n'), ((3725, 3757), 'math.sqrt', 'math.sqrt', (['input_numpy_array[15]'], {}), '(input_numpy_array[15])\n', (3734, 3757), False, 'import math\n'), ((3811, 3843), 'math.sqrt', 'math.sqrt', (['input_numpy_array[52]'], {}), '(input_numpy_array[52])\n', (3820, 3843), False, 'import math\n'), ((3897, 3929), 'math.sqrt', 'math.sqrt', (['input_numpy_array[57]'], {}), '(input_numpy_array[57])\n', (3906, 3929), False, 'import math\n'), ((3983, 4015), 'math.sqrt', 'math.sqrt', (['input_numpy_array[39]'], {}), '(input_numpy_array[39])\n', (3992, 4015), False, 'import math\n'), ((4069, 4101), 'math.sqrt', 'math.sqrt', (['input_numpy_array[65]'], {}), '(input_numpy_array[65])\n', (4078, 4101), False, 'import math\n'), ((4155, 4187), 'math.sqrt', 'math.sqrt', (['input_numpy_array[40]'], {}), '(input_numpy_array[40])\n', (4164, 4187), False, 'import math\n'), ((4241, 4273), 'math.sqrt', 'math.sqrt', (['input_numpy_array[59]'], {}), '(input_numpy_array[59])\n', (4250, 4273), False, 'import math\n'), ((4327, 4359), 'math.sqrt', 'math.sqrt', (['input_numpy_array[71]'], {}), '(input_numpy_array[71])\n', (4336, 4359), False, 'import math\n'), ((4413, 4445), 'math.sqrt', 'math.sqrt', (['input_numpy_array[56]'], {}), '(input_numpy_array[56])\n', (4422, 4445), False, 'import math\n'), ((4499, 4531), 'math.sqrt', 'math.sqrt', (['input_numpy_array[10]'], {}), '(input_numpy_array[10])\n', (4508, 4531), False, 'import math\n'), ((4585, 4617), 'math.sqrt', 'math.sqrt', (['input_numpy_array[45]'], {}), '(input_numpy_array[45])\n', (4594, 4617), False, 'import math\n'), ((4671, 4703), 'math.sqrt', 'math.sqrt', (['input_numpy_array[83]'], {}), '(input_numpy_array[83])\n', (4680, 4703), False, 'import math\n'), ((4757, 4789), 'math.sqrt', 'math.sqrt', 
(['input_numpy_array[56]'], {}), '(input_numpy_array[56])\n', (4766, 4789), False, 'import math\n'), ((4843, 4875), 'math.sqrt', 'math.sqrt', (['input_numpy_array[67]'], {}), '(input_numpy_array[67])\n', (4852, 4875), False, 'import math\n'), ((4929, 4961), 'math.sqrt', 'math.sqrt', (['input_numpy_array[78]'], {}), '(input_numpy_array[78])\n', (4938, 4961), False, 'import math\n'), ((5015, 5047), 'math.sqrt', 'math.sqrt', (['input_numpy_array[68]'], {}), '(input_numpy_array[68])\n', (5024, 5047), False, 'import math\n'), ((5101, 5133), 'math.sqrt', 'math.sqrt', (['input_numpy_array[10]'], {}), '(input_numpy_array[10])\n', (5110, 5133), False, 'import math\n'), ((5187, 5219), 'math.sqrt', 'math.sqrt', (['input_numpy_array[12]'], {}), '(input_numpy_array[12])\n', (5196, 5219), False, 'import math\n'), ((5273, 5304), 'math.sqrt', 'math.sqrt', (['input_numpy_array[7]'], {}), '(input_numpy_array[7])\n', (5282, 5304), False, 'import math\n'), ((5358, 5391), 'math.sqrt', 'math.sqrt', (['input_numpy_array[100]'], {}), '(input_numpy_array[100])\n', (5367, 5391), False, 'import math\n'), ((5445, 5477), 'math.sqrt', 'math.sqrt', (['input_numpy_array[83]'], {}), '(input_numpy_array[83])\n', (5454, 5477), False, 'import math\n'), ((5531, 5563), 'math.sqrt', 'math.sqrt', (['input_numpy_array[61]'], {}), '(input_numpy_array[61])\n', (5540, 5563), False, 'import math\n'), ((5617, 5649), 'math.sqrt', 'math.sqrt', (['input_numpy_array[58]'], {}), '(input_numpy_array[58])\n', (5626, 5649), False, 'import math\n'), ((5703, 5735), 'math.sqrt', 'math.sqrt', (['input_numpy_array[61]'], {}), '(input_numpy_array[61])\n', (5712, 5735), False, 'import math\n'), ((5789, 5821), 'math.sqrt', 'math.sqrt', (['input_numpy_array[70]'], {}), '(input_numpy_array[70])\n', (5798, 5821), False, 'import math\n'), ((5875, 5907), 'math.sqrt', 'math.sqrt', (['input_numpy_array[75]'], {}), '(input_numpy_array[75])\n', (5884, 5907), False, 'import math\n'), ((5961, 5993), 'math.sqrt', 'math.sqrt', 
(['input_numpy_array[38]'], {}), '(input_numpy_array[38])\n', (5970, 5993), False, 'import math\n'), ((6047, 6079), 'math.sqrt', 'math.sqrt', (['input_numpy_array[71]'], {}), '(input_numpy_array[71])\n', (6056, 6079), False, 'import math\n'), ((6132, 6164), 'math.sqrt', 'math.sqrt', (['input_numpy_array[69]'], {}), '(input_numpy_array[69])\n', (6141, 6164), False, 'import math\n'), ((6217, 6249), 'math.sqrt', 'math.sqrt', (['input_numpy_array[38]'], {}), '(input_numpy_array[38])\n', (6226, 6249), False, 'import math\n'), ((6303, 6335), 'math.sqrt', 'math.sqrt', (['input_numpy_array[84]'], {}), '(input_numpy_array[84])\n', (6312, 6335), False, 'import math\n'), ((6388, 6420), 'math.sqrt', 'math.sqrt', (['input_numpy_array[87]'], {}), '(input_numpy_array[87])\n', (6397, 6420), False, 'import math\n'), ((6474, 6506), 'math.sqrt', 'math.sqrt', (['input_numpy_array[98]'], {}), '(input_numpy_array[98])\n', (6483, 6506), False, 'import math\n'), ((6560, 6592), 'math.sqrt', 'math.sqrt', (['input_numpy_array[37]'], {}), '(input_numpy_array[37])\n', (6569, 6592), False, 'import math\n'), ((6646, 6678), 'math.sqrt', 'math.sqrt', (['input_numpy_array[63]'], {}), '(input_numpy_array[63])\n', (6655, 6678), False, 'import math\n'), ((6732, 6764), 'math.sqrt', 'math.sqrt', (['input_numpy_array[74]'], {}), '(input_numpy_array[74])\n', (6741, 6764), False, 'import math\n'), ((6818, 6850), 'math.sqrt', 'math.sqrt', (['input_numpy_array[64]'], {}), '(input_numpy_array[64])\n', (6827, 6850), False, 'import math\n'), ((6904, 6936), 'math.sqrt', 'math.sqrt', (['input_numpy_array[81]'], {}), '(input_numpy_array[81])\n', (6913, 6936), False, 'import math\n'), ((6990, 7022), 'math.sqrt', 'math.sqrt', (['input_numpy_array[86]'], {}), '(input_numpy_array[86])\n', (6999, 7022), False, 'import math\n'), ((7076, 7108), 'math.sqrt', 'math.sqrt', (['input_numpy_array[98]'], {}), '(input_numpy_array[98])\n', (7085, 7108), False, 'import math\n'), ((7162, 7194), 'math.sqrt', 'math.sqrt', 
(['input_numpy_array[37]'], {}), '(input_numpy_array[37])\n', (7171, 7194), False, 'import math\n'), ((7248, 7280), 'math.sqrt', 'math.sqrt', (['input_numpy_array[87]'], {}), '(input_numpy_array[87])\n', (7257, 7280), False, 'import math\n'), ((7334, 7366), 'math.sqrt', 'math.sqrt', (['input_numpy_array[76]'], {}), '(input_numpy_array[76])\n', (7343, 7366), False, 'import math\n'), ((7420, 7452), 'math.sqrt', 'math.sqrt', (['input_numpy_array[28]'], {}), '(input_numpy_array[28])\n', (7429, 7452), False, 'import math\n'), ((7506, 7538), 'math.sqrt', 'math.sqrt', (['input_numpy_array[57]'], {}), '(input_numpy_array[57])\n', (7515, 7538), False, 'import math\n'), ((7592, 7623), 'math.sqrt', 'math.sqrt', (['input_numpy_array[4]'], {}), '(input_numpy_array[4])\n', (7601, 7623), False, 'import math\n'), ((7677, 7709), 'math.sqrt', 'math.sqrt', (['input_numpy_array[29]'], {}), '(input_numpy_array[29])\n', (7686, 7709), False, 'import math\n'), ((7763, 7795), 'math.sqrt', 'math.sqrt', (['input_numpy_array[14]'], {}), '(input_numpy_array[14])\n', (7772, 7795), False, 'import math\n'), ((7849, 7881), 'math.sqrt', 'math.sqrt', (['input_numpy_array[17]'], {}), '(input_numpy_array[17])\n', (7858, 7881), False, 'import math\n'), ((7934, 7966), 'math.sqrt', 'math.sqrt', (['input_numpy_array[31]'], {}), '(input_numpy_array[31])\n', (7943, 7966), False, 'import math\n'), ((8020, 8052), 'math.sqrt', 'math.sqrt', (['input_numpy_array[24]'], {}), '(input_numpy_array[24])\n', (8029, 8052), False, 'import math\n'), ((8106, 8138), 'math.sqrt', 'math.sqrt', (['input_numpy_array[29]'], {}), '(input_numpy_array[29])\n', (8115, 8138), False, 'import math\n'), ((8192, 8224), 'math.sqrt', 'math.sqrt', (['input_numpy_array[30]'], {}), '(input_numpy_array[30])\n', (8201, 8224), False, 'import math\n'), ((8278, 8310), 'math.sqrt', 'math.sqrt', (['input_numpy_array[55]'], {}), '(input_numpy_array[55])\n', (8287, 8310), False, 'import math\n'), ((8364, 8396), 'math.sqrt', 'math.sqrt', 
(['input_numpy_array[51]'], {}), '(input_numpy_array[51])\n', (8373, 8396), False, 'import math\n'), ((8449, 8481), 'math.sqrt', 'math.sqrt', (['input_numpy_array[18]'], {}), '(input_numpy_array[18])\n', (8458, 8481), False, 'import math\n'), ((8535, 8567), 'math.sqrt', 'math.sqrt', (['input_numpy_array[64]'], {}), '(input_numpy_array[64])\n', (8544, 8567), False, 'import math\n'), ((8621, 8653), 'math.sqrt', 'math.sqrt', (['input_numpy_array[30]'], {}), '(input_numpy_array[30])\n', (8630, 8653), False, 'import math\n'), ((8707, 8739), 'math.sqrt', 'math.sqrt', (['input_numpy_array[61]'], {}), '(input_numpy_array[61])\n', (8716, 8739), False, 'import math\n'), ((8793, 8825), 'math.sqrt', 'math.sqrt', (['input_numpy_array[50]'], {}), '(input_numpy_array[50])\n', (8802, 8825), False, 'import math\n'), ((8880, 8912), 'math.sqrt', 'math.sqrt', (['input_numpy_array[67]'], {}), '(input_numpy_array[67])\n', (8889, 8912), False, 'import math\n'), ((8967, 8999), 'math.sqrt', 'math.sqrt', (['input_numpy_array[30]'], {}), '(input_numpy_array[30])\n', (8976, 8999), False, 'import math\n'), ((9053, 9085), 'math.sqrt', 'math.sqrt', (['input_numpy_array[85]'], {}), '(input_numpy_array[85])\n', (9062, 9085), False, 'import math\n'), ((9140, 9172), 'math.sqrt', 'math.sqrt', (['input_numpy_array[81]'], {}), '(input_numpy_array[81])\n', (9149, 9172), False, 'import math\n'), ((9227, 9259), 'math.sqrt', 'math.sqrt', (['input_numpy_array[43]'], {}), '(input_numpy_array[43])\n', (9236, 9259), False, 'import math\n'), ((9314, 9346), 'math.sqrt', 'math.sqrt', (['input_numpy_array[70]'], {}), '(input_numpy_array[70])\n', (9323, 9346), False, 'import math\n'), ((9401, 9433), 'math.sqrt', 'math.sqrt', (['input_numpy_array[30]'], {}), '(input_numpy_array[30])\n', (9410, 9433), False, 'import math\n'), ((9487, 9519), 'math.sqrt', 'math.sqrt', (['input_numpy_array[80]'], {}), '(input_numpy_array[80])\n', (9496, 9519), False, 'import math\n'), ((9573, 9605), 'math.sqrt', 'math.sqrt', 
(['input_numpy_array[62]'], {}), '(input_numpy_array[62])\n', (9582, 9605), False, 'import math\n'), ((9660, 9692), 'math.sqrt', 'math.sqrt', (['input_numpy_array[68]'], {}), '(input_numpy_array[68])\n', (9669, 9692), False, 'import math\n'), ((9747, 9779), 'math.sqrt', 'math.sqrt', (['input_numpy_array[25]'], {}), '(input_numpy_array[25])\n', (9756, 9779), False, 'import math\n'), ((9834, 9866), 'math.sqrt', 'math.sqrt', (['input_numpy_array[22]'], {}), '(input_numpy_array[22])\n', (9843, 9866), False, 'import math\n'), ((9921, 9953), 'math.sqrt', 'math.sqrt', (['input_numpy_array[71]'], {}), '(input_numpy_array[71])\n', (9930, 9953), False, 'import math\n'), ((10008, 10040), 'math.sqrt', 'math.sqrt', (['input_numpy_array[32]'], {}), '(input_numpy_array[32])\n', (10017, 10040), False, 'import math\n'), ((10095, 10127), 'math.sqrt', 'math.sqrt', (['input_numpy_array[10]'], {}), '(input_numpy_array[10])\n', (10104, 10127), False, 'import math\n'), ((10182, 10214), 'math.sqrt', 'math.sqrt', (['input_numpy_array[95]'], {}), '(input_numpy_array[95])\n', (10191, 10214), False, 'import math\n'), ((10269, 10301), 'math.sqrt', 'math.sqrt', (['input_numpy_array[34]'], {}), '(input_numpy_array[34])\n', (10278, 10301), False, 'import math\n'), ((10356, 10388), 'math.sqrt', 'math.sqrt', (['input_numpy_array[75]'], {}), '(input_numpy_array[75])\n', (10365, 10388), False, 'import math\n'), ((10443, 10475), 'math.sqrt', 'math.sqrt', (['input_numpy_array[90]'], {}), '(input_numpy_array[90])\n', (10452, 10475), False, 'import math\n'), ((10530, 10562), 'math.sqrt', 'math.sqrt', (['input_numpy_array[78]'], {}), '(input_numpy_array[78])\n', (10539, 10562), False, 'import math\n'), ((10617, 10649), 'math.sqrt', 'math.sqrt', (['input_numpy_array[28]'], {}), '(input_numpy_array[28])\n', (10626, 10649), False, 'import math\n'), ((10704, 10736), 'math.sqrt', 'math.sqrt', (['input_numpy_array[71]'], {}), '(input_numpy_array[71])\n', (10713, 10736), False, 'import math\n'), ((10791, 10823), 
'math.sqrt', 'math.sqrt', (['input_numpy_array[30]'], {}), '(input_numpy_array[30])\n', (10800, 10823), False, 'import math\n'), ((10878, 10910), 'math.sqrt', 'math.sqrt', (['input_numpy_array[87]'], {}), '(input_numpy_array[87])\n', (10887, 10910), False, 'import math\n'), ((10965, 10997), 'math.sqrt', 'math.sqrt', (['input_numpy_array[37]'], {}), '(input_numpy_array[37])\n', (10974, 10997), False, 'import math\n'), ((11052, 11084), 'math.sqrt', 'math.sqrt', (['input_numpy_array[77]'], {}), '(input_numpy_array[77])\n', (11061, 11084), False, 'import math\n'), ((11139, 11171), 'math.sqrt', 'math.sqrt', (['input_numpy_array[88]'], {}), '(input_numpy_array[88])\n', (11148, 11171), False, 'import math\n'), ((11226, 11258), 'math.sqrt', 'math.sqrt', (['input_numpy_array[87]'], {}), '(input_numpy_array[87])\n', (11235, 11258), False, 'import math\n'), ((11313, 11345), 'math.sqrt', 'math.sqrt', (['input_numpy_array[96]'], {}), '(input_numpy_array[96])\n', (11322, 11345), False, 'import math\n'), ((11399, 11430), 'math.sqrt', 'math.sqrt', (['input_numpy_array[2]'], {}), '(input_numpy_array[2])\n', (11408, 11430), False, 'import math\n'), ((11485, 11517), 'math.sqrt', 'math.sqrt', (['input_numpy_array[96]'], {}), '(input_numpy_array[96])\n', (11494, 11517), False, 'import math\n'), ((11572, 11604), 'math.sqrt', 'math.sqrt', (['input_numpy_array[93]'], {}), '(input_numpy_array[93])\n', (11581, 11604), False, 'import math\n'), ((11659, 11690), 'math.sqrt', 'math.sqrt', (['input_numpy_array[3]'], {}), '(input_numpy_array[3])\n', (11668, 11690), False, 'import math\n'), ((11745, 11777), 'math.sqrt', 'math.sqrt', (['input_numpy_array[88]'], {}), '(input_numpy_array[88])\n', (11754, 11777), False, 'import math\n'), ((11832, 11864), 'math.sqrt', 'math.sqrt', (['input_numpy_array[74]'], {}), '(input_numpy_array[74])\n', (11841, 11864), False, 'import math\n'), ((11919, 11951), 'math.sqrt', 'math.sqrt', (['input_numpy_array[24]'], {}), '(input_numpy_array[24])\n', (11928, 11951), 
False, 'import math\n'), ((12006, 12038), 'math.sqrt', 'math.sqrt', (['input_numpy_array[47]'], {}), '(input_numpy_array[47])\n', (12015, 12038), False, 'import math\n'), ((12093, 12125), 'math.sqrt', 'math.sqrt', (['input_numpy_array[48]'], {}), '(input_numpy_array[48])\n', (12102, 12125), False, 'import math\n'), ((12180, 12212), 'math.sqrt', 'math.sqrt', (['input_numpy_array[74]'], {}), '(input_numpy_array[74])\n', (12189, 12212), False, 'import math\n'), ((12267, 12299), 'math.sqrt', 'math.sqrt', (['input_numpy_array[20]'], {}), '(input_numpy_array[20])\n', (12276, 12299), False, 'import math\n'), ((12354, 12386), 'math.sqrt', 'math.sqrt', (['input_numpy_array[92]'], {}), '(input_numpy_array[92])\n', (12363, 12386), False, 'import math\n'), ((12441, 12472), 'math.sqrt', 'math.sqrt', (['input_numpy_array[5]'], {}), '(input_numpy_array[5])\n', (12450, 12472), False, 'import math\n'), ((12526, 12557), 'math.sqrt', 'math.sqrt', (['input_numpy_array[6]'], {}), '(input_numpy_array[6])\n', (12535, 12557), False, 'import math\n'), ((12612, 12644), 'math.sqrt', 'math.sqrt', (['input_numpy_array[48]'], {}), '(input_numpy_array[48])\n', (12621, 12644), False, 'import math\n'), ((12699, 12731), 'math.sqrt', 'math.sqrt', (['input_numpy_array[22]'], {}), '(input_numpy_array[22])\n', (12708, 12731), False, 'import math\n'), ((12786, 12818), 'math.sqrt', 'math.sqrt', (['input_numpy_array[42]'], {}), '(input_numpy_array[42])\n', (12795, 12818), False, 'import math\n'), ((12873, 12905), 'math.sqrt', 'math.sqrt', (['input_numpy_array[47]'], {}), '(input_numpy_array[47])\n', (12882, 12905), False, 'import math\n'), ((12960, 12992), 'math.sqrt', 'math.sqrt', (['input_numpy_array[79]'], {}), '(input_numpy_array[79])\n', (12969, 12992), False, 'import math\n'), ((13047, 13079), 'math.sqrt', 'math.sqrt', (['input_numpy_array[90]'], {}), '(input_numpy_array[90])\n', (13056, 13079), False, 'import math\n'), ((13134, 13166), 'math.sqrt', 'math.sqrt', (['input_numpy_array[94]'], {}), 
'(input_numpy_array[94])\n', (13143, 13166), False, 'import math\n'), ((13221, 13253), 'math.sqrt', 'math.sqrt', (['input_numpy_array[87]'], {}), '(input_numpy_array[87])\n', (13230, 13253), False, 'import math\n'), ((13308, 13340), 'math.sqrt', 'math.sqrt', (['input_numpy_array[73]'], {}), '(input_numpy_array[73])\n', (13317, 13340), False, 'import math\n'), ((13395, 13427), 'math.sqrt', 'math.sqrt', (['input_numpy_array[78]'], {}), '(input_numpy_array[78])\n', (13404, 13427), False, 'import math\n'), ((13481, 13513), 'math.sqrt', 'math.sqrt', (['input_numpy_array[16]'], {}), '(input_numpy_array[16])\n', (13490, 13513), False, 'import math\n'), ((13568, 13600), 'math.sqrt', 'math.sqrt', (['input_numpy_array[59]'], {}), '(input_numpy_array[59])\n', (13577, 13600), False, 'import math\n'), ((13655, 13687), 'math.sqrt', 'math.sqrt', (['input_numpy_array[65]'], {}), '(input_numpy_array[65])\n', (13664, 13687), False, 'import math\n'), ((13741, 13773), 'math.sqrt', 'math.sqrt', (['input_numpy_array[64]'], {}), '(input_numpy_array[64])\n', (13750, 13773), False, 'import math\n'), ((13828, 13860), 'math.sqrt', 'math.sqrt', (['input_numpy_array[62]'], {}), '(input_numpy_array[62])\n', (13837, 13860), False, 'import math\n'), ((13914, 13946), 'math.sqrt', 'math.sqrt', (['input_numpy_array[88]'], {}), '(input_numpy_array[88])\n', (13923, 13946), False, 'import math\n'), ((14001, 14033), 'math.sqrt', 'math.sqrt', (['input_numpy_array[10]'], {}), '(input_numpy_array[10])\n', (14010, 14033), False, 'import math\n'), ((14088, 14120), 'math.sqrt', 'math.sqrt', (['input_numpy_array[94]'], {}), '(input_numpy_array[94])\n', (14097, 14120), False, 'import math\n'), ((14175, 14206), 'math.sqrt', 'math.sqrt', (['input_numpy_array[0]'], {}), '(input_numpy_array[0])\n', (14184, 14206), False, 'import math\n'), ((14261, 14293), 'math.sqrt', 'math.sqrt', (['input_numpy_array[87]'], {}), '(input_numpy_array[87])\n', (14270, 14293), False, 'import math\n'), ((14347, 14379), 'math.sqrt', 
'math.sqrt', (['input_numpy_array[64]'], {}), '(input_numpy_array[64])\n', (14356, 14379), False, 'import math\n'), ((14434, 14466), 'math.sqrt', 'math.sqrt', (['input_numpy_array[76]'], {}), '(input_numpy_array[76])\n', (14443, 14466), False, 'import math\n'), ((14521, 14553), 'math.sqrt', 'math.sqrt', (['input_numpy_array[86]'], {}), '(input_numpy_array[86])\n', (14530, 14553), False, 'import math\n'), ((14608, 14640), 'math.sqrt', 'math.sqrt', (['input_numpy_array[54]'], {}), '(input_numpy_array[54])\n', (14617, 14640), False, 'import math\n'), ((14695, 14727), 'math.sqrt', 'math.sqrt', (['input_numpy_array[35]'], {}), '(input_numpy_array[35])\n', (14704, 14727), False, 'import math\n'), ((14782, 14813), 'math.sqrt', 'math.sqrt', (['input_numpy_array[5]'], {}), '(input_numpy_array[5])\n', (14791, 14813), False, 'import math\n'), ((14867, 14899), 'math.sqrt', 'math.sqrt', (['input_numpy_array[88]'], {}), '(input_numpy_array[88])\n', (14876, 14899), False, 'import math\n'), ((14954, 14986), 'math.sqrt', 'math.sqrt', (['input_numpy_array[92]'], {}), '(input_numpy_array[92])\n', (14963, 14986), False, 'import math\n'), ((15042, 15074), 'math.sqrt', 'math.sqrt', (['input_numpy_array[53]'], {}), '(input_numpy_array[53])\n', (15051, 15074), False, 'import math\n'), ((15129, 15161), 'math.sqrt', 'math.sqrt', (['input_numpy_array[61]'], {}), '(input_numpy_array[61])\n', (15138, 15161), False, 'import math\n'), ((15216, 15248), 'math.sqrt', 'math.sqrt', (['input_numpy_array[40]'], {}), '(input_numpy_array[40])\n', (15225, 15248), False, 'import math\n'), ((15303, 15335), 'math.sqrt', 'math.sqrt', (['input_numpy_array[62]'], {}), '(input_numpy_array[62])\n', (15312, 15335), False, 'import math\n'), ((15390, 15421), 'math.sqrt', 'math.sqrt', (['input_numpy_array[5]'], {}), '(input_numpy_array[5])\n', (15399, 15421), False, 'import math\n'), ((15476, 15508), 'math.sqrt', 'math.sqrt', (['input_numpy_array[18]'], {}), '(input_numpy_array[18])\n', (15485, 15508), False, 'import 
math\n'), ((15563, 15595), 'math.sqrt', 'math.sqrt', (['input_numpy_array[16]'], {}), '(input_numpy_array[16])\n', (15572, 15595), False, 'import math\n'), ((15650, 15682), 'math.sqrt', 'math.sqrt', (['input_numpy_array[97]'], {}), '(input_numpy_array[97])\n', (15659, 15682), False, 'import math\n'), ((15737, 15768), 'math.sqrt', 'math.sqrt', (['input_numpy_array[8]'], {}), '(input_numpy_array[8])\n', (15746, 15768), False, 'import math\n'), ((15823, 15855), 'math.sqrt', 'math.sqrt', (['input_numpy_array[72]'], {}), '(input_numpy_array[72])\n', (15832, 15855), False, 'import math\n'), ((15910, 15942), 'math.sqrt', 'math.sqrt', (['input_numpy_array[72]'], {}), '(input_numpy_array[72])\n', (15919, 15942), False, 'import math\n'), ((15997, 16029), 'math.sqrt', 'math.sqrt', (['input_numpy_array[36]'], {}), '(input_numpy_array[36])\n', (16006, 16029), False, 'import math\n'), ((16084, 16116), 'math.sqrt', 'math.sqrt', (['input_numpy_array[28]'], {}), '(input_numpy_array[28])\n', (16093, 16116), False, 'import math\n'), ((16171, 16203), 'math.sqrt', 'math.sqrt', (['input_numpy_array[78]'], {}), '(input_numpy_array[78])\n', (16180, 16203), False, 'import math\n'), ((16258, 16290), 'math.sqrt', 'math.sqrt', (['input_numpy_array[18]'], {}), '(input_numpy_array[18])\n', (16267, 16290), False, 'import math\n'), ((16345, 16377), 'math.sqrt', 'math.sqrt', (['input_numpy_array[50]'], {}), '(input_numpy_array[50])\n', (16354, 16377), False, 'import math\n'), ((16432, 16464), 'math.sqrt', 'math.sqrt', (['input_numpy_array[29]'], {}), '(input_numpy_array[29])\n', (16441, 16464), False, 'import math\n'), ((16519, 16551), 'math.sqrt', 'math.sqrt', (['input_numpy_array[41]'], {}), '(input_numpy_array[41])\n', (16528, 16551), False, 'import math\n'), ((16606, 16638), 'math.sqrt', 'math.sqrt', (['input_numpy_array[53]'], {}), '(input_numpy_array[53])\n', (16615, 16638), False, 'import math\n'), ((16693, 16725), 'math.sqrt', 'math.sqrt', (['input_numpy_array[53]'], {}), 
'(input_numpy_array[53])\n', (16702, 16725), False, 'import math\n'), ((16780, 16811), 'math.sqrt', 'math.sqrt', (['input_numpy_array[7]'], {}), '(input_numpy_array[7])\n', (16789, 16811), False, 'import math\n'), ((16866, 16898), 'math.sqrt', 'math.sqrt', (['input_numpy_array[96]'], {}), '(input_numpy_array[96])\n', (16875, 16898), False, 'import math\n'), ((16953, 16985), 'math.sqrt', 'math.sqrt', (['input_numpy_array[81]'], {}), '(input_numpy_array[81])\n', (16962, 16985), False, 'import math\n'), ((17040, 17072), 'math.sqrt', 'math.sqrt', (['input_numpy_array[54]'], {}), '(input_numpy_array[54])\n', (17049, 17072), False, 'import math\n'), ((17127, 17159), 'math.sqrt', 'math.sqrt', (['input_numpy_array[27]'], {}), '(input_numpy_array[27])\n', (17136, 17159), False, 'import math\n'), ((17214, 17246), 'math.sqrt', 'math.sqrt', (['input_numpy_array[34]'], {}), '(input_numpy_array[34])\n', (17223, 17246), False, 'import math\n'), ((17301, 17333), 'math.sqrt', 'math.sqrt', (['input_numpy_array[96]'], {}), '(input_numpy_array[96])\n', (17310, 17333), False, 'import math\n'), ((17387, 17419), 'math.sqrt', 'math.sqrt', (['input_numpy_array[77]'], {}), '(input_numpy_array[77])\n', (17396, 17419), False, 'import math\n'), ((17474, 17506), 'math.sqrt', 'math.sqrt', (['input_numpy_array[71]'], {}), '(input_numpy_array[71])\n', (17483, 17506), False, 'import math\n'), ((17561, 17593), 'math.sqrt', 'math.sqrt', (['input_numpy_array[59]'], {}), '(input_numpy_array[59])\n', (17570, 17593), False, 'import math\n'), ((17648, 17680), 'math.sqrt', 'math.sqrt', (['input_numpy_array[90]'], {}), '(input_numpy_array[90])\n', (17657, 17680), False, 'import math\n'), ((17735, 17767), 'math.sqrt', 'math.sqrt', (['input_numpy_array[90]'], {}), '(input_numpy_array[90])\n', (17744, 17767), False, 'import math\n'), ((17822, 17854), 'math.sqrt', 'math.sqrt', (['input_numpy_array[78]'], {}), '(input_numpy_array[78])\n', (17831, 17854), False, 'import math\n'), ((17909, 17941), 'math.sqrt', 
'math.sqrt', (['input_numpy_array[59]'], {}), '(input_numpy_array[59])\n', (17918, 17941), False, 'import math\n'), ((17996, 18027), 'math.sqrt', 'math.sqrt', (['input_numpy_array[2]'], {}), '(input_numpy_array[2])\n', (18005, 18027), False, 'import math\n'), ((18082, 18114), 'math.sqrt', 'math.sqrt', (['input_numpy_array[40]'], {}), '(input_numpy_array[40])\n', (18091, 18114), False, 'import math\n'), ((18169, 18201), 'math.sqrt', 'math.sqrt', (['input_numpy_array[57]'], {}), '(input_numpy_array[57])\n', (18178, 18201), False, 'import math\n'), ((18256, 18288), 'math.sqrt', 'math.sqrt', (['input_numpy_array[49]'], {}), '(input_numpy_array[49])\n', (18265, 18288), False, 'import math\n'), ((18343, 18375), 'math.sqrt', 'math.sqrt', (['input_numpy_array[19]'], {}), '(input_numpy_array[19])\n', (18352, 18375), False, 'import math\n'), ((18430, 18462), 'math.sqrt', 'math.sqrt', (['input_numpy_array[53]'], {}), '(input_numpy_array[53])\n', (18439, 18462), False, 'import math\n'), ((18517, 18549), 'math.sqrt', 'math.sqrt', (['input_numpy_array[38]'], {}), '(input_numpy_array[38])\n', (18526, 18549), False, 'import math\n'), ((18604, 18636), 'math.sqrt', 'math.sqrt', (['input_numpy_array[18]'], {}), '(input_numpy_array[18])\n', (18613, 18636), False, 'import math\n'), ((18691, 18723), 'math.sqrt', 'math.sqrt', (['input_numpy_array[29]'], {}), '(input_numpy_array[29])\n', (18700, 18723), False, 'import math\n'), ((18778, 18810), 'math.sqrt', 'math.sqrt', (['input_numpy_array[52]'], {}), '(input_numpy_array[52])\n', (18787, 18810), False, 'import math\n'), ((18865, 18897), 'math.sqrt', 'math.sqrt', (['input_numpy_array[40]'], {}), '(input_numpy_array[40])\n', (18874, 18897), False, 'import math\n'), ((18952, 18984), 'math.sqrt', 'math.sqrt', (['input_numpy_array[12]'], {}), '(input_numpy_array[12])\n', (18961, 18984), False, 'import math\n'), ((19039, 19071), 'math.sqrt', 'math.sqrt', (['input_numpy_array[50]'], {}), '(input_numpy_array[50])\n', (19048, 19071), False, 
'import math\n'), ((19126, 19158), 'math.sqrt', 'math.sqrt', (['input_numpy_array[12]'], {}), '(input_numpy_array[12])\n', (19135, 19158), False, 'import math\n'), ((19213, 19244), 'math.sqrt', 'math.sqrt', (['input_numpy_array[1]'], {}), '(input_numpy_array[1])\n', (19222, 19244), False, 'import math\n'), ((19299, 19331), 'math.sqrt', 'math.sqrt', (['input_numpy_array[13]'], {}), '(input_numpy_array[13])\n', (19308, 19331), False, 'import math\n'), ((19386, 19418), 'math.sqrt', 'math.sqrt', (['input_numpy_array[58]'], {}), '(input_numpy_array[58])\n', (19395, 19418), False, 'import math\n'), ((19473, 19505), 'math.sqrt', 'math.sqrt', (['input_numpy_array[99]'], {}), '(input_numpy_array[99])\n', (19482, 19505), False, 'import math\n'), ((19559, 19591), 'math.sqrt', 'math.sqrt', (['input_numpy_array[50]'], {}), '(input_numpy_array[50])\n', (19568, 19591), False, 'import math\n'), ((19646, 19678), 'math.sqrt', 'math.sqrt', (['input_numpy_array[63]'], {}), '(input_numpy_array[63])\n', (19655, 19678), False, 'import math\n'), ((19733, 19765), 'math.sqrt', 'math.sqrt', (['input_numpy_array[97]'], {}), '(input_numpy_array[97])\n', (19742, 19765), False, 'import math\n'), ((19820, 19852), 'math.sqrt', 'math.sqrt', (['input_numpy_array[82]'], {}), '(input_numpy_array[82])\n', (19829, 19852), False, 'import math\n'), ((19907, 19939), 'math.sqrt', 'math.sqrt', (['input_numpy_array[26]'], {}), '(input_numpy_array[26])\n', (19916, 19939), False, 'import math\n'), ((19994, 20026), 'math.sqrt', 'math.sqrt', (['input_numpy_array[69]'], {}), '(input_numpy_array[69])\n', (20003, 20026), False, 'import math\n'), ((20081, 20113), 'math.sqrt', 'math.sqrt', (['input_numpy_array[71]'], {}), '(input_numpy_array[71])\n', (20090, 20113), False, 'import math\n'), ((20168, 20200), 'math.sqrt', 'math.sqrt', (['input_numpy_array[96]'], {}), '(input_numpy_array[96])\n', (20177, 20200), False, 'import math\n'), ((20255, 20287), 'math.sqrt', 'math.sqrt', (['input_numpy_array[24]'], {}), 
'(input_numpy_array[24])\n', (20264, 20287), False, 'import math\n'), ((20342, 20374), 'math.sqrt', 'math.sqrt', (['input_numpy_array[20]'], {}), '(input_numpy_array[20])\n', (20351, 20374), False, 'import math\n'), ((20429, 20460), 'math.sqrt', 'math.sqrt', (['input_numpy_array[3]'], {}), '(input_numpy_array[3])\n', (20438, 20460), False, 'import math\n'), ((20515, 20547), 'math.sqrt', 'math.sqrt', (['input_numpy_array[57]'], {}), '(input_numpy_array[57])\n', (20524, 20547), False, 'import math\n'), ((20602, 20634), 'math.sqrt', 'math.sqrt', (['input_numpy_array[86]'], {}), '(input_numpy_array[86])\n', (20611, 20634), False, 'import math\n'), ((20688, 20720), 'math.sqrt', 'math.sqrt', (['input_numpy_array[31]'], {}), '(input_numpy_array[31])\n', (20697, 20720), False, 'import math\n'), ((20775, 20807), 'math.sqrt', 'math.sqrt', (['input_numpy_array[19]'], {}), '(input_numpy_array[19])\n', (20784, 20807), False, 'import math\n'), ((20862, 20894), 'math.sqrt', 'math.sqrt', (['input_numpy_array[18]'], {}), '(input_numpy_array[18])\n', (20871, 20894), False, 'import math\n'), ((20949, 20981), 'math.sqrt', 'math.sqrt', (['input_numpy_array[50]'], {}), '(input_numpy_array[50])\n', (20958, 20981), False, 'import math\n'), ((21036, 21068), 'math.sqrt', 'math.sqrt', (['input_numpy_array[58]'], {}), '(input_numpy_array[58])\n', (21045, 21068), False, 'import math\n'), ((21123, 21155), 'math.sqrt', 'math.sqrt', (['input_numpy_array[51]'], {}), '(input_numpy_array[51])\n', (21132, 21155), False, 'import math\n'), ((21210, 21242), 'math.sqrt', 'math.sqrt', (['input_numpy_array[80]'], {}), '(input_numpy_array[80])\n', (21219, 21242), False, 'import math\n'), ((21297, 21329), 'math.sqrt', 'math.sqrt', (['input_numpy_array[53]'], {}), '(input_numpy_array[53])\n', (21306, 21329), False, 'import math\n'), ((21384, 21415), 'math.sqrt', 'math.sqrt', (['input_numpy_array[1]'], {}), '(input_numpy_array[1])\n', (21393, 21415), False, 'import math\n'), ((21469, 21500), 'math.sqrt', 
'math.sqrt', (['input_numpy_array[9]'], {}), '(input_numpy_array[9])\n', (21478, 21500), False, 'import math\n'), ((21555, 21587), 'math.sqrt', 'math.sqrt', (['input_numpy_array[75]'], {}), '(input_numpy_array[75])\n', (21564, 21587), False, 'import math\n'), ((21641, 21673), 'math.sqrt', 'math.sqrt', (['input_numpy_array[73]'], {}), '(input_numpy_array[73])\n', (21650, 21673), False, 'import math\n'), ((21728, 21760), 'math.sqrt', 'math.sqrt', (['input_numpy_array[79]'], {}), '(input_numpy_array[79])\n', (21737, 21760), False, 'import math\n'), ((21815, 21846), 'math.sqrt', 'math.sqrt', (['input_numpy_array[4]'], {}), '(input_numpy_array[4])\n', (21824, 21846), False, 'import math\n'), ((21901, 21933), 'math.sqrt', 'math.sqrt', (['input_numpy_array[40]'], {}), '(input_numpy_array[40])\n', (21910, 21933), False, 'import math\n'), ((21988, 22020), 'math.sqrt', 'math.sqrt', (['input_numpy_array[63]'], {}), '(input_numpy_array[63])\n', (21997, 22020), False, 'import math\n'), ((22075, 22107), 'math.sqrt', 'math.sqrt', (['input_numpy_array[61]'], {}), '(input_numpy_array[61])\n', (22084, 22107), False, 'import math\n'), ((22161, 22193), 'math.sqrt', 'math.sqrt', (['input_numpy_array[76]'], {}), '(input_numpy_array[76])\n', (22170, 22193), False, 'import math\n'), ((22248, 22280), 'math.sqrt', 'math.sqrt', (['input_numpy_array[45]'], {}), '(input_numpy_array[45])\n', (22257, 22280), False, 'import math\n'), ((22335, 22367), 'math.sqrt', 'math.sqrt', (['input_numpy_array[60]'], {}), '(input_numpy_array[60])\n', (22344, 22367), False, 'import math\n'), ((22422, 22454), 'math.sqrt', 'math.sqrt', (['input_numpy_array[55]'], {}), '(input_numpy_array[55])\n', (22431, 22454), False, 'import math\n'), ((22509, 22541), 'math.sqrt', 'math.sqrt', (['input_numpy_array[47]'], {}), '(input_numpy_array[47])\n', (22518, 22541), False, 'import math\n'), ((22596, 22628), 'math.sqrt', 'math.sqrt', (['input_numpy_array[79]'], {}), '(input_numpy_array[79])\n', (22605, 22628), False, 'import 
math\n'), ((22682, 22714), 'math.sqrt', 'math.sqrt', (['input_numpy_array[14]'], {}), '(input_numpy_array[14])\n', (22691, 22714), False, 'import math\n'), ((22768, 22800), 'math.sqrt', 'math.sqrt', (['input_numpy_array[83]'], {}), '(input_numpy_array[83])\n', (22777, 22800), False, 'import math\n'), ((22855, 22887), 'math.sqrt', 'math.sqrt', (['input_numpy_array[38]'], {}), '(input_numpy_array[38])\n', (22864, 22887), False, 'import math\n'), ((22942, 22974), 'math.sqrt', 'math.sqrt', (['input_numpy_array[13]'], {}), '(input_numpy_array[13])\n', (22951, 22974), False, 'import math\n'), ((23029, 23061), 'math.sqrt', 'math.sqrt', (['input_numpy_array[39]'], {}), '(input_numpy_array[39])\n', (23038, 23061), False, 'import math\n'), ((23116, 23148), 'math.sqrt', 'math.sqrt', (['input_numpy_array[11]'], {}), '(input_numpy_array[11])\n', (23125, 23148), False, 'import math\n'), ((23203, 23235), 'math.sqrt', 'math.sqrt', (['input_numpy_array[36]'], {}), '(input_numpy_array[36])\n', (23212, 23235), False, 'import math\n'), ((23290, 23322), 'math.sqrt', 'math.sqrt', (['input_numpy_array[57]'], {}), '(input_numpy_array[57])\n', (23299, 23322), False, 'import math\n'), ((23377, 23409), 'math.sqrt', 'math.sqrt', (['input_numpy_array[29]'], {}), '(input_numpy_array[29])\n', (23386, 23409), False, 'import math\n'), ((23464, 23496), 'math.sqrt', 'math.sqrt', (['input_numpy_array[87]'], {}), '(input_numpy_array[87])\n', (23473, 23496), False, 'import math\n'), ((23551, 23583), 'math.sqrt', 'math.sqrt', (['input_numpy_array[71]'], {}), '(input_numpy_array[71])\n', (23560, 23583), False, 'import math\n'), ((23638, 23670), 'math.sqrt', 'math.sqrt', (['input_numpy_array[60]'], {}), '(input_numpy_array[60])\n', (23647, 23670), False, 'import math\n'), ((23725, 23757), 'math.sqrt', 'math.sqrt', (['input_numpy_array[82]'], {}), '(input_numpy_array[82])\n', (23734, 23757), False, 'import math\n'), ((23812, 23844), 'math.sqrt', 'math.sqrt', (['input_numpy_array[44]'], {}), 
'(input_numpy_array[44])\n', (23821, 23844), False, 'import math\n'), ((23899, 23931), 'math.sqrt', 'math.sqrt', (['input_numpy_array[40]'], {}), '(input_numpy_array[40])\n', (23908, 23931), False, 'import math\n'), ((23986, 24018), 'math.sqrt', 'math.sqrt', (['input_numpy_array[57]'], {}), '(input_numpy_array[57])\n', (23995, 24018), False, 'import math\n'), ((24073, 24105), 'math.sqrt', 'math.sqrt', (['input_numpy_array[55]'], {}), '(input_numpy_array[55])\n', (24082, 24105), False, 'import math\n'), ((24160, 24191), 'math.sqrt', 'math.sqrt', (['input_numpy_array[2]'], {}), '(input_numpy_array[2])\n', (24169, 24191), False, 'import math\n'), ((24246, 24278), 'math.sqrt', 'math.sqrt', (['input_numpy_array[41]'], {}), '(input_numpy_array[41])\n', (24255, 24278), False, 'import math\n'), ((24333, 24365), 'math.sqrt', 'math.sqrt', (['input_numpy_array[57]'], {}), '(input_numpy_array[57])\n', (24342, 24365), False, 'import math\n'), ((24420, 24452), 'math.sqrt', 'math.sqrt', (['input_numpy_array[10]'], {}), '(input_numpy_array[10])\n', (24429, 24452), False, 'import math\n'), ((24506, 24538), 'math.sqrt', 'math.sqrt', (['input_numpy_array[11]'], {}), '(input_numpy_array[11])\n', (24515, 24538), False, 'import math\n'), ((24593, 24625), 'math.sqrt', 'math.sqrt', (['input_numpy_array[53]'], {}), '(input_numpy_array[53])\n', (24602, 24625), False, 'import math\n'), ((24680, 24712), 'math.sqrt', 'math.sqrt', (['input_numpy_array[67]'], {}), '(input_numpy_array[67])\n', (24689, 24712), False, 'import math\n'), ((24767, 24799), 'math.sqrt', 'math.sqrt', (['input_numpy_array[40]'], {}), '(input_numpy_array[40])\n', (24776, 24799), False, 'import math\n'), ((24854, 24886), 'math.sqrt', 'math.sqrt', (['input_numpy_array[45]'], {}), '(input_numpy_array[45])\n', (24863, 24886), False, 'import math\n'), ((24940, 24972), 'math.sqrt', 'math.sqrt', (['input_numpy_array[87]'], {}), '(input_numpy_array[87])\n', (24949, 24972), False, 'import math\n'), ((25027, 25059), 'math.sqrt', 
'math.sqrt', (['input_numpy_array[22]'], {}), '(input_numpy_array[22])\n', (25036, 25059), False, 'import math\n'), ((25114, 25146), 'math.sqrt', 'math.sqrt', (['input_numpy_array[31]'], {}), '(input_numpy_array[31])\n', (25123, 25146), False, 'import math\n'), ((25201, 25233), 'math.sqrt', 'math.sqrt', (['input_numpy_array[63]'], {}), '(input_numpy_array[63])\n', (25210, 25233), False, 'import math\n'), ((25288, 25320), 'math.sqrt', 'math.sqrt', (['input_numpy_array[41]'], {}), '(input_numpy_array[41])\n', (25297, 25320), False, 'import math\n'), ((25375, 25407), 'math.sqrt', 'math.sqrt', (['input_numpy_array[31]'], {}), '(input_numpy_array[31])\n', (25384, 25407), False, 'import math\n'), ((25462, 25494), 'math.sqrt', 'math.sqrt', (['input_numpy_array[44]'], {}), '(input_numpy_array[44])\n', (25471, 25494), False, 'import math\n'), ((25549, 25581), 'math.sqrt', 'math.sqrt', (['input_numpy_array[81]'], {}), '(input_numpy_array[81])\n', (25558, 25581), False, 'import math\n'), ((25636, 25668), 'math.sqrt', 'math.sqrt', (['input_numpy_array[22]'], {}), '(input_numpy_array[22])\n', (25645, 25668), False, 'import math\n'), ((25723, 25755), 'math.sqrt', 'math.sqrt', (['input_numpy_array[62]'], {}), '(input_numpy_array[62])\n', (25732, 25755), False, 'import math\n'), ((25810, 25842), 'math.sqrt', 'math.sqrt', (['input_numpy_array[75]'], {}), '(input_numpy_array[75])\n', (25819, 25842), False, 'import math\n'), ((25897, 25929), 'math.sqrt', 'math.sqrt', (['input_numpy_array[68]'], {}), '(input_numpy_array[68])\n', (25906, 25929), False, 'import math\n'), ((25984, 26016), 'math.sqrt', 'math.sqrt', (['input_numpy_array[98]'], {}), '(input_numpy_array[98])\n', (25993, 26016), False, 'import math\n'), ((26071, 26103), 'math.sqrt', 'math.sqrt', (['input_numpy_array[12]'], {}), '(input_numpy_array[12])\n', (26080, 26103), False, 'import math\n'), ((26158, 26190), 'math.sqrt', 'math.sqrt', (['input_numpy_array[45]'], {}), '(input_numpy_array[45])\n', (26167, 26190), False, 
'import math\n'), ((26245, 26277), 'math.sqrt', 'math.sqrt', (['input_numpy_array[10]'], {}), '(input_numpy_array[10])\n', (26254, 26277), False, 'import math\n'), ((26332, 26363), 'math.sqrt', 'math.sqrt', (['input_numpy_array[7]'], {}), '(input_numpy_array[7])\n', (26341, 26363), False, 'import math\n'), ((26418, 26450), 'math.sqrt', 'math.sqrt', (['input_numpy_array[64]'], {}), '(input_numpy_array[64])\n', (26427, 26450), False, 'import math\n'), ((26505, 26536), 'math.sqrt', 'math.sqrt', (['input_numpy_array[5]'], {}), '(input_numpy_array[5])\n', (26514, 26536), False, 'import math\n'), ((26591, 26623), 'math.sqrt', 'math.sqrt', (['input_numpy_array[37]'], {}), '(input_numpy_array[37])\n', (26600, 26623), False, 'import math\n'), ((26677, 26709), 'math.sqrt', 'math.sqrt', (['input_numpy_array[38]'], {}), '(input_numpy_array[38])\n', (26686, 26709), False, 'import math\n'), ((26764, 26796), 'math.sqrt', 'math.sqrt', (['input_numpy_array[34]'], {}), '(input_numpy_array[34])\n', (26773, 26796), False, 'import math\n'), ((26851, 26883), 'math.sqrt', 'math.sqrt', (['input_numpy_array[16]'], {}), '(input_numpy_array[16])\n', (26860, 26883), False, 'import math\n'), ((26938, 26970), 'math.sqrt', 'math.sqrt', (['input_numpy_array[33]'], {}), '(input_numpy_array[33])\n', (26947, 26970), False, 'import math\n'), ((27025, 27057), 'math.sqrt', 'math.sqrt', (['input_numpy_array[48]'], {}), '(input_numpy_array[48])\n', (27034, 27057), False, 'import math\n'), ((27112, 27143), 'math.sqrt', 'math.sqrt', (['input_numpy_array[9]'], {}), '(input_numpy_array[9])\n', (27121, 27143), False, 'import math\n'), ((27198, 27230), 'math.sqrt', 'math.sqrt', (['input_numpy_array[27]'], {}), '(input_numpy_array[27])\n', (27207, 27230), False, 'import math\n'), ((27285, 27317), 'math.sqrt', 'math.sqrt', (['input_numpy_array[99]'], {}), '(input_numpy_array[99])\n', (27294, 27317), False, 'import math\n'), ((27372, 27403), 'math.sqrt', 'math.sqrt', (['input_numpy_array[8]'], {}), 
'(input_numpy_array[8])\n', (27381, 27403), False, 'import math\n'), ((27458, 27490), 'math.sqrt', 'math.sqrt', (['input_numpy_array[33]'], {}), '(input_numpy_array[33])\n', (27467, 27490), False, 'import math\n'), ((27545, 27577), 'math.sqrt', 'math.sqrt', (['input_numpy_array[19]'], {}), '(input_numpy_array[19])\n', (27554, 27577), False, 'import math\n'), ((27632, 27664), 'math.sqrt', 'math.sqrt', (['input_numpy_array[72]'], {}), '(input_numpy_array[72])\n', (27641, 27664), False, 'import math\n'), ((27719, 27751), 'math.sqrt', 'math.sqrt', (['input_numpy_array[88]'], {}), '(input_numpy_array[88])\n', (27728, 27751), False, 'import math\n'), ((27806, 27838), 'math.sqrt', 'math.sqrt', (['input_numpy_array[12]'], {}), '(input_numpy_array[12])\n', (27815, 27838), False, 'import math\n'), ((27893, 27925), 'math.sqrt', 'math.sqrt', (['input_numpy_array[73]'], {}), '(input_numpy_array[73])\n', (27902, 27925), False, 'import math\n'), ((27980, 28012), 'math.sqrt', 'math.sqrt', (['input_numpy_array[22]'], {}), '(input_numpy_array[22])\n', (27989, 28012), False, 'import math\n'), ((28067, 28098), 'math.sqrt', 'math.sqrt', (['input_numpy_array[1]'], {}), '(input_numpy_array[1])\n', (28076, 28098), False, 'import math\n'), ((28153, 28185), 'math.sqrt', 'math.sqrt', (['input_numpy_array[23]'], {}), '(input_numpy_array[23])\n', (28162, 28185), False, 'import math\n'), ((28240, 28272), 'math.sqrt', 'math.sqrt', (['input_numpy_array[37]'], {}), '(input_numpy_array[37])\n', (28249, 28272), False, 'import math\n'), ((28327, 28359), 'math.sqrt', 'math.sqrt', (['input_numpy_array[98]'], {}), '(input_numpy_array[98])\n', (28336, 28359), False, 'import math\n'), ((28414, 28446), 'math.sqrt', 'math.sqrt', (['input_numpy_array[39]'], {}), '(input_numpy_array[39])\n', (28423, 28446), False, 'import math\n'), ((28501, 28533), 'math.sqrt', 'math.sqrt', (['input_numpy_array[37]'], {}), '(input_numpy_array[37])\n', (28510, 28533), False, 'import math\n'), ((28588, 28620), 'math.sqrt', 
'math.sqrt', (['input_numpy_array[11]'], {}), '(input_numpy_array[11])\n', (28597, 28620), False, 'import math\n'), ((28675, 28707), 'math.sqrt', 'math.sqrt', (['input_numpy_array[57]'], {}), '(input_numpy_array[57])\n', (28684, 28707), False, 'import math\n'), ((28762, 28794), 'math.sqrt', 'math.sqrt', (['input_numpy_array[34]'], {}), '(input_numpy_array[34])\n', (28771, 28794), False, 'import math\n'), ((28849, 28881), 'math.sqrt', 'math.sqrt', (['input_numpy_array[63]'], {}), '(input_numpy_array[63])\n', (28858, 28881), False, 'import math\n'), ((28936, 28968), 'math.sqrt', 'math.sqrt', (['input_numpy_array[39]'], {}), '(input_numpy_array[39])\n', (28945, 28968), False, 'import math\n'), ((29022, 29054), 'math.sqrt', 'math.sqrt', (['input_numpy_array[61]'], {}), '(input_numpy_array[61])\n', (29031, 29054), False, 'import math\n'), ((29109, 29142), 'math.sqrt', 'math.sqrt', (['input_numpy_array[100]'], {}), '(input_numpy_array[100])\n', (29118, 29142), False, 'import math\n'), ((29197, 29229), 'math.sqrt', 'math.sqrt', (['input_numpy_array[94]'], {}), '(input_numpy_array[94])\n', (29206, 29229), False, 'import math\n'), ((29284, 29316), 'math.sqrt', 'math.sqrt', (['input_numpy_array[35]'], {}), '(input_numpy_array[35])\n', (29293, 29316), False, 'import math\n'), ((29371, 29403), 'math.sqrt', 'math.sqrt', (['input_numpy_array[36]'], {}), '(input_numpy_array[36])\n', (29380, 29403), False, 'import math\n'), ((29458, 29490), 'math.sqrt', 'math.sqrt', (['input_numpy_array[67]'], {}), '(input_numpy_array[67])\n', (29467, 29490), False, 'import math\n'), ((29545, 29577), 'math.sqrt', 'math.sqrt', (['input_numpy_array[84]'], {}), '(input_numpy_array[84])\n', (29554, 29577), False, 'import math\n'), ((29632, 29664), 'math.sqrt', 'math.sqrt', (['input_numpy_array[50]'], {}), '(input_numpy_array[50])\n', (29641, 29664), False, 'import math\n'), ((29719, 29751), 'math.sqrt', 'math.sqrt', (['input_numpy_array[88]'], {}), '(input_numpy_array[88])\n', (29728, 29751), False, 
'import math\n'), ((29806, 29837), 'math.sqrt', 'math.sqrt', (['input_numpy_array[3]'], {}), '(input_numpy_array[3])\n', (29815, 29837), False, 'import math\n'), ((29892, 29924), 'math.sqrt', 'math.sqrt', (['input_numpy_array[33]'], {}), '(input_numpy_array[33])\n', (29901, 29924), False, 'import math\n'), ((29979, 30011), 'math.sqrt', 'math.sqrt', (['input_numpy_array[15]'], {}), '(input_numpy_array[15])\n', (29988, 30011), False, 'import math\n'), ((30065, 30097), 'math.sqrt', 'math.sqrt', (['input_numpy_array[77]'], {}), '(input_numpy_array[77])\n', (30074, 30097), False, 'import math\n'), ((30152, 30184), 'math.sqrt', 'math.sqrt', (['input_numpy_array[37]'], {}), '(input_numpy_array[37])\n', (30161, 30184), False, 'import math\n'), ((30239, 30271), 'math.sqrt', 'math.sqrt', (['input_numpy_array[13]'], {}), '(input_numpy_array[13])\n', (30248, 30271), False, 'import math\n'), ((30326, 30358), 'math.sqrt', 'math.sqrt', (['input_numpy_array[14]'], {}), '(input_numpy_array[14])\n', (30335, 30358), False, 'import math\n'), ((30413, 30445), 'math.sqrt', 'math.sqrt', (['input_numpy_array[62]'], {}), '(input_numpy_array[62])\n', (30422, 30445), False, 'import math\n'), ((30499, 30531), 'math.sqrt', 'math.sqrt', (['input_numpy_array[35]'], {}), '(input_numpy_array[35])\n', (30508, 30531), False, 'import math\n'), ((30586, 30618), 'math.sqrt', 'math.sqrt', (['input_numpy_array[10]'], {}), '(input_numpy_array[10])\n', (30595, 30618), False, 'import math\n'), ((30673, 30705), 'math.sqrt', 'math.sqrt', (['input_numpy_array[63]'], {}), '(input_numpy_array[63])\n', (30682, 30705), False, 'import math\n'), ((30760, 30792), 'math.sqrt', 'math.sqrt', (['input_numpy_array[12]'], {}), '(input_numpy_array[12])\n', (30769, 30792), False, 'import math\n'), ((30847, 30879), 'math.sqrt', 'math.sqrt', (['input_numpy_array[69]'], {}), '(input_numpy_array[69])\n', (30856, 30879), False, 'import math\n'), ((30934, 30966), 'math.sqrt', 'math.sqrt', (['input_numpy_array[30]'], {}), 
'(input_numpy_array[30])\n', (30943, 30966), False, 'import math\n'), ((31021, 31053), 'math.sqrt', 'math.sqrt', (['input_numpy_array[13]'], {}), '(input_numpy_array[13])\n', (31030, 31053), False, 'import math\n'), ((31108, 31140), 'math.sqrt', 'math.sqrt', (['input_numpy_array[64]'], {}), '(input_numpy_array[64])\n', (31117, 31140), False, 'import math\n'), ((31195, 31227), 'math.sqrt', 'math.sqrt', (['input_numpy_array[91]'], {}), '(input_numpy_array[91])\n', (31204, 31227), False, 'import math\n'), ((31282, 31314), 'math.sqrt', 'math.sqrt', (['input_numpy_array[95]'], {}), '(input_numpy_array[95])\n', (31291, 31314), False, 'import math\n'), ((31369, 31401), 'math.sqrt', 'math.sqrt', (['input_numpy_array[39]'], {}), '(input_numpy_array[39])\n', (31378, 31401), False, 'import math\n'), ((31456, 31488), 'math.sqrt', 'math.sqrt', (['input_numpy_array[31]'], {}), '(input_numpy_array[31])\n', (31465, 31488), False, 'import math\n'), ((31543, 31575), 'math.sqrt', 'math.sqrt', (['input_numpy_array[87]'], {}), '(input_numpy_array[87])\n', (31552, 31575), False, 'import math\n'), ((31630, 31662), 'math.sqrt', 'math.sqrt', (['input_numpy_array[93]'], {}), '(input_numpy_array[93])\n', (31639, 31662), False, 'import math\n'), ((31717, 31749), 'math.sqrt', 'math.sqrt', (['input_numpy_array[67]'], {}), '(input_numpy_array[67])\n', (31726, 31749), False, 'import math\n'), ((31804, 31836), 'math.sqrt', 'math.sqrt', (['input_numpy_array[30]'], {}), '(input_numpy_array[30])\n', (31813, 31836), False, 'import math\n'), ((31891, 31923), 'math.sqrt', 'math.sqrt', (['input_numpy_array[53]'], {}), '(input_numpy_array[53])\n', (31900, 31923), False, 'import math\n'), ((31978, 32009), 'math.sqrt', 'math.sqrt', (['input_numpy_array[9]'], {}), '(input_numpy_array[9])\n', (31987, 32009), False, 'import math\n'), ((32064, 32096), 'math.sqrt', 'math.sqrt', (['input_numpy_array[59]'], {}), '(input_numpy_array[59])\n', (32073, 32096), False, 'import math\n'), ((32151, 32183), 'math.sqrt', 
'math.sqrt', (['input_numpy_array[77]'], {}), '(input_numpy_array[77])\n', (32160, 32183), False, 'import math\n'), ((32238, 32270), 'math.sqrt', 'math.sqrt', (['input_numpy_array[41]'], {}), '(input_numpy_array[41])\n', (32247, 32270), False, 'import math\n'), ((32325, 32357), 'math.sqrt', 'math.sqrt', (['input_numpy_array[39]'], {}), '(input_numpy_array[39])\n', (32334, 32357), False, 'import math\n'), ((32412, 32444), 'math.sqrt', 'math.sqrt', (['input_numpy_array[89]'], {}), '(input_numpy_array[89])\n', (32421, 32444), False, 'import math\n'), ((32499, 32531), 'math.sqrt', 'math.sqrt', (['input_numpy_array[56]'], {}), '(input_numpy_array[56])\n', (32508, 32531), False, 'import math\n'), ((32585, 32617), 'math.sqrt', 'math.sqrt', (['input_numpy_array[42]'], {}), '(input_numpy_array[42])\n', (32594, 32617), False, 'import math\n'), ((32671, 32703), 'math.sqrt', 'math.sqrt', (['input_numpy_array[92]'], {}), '(input_numpy_array[92])\n', (32680, 32703), False, 'import math\n'), ((32758, 32790), 'math.sqrt', 'math.sqrt', (['input_numpy_array[23]'], {}), '(input_numpy_array[23])\n', (32767, 32790), False, 'import math\n'), ((32845, 32877), 'math.sqrt', 'math.sqrt', (['input_numpy_array[83]'], {}), '(input_numpy_array[83])\n', (32854, 32877), False, 'import math\n'), ((32932, 32964), 'math.sqrt', 'math.sqrt', (['input_numpy_array[60]'], {}), '(input_numpy_array[60])\n', (32941, 32964), False, 'import math\n'), ((33019, 33051), 'math.sqrt', 'math.sqrt', (['input_numpy_array[29]'], {}), '(input_numpy_array[29])\n', (33028, 33051), False, 'import math\n'), ((33106, 33137), 'math.sqrt', 'math.sqrt', (['input_numpy_array[9]'], {}), '(input_numpy_array[9])\n', (33115, 33137), False, 'import math\n'), ((33192, 33224), 'math.sqrt', 'math.sqrt', (['input_numpy_array[71]'], {}), '(input_numpy_array[71])\n', (33201, 33224), False, 'import math\n'), ((33279, 33311), 'math.sqrt', 'math.sqrt', (['input_numpy_array[73]'], {}), '(input_numpy_array[73])\n', (33288, 33311), False, 
'import math\n'), ((33366, 33399), 'math.sqrt', 'math.sqrt', (['input_numpy_array[100]'], {}), '(input_numpy_array[100])\n', (33375, 33399), False, 'import math\n'), ((33454, 33486), 'math.sqrt', 'math.sqrt', (['input_numpy_array[56]'], {}), '(input_numpy_array[56])\n', (33463, 33486), False, 'import math\n'), ((33541, 33573), 'math.sqrt', 'math.sqrt', (['input_numpy_array[94]'], {}), '(input_numpy_array[94])\n', (33550, 33573), False, 'import math\n'), ((33628, 33660), 'math.sqrt', 'math.sqrt', (['input_numpy_array[43]'], {}), '(input_numpy_array[43])\n', (33637, 33660), False, 'import math\n'), ((33714, 33746), 'math.sqrt', 'math.sqrt', (['input_numpy_array[87]'], {}), '(input_numpy_array[87])\n', (33723, 33746), False, 'import math\n'), ((33801, 33833), 'math.sqrt', 'math.sqrt', (['input_numpy_array[65]'], {}), '(input_numpy_array[65])\n', (33810, 33833), False, 'import math\n'), ((33888, 33920), 'math.sqrt', 'math.sqrt', (['input_numpy_array[75]'], {}), '(input_numpy_array[75])\n', (33897, 33920), False, 'import math\n'), ((33975, 34007), 'math.sqrt', 'math.sqrt', (['input_numpy_array[46]'], {}), '(input_numpy_array[46])\n', (33984, 34007), False, 'import math\n'), ((34062, 34094), 'math.sqrt', 'math.sqrt', (['input_numpy_array[50]'], {}), '(input_numpy_array[50])\n', (34071, 34094), False, 'import math\n'), ((34149, 34181), 'math.sqrt', 'math.sqrt', (['input_numpy_array[92]'], {}), '(input_numpy_array[92])\n', (34158, 34181), False, 'import math\n'), ((34235, 34267), 'math.sqrt', 'math.sqrt', (['input_numpy_array[42]'], {}), '(input_numpy_array[42])\n', (34244, 34267), False, 'import math\n'), ((34321, 34353), 'math.sqrt', 'math.sqrt', (['input_numpy_array[84]'], {}), '(input_numpy_array[84])\n', (34330, 34353), False, 'import math\n'), ((34408, 34440), 'math.sqrt', 'math.sqrt', (['input_numpy_array[50]'], {}), '(input_numpy_array[50])\n', (34417, 34440), False, 'import math\n'), ((34495, 34527), 'math.sqrt', 'math.sqrt', (['input_numpy_array[65]'], {}), 
'(input_numpy_array[65])\n', (34504, 34527), False, 'import math\n'), ((34582, 34614), 'math.sqrt', 'math.sqrt', (['input_numpy_array[82]'], {}), '(input_numpy_array[82])\n', (34591, 34614), False, 'import math\n'), ((34669, 34701), 'math.sqrt', 'math.sqrt', (['input_numpy_array[32]'], {}), '(input_numpy_array[32])\n', (34678, 34701), False, 'import math\n'), ((34756, 34788), 'math.sqrt', 'math.sqrt', (['input_numpy_array[17]'], {}), '(input_numpy_array[17])\n', (34765, 34788), False, 'import math\n'), ((34843, 34874), 'math.sqrt', 'math.sqrt', (['input_numpy_array[5]'], {}), '(input_numpy_array[5])\n', (34852, 34874), False, 'import math\n'), ((34929, 34961), 'math.sqrt', 'math.sqrt', (['input_numpy_array[33]'], {}), '(input_numpy_array[33])\n', (34938, 34961), False, 'import math\n'), ((35016, 35048), 'math.sqrt', 'math.sqrt', (['input_numpy_array[86]'], {}), '(input_numpy_array[86])\n', (35025, 35048), False, 'import math\n'), ((35102, 35134), 'math.sqrt', 'math.sqrt', (['input_numpy_array[41]'], {}), '(input_numpy_array[41])\n', (35111, 35134), False, 'import math\n'), ((35189, 35221), 'math.sqrt', 'math.sqrt', (['input_numpy_array[16]'], {}), '(input_numpy_array[16])\n', (35198, 35221), False, 'import math\n'), ((35276, 35308), 'math.sqrt', 'math.sqrt', (['input_numpy_array[36]'], {}), '(input_numpy_array[36])\n', (35285, 35308), False, 'import math\n'), ((35363, 35394), 'math.sqrt', 'math.sqrt', (['input_numpy_array[4]'], {}), '(input_numpy_array[4])\n', (35372, 35394), False, 'import math\n'), ((35449, 35481), 'math.sqrt', 'math.sqrt', (['input_numpy_array[64]'], {}), '(input_numpy_array[64])\n', (35458, 35481), False, 'import math\n'), ((35535, 35566), 'math.sqrt', 'math.sqrt', (['input_numpy_array[1]'], {}), '(input_numpy_array[1])\n', (35544, 35566), False, 'import math\n'), ((35621, 35653), 'math.sqrt', 'math.sqrt', (['input_numpy_array[93]'], {}), '(input_numpy_array[93])\n', (35630, 35653), False, 'import math\n'), ((35708, 35740), 'math.sqrt', 
'math.sqrt', (['input_numpy_array[50]'], {}), '(input_numpy_array[50])\n', (35717, 35740), False, 'import math\n'), ((35795, 35827), 'math.sqrt', 'math.sqrt', (['input_numpy_array[41]'], {}), '(input_numpy_array[41])\n', (35804, 35827), False, 'import math\n'), ((35882, 35914), 'math.sqrt', 'math.sqrt', (['input_numpy_array[72]'], {}), '(input_numpy_array[72])\n', (35891, 35914), False, 'import math\n'), ((35969, 36001), 'math.sqrt', 'math.sqrt', (['input_numpy_array[10]'], {}), '(input_numpy_array[10])\n', (35978, 36001), False, 'import math\n'), ((36056, 36087), 'math.sqrt', 'math.sqrt', (['input_numpy_array[6]'], {}), '(input_numpy_array[6])\n', (36065, 36087), False, 'import math\n'), ((36142, 36174), 'math.sqrt', 'math.sqrt', (['input_numpy_array[83]'], {}), '(input_numpy_array[83])\n', (36151, 36174), False, 'import math\n'), ((36229, 36261), 'math.sqrt', 'math.sqrt', (['input_numpy_array[17]'], {}), '(input_numpy_array[17])\n', (36238, 36261), False, 'import math\n'), ((36316, 36348), 'math.sqrt', 'math.sqrt', (['input_numpy_array[57]'], {}), '(input_numpy_array[57])\n', (36325, 36348), False, 'import math\n'), ((36403, 36435), 'math.sqrt', 'math.sqrt', (['input_numpy_array[33]'], {}), '(input_numpy_array[33])\n', (36412, 36435), False, 'import math\n'), ((36490, 36522), 'math.sqrt', 'math.sqrt', (['input_numpy_array[39]'], {}), '(input_numpy_array[39])\n', (36499, 36522), False, 'import math\n'), ((36577, 36609), 'math.sqrt', 'math.sqrt', (['input_numpy_array[15]'], {}), '(input_numpy_array[15])\n', (36586, 36609), False, 'import math\n'), ((36664, 36696), 'math.sqrt', 'math.sqrt', (['input_numpy_array[85]'], {}), '(input_numpy_array[85])\n', (36673, 36696), False, 'import math\n'), ((36751, 36783), 'math.sqrt', 'math.sqrt', (['input_numpy_array[94]'], {}), '(input_numpy_array[94])\n', (36760, 36783), False, 'import math\n'), ((36838, 36870), 'math.sqrt', 'math.sqrt', (['input_numpy_array[95]'], {}), '(input_numpy_array[95])\n', (36847, 36870), False, 
'import math\n'), ((36925, 36957), 'math.sqrt', 'math.sqrt', (['input_numpy_array[56]'], {}), '(input_numpy_array[56])\n', (36934, 36957), False, 'import math\n'), ((37012, 37044), 'math.sqrt', 'math.sqrt', (['input_numpy_array[90]'], {}), '(input_numpy_array[90])\n', (37021, 37044), False, 'import math\n'), ((37099, 37131), 'math.sqrt', 'math.sqrt', (['input_numpy_array[68]'], {}), '(input_numpy_array[68])\n', (37108, 37131), False, 'import math\n'), ((37186, 37218), 'math.sqrt', 'math.sqrt', (['input_numpy_array[94]'], {}), '(input_numpy_array[94])\n', (37195, 37218), False, 'import math\n'), ((37273, 37305), 'math.sqrt', 'math.sqrt', (['input_numpy_array[29]'], {}), '(input_numpy_array[29])\n', (37282, 37305), False, 'import math\n'), ((37360, 37392), 'math.sqrt', 'math.sqrt', (['input_numpy_array[89]'], {}), '(input_numpy_array[89])\n', (37369, 37392), False, 'import math\n'), ((37447, 37479), 'math.sqrt', 'math.sqrt', (['input_numpy_array[99]'], {}), '(input_numpy_array[99])\n', (37456, 37479), False, 'import math\n'), ((37534, 37566), 'math.sqrt', 'math.sqrt', (['input_numpy_array[78]'], {}), '(input_numpy_array[78])\n', (37543, 37566), False, 'import math\n'), ((37621, 37653), 'math.sqrt', 'math.sqrt', (['input_numpy_array[63]'], {}), '(input_numpy_array[63])\n', (37630, 37653), False, 'import math\n'), ((37708, 37740), 'math.sqrt', 'math.sqrt', (['input_numpy_array[83]'], {}), '(input_numpy_array[83])\n', (37717, 37740), False, 'import math\n'), ((37795, 37827), 'math.sqrt', 'math.sqrt', (['input_numpy_array[42]'], {}), '(input_numpy_array[42])\n', (37804, 37827), False, 'import math\n'), ((37882, 37914), 'math.sqrt', 'math.sqrt', (['input_numpy_array[80]'], {}), '(input_numpy_array[80])\n', (37891, 37914), False, 'import math\n'), ((37969, 38001), 'math.sqrt', 'math.sqrt', (['input_numpy_array[97]'], {}), '(input_numpy_array[97])\n', (37978, 38001), False, 'import math\n'), ((38056, 38088), 'math.sqrt', 'math.sqrt', (['input_numpy_array[17]'], {}), 
'(input_numpy_array[17])\n', (38065, 38088), False, 'import math\n'), ((38143, 38175), 'math.sqrt', 'math.sqrt', (['input_numpy_array[40]'], {}), '(input_numpy_array[40])\n', (38152, 38175), False, 'import math\n'), ((38230, 38263), 'math.sqrt', 'math.sqrt', (['input_numpy_array[100]'], {}), '(input_numpy_array[100])\n', (38239, 38263), False, 'import math\n'), ((38318, 38350), 'math.sqrt', 'math.sqrt', (['input_numpy_array[35]'], {}), '(input_numpy_array[35])\n', (38327, 38350), False, 'import math\n'), ((38405, 38437), 'math.sqrt', 'math.sqrt', (['input_numpy_array[28]'], {}), '(input_numpy_array[28])\n', (38414, 38437), False, 'import math\n'), ((38492, 38524), 'math.sqrt', 'math.sqrt', (['input_numpy_array[23]'], {}), '(input_numpy_array[23])\n', (38501, 38524), False, 'import math\n'), ((38579, 38611), 'math.sqrt', 'math.sqrt', (['input_numpy_array[47]'], {}), '(input_numpy_array[47])\n', (38588, 38611), False, 'import math\n'), ((38666, 38697), 'math.sqrt', 'math.sqrt', (['input_numpy_array[6]'], {}), '(input_numpy_array[6])\n', (38675, 38697), False, 'import math\n'), ((38752, 38784), 'math.sqrt', 'math.sqrt', (['input_numpy_array[88]'], {}), '(input_numpy_array[88])\n', (38761, 38784), False, 'import math\n'), ((38840, 38872), 'math.sqrt', 'math.sqrt', (['input_numpy_array[60]'], {}), '(input_numpy_array[60])\n', (38849, 38872), False, 'import math\n'), ((38927, 38959), 'math.sqrt', 'math.sqrt', (['input_numpy_array[64]'], {}), '(input_numpy_array[64])\n', (38936, 38959), False, 'import math\n'), ((39014, 39046), 'math.sqrt', 'math.sqrt', (['input_numpy_array[96]'], {}), '(input_numpy_array[96])\n', (39023, 39046), False, 'import math\n'), ((39101, 39133), 'math.sqrt', 'math.sqrt', (['input_numpy_array[75]'], {}), '(input_numpy_array[75])\n', (39110, 39133), False, 'import math\n'), ((39188, 39220), 'math.sqrt', 'math.sqrt', (['input_numpy_array[30]'], {}), '(input_numpy_array[30])\n', (39197, 39220), False, 'import math\n'), ((39275, 39307), 'math.sqrt', 
'math.sqrt', (['input_numpy_array[86]'], {}), '(input_numpy_array[86])\n', (39284, 39307), False, 'import math\n'), ((39362, 39394), 'math.sqrt', 'math.sqrt', (['input_numpy_array[11]'], {}), '(input_numpy_array[11])\n', (39371, 39394), False, 'import math\n'), ((39449, 39481), 'math.sqrt', 'math.sqrt', (['input_numpy_array[64]'], {}), '(input_numpy_array[64])\n', (39458, 39481), False, 'import math\n'), ((39536, 39568), 'math.sqrt', 'math.sqrt', (['input_numpy_array[41]'], {}), '(input_numpy_array[41])\n', (39545, 39568), False, 'import math\n'), ((39623, 39654), 'math.sqrt', 'math.sqrt', (['input_numpy_array[3]'], {}), '(input_numpy_array[3])\n', (39632, 39654), False, 'import math\n'), ((39709, 39741), 'math.sqrt', 'math.sqrt', (['input_numpy_array[55]'], {}), '(input_numpy_array[55])\n', (39718, 39741), False, 'import math\n'), ((39796, 39828), 'math.sqrt', 'math.sqrt', (['input_numpy_array[38]'], {}), '(input_numpy_array[38])\n', (39805, 39828), False, 'import math\n'), ((39883, 39915), 'math.sqrt', 'math.sqrt', (['input_numpy_array[81]'], {}), '(input_numpy_array[81])\n', (39892, 39915), False, 'import math\n'), ((39969, 40001), 'math.sqrt', 'math.sqrt', (['input_numpy_array[47]'], {}), '(input_numpy_array[47])\n', (39978, 40001), False, 'import math\n'), ((40056, 40088), 'math.sqrt', 'math.sqrt', (['input_numpy_array[15]'], {}), '(input_numpy_array[15])\n', (40065, 40088), False, 'import math\n'), ((40143, 40175), 'math.sqrt', 'math.sqrt', (['input_numpy_array[85]'], {}), '(input_numpy_array[85])\n', (40152, 40175), False, 'import math\n'), ((40230, 40262), 'math.sqrt', 'math.sqrt', (['input_numpy_array[54]'], {}), '(input_numpy_array[54])\n', (40239, 40262), False, 'import math\n'), ((40317, 40349), 'math.sqrt', 'math.sqrt', (['input_numpy_array[19]'], {}), '(input_numpy_array[19])\n', (40326, 40349), False, 'import math\n'), ((40404, 40436), 'math.sqrt', 'math.sqrt', (['input_numpy_array[39]'], {}), '(input_numpy_array[39])\n', (40413, 40436), False, 
'import math\n'), ((40491, 40523), 'math.sqrt', 'math.sqrt', (['input_numpy_array[54]'], {}), '(input_numpy_array[54])\n', (40500, 40523), False, 'import math\n'), ((40578, 40610), 'math.sqrt', 'math.sqrt', (['input_numpy_array[23]'], {}), '(input_numpy_array[23])\n', (40587, 40610), False, 'import math\n'), ((40665, 40697), 'math.sqrt', 'math.sqrt', (['input_numpy_array[22]'], {}), '(input_numpy_array[22])\n', (40674, 40697), False, 'import math\n'), ((40752, 40784), 'math.sqrt', 'math.sqrt', (['input_numpy_array[17]'], {}), '(input_numpy_array[17])\n', (40761, 40784), False, 'import math\n'), ((40839, 40870), 'math.sqrt', 'math.sqrt', (['input_numpy_array[2]'], {}), '(input_numpy_array[2])\n', (40848, 40870), False, 'import math\n'), ((40925, 40957), 'math.sqrt', 'math.sqrt', (['input_numpy_array[69]'], {}), '(input_numpy_array[69])\n', (40934, 40957), False, 'import math\n'), ((41012, 41044), 'math.sqrt', 'math.sqrt', (['input_numpy_array[25]'], {}), '(input_numpy_array[25])\n', (41021, 41044), False, 'import math\n'), ((41099, 41131), 'math.sqrt', 'math.sqrt', (['input_numpy_array[67]'], {}), '(input_numpy_array[67])\n', (41108, 41131), False, 'import math\n'), ((41185, 41217), 'math.sqrt', 'math.sqrt', (['input_numpy_array[42]'], {}), '(input_numpy_array[42])\n', (41194, 41217), False, 'import math\n'), ((41272, 41304), 'math.sqrt', 'math.sqrt', (['input_numpy_array[33]'], {}), '(input_numpy_array[33])\n', (41281, 41304), False, 'import math\n'), ((41359, 41391), 'math.sqrt', 'math.sqrt', (['input_numpy_array[97]'], {}), '(input_numpy_array[97])\n', (41368, 41391), False, 'import math\n'), ((41446, 41478), 'math.sqrt', 'math.sqrt', (['input_numpy_array[20]'], {}), '(input_numpy_array[20])\n', (41455, 41478), False, 'import math\n'), ((41533, 41565), 'math.sqrt', 'math.sqrt', (['input_numpy_array[58]'], {}), '(input_numpy_array[58])\n', (41542, 41565), False, 'import math\n'), ((41620, 41652), 'math.sqrt', 'math.sqrt', (['input_numpy_array[38]'], {}), 
'(input_numpy_array[38])\n', (41629, 41652), False, 'import math\n'), ((41707, 41739), 'math.sqrt', 'math.sqrt', (['input_numpy_array[47]'], {}), '(input_numpy_array[47])\n', (41716, 41739), False, 'import math\n'), ((41793, 41825), 'math.sqrt', 'math.sqrt', (['input_numpy_array[93]'], {}), '(input_numpy_array[93])\n', (41802, 41825), False, 'import math\n'), ((41880, 41912), 'math.sqrt', 'math.sqrt', (['input_numpy_array[96]'], {}), '(input_numpy_array[96])\n', (41889, 41912), False, 'import math\n'), ((41967, 41999), 'math.sqrt', 'math.sqrt', (['input_numpy_array[60]'], {}), '(input_numpy_array[60])\n', (41976, 41999), False, 'import math\n'), ((42054, 42085), 'math.sqrt', 'math.sqrt', (['input_numpy_array[2]'], {}), '(input_numpy_array[2])\n', (42063, 42085), False, 'import math\n'), ((42140, 42172), 'math.sqrt', 'math.sqrt', (['input_numpy_array[64]'], {}), '(input_numpy_array[64])\n', (42149, 42172), False, 'import math\n'), ((42227, 42259), 'math.sqrt', 'math.sqrt', (['input_numpy_array[26]'], {}), '(input_numpy_array[26])\n', (42236, 42259), False, 'import math\n'), ((42314, 42346), 'math.sqrt', 'math.sqrt', (['input_numpy_array[26]'], {}), '(input_numpy_array[26])\n', (42323, 42346), False, 'import math\n'), ((42401, 42432), 'math.sqrt', 'math.sqrt', (['input_numpy_array[7]'], {}), '(input_numpy_array[7])\n', (42410, 42432), False, 'import math\n'), ((42487, 42519), 'math.sqrt', 'math.sqrt', (['input_numpy_array[94]'], {}), '(input_numpy_array[94])\n', (42496, 42519), False, 'import math\n'), ((42573, 42605), 'math.sqrt', 'math.sqrt', (['input_numpy_array[54]'], {}), '(input_numpy_array[54])\n', (42582, 42605), False, 'import math\n'), ((42660, 42692), 'math.sqrt', 'math.sqrt', (['input_numpy_array[91]'], {}), '(input_numpy_array[91])\n', (42669, 42692), False, 'import math\n'), ((42747, 42779), 'math.sqrt', 'math.sqrt', (['input_numpy_array[35]'], {}), '(input_numpy_array[35])\n', (42756, 42779), False, 'import math\n'), ((42834, 42866), 'math.sqrt', 
'math.sqrt', (['input_numpy_array[12]'], {}), '(input_numpy_array[12])\n', (42843, 42866), False, 'import math\n'), ((42921, 42953), 'math.sqrt', 'math.sqrt', (['input_numpy_array[60]'], {}), '(input_numpy_array[60])\n', (42930, 42953), False, 'import math\n'), ((43008, 43040), 'math.sqrt', 'math.sqrt', (['input_numpy_array[45]'], {}), '(input_numpy_array[45])\n', (43017, 43040), False, 'import math\n'), ((43095, 43126), 'math.sqrt', 'math.sqrt', (['input_numpy_array[8]'], {}), '(input_numpy_array[8])\n', (43104, 43126), False, 'import math\n'), ((43181, 43213), 'math.sqrt', 'math.sqrt', (['input_numpy_array[29]'], {}), '(input_numpy_array[29])\n', (43190, 43213), False, 'import math\n'), ((43268, 43300), 'math.sqrt', 'math.sqrt', (['input_numpy_array[43]'], {}), '(input_numpy_array[43])\n', (43277, 43300), False, 'import math\n'), ((43355, 43387), 'math.sqrt', 'math.sqrt', (['input_numpy_array[88]'], {}), '(input_numpy_array[88])\n', (43364, 43387), False, 'import math\n'), ((43441, 43473), 'math.sqrt', 'math.sqrt', (['input_numpy_array[18]'], {}), '(input_numpy_array[18])\n', (43450, 43473), False, 'import math\n'), ((43528, 43560), 'math.sqrt', 'math.sqrt', (['input_numpy_array[71]'], {}), '(input_numpy_array[71])\n', (43537, 43560), False, 'import math\n'), ((43615, 43647), 'math.sqrt', 'math.sqrt', (['input_numpy_array[34]'], {}), '(input_numpy_array[34])\n', (43624, 43647), False, 'import math\n'), ((43702, 43734), 'math.sqrt', 'math.sqrt', (['input_numpy_array[70]'], {}), '(input_numpy_array[70])\n', (43711, 43734), False, 'import math\n'), ((43789, 43821), 'math.sqrt', 'math.sqrt', (['input_numpy_array[48]'], {}), '(input_numpy_array[48])\n', (43798, 43821), False, 'import math\n'), ((43875, 43907), 'math.sqrt', 'math.sqrt', (['input_numpy_array[36]'], {}), '(input_numpy_array[36])\n', (43884, 43907), False, 'import math\n'), ((43962, 43994), 'math.sqrt', 'math.sqrt', (['input_numpy_array[89]'], {}), '(input_numpy_array[89])\n', (43971, 43994), False, 
'import math\n'), ((44049, 44081), 'math.sqrt', 'math.sqrt', (['input_numpy_array[67]'], {}), '(input_numpy_array[67])\n', (44058, 44081), False, 'import math\n'), ((44136, 44168), 'math.sqrt', 'math.sqrt', (['input_numpy_array[90]'], {}), '(input_numpy_array[90])\n', (44145, 44168), False, 'import math\n'), ((44223, 44255), 'math.sqrt', 'math.sqrt', (['input_numpy_array[45]'], {}), '(input_numpy_array[45])\n', (44232, 44255), False, 'import math\n'), ((44310, 44342), 'math.sqrt', 'math.sqrt', (['input_numpy_array[53]'], {}), '(input_numpy_array[53])\n', (44319, 44342), False, 'import math\n'), ((44397, 44429), 'math.sqrt', 'math.sqrt', (['input_numpy_array[80]'], {}), '(input_numpy_array[80])\n', (44406, 44429), False, 'import math\n'), ((44484, 44516), 'math.sqrt', 'math.sqrt', (['input_numpy_array[61]'], {}), '(input_numpy_array[61])\n', (44493, 44516), False, 'import math\n'), ((44571, 44603), 'math.sqrt', 'math.sqrt', (['input_numpy_array[49]'], {}), '(input_numpy_array[49])\n', (44580, 44603), False, 'import math\n'), ((44658, 44690), 'math.sqrt', 'math.sqrt', (['input_numpy_array[27]'], {}), '(input_numpy_array[27])\n', (44667, 44690), False, 'import math\n'), ((44745, 44776), 'math.sqrt', 'math.sqrt', (['input_numpy_array[9]'], {}), '(input_numpy_array[9])\n', (44754, 44776), False, 'import math\n'), ((44830, 44862), 'math.sqrt', 'math.sqrt', (['input_numpy_array[99]'], {}), '(input_numpy_array[99])\n', (44839, 44862), False, 'import math\n'), ((44917, 44948), 'math.sqrt', 'math.sqrt', (['input_numpy_array[8]'], {}), '(input_numpy_array[8])\n', (44926, 44948), False, 'import math\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Multiplies two square matrices together using multiple blocks and shared memory.
Each thread block is assigned a "tile" of the resulting matrix and is responsible
for generating the elements in that tile. Each thread in a block computes one element
of the tile.
"""
import numpy as np
from numpy import linalg as la
from pycuda import driver, compiler, gpuarray, tools
# -- initialize the device
import pycuda.autoinit
# CUDA C source for a tiled matrix multiplication; %(...)s placeholders are
# filled in below before compilation.
kernel_code_template = """
__global__ void MatrixMulKernel(float *A, float *B, float *C)
{
  const uint wA = %(MATRIX_SIZE)s;
  const uint wB = %(MATRIX_SIZE)s;
  // Block index
  const uint bx = blockIdx.x;
  const uint by = blockIdx.y;
  // Thread index
  const uint tx = threadIdx.x;
  const uint ty = threadIdx.y;
  // Index of the first sub-matrix of A processed by the block
  const uint aBegin = wA * %(BLOCK_SIZE)s * by;
  // Index of the last sub-matrix of A processed by the block
  const uint aEnd = aBegin + wA - 1;
  // Step size used to iterate through the sub-matrices of A
  const uint aStep = %(BLOCK_SIZE)s;
  // Index of the first sub-matrix of B processed by the block
  const uint bBegin = %(BLOCK_SIZE)s * bx;
  // Step size used to iterate through the sub-matrices of B
  const uint bStep = %(BLOCK_SIZE)s * wB;
  // The element of the block sub-matrix that is computed
  // by the thread
  float Csub = 0;
  // Loop over all the sub-matrices of A and B required to
  // compute the block sub-matrix
  for (int a = aBegin, b = bBegin;
       a <= aEnd;
       a += aStep, b += bStep)
    {
      // Shared memory for the sub-matrix of A
      __shared__ float As[%(BLOCK_SIZE)s][%(BLOCK_SIZE)s];
      // Shared memory for the sub-matrix of B
      __shared__ float Bs[%(BLOCK_SIZE)s][%(BLOCK_SIZE)s];
      // Load the matrices from global memory to shared memory
      // each thread loads one element of each matrix
      As[ty][tx] = A[a + wA * ty + tx];
      Bs[ty][tx] = B[b + wB * ty + tx];
      // Synchronize to make sure the matrices are loaded
      __syncthreads();
      // Multiply the two matrices together;
      // each thread computes one element
      // of the block sub-matrix
      for (int k = 0; k < %(BLOCK_SIZE)s; ++k)
        Csub += As[ty][k] * Bs[k][tx];
      // Synchronize to make sure that the preceding
      // computation is done before loading two new
      // sub-matrices of A and B in the next iteration
      __syncthreads();
    }
  // Write the block sub-matrix to global memory;
  // each thread writes one element
  const uint c = wB * %(BLOCK_SIZE)s * by + %(BLOCK_SIZE)s * bx;
  C[c + wB * ty + tx] = Csub;
}
"""
# define the (square) matrix size
MATRIX_SIZE = 4
# define size of blocks and tiles sub-matrix
# (we assume that the block size is same as tile size)
TILE_SIZE = 2
BLOCK_SIZE = TILE_SIZE
# The tiled kernel assumes the matrix divides evenly into tiles; an uneven
# size would read/write out of bounds.
assert MATRIX_SIZE % TILE_SIZE == 0, "MATRIX_SIZE must be a multiple of TILE_SIZE"
# create two random square matrices
a_cpu = np.random.randn(MATRIX_SIZE, MATRIX_SIZE).astype(np.float32)
b_cpu = np.random.randn(MATRIX_SIZE, MATRIX_SIZE).astype(np.float32)
# compute reference on the CPU to verify GPU computation
c_cpu = np.dot(a_cpu, b_cpu)
# transfer host (CPU) memory to device (GPU) memory
a_gpu = gpuarray.to_gpu(a_cpu)
b_gpu = gpuarray.to_gpu(b_cpu)
# create empty gpu array for the result (C = A * B)
c_gpu = gpuarray.empty((MATRIX_SIZE, MATRIX_SIZE), np.float32)
# get the kernel code from the template
# by specifying the constants MATRIX_SIZE and BLOCK_SIZE
kernel_code = kernel_code_template % {
    'MATRIX_SIZE': MATRIX_SIZE,
    'BLOCK_SIZE': BLOCK_SIZE,
}
# compile the kernel code
mod = compiler.SourceModule(kernel_code)
# get the kernel function from the compiled module
matrixmul = mod.get_function("MatrixMulKernel")
# call the kernel on the card
matrixmul(
    # inputs
    a_gpu, b_gpu,
    # output
    c_gpu,
    # grid of multiple blocks
    grid = (MATRIX_SIZE // TILE_SIZE, MATRIX_SIZE // TILE_SIZE),
    # block of multiple threads
    block = (TILE_SIZE, TILE_SIZE, 1),
)
# print the results
print("-" * 80)
print("Matrix A (GPU):")
print(a_gpu.get())
print("-" * 80)
print("Matrix B (GPU):")
print(b_gpu.get())
print("-" * 80)
print("Matrix C (GPU):")
print(c_gpu.get())
print("-" * 80)
print("CPU-GPU difference:")
print(c_cpu - c_gpu.get())
print("L2 norm:", la.norm(c_cpu - c_gpu.get()))
# BUG FIX: the allclose result used to be computed and silently discarded;
# report the verification outcome so the script actually validates the GPU run.
print("Results match:", np.allclose(c_cpu, c_gpu.get()))
| [
"pycuda.compiler.SourceModule",
"pycuda.gpuarray.empty",
"numpy.dot",
"pycuda.gpuarray.to_gpu",
"numpy.random.randn"
] | [((3118, 3138), 'numpy.dot', 'np.dot', (['a_cpu', 'b_cpu'], {}), '(a_cpu, b_cpu)\n', (3124, 3138), True, 'import numpy as np\n'), ((3201, 3223), 'pycuda.gpuarray.to_gpu', 'gpuarray.to_gpu', (['a_cpu'], {}), '(a_cpu)\n', (3216, 3223), False, 'from pycuda import driver, compiler, gpuarray, tools\n'), ((3233, 3255), 'pycuda.gpuarray.to_gpu', 'gpuarray.to_gpu', (['b_cpu'], {}), '(b_cpu)\n', (3248, 3255), False, 'from pycuda import driver, compiler, gpuarray, tools\n'), ((3317, 3371), 'pycuda.gpuarray.empty', 'gpuarray.empty', (['(MATRIX_SIZE, MATRIX_SIZE)', 'np.float32'], {}), '((MATRIX_SIZE, MATRIX_SIZE), np.float32)\n', (3331, 3371), False, 'from pycuda import driver, compiler, gpuarray, tools\n'), ((3612, 3646), 'pycuda.compiler.SourceModule', 'compiler.SourceModule', (['kernel_code'], {}), '(kernel_code)\n', (3633, 3646), False, 'from pycuda import driver, compiler, gpuarray, tools\n'), ((2922, 2963), 'numpy.random.randn', 'np.random.randn', (['MATRIX_SIZE', 'MATRIX_SIZE'], {}), '(MATRIX_SIZE, MATRIX_SIZE)\n', (2937, 2963), True, 'import numpy as np\n'), ((2991, 3032), 'numpy.random.randn', 'np.random.randn', (['MATRIX_SIZE', 'MATRIX_SIZE'], {}), '(MATRIX_SIZE, MATRIX_SIZE)\n', (3006, 3032), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Code for creating performance plots
.. codeauthor:: <NAME> <<EMAIL>>
"""
import os
os.environ["NUMBA_NUM_THREADS"] = "1" # check single thread performance
import functools
import timeit
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from pde import UnitGrid
from pde.tools.misc import estimate_computation_speed
from pde.tools.output import display_progress
# OpenCV is an optional benchmark participant: if it is missing we warn once
# and set the callable to None so later code can skip the comparison.
try:
    import cv2
except ImportError:
    print('Warning: OpenCV is not available and will not be used to compare')
    opencv_laplace = None
else:
    # Pre-bind the Laplacian arguments so the benchmark calls it with a
    # single array argument, matching the signature of the other operators.
    opencv_laplace = functools.partial(
        cv2.Laplacian, ddepth=cv2.CV_64F, borderType=cv2.BORDER_REFLECT
    )
def time_function(func, arg, repeat=3):
    """Estimate how long a single call of `func(arg)` takes.

    Args:
        func (callable): The function to benchmark
        arg: The single argument passed to the function on each call
        repeat (int): How many timing rounds to run; the best one is kept

    Returns:
        float: Estimated duration of one call of the function
    """
    # Calibrate how many calls fit in a reasonable measurement window
    calls_per_round = int(estimate_computation_speed(func, arg))
    bound_call = functools.partial(func, arg)
    round_times = timeit.repeat(bound_call, number=calls_per_round, repeat=repeat)
    # The fastest round is the least noisy estimate
    return min(round_times) / calls_per_round
def get_performance_data(periodic=False):
    """Collect the timing data used in the performance plot.

    Args:
        periodic (bool): The boundary conditions of the underlying grid

    Returns:
        dict: Maps grid size (int) to a dict of per-method Laplacian
        durations on that grid
    """
    grid_sizes = 2 ** np.arange(3, 13)
    results = {}
    for grid_size in display_progress(grid_sizes):
        grid = UnitGrid([grid_size] * 2, periodic=periodic)
        field = np.random.randn(*grid.shape)
        # time the built-in implementations
        timings = {
            method: time_function(
                grid.get_operator("laplace", bc="natural", method=method), field
            )
            for method in ("numba", "scipy")
        }
        # OpenCV is only benchmarked when it could be imported
        if opencv_laplace:
            timings["opencv"] = time_function(opencv_laplace, field)
        results[int(grid_size)] = timings
    return results
def plot_performance(performance_data, title=None):
    """Draw a log-log plot of the collected performance data.

    Args:
        performance_data: The data obtained from calling
            :func:`get_performance_data`.
        title (str): The title of the plot
    """
    plt.figure(figsize=[4, 3])
    method_labels = {"numba": "py-pde"}
    sizes = np.array(sorted(performance_data.keys()))
    grid_sizes = sizes ** 2
    # plot one line per method, in reverse-alphabetical order
    for method in sorted(performance_data[sizes[0]].keys(), reverse=True):
        durations = np.array([performance_data[size][method] for size in sizes])
        plt.loglog(
            grid_sizes, durations, ".-", label=method_labels.get(method, method)
        )
    plt.xlim(grid_sizes[0], grid_sizes[-1])
    plt.xlabel("Number of grid points")
    plt.ylabel("Runtime [ms]")
    plt.legend(loc="best")
    # NOTE(review): the original comment said "fix ticks of y-axis" but the
    # locator is applied to the x-axis — confirm which axis was intended.
    major_locator = mpl.ticker.LogLocator(base=10, numticks=12)
    plt.gca().xaxis.set_major_locator(major_locator)
    if title:
        plt.title(title)
    plt.tight_layout()
def main():
    """Benchmark both boundary conditions and save one figure per run."""
    runs = [
        (False, "2D Laplacian (reflecting BCs)", "performance_noflux"),
        (True, "2D Laplacian (periodic BCs)", "performance_periodic"),
    ]
    for periodic, title, stem in runs:
        perf_data = get_performance_data(periodic=periodic)
        plot_performance(perf_data, title=title)
        plt.savefig(stem + ".pdf", transparent=True)
        plt.savefig(stem + ".png", transparent=True, dpi=200)
        plt.close()
# Script entry point: run the benchmark only when executed directly.
if __name__ == "__main__":
    main()
| [
"pde.UnitGrid",
"matplotlib.ticker.LogLocator",
"matplotlib.pyplot.ylabel",
"numpy.array",
"pde.tools.misc.estimate_computation_speed",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.title",
"matpl... | [((593, 680), 'functools.partial', 'functools.partial', (['cv2.Laplacian'], {'ddepth': 'cv2.CV_64F', 'borderType': 'cv2.BORDER_REFLECT'}), '(cv2.Laplacian, ddepth=cv2.CV_64F, borderType=cv2.\n BORDER_REFLECT)\n', (610, 680), False, 'import functools\n'), ((1143, 1171), 'functools.partial', 'functools.partial', (['func', 'arg'], {}), '(func, arg)\n', (1160, 1171), False, 'import functools\n'), ((1640, 1663), 'pde.tools.output.display_progress', 'display_progress', (['sizes'], {}), '(sizes)\n', (1656, 1663), False, 'from pde.tools.output import display_progress\n'), ((2377, 2403), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[4, 3]'}), '(figsize=[4, 3])\n', (2387, 2403), True, 'import matplotlib.pyplot as plt\n'), ((2791, 2830), 'matplotlib.pyplot.xlim', 'plt.xlim', (['grid_sizes[0]', 'grid_sizes[-1]'], {}), '(grid_sizes[0], grid_sizes[-1])\n', (2799, 2830), True, 'import matplotlib.pyplot as plt\n'), ((2835, 2870), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of grid points"""'], {}), "('Number of grid points')\n", (2845, 2870), True, 'import matplotlib.pyplot as plt\n'), ((2875, 2901), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Runtime [ms]"""'], {}), "('Runtime [ms]')\n", (2885, 2901), True, 'import matplotlib.pyplot as plt\n'), ((2906, 2928), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2916, 2928), True, 'import matplotlib.pyplot as plt\n'), ((2969, 3012), 'matplotlib.ticker.LogLocator', 'mpl.ticker.LogLocator', ([], {'base': '(10)', 'numticks': '(12)'}), '(base=10, numticks=12)\n', (2990, 3012), True, 'import matplotlib as mpl\n'), ((3104, 3122), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3120, 3122), True, 'import matplotlib.pyplot as plt\n'), ((3284, 3339), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""performance_noflux.pdf"""'], {'transparent': '(True)'}), "('performance_noflux.pdf', transparent=True)\n", (3295, 3339), True, 'import 
matplotlib.pyplot as plt\n'), ((3344, 3408), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""performance_noflux.png"""'], {'transparent': '(True)', 'dpi': '(200)'}), "('performance_noflux.png', transparent=True, dpi=200)\n", (3355, 3408), True, 'import matplotlib.pyplot as plt\n'), ((3413, 3424), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3422, 3424), True, 'import matplotlib.pyplot as plt\n'), ((3541, 3598), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""performance_periodic.pdf"""'], {'transparent': '(True)'}), "('performance_periodic.pdf', transparent=True)\n", (3552, 3598), True, 'import matplotlib.pyplot as plt\n'), ((3603, 3669), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""performance_periodic.png"""'], {'transparent': '(True)', 'dpi': '(200)'}), "('performance_periodic.png', transparent=True, dpi=200)\n", (3614, 3669), True, 'import matplotlib.pyplot as plt\n'), ((3674, 3685), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3683, 3685), True, 'import matplotlib.pyplot as plt\n'), ((1093, 1130), 'pde.tools.misc.estimate_computation_speed', 'estimate_computation_speed', (['func', 'arg'], {}), '(func, arg)\n', (1119, 1130), False, 'from pde.tools.misc import estimate_computation_speed\n'), ((1586, 1602), 'numpy.arange', 'np.arange', (['(3)', '(13)'], {}), '(3, 13)\n', (1595, 1602), True, 'import numpy as np\n'), ((1698, 1737), 'pde.UnitGrid', 'UnitGrid', (['([size] * 2)'], {'periodic': 'periodic'}), '([size] * 2, periodic=periodic)\n', (1706, 1737), False, 'from pde import UnitGrid\n'), ((1758, 1786), 'numpy.random.randn', 'np.random.randn', (['*grid.shape'], {}), '(*grid.shape)\n', (1773, 1786), True, 'import numpy as np\n'), ((2641, 2701), 'numpy.array', 'np.array', (['[performance_data[size][method] for size in sizes]'], {}), '([performance_data[size][method] for size in sizes])\n', (2649, 2701), True, 'import numpy as np\n'), ((3082, 3098), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3091, 
3098), True, 'import matplotlib.pyplot as plt\n'), ((1187, 1236), 'timeit.repeat', 'timeit.repeat', (['func'], {'number': 'number', 'repeat': 'repeat'}), '(func, number=number, repeat=repeat)\n', (1200, 1236), False, 'import timeit\n'), ((3017, 3026), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3024, 3026), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import scipy.linalg
def fit_plane(data):
    """
    Fits a plane to a set of 3d data
    Args:
        data: np array where each row is [x, y, z]
    Returns:
        A tuple containing the coefficients of the regression. Z = C[0]*X + C[1]*Y + C[2]
    """
    # Solve the least-squares system  [x y 1] @ coeffs = z
    design = np.column_stack((data[:, 0], data[:, 1], np.ones(len(data))))
    coeffs, _, _, _ = scipy.linalg.lstsq(design, data[:, 2])
    return coeffs[0], coeffs[1], coeffs[2]
# Z = C[0]*X + C[1]*Y + C[2] | [
"numpy.ones"
] | [((347, 369), 'numpy.ones', 'np.ones', (['data.shape[0]'], {}), '(data.shape[0])\n', (354, 369), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__doc__ = r"""
Grayscale wrapper for gym.Env.
Created on 31-03-2021
"""
from typing import Dict, Sequence, Tuple
import gym
import gym.spaces
import numpy
import numpy as np
import torch
from draugr.extensions import rgb_to_grayscale
from gym.spaces import Box
from torchvision.transforms import Grayscale
from trolls.spaces_mixin import SpacesMixin
__all__ = ["Grayscale", "GrayScaleObservation"]
class GrayscaleNonTorch(gym.Wrapper, SpacesMixin):
    """Grayscale wrapper for gym.Env, converting frames to grayscale.
    Only works with gym.spaces.Box environment with 2D RGB frames.
    The last dimension (RGB) of environment observation space will be removed.
    Example:
        env = gym.make('Env')
        # env.observation_space = (100, 100, 3)
        env_wrapped = GrayscaleNonTorch(gym.make('Env'))
        # env.observation_space = (100, 100)
    Args:
        env (gym.Env): Environment to wrap.
    Raises:
        ValueError: If observation space shape is not 3
        or environment is not gym.spaces.Box.
    """

    def __init__(self, env):
        if not isinstance(env.observation_space, gym.spaces.Box):
            raise ValueError("Grayscale only works with gym.spaces.Box environment.")
        if len(env.observation_space.shape) != 3:
            raise ValueError("Grayscale only works with 2D RGB images")
        super().__init__(env)
        # All entries of a Box space share the same bounds, so inspecting the
        # first flattened element is sufficient.
        _low = env.observation_space.low.flatten()[0]
        _high = env.observation_space.high.flatten()[0]
        # The wrapper only supports standard 8-bit image ranges.
        assert _low == 0
        assert _high == 255
        # Drop the trailing channel dimension: grayscale frames are 2D.
        self._observation_space = gym.spaces.Box(
            _low, _high, shape=env.observation_space.shape[:-1], dtype=np.uint8
        )
    def reset(self, **kwargs):
        """gym.Env reset function.
        Args:
            **kwargs: Unused.
        Returns:
            np.ndarray: Observation conforming to observation_space
        """
        del kwargs
        return rgb_to_grayscale(self.env.reset())
    def step(self, action) -> Tuple[Sequence, float, bool, Dict]:
        """See gym.Env.
        Args:
            action (np.ndarray): Action conforming to action_space
        Returns:
            np.ndarray: Observation conforming to observation_space
            float: Reward for this step
            bool: Termination signal
            dict: Extra information from the environment.
        """
        obs, reward, done, info = self.env.step(action)
        # Only the observation is transformed; reward/done/info pass through.
        return rgb_to_grayscale(obs), reward, done, info
class GrayScaleObservation(gym.ObservationWrapper):
    """Observation wrapper that turns RGB frames into grayscale torch tensors."""

    def __init__(self, env):
        super().__init__(env)
        # Drop the channel axis from the advertised observation shape.
        height_width = self.observation_space.shape[:2]
        self.observation_space = Box(
            low=0, high=255, shape=height_width, dtype=numpy.uint8
        )

    def permute_orientation(self, observation):
        """Convert an [H, W, C] array into a float [C, H, W] torch tensor."""
        channels_first = numpy.transpose(observation, (2, 0, 1))
        return torch.tensor(channels_first.copy(), dtype=torch.float)

    def observation(self, observation):
        """Return the grayscale version of the incoming frame."""
        tensor = self.permute_orientation(observation)
        to_gray = Grayscale()
        return to_gray(tensor)
| [
"numpy.transpose",
"draugr.extensions.rgb_to_grayscale",
"torchvision.transforms.Grayscale",
"gym.spaces.Box"
] | [((1668, 1756), 'gym.spaces.Box', 'gym.spaces.Box', (['_low', '_high'], {'shape': 'env.observation_space.shape[:-1]', 'dtype': 'np.uint8'}), '(_low, _high, shape=env.observation_space.shape[:-1], dtype=\n np.uint8)\n', (1682, 1756), False, 'import gym\n'), ((2772, 2828), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(255)', 'shape': 'obs_shape', 'dtype': 'numpy.uint8'}), '(low=0, high=255, shape=obs_shape, dtype=numpy.uint8)\n', (2775, 2828), False, 'from gym.spaces import Box\n'), ((2954, 2993), 'numpy.transpose', 'numpy.transpose', (['observation', '(2, 0, 1)'], {}), '(observation, (2, 0, 1))\n', (2969, 2993), False, 'import numpy\n'), ((3216, 3227), 'torchvision.transforms.Grayscale', 'Grayscale', ([], {}), '()\n', (3225, 3227), False, 'from torchvision.transforms import Grayscale\n'), ((2531, 2552), 'draugr.extensions.rgb_to_grayscale', 'rgb_to_grayscale', (['obs'], {}), '(obs)\n', (2547, 2552), False, 'from draugr.extensions import rgb_to_grayscale\n')] |
from collections import OrderedDict
import numpy as np
from qtpy.QtCore import Qt, Signal
from qtpy.QtGui import QPainter, QTransform, QPen
from qtpy.QtWidgets import (QGraphicsView, QGraphicsScene, QApplication,
QGraphicsTextItem, QGraphicsEllipseItem,
QGraphicsLineItem)
from glue.utils.qt import mpl_to_qt_color, qt_to_mpl_color
from glue.core.component_link import KeyLink
from glue.plugins.join_on_key.link_helpers import Index_Link
COLOR_SELECTED = (0.2, 0.9, 0.2)
COLOR_CONNECTED = (0.6, 0.9, 0.9)
COLOR_DISCONNECTED = (0.9, 0.6, 0.6)
def get_pen(color, linewidth=1, linestyle=Qt.SolidLine):
    """Build a rounded QPen from a matplotlib-style color specification."""
    qt_color = mpl_to_qt_color(color)
    return QPen(qt_color, linewidth, linestyle, Qt.RoundCap, Qt.RoundJoin)
class Edge(QGraphicsLineItem):
    """A line item connecting two data nodes in the link graph.

    Key links are drawn dashed, regular links solid.
    """
    def __init__(self, node_source, node_dest, linewidth=3, zindex=5, key_link=False):
        self.linewidth = linewidth
        # Dashed style marks key-based links, solid marks ordinary ones
        if key_link:
            self.linestyle = Qt.DashLine
        else:
            self.linestyle = Qt.SolidLine
        self.node_source = node_source
        self.node_dest = node_dest
        # Placeholder geometry; update_position() sets the real endpoints
        super(Edge, self).__init__(0, 0, 1, 1)
        self.setZValue(zindex)
        self.color = '0.5'
    def update_position(self):
        # Re-anchor the line on the current node centers
        x0, y0 = self.node_source.node_position
        x1, y1 = self.node_dest.node_position
        self.setLine(x0, y0, x1, y1)
    @property
    def color(self):
        # Color as a matplotlib-style value, derived from the current pen
        return qt_to_mpl_color(self.pen().color())
    @color.setter
    def color(self, value):
        # Rebuild the pen so linewidth/linestyle are preserved
        self.setPen(get_pen(value, self.linewidth, self.linestyle))
    def add_to_scene(self, scene):
        scene.addItem(self)
    def remove_from_scene(self, scene):
        scene.removeItem(self)
    def contains(self, point):
        # `point` is in scene coordinates; map to item space before testing
        return super(Edge, self).contains(self.mapFromScene(point))
class DataNode:
    """Visual representation of one dataset: a circle, a text label, and a
    two-segment connector line between them.
    """
    def __init__(self, data, radius=15):
        self.data = data
        # Add circular node (placeholder rect; the radius setter sizes it)
        self.node = QGraphicsEllipseItem(0, 0, 1, 1)
        # Set radius
        self.radius = radius
        # Add text label
        self.label = QGraphicsTextItem(data.label)
        font = self.label.font()
        font.setPointSize(10)
        self.label.setFont(font)
        # Add line between label and node (two segments meeting halfway)
        self.line1 = QGraphicsLineItem(0, 0, 1, 1)
        self.line2 = QGraphicsLineItem(0, 0, 1, 1)
        # Draw the circle above the label and connector lines
        self.node.setZValue(20)
        self.label.setZValue(10)
        self.line1.setZValue(10)
        self.line2.setZValue(10)
        self.line1.setPen(get_pen('0.5'))
        self.line2.setPen(get_pen('0.5'))
        self.color = '0.8'
    @property
    def radius(self):
        # Circle radius in scene units
        return self._radius
    @radius.setter
    def radius(self, value):
        self._radius = value
        # Rect is centered on the item origin so the node position is its center
        self.node.setRect(-value, -value, 2 * value, 2 * value)
    def contains(self, point):
        # `point` is in scene coordinates; test both the label and the circle
        # Check label
        if self.label.contains(self.label.mapFromScene(point)):
            return True
        # Check node
        if self.node.contains(self.node.mapFromScene(point)):
            return True
        return False
    def update(self):
        # Request a repaint of the circle
        self.node.update()
    def add_to_scene(self, scene):
        # Add all four graphics items that make up this node
        scene.addItem(self.node)
        scene.addItem(self.label)
        scene.addItem(self.line1)
        scene.addItem(self.line2)
    def remove_from_scene(self, scene):
        # Remove all four graphics items that make up this node
        scene.removeItem(self.node)
        scene.removeItem(self.label)
        scene.removeItem(self.line1)
        scene.removeItem(self.line2)
    @property
    def node_position(self):
        # (x, y) center of the circle in scene coordinates
        pos = self.node.pos()
        return pos.x(), pos.y()
    @node_position.setter
    def node_position(self, value):
        self.node.setPos(value[0], value[1])
        self.update_lines()
    @property
    def label_position(self):
        # (x, y) of the text label in scene coordinates
        pos = self.label.pos()
        return pos.x(), pos.y()
    @label_position.setter
    def label_position(self, value):
        self.label.setPos(value[0], value[1])
        self.update_lines()
    def update_lines(self):
        # Route the connector: horizontal from the label to the midpoint,
        # then straight to the circle.
        x0, y0 = self.label_position
        x2, y2 = self.node_position
        x1 = 0.5 * (x0 + x2)
        y1 = y0
        self.line1.setLine(x0, y0, x1, y1)
        self.line2.setLine(x1, y1, x2, y2)
    @property
    def color(self):
        # Fill color as a matplotlib-style value
        return qt_to_mpl_color(self.node.brush().color())
    @color.setter
    def color(self, value):
        self.node.setBrush(mpl_to_qt_color(value))
def get_connections(dc_links):
    """Collect unique data-pair connections from a list of links.

    Args:
        dc_links: iterable of link objects exposing ``data1``, ``data2``
            and ``key_link`` attributes.

    Returns:
        list: one ``(data1, data2, key_link)`` tuple per unordered pair of
        datasets, keeping the first link seen for each pair.
    """
    links = []
    seen_pairs = []  # unordered (data1, data2) pairs already emitted
    for link in dc_links:
        data1 = link.data1
        data2 = link.data2
        if link.key_link:
            key_link = True
        else:
            key_link = False
        # BUG FIX: the previous check compared (data1, data2) 2-tuples against
        # the list of 3-tuples in `links`, so it never matched and duplicate
        # pairs were not filtered. Track the bare pairs separately instead.
        if (data1, data2) not in seen_pairs and (data2, data1) not in seen_pairs:
            seen_pairs.append((data1, data2))
            links.append((data1, data2, key_link))
    return links
def layout_simple_circle(nodes, edges, center=None, radius=None, reorder=True):
    """Distribute the nodes evenly on a circle.

    Mutates each node's ``node_position``; with ``reorder=True`` the ``nodes``
    list is first reordered in place by connectivity.
    """
    if reorder:
        nodes[:] = order_nodes_by_connections(nodes, edges)
    count = len(nodes)
    for index, node in enumerate(nodes):
        theta = 2 * np.pi * index / count
        node.node_position = (radius * np.cos(theta) + center[0],
                              radius * np.sin(theta) + center[1])
def order_nodes_by_connections(nodes, edges):
    """Return the nodes ordered so that the most connected come first.

    Repeatedly finds the node(s) with the largest (indirect, direct)
    connection counts among the not-yet-sorted nodes and appends their
    whole connected groups, until every node has been placed.
    """
    remaining = list(nodes)
    ordered = []
    while remaining:
        # Score every remaining node by its connectivity within `remaining`
        scored = []
        for node in remaining:
            direct, indirect = find_connections(node, remaining, edges)
            scored.append(((len(indirect), len(direct)), indirect, direct))
        best = max(score for score, _, _ in scored)
        # Emit the connection groups of every node tied for the best score
        for score, indirect, direct in scored:
            if score == best:
                for node in indirect + direct:
                    if node not in ordered:
                        ordered.append(node)
        remaining = [node for node in nodes if node not in ordered]
    return ordered
class DataGraphWidget(QGraphicsView):
    """Graphics view showing datasets as nodes on a circle and links as edges.

    Selection model: ``selection_level`` is 0 (nothing pinned), 1 (one node
    pinned) or 2 (a node pair / edge pinned).  Hovering previews selections
    (mouseMoveEvent); clicking pins or unpins them (mousePressEvent).
    """

    # Emitted after every mouse interaction that may change the selection.
    selection_changed = Signal()

    def __init__(self, parent=None):
        super(DataGraphWidget, self).__init__(parent=parent)
        # Set up scene
        self.scene = QGraphicsScene(self)
        self.scene.setItemIndexMethod(QGraphicsScene.NoIndex)
        self.scene.setSceneRect(0, 0, 800, 300)
        self.setScene(self.scene)
        self.setWindowTitle("Glue data graph")
        self.setRenderHint(QPainter.Antialiasing)
        self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
        self.setResizeAnchor(QGraphicsView.AnchorViewCenter)
        # 0 = nothing pinned, 1 = one node pinned, 2 = pair/edge pinned
        self.selection_level = 0

    def resizeEvent(self, event):
        # Keep the scene matched to the viewport and re-place the nodes
        # (without reordering) whenever the widget is resized.
        self.scene.setSceneRect(0, 0, self.width(), self.height())
        self.relayout(reorder=False)

    def relayout(self, reorder=True):
        """Recompute node, edge and label positions for the current size."""
        # Update radius
        for node in self.nodes:
            node.radius = self.height() / 30.
        layout_simple_circle(self.nodes, self.edges,
                             center=(self.width() / 2, self.height() / 2),
                             radius=self.height() / 3, reorder=reorder)
        # Update edge positions
        for edge in self.background_edges + self.edges:
            edge.update_position()
        # Set up labels: nodes on the left half get left-hand labels, the
        # rest right-hand labels, each column stacked evenly in y.
        self.left_nodes = [node for node in self.nodes if node.node_position[0] < self.width() / 2]
        self.left_nodes = sorted(self.left_nodes, key=lambda x: x.node_position[1], reverse=True)
        self.right_nodes = [node for node in self.nodes if node not in self.left_nodes]
        self.right_nodes = sorted(self.right_nodes, key=lambda x: x.node_position[1], reverse=True)
        for i, node in enumerate(self.left_nodes):
            y = self.height() - (i + 1) / (len(self.left_nodes) + 1) * self.height()
            node.label_position = self.width() / 2 - self.height() / 2, y
        for i, node in enumerate(self.right_nodes):
            y = self.height() - (i + 1) / (len(self.right_nodes) + 1) * self.height()
            node.label_position = self.width() / 2 + self.height() / 2, y

    def set_data_collection(self, data_collection, old_links=None, new_links=None):
        """Populate the graph from a data collection and its links.

        ``old_links`` are drawn as thin background edges; ``new_links`` as
        the interactive foreground edges.
        """
        # Get data and initialize nodes
        self.data_to_nodes = OrderedDict((data, DataNode(data)) for data in data_collection)
        self.nodes = list(self.data_to_nodes.values())
        # Get links and set up edges
        if old_links:
            self.background_edges = [Edge(self.data_to_nodes[data1], self.data_to_nodes[data2], linewidth=1, zindex=1, key_link=key_link)
                                     for data1, data2, key_link in get_connections(data_collection.external_links)]
        else:
            self.background_edges = []
        if new_links:
            self.edges = [Edge(self.data_to_nodes[data1], self.data_to_nodes[data2], key_link=key_link)
                          for data1, data2, key_link in get_connections(new_links)]
        else:
            self.edges = []
        # Figure out positions
        self.relayout()
        # Add nodes and edges to graph
        for node in self.nodes:
            node.add_to_scene(self.scene)
        for edge in self.background_edges + self.edges:
            edge.add_to_scene(self.scene)
        # Labels are re-anchored lazily in paintEvent once their bounding
        # rects are valid.
        self.text_adjusted = False
        self.selected_edge = None
        self.selected_node1 = None
        self.selected_node2 = None

    def set_links(self, links):
        """Replace the foreground edges with a new set of links."""
        for edge in self.edges:
            edge.remove_from_scene(self.scene)
        self.edges = [Edge(self.data_to_nodes[data1], self.data_to_nodes[data2], key_link=key_link)
                      for data1, data2, key_link in get_connections(links)]
        for edge in self.edges:
            edge.update_position()
        for edge in self.edges:
            edge.add_to_scene(self.scene)
        self._update_selected_edge()
        self._update_selected_colors()

    def paintEvent(self, event):
        super(DataGraphWidget, self).paintEvent(event)
        # One-off: shift each label so it hangs off the correct side of its
        # node (label bounding rects are only valid once painting starts).
        if not self.text_adjusted:
            for node in self.nodes:
                width = node.label.boundingRect().width()
                height = node.label.boundingRect().height()
                transform = QTransform()
                if node in self.left_nodes:
                    transform.translate(-width, -height / 2)
                else:
                    transform.translate(0, -height / 2)
                node.label.setTransform(transform)
            self.text_adjusted = True

    def manual_select(self, data1=None, data2=None):
        """Programmatically select one dataset or a pair of datasets."""
        # Normalize so that a single selection is always in data1.
        if data1 is None and data2 is not None:
            data1, data2 = data2, data1
        if data2 is not None:
            self.selection_level = 2
        elif data1 is not None:
            self.selection_level = 1
        else:
            self.selection_level = 0
        self.selected_node1 = self.data_to_nodes.get(data1, None)
        self.selected_node2 = self.data_to_nodes.get(data2, None)
        self._update_selected_edge()
        self._update_selected_colors()

    def find_object(self, event):
        # Return the first node or edge under the cursor, or None.
        for obj in list(self.nodes) + self.edges:
            if obj.contains(event.localPos()):
                return obj

    def mouseMoveEvent(self, event):
        """Preview-select whatever is under the cursor (hover behavior)."""
        # TODO: Don't update until the end
        # TODO: Only select object on top
        selected = self.find_object(event)
        if selected is None:
            if self.selection_level == 0:
                self.selected_node1 = None
                self.selected_node2 = None
                self._update_selected_edge()
            elif self.selection_level == 1:
                self.selected_node2 = None
                self._update_selected_edge()
        elif isinstance(selected, DataNode):
            if self.selection_level == 0:
                self.selected_node1 = selected
                self.selected_node2 = None
            elif self.selection_level == 1:
                if selected is not self.selected_node1:
                    self.selected_node2 = selected
            self._update_selected_edge()
        elif isinstance(selected, Edge):
            if self.selection_level == 0:
                self.selected_edge = selected
                self.selected_node1 = selected.node_source
                self.selected_node2 = selected.node_dest
        self._update_selected_colors()
        self.selection_changed.emit()

    def mousePressEvent(self, event):
        """Pin or unpin the selection under the cursor (cycles levels 0-2)."""
        # TODO: Don't update until the end
        # TODO: Only select object on top
        selected = self.find_object(event)
        if selected is None:
            self.selection_level = 0
            self.selected_node1 = None
            self.selected_node2 = None
            self._update_selected_edge()
        elif isinstance(selected, DataNode):
            if self.selection_level == 0:
                self.selected_node1 = selected
                self.selection_level += 1
            elif self.selection_level == 1:
                # Clicking the pinned node again unpins it; clicking another
                # node pins the pair.
                if selected is self.selected_node1:
                    self.selected_node1 = None
                    self.selection_level = 0
                else:
                    self.selected_node2 = selected
                    self.selection_level = 2
            elif self.selection_level == 2:
                if selected is self.selected_node2:
                    self.selected_node2 = None
                    self.selection_level = 1
                else:
                    # Start over with the clicked node as the first one.
                    self.selected_node1 = selected
                    self.selected_node2 = None
                    self.selection_level = 1
            self._update_selected_edge()
        elif isinstance(selected, Edge):
            if self.selected_edge is selected and self.selection_level == 2:
                # Clicking the pinned edge again clears the selection.
                self.selected_edge = None
                self.selected_node1 = None
                self.selected_node2 = None
                self.selection_level = 0
            else:
                self.selected_edge = selected
                self.selected_node1 = selected.node_source
                self.selected_node2 = selected.node_dest
                self.selection_level = 2
        self.mouseMoveEvent(event)

    def _update_selected_edge(self):
        # Keep selected_edge consistent with the selected node pair
        # (direction-insensitive match); None when no edge joins the pair.
        for edge in self.edges:
            if (edge.node_source is self.selected_node1 and edge.node_dest is self.selected_node2 or
                    edge.node_source is self.selected_node2 and edge.node_dest is self.selected_node1):
                self.selected_edge = edge
                break
        else:
            self.selected_edge = None

    def _update_selected_colors(self):
        """Recolor nodes and edges to highlight the current selection."""
        colors = {}
        if self.selected_node1 is not None and self.selection_level < 2:
            # Highlight everything reachable from the first selected node.
            direct, indirect = find_connections(self.selected_node1, self.nodes, self.edges)
            for node in self.nodes:
                if node in direct or node in indirect:
                    colors[node] = COLOR_CONNECTED
                else:
                    colors[node] = COLOR_DISCONNECTED
            for edge in self.edges:
                if (edge.node_source is self.selected_node1 or
                        edge.node_dest is self.selected_node1):
                    colors[edge] = COLOR_CONNECTED
        if self.selected_edge is not None:
            colors[self.selected_edge] = COLOR_SELECTED
        if self.selected_node1 is not None:
            colors[self.selected_node1] = COLOR_SELECTED
        if self.selected_node2 is not None:
            colors[self.selected_node2] = COLOR_SELECTED
        self.set_colors(colors)

    def set_colors(self, colors):
        # Apply the computed colors, defaulting to light gray for nodes and
        # mid gray for edges.
        for obj in list(self.nodes) + self.edges:
            default_color = '0.8' if isinstance(obj, DataNode) else '0.5'
            obj.color = colors.get(obj, default_color)
            obj.update()
def find_connections(node, nodes, edges):
    """Find all nodes connected, directly or indirectly, to *node*.

    Returns ``(direct, indirect)``: ``direct`` holds *node* plus the
    neighbours collected on the first expansion sweep; later sweeps append
    to ``indirect``.
    NOTE(review): ``connected`` is only ever extended with ``indirect``, so
    the direct neighbours are re-discovered into ``indirect`` on the second
    sweep and the two lists overlap.  Callers in this module only use the
    results for membership/length tests, where this is harmless — verify
    before reusing elsewhere.
    """
    direct = [node]
    indirect = []
    current = direct
    connected = [node]
    changed = True
    while changed:
        changed = False
        for edge in edges:
            source = edge.node_source
            dest = edge.node_dest
            if source in connected and dest not in connected:
                current.append(dest)
                changed = True
            if dest in connected and source not in connected:
                current.append(source)
                changed = True
        # After each sweep, further discoveries count as indirect.
        current = indirect
        connected.extend(current)
    return direct, indirect
if __name__ == '__main__':
    # Demo: load a saved glue session's data collection and show its graph.
    import sys
    app = QApplication(sys.argv)
    app.setAttribute(Qt.AA_UseHighDpiPixmaps)
    from glue.core.state import load
    dc = load('links.glu')
    # NOTE(review): `dc` is passed as DataGraphWidget's `parent` argument and
    # set_data_collection() is never called here — confirm this demo works.
    widget = DataGraphWidget(dc)
    widget.show()
    sys.exit(app.exec_())
| [
"qtpy.QtWidgets.QApplication",
"qtpy.QtWidgets.QGraphicsTextItem",
"qtpy.QtWidgets.QGraphicsLineItem",
"qtpy.QtCore.Signal",
"qtpy.QtGui.QPen",
"qtpy.QtGui.QTransform",
"qtpy.QtWidgets.QGraphicsEllipseItem",
"glue.core.state.load",
"numpy.cos",
"qtpy.QtWidgets.QGraphicsScene",
"numpy.sin",
"gl... | [((672, 694), 'glue.utils.qt.mpl_to_qt_color', 'mpl_to_qt_color', (['color'], {}), '(color)\n', (687, 694), False, 'from glue.utils.qt import mpl_to_qt_color, qt_to_mpl_color\n'), ((706, 766), 'qtpy.QtGui.QPen', 'QPen', (['color', 'linewidth', 'linestyle', 'Qt.RoundCap', 'Qt.RoundJoin'], {}), '(color, linewidth, linestyle, Qt.RoundCap, Qt.RoundJoin)\n', (710, 766), False, 'from qtpy.QtGui import QPainter, QTransform, QPen\n'), ((6010, 6018), 'qtpy.QtCore.Signal', 'Signal', ([], {}), '()\n', (6016, 6018), False, 'from qtpy.QtCore import Qt, Signal\n'), ((16465, 16487), 'qtpy.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (16477, 16487), False, 'from qtpy.QtWidgets import QGraphicsView, QGraphicsScene, QApplication, QGraphicsTextItem, QGraphicsEllipseItem, QGraphicsLineItem\n'), ((16581, 16598), 'glue.core.state.load', 'load', (['"""links.glu"""'], {}), "('links.glu')\n", (16585, 16598), False, 'from glue.core.state import load\n'), ((1956, 1988), 'qtpy.QtWidgets.QGraphicsEllipseItem', 'QGraphicsEllipseItem', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (1976, 1988), False, 'from qtpy.QtWidgets import QGraphicsView, QGraphicsScene, QApplication, QGraphicsTextItem, QGraphicsEllipseItem, QGraphicsLineItem\n'), ((2087, 2116), 'qtpy.QtWidgets.QGraphicsTextItem', 'QGraphicsTextItem', (['data.label'], {}), '(data.label)\n', (2104, 2116), False, 'from qtpy.QtWidgets import QGraphicsView, QGraphicsScene, QApplication, QGraphicsTextItem, QGraphicsEllipseItem, QGraphicsLineItem\n'), ((2277, 2306), 'qtpy.QtWidgets.QGraphicsLineItem', 'QGraphicsLineItem', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (2294, 2306), False, 'from qtpy.QtWidgets import QGraphicsView, QGraphicsScene, QApplication, QGraphicsTextItem, QGraphicsEllipseItem, QGraphicsLineItem\n'), ((2328, 2357), 'qtpy.QtWidgets.QGraphicsLineItem', 'QGraphicsLineItem', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (2345, 2357), False, 'from qtpy.QtWidgets 
import QGraphicsView, QGraphicsScene, QApplication, QGraphicsTextItem, QGraphicsEllipseItem, QGraphicsLineItem\n'), ((6165, 6185), 'qtpy.QtWidgets.QGraphicsScene', 'QGraphicsScene', (['self'], {}), '(self)\n', (6179, 6185), False, 'from qtpy.QtWidgets import QGraphicsView, QGraphicsScene, QApplication, QGraphicsTextItem, QGraphicsEllipseItem, QGraphicsLineItem\n'), ((4382, 4404), 'glue.utils.qt.mpl_to_qt_color', 'mpl_to_qt_color', (['value'], {}), '(value)\n', (4397, 4404), False, 'from glue.utils.qt import mpl_to_qt_color, qt_to_mpl_color\n'), ((5069, 5082), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (5075, 5082), True, 'import numpy as np\n'), ((5117, 5130), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (5123, 5130), True, 'import numpy as np\n'), ((10164, 10176), 'qtpy.QtGui.QTransform', 'QTransform', ([], {}), '()\n', (10174, 10176), False, 'from qtpy.QtGui import QPainter, QTransform, QPen\n')] |
import random
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import seaborn as sns
# Global seaborn styling applied to every figure produced by this module.
sns.set_style("whitegrid")
sns.set_context("paper", rc={"font.size":14,"axes.titlesize":14,"axes.labelsize":14})
import matplotlib as m
import matplotlib.pyplot as plt  # NOTE(review): duplicate pyplot import
import matplotlib.gridspec as gridspec
import matplotlib.cm as cm
from matplotlib.colors import Normalize
#dump/read functions
def save_obj(obj, name):
    """Pickle *obj* to '<name>.pkl' using the highest pickle protocol."""
    with open(name + '.pkl', 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Load and return a pickled object from '<name>.pkl'."""
    with open(name + '.pkl', 'rb') as handle:
        return pickle.load(handle)
#function to normalize output by features: [num_data, timesteps, features]
def normalizeOutput(data):
    """Normalize a [num_data, timesteps, features] array feature-wise.

    Each feature channel is divided by its global maximum over all wells
    and timesteps.  Generalized from the original hard-coded 3 channels to
    any number of features (backward compatible for 3).

    Returns:
        (normalized data, output_maxs) where output_maxs has shape
        [timesteps, features]; every row holds the same per-feature maxima
        (kept for compatibility with the original return shape).
    """
    output_maxs = np.zeros([data.shape[1], data.shape[2]])
    for feature in range(data.shape[2]):
        output_maxs[:, feature] = np.max(data[:, :, feature])
    return (data/(output_maxs)), output_maxs
def normalizeProps(data):
    """Divide each column of a [num_data, features] array by its maximum.

    Returns (normalized data, per-feature maxima of shape [features]).
    """
    output_maxs = np.max(data, axis=0)
    return (data/(output_maxs)), output_maxs
#function to plot the density plot
def histplot(data_train, data_test):
    """Overlay train/test histograms for the five input features.

    Bins are derived from the training data range; NaNs are dropped from
    both sets before plotting.
    """
    feature_labels = ['$x_1$', '$x_2$', '$x_3$', '$x_4$', '$x_5$']
    plt.figure(figsize=(14, 3))
    for position, label in enumerate(feature_labels):
        plt.subplot(1, 5, position + 1)
        train_vals = data_train[:, position]
        test_vals = data_test[:, position]
        train_vals = train_vals[~np.isnan(train_vals)]  # drop existing nans
        test_vals = test_vals[~np.isnan(test_vals)]
        bins = np.linspace(np.min(train_vals), np.max(train_vals), 30)
        sns.distplot(train_vals, hist=True, bins=bins, norm_hist=False, kde=False, label="Train")
        sns.distplot(test_vals, hist=True, bins=bins, norm_hist=False, kde=False, label="Test")
        plt.tick_params(axis='both', which='both', bottom='on', top='off', labelbottom='on', right='off', left='on', labelleft='off')
        plt.tight_layout()
        plt.legend()
        plt.title(label)
class DataLoader:
    """Loads, normalizes and splits the Bakken well production dataset.

    Holds production profiles (d), controls (u), cumulative production
    (cumm), well locations (loc) and well attributes (x), and produces
    train/test splits with either NaN-marked or mean-imputed attributes.
    """

    def __init__(self, verbose=False):
        self.verbose = verbose
        self.fileno = None        # well file identifiers
        self.field_name = None    # field name per well
        self.d = None             # production profiles [wells, timesteps, phases]
        self.u = None             # control profiles [wells, timesteps]
        self.cumm = None          # cumulative production per well
        self.loc = None           # well locations [wells, 2]
        self.x = None             # well attributes [wells, features]
        self.x_maxs = None        # per-feature attribute maxima
        self.d_cumm_maxs = None   # per-phase production maxima
        self.cumm_norm = None     # normalized cumulative production
        self.train_idx = None     # training well indices
        self.test_idx = None      # test well indices

    def load_data(self):
        """Populate all raw arrays from the pre-cleaned pickle file."""
        #load cleaned data objects
        self.fileno, self.field_name, self.d, self.u, self.cumm, self.loc, self.x = load_obj('Data_Field_Bakken/DATA-clean')

    def calculate_cumulative_d(self, truncate=True):
        """Turn rate profiles into cumulative profiles and normalize them.

        When *truncate* is True, only the first 60 timesteps are kept.
        """
        if truncate:
            self.d = self.d[:, 0:60, :]
            self.u = self.u[:, 0:60]
        #calculate the cumulative profiles for each well, each phase
        for i in range(self.d.shape[0]):
            for j in range(self.d.shape[2]):
                for k in range(self.d.shape[1]-1):
                    self.d[i, k+1, j] = self.d[i, k+1, j] + self.d[i, k, j]
        #normalize by phase
        self.d, self.d_cumm_maxs = normalizeOutput(self.d)
        #normalize cumulative production (scalar)
        self.cumm_norm = self.cumm/np.max(self.cumm)

    #split data, test data has full attributes and training data may have missign values
    def get_split(self, use_complete_data_only=False):
        """Load, preprocess and randomly split the data 70/30.

        Returns (x_nans_train, x_nans_test, x_imputed_train, x_imputed_test,
        y_train, y_test, cumm_train, cumm_test) — or, when
        *use_complete_data_only* is True, a reduced 6-tuple built from a
        60:40 re-split of the fully-attributed (test) subset.
        NOTE(review): the split uses np.random.permutation without a seed,
        so it is not reproducible across calls — confirm this is intended.
        """
        self.load_data()
        self.calculate_cumulative_d()
        # Attribute value 0 encodes "missing"; mark those entries as NaN.
        indicator_nans = np.zeros(self.x.shape)
        indicator_nans[self.x==0] = np.nan
        indicator_nans[self.x==1] = 1
        nans = np.isnan(indicator_nans)
        nans_well = np.any(nans, axis=1)
        tot_data = self.x.shape[0]
        idx = np.linspace(0, (tot_data)-1, tot_data, dtype=np.int32)
        partition = int(tot_data*0.7)
        idx = np.random.permutation(idx)
        self.train_idx = idx[0:partition]
        self.test_idx = idx[partition:]
        #self.train_idx = idx[nans_well==True]
        #self.test_idx = idx[nans_well==False]
        #normalize
        self.x, self.x_maxs = normalizeProps(self.x)
        #prepare input data with missing values as Nans
        self.x_nans = np.copy(self.x)
        self.x_nans[self.x==0] = np.nan
        #prepare input data without missing values (as global mean imputation)
        self.x_imputed = np.copy(self.x)
        x_means = np.mean(self.x, axis=0)
        for i in range(self.x.shape[1]): #by features
            for j in range(self.x.shape[0]): #by well
                if self.x[j, i] == 0:
                    self.x_imputed[j, i] = x_means[i]
        #only use complete dataset only, ie the test dataset (small subset, further 60:40 split)
        if use_complete_data_only:
            train_idx_complete = self.test_idx[0:200]
            test_idx_complete = self.test_idx[200:]
            self.train_idx = train_idx_complete
            self.test_idx = test_idx_complete
            x_train = self.x_imputed[train_idx_complete]
            x_test = self.x_imputed[test_idx_complete]
            y_train = self.d[train_idx_complete]
            y_test = self.d[test_idx_complete]
            cumm_train = self.cumm_norm[train_idx_complete]
            cumm_test = self.cumm_norm[test_idx_complete]
            return x_train, x_test, y_train, y_test, cumm_train, cumm_test
        x_nans_train = self.x_nans[self.train_idx]
        x_nans_test = self.x_nans[self.test_idx]
        x_imputed_train = self.x_imputed[self.train_idx]
        x_imputed_test = self.x_imputed[self.test_idx]
        y_train = self.d[self.train_idx]
        y_test = self.d[self.test_idx]
        cumm_train = self.cumm_norm[self.train_idx]
        cumm_test = self.cumm_norm[self.test_idx]
        return x_nans_train, x_nans_test, x_imputed_train, x_imputed_test, y_train, y_test, cumm_train, cumm_test

    #plot fields
    def plot_fields(self):
        """Scatter the well map colored by field, marking train/test wells,
        and save the figure to readme/field_parti_maps.png."""
        #get number of unique fields
        unique_fields = np.unique(self.field_name)
        no_unique_fields = len(unique_fields)
        field_code = np.arange(no_unique_fields)
        my_cmap = cm.get_cmap('Dark2')
        my_norm = Normalize(vmin=0, vmax=(no_unique_fields-1))
        cs = my_cmap(my_norm(field_code))
        #assign each data point the field code
        FieldName_idx = np.zeros(self.field_name.shape, dtype='int32')
        for i in range(len(self.field_name)):
            idx, = (np.where(unique_fields == self.field_name[i]))[0]
            FieldName_idx[i] = idx
        f = plt.figure(figsize=[10, 10])
        plt.scatter(self.loc[:,0], self.loc[:,1], s=100, c=cs[FieldName_idx])
        plt.scatter(self.loc[self.train_idx,0], self.loc[self.train_idx,1], s=50, c='k', marker='x', label='Train')
        plt.scatter(self.loc[self.test_idx,0], self.loc[self.test_idx,1], s=50, c='k', marker='|', label='Test')
        plt.legend()
        plt.xlabel("Latitude")
        plt.ylabel("Longitude")
        plt.axis('scaled')
        plt.title(str(no_unique_fields) + " unique fields, " + str(len(self.train_idx)) + " train, "+ str(len(self.test_idx)) + " test")
        plt.grid(False)
        plt.tight_layout()
        f.savefig('readme/field_parti_maps.png', dpi=300, bbox_inches='tight')

    def plot_wells(self):
        """Plot rate profiles (left axis) and controls (right axis) for the
        first 15 wells.
        NOTE(review): columns 2 and 1 are labelled 'Gas' and 'Water' here,
        but plot_data_by_phase titles them 'Water' and 'Gas' — verify which
        ordering is correct.
        """
        plt.figure(figsize=[14, 14])
        for i in range(15):
            ax1 = plt.subplot(5, 3, i+1)
            #shift to view other sets
            i = i + 0
            _ = self.d[i]
            ax1.plot(_[:, 2], c='r', label='Gas', alpha=0.8)
            ax1.plot(_[:, 1], c='b', label='Water', alpha=0.8)
            ax1.plot(_[:, 0], c='g', label='Oil', alpha=0.8)
            ax1.legend()
            ax1.set_xlabel('Timesteps')
            ax1.set_ylabel('Rate')
            ax1.set_title(self.fileno[i])
            ax2 = ax1.twinx()
            c = 'tab:purple'
            #plot control on another axis
            _ = self.u[i]
            ax2.plot(_, color=c)
            ax2.set_ylabel('Control', color=c)
            ax2.tick_params(axis ='y', labelcolor=c)
            ax1.grid(False)
            ax2.grid(False)
        plt.tight_layout()

    #field data: display the output data
    def plot_data_by_phase(self):
        """Overlay all wells' profiles, one figure per phase (oil/gas/water)."""
        plt.figure(figsize=[3, 3])
        for i in range(self.d.shape[0]):
            plt.plot(self.d[i, :, 0], c='green', alpha=0.4)
        plt.legend(['Bakken'])
        plt.title('Oil rates')
        plt.figure(figsize=[3, 3])
        for i in range(self.d.shape[0]):
            plt.plot(self.d[i, :, 1], c='red', alpha=0.4)
        plt.legend(['Bakken'])
        plt.title('Gas rates')
        plt.figure(figsize=[3, 3])
        for i in range(self.d.shape[0]):
            plt.plot(self.d[i, :, 2], c='blue', alpha=0.4)
        plt.legend(['Bakken'])
        plt.title('Water rates')
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"seaborn.set_style",
"numpy.arange",
"numpy.mean",
"seaborn.distplot",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"numpy.min",
"matplotlib.pyplot.axis... | [((122, 148), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (135, 148), True, 'import seaborn as sns\n'), ((149, 243), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {'rc': "{'font.size': 14, 'axes.titlesize': 14, 'axes.labelsize': 14}"}), "('paper', rc={'font.size': 14, 'axes.titlesize': 14,\n 'axes.labelsize': 14})\n", (164, 243), True, 'import seaborn as sns\n'), ((735, 775), 'numpy.zeros', 'np.zeros', (['[data.shape[1], data.shape[2]]'], {}), '([data.shape[1], data.shape[2]])\n', (743, 775), True, 'import numpy as np\n'), ((798, 819), 'numpy.max', 'np.max', (['data[:, :, 0]'], {}), '(data[:, :, 0])\n', (804, 819), True, 'import numpy as np\n'), ((841, 862), 'numpy.max', 'np.max', (['data[:, :, 1]'], {}), '(data[:, :, 1])\n', (847, 862), True, 'import numpy as np\n'), ((885, 906), 'numpy.max', 'np.max', (['data[:, :, 2]'], {}), '(data[:, :, 2])\n', (891, 906), True, 'import numpy as np\n'), ((996, 1021), 'numpy.zeros', 'np.zeros', (['[data.shape[1]]'], {}), '([data.shape[1]])\n', (1004, 1021), True, 'import numpy as np\n'), ((1281, 1308), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 3)'}), '(figsize=(14, 3))\n', (1291, 1308), True, 'import matplotlib.pyplot as plt\n'), ((486, 530), 'pickle.dump', 'pickle.dump', (['obj', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(obj, f, pickle.HIGHEST_PROTOCOL)\n', (497, 530), False, 'import pickle\n'), ((600, 614), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (611, 614), False, 'import pickle\n'), ((1083, 1101), 'numpy.max', 'np.max', (['data[:, i]'], {}), '(data[:, i])\n', (1089, 1101), True, 'import numpy as np\n'), ((1354, 1380), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(5)', '(idx + 1)'], {}), '(1, 5, idx + 1)\n', (1365, 1380), True, 'import matplotlib.pyplot as plt\n'), ((1565, 1655), 'seaborn.distplot', 'sns.distplot', (['dtr'], {'hist': '(True)', 'bins': 'new_bins', 'norm_hist': '(False)', 'kde': 
'(False)', 'label': '"""Train"""'}), "(dtr, hist=True, bins=new_bins, norm_hist=False, kde=False,\n label='Train')\n", (1577, 1655), True, 'import seaborn as sns\n'), ((1654, 1743), 'seaborn.distplot', 'sns.distplot', (['dts'], {'hist': '(True)', 'bins': 'new_bins', 'norm_hist': '(False)', 'kde': '(False)', 'label': '"""Test"""'}), "(dts, hist=True, bins=new_bins, norm_hist=False, kde=False,\n label='Test')\n", (1666, 1743), True, 'import seaborn as sns\n'), ((1742, 1871), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '"""on"""', 'top': '"""off"""', 'labelbottom': '"""on"""', 'right': '"""off"""', 'left': '"""on"""', 'labelleft': '"""off"""'}), "(axis='both', which='both', bottom='on', top='off',\n labelbottom='on', right='off', left='on', labelleft='off')\n", (1757, 1871), True, 'import matplotlib.pyplot as plt\n'), ((3195, 3217), 'numpy.zeros', 'np.zeros', (['self.x.shape'], {}), '(self.x.shape)\n', (3203, 3217), True, 'import numpy as np\n'), ((3298, 3322), 'numpy.isnan', 'np.isnan', (['indicator_nans'], {}), '(indicator_nans)\n', (3306, 3322), True, 'import numpy as np\n'), ((3337, 3357), 'numpy.any', 'np.any', (['nans'], {'axis': '(1)'}), '(nans, axis=1)\n', (3343, 3357), True, 'import numpy as np\n'), ((3398, 3452), 'numpy.linspace', 'np.linspace', (['(0)', '(tot_data - 1)', 'tot_data'], {'dtype': 'np.int32'}), '(0, tot_data - 1, tot_data, dtype=np.int32)\n', (3409, 3452), True, 'import numpy as np\n'), ((3496, 3522), 'numpy.random.permutation', 'np.random.permutation', (['idx'], {}), '(idx)\n', (3517, 3522), True, 'import numpy as np\n'), ((3811, 3826), 'numpy.copy', 'np.copy', (['self.x'], {}), '(self.x)\n', (3818, 3826), True, 'import numpy as np\n'), ((3956, 3971), 'numpy.copy', 'np.copy', (['self.x'], {}), '(self.x)\n', (3963, 3971), True, 'import numpy as np\n'), ((3984, 4007), 'numpy.mean', 'np.mean', (['self.x'], {'axis': '(0)'}), '(self.x, axis=0)\n', (3991, 4007), True, 'import numpy 
as np\n'), ((5382, 5408), 'numpy.unique', 'np.unique', (['self.field_name'], {}), '(self.field_name)\n', (5391, 5408), True, 'import numpy as np\n'), ((5464, 5491), 'numpy.arange', 'np.arange', (['no_unique_fields'], {}), '(no_unique_fields)\n', (5473, 5491), True, 'import numpy as np\n'), ((5505, 5525), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""Dark2"""'], {}), "('Dark2')\n", (5516, 5525), True, 'import matplotlib.cm as cm\n'), ((5538, 5582), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': '(0)', 'vmax': '(no_unique_fields - 1)'}), '(vmin=0, vmax=no_unique_fields - 1)\n', (5547, 5582), False, 'from matplotlib.colors import Normalize\n'), ((5679, 5725), 'numpy.zeros', 'np.zeros', (['self.field_name.shape'], {'dtype': '"""int32"""'}), "(self.field_name.shape, dtype='int32')\n", (5687, 5725), True, 'import numpy as np\n'), ((5860, 5888), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[10, 10]'}), '(figsize=[10, 10])\n', (5870, 5888), True, 'import matplotlib.pyplot as plt\n'), ((5891, 5962), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.loc[:, 0]', 'self.loc[:, 1]'], {'s': '(100)', 'c': 'cs[FieldName_idx]'}), '(self.loc[:, 0], self.loc[:, 1], s=100, c=cs[FieldName_idx])\n', (5902, 5962), True, 'import matplotlib.pyplot as plt\n'), ((5964, 6077), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.loc[self.train_idx, 0]', 'self.loc[self.train_idx, 1]'], {'s': '(50)', 'c': '"""k"""', 'marker': '"""x"""', 'label': '"""Train"""'}), "(self.loc[self.train_idx, 0], self.loc[self.train_idx, 1], s=50,\n c='k', marker='x', label='Train')\n", (5975, 6077), True, 'import matplotlib.pyplot as plt\n'), ((6074, 6185), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.loc[self.test_idx, 0]', 'self.loc[self.test_idx, 1]'], {'s': '(50)', 'c': '"""k"""', 'marker': '"""|"""', 'label': '"""Test"""'}), "(self.loc[self.test_idx, 0], self.loc[self.test_idx, 1], s=50, c\n ='k', marker='|', label='Test')\n", (6085, 6185), True, 'import matplotlib.pyplot as 
plt\n'), ((6183, 6195), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6193, 6195), True, 'import matplotlib.pyplot as plt\n'), ((6198, 6220), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Latitude"""'], {}), "('Latitude')\n", (6208, 6220), True, 'import matplotlib.pyplot as plt\n'), ((6223, 6246), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Longitude"""'], {}), "('Longitude')\n", (6233, 6246), True, 'import matplotlib.pyplot as plt\n'), ((6249, 6267), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (6257, 6267), True, 'import matplotlib.pyplot as plt\n'), ((6401, 6416), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (6409, 6416), True, 'import matplotlib.pyplot as plt\n'), ((6419, 6437), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6435, 6437), True, 'import matplotlib.pyplot as plt\n'), ((6539, 6567), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[14, 14]'}), '(figsize=[14, 14])\n', (6549, 6567), True, 'import matplotlib.pyplot as plt\n'), ((7190, 7208), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7206, 7208), True, 'import matplotlib.pyplot as plt\n'), ((7283, 7309), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[3, 3]'}), '(figsize=[3, 3])\n', (7293, 7309), True, 'import matplotlib.pyplot as plt\n'), ((7398, 7420), 'matplotlib.pyplot.legend', 'plt.legend', (["['Bakken']"], {}), "(['Bakken'])\n", (7408, 7420), True, 'import matplotlib.pyplot as plt\n'), ((7423, 7445), 'matplotlib.pyplot.title', 'plt.title', (['"""Oil rates"""'], {}), "('Oil rates')\n", (7432, 7445), True, 'import matplotlib.pyplot as plt\n'), ((7452, 7478), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[3, 3]'}), '(figsize=[3, 3])\n', (7462, 7478), True, 'import matplotlib.pyplot as plt\n'), ((7568, 7590), 'matplotlib.pyplot.legend', 'plt.legend', (["['Bakken']"], {}), "(['Bakken'])\n", (7578, 7590), True, 'import 
matplotlib.pyplot as plt\n'), ((7593, 7615), 'matplotlib.pyplot.title', 'plt.title', (['"""Gas rates"""'], {}), "('Gas rates')\n", (7602, 7615), True, 'import matplotlib.pyplot as plt\n'), ((7619, 7645), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[3, 3]'}), '(figsize=[3, 3])\n', (7629, 7645), True, 'import matplotlib.pyplot as plt\n'), ((7733, 7755), 'matplotlib.pyplot.legend', 'plt.legend', (["['Bakken']"], {}), "(['Bakken'])\n", (7743, 7755), True, 'import matplotlib.pyplot as plt\n'), ((7758, 7782), 'matplotlib.pyplot.title', 'plt.title', (['"""Water rates"""'], {}), "('Water rates')\n", (7767, 7782), True, 'import matplotlib.pyplot as plt\n'), ((1533, 1544), 'numpy.min', 'np.min', (['dtr'], {}), '(dtr)\n', (1539, 1544), True, 'import numpy as np\n'), ((1546, 1557), 'numpy.max', 'np.max', (['dtr'], {}), '(dtr)\n', (1552, 1557), True, 'import numpy as np\n'), ((1870, 1888), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1886, 1888), True, 'import matplotlib.pyplot as plt\n'), ((1890, 1902), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1900, 1902), True, 'import matplotlib.pyplot as plt\n'), ((1904, 1922), 'matplotlib.pyplot.title', 'plt.title', (['feature'], {}), '(feature)\n', (1913, 1922), True, 'import matplotlib.pyplot as plt\n'), ((2961, 2978), 'numpy.max', 'np.max', (['self.cumm'], {}), '(self.cumm)\n', (2967, 2978), True, 'import numpy as np\n'), ((6599, 6623), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(3)', '(i + 1)'], {}), '(5, 3, i + 1)\n', (6610, 6623), True, 'import matplotlib.pyplot as plt\n'), ((7348, 7395), 'matplotlib.pyplot.plot', 'plt.plot', (['self.d[i, :, 0]'], {'c': '"""green"""', 'alpha': '(0.4)'}), "(self.d[i, :, 0], c='green', alpha=0.4)\n", (7356, 7395), True, 'import matplotlib.pyplot as plt\n'), ((7517, 7562), 'matplotlib.pyplot.plot', 'plt.plot', (['self.d[i, :, 1]'], {'c': '"""red"""', 'alpha': '(0.4)'}), "(self.d[i, :, 1], c='red', alpha=0.4)\n", (7525, 7562), 
True, 'import matplotlib.pyplot as plt\n'), ((7684, 7730), 'matplotlib.pyplot.plot', 'plt.plot', (['self.d[i, :, 2]'], {'c': '"""blue"""', 'alpha': '(0.4)'}), "(self.d[i, :, 2], c='blue', alpha=0.4)\n", (7692, 7730), True, 'import matplotlib.pyplot as plt\n'), ((1445, 1458), 'numpy.isnan', 'np.isnan', (['dtr'], {}), '(dtr)\n', (1453, 1458), True, 'import numpy as np\n'), ((1493, 1506), 'numpy.isnan', 'np.isnan', (['dts'], {}), '(dts)\n', (1501, 1506), True, 'import numpy as np\n'), ((5777, 5822), 'numpy.where', 'np.where', (['(unique_fields == self.field_name[i])'], {}), '(unique_fields == self.field_name[i])\n', (5785, 5822), True, 'import numpy as np\n')] |
from abc import ABC, abstractmethod
from enum import Enum
from functools import lru_cache
import pathlib
import subprocess
import tempfile
import numpy as np
from pyschism.mesh.hgrid import Hgrid
def C_of_sigma(sigma, theta_b, theta_f):
    """S-coordinate stretching function C(sigma) (Song & Haidvogel, 1994).

    Parameters:
        sigma: sigma coordinate, in [-1, 0].
        theta_b: bottom stretching parameter, 0 <= theta_b <= 1.
        theta_f: surface stretching parameter, must be > 0 (the formula
            divides by sinh(theta_f) and tanh(theta_f/2)).
    """
    # BUG FIX: the original asserted `theta_b <= 0.` / `theta_f <= 0.`
    # (rejecting every legal parameter value) and was missing the `*` in
    # `(1-theta_b)(...)`, which raised "'float' object is not callable".
    assert 0. <= theta_b <= 1.
    assert theta_f > 0.
    A = (1 - theta_b) * (np.sinh(sigma*theta_f)/np.sinh(theta_f))
    B_1 = np.tanh(theta_f*(sigma+0.5)) - np.tanh(theta_f/2.)
    B = theta_b * (B_1 / (2.*np.tanh(theta_f/2.)))
    return A + B
def eta_of_sigma(sigma):
    """Map a sigma coordinate in [-1, 0] to eta = 1 + sigma."""
    return sigma + 1
def S_to_Z(sigma):
    """Convert S coordinates to Z coordinates (eq 3.1).

    Not yet implemented; currently a stub that returns None.
    """
    # eq 3.1
    pass
class VgridType(Enum):
    """Vertical grid types understood by SCHISM, keyed by ivcor value."""

    LSC2 = 1
    SZ = 2

    @classmethod
    def _missing_(cls, value):
        # Reject unknown ivcor codes with an informative error.
        raise ValueError(f'ivcor={value} is not a valid vgrid type.')
class Vgrid(ABC):
    """Abstract base class for SCHISM vertical grids (``vgrid.in``)."""

    @abstractmethod
    def __str__(self):
        # Subclasses serialize themselves to the vgrid.in text format.
        raise NotImplementedError

    @staticmethod
    def default(h_s, ztot, h_c, theta_b, theta_f, sigma):
        # The default vertical grid is an SZ grid.
        # NOTE(review): SZ.default is not defined in this view and is called
        # with no arguments from is2D below — confirm its signature.
        return SZ.default(h_s, ztot, h_c, theta_b, theta_f, sigma)

    @classmethod
    def from_binary(cls, hgrid, binary='gen_vqs'):
        """Build a vgrid by running a SCHISM vgrid generator on *hgrid*.

        Writes the hgrid into a temporary directory, runs *binary* there,
        and parses the resulting ``vgrid.in``.
        """
        _tmpdir = tempfile.TemporaryDirectory()
        tmpdir = pathlib.Path(_tmpdir.name)
        hgrid = Hgrid.open(hgrid, crs='EPSG:4326')
        hgrid.write(tmpdir / 'hgrid.gr3')
        subprocess.check_call([binary], cwd=tmpdir)
        # NOTE(review): _tmpdir may be garbage-collected (and the directory
        # removed) before cls.open reads vgrid.in — confirm lifetime.
        return cls.open(tmpdir / 'vgrid.in')

    @staticmethod
    def open(path):
        '''
        Dispatch to the vgrid subclass matching the ivcor value found on
        the first line of the file.

        Based on:
        https://github.com/wzhengui/pylibs/blob/master/Utility/schism_file.py
        '''
        with open(path) as f:
            return VgridTypeDispatch[VgridType(
                int(f.read().strip().split()[0])).name].value.open(path)

    @abstractmethod
    def get_xyz(self, gr3, crs=None):
        # Return x/y/z coordinates for every node/level pair.
        pass

    def write(self, path, overwrite=False):
        """Write the serialized grid to *path*.

        Raises if the file exists unless *overwrite* is True.
        """
        path = pathlib.Path(path)
        if path.is_file() and not overwrite:
            raise Exception(
                'File exists, pass overwrite=True to allow overwrite.')
        with open(path, 'w') as f:
            f.write(str(self))

    @property
    def ivcor(self):
        # Numeric ivcor code derived from the subclass name (LSC2=1, SZ=2).
        return VgridType[self.__class__.__name__].value

    @property
    @abstractmethod
    def nvrt(self):
        # Number of vertical levels.
        raise NotImplementedError

    @lru_cache(maxsize=1)
    def is2D(self):
        """Return True if this is exactly the default (2D) SZ grid."""
        # NOTE(review): lru_cache on an instance method keys on `self` and,
        # with maxsize=1, only caches one instance at a time — verify this
        # is intended.
        if isinstance(self, SZ):
            if str(self) == str(SZ.default()):
                return True
        return False

    def is3D(self):
        """Return True if the grid is not the default 2D grid."""
        # BUG FIX: the original returned `~self.is2D()`; `~` is bitwise
        # inversion, and ~False == -1 is truthy, so is3D was truthy even
        # for 2D grids.  Use boolean `not` instead.
        return not self.is2D()
class LSC2(Vgrid):
    """Localized Sigma Coordinates (LSC2) vertical grid (ivcor=1)."""

    def __init__(self, sigma):
        # sigma: (nodes, nvrt) array of sigma levels; entries of -1 mark
        # levels below the local bottom.
        self.sigma = sigma

    def __str__(self):
        """Serialize to the vgrid.in (ivcor=1) text format."""
        f = [
            f'{self.ivcor}',
            f'{self.nvrt}',
        ]
        for i, row in enumerate(self.sigma):
            # Number of below-bottom placeholder (-1) entries for this node.
            kbp = int((row == -1).sum())
            line = [
                f'{i+1}'.rjust(11),
                f'{kbp}'.rjust(11),
                7*' ',
                '-1.000000',
            ]
            for value in row:
                if value != -1:
                    line.append(7*' ')
                    line.append(f'{value:6f}')
            f.append(' '.join(line))
        return '\n'.join(f)

    def get_xyz(self, gr3, crs=None):
        """Return an (nodes * nvrt, 3) array of x, y, z positions.

        z is each node's depth value scaled by its sigma levels; rows are
        ordered node-major (all levels of node 0, then node 1, ...).
        """
        xy = gr3.get_xy(crs)
        z = gr3.values[:, None]*self.sigma
        # BUG FIX: the original used xy[:, 0] for BOTH x and y, and used
        # np.tile, which interleaves nodes out of step with z.flatten()'s
        # node-major order.  np.repeat matches the flattening; y uses col 1.
        x = np.repeat(xy[:, 0], z.shape[1])
        y = np.repeat(xy[:, 1], z.shape[1])
        return np.vstack([x, y, z.flatten()]).T

    @classmethod
    def open(cls, path):
        """Parse an ivcor=1 vgrid.in file into an LSC2 instance."""
        path = pathlib.Path(path)
        with open(path) as f:
            lines = f.readlines()
        ivcor = int(lines[0].strip().split()[0])
        if ivcor != 1:
            raise TypeError(f'File {path} is not an LSC2 grid (ivcor != 1).')
        nvrt = int(lines[1].strip().split()[0])
        # kbp: per-node index of the first valid (above-bottom) level.
        kbp = np.array([int(i.split()[1])-1 for i in lines[2:]])
        sigma = -np.ones((len(kbp), nvrt))
        for i, line in enumerate(lines[2:]):
            sigma[i, kbp[i]:] = np.array(
                line.strip().split()[2:]).astype('float')
        return cls(sigma)

    @property
    def nvrt(self):
        # Number of vertical levels.
        return self.sigma.shape[1]
class SZ(Vgrid):
    """Hybrid S-Z vertical grid (ivcor == 2)."""

    def __init__(self, h_s, ztot, h_c, theta_b, theta_f, sigma):
        # h_s: transition depth between S and Z layers.
        self.h_s = h_s
        self.ztot = np.array(ztot)
        self.h_c = h_c
        self.theta_b = theta_b
        self.theta_f = theta_f
        self.sigma = np.array(sigma)

    def __str__(self):
        f = [
            f'{self.ivcor:d} !ivcor',
            f'{self.nvrt:d} {self.kz:d} {self.h_s:G} '
            '!nvrt, kz (# of Z-levels); h_s '
            ' (transition depth between S and Z)',
            'Z levels',
        ]
        for i, row in enumerate(self.ztot):
            f.append(f'{i+1:d} {row:G}')
        f.extend([
            'S levels',
            f'{self.h_c:G} {self.theta_b:G} {self.theta_f:G} '
            ' !h_c, theta_b, theta_f',
        ])
        for i, row in enumerate(self.sigma):
            f.append(f'{i+1:d} {row:G}')
        return '\n'.join(f)

    def get_xyz(self, gr3, crs=None):
        raise NotImplementedError('SZ.get_xyz')

    @classmethod
    def open(cls, path):
        """Parse an ivcor == 2 vgrid.in file into an SZ instance."""
        path = pathlib.Path(path)
        with open(path) as f:
            lines = f.readlines()
        ivcor = int(lines[0].strip().split()[0])
        if ivcor != 2:
            raise TypeError(f'File {path} is not an SZ grid (ivcor != 2).')
        nvrt = int(lines[1].strip().split()[0])
        kz, h_s = lines[1].strip().split()[1:3]
        kz = int(kz)
        h_s = float(h_s)
        # read z grid
        ztot = []
        irec = 2
        for i in np.arange(kz):
            irec = irec+1
            ztot.append(lines[irec].strip().split()[1])
        ztot = np.array(ztot).astype('float')
        # read s grid
        sigma = []
        irec = irec+2
        nsigma = nvrt - kz+1
        h_c, theta_b, theta_f = np.array(
            lines[irec].strip().split()[:3]).astype('float')
        for i in np.arange(nsigma):
            irec = irec + 1
            sigma.append(lines[irec].strip().split()[1])
        sigma = np.array(sigma).astype('float')
        return cls(h_s, ztot, h_c, theta_b, theta_f, sigma)

    @classmethod
    def default(cls, h_s=1.e6, ztot=(-1.e6,), h_c=40., theta_b=1.,
                theta_f=1.e-4, sigma=(-1, 0.)):
        """Return an SZ grid; with no arguments, the canonical two-level 2D
        grid.

        BUGFIX: Vgrid.is2D() calls SZ.default() with no arguments, which
        raised TypeError because all six parameters were required. The
        defaults restore the previously commented-out parameterless
        behaviour while remaining backward-compatible with positional
        callers (e.g. Vgrid.default)."""
        return cls(h_s, ztot, h_c, theta_b, theta_f, sigma)

    @property
    def kz(self):
        # Number of Z levels.
        return self.ztot.shape[0]

    @property
    def nvrt(self):
        return self.sigma.shape[0]
class VgridTypeDispatch(Enum):
    # Maps vgrid type names (matching VgridType member names) to the
    # concrete Vgrid subclass used to parse that file format.
    LSC2 = LSC2
    SZ = SZ
| [
"tempfile.TemporaryDirectory",
"numpy.tile",
"pathlib.Path",
"subprocess.check_call",
"numpy.tanh",
"numpy.sinh",
"numpy.array",
"functools.lru_cache",
"pyschism.mesh.hgrid.Hgrid.open",
"numpy.arange"
] | [((2229, 2249), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (2238, 2249), False, 'from functools import lru_cache\n'), ((398, 430), 'numpy.tanh', 'np.tanh', (['(theta_f * (sigma + 0.5))'], {}), '(theta_f * (sigma + 0.5))\n', (405, 430), True, 'import numpy as np\n'), ((429, 451), 'numpy.tanh', 'np.tanh', (['(theta_f / 2.0)'], {}), '(theta_f / 2.0)\n', (436, 451), True, 'import numpy as np\n'), ((1104, 1133), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1131, 1133), False, 'import tempfile\n'), ((1151, 1177), 'pathlib.Path', 'pathlib.Path', (['_tmpdir.name'], {}), '(_tmpdir.name)\n', (1163, 1177), False, 'import pathlib\n'), ((1194, 1228), 'pyschism.mesh.hgrid.Hgrid.open', 'Hgrid.open', (['hgrid'], {'crs': '"""EPSG:4326"""'}), "(hgrid, crs='EPSG:4326')\n", (1204, 1228), False, 'from pyschism.mesh.hgrid import Hgrid\n'), ((1279, 1322), 'subprocess.check_call', 'subprocess.check_call', (['[binary]'], {'cwd': 'tmpdir'}), '([binary], cwd=tmpdir)\n', (1300, 1322), False, 'import subprocess\n'), ((1810, 1828), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (1822, 1828), False, 'import pathlib\n'), ((3215, 3247), 'numpy.tile', 'np.tile', (['xy[:, 0]', '(z.shape[1],)'], {}), '(xy[:, 0], (z.shape[1],))\n', (3222, 3247), True, 'import numpy as np\n'), ((3260, 3292), 'numpy.tile', 'np.tile', (['xy[:, 0]', '(z.shape[1],)'], {}), '(xy[:, 0], (z.shape[1],))\n', (3267, 3292), True, 'import numpy as np\n'), ((3400, 3418), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (3412, 3418), False, 'import pathlib\n'), ((4165, 4179), 'numpy.array', 'np.array', (['ztot'], {}), '(ztot)\n', (4173, 4179), True, 'import numpy as np\n'), ((4286, 4301), 'numpy.array', 'np.array', (['sigma'], {}), '(sigma)\n', (4294, 4301), True, 'import numpy as np\n'), ((5070, 5088), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (5082, 5088), False, 'import pathlib\n'), ((5522, 5535), 'numpy.arange', 
'np.arange', (['kz'], {}), '(kz)\n', (5531, 5535), True, 'import numpy as np\n'), ((5877, 5894), 'numpy.arange', 'np.arange', (['nsigma'], {}), '(nsigma)\n', (5886, 5894), True, 'import numpy as np\n'), ((347, 371), 'numpy.sinh', 'np.sinh', (['(sigma * theta_f)'], {}), '(sigma * theta_f)\n', (354, 371), True, 'import numpy as np\n'), ((370, 386), 'numpy.sinh', 'np.sinh', (['theta_f'], {}), '(theta_f)\n', (377, 386), True, 'import numpy as np\n'), ((478, 500), 'numpy.tanh', 'np.tanh', (['(theta_f / 2.0)'], {}), '(theta_f / 2.0)\n', (485, 500), True, 'import numpy as np\n'), ((5634, 5648), 'numpy.array', 'np.array', (['ztot'], {}), '(ztot)\n', (5642, 5648), True, 'import numpy as np\n'), ((5997, 6012), 'numpy.array', 'np.array', (['sigma'], {}), '(sigma)\n', (6005, 6012), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
import matplotlib as mb
# Measurement folder; the .dat file inside shares the folder's name
# (path[16:] slices off the date directory prefix -- TODO confirm for
# other path layouts).
path = r'D:\data\20190825\001350_test1'
data_name = path+path[16:]+r'.dat'
data = np.loadtxt(data_name, unpack=True)
# Number of back-to-back sweeps stored in the file.
n=121
# gate= np.array_split(data[0],n)
# Keep only the first frequency chunk (axis assumed identical per sweep).
freq = np.array_split(data[0],n)[0]
# real = np.array_split(data[2],n)
absol = np.array_split(data[3],n)
print(freq)
plt.title(path[8:])
for i in range(n):
    # NOTE(review): freq is a single 1-D array after [0], so freq[i] is a
    # scalar while absol[i] is an array -- this likely errors or plots a
    # single point; probably meant freq/1e6 -- confirm against the data.
    plt.plot(freq[i]/1e6,absol[i], label= 'Probe -62 dBm')
# plt.imshow(absol, aspect='auto',extent=[freq[0]/1e9, freq[-1]/1e9, power[-1][0], power[0][0]], cmap = 'jet')
plt.xlabel('Drive Frequency (MHz)')
plt.ylabel(r'$S_{21}$')
plt.legend()
plt.show() | [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array_split",
"matplotlib.pyplot.title",
"numpy.loadtxt",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((201, 235), 'numpy.loadtxt', 'np.loadtxt', (['data_name'], {'unpack': '(True)'}), '(data_name, unpack=True)\n', (211, 235), True, 'import numpy as np\n'), ((364, 390), 'numpy.array_split', 'np.array_split', (['data[3]', 'n'], {}), '(data[3], n)\n', (378, 390), True, 'import numpy as np\n'), ((408, 427), 'matplotlib.pyplot.title', 'plt.title', (['path[8:]'], {}), '(path[8:])\n', (417, 427), True, 'import matplotlib.pyplot as plt\n'), ((618, 653), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Drive Frequency (MHz)"""'], {}), "('Drive Frequency (MHz)')\n", (628, 653), True, 'import matplotlib.pyplot as plt\n'), ((655, 677), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$S_{21}$"""'], {}), "('$S_{21}$')\n", (665, 677), True, 'import matplotlib.pyplot as plt\n'), ((680, 692), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (690, 692), True, 'import matplotlib.pyplot as plt\n'), ((694, 704), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (702, 704), True, 'import matplotlib.pyplot as plt\n'), ((290, 316), 'numpy.array_split', 'np.array_split', (['data[0]', 'n'], {}), '(data[0], n)\n', (304, 316), True, 'import numpy as np\n'), ((450, 512), 'matplotlib.pyplot.plot', 'plt.plot', (['(freq[i] / 1000000.0)', 'absol[i]'], {'label': '"""Probe -62 dBm"""'}), "(freq[i] / 1000000.0, absol[i], label='Probe -62 dBm')\n", (458, 512), True, 'import matplotlib.pyplot as plt\n')] |
"""
Classification.py
"""
import os
import json
import gzip
import json
import numpy as np
from datetime import datetime
from sklearn.linear_model import LogisticRegression, SGDClassifier
from arxiv_public_data.embeddings.util import load_embeddings, fill_zeros
import arxiv_public_data.tests.cocitation_category_feature as features
from arxiv_public_data.config import DIR_OUTPUT, DIR_BASE, LOGGER
from arxiv_public_data.oai_metadata import load_metadata
logger = LOGGER.getChild('lr-classify')
def loaddata(fname='data/internal-references.json.gz'):
    """Load and return the parsed contents of a gzipped JSON file.

    Parameters
    ----------
    fname : str
        Path to a .json.gz file.

    Returns
    -------
    The deserialized JSON object.
    """
    # Use a context manager so the file handle is closed promptly; the
    # original `json.load(gzip.open(...))` leaked the handle until GC.
    with gzip.open(fname, 'rt') as fin:
        return json.load(fin)
def in_top_n(prob, target, n=5):
    """Fraction of samples whose true label is among the *n* classes with
    the highest predicted probability.

    Parameters
    ----------
    prob : ndarray of shape (n_samples, n_classes)
    target : sequence of int class indices
    n : int
    """
    n_samples = prob.shape[0]
    class_labels = np.arange(prob.shape[1])
    hits = 0
    for row, truth in zip(prob, target):
        top_labels = sorted(class_labels, key=lambda lbl: row[lbl])[-n:]
        if truth in top_labels:
            hits += 1
    return hits / n_samples
def train_test(model, X_train, y_train, X_test, y_test):
    """Fit *model* on the training split and score it on the test split.

    Returns
    -------
    dict with keys top1/top3/top5 (accuracies), loglikelihood (total log
    probability assigned to the true labels) and perplexity (per sample).
    """
    model.fit(X_train, y_train)
    accuracy = np.mean(model.predict(X_test) == y_test)
    probabilities = model.predict_proba(X_test)
    ll = np.sum(np.log([row[label] for row, label in zip(probabilities, y_test)]))
    ppl = 2 ** (-ll / len(y_test) / np.log(2))
    return dict(
        top1=accuracy,
        top3=in_top_n(probabilities, y_test, 3),
        top5=in_top_n(probabilities, y_test, 5),
        loglikelihood=ll,
        perplexity=ppl,
    )
# Locations of the precomputed text embeddings ("usel" presumably denotes
# Universal Sentence Encoder large -- confirm against the embedding
# generation scripts).
EMBDIR = os.path.join(DIR_OUTPUT, 'embeddings')
usel_abstract = os.path.join(EMBDIR, 'abstract-embedding-usel-2019-03-19.pkl')
usel_title = os.path.join(EMBDIR, 'title-embedding-usel-2019-03-19.pkl')
usel_fulltext = os.path.join(
    EMBDIR, 'fulltext-embedding-usel-2-headers-2019-04-05.pkl'
)
# OAI metadata dump and the internal citation adjacency graph.
md_file = os.path.join(DIR_BASE, 'arxiv-metadata-oai-2019-03-01.json.gz')
adj_file = os.path.join(DIR_OUTPUT, 'internal-citations.json.gz')
def maincat(name):
    """Strip the sub-category suffix: 'cs.AI' -> 'cs'.

    Names without a dot (e.g. 'hep-th') are returned unchanged.
    """
    return name.partition('.')[0]
def shuffle(arr, seed=14850):
    """Deterministic in-place shuffle of *arr* using a fixed-seed RNG, so
    repeated calls with the same seed produce the same permutation."""
    np.random.RandomState(seed).shuffle(arr)
def ids_cats(md_file, subcats=True):
    """Return deterministically shuffled arrays of article ids and categories.

    With subcats=True the full primary category ('cs.AI') is kept;
    otherwise only the main category ('cs'). Ids and categories are
    shuffled with the same fixed seed, so the i-th id still corresponds
    to the i-th category.
    """
    metadata = load_metadata(md_file)
    ids = np.array([entry['id'] for entry in metadata], dtype='object')
    if subcats:
        cats = np.array([entry['categories'][0].split()[0]
                         for entry in metadata], dtype='object')
    else:
        cats = np.array([maincat(entry['categories'][0].split()[0])
                         for entry in metadata], dtype='object')
    shuffle(ids)
    shuffle(cats)
    return ids, cats
if __name__ == "__main__":
    # End-to-end experiment: build per-article features (cocitation
    # category vectors and three text embeddings), fit an SGD logistic
    # regression per feature set and per leave-one-out ablation, and dump
    # all scores to a timestamped JSON file.
    adj = loaddata(adj_file)
    ids, cats = ids_cats(md_file, subcats=True)
    scats = list(set(cats))
    labels = {c: l for l, c in enumerate(scats)}
    target = [labels[c] for c in cats]
    # Fixed split over the (deterministically shuffled) corpus.
    train_size = 1200000
    ids_train = ids[:train_size]
    ids_test = ids[train_size:]
    target_train = target[:train_size]
    target_test = target[train_size:]
    # Features containing cocitation category information
    mc_train, mc_test = features.cocitation_feature(adj, ids_train, ids_test,
                                                    target_train, target_test)
    # Shared hyperparameters for every logistic-regression fit below.
    model_kwargs = dict(loss='log', tol=1e-6, max_iter=50, alpha=1e-7,
                        verbose=False, n_jobs=6)
    results = {}
    # JUST cocitation features
    logger.info('Fitting cocitation vectors')
    lr = SGDClassifier(**model_kwargs)
    results['cocitation'] = train_test(lr, mc_train, target_train,
                                       mc_test, target_test)
    logger.info(results['cocitation'])
    logger.info('cocitation vectors done!')
    # JUST full text
    fulltext_vec = fill_zeros(load_embeddings(usel_fulltext, headers=2))
    shuffle(fulltext_vec)
    fulltext_train = fulltext_vec[:train_size]
    fulltext_test = fulltext_vec[train_size:]
    logger.info('Fitting fulltext vectors')
    lr = SGDClassifier(**model_kwargs)
    results['fulltext'] = train_test(lr, fulltext_train, target_train,
                                     fulltext_test, target_test)
    logger.info(results['fulltext'])
    logger.info('fulltext vectors done!')
    # JUST titles
    title_vec = load_embeddings(usel_title)['embeddings']
    shuffle(title_vec)
    title_train = title_vec[:train_size]
    title_test = title_vec[train_size:]
    logger.info('Fitting title vectors')
    lr = SGDClassifier(**model_kwargs)
    results['titles'] = train_test(lr, title_train, target_train, title_test,
                                   target_test)
    logger.info(results['titles'])
    logger.info('title vectors done!')
    # JUST abstracts
    abstract_vec = load_embeddings(usel_abstract)['embeddings']
    shuffle(abstract_vec)
    abstract_train = abstract_vec[:train_size]
    abstract_test = abstract_vec[train_size:]
    logger.info('Fitting abstract vectors')
    lr = SGDClassifier(**model_kwargs)
    results['abstracts'] = train_test(lr, abstract_train, target_train,
                                      abstract_test, target_test)
    logger.info(results['abstracts'])
    logger.info('abstract vectors done!')
    # ALL features
    logger.info('Fitting all features')
    lr = SGDClassifier(**model_kwargs)
    results['all'] = train_test(
        lr,
        np.concatenate(
            [title_train, abstract_train, mc_train, fulltext_train], axis=1
        ),
        target_train,
        np.concatenate(
            [title_test, abstract_test, mc_test, fulltext_test], axis=1
        ),
        target_test
    )
    logger.info(results['all'])
    logger.info('all features done!')
    #
    # Now feature ablations (individual removals)
    #
    # ALL - titles
    logger.info('Fitting all - titles')
    lr = SGDClassifier(**model_kwargs)
    results['all - titles'] = train_test(
        lr,
        np.concatenate(
            [abstract_train, mc_train, fulltext_train], axis=1
        ),
        target_train,
        np.concatenate(
            [abstract_test, mc_test, fulltext_test], axis=1
        ),
        target_test
    )
    logger.info(results['all - titles'])
    logger.info('all - titles done!')
    # ALL - abstracts
    logger.info('Fitting all - abstracts')
    lr = SGDClassifier(**model_kwargs)
    results['all - abstracts'] = train_test(
        lr,
        np.concatenate(
            [title_train, mc_train, fulltext_train], axis=1
        ),
        target_train,
        np.concatenate(
            [title_test, mc_test, fulltext_test], axis=1
        ),
        target_test
    )
    logger.info(results['all - abstracts'])
    logger.info('all - abstracts done!')
    # ALL - cocitation
    logger.info('Fitting all - cocitation')
    lr = SGDClassifier(**model_kwargs)
    results['all - cocitation'] = train_test(
        lr,
        np.concatenate(
            [title_train, abstract_train, fulltext_train], axis=1
        ),
        target_train,
        np.concatenate(
            [title_test, abstract_test, fulltext_test], axis=1
        ),
        target_test
    )
    logger.info(results['all - cocitation'])
    logger.info('all - cocitation done!')
    # ALL - fulltext
    logger.info('Fitting all features')
    lr = SGDClassifier(**model_kwargs)
    results['all - fulltext'] = train_test(
        lr,
        np.concatenate(
            [title_train, abstract_train, mc_train], axis=1
        ),
        target_train,
        np.concatenate(
            [title_test, abstract_test, mc_test], axis=1
        ),
        target_test
    )
    logger.info(results['all - fulltext'])
    logger.info('all - fulltext done!')
    # SAVE
    nowdate = str(datetime.now()).split()[0]
    filename = "logistic-regression-classification-{}.json".format(nowdate)
    with open(os.path.join(DIR_OUTPUT, filename), 'w') as fout:
        json.dump(results, fout)
| [
"sklearn.linear_model.SGDClassifier",
"arxiv_public_data.oai_metadata.load_metadata",
"gzip.open",
"json.dump",
"numpy.log",
"os.path.join",
"arxiv_public_data.tests.cocitation_category_feature.cocitation_feature",
"numpy.array",
"arxiv_public_data.embeddings.util.load_embeddings",
"datetime.datet... | [((469, 499), 'arxiv_public_data.config.LOGGER.getChild', 'LOGGER.getChild', (['"""lr-classify"""'], {}), "('lr-classify')\n", (484, 499), False, 'from arxiv_public_data.config import DIR_OUTPUT, DIR_BASE, LOGGER\n'), ((1360, 1398), 'os.path.join', 'os.path.join', (['DIR_OUTPUT', '"""embeddings"""'], {}), "(DIR_OUTPUT, 'embeddings')\n", (1372, 1398), False, 'import os\n'), ((1415, 1477), 'os.path.join', 'os.path.join', (['EMBDIR', '"""abstract-embedding-usel-2019-03-19.pkl"""'], {}), "(EMBDIR, 'abstract-embedding-usel-2019-03-19.pkl')\n", (1427, 1477), False, 'import os\n'), ((1491, 1550), 'os.path.join', 'os.path.join', (['EMBDIR', '"""title-embedding-usel-2019-03-19.pkl"""'], {}), "(EMBDIR, 'title-embedding-usel-2019-03-19.pkl')\n", (1503, 1550), False, 'import os\n'), ((1567, 1639), 'os.path.join', 'os.path.join', (['EMBDIR', '"""fulltext-embedding-usel-2-headers-2019-04-05.pkl"""'], {}), "(EMBDIR, 'fulltext-embedding-usel-2-headers-2019-04-05.pkl')\n", (1579, 1639), False, 'import os\n'), ((1657, 1720), 'os.path.join', 'os.path.join', (['DIR_BASE', '"""arxiv-metadata-oai-2019-03-01.json.gz"""'], {}), "(DIR_BASE, 'arxiv-metadata-oai-2019-03-01.json.gz')\n", (1669, 1720), False, 'import os\n'), ((1732, 1786), 'os.path.join', 'os.path.join', (['DIR_OUTPUT', '"""internal-citations.json.gz"""'], {}), "(DIR_OUTPUT, 'internal-citations.json.gz')\n", (1744, 1786), False, 'import os\n'), ((663, 687), 'numpy.arange', 'np.arange', (['prob.shape[1]'], {}), '(prob.shape[1])\n', (672, 687), True, 'import numpy as np\n'), ((1963, 1990), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (1984, 1990), True, 'import numpy as np\n'), ((2059, 2081), 'arxiv_public_data.oai_metadata.load_metadata', 'load_metadata', (['md_file'], {}), '(md_file)\n', (2072, 2081), False, 'from arxiv_public_data.oai_metadata import load_metadata\n'), ((2092, 2139), 'numpy.array', 'np.array', (["[m['id'] for m in md]"], {'dtype': '"""object"""'}), 
"([m['id'] for m in md], dtype='object')\n", (2100, 2139), True, 'import numpy as np\n'), ((2946, 3031), 'arxiv_public_data.tests.cocitation_category_feature.cocitation_feature', 'features.cocitation_feature', (['adj', 'ids_train', 'ids_test', 'target_train', 'target_test'], {}), '(adj, ids_train, ids_test, target_train, target_test\n )\n', (2973, 3031), True, 'import arxiv_public_data.tests.cocitation_category_feature as features\n'), ((3308, 3337), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {}), '(**model_kwargs)\n', (3321, 3337), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((3817, 3846), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {}), '(**model_kwargs)\n', (3830, 3846), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((4294, 4323), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {}), '(**model_kwargs)\n', (4307, 4323), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((4787, 4816), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {}), '(**model_kwargs)\n', (4800, 4816), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((5107, 5136), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {}), '(**model_kwargs)\n', (5120, 5136), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((5650, 5679), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {}), '(**model_kwargs)\n', (5663, 5679), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((6129, 6158), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {}), '(**model_kwargs)\n', (6142, 6158), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((6613, 6642), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {}), '(**model_kwargs)\n', (6626, 6642), False, 'from sklearn.linear_model import LogisticRegression, 
SGDClassifier\n'), ((7106, 7135), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {}), '(**model_kwargs)\n', (7119, 7135), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((578, 599), 'gzip.open', 'gzip.open', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (587, 599), False, 'import gzip\n'), ((3601, 3642), 'arxiv_public_data.embeddings.util.load_embeddings', 'load_embeddings', (['usel_fulltext'], {'headers': '(2)'}), '(usel_fulltext, headers=2)\n', (3616, 3642), False, 'from arxiv_public_data.embeddings.util import load_embeddings, fill_zeros\n'), ((4097, 4124), 'arxiv_public_data.embeddings.util.load_embeddings', 'load_embeddings', (['usel_title'], {}), '(usel_title)\n', (4112, 4124), False, 'from arxiv_public_data.embeddings.util import load_embeddings, fill_zeros\n'), ((4569, 4599), 'arxiv_public_data.embeddings.util.load_embeddings', 'load_embeddings', (['usel_abstract'], {}), '(usel_abstract)\n', (4584, 4599), False, 'from arxiv_public_data.embeddings.util import load_embeddings, fill_zeros\n'), ((5190, 5269), 'numpy.concatenate', 'np.concatenate', (['[title_train, abstract_train, mc_train, fulltext_train]'], {'axis': '(1)'}), '([title_train, abstract_train, mc_train, fulltext_train], axis=1)\n', (5204, 5269), True, 'import numpy as np\n'), ((5323, 5398), 'numpy.concatenate', 'np.concatenate', (['[title_test, abstract_test, mc_test, fulltext_test]'], {'axis': '(1)'}), '([title_test, abstract_test, mc_test, fulltext_test], axis=1)\n', (5337, 5398), True, 'import numpy as np\n'), ((5742, 5808), 'numpy.concatenate', 'np.concatenate', (['[abstract_train, mc_train, fulltext_train]'], {'axis': '(1)'}), '([abstract_train, mc_train, fulltext_train], axis=1)\n', (5756, 5808), True, 'import numpy as np\n'), ((5862, 5925), 'numpy.concatenate', 'np.concatenate', (['[abstract_test, mc_test, fulltext_test]'], {'axis': '(1)'}), '([abstract_test, mc_test, fulltext_test], axis=1)\n', (5876, 5925), True, 'import numpy as np\n'), 
((6224, 6287), 'numpy.concatenate', 'np.concatenate', (['[title_train, mc_train, fulltext_train]'], {'axis': '(1)'}), '([title_train, mc_train, fulltext_train], axis=1)\n', (6238, 6287), True, 'import numpy as np\n'), ((6341, 6401), 'numpy.concatenate', 'np.concatenate', (['[title_test, mc_test, fulltext_test]'], {'axis': '(1)'}), '([title_test, mc_test, fulltext_test], axis=1)\n', (6355, 6401), True, 'import numpy as np\n'), ((6709, 6778), 'numpy.concatenate', 'np.concatenate', (['[title_train, abstract_train, fulltext_train]'], {'axis': '(1)'}), '([title_train, abstract_train, fulltext_train], axis=1)\n', (6723, 6778), True, 'import numpy as np\n'), ((6832, 6898), 'numpy.concatenate', 'np.concatenate', (['[title_test, abstract_test, fulltext_test]'], {'axis': '(1)'}), '([title_test, abstract_test, fulltext_test], axis=1)\n', (6846, 6898), True, 'import numpy as np\n'), ((7200, 7263), 'numpy.concatenate', 'np.concatenate', (['[title_train, abstract_train, mc_train]'], {'axis': '(1)'}), '([title_train, abstract_train, mc_train], axis=1)\n', (7214, 7263), True, 'import numpy as np\n'), ((7317, 7377), 'numpy.concatenate', 'np.concatenate', (['[title_test, abstract_test, mc_test]'], {'axis': '(1)'}), '([title_test, abstract_test, mc_test], axis=1)\n', (7331, 7377), True, 'import numpy as np\n'), ((7715, 7739), 'json.dump', 'json.dump', (['results', 'fout'], {}), '(results, fout)\n', (7724, 7739), False, 'import json\n'), ((1148, 1157), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1154, 1157), True, 'import numpy as np\n'), ((7657, 7691), 'os.path.join', 'os.path.join', (['DIR_OUTPUT', 'filename'], {}), '(DIR_OUTPUT, filename)\n', (7669, 7691), False, 'import os\n'), ((7540, 7554), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7552, 7554), False, 'from datetime import datetime\n')] |
import os, copy, cProfile, pstats, io
import numpy as np
import gdspy as gp
import gds_tools as gdst
def profile(fnc):
    """A decorator that uses cProfile to profile a function.

    Prints cumulative-time statistics to stdout after each call and
    returns the wrapped function's return value unchanged.
    """
    import functools

    # BUGFIX: without functools.wraps the decorated function lost its
    # __name__/__doc__, which breaks introspection and repeated decoration.
    @functools.wraps(fnc)
    def inner(*args, **kwargs):
        pr = cProfile.Profile()
        pr.enable()
        retval = fnc(*args, **kwargs)
        pr.disable()
        s = io.StringIO()
        sortby = 'cumulative'
        ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        ps.print_stats()
        print(s.getvalue())
        return retval
    return inner
def RotMat(rad):
    #==========================
    # Generate rotation matrix \\
    #=========================================================================
    # Returns the 2x2 counterclockwise rotation matrix (as np.matrix).    ||
    #                                                                     ||
    # Arguments:    rad : radians to rotate about origin                  ||
    #=========================================================================
    cos_a = np.cos(rad)
    sin_a = np.sin(rad)
    return np.matrix([[cos_a, -sin_a], [sin_a, cos_a]])
def VecRot(rad, vec, origin = (0, 0)):
    #=========================
    # Perform vector rotation \\
    #=========================================================================
    # Rotate *vec* by *rad* radians about *origin*; returns a plain list. ||
    #                                                                     ||
    # Arguments:    rad    : radians to rotate about origin               ||
    #               vec    : input vector (2-x-n list)                    ||
    #=========================================================================
    pivot = np.array(origin)
    rotated = RotMat(rad).dot(np.array(vec) - pivot) + pivot
    return rotated.tolist()[0]
def instruction_parse(s, args = None):
    #============================
    # Simple instructions parser \\
    #=========================================================================
    # Parses a "key: value, key: value" string into a dict of floats.     ||
    # Occurrences of '{name}' are first substituted from *args*, and the  ||
    # literal 'pi' in values is replaced by its numeric value.            ||
    #                                                                     ||
    # Arguments:    s       : input string                                ||
    #               args    : dictionary with keys for variable placement ||
    #=========================================================================
    if args:
        for a in args:
            s = s.replace('{'+a+'}', str(args[a]))
    dic = {}
    key = ''
    val = ''
    pos = 'key'  # current state: accumulating a key or a value
    for i, c in enumerate(s):
        if c == ' ' or c == '\n' or c == '\t': # ignore whitespace
            continue
        elif c == ':':
            pos = 'val'
        elif c != ':' and c != ',' and pos == 'key':
            key += c
        elif c != ':' and c != ',' and pos == 'val':
            val += c
        elif c == ',':
            True # do nothing (no-op statement; comma is handled below)
        else:
            # NOTE(review): this branch appears unreachable -- every
            # character falls into one of the cases above -- confirm.
            print('Error: unknown parameter, could not parse.')
            return False
        if c == ',' or (i + 1) == len(s):
            # SECURITY: eval() on the value string executes arbitrary code;
            # only use this parser on trusted input.
            val = eval(val.replace('pi', str(np.pi)))
            dic[key] = float(val)
            key = ''
            val = ''
            pos = 'key'
            if (i + 1) == len(s):
                break
    return dic
def flatten(objectlist, endpoints, endpoint_dims, layer = 0):
    #===========================
    # FLatten a list of objects \\
    #=========================================================================
    # Merge all given structures (including their compound children)      ||
    # into a single polygon set on one layer. All per-object layer        ||
    # information is lost! To combine structures while keeping layer      ||
    # information, use cluster() instead.                                 ||
    #                                                                     ||
    # Arguments:    objectlist      : list of objects (GDStructure)       ||
    #               endpoints       : dictionary of new endpoints         ||
    #               endpoint_dims   : dictionary of new endpoint sizes    ||
    #=========================================================================
    def expand(items):
        # Depth-first expansion of compound structures into a flat list.
        flat = []
        for item in items:
            flat.append(item)
            if item.compound:
                flat.extend(expand(item.compound))
        return flat

    ends = copy.deepcopy(endpoints)
    epsz = copy.deepcopy(endpoint_dims)
    shapes = [item.structure for item in expand(objectlist)]
    merged = gp.boolean(shapes, None, 'or', layer = layer)
    return gdst.classes.GDStructure(merged, ends, epsz)
def lattice(cell, repeat, spacing):
    #============================
    # Generate a crystal lattice \\
    #=========================================================================
    # Tile *cell* into an n_x-by-n_y array and wrap it in a GDStructure   ||
    # with endpoints 'A' (mid left edge) and 'B' (top center cell).       ||
    #                                                                     ||
    # Arguments:    cell    : unit cell as gdspy Cell object              ||
    #               repeat  : (n_x, n_y) vector with amount of cells      ||
    #               spacing : space between unit cells                    ||
    #=========================================================================
    n_x, n_y = repeat
    d_x, d_y = spacing
    array = gp.CellArray(cell, n_x, n_y, spacing)
    ends = {
        'A': (0, d_y * n_y / 2),
        'B': (d_x * (n_x - 1) / 2, d_y * (n_y - 1/2)),
    }
    return gdst.classes.GDStructure(array, ends, {'A': 0, 'B': 0})
def lattice_cutter(lattice, objectlist, mode = 'and', layer = 0):
    #=====================================
    # Cut a lattice up using boolean      \\
    #=========================================================================
    # Arguments:    lattice     : output of lattice() function            ||
    #               objectlist  : list of objects that intersect lattice  ||
    #  (optional)   mode        : what boolean operation to apply         ||
    #  (optional)   layer       : layer to put resulting structure on     ||
    #=========================================================================
    # Idiom fix: isinstance() instead of `type(...) is not type([])`.
    if not isinstance(objectlist, list):
        objectlist = [objectlist]
    for obj in objectlist:
        if obj.compound:
            # BUGFIX: the recursive call previously dropped mode/layer and
            # silently fell back to the defaults ('and', 0) for compound
            # sub-structures; forward them so nested pieces are cut the
            # same way as their parents.
            lattice = lattice_cutter(lattice, obj.compound, mode, layer)
        lattice.structure = gp.boolean(lattice.structure, obj.structure, mode, layer = layer)
    return lattice
def add(cell, elements, signal_from = None):
    #================================
    # Add structures to a gdspy cell \\
    #=========================================================================
    # Recursively adds GDSComponents (and everything connected to them    ||
    # through their previous/next chains) or raw gdspy elements to *cell*.||
    #                                                                     ||
    # Arguments:    cell        : gdspy cell object                       ||
    #               elements    : list of GDStructure objects             ||
    #               signal_from : internal visited-set (list) used to     ||
    #                             stop infinite recursion through the     ||
    #                             doubly-linked previous/next chains      ||
    #=========================================================================
    # First call: signal_from is None (not a list), so start a fresh
    # visited list; [None] is harmless as a sentinel member.
    if not isinstance(signal_from, list):
        signal_from = [signal_from]
    if elements not in signal_from:
        signal_from.append(elements)
    if not isinstance(elements, list):
        elements = [elements]
    for element in elements:
        if isinstance(element, list):
            # Nested list: recurse without sharing the visited set.
            gdst.add(cell, element)
        else:
            if isinstance(element, gdst.classes.GDSComponent):
                for polygon in element.polygons:
                    cell.add(polygon)
                # Walk the linked chain of connected components in both
                # directions, marking each as visited before recursing.
                for previous_component in element.previous:
                    if previous_component not in signal_from and element.previous:
                        signal_from.append(previous_component)
                        gdst.add(cell, previous_component, signal_from = signal_from)
                for next_component in element.next:
                    if next_component not in signal_from and element.next:
                        signal_from.append(next_component)
                        gdst.add(cell, next_component, signal_from = signal_from)
            else:
                # Plain gdspy element (polygon, reference, ...): add as-is.
                cell.add(element)
def mirror(p):
    #============================
    # Mirror points about y-axis \\
    #=========================================================================
    # Negates the x coordinate of every point, in place, and returns the  ||
    # same list for convenience.                                          ||
    #                                                                     ||
    # Arguments:    p   : list of (x, y) points                           ||
    #=========================================================================
    for idx, point in enumerate(p):
        p[idx] = (-point[0], point[1])
    return p
def symm_coords(points, mirror_x = True, mirror_y = True):
    """Return *points* plus their mirror images about the requested axes.

    A single point may be passed instead of a list. With both flags set
    the result covers all four quadrants: originals, then the y-axis
    mirrors, then the x-axis mirrors, then the double mirrors.
    """
    if not isinstance(points, list):
        points = [points]
    result = copy.deepcopy(points)
    if mirror_y:
        result.extend((-pt[0], pt[1]) for pt in points)
    if mirror_x:
        result.extend((pt[0], -pt[1]) for pt in points)
    if mirror_x and mirror_y:
        result.extend((-pt[0], -pt[1]) for pt in points)
    return result
def save(cell, filename, unit = 1e-6, precision = 1e-9):
    #=====================
    # Save cell to a file \\
    #=========================================================================
    # Arguments:    cell     : gdspy cell object or a list of cells       ||
    #               filename : filename to write to (relative path)       ||
    #               unit     : database user unit in meters               ||
    #               precision: database precision in meters               ||
    #=========================================================================
    writer = gp.GdsWriter(filename, unit = unit, precision = precision)
    # Idiom fix: isinstance() instead of `type(cell) == type([])`.
    if isinstance(cell, list):
        for cell_item in cell:
            writer.write_cell(cell_item)
    else:
        writer.write_cell(cell)
    return writer.close()
def biquadratic_func(x):
    """Biquadratic profile f(x) = 2*x**2 - x**4 (peaks at 1 for x = +/-1)."""
    x_squared = x ** 2
    return x_squared * (2 - x_squared)
def rotate_reference_cell(reference, angle, center = (0, 0)):
    """Rotate a gdspy cell reference in place by *angle* radians about *center*.

    Updates the reference's rotation (stored in degrees) and translates
    its origin to the rotated position.
    """
    center_x, center_y = center
    origin_x, origin_y = reference.origin[0], reference.origin[1]
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    new_x = cos_a * (origin_x - center_x) - sin_a * (origin_y - center_y) + center_x
    new_y = sin_a * (origin_x - center_x) + cos_a * (origin_y - center_y) + center_y
    reference.rotation += np.degrees(angle)
    reference.translate(new_x - origin_x, new_y - origin_y)
def inside(points, cellref, dist, nop = 3, precision = 0.001):
    #==========================================
    # Probe whether points lie inside a cell   \\
    #=========================================================================
    # Builds a nop-x-nop probe grid of side *dist* around each point and  ||
    # asks gdspy whether the grids fall inside *cellref*.                 ||
    #                                                                     ||
    # Arguments:    points    : list of points to check                   ||
    #               cellref   : gdspy cell reference object               ||
    #               dist      : distance from points to search            ||
    #               nop       : number of probe points within dist        ||
    #               precision : gdspy.inside precision parameter          ||
    #=========================================================================
    # Force an odd probe count so the grid stays centered on the point.
    if nop % 2 == 0:
        nop += 1
    half = dist / 2
    probe_sets = []
    for p in points:
        xs = np.linspace(p[0] - half, p[0] + half, nop)
        ys = np.linspace(p[1] - half, p[1] + half, nop)
        probe_sets.append([[x, y] for x in xs for y in ys])
    return gp.inside(probe_sets, cellref, precision = precision)
def convert_to_dxf(filename):
    """Convert <filename>.gds to <filename>.dxf using KLayout's batch mode.

    *filename* is the path without extension; convert.rb must be
    resolvable by KLayout.
    """
    import subprocess
    print("-- Converting to DXF --")
    # SECURITY: the original built a shell string with os.system, so a
    # filename containing spaces or shell metacharacters could break (or
    # inject into) the command. Passing an argument list avoids the shell.
    subprocess.run([
        '/Applications/klayout.app/Contents/MacOS/klayout',
        '-zz',
        '-rd', 'input={}.gds'.format(filename),
        '-rd', 'output={}.dxf'.format(filename),
        '-r', 'convert.rb',
    ])
def bounding_box_center(object):
    """Return the (x, y) center of a gdspy object's bounding box."""
    (x_min, y_min), (x_max, y_max) = object.get_bounding_box()
    return ((x_min + x_max) / 2, (y_min + y_max) / 2)
def file_path_name(file_path_name_ext):
    """Return the path with a trailing '.py' extension removed.

    e.g. '/a/b/script.py' -> '/a/b/script'; paths whose basename does not
    end in '.py' are returned unchanged.
    """
    directory, filename = os.path.split(file_path_name_ext)
    # BUGFIX: the original used str.replace, which removed '.py' anywhere
    # in the name (and every occurrence of the basename in the path), not
    # just the trailing extension.
    if filename.endswith('.py'):
        filename = filename[:-3]
    return os.path.join(directory, filename)
"gds_tools.classes.GDStructure",
"gdspy.boolean",
"io.StringIO",
"gdspy.inside",
"pstats.Stats",
"gdspy.GdsWriter",
"numpy.linspace",
"gds_tools.add",
"os.path.basename",
"numpy.cos",
"copy.deepcopy",
"numpy.sin",
"numpy.degrees",
"cProfile.Profile",
"numpy.array",
"gdspy.CellArray"
] | [((4351, 4375), 'copy.deepcopy', 'copy.deepcopy', (['endpoints'], {}), '(endpoints)\n', (4364, 4375), False, 'import os, copy, cProfile, pstats, io\n'), ((4388, 4416), 'copy.deepcopy', 'copy.deepcopy', (['endpoint_dims'], {}), '(endpoint_dims)\n', (4401, 4416), False, 'import os, copy, cProfile, pstats, io\n'), ((5114, 5163), 'gdspy.CellArray', 'gp.CellArray', (['cell', 'repeat[0]', 'repeat[1]', 'spacing'], {}), '(cell, repeat[0], repeat[1], spacing)\n', (5126, 5163), True, 'import gdspy as gp\n'), ((5331, 5374), 'gds_tools.classes.GDStructure', 'gdst.classes.GDStructure', (['array', 'ends', 'epsz'], {}), '(array, ends, epsz)\n', (5355, 5374), True, 'import gds_tools as gdst\n'), ((8475, 8496), 'copy.deepcopy', 'copy.deepcopy', (['points'], {}), '(points)\n', (8488, 8496), False, 'import os, copy, cProfile, pstats, io\n'), ((9330, 9384), 'gdspy.GdsWriter', 'gp.GdsWriter', (['filename'], {'unit': 'unit', 'precision': 'precision'}), '(filename, unit=unit, precision=precision)\n', (9342, 9384), True, 'import gdspy as gp\n'), ((9960, 9977), 'numpy.degrees', 'np.degrees', (['angle'], {}), '(angle)\n', (9970, 9977), True, 'import numpy as np\n'), ((11076, 11126), 'gdspy.inside', 'gp.inside', (['search_ps', 'cellref'], {'precision': 'precision'}), '(search_ps, cellref, precision=precision)\n', (11085, 11126), True, 'import gdspy as gp\n'), ((11714, 11750), 'os.path.basename', 'os.path.basename', (['file_path_name_ext'], {}), '(file_path_name_ext)\n', (11730, 11750), False, 'import os, copy, cProfile, pstats, io\n'), ((262, 280), 'cProfile.Profile', 'cProfile.Profile', ([], {}), '()\n', (278, 280), False, 'import os, copy, cProfile, pstats, io\n'), ((376, 389), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (387, 389), False, 'import os, copy, cProfile, pstats, io\n'), ((4533, 4574), 'gdspy.boolean', 'gp.boolean', (['objs', 'None', '"""or"""'], {'layer': 'layer'}), "(objs, None, 'or', layer=layer)\n", (4543, 4574), True, 'import gdspy as gp\n'), ((6226, 6287), 
'gdspy.boolean', 'gp.boolean', (['lattice.structure', 'i.structure', 'mode'], {'layer': 'layer'}), '(lattice.structure, i.structure, mode, layer=layer)\n', (6236, 6287), True, 'import gdspy as gp\n'), ((10894, 10944), 'numpy.linspace', 'np.linspace', (['(p[0] - dist / 2)', '(p[0] + dist / 2)', 'nop'], {}), '(p[0] - dist / 2, p[0] + dist / 2, nop)\n', (10905, 10944), True, 'import numpy as np\n'), ((10955, 11005), 'numpy.linspace', 'np.linspace', (['(p[1] - dist / 2)', '(p[1] + dist / 2)', 'nop'], {}), '(p[1] - dist / 2, p[1] + dist / 2, nop)\n', (10966, 11005), True, 'import numpy as np\n'), ((7086, 7109), 'gds_tools.add', 'gdst.add', (['cell', 'element'], {}), '(cell, element)\n', (7094, 7109), True, 'import gds_tools as gdst\n'), ((435, 461), 'pstats.Stats', 'pstats.Stats', (['pr'], {'stream': 's'}), '(pr, stream=s)\n', (447, 461), False, 'import os, copy, cProfile, pstats, io\n'), ((931, 942), 'numpy.cos', 'np.cos', (['rad'], {}), '(rad)\n', (937, 942), True, 'import numpy as np\n'), ((960, 971), 'numpy.sin', 'np.sin', (['rad'], {}), '(rad)\n', (966, 971), True, 'import numpy as np\n'), ((973, 984), 'numpy.cos', 'np.cos', (['rad'], {}), '(rad)\n', (979, 984), True, 'import numpy as np\n'), ((9705, 9718), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (9711, 9718), True, 'import numpy as np\n'), ((9757, 9770), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (9763, 9770), True, 'import numpy as np\n'), ((9829, 9842), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (9835, 9842), True, 'import numpy as np\n'), ((9881, 9894), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (9887, 9894), True, 'import numpy as np\n'), ((945, 956), 'numpy.sin', 'np.sin', (['rad'], {}), '(rad)\n', (951, 956), True, 'import numpy as np\n'), ((1481, 1497), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (1489, 1497), True, 'import numpy as np\n'), ((7512, 7571), 'gds_tools.add', 'gdst.add', (['cell', 'previous_component'], {'signal_from': 'signal_from'}), 
'(cell, previous_component, signal_from=signal_from)\n', (7520, 7571), True, 'import gds_tools as gdst\n'), ((7790, 7845), 'gds_tools.add', 'gdst.add', (['cell', 'next_component'], {'signal_from': 'signal_from'}), '(cell, next_component, signal_from=signal_from)\n', (7798, 7845), True, 'import gds_tools as gdst\n'), ((1445, 1458), 'numpy.array', 'np.array', (['vec'], {}), '(vec)\n', (1453, 1458), True, 'import numpy as np\n'), ((1461, 1477), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (1469, 1477), True, 'import numpy as np\n')] |
# Script is based on https://github.com/richzhang/colorization/colorize.py
import numpy as np
import argparse
import cv2 as cv
def parse_args():
parser = argparse.ArgumentParser(description='iColor: deep interactive colorization')
parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera')
parser.add_argument('--prototxt', help='Path to colorization_deploy_v2.prototxt', default='./models/colorization_release_v2.prototxt')
parser.add_argument('--caffemodel', help='Path to colorization_release_v2.caffemodel', default='./models/colorization_release_v2.caffemodel')
parser.add_argument('--kernel', help='Path to pts_in_hull.npy', default='./resources/pts_in_hull.npy')
args = parser.parse_args()
return args
if __name__ == '__main__':
W_in = 224
H_in = 224
imshowSize = (640, 480)
args = parse_args()
# Select desired model
net = cv.dnn.readNetFromCaffe(args.prototxt, args.caffemodel)
pts_in_hull = np.load(args.kernel) # load cluster centers
# populate cluster centers as 1x1 convolution kernel
pts_in_hull = pts_in_hull.transpose().reshape(2, 313, 1, 1)
net.getLayer(long(net.getLayerId('class8_ab'))).blobs = [pts_in_hull.astype(np.float32)]
net.getLayer(long(net.getLayerId('conv8_313_rh'))).blobs = [np.full([1, 313], 2.606, np.float32)]
if args.input:
cap = cv.VideoCapture(args.input)
else:
cap = cv.VideoCapture(0)
while cv.waitKey(1) < 0:
hasFrame, frame = cap.read()
if not hasFrame:
cv.waitKey()
break
img_rgb = (frame[:,:,[2, 1, 0]] * 1.0 / 255).astype(np.float32)
img_lab = cv.cvtColor(img_rgb, cv.COLOR_RGB2Lab)
img_l = img_lab[:,:,0] # pull out L channel
(H_orig,W_orig) = img_rgb.shape[:2] # original image size
# resize image to network input size
img_rs = cv.resize(img_rgb, (W_in, H_in)) # resize image to network input size
img_lab_rs = cv.cvtColor(img_rs, cv.COLOR_RGB2Lab)
img_l_rs = img_lab_rs[:,:,0]
img_l_rs -= 50 # subtract 50 for mean-centering
net.setInput(cv.dnn.blobFromImage(img_l_rs))
ab_dec = net.forward('class8_ab')[0,:,:,:].transpose((1,2,0)) # this is our result
(H_out,W_out) = ab_dec.shape[:2]
ab_dec_us = cv.resize(ab_dec, (W_orig, H_orig))
img_lab_out = np.concatenate((img_l[:,:,np.newaxis],ab_dec_us),axis=2) # concatenate with original image L
img_bgr_out = np.clip(cv.cvtColor(img_lab_out, cv.COLOR_Lab2BGR), 0, 1)
frame = cv.resize(frame, imshowSize)
cv.imshow('origin', frame)
cv.imshow('gray', cv.cvtColor(frame, cv.COLOR_RGB2GRAY))
cv.imshow('colorized', cv.resize(img_bgr_out, imshowSize))
| [
"cv2.dnn.blobFromImage",
"argparse.ArgumentParser",
"cv2.dnn.readNetFromCaffe",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.cvtColor",
"numpy.concatenate",
"numpy.full",
"cv2.resize",
"numpy.load",
"cv2.waitKey"
] | [((159, 235), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""iColor: deep interactive colorization"""'}), "(description='iColor: deep interactive colorization')\n", (182, 235), False, 'import argparse\n'), ((927, 982), 'cv2.dnn.readNetFromCaffe', 'cv.dnn.readNetFromCaffe', (['args.prototxt', 'args.caffemodel'], {}), '(args.prototxt, args.caffemodel)\n', (950, 982), True, 'import cv2 as cv\n'), ((1002, 1022), 'numpy.load', 'np.load', (['args.kernel'], {}), '(args.kernel)\n', (1009, 1022), True, 'import numpy as np\n'), ((1325, 1361), 'numpy.full', 'np.full', (['[1, 313]', '(2.606)', 'np.float32'], {}), '([1, 313], 2.606, np.float32)\n', (1332, 1361), True, 'import numpy as np\n'), ((1397, 1424), 'cv2.VideoCapture', 'cv.VideoCapture', (['args.input'], {}), '(args.input)\n', (1412, 1424), True, 'import cv2 as cv\n'), ((1449, 1467), 'cv2.VideoCapture', 'cv.VideoCapture', (['(0)'], {}), '(0)\n', (1464, 1467), True, 'import cv2 as cv\n'), ((1479, 1492), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (1489, 1492), True, 'import cv2 as cv\n'), ((1695, 1733), 'cv2.cvtColor', 'cv.cvtColor', (['img_rgb', 'cv.COLOR_RGB2Lab'], {}), '(img_rgb, cv.COLOR_RGB2Lab)\n', (1706, 1733), True, 'import cv2 as cv\n'), ((1915, 1947), 'cv2.resize', 'cv.resize', (['img_rgb', '(W_in, H_in)'], {}), '(img_rgb, (W_in, H_in))\n', (1924, 1947), True, 'import cv2 as cv\n'), ((2006, 2043), 'cv2.cvtColor', 'cv.cvtColor', (['img_rs', 'cv.COLOR_RGB2Lab'], {}), '(img_rs, cv.COLOR_RGB2Lab)\n', (2017, 2043), True, 'import cv2 as cv\n'), ((2344, 2379), 'cv2.resize', 'cv.resize', (['ab_dec', '(W_orig, H_orig)'], {}), '(ab_dec, (W_orig, H_orig))\n', (2353, 2379), True, 'import cv2 as cv\n'), ((2402, 2462), 'numpy.concatenate', 'np.concatenate', (['(img_l[:, :, np.newaxis], ab_dec_us)'], {'axis': '(2)'}), '((img_l[:, :, np.newaxis], ab_dec_us), axis=2)\n', (2416, 2462), True, 'import numpy as np\n'), ((2592, 2620), 'cv2.resize', 'cv.resize', (['frame', 'imshowSize'], 
{}), '(frame, imshowSize)\n', (2601, 2620), True, 'import cv2 as cv\n'), ((2629, 2655), 'cv2.imshow', 'cv.imshow', (['"""origin"""', 'frame'], {}), "('origin', frame)\n", (2638, 2655), True, 'import cv2 as cv\n'), ((1572, 1584), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (1582, 1584), True, 'import cv2 as cv\n'), ((2159, 2189), 'cv2.dnn.blobFromImage', 'cv.dnn.blobFromImage', (['img_l_rs'], {}), '(img_l_rs)\n', (2179, 2189), True, 'import cv2 as cv\n'), ((2525, 2567), 'cv2.cvtColor', 'cv.cvtColor', (['img_lab_out', 'cv.COLOR_Lab2BGR'], {}), '(img_lab_out, cv.COLOR_Lab2BGR)\n', (2536, 2567), True, 'import cv2 as cv\n'), ((2682, 2719), 'cv2.cvtColor', 'cv.cvtColor', (['frame', 'cv.COLOR_RGB2GRAY'], {}), '(frame, cv.COLOR_RGB2GRAY)\n', (2693, 2719), True, 'import cv2 as cv\n'), ((2752, 2786), 'cv2.resize', 'cv.resize', (['img_bgr_out', 'imshowSize'], {}), '(img_bgr_out, imshowSize)\n', (2761, 2786), True, 'import cv2 as cv\n')] |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from skimage import data
from skimage.filters import threshold_multiotsu # um filtro que realiza este método
# Setting the font size for all plots.
matplotlib.rcParams['font.size'] = 9
# The input image.
image = data.camera()
# Applying multi-Otsu threshold for the default value, generating
# three classes.
thresholds = threshold_multiotsu(image)
# Using the threshold values, we generate the three regions.
# divide a image em "thresholds" bins. Cria 3 regiões, uma com valores menores que thresholds, outra que pertença ao
# intervalo de dados de thresholds e outra com maiores do que threshold. Avalia em que bin os dados estão.
# 0 -> bin 1/ 1 -> bin 2/ 2 -> bin 3
regions = np.digitize(image, bins=thresholds)
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(10, 3.5))
# Plotting the original image.
ax[0].imshow(image, cmap='gray')
ax[0].set_title('Original')
ax[0].axis('off')
# Plotting the histogram and the two thresholds obtained from multi-Otsu.
ax[1].hist(image.ravel(), bins=255)
ax[1].set_title('Histogram')
for thresh in thresholds:
ax[1].axvline(thresh, color='r')
# Plotting the Multi Otsu result.
ax[2].imshow(regions, cmap='jet')
ax[2].set_title('Multi-Otsu result')
ax[2].axis('off')
plt.subplots_adjust()
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.digitize",
"skimage.data.camera",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust",
"skimage.filters.threshold_multiotsu"
] | [((285, 298), 'skimage.data.camera', 'data.camera', ([], {}), '()\n', (296, 298), False, 'from skimage import data\n'), ((396, 422), 'skimage.filters.threshold_multiotsu', 'threshold_multiotsu', (['image'], {}), '(image)\n', (415, 422), False, 'from skimage.filters import threshold_multiotsu\n'), ((756, 791), 'numpy.digitize', 'np.digitize', (['image'], {'bins': 'thresholds'}), '(image, bins=thresholds)\n', (767, 791), True, 'import numpy as np\n'), ((803, 852), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(3)', 'figsize': '(10, 3.5)'}), '(nrows=1, ncols=3, figsize=(10, 3.5))\n', (815, 852), True, 'import matplotlib.pyplot as plt\n'), ((1292, 1313), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {}), '()\n', (1311, 1313), True, 'import matplotlib.pyplot as plt\n'), ((1315, 1325), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1323, 1325), True, 'import matplotlib.pyplot as plt\n')] |
import torch
import torch.nn
import torch.nn.functional as nn
import torch.autograd as autograd
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from torch.autograd import Variable
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
mb_size = 32
z_dim = 10
eps_dim = 4
X_dim = mnist.train.images.shape[1]
y_dim = mnist.train.labels.shape[1]
h_dim = 128
cnt = 0
lr = 1e-3
def log(x):
return torch.log(x + 1e-8)
# Encoder: q(z|x,eps)
Q = torch.nn.Sequential(
torch.nn.Linear(X_dim + eps_dim, h_dim),
torch.nn.ReLU(),
torch.nn.Linear(h_dim, z_dim)
)
# Decoder: p(x|z)
P = torch.nn.Sequential(
torch.nn.Linear(z_dim, h_dim),
torch.nn.ReLU(),
torch.nn.Linear(h_dim, X_dim),
torch.nn.Sigmoid()
)
# Discriminator: T(X, z)
T = torch.nn.Sequential(
torch.nn.Linear(X_dim + z_dim, h_dim),
torch.nn.ReLU(),
torch.nn.Linear(h_dim, 1)
)
def reset_grad():
Q.zero_grad()
P.zero_grad()
T.zero_grad()
def sample_X(size, include_y=False):
X, y = mnist.train.next_batch(size)
X = Variable(torch.from_numpy(X))
if include_y:
y = np.argmax(y, axis=1).astype(np.int)
y = Variable(torch.from_numpy(y))
return X, y
return X
Q_solver = optim.Adam(Q.parameters(), lr=lr)
P_solver = optim.Adam(P.parameters(), lr=lr)
T_solver = optim.Adam(T.parameters(), lr=lr)
for it in range(1000000):
X = sample_X(mb_size)
eps = Variable(torch.randn(mb_size, eps_dim))
z = Variable(torch.randn(mb_size, z_dim))
# Optimize VAE
z_sample = Q(torch.cat([X, eps], 1))
X_sample = P(z_sample)
T_sample = T(torch.cat([X, z_sample], 1))
disc = torch.mean(-T_sample)
loglike = -nn.binary_cross_entropy(X_sample, X, size_average=False) / mb_size
elbo = -(disc + loglike)
elbo.backward()
Q_solver.step()
P_solver.step()
reset_grad()
# Discriminator T(X, z)
z_sample = Q(torch.cat([X, eps], 1))
T_q = nn.sigmoid(T(torch.cat([X, z_sample], 1)))
T_prior = nn.sigmoid(T(torch.cat([X, z], 1)))
T_loss = -torch.mean(log(T_q) + log(1. - T_prior))
T_loss.backward()
T_solver.step()
reset_grad()
# Print and plot every now and then
if it % 1000 == 0:
print('Iter-{}; ELBO: {:.4}; T_loss: {:.4}'
.format(it, -elbo.data[0], -T_loss.data[0]))
samples = P(z).data.numpy()[:16]
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
if not os.path.exists('out/'):
os.makedirs('out/')
plt.savefig('out/{}.png'
.format(str(cnt).zfill(3)), bbox_inches='tight')
cnt += 1
plt.close(fig)
| [
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"os.path.exists",
"torch.log",
"os.makedirs",
"torch.mean",
"torch.nn.functional.binary_cross_entropy",
"numpy.argmax",
"torch.from_numpy",
"matplotlib.pyplot.close",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"matplotlib.pyplot.figure... | [((342, 401), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""../../MNIST_data"""'], {'one_hot': '(True)'}), "('../../MNIST_data', one_hot=True)\n", (367, 401), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((577, 597), 'torch.log', 'torch.log', (['(x + 1e-08)'], {}), '(x + 1e-08)\n', (586, 597), False, 'import torch\n'), ((655, 694), 'torch.nn.Linear', 'torch.nn.Linear', (['(X_dim + eps_dim)', 'h_dim'], {}), '(X_dim + eps_dim, h_dim)\n', (670, 694), False, 'import torch\n'), ((701, 716), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (714, 716), False, 'import torch\n'), ((723, 752), 'torch.nn.Linear', 'torch.nn.Linear', (['h_dim', 'z_dim'], {}), '(h_dim, z_dim)\n', (738, 752), False, 'import torch\n'), ((808, 837), 'torch.nn.Linear', 'torch.nn.Linear', (['z_dim', 'h_dim'], {}), '(z_dim, h_dim)\n', (823, 837), False, 'import torch\n'), ((844, 859), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (857, 859), False, 'import torch\n'), ((866, 895), 'torch.nn.Linear', 'torch.nn.Linear', (['h_dim', 'X_dim'], {}), '(h_dim, X_dim)\n', (881, 895), False, 'import torch\n'), ((902, 920), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (918, 920), False, 'import torch\n'), ((983, 1020), 'torch.nn.Linear', 'torch.nn.Linear', (['(X_dim + z_dim)', 'h_dim'], {}), '(X_dim + z_dim, h_dim)\n', (998, 1020), False, 'import torch\n'), ((1027, 1042), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (1040, 1042), False, 'import torch\n'), ((1049, 1074), 'torch.nn.Linear', 'torch.nn.Linear', (['h_dim', '(1)'], {}), '(h_dim, 1)\n', (1064, 1074), False, 'import torch\n'), ((1881, 1902), 'torch.mean', 'torch.mean', (['(-T_sample)'], {}), '(-T_sample)\n', (1891, 1902), False, 'import torch\n'), ((1259, 1278), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (1275, 1278), False, 'import torch\n'), ((1650, 1679), 'torch.randn', 'torch.randn', 
(['mb_size', 'eps_dim'], {}), '(mb_size, eps_dim)\n', (1661, 1679), False, 'import torch\n'), ((1699, 1726), 'torch.randn', 'torch.randn', (['mb_size', 'z_dim'], {}), '(mb_size, z_dim)\n', (1710, 1726), False, 'import torch\n'), ((1768, 1790), 'torch.cat', 'torch.cat', (['[X, eps]', '(1)'], {}), '([X, eps], 1)\n', (1777, 1790), False, 'import torch\n'), ((1838, 1865), 'torch.cat', 'torch.cat', (['[X, z_sample]', '(1)'], {}), '([X, z_sample], 1)\n', (1847, 1865), False, 'import torch\n'), ((2150, 2172), 'torch.cat', 'torch.cat', (['[X, eps]', '(1)'], {}), '([X, eps], 1)\n', (2159, 2172), False, 'import torch\n'), ((2642, 2668), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (2652, 2668), True, 'import matplotlib.pyplot as plt\n'), ((2683, 2706), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(4)', '(4)'], {}), '(4, 4)\n', (2700, 2706), True, 'import matplotlib.gridspec as gridspec\n'), ((3246, 3260), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3255, 3260), True, 'import matplotlib.pyplot as plt\n'), ((1372, 1391), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (1388, 1391), False, 'import torch\n'), ((1919, 1975), 'torch.nn.functional.binary_cross_entropy', 'nn.binary_cross_entropy', (['X_sample', 'X'], {'size_average': '(False)'}), '(X_sample, X, size_average=False)\n', (1942, 1975), True, 'import torch.nn.functional as nn\n'), ((2198, 2225), 'torch.cat', 'torch.cat', (['[X, z_sample]', '(1)'], {}), '([X, z_sample], 1)\n', (2207, 2225), False, 'import torch\n'), ((2256, 2276), 'torch.cat', 'torch.cat', (['[X, z]', '(1)'], {}), '([X, z], 1)\n', (2265, 2276), False, 'import torch\n'), ((2818, 2836), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i]'], {}), '(gs[i])\n', (2829, 2836), True, 'import matplotlib.pyplot as plt\n'), ((2850, 2865), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2858, 2865), True, 'import matplotlib.pyplot as plt\n'), 
((3056, 3078), 'os.path.exists', 'os.path.exists', (['"""out/"""'], {}), "('out/')\n", (3070, 3078), False, 'import os\n'), ((3093, 3112), 'os.makedirs', 'os.makedirs', (['"""out/"""'], {}), "('out/')\n", (3104, 3112), False, 'import os\n'), ((1314, 1334), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (1323, 1334), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by BaseDetection, Inc. and its affiliates.
import logging
import time
import weakref
from typing import Dict
import numpy as np
import torch
from cvpods.utils import comm
from cvpods.utils.dump.events import EventStorage, get_event_storage
from cvpods.utils.registry import Registry
from .hooks import HookBase
RUNNERS = Registry("runners")
logger = logging.getLogger(__name__)
@RUNNERS.register()
class RunnerBase:
"""
Base class for iterative runner with hooks.
The only assumption we made here is: the training runs in a loop.
A subclass can implement what the loop is.
We made no assumptions about the existence of dataloader, optimizer, model, etc.
Attributes:
iter(int): the current iteration.
start_iter(int): The iteration to start with.
By convention the minimum possible value is 0.
max_iter(int): The iteration to end training.
storage(EventStorage): An EventStorage that's opened during the course of training.
"""
def __init__(self):
self._hooks = []
def register_hooks(self, hooks):
"""
Register hooks to the runner. The hooks are executed in the order
they are registered.
Args:
hooks (list[Optional[HookBase]]): list of hooks
"""
hooks = [h for h in hooks if h is not None]
for h in hooks:
assert isinstance(h, HookBase)
# To avoid circular reference, hooks and runner cannot own each other.
# This normally does not matter, but will cause memory leak if the
# involved objects contain __del__:
# See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
h.trainer = weakref.proxy(self)
self._hooks.extend(hooks)
def train(
self,
start_iter: int,
start_epoch: int,
max_iter: int,
):
"""
Args:
start_iter, max_iter (int): See docs above
"""
self.iter = self.start_iter = start_iter
self.epoch = self.start_epoch = start_epoch
with EventStorage(start_iter) as self.storage:
try:
self.before_train()
for self.iter in range(start_iter, max_iter):
self.before_step()
# by default, a step contains data_loading and model forward,
# loss backward is executed in after_step for better expansibility
self.run_step()
self.after_step()
# self.iter == max_iter can be used by `after_train` to
# tell whether the training successfully finished or failed
# due to exceptions.
self.iter += 1
except Exception:
logger.exception("Exception during training:")
raise
finally:
self.after_train()
def before_train(self):
for h in self._hooks:
h.before_train()
def after_train(self):
self.storage._iter = self.iter
for h in self._hooks:
h.after_train()
def before_step(self):
# Maintain the invariant that storage.iter == runner.iter
# for the entire execution of each step
self.storage._iter = self.iter
for h in self._hooks:
h.before_step()
def after_step(self):
for h in self._hooks:
h.after_step()
def drun_step(self):
raise NotImplementedError
@RUNNERS.register()
class SimpleRunner(RunnerBase):
"""
A simple runner for the most common type of task:
fetch a data batch and execute model forwarding, optionally using data-parallelism.
It assumes that every step, you:
1. Compute the loss with a data from the data_loader.
Note that all other tasks during training (checkpointing, logging, evaluation,
LR schedule, gradients compute, parameters udpate) are maintained by hooks,
which can be registered by :meth:`RunnerBase.register_hooks`.
If you want to do anything fancier than this,
either subclass RunnerBase and implement your own `run_step`,
or write your own training loop.
"""
def __init__(self, model, data_loader, optimizer):
"""
Args:
model: a torch Module. Takes a data from data_loader and returns a
dict of losses.
data_loader: an iterable. Contains data to be used to call model.
optimizer: a torch optimizer.
"""
super().__init__()
"""
We set the model to training mode in the runner.
However it's valid to train a model that's in eval mode.
If you want your model (or a submodule of it) to behave
like evaluation during training, you can overwrite its train() method.
"""
model.train()
self.model = model
self.data_loader = data_loader
self._data_loader_iter = iter(data_loader)
self.optimizer = optimizer
def run_step(self):
"""
Implement the standard training logic described above.
"""
assert self.model.training, "[IterRunner] model was changed to eval mode!"
start = time.perf_counter()
"""
If you want to do something with the data, you can wrap the dataloader.
"""
try:
data = next(self._data_loader_iter)
except StopIteration:
self.epoch += 1
if hasattr(self.data_loader.sampler, 'set_epoch'):
self.data_loader.sampler.set_epoch(self.epoch)
self._data_loader_iter = iter(self.data_loader)
data = next(self._data_loader_iter)
data_time = time.perf_counter() - start
"""
If you want to do something with the losses, you can wrap the model.
"""
loss_dict = self.model(data)
losses = sum([
metrics_value for metrics_value in loss_dict.values()
if metrics_value.requires_grad
])
self._detect_anomaly(losses, loss_dict)
self._write_metrics(loss_dict, data_time)
self.step_outputs = {
"loss_for_backward": losses,
}
def _detect_anomaly(self, losses, loss_dict):
if not torch.isfinite(losses).all():
raise FloatingPointError(
"Loss became infinite or NaN at iteration={}!\nloss_dict = {}".format(
self.iter, loss_dict
)
)
def _write_metrics(
self,
loss_dict: Dict[str, torch.Tensor],
data_time: float,
prefix: str = "",
):
"""
Args:
loss_dict (dict): dict of scalar losses
data_time (float): time taken by the dataloader iteration
"""
device = next(iter(loss_dict.values())).device
# Use a new stream so these ops don't wait for DDP or backward
with torch.cuda.stream(torch.cuda.Stream() if device.type == "cuda" else None):
metrics_dict = {k: v.detach().cpu().item() for k, v in loss_dict.items()}
metrics_dict["data_time"] = data_time
# Gather metrics among all workers for logging
# This assumes we do DDP-style training, which is currently the only
# supported method in cvpods.
all_metrics_dict = comm.gather(metrics_dict)
if comm.is_main_process():
storage = get_event_storage()
# data_time among workers can have high variance. The actual latency
# caused by data_time is the maximum among workers.
data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
storage.put_scalar("data_time", data_time)
# average the rest metrics
metrics_dict = {
k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()
}
total_losses_reduced = sum(loss for key, loss in metrics_dict.items() if "loss" in key)
storage.put_scalar("{}total_loss".format(prefix), total_losses_reduced)
if len(metrics_dict) > 1:
storage.put_scalars(**metrics_dict)
| [
"logging.getLogger",
"cvpods.utils.dump.events.get_event_storage",
"numpy.mean",
"cvpods.utils.comm.is_main_process",
"torch.isfinite",
"time.perf_counter",
"torch.cuda.Stream",
"cvpods.utils.dump.events.EventStorage",
"weakref.proxy",
"cvpods.utils.registry.Registry",
"cvpods.utils.comm.gather"... | [((389, 408), 'cvpods.utils.registry.Registry', 'Registry', (['"""runners"""'], {}), "('runners')\n", (397, 408), False, 'from cvpods.utils.registry import Registry\n'), ((418, 445), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (435, 445), False, 'import logging\n'), ((5330, 5349), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5347, 5349), False, 'import time\n'), ((7517, 7539), 'cvpods.utils.comm.is_main_process', 'comm.is_main_process', ([], {}), '()\n', (7537, 7539), False, 'from cvpods.utils import comm\n'), ((1814, 1833), 'weakref.proxy', 'weakref.proxy', (['self'], {}), '(self)\n', (1827, 1833), False, 'import weakref\n'), ((2187, 2211), 'cvpods.utils.dump.events.EventStorage', 'EventStorage', (['start_iter'], {}), '(start_iter)\n', (2199, 2211), False, 'from cvpods.utils.dump.events import EventStorage, get_event_storage\n'), ((5828, 5847), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5845, 5847), False, 'import time\n'), ((7479, 7504), 'cvpods.utils.comm.gather', 'comm.gather', (['metrics_dict'], {}), '(metrics_dict)\n', (7490, 7504), False, 'from cvpods.utils import comm\n'), ((7563, 7582), 'cvpods.utils.dump.events.get_event_storage', 'get_event_storage', ([], {}), '()\n', (7580, 7582), False, 'from cvpods.utils.dump.events import EventStorage, get_event_storage\n'), ((7951, 7992), 'numpy.mean', 'np.mean', (['[x[k] for x in all_metrics_dict]'], {}), '([x[k] for x in all_metrics_dict])\n', (7958, 7992), True, 'import numpy as np\n'), ((6384, 6406), 'torch.isfinite', 'torch.isfinite', (['losses'], {}), '(losses)\n', (6398, 6406), False, 'import torch\n'), ((7072, 7091), 'torch.cuda.Stream', 'torch.cuda.Stream', ([], {}), '()\n', (7089, 7091), False, 'import torch\n')] |
import os
import random
import numpy as np
import torch
from prettytable import PrettyTable
def seed_torch(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def count_parameters(model):
table = PrettyTable(["Modules", "Parameters"])
total_learn_params = 0
for name, parameter in model.named_parameters():
param = parameter.numel()
table.add_row([name, param])
total_learn_params += param
print(table)
print(f"Total Params: {total_learn_params}")
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"prettytable.PrettyTable",
"random.seed",
"numpy.random.seed",
"torch.cuda.manual_seed"
] | [((120, 137), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (131, 137), False, 'import random\n'), ((187, 207), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (201, 207), True, 'import numpy as np\n'), ((212, 235), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (229, 235), False, 'import torch\n'), ((240, 268), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (262, 268), False, 'import torch\n'), ((273, 305), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (299, 305), False, 'import torch\n'), ((468, 506), 'prettytable.PrettyTable', 'PrettyTable', (["['Modules', 'Parameters']"], {}), "(['Modules', 'Parameters'])\n", (479, 506), False, 'from prettytable import PrettyTable\n')] |
# -*- coding: utf-8 -*-
# License: Apache License 2.0
import os
import platform
import sys
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
def list_cpp_files(package_dir='wikipedia2vec'):
if sys.platform.startswith("win"):
compile_args = []
link_args = []
elif platform.system() == 'Darwin':
compile_args = ['-Wno-unused-function', '-std=c++11', '-stdlib=libc++']
link_args = ['-std=c++11', '-stdlib=libc++']
else:
compile_args = ['-Wno-unused-function', '-std=c++11']
link_args = ['-std=c++11']
ret = []
for (dir_name, _, files) in os.walk(package_dir):
for file_name in files:
(module_name, ext) = os.path.splitext(file_name)
if ext == '.pyx':
module_name = '.'.join(dir_name.split(os.sep) + [module_name])
path = os.path.join(dir_name, file_name)
ret.append((module_name, dict(
sources=[path], language='c++', extra_compile_args=compile_args,
extra_link_args=link_args
)))
return ret
# Copied from https://github.com/RaRe-Technologies/gensim/blob/master/setup.py
class custom_build_ext(build_ext):
def finalize_options(self):
build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
# https://docs.python.org/2/library/__builtin__.html#module-__builtin__
if isinstance(__builtins__, dict):
__builtins__["__NUMPY_SETUP__"] = False
else:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
setup(
name='wikipedia2vec',
version='1.0.4',
description='A tool for learning vector representations of words and entities from Wikipedia',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url='http://wikipedia2vec.github.io/',
packages=find_packages(exclude=('tests*',)),
cmdclass=dict(build_ext=custom_build_ext),
ext_modules=[Extension(module_name, **kwargs) for (module_name, kwargs) in list_cpp_files()],
include_package_data=True,
entry_points={
'console_scripts': [
'wikipedia2vec=wikipedia2vec.cli:cli',
]
},
keywords=['wikipedia', 'embedding', 'wikipedia2vec'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
install_requires=[
'click',
'jieba',
'joblib',
'lmdb',
'marisa-trie',
'mwparserfromhell',
'numpy',
'scipy',
'six',
'tqdm',
],
setup_requires=['numpy'],
tests_require=['nose'],
test_suite='nose.collector',
)
| [
"setuptools.find_packages",
"sys.platform.startswith",
"os.path.splitext",
"os.path.join",
"setuptools.Extension",
"platform.system",
"numpy.get_include",
"setuptools.command.build_ext.build_ext.finalize_options",
"os.walk"
] | [((255, 285), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (278, 285), False, 'import sys\n'), ((662, 682), 'os.walk', 'os.walk', (['package_dir'], {}), '(package_dir)\n', (669, 682), False, 'import os\n'), ((1313, 1345), 'setuptools.command.build_ext.build_ext.finalize_options', 'build_ext.finalize_options', (['self'], {}), '(self)\n', (1339, 1345), False, 'from setuptools.command.build_ext import build_ext\n'), ((2091, 2125), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "('tests*',)"}), "(exclude=('tests*',))\n", (2104, 2125), False, 'from setuptools import setup, find_packages, Extension\n'), ((345, 362), 'platform.system', 'platform.system', ([], {}), '()\n', (360, 362), False, 'import platform\n'), ((749, 776), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (765, 776), False, 'import os\n'), ((1712, 1731), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1729, 1731), False, 'import numpy\n'), ((2191, 2223), 'setuptools.Extension', 'Extension', (['module_name'], {}), '(module_name, **kwargs)\n', (2200, 2223), False, 'from setuptools import setup, find_packages, Extension\n'), ((909, 942), 'os.path.join', 'os.path.join', (['dir_name', 'file_name'], {}), '(dir_name, file_name)\n', (921, 942), False, 'import os\n')] |
import cv2
import math
import os
import random as rnd
import numpy as np
from PIL import Image, ImageDraw, ImageFilter
def gaussian_noise(height, width):
    """Return an RGBA background filled with Gaussian noise (paper-like texture).

    The pixel values are drawn in place by OpenCV with mean 100 and
    standard deviation 90; the initial ones-array is fully overwritten.
    """
    canvas = np.ones((height, width))
    cv2.randn(canvas, 100, 90)
    background = Image.fromarray(canvas)
    return background.convert("RGBA")
import random


def r():
    """Return a random colour-channel value in [0, 180].

    The upper bound of 180 (instead of 255) keeps generated colours on the
    darker side.  Replaces the former ``r = lambda: ...`` assignment, which
    PEP 8 (E731) discourages; the callable interface is unchanged.
    """
    return random.randint(0, 180)
# import Image,ImageDraw
# from random import randint as rint
# def random_gradient(h, w):
# img = Image.new("RGB", (w,h), "#FFFFFF")
# draw = ImageDraw.Draw(img)
# r,g,b = rint(0,255), rint(0,255), rint(0,255)
# if rnd.random() > 0.5:
# dr = (rint(0,255) - r)/h
# dg = (rint(0,255) - g)/h
# db = (rint(0,255) - b)/h
# for i in range(h):
# r,g,b = r+dr, g+dg, b+db
# draw.line((i,0,i,h), fill=(int(r),int(g),int(b)))
# else:
# dr = (rint(0,255) - r)/w
# dg = (rint(0,255) - g)/w
# db = (rint(0,255) - b)/w
# for i in range(w):
# r,g,b = r+dr, g+dg, b+db
# draw.line((0,i,w,i), fill=(int(r),int(g),int(b)))
# return img
def get_gradient_2d(start, stop, width, height, is_horizontal):
    """Return a (height, width) array ramping linearly from start to stop.

    The ramp runs left-to-right when is_horizontal is true, and
    top-to-bottom otherwise.
    """
    if is_horizontal:
        ramp = np.linspace(start, stop, width)
        return np.tile(ramp, (height, 1))
    ramp = np.linspace(start, stop, height)
    return np.tile(ramp, (width, 1)).T
def get_gradient_3d(height, width):
    """Return an RGBA image whose R, G, B channels are random linear gradients.

    For each channel a start value, a stop value and a direction are drawn
    via r(); the gradients are rendered with get_gradient_2d.
    """
    starts = (r(), r(), r())
    stops = (r(), r(), r())
    directions = (bool(r() % 2), bool(r() % 2), bool(r() % 2))
    gradient = np.zeros((height, width, len(starts)), dtype=np.uint8)
    for channel, (lo, hi, horizontal) in enumerate(zip(starts, stops, directions)):
        gradient[:, :, channel] = get_gradient_2d(lo, hi, width, height, horizontal)
    return Image.fromarray(gradient).convert("RGBA")
def plain_white(height, width):
    """Return a single-colour RGBA background.

    NOTE(review): despite the name, the fill colour is random -- each RGB
    channel is drawn from r() (range 0-180) -- so the result is never
    actually white.
    """
    fill = (r(), r(), r())
    return Image.new("RGB", (width, height), fill).convert("RGBA")
def quasicrystal(height, width):
    """Return an RGBA background with a quasicrystal interference pattern.

    See https://en.wikipedia.org/wiki/Quasicrystal.  Frequency, phase,
    rotation count and the per-channel seeds are randomised on every call,
    so no two backgrounds are alike.
    """
    img = Image.new("RGBA", (width, height))
    pixel_access = img.load()
    frequency = rnd.random() * 30 + 20
    phase = rnd.random() * 2 * math.pi
    rotation_count = rnd.randint(10, 20)
    seed = (random.random(), random.random(), random.random())
    for col in range(width):
        y = float(col) / (width - 1) * 4 * math.pi - 2 * math.pi
        for row in range(height):
            channels = []
            for ch in range(3):
                x = float(row) / (height - 1) * 4 * math.pi - 2 * math.pi
                z = 10 * seed[ch]
                # hypot/atan2 do not depend on the rotation index, so they
                # are hoisted out of the innermost loop (same values).
                radius = math.hypot(x, y)
                base_angle = math.atan2(y, x)
                for i in range(rotation_count):
                    angle = base_angle + i * math.pi * 2.0 / rotation_count
                    z += math.cos(radius * math.sin(angle) * frequency + phase)
                channels.append(int(180 - round(180 * z / rotation_count)))
            pixel_access[col, row] = tuple(channels)  # grayscale-ish triple
    return img.convert("RGBA")
def image(height, width, image_dir):
    """Return a (width x height) RGBA-ready background cropped from a random
    picture in image_dir.

    The picture is upscaled first if it is smaller than the requested size,
    then a random crop of exactly (width, height) is taken.

    Raises:
        Exception: if image_dir contains no files.
    """
    images = os.listdir(image_dir)
    if not images:
        raise Exception("No images were found in the images folder!")
    pic = Image.open(os.path.join(image_dir, rnd.choice(images)))
    # Upscale (preserving aspect ratio) so the picture covers the crop.
    # NOTE(review): Image.ANTIALIAS is removed in Pillow >= 10; migrate to
    # Image.LANCZOS when the Pillow dependency is upgraded.
    if pic.size[0] < width:
        pic = pic.resize(
            [width, int(pic.size[1] * (width / pic.size[0]))], Image.ANTIALIAS
        )
    if pic.size[1] < height:
        pic = pic.resize(
            [int(pic.size[0] * (height / pic.size[1])), height], Image.ANTIALIAS
        )
    # Random top-left corner for the crop (0 when there is no slack).
    x = 0 if pic.size[0] == width else rnd.randint(0, pic.size[0] - width)
    y = 0 if pic.size[1] == height else rnd.randint(0, pic.size[1] - height)
    return pic.crop((x, y, x + width, y + height))
| [
"cv2.randn",
"PIL.Image.fromarray",
"os.listdir",
"numpy.ones",
"PIL.Image.new",
"numpy.linspace",
"math.atan2",
"math.hypot",
"random.random",
"math.sin",
"random.randint"
] | [((286, 310), 'numpy.ones', 'np.ones', (['(height, width)'], {}), '((height, width))\n', (293, 310), True, 'import numpy as np\n'), ((374, 399), 'cv2.randn', 'cv2.randn', (['image', '(100)', '(90)'], {}), '(image, 100, 90)\n', (383, 399), False, 'import cv2\n'), ((478, 500), 'random.randint', 'random.randint', (['(0)', '(180)'], {}), '(0, 180)\n', (492, 500), False, 'import random\n'), ((2295, 2329), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(width, height)'], {}), "('RGBA', (width, height))\n", (2304, 2329), False, 'from PIL import Image, ImageDraw, ImageFilter\n'), ((2477, 2496), 'random.randint', 'rnd.randint', (['(10)', '(20)'], {}), '(10, 20)\n', (2488, 2496), True, 'import random as rnd\n'), ((3368, 3389), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (3378, 3389), False, 'import os\n'), ((2525, 2540), 'random.random', 'random.random', ([], {}), '()\n', (2538, 2540), False, 'import random\n'), ((2542, 2557), 'random.random', 'random.random', ([], {}), '()\n', (2555, 2557), False, 'import random\n'), ((2559, 2574), 'random.random', 'random.random', ([], {}), '()\n', (2572, 2574), False, 'import random\n'), ((412, 434), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (427, 434), False, 'from PIL import Image, ImageDraw, ImageFilter\n'), ((1375, 1406), 'numpy.linspace', 'np.linspace', (['start', 'stop', 'width'], {}), '(start, stop, width)\n', (1386, 1406), True, 'import numpy as np\n'), ((1933, 1956), 'PIL.Image.fromarray', 'Image.fromarray', (['result'], {}), '(result)\n', (1948, 1956), False, 'from PIL import Image, ImageDraw, ImageFilter\n'), ((2372, 2384), 'random.random', 'rnd.random', ([], {}), '()\n', (2382, 2384), True, 'import random as rnd\n'), ((2420, 2432), 'random.random', 'rnd.random', ([], {}), '()\n', (2430, 2432), True, 'import random as rnd\n'), ((3932, 3967), 'random.randint', 'rnd.randint', (['(0)', '(pic.size[0] - width)'], {}), '(0, pic.size[0] - width)\n', (3943, 3967), True, 'import 
random as rnd\n'), ((4050, 4086), 'random.randint', 'rnd.randint', (['(0)', '(pic.size[1] - height)'], {}), '(0, pic.size[1] - height)\n', (4061, 4086), True, 'import random as rnd\n'), ((1454, 1486), 'numpy.linspace', 'np.linspace', (['start', 'stop', 'height'], {}), '(start, stop, height)\n', (1465, 1486), True, 'import numpy as np\n'), ((2932, 2948), 'math.hypot', 'math.hypot', (['x', 'y'], {}), '(x, y)\n', (2942, 2948), False, 'import math\n'), ((2973, 2989), 'math.atan2', 'math.atan2', (['y', 'x'], {}), '(y, x)\n', (2983, 2989), False, 'import math\n'), ((3065, 3076), 'math.sin', 'math.sin', (['a'], {}), '(a)\n', (3073, 3076), False, 'import math\n')] |
from collections import Counter

from tqdm import tqdm
import pandas as pd
import numpy as np
import requests
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import (precision_score, recall_score, accuracy_score,
                             f1_score, auc, average_precision_score,
                             confusion_matrix, roc_auc_score,
                             mean_squared_error, r2_score)
def auc(X, y, model):
    """ROC-AUC of model's positive-class probabilities on (X, y).

    model must be a fitted classifier exposing predict_proba.
    NOTE(review): this deliberately shadows sklearn.metrics.auc imported
    above.
    """
    positive_probs = model.predict_proba(X)[:, 1]
    return roc_auc_score(y, positive_probs)
def aps(X, y, model):
    """PR-AUC (average precision) of model's positive-class probabilities.

    model must be a fitted classifier exposing predict_proba.
    """
    positive_probs = model.predict_proba(X)[:, 1]
    return average_precision_score(y, positive_probs)
def get_metrics(X, y, y_pred, model):
    """Print accuracy, F1, ROC-AUC, recall, precision and PR-AUC scores.

    X/y are the evaluation features/targets, y_pred the model's class
    predictions, and model a fitted classifier with predict_proba
    (needed for the two AUC scores).
    """
    report = [
        ('Accuracy Score: ', accuracy_score(y, y_pred)),
        ('F1 Score: ', f1_score(y, y_pred)),
        ('ROC-AUC Score: ', auc(X, y, model)),
        ('Recall Score: ', recall_score(y, y_pred)),
        ('Precision Score: ', precision_score(y, y_pred)),
        ('PR-AUC Score: ', aps(X, y, model)),
    ]
    for label, value in report:
        print(label, value)
def run_resampling(X_train, y_train, X_valid, y_valid, resampling_method, model):
    """Resample the training set, refit the model, and print validation metrics.

    resampling_method must expose fit_resample (imblearn-style).  The class
    balance of the resampled training target is printed first, then the
    metric report from get_metrics on the validation split.
    """
    X_res, y_res = resampling_method.fit_resample(X_train, y_train)
    print("Training Count: ", Counter(y_res))
    fitted = model.fit(X_res, y_res)
    predictions = fitted.predict(X_valid)
    get_metrics(X_valid, y_valid, predictions, fitted)
def group_list(lst, size=100):
    """Split lst into chunks of at most size items.

    Each chunk is rendered as a single comma-separated string of the
    items' str() forms; an empty lst yields an empty list.
    """
    return [
        ','.join(str(item) for item in lst[start:start + size])
        for start in range(0, len(lst), size)
    ]
def tweets_request(tweets_ids):
    """
    Make a request to Tweeter API

    tweets_ids: iterable of comma-separated id batches (see group_list).
    Returns one DataFrame concatenating the 'data' payload of every batch
    response that contained data.

    NOTE(review): relies on a module-level `keys` dict holding
    'bearer_token' -- it is not defined in this file; confirm the caller's
    module provides it.  Also, pd.concat raises ValueError if no batch
    returned any data.
    """
    df_lst = []
    for batch in tqdm(tweets_ids):
        url = "https://api.twitter.com/2/tweets?ids={}&&tweet.fields=created_at,entities,geo,id,public_metrics,text&user.fields=description,entities,id,location,name,public_metrics,username".format(batch)
        payload={}
        headers = {'Authorization': 'Bearer ' + keys['bearer_token'],
                  'Cookie': 'personalization_id="v1_hzpv7qXpjB6CteyAHDWYQQ=="; guest_id=v1%3A161498381400435837'}
        r = requests.request("GET", url, headers=headers, data=payload)
        data = r.json()
        # Only responses that actually carry a 'data' key are kept.
        if 'data' in data.keys():
            df_lst.append(pd.DataFrame(data['data']))
    return pd.concat(df_lst)
def rmse(y, y_pred):
    """Return the root-mean-squared error between y and y_pred.

    Implemented directly with numpy: the original body called
    mean_squared_error, which was never imported in this file and raised
    NameError at call time.
    """
    residuals = np.asarray(y, dtype=float) - np.asarray(y_pred, dtype=float)
    return np.sqrt(np.mean(residuals ** 2))
def train_test_metrics(y_train, y_test, y_train_pred, y_test_pred):
    """Print R^2 (rounded to 4 places) and RMSE for the training and the
    testing split.

    Note: RMSE is formatted with %d, i.e. truncated to a whole number.
    """
    print('Training R^2 Score: ', round(r2_score(y_train, y_train_pred), 4))
    print('Training RMSE: %d' % rmse(y_train, y_train_pred))
    print('Testing R^2 Score: ', round(r2_score(y_test, y_test_pred), 4))
    print('Testing RMSE: %d' % rmse(y_test, y_test_pred))
    return
def get_metrics_confusion(X, y, y_pred, model):
    """Print accuracy, F1, ROC-AUC, recall, precision and PR-AUC, then draw
    the confusion matrix as an annotated 2x2 heatmap.

    X/y are the evaluation features/targets, y_pred the class predictions,
    and model a fitted classifier with predict_proba (used by auc/aps).
    """
    print('Accuracy: ', accuracy_score(y, y_pred))
    print('F1 Score: ', f1_score(y, y_pred))
    print('ROC-AUC: ', auc(X, y, model))
    print('Recall: ', recall_score(y, y_pred))
    print('Precision: ', precision_score(y, y_pred))
    print('PR-AUC: ', aps(X, y, model))
    # The heatmap code below used to be duplicated verbatim from
    # get_confusion; reuse that helper instead.
    get_confusion(y, y_pred)
def aps2(X, y, model):
    """PR-AUC (average precision) computed from model.decision_function(X).

    Use this variant for classifiers that expose decision_function rather
    than predict_proba (e.g. SVMs).
    """
    scores = model.decision_function(X)
    return average_precision_score(y, scores)
def get_metrics_2(X, y, y_pred, model):
    """Print accuracy, F1, recall, precision and PR-AUC scores.

    X/y are features/targets, y_pred the class predictions, and model a
    fitted classifier exposing decision_function (used by aps2 for the
    PR-AUC score).
    """
    report = (
        ('Accuracy: ', accuracy_score(y, y_pred)),
        ('F1: ', f1_score(y, y_pred)),
        ('Recall: ', recall_score(y, y_pred)),
        ('Precision: ', precision_score(y, y_pred)),
        ('PR-AUC: ', aps2(X, y, model)),
    )
    for label, value in report:
        print(label, value)
def get_confusion(y, y_pred):
    """Draw the confusion matrix for (y, y_pred) as a 2x2 annotated heatmap.

    Each cell is annotated with its quadrant name (TN/FP/FN/TP), its raw
    count, and its share of all samples.
    """
    matrix = confusion_matrix(y, y_pred)
    quadrants = ['TN','FP','FN','TP']
    counts = ['{0:0.0f}'.format(v) for v in matrix.flatten()]
    shares = ['{0:.2%}'.format(v) for v in matrix.flatten() / np.sum(matrix)]
    annotations = np.asarray(
        [f'{q}\n{c}\n{s}' for q, c, s in zip(quadrants, counts, shares)]
    ).reshape(2, 2)
    fig, ax = plt.subplots(figsize=(4,4))
    sns.heatmap(matrix, annot=annotations, fmt='', cmap='Blues',
                annot_kws={'size':14}, cbar=False,
                xticklabels=False, yticklabels=False)
"sklearn.metrics.f1_score",
"sklearn.metrics.average_precision_score",
"sklearn.metrics.auc",
"tqdm.tqdm",
"numpy.asarray",
"seaborn.heatmap",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.recall_score",
"sklearn.metrics.precision_score",
"collections.Counter",
"requests.request",
"numpy.s... | [((408, 431), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', 'probs'], {}), '(y, probs)\n', (421, 431), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((507, 540), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['y', 'probs'], {}), '(y, probs)\n', (530, 540), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((815, 840), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (829, 840), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((854, 873), 'sklearn.metrics.f1_score', 'f1_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (862, 873), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((887, 903), 'sklearn.metrics.auc', 'auc', (['X', 'y', 'model'], {}), '(X, y, model)\n', (890, 903), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((917, 940), 'sklearn.metrics.recall_score', 'recall_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (929, 940), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((954, 980), 'sklearn.metrics.precision_score', 'precision_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (969, 980), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((2446, 2462), 'tqdm.tqdm', 'tqdm', (['tweets_ids'], {}), 
'(tweets_ids)\n', (2450, 2462), False, 'from tqdm import tqdm\n'), ((3062, 3079), 'pandas.concat', 'pd.concat', (['df_lst'], {}), '(df_lst)\n', (3071, 3079), True, 'import pandas as pd\n'), ((3771, 3796), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (3785, 3796), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((3806, 3825), 'sklearn.metrics.f1_score', 'f1_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (3814, 3825), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((3840, 3856), 'sklearn.metrics.auc', 'auc', (['X', 'y', 'model'], {}), '(X, y, model)\n', (3843, 3856), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((3867, 3890), 'sklearn.metrics.recall_score', 'recall_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (3879, 3890), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((3902, 3928), 'sklearn.metrics.precision_score', 'precision_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (3917, 3928), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((4152, 4179), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y', 'y_pred'], {}), '(y, y_pred)\n', (4168, 4179), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((4544, 4572), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (4556, 4572), True, 'import 
matplotlib.pyplot as plt\n'), ((4576, 4706), 'seaborn.heatmap', 'sns.heatmap', (['cnf'], {'annot': 'labels', 'fmt': '""""""', 'cmap': '"""Blues"""', 'annot_kws': "{'size': 14}", 'cbar': '(False)', 'xticklabels': '(False)', 'yticklabels': '(False)'}), "(cnf, annot=labels, fmt='', cmap='Blues', annot_kws={'size': 14},\n cbar=False, xticklabels=False, yticklabels=False)\n", (4587, 4706), True, 'import seaborn as sns\n'), ((4961, 4994), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['y', 'probs'], {}), '(y, probs)\n', (4984, 4994), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((5317, 5342), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (5331, 5342), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((5352, 5371), 'sklearn.metrics.f1_score', 'f1_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (5360, 5371), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((5381, 5404), 'sklearn.metrics.recall_score', 'recall_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (5393, 5404), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((5414, 5440), 'sklearn.metrics.precision_score', 'precision_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (5429, 5440), False, 'from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((5651, 5678), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y', 'y_pred'], {}), '(y, y_pred)\n', (5667, 5678), False, 'from sklearn.metrics import 
precision_score, recall_score, accuracy_score, f1_score, auc, average_precision_score, confusion_matrix, roc_auc_score\n'), ((6043, 6071), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (6055, 6071), True, 'import matplotlib.pyplot as plt\n'), ((6075, 6205), 'seaborn.heatmap', 'sns.heatmap', (['cnf'], {'annot': 'labels', 'fmt': '""""""', 'cmap': '"""Blues"""', 'annot_kws': "{'size': 14}", 'cbar': '(False)', 'xticklabels': '(False)', 'yticklabels': '(False)'}), "(cnf, annot=labels, fmt='', cmap='Blues', annot_kws={'size': 14},\n cbar=False, xticklabels=False, yticklabels=False)\n", (6086, 6205), True, 'import seaborn as sns\n'), ((1799, 1825), 'collections.Counter', 'Counter', (['y_train_resampled'], {}), '(y_train_resampled)\n', (1806, 1825), False, 'from collections import Counter\n'), ((2874, 2933), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'headers': 'headers', 'data': 'payload'}), "('GET', url, headers=headers, data=payload)\n", (2890, 2933), False, 'import requests\n'), ((4498, 4516), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (4508, 4516), True, 'import numpy as np\n'), ((5997, 6015), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (6007, 6015), True, 'import numpy as np\n'), ((3018, 3044), 'pandas.DataFrame', 'pd.DataFrame', (["data['data']"], {}), "(data['data'])\n", (3030, 3044), True, 'import pandas as pd\n'), ((4369, 4380), 'numpy.sum', 'np.sum', (['cnf'], {}), '(cnf)\n', (4375, 4380), True, 'import numpy as np\n'), ((5868, 5879), 'numpy.sum', 'np.sum', (['cnf'], {}), '(cnf)\n', (5874, 5879), True, 'import numpy as np\n')] |
from __future__ import print_function
import unittest
from unittest import TestCase
import numpy as np
from numpy.testing import assert_, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
from hmmlearn.utils import normalize
from autohmm import tm
# Emit warnings (rather than raising) on floating-point errors in the tests.
np.seterr(all='warn')
def test_precision_prior_wrong_nb():
    """A precision prior whose length mismatches the model must raise."""
    with assert_raises(ValueError):
        model = tm.THMM(n_unique=2)
        model.precision_prior_ = np.array([0.7, 0.8, 0.9])
def test_precision_prior_unique():
    """A per-unique-state prior is replicated across the tied states."""
    model = tm.THMM(n_unique=2, n_tied=1)
    model.precision_prior_ = np.array([[0.7], [0.3]])
    expected = np.array([0.7, 0.7, 0.3, 0.3]).reshape(4, 1, 1)
    assert_array_equal(model._precision_prior_, expected)
def fit_hmm_and_monitor_log_likelihood(h, X, n_iter=1):
    """Fit h on X and return an array with n_iter log-likelihood slots.

    The per-iteration scoring loop is currently disabled (commented out
    below), so the returned array carries no real scores.  It is
    zero-filled -- the previous np.empty returned uninitialized memory,
    which callers could mistake for actual log-likelihoods.
    """
    h.fit(X)
    loglikelihoods = np.zeros(n_iter, dtype=float)
    # for i in range(n_iter):
    #     h.fit(X)
    #     loglikelihoods[i], _ = h.score_samples(X)
    return loglikelihoods
class PlainGaussianHMM(TestCase):
    """End-to-end fit test for a plain (untied) 2-state Gaussian THMM.

    Samples a long sequence from known parameters, perturbs the model, then
    checks that fitting recovers the generating parameters.
    """
    def setUp(self):
        # Fixed seed so sampling and fitting are reproducible.
        self.prng = np.random.RandomState(2)
        self.n_unique = 2
        self.n_components = 2
        self.startprob = np.array([0.6, 0.4])
        self.transmat = np.array([[0.7, 0.3],
                                  [0.4, 0.6]])
        self.mu = np.array([0.7, -2.0])
        self.precision = np.array([[500.],
                                   [250.]])
        # Wide precision bounds effectively disable clipping during fitting.
        self.h = tm.THMM(n_unique=self.n_unique,
                        random_state=self.prng,
                        init_params = 'stmw',
                        precision_bounds = np.array([-1e5, 1e5]))
        self.h.startprob_ = self.startprob
        self.h.transmat_ = self.transmat
        self.h.mu_ = self.mu
        self.h.precision_ = self.precision
    def test_fit(self, params='sptmw', **kwargs):
        """Sample from the true model, perturb it, refit, and compare."""
        h = self.h
        h.params = params
        lengths = 70000
        X, _state_sequence = h.sample(lengths, random_state=self.prng)
        # Perturb the parameters away from the generating values.
        h.precision_ = np.array([[700],
                                 [150]])
        h.mu_ = np.array([2.6, 3.4])
        h.transmat_ = np.array([[0.85, 0.15],
                               [0.2, 0.8]])
        # TODO: Test more parameters, generate test cases
        trainll = fit_hmm_and_monitor_log_likelihood(h, X)
        # Check that the log-likelihood is always increasing during training.
        #diff = np.diff(trainll)
        #self.assertTrue(np.all(diff >= -1e-6),
        #                "Decreasing log-likelihood: {0}" .format(diff))
        assert_array_almost_equal(h.mu_.reshape(-1),
                                  self.mu.reshape(-1), decimal=1)
        assert_array_almost_equal(h.transmat_.reshape(-1),
                                  self.transmat.reshape(-1), decimal=1)
        # Precisions are compared after scaling by 100 to loosen tolerance.
        assert_array_almost_equal(h.precision_.reshape(-1)/100,
                                  self.precision.reshape(-1)/100, decimal =1)
class MultivariateGaussianHMM(TestCase):
    """Fit test for a 2-feature Gaussian THMM with tied states (n_tied=2)."""

    def setUp(self):
        # Fixed seed so sampling and fitting are reproducible.
        self.prng = np.random.RandomState(2)
        self.n_tied = 2
        self.n_features = 2
        self.startprob = np.array([0.6, 0.4])
        self.transmat = np.array([[0.7, 0.3], [0.4, 0.6]])
        self.mu = np.array([[4.5, -1.5],
                            [-0.7, -10.4]])
        # One 2x2 precision matrix per unique state.
        self.precision = np.array([[[0.5, 0.15],
                                    [0.15, 0.4]],
                                   [[0.6, 0.1],
                                    [0.1, 0.35]]])
        self.h = tm.THMM(n_unique=2, n_tied =self.n_tied,
                        n_features=self.n_features,
                        random_state=self.prng,
                        precision_bounds=np.array([-1e5, 1e5]),
                        init_params = 'stmaw', params='stmapw')
        self.h.startprob_ = self.startprob
        self.h.transmat_ = self.transmat
        self.h.mu_ = self.mu
        self.h.precision_ = self.precision
    def test_fit(self, params='stmpaw', **kwargs):
        """Sample from the true model, perturb it, refit, and compare."""
        h = self.h
        h.params = params
        lengths = 100000
        X, _state_sequence = h.sample(lengths, random_state=self.prng)
        # Perturb
        h.precision_ = np.array([[[0.4, 0.12],
                                   [0.12, 0.45]],
                                  [[0.7, 0.2],
                                   [0.2, 0.5]]])
        h.transmat_ = np.array([[0.5, 0.5], [0.2, 0.8]])
        h.mu_ = np.array([[5.8, -0.1],
                         [-3.3, -9.6]])
        # Expected transition matrix over the expanded (tied) state chain:
        # each unique state is unrolled into a left-to-right sub-chain.
        self.transmat = np.array([[0.7, 0.3, 0, 0, 0, 0],
                                 [0, 0.7, 0.3, 0, 0, 0],
                                 [0, 0, 0.7, 0.3, 0, 0],
                                 [0, 0, 0, 0.6, 0.4, 0],
                                 [0, 0, 0, 0, 0.6, 0.4],
                                 [0.4, 0, 0, 0, 0, 0.6]])
        # TODO: Test more parameters, generate test cases
        trainll = fit_hmm_and_monitor_log_likelihood(h, X)
        # Check that the log-likelihood is always increasing during training.
        #diff = np.diff(trainll)
        #self.assertTrue(np.all(diff >= -1e-6),
        #                "Decreasing log-likelihood: {0}" .format(diff))
        assert_array_almost_equal(h.transmat_.reshape(-1),
                                  self.transmat.reshape(-1), decimal=1)
        assert_array_almost_equal(h.mu_.reshape(-1),
                                  self.mu.reshape(-1), decimal=1)
        assert_array_almost_equal(h.precision_.reshape(-1),
                                  self.precision.reshape(-1), decimal=1)
class TiedGaussianHMM(TestCase):
    """Fit test for a 1-feature Gaussian THMM with tied states (n_tied=2)."""

    def setUp(self):
        # Fixed seed so sampling and fitting are reproducible.
        self.prng = np.random.RandomState(42)
        self.n_tied = 2
        self.n_unique = 2
        self.startprob = np.array([0.6, 0.4])
        self.transmat = np.array([[0.7, 0.3],
                                  [0.4, 0.6]])
        self.precision = np.array([[0.5],
                                   [0.3]])
        self.mu = np.array([[0.7],
                            [-2.0]])
        self.h = tm.THMM(n_unique=self.n_unique, n_tied =self.n_tied, random_state=self.prng,
                        precision_bounds=np.array([-1e5, 1e5]), init_params = 'stmaw')
        self.h.startprob_ = self.startprob
        self.h.transmat_ = self.transmat
        self.h.mu_ = self.mu
        self.h.precision_ = self.precision
    def test_fit(self, params='stmpaw', **kwargs):
        """Sample from the true model, perturb it, refit, and compare."""
        h = self.h
        h.params = params
        lengths = 70000
        X, _state_sequence = h.sample(lengths, random_state=self.prng)
        # Perturb the parameters away from the generating values.
        h.mu_ = np.array([[3.5],
                          [-3.9]])
        h.transmat_ = np.array([[0.9, 0.1],
                               [0.7, 0.3]])
        h.precision_ = np.array([[0.4],
                                 [0.2]])
        # Expected transition matrix over the expanded (tied) state chain:
        # each unique state is unrolled into a left-to-right sub-chain.
        self.transmat = np.array([[0.7, 0.3, 0, 0, 0, 0],
                                 [0, 0.7, 0.3, 0, 0, 0],
                                 [0, 0, 0.7, 0.3, 0, 0],
                                 [0, 0, 0, 0.6, 0.4, 0],
                                 [0, 0, 0, 0, 0.6, 0.4],
                                 [0.4, 0, 0, 0, 0, 0.6]])
        # TODO: Test more parameters, generate test cases
        trainll = fit_hmm_and_monitor_log_likelihood(h, X)
        # Check that the log-likelihood is always increasing during training.
        #diff = np.diff(trainll)
        #self.assertTrue(np.all(diff >= -1e-6),
        #                "Decreasing log-likelihood: {0}" .format(diff))
        assert_array_almost_equal(h.mu_.reshape(-1),
                                  self.mu.reshape(-1), decimal=1)
        assert_array_almost_equal(h.transmat_.reshape(-1),
                                  self.transmat.reshape(-1), decimal=1)
        assert_array_almost_equal(h.precision_.reshape(-1),
                                  self.precision.reshape(-1), decimal=1)
if __name__ == '__main__':
    # Run the test suite when this file is executed as a script.
    unittest.main()
| [
"numpy.testing.assert_raises",
"numpy.array",
"numpy.empty",
"autohmm.tm.THMM",
"numpy.random.RandomState",
"unittest.main",
"numpy.seterr",
"numpy.testing.assert_array_equal"
] | [((290, 311), 'numpy.seterr', 'np.seterr', ([], {'all': '"""warn"""'}), "(all='warn')\n", (299, 311), True, 'import numpy as np\n'), ((519, 548), 'autohmm.tm.THMM', 'tm.THMM', ([], {'n_unique': '(2)', 'n_tied': '(1)'}), '(n_unique=2, n_tied=1)\n', (526, 548), False, 'from autohmm import tm\n'), ((578, 602), 'numpy.array', 'np.array', (['[[0.7], [0.3]]'], {}), '([[0.7], [0.3]])\n', (586, 602), True, 'import numpy as np\n'), ((623, 653), 'numpy.array', 'np.array', (['[0.7, 0.7, 0.3, 0.3]'], {}), '([0.7, 0.7, 0.3, 0.3])\n', (631, 653), True, 'import numpy as np\n'), ((709, 763), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['m._precision_prior_', 'correct_prior'], {}), '(m._precision_prior_, correct_prior)\n', (727, 763), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((979, 1008), 'numpy.empty', 'np.empty', (['n_iter'], {'dtype': 'float'}), '(n_iter, dtype=float)\n', (987, 1008), True, 'import numpy as np\n'), ((8022, 8037), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8035, 8037), False, 'import unittest\n'), ((359, 384), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (372, 384), False, 'from numpy.testing import assert_, assert_raises\n'), ((398, 417), 'autohmm.tm.THMM', 'tm.THMM', ([], {'n_unique': '(2)'}), '(n_unique=2)\n', (405, 417), False, 'from autohmm import tm\n'), ((449, 474), 'numpy.array', 'np.array', (['[0.7, 0.8, 0.9]'], {}), '([0.7, 0.8, 0.9])\n', (457, 474), True, 'import numpy as np\n'), ((1209, 1233), 'numpy.random.RandomState', 'np.random.RandomState', (['(2)'], {}), '(2)\n', (1230, 1233), True, 'import numpy as np\n'), ((1316, 1336), 'numpy.array', 'np.array', (['[0.6, 0.4]'], {}), '([0.6, 0.4])\n', (1324, 1336), True, 'import numpy as np\n'), ((1361, 1395), 'numpy.array', 'np.array', (['[[0.7, 0.3], [0.4, 0.6]]'], {}), '([[0.7, 0.3], [0.4, 0.6]])\n', (1369, 1395), True, 'import numpy as np\n'), ((1448, 1469), 'numpy.array', 'np.array', 
(['[0.7, -2.0]'], {}), '([0.7, -2.0])\n', (1456, 1469), True, 'import numpy as np\n'), ((1495, 1523), 'numpy.array', 'np.array', (['[[500.0], [250.0]]'], {}), '([[500.0], [250.0]])\n', (1503, 1523), True, 'import numpy as np\n'), ((2141, 2165), 'numpy.array', 'np.array', (['[[700], [150]]'], {}), '([[700], [150]])\n', (2149, 2165), True, 'import numpy as np\n'), ((2215, 2235), 'numpy.array', 'np.array', (['[2.6, 3.4]'], {}), '([2.6, 3.4])\n', (2223, 2235), True, 'import numpy as np\n'), ((2258, 2294), 'numpy.array', 'np.array', (['[[0.85, 0.15], [0.2, 0.8]]'], {}), '([[0.85, 0.15], [0.2, 0.8]])\n', (2266, 2294), True, 'import numpy as np\n'), ((3155, 3179), 'numpy.random.RandomState', 'np.random.RandomState', (['(2)'], {}), '(2)\n', (3176, 3179), True, 'import numpy as np\n'), ((3257, 3277), 'numpy.array', 'np.array', (['[0.6, 0.4]'], {}), '([0.6, 0.4])\n', (3265, 3277), True, 'import numpy as np\n'), ((3302, 3336), 'numpy.array', 'np.array', (['[[0.7, 0.3], [0.4, 0.6]]'], {}), '([[0.7, 0.3], [0.4, 0.6]])\n', (3310, 3336), True, 'import numpy as np\n'), ((3356, 3394), 'numpy.array', 'np.array', (['[[4.5, -1.5], [-0.7, -10.4]]'], {}), '([[4.5, -1.5], [-0.7, -10.4]])\n', (3364, 3394), True, 'import numpy as np\n'), ((3449, 3514), 'numpy.array', 'np.array', (['[[[0.5, 0.15], [0.15, 0.4]], [[0.6, 0.1], [0.1, 0.35]]]'], {}), '([[[0.5, 0.15], [0.15, 0.4]], [[0.6, 0.1], [0.1, 0.35]]])\n', (3457, 3514), True, 'import numpy as np\n'), ((4305, 4370), 'numpy.array', 'np.array', (['[[[0.4, 0.12], [0.12, 0.45]], [[0.7, 0.2], [0.2, 0.5]]]'], {}), '([[[0.4, 0.12], [0.12, 0.45]], [[0.7, 0.2], [0.2, 0.5]]])\n', (4313, 4370), True, 'import numpy as np\n'), ((4494, 4528), 'numpy.array', 'np.array', (['[[0.5, 0.5], [0.2, 0.8]]'], {}), '([[0.5, 0.5], [0.2, 0.8]])\n', (4502, 4528), True, 'import numpy as np\n'), ((4545, 4582), 'numpy.array', 'np.array', (['[[5.8, -0.1], [-3.3, -9.6]]'], {}), '([[5.8, -0.1], [-3.3, -9.6]])\n', (4553, 4582), True, 'import numpy as np\n'), ((4634, 4797), 
'numpy.array', 'np.array', (['[[0.7, 0.3, 0, 0, 0, 0], [0, 0.7, 0.3, 0, 0, 0], [0, 0, 0.7, 0.3, 0, 0], [0,\n 0, 0, 0.6, 0.4, 0], [0, 0, 0, 0, 0.6, 0.4], [0.4, 0, 0, 0, 0, 0.6]]'], {}), '([[0.7, 0.3, 0, 0, 0, 0], [0, 0.7, 0.3, 0, 0, 0], [0, 0, 0.7, 0.3, \n 0, 0], [0, 0, 0, 0.6, 0.4, 0], [0, 0, 0, 0, 0.6, 0.4], [0.4, 0, 0, 0, 0,\n 0.6]])\n', (4642, 4797), True, 'import numpy as np\n'), ((5763, 5788), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (5784, 5788), True, 'import numpy as np\n'), ((5865, 5885), 'numpy.array', 'np.array', (['[0.6, 0.4]'], {}), '([0.6, 0.4])\n', (5873, 5885), True, 'import numpy as np\n'), ((5910, 5944), 'numpy.array', 'np.array', (['[[0.7, 0.3], [0.4, 0.6]]'], {}), '([[0.7, 0.3], [0.4, 0.6]])\n', (5918, 5944), True, 'import numpy as np\n'), ((6004, 6028), 'numpy.array', 'np.array', (['[[0.5], [0.3]]'], {}), '([[0.5], [0.3]])\n', (6012, 6028), True, 'import numpy as np\n'), ((6082, 6107), 'numpy.array', 'np.array', (['[[0.7], [-2.0]]'], {}), '([[0.7], [-2.0]])\n', (6090, 6107), True, 'import numpy as np\n'), ((6683, 6708), 'numpy.array', 'np.array', (['[[3.5], [-3.9]]'], {}), '([[3.5], [-3.9]])\n', (6691, 6708), True, 'import numpy as np\n'), ((6757, 6791), 'numpy.array', 'np.array', (['[[0.9, 0.1], [0.7, 0.3]]'], {}), '([[0.9, 0.1], [0.7, 0.3]])\n', (6765, 6791), True, 'import numpy as np\n'), ((6847, 6871), 'numpy.array', 'np.array', (['[[0.4], [0.2]]'], {}), '([[0.4], [0.2]])\n', (6855, 6871), True, 'import numpy as np\n'), ((6930, 7093), 'numpy.array', 'np.array', (['[[0.7, 0.3, 0, 0, 0, 0], [0, 0.7, 0.3, 0, 0, 0], [0, 0, 0.7, 0.3, 0, 0], [0,\n 0, 0, 0.6, 0.4, 0], [0, 0, 0, 0, 0.6, 0.4], [0.4, 0, 0, 0, 0, 0.6]]'], {}), '([[0.7, 0.3, 0, 0, 0, 0], [0, 0.7, 0.3, 0, 0, 0], [0, 0, 0.7, 0.3, \n 0, 0], [0, 0, 0, 0.6, 0.4, 0], [0, 0, 0, 0, 0.6, 0.4], [0.4, 0, 0, 0, 0,\n 0.6]])\n', (6938, 7093), True, 'import numpy as np\n'), ((1746, 1777), 'numpy.array', 'np.array', (['[-100000.0, 100000.0]'], {}), '([-100000.0, 
100000.0])\n', (1754, 1777), True, 'import numpy as np\n'), ((3825, 3856), 'numpy.array', 'np.array', (['[-100000.0, 100000.0]'], {}), '([-100000.0, 100000.0])\n', (3833, 3856), True, 'import numpy as np\n'), ((6272, 6303), 'numpy.array', 'np.array', (['[-100000.0, 100000.0]'], {}), '([-100000.0, 100000.0])\n', (6280, 6303), True, 'import numpy as np\n')] |
import numpy as np
import scipy.sparse as spa
import cvxpy
class RandomQPExample(object):
    '''
    Randomly generated QP example, exposed both as raw QP data and as a
    CVXPY problem.
    '''
    def __init__(self, n, seed=1):
        '''
        Build a random QP with n variables and m = 10 * n inequality
        constraints, reproducible through the given seed.
        '''
        np.random.seed(seed)  # reproducible instance
        n = int(n)
        m = n * 10
        self.n = n
        self.m = m
        # Sparse positive-definite quadratic cost: P = F F^T + eps * I.
        factor = spa.random(n, n, density=0.15,
                            data_rvs=np.random.randn,
                            format='csc')
        self.P = factor.dot(factor.T).tocsc() + 1e-02 * spa.eye(n)
        self.q = np.random.randn(n)
        # Sparse constraint matrix; upper bounds are made feasible by
        # construction around a fictitious solution v, lower bounds are -inf.
        self.A = spa.random(m, n, density=0.15,
                            data_rvs=np.random.randn,
                            format='csc')
        v = np.random.randn(n)   # fictitious solution
        slack = np.random.rand(m)  # to get inequality
        self.u = self.A @ v + slack
        self.l = -np.inf * np.ones(m)  # self.u - np.random.rand(m)
        self.qp_problem = self._generate_qp_problem()
        self.cvxpy_problem = self._generate_cvxpy_problem()
    @staticmethod
    def name():
        return 'Random QP'
    def _generate_qp_problem(self):
        '''
        Pack the problem data into the plain-dict QP format.
        '''
        return {
            'P': self.P,
            'q': self.q,
            'A': self.A,
            'l': self.l,
            'u': self.u,
            'm': self.A.shape[0],
            'n': self.A.shape[1],
        }
    def _generate_cvxpy_problem(self):
        '''
        Build the equivalent CVXPY minimization problem.
        '''
        x = cvxpy.Variable(self.n)
        objective = .5 * cvxpy.quad_form(x, self.P) + self.q * x
        constraints = [self.A * x <= self.u, self.A * x >= self.l]
        return cvxpy.Problem(cvxpy.Minimize(objective), constraints)
    def revert_cvxpy_solution(self):
        '''
        Recover the QP primal and dual variables from the CVXPY solution.
        '''
        prob = self.cvxpy_problem
        # primal solution
        x = prob.variables()[0].value
        # dual solution: upper-bound minus lower-bound constraint multipliers
        upper, lower = prob.constraints
        y = upper.dual_value - lower.dual_value
        return x, y
| [
"cvxpy.Minimize",
"cvxpy.Variable",
"numpy.random.rand",
"numpy.ones",
"scipy.sparse.eye",
"scipy.sparse.random",
"numpy.random.seed",
"cvxpy.quad_form",
"numpy.random.randn"
] | [((278, 298), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (292, 298), True, 'import numpy as np\n'), ((412, 482), 'scipy.sparse.random', 'spa.random', (['n', 'n'], {'density': '(0.15)', 'data_rvs': 'np.random.randn', 'format': '"""csc"""'}), "(n, n, density=0.15, data_rvs=np.random.randn, format='csc')\n", (422, 482), True, 'import scipy.sparse as spa\n'), ((603, 621), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (618, 621), True, 'import numpy as np\n'), ((639, 709), 'scipy.sparse.random', 'spa.random', (['m', 'n'], {'density': '(0.15)', 'data_rvs': 'np.random.randn', 'format': '"""csc"""'}), "(m, n, density=0.15, data_rvs=np.random.randn, format='csc')\n", (649, 709), True, 'import scipy.sparse as spa\n'), ((778, 796), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (793, 796), True, 'import numpy as np\n'), ((837, 854), 'numpy.random.rand', 'np.random.rand', (['m'], {}), '(m)\n', (851, 854), True, 'import numpy as np\n'), ((1626, 1648), 'cvxpy.Variable', 'cvxpy.Variable', (['self.n'], {}), '(self.n)\n', (1640, 1648), False, 'import cvxpy\n'), ((938, 948), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (945, 948), True, 'import numpy as np\n'), ((1829, 1854), 'cvxpy.Minimize', 'cvxpy.Minimize', (['objective'], {}), '(objective)\n', (1843, 1854), False, 'import cvxpy\n'), ((575, 585), 'scipy.sparse.eye', 'spa.eye', (['n'], {}), '(n)\n', (582, 585), True, 'import scipy.sparse as spa\n'), ((1674, 1704), 'cvxpy.quad_form', 'cvxpy.quad_form', (['x_var', 'self.P'], {}), '(x_var, self.P)\n', (1689, 1704), False, 'import cvxpy\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 11 15:51:34 2021
@author: mike_ubuntu
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 13 14:45:14 2021
@author: mike_ubuntu
"""
import numpy as np
import epde.interface.interface as epde_alg
from epde.interface.prepared_tokens import Trigonometric_tokens
if __name__ == '__main__':
    t = np.linspace(0, 4*np.pi, 1000) # setting time axis, corresponding to the solution of the ODE
    u = np.load('/media/mike_ubuntu/DATA/EPDE_publication/tests/system/Test_data/fill366.npy') # loading data with the solution of ODE
    # Trying to create population for multi-objective optimization with only
    # derivatives as allowed tokens. Spoiler: only one equation structure will be
    # discovered, thus MOO algorithm will not be launched.
    epde_search_obj = epde_alg.epde_search()
    # Allow trigonometric factors with frequencies close to 1.
    trig_tokens = Trigonometric_tokens(freq = (0.95, 1.05))
    epde_search_obj.fit(data = u, boundary=10, equation_factors_max_number = 2, coordinate_tensors = [t,],
                        additional_tokens = trig_tokens, field_smooth = False)
    epde_search_obj.equation_search_results(only_print = True, level_num = 1) # showing the Pareto-optimal set of discovered equations
"epde.interface.prepared_tokens.Trigonometric_tokens",
"numpy.linspace",
"numpy.load",
"epde.interface.interface.epde_search"
] | [((393, 424), 'numpy.linspace', 'np.linspace', (['(0)', '(4 * np.pi)', '(1000)'], {}), '(0, 4 * np.pi, 1000)\n', (404, 424), True, 'import numpy as np\n'), ((488, 584), 'numpy.load', 'np.load', (['"""/media/mike_ubuntu/DATA/EPDE_publication/tests/system/Test_data/fill366.npy"""'], {}), "(\n '/media/mike_ubuntu/DATA/EPDE_publication/tests/system/Test_data/fill366.npy'\n )\n", (495, 584), True, 'import numpy as np\n'), ((867, 889), 'epde.interface.interface.epde_search', 'epde_alg.epde_search', ([], {}), '()\n', (887, 889), True, 'import epde.interface.interface as epde_alg\n'), ((913, 952), 'epde.interface.prepared_tokens.Trigonometric_tokens', 'Trigonometric_tokens', ([], {'freq': '(0.95, 1.05)'}), '(freq=(0.95, 1.05))\n', (933, 952), False, 'from epde.interface.prepared_tokens import Trigonometric_tokens\n')] |
# original file: https://github.com/dmlc/dgl/blob/master/examples/pytorch/lda/lda_model.py
# with minor modifications (to be considered upstream)
# Copyright 2021 <NAME>
# with references from "sklearn.decomposition.LatentDirichletAllocation"
# with the following original authors:
# * <NAME> (the said scikit-learn implementation)
# * <NAME> (original onlineldavb implementation)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, functools, warnings, torch, collections, dgl, io
import numpy as np, scipy as sp
try:
from functools import cached_property
except ImportError:
try:
from backports.cached_property import cached_property
except ImportError:
warnings.warn("cached_property not found - using property instead")
cached_property = property
class EdgeData:
    """Pairs source and destination node feature dicts for edge-wise math.

    Expects dicts containing 'Elog' and/or 'expectation' tensors whose
    second dimension indexes topics.
    """
    def __init__(self, src_data, dst_data):
        self.src_data = src_data
        self.dst_data = dst_data
    @property
    def loglike(self):
        """Per-edge log-likelihood: logsumexp over topics of the joint Elog."""
        joint = self.src_data['Elog'] + self.dst_data['Elog']
        return joint.logsumexp(1)
    @property
    def phi(self):
        """Per-edge topic responsibilities (softmax of the joint Elog scores)."""
        joint = self.src_data['Elog'] + self.dst_data['Elog']
        return (joint - self.loglike.unsqueeze(1)).exp()
    @property
    def expectation(self):
        """Per-edge expected probability: inner product of expectation vectors."""
        prod = self.src_data['expectation'] * self.dst_data['expectation']
        return prod.sum(1)
class _Dirichlet:
    """Dirichlet posterior statistics over pseudo-counts `nphi`.

    Rows index documents (or topics); columns index topics (or words).
    Expensive column-wise reductions are evaluated in chunks of `_chunksize`
    columns to bound peak memory on large vocabularies.
    """
    def __init__(self, prior, nphi, _chunksize=int(1e6)):
        self.prior = prior
        self.nphi = nphi
        # Apply map_fn to column slices and sum the per-slice row reductions,
        # so a full (rows x columns) intermediate is never materialized.
        self._sum_by_parts = lambda map_fn: functools.reduce(torch.add, [
            map_fn(slice(i, min(i + _chunksize, nphi.shape[1]))).sum(1)
            for i in list(range(0, nphi.shape[1], _chunksize))
        ])
    @property
    def device(self):
        return self.nphi.device
    def _posterior(self, _ID=slice(None)):
        # Dirichlet posterior parameters for the selected columns.
        return self.prior + self.nphi[:, _ID]
    @cached_property
    def posterior_sum(self):
        # Row-wise total of the full posterior (counts plus prior mass).
        return self.nphi.sum(1) + self.prior * self.nphi.shape[1]
    def _Elog(self, _ID=slice(None)):
        # E[log theta] under the Dirichlet posterior (digamma difference).
        return torch.digamma(self._posterior(_ID)) - \
            torch.digamma(self.posterior_sum.unsqueeze(1))
    @cached_property
    def loglike(self):
        # Row-wise ELBO term: -sum(nphi * Elog) minus the prior log-normalizer
        # plus the posterior log-normalizer (log Beta functions).
        neg_evid = -self._sum_by_parts(
            lambda s: (self.nphi[:, s] * self._Elog(s))
        )
        prior = torch.as_tensor(self.prior).to(self.nphi)
        K = self.nphi.shape[1]
        log_B_prior = torch.lgamma(prior) * K - torch.lgamma(prior * K)
        log_B_posterior = self._sum_by_parts(
            lambda s: torch.lgamma(self._posterior(s))
        ) - torch.lgamma(self.posterior_sum)
        return neg_evid - log_B_prior + log_B_posterior
    @cached_property
    def n(self):
        # Total pseudo-count per row.
        return self.nphi.sum(1)
    @cached_property
    def cdf(self):
        # Row-wise cumulative posterior weights, normalized so the last column
        # is exactly 1 (used for inverse-CDF sampling).
        cdf = self._posterior()
        torch.cumsum(cdf, 1, out=cdf)
        cdf /= cdf[:, -1:].clone()
        return cdf
    def _expectation(self, _ID=slice(None)):
        # Posterior mean of the Dirichlet for the selected columns.
        expectation = self._posterior(_ID)
        expectation /= self.posterior_sum.unsqueeze(1)
        return expectation
    @cached_property
    def Bayesian_gap(self):
        # 1 - sum over columns of exp(E[log theta]).
        return 1. - self._sum_by_parts(lambda s: self._Elog(s).exp())
    # Names of the memoized statistics that become stale after an update.
    _cached_properties = ["posterior_sum", "loglike", "n", "cdf", "Bayesian_gap"]
    def clear_cache(self):
        # Drop memoized statistics; they are recomputed lazily on next access.
        for name in self._cached_properties:
            try:
                delattr(self, name)
            except AttributeError:
                pass
    def update(self, new, _ID=slice(None), rho=1):
        """ inplace: old * (1-rho) + new * rho """
        self.clear_cache()
        # Mean absolute change before mixing, used as a convergence signal.
        mean_change = (self.nphi[:, _ID] - new).abs().mean().tolist()
        # Decay all columns, then blend the new statistics into the selection.
        self.nphi *= (1 - rho)
        self.nphi[:, _ID] += new * rho
        return mean_change
class DocData(_Dirichlet):
    """Per-document Dirichlet statistics; nphi is (n_docs, n_topics)."""
    def prepare_graph(self, G, key="Elog"):
        """Write the requested statistic ('Elog' or 'expectation') onto G's doc nodes."""
        stat_fn = getattr(self, '_' + key)
        G.nodes['doc'].data[key] = stat_fn().to(G.device)
    def update_from(self, G, mult):
        """Fold the accumulated 'nphi' node data back in; returns the mean change."""
        fresh = mult * G.nodes['doc'].data['nphi']
        return self.update(fresh.to(self.device))
class _Distributed(collections.UserList):
""" split on dim=0 and store on multiple devices """
def __init__(self, prior, nphi):
self.prior = prior
super().__init__([_Dirichlet(self.prior, x) for x in nphi])
def split_device(self, other, dim=0):
split_sections = [w.nphi.shape[0] for w in self]
out = torch.split(other, split_sections, dim)
return [y.to(w.device) for w, y in zip(self, out)]
class WordData(_Distributed):
    """Distributed word pseudo-counts (n_topics by n_words); transposed when
    moved to/from graph node data."""
    @staticmethod
    def _word_index(G):
        # Sub-graphs carry an '_ID' mapping into the full vocabulary;
        # full graphs address every word column.
        data = G.nodes['word'].data
        return data['_ID'] if '_ID' in data else slice(None)
    def prepare_graph(self, G, key="Elog"):
        """Write the requested statistic for G's word nodes (columns selected by '_ID')."""
        cols = self._word_index(G)
        parts = [getattr(shard, '_' + key)(cols).to(G.device) for shard in self]
        G.nodes['word'].data[key] = torch.cat(parts).T
    def update_from(self, G, mult, rho):
        """Fold accumulated node statistics back into each shard; returns the average change."""
        new_nphi = G.nodes['word'].data['nphi'].T * mult
        cols = self._word_index(G)
        changes = [shard.update(piece, cols, rho)
                   for shard, piece in zip(self, self.split_device(new_nphi))]
        return np.mean(changes)
class Gamma(collections.namedtuple('Gamma', "concentration, rate")):
    """Bridge between torch's (concentration, rate) and numpy's (shape, scale)
    gamma parameterizations."""
    @property
    def shape(self):
        # numpy's "shape" parameter is torch's "concentration".
        return self.concentration
    @property
    def scale(self):
        # numpy's "scale" parameter is the reciprocal of torch's "rate".
        return 1 / self.rate
    def sample(self, shape, device):
        """Draw a tensor of the given shape on the given device."""
        conc = torch.as_tensor(self.concentration, device=device)
        rate = torch.as_tensor(self.rate, device=device)
        return torch.distributions.gamma.Gamma(conc, rate).sample(shape)
class LatentDirichletAllocation:
    """LDA model that works with a HeteroGraph with doc->word meta paths.
    The model alters the attributes of G arbitrarily.
    This is inspired by [1] and its corresponding scikit-learn implementation.
    Inputs
    ---
    * n_words: vocabulary size
    * n_components: latent feature dimension
    * prior: parameters in the Dirichlet prior; defaults to 1/n_components
      for both 'doc' and 'word'
    * rho: new_nphi = (1-rho)*old_nphi + rho*nphi; default to 1 for full gradients.
    * mult: multiplier for nphi-update; a large value effectively disables prior;
      defaults to {'doc': 1, 'word': 1}
    * init: sklearn initializers, default (100.0, 100.0); the sample points
      concentrate around 1.0
    * device_list: accelerate word_data updates.
    Notes
    ---
    Some differences between this and sklearn.decomposition.LatentDirichletAllocation:
    * default word perplexity is normalized by training set instead of testing set.
    References
    ---
    [1] Hoffman, Blei, Bach. Online Learning for Latent
    Dirichlet Allocation. Advances in Neural Information Processing Systems 23
    (NIPS 2010).
    [2] Reactive LDA Library blogpost for a similar Gibbs model
    """
    def __init__(
        self, n_words, n_components,
        prior=None,
        rho=1,
        mult=None,
        init=None,
        device_list=None,
        verbose=True,
    ):
        self.n_words = n_words
        self.n_components = n_components
        # Resolve mutable defaults per instance: dict/tuple defaults in the
        # signature would be shared across all instances (mutable-default
        # pitfall); the effective values are unchanged.
        if prior is None:
            prior = {'doc': 1. / n_components, 'word': 1. / n_components}
        self.prior = prior
        self.rho = rho
        self.mult = {'doc': 1, 'word': 1} if mult is None else mult
        self.init = {'doc': (100., 100.), 'word': (100., 100.)} if init is None else init
        if device_list is None:
            device_list = ['cuda'] if torch.cuda.is_available() else ['cpu']
        self.device_list = device_list[:n_components] # avoid edge cases
        self.verbose = verbose
        self._init_word_data()
    def _init_word_data(self):
        """Partition the topics across devices and draw initial word pseudo-counts."""
        split_sections = np.diff(
            np.linspace(0, self.n_components, len(self.device_list) + 1).astype(int)
        )
        word_nphi = [
            Gamma(*self.init['word']).sample((s, self.n_words), device)
            for s, device in zip(split_sections, self.device_list)
        ]
        self.word_data = WordData(self.prior['word'], word_nphi)
    def _init_doc_data(self, n_docs, device):
        """Draw fresh per-document pseudo-counts for a batch of n_docs documents."""
        doc_nphi = Gamma(*self.init['doc']).sample(
            (n_docs, self.n_components), device)
        return DocData(self.prior['doc'], doc_nphi)
    def save(self, f):
        """Serialize hyper-parameters and word pseudo-counts to a file-like object."""
        for w in self.word_data:
            w.clear_cache()
        torch.save({
            'prior': self.prior,
            'rho': self.rho,
            'mult': self.mult,
            'init': self.init,
            'word_data': [part.nphi for part in self.word_data],
        }, f)
    def _prepare_graph(self, G, doc_data, key="Elog"):
        # Push the requested statistic onto both the doc and word nodes of G.
        doc_data.prepare_graph(G, key)
        self.word_data.prepare_graph(G, key)
    def _e_step(self, G, doc_data=None, mean_change_tol=1e-3, max_iters=100):
        """_e_step implements doc data sampling until convergence or max_iters
        """
        if doc_data is None:
            doc_data = self._init_doc_data(G.num_nodes('doc'), G.device)
        G_rev = G.reverse() # word -> doc
        self.word_data.prepare_graph(G_rev)
        for i in range(max_iters):
            doc_data.prepare_graph(G_rev)
            # Aggregate per-edge topic responsibilities into doc pseudo-counts.
            G_rev.update_all(
                lambda edges: {'phi': EdgeData(edges.src, edges.dst).phi},
                dgl.function.sum('phi', 'nphi')
            )
            mean_change = doc_data.update_from(G_rev, self.mult['doc'])
            if mean_change < mean_change_tol:
                break
        if self.verbose:
            print(f"e-step num_iters={i+1} with mean_change={mean_change:.4f}, "
                  f"perplexity={self.perplexity(G, doc_data):.4f}")
        return doc_data
    transform = _e_step
    def predict(self, doc_data):
        """Return doc-by-word scores: E[theta] @ E[beta], accumulated shard by shard."""
        pred_scores = [
            # d_exp @ w._expectation()
            (lambda x: x @ w.nphi + x.sum(1, keepdims=True) * w.prior)
            (d_exp / w.posterior_sum.unsqueeze(0))
            for (d_exp, w) in zip(
                self.word_data.split_device(doc_data._expectation(), dim=1),
                self.word_data)
        ]
        x = torch.zeros_like(pred_scores[0], device=doc_data.device)
        for p in pred_scores:
            x += p.to(x.device)
        return x
    def sample(self, doc_data, num_samples):
        """ draw independent words and return the marginal probabilities,
        i.e., the expectations in Dirichlet distributions.
        """
        def fn(cdf):
            # Inverse-CDF sampling along each row of a cumulative table.
            u = torch.rand(cdf.shape[0], num_samples, device=cdf.device)
            return torch.searchsorted(cdf, u).to(doc_data.device)
        topic_ids = fn(doc_data.cdf)
        word_ids = torch.cat([fn(part.cdf) for part in self.word_data])
        ids = torch.gather(word_ids, 0, topic_ids) # pick components by topic_ids
        # compute expectation scores on sampled ids
        src_ids = torch.arange(
            ids.shape[0], dtype=ids.dtype, device=ids.device
        ).reshape((-1, 1)).expand(ids.shape)
        unique_ids, inverse_ids = torch.unique(ids, sorted=False, return_inverse=True)
        G = dgl.heterograph({('doc', '', 'word'): (src_ids.ravel(), inverse_ids.ravel())})
        G.nodes['word'].data['_ID'] = unique_ids
        self._prepare_graph(G, doc_data, "expectation")
        G.apply_edges(lambda e: {'expectation': EdgeData(e.src, e.dst).expectation})
        expectation = G.edata.pop('expectation').reshape(ids.shape)
        return ids, expectation
    def _m_step(self, G, doc_data):
        """_m_step implements word data sampling and stores word_z stats.
        mean_change is in the sense of full graph with rho=1.
        """
        G = G.clone()
        self._prepare_graph(G, doc_data)
        G.update_all(
            lambda edges: {'phi': EdgeData(edges.src, edges.dst).phi},
            dgl.function.sum('phi', 'nphi')
        )
        self._last_mean_change = self.word_data.update_from(
            G, self.mult['word'], self.rho)
        if self.verbose:
            print(f"m-step mean_change={self._last_mean_change:.4f}, ", end="")
            Bayesian_gap = np.mean([
                part.Bayesian_gap.mean().tolist() for part in self.word_data
            ])
            print(f"Bayesian_gap={Bayesian_gap:.4f}")
    def partial_fit(self, G):
        """One online EM iteration on (a mini-batch sub-graph of) G."""
        doc_data = self._e_step(G)
        self._m_step(G, doc_data)
        return self
    def fit(self, G, mean_change_tol=1e-3, max_epochs=10):
        """Run partial_fit epochs until the word-data change falls below tolerance."""
        for i in range(max_epochs):
            if self.verbose:
                print(f"epoch {i+1}, ", end="")
            self.partial_fit(G)
            if self._last_mean_change < mean_change_tol:
                break
        return self
    def perplexity(self, G, doc_data=None):
        """ppl = exp{-sum[log(p(w1,...,wn|d))] / n}
        Follows Eq (15) in Hoffman et al., 2010.
        """
        if doc_data is None:
            doc_data = self._e_step(G)
        # compute E[log p(docs | theta, beta)]
        G = G.clone()
        self._prepare_graph(G, doc_data)
        G.apply_edges(lambda edges: {'loglike': EdgeData(edges.src, edges.dst).loglike})
        edge_elbo = (G.edata['loglike'].sum() / G.num_edges()).tolist()
        if self.verbose:
            print(f'neg_elbo phi: {-edge_elbo:.3f}', end=' ')
        # compute E[log p(theta | alpha) - log q(theta | gamma)]
        doc_elbo = (doc_data.loglike.sum() / doc_data.n.sum()).tolist()
        if self.verbose:
            print(f'theta: {-doc_elbo:.3f}', end=' ')
        # compute E[log p(beta | eta) - log q(beta | lambda)]
        # The denominator n for extrapolation perplexity is undefined.
        # We use the train set, whereas sklearn uses the test set.
        word_elbo = (
            sum([part.loglike.sum().tolist() for part in self.word_data])
            / sum([part.n.sum().tolist() for part in self.word_data])
        )
        if self.verbose:
            print(f'beta: {-word_elbo:.3f}')
        ppl = np.exp(-edge_elbo - doc_elbo - word_elbo)
        if G.num_edges() > 0 and np.isnan(ppl):
            warnings.warn("numerical issue in perplexity")
        return ppl
def doc_subgraph(G, doc_ids):
    """Extract the doc->word subgraph induced by `doc_ids`, keeping the word
    nodes' '_ID' mapping back to the full vocabulary."""
    sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)
    if hasattr(sampler, "sample_blocks"): # dgl <= 0.7.1
        block, *_ = sampler.sample_blocks(G.reverse(), {'doc': torch.as_tensor(doc_ids)})
    else:
        _, _, (block,) = sampler.sample(G.reverse(), {'doc': torch.as_tensor(doc_ids)})
    # Rebuild a word->doc heterograph from the sampled block and reverse it
    # back to doc->word. NOTE(review): relies on the private `block._graph`
    # attribute of dgl — may break across dgl versions.
    B = dgl.DGLHeteroGraph(
        block._graph, ['_', 'word', 'doc', '_'], block.etypes
    ).reverse()
    B.nodes['word'].data['_ID'] = block.nodes['word'].data['_ID']
    return B
if __name__ == '__main__':
    # Smoke test: fit/transform/predict/sample on a tiny 2-doc, 5-word graph.
    print('Testing LatentDirichletAllocation ...')
    G = dgl.heterograph({('doc', '', 'word'): [(0, 0), (1, 3)]}, {'doc': 2, 'word': 5})
    model = LatentDirichletAllocation(n_words=5, n_components=10, verbose=False)
    model.fit(G)
    model.transform(G)
    model.predict(model.transform(G))
    if hasattr(torch, "searchsorted"):  # sampling needs torch >= 1.6
        model.sample(model.transform(G), 3)
    model.perplexity(G)
    # Online updates on single-document sub-graphs.
    for doc_id in range(2):
        B = doc_subgraph(G, [doc_id])
        model.partial_fit(B)
    # Round-trip the model state through an in-memory buffer.
    with io.BytesIO() as f:
        model.save(f)
        f.seek(0)
        print(torch.load(f))
    print('Testing LatentDirichletAllocation passed!')
| [
"torch.as_tensor",
"dgl.heterograph",
"io.BytesIO",
"torch.searchsorted",
"torch.cuda.is_available",
"torch.arange",
"numpy.mean",
"torch.unique",
"dgl.dataloading.MultiLayerFullNeighborSampler",
"numpy.exp",
"warnings.warn",
"torch.zeros_like",
"torch.gather",
"dgl.function.sum",
"torch... | [((5793, 5847), 'collections.namedtuple', 'collections.namedtuple', (['"""Gamma"""', '"""concentration, rate"""'], {}), "('Gamma', 'concentration, rate')\n", (5815, 5847), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((14726, 14774), 'dgl.dataloading.MultiLayerFullNeighborSampler', 'dgl.dataloading.MultiLayerFullNeighborSampler', (['(1)'], {}), '(1)\n', (14771, 14774), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((15294, 15373), 'dgl.heterograph', 'dgl.heterograph', (["{('doc', '', 'word'): [(0, 0), (1, 3)]}", "{'doc': 2, 'word': 5}"], {}), "({('doc', '', 'word'): [(0, 0), (1, 3)]}, {'doc': 2, 'word': 5})\n", (15309, 15373), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((3268, 3297), 'torch.cumsum', 'torch.cumsum', (['cdf', '(1)'], {'out': 'cdf'}), '(cdf, 1, out=cdf)\n', (3280, 3297), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((4874, 4913), 'torch.split', 'torch.split', (['other', 'split_sections', 'dim'], {}), '(other, split_sections, dim)\n', (4885, 4913), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((5758, 5778), 'numpy.mean', 'np.mean', (['mean_change'], {}), '(mean_change)\n', (5765, 5778), True, 'import numpy as np, scipy as sp\n'), ((8982, 9130), 'torch.save', 'torch.save', (["{'prior': self.prior, 'rho': self.rho, 'mult': self.mult, 'init': self.init,\n 'word_data': [part.nphi for part in self.word_data]}", 'f'], {}), "({'prior': self.prior, 'rho': self.rho, 'mult': self.mult, 'init':\n self.init, 'word_data': [part.nphi for part in self.word_data]}, f)\n", (8992, 9130), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((10693, 10749), 'torch.zeros_like', 'torch.zeros_like', (['pred_scores[0]'], {'device': 'doc_data.device'}), '(pred_scores[0], device=doc_data.device)\n', (10709, 10749), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((11304, 11340), 
'torch.gather', 'torch.gather', (['word_ids', '(0)', 'topic_ids'], {}), '(word_ids, 0, topic_ids)\n', (11316, 11340), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((11598, 11650), 'torch.unique', 'torch.unique', (['ids'], {'sorted': '(False)', 'return_inverse': '(True)'}), '(ids, sorted=False, return_inverse=True)\n', (11610, 11650), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((14512, 14553), 'numpy.exp', 'np.exp', (['(-edge_elbo - doc_elbo - word_elbo)'], {}), '(-edge_elbo - doc_elbo - word_elbo)\n', (14518, 14553), True, 'import numpy as np, scipy as sp\n'), ((15746, 15758), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (15756, 15758), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((2888, 2911), 'torch.lgamma', 'torch.lgamma', (['(prior * K)'], {}), '(prior * K)\n', (2900, 2911), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((3026, 3058), 'torch.lgamma', 'torch.lgamma', (['self.posterior_sum'], {}), '(self.posterior_sum)\n', (3038, 3058), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((5381, 5395), 'torch.cat', 'torch.cat', (['out'], {}), '(out)\n', (5390, 5395), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((11057, 11113), 'torch.rand', 'torch.rand', (['cdf.shape[0]', 'num_samples'], {'device': 'cdf.device'}), '(cdf.shape[0], num_samples, device=cdf.device)\n', (11067, 11113), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((12387, 12418), 'dgl.function.sum', 'dgl.function.sum', (['"""phi"""', '"""nphi"""'], {}), "('phi', 'nphi')\n", (12403, 12418), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((14587, 14600), 'numpy.isnan', 'np.isnan', (['ppl'], {}), '(ppl)\n', (14595, 14600), True, 'import numpy as np, scipy as sp\n'), ((14614, 14660), 'warnings.warn', 'warnings.warn', (['"""numerical issue in perplexity"""'], {}), "('numerical issue in 
perplexity')\n", (14627, 14660), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((15029, 15102), 'dgl.DGLHeteroGraph', 'dgl.DGLHeteroGraph', (['block._graph', "['_', 'word', 'doc', '_']", 'block.etypes'], {}), "(block._graph, ['_', 'word', 'doc', '_'], block.etypes)\n", (15047, 15102), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((15819, 15832), 'torch.load', 'torch.load', (['f'], {}), '(f)\n', (15829, 15832), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((1193, 1260), 'warnings.warn', 'warnings.warn', (['"""cached_property not found - using property instead"""'], {}), "('cached_property not found - using property instead')\n", (1206, 1260), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((2767, 2794), 'torch.as_tensor', 'torch.as_tensor', (['self.prior'], {}), '(self.prior)\n', (2782, 2794), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((2862, 2881), 'torch.lgamma', 'torch.lgamma', (['prior'], {}), '(prior)\n', (2874, 2881), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((8116, 8141), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8139, 8141), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((9897, 9928), 'dgl.function.sum', 'dgl.function.sum', (['"""phi"""', '"""nphi"""'], {}), "('phi', 'nphi')\n", (9913, 9928), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((14896, 14920), 'torch.as_tensor', 'torch.as_tensor', (['doc_ids'], {}), '(doc_ids)\n', (14911, 14920), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((14994, 15018), 'torch.as_tensor', 'torch.as_tensor', (['doc_ids'], {}), '(doc_ids)\n', (15009, 15018), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((6156, 6206), 'torch.as_tensor', 'torch.as_tensor', (['self.concentration'], {'device': 'device'}), 
'(self.concentration, device=device)\n', (6171, 6206), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((6220, 6261), 'torch.as_tensor', 'torch.as_tensor', (['self.rate'], {'device': 'device'}), '(self.rate, device=device)\n', (6235, 6261), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((11133, 11159), 'torch.searchsorted', 'torch.searchsorted', (['cdf', 'u'], {}), '(cdf, u)\n', (11151, 11159), False, 'import os, functools, warnings, torch, collections, dgl, io\n'), ((11444, 11506), 'torch.arange', 'torch.arange', (['ids.shape[0]'], {'dtype': 'ids.dtype', 'device': 'ids.device'}), '(ids.shape[0], dtype=ids.dtype, device=ids.device)\n', (11456, 11506), False, 'import os, functools, warnings, torch, collections, dgl, io\n')] |
# individual network settings for each actor + critic pair
# see networkforall for details
'''
An addaption from:
Code partially extracted from:
https://github.com/denisyarats/pytorch_sac/blob/81c5b536d3a1c5616b2531e446450df412a064fb/agent/sac.py
https://github.com/philtabor/Youtube-Code-Repository/blob/master/ReinforcementLearning/PolicyGradient/SAC/sac_torch.py
https://github.com/pranz24/pytorch-soft-actor-critic/blob/master/sac.py
'''
from algorithms.sac.networkforall_sac import Network
from utilities.utilities import hard_update, gumbel_softmax, onehot_from_logits
from torch.optim import Adam, AdamW
import torch
import numpy as np
# add OU noise for exploration
from utilities.OUNoise import OUNoise
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = 'cpu'
class SACAgent():
    """Soft Actor-Critic agent: one actor, one critic and a target critic,
    with optional automatic entropy-temperature (alpha) tuning.
    """
    def __init__(self, in_actor, hidden_in_actor, hidden_out_actor, out_actor, in_critic, hidden_in_critic, hidden_out_critic, rnn_num_layers, rnn_hidden_size_actor, rnn_hidden_size_critic , lr_actor=1.0e-2, lr_critic=1.0e-2, weight_decay=1.0e-5, device = 'cpu', rnn = True, alpha = 0.2, automatic_entropy_tuning = True):
        super(SACAgent, self).__init__()
        # Actor maps (history, observation) to actions; critic scores them.
        self.actor = Network(in_actor, hidden_in_actor, hidden_out_actor, out_actor, rnn_num_layers, rnn_hidden_size_actor, device,actor=True, rnn = rnn).to(device)
        self.critic = Network(in_critic, hidden_in_critic, hidden_out_critic, 1, rnn_num_layers, rnn_hidden_size_critic, device, rnn = rnn).to(device)
        # self.target_actor = Network(in_actor, hidden_in_actor, hidden_out_actor, out_actor, rnn_num_layers, rnn_hidden_size_actor, device, actor=True, rnn = rnn).to(device)
        self.target_critic = Network(in_critic, hidden_in_critic, hidden_out_critic, 1, rnn_num_layers, rnn_hidden_size_critic, device, rnn = rnn).to(device)
        # Ornstein-Uhlenbeck noise process sized to the action dimension.
        self.noise = OUNoise(out_actor, scale=1.0 )
        self.device = device
        # from torchsummary import summary
        # import pdb; pdb.set_trace()
        # summary(self.actor, (3, 224, 224))
        # initialize targets same as original networks
        # hard_update(self.target_actor, self.actor)
        hard_update(self.target_critic, self.critic)
        self.actor_optimizer = Adam(self.actor.parameters(), lr=lr_actor)
        self.critic_optimizer = Adam(self.critic.parameters(), lr=lr_critic, weight_decay=weight_decay)
        # self.actor_optimizer = AdamW(self.actor.parameters(), lr=lr_actor, betas=(0.9, 0.999), eps=1e-08, weight_decay=weight_decay, amsgrad=False)
        # self.critic_optimizer = AdamW(self.critic.parameters(), lr=lr_critic, betas=(0.9, 0.999), eps=1e-08, weight_decay=weight_decay, amsgrad=False)
        # Alpha (entropy temperature)
        self.automatic_entropy_tuning = automatic_entropy_tuning
        self.alpha = alpha
        # Target Entropy = −dim(A) (e.g. , -6 for HalfCheetah-v2) as given in the paper
        if self.automatic_entropy_tuning is True:
            # NOTE(review): if out_actor is a plain int, torch.Tensor(out_actor)
            # creates an UNINITIALIZED tensor of that length rather than
            # tensor([out_actor]) — verify out_actor is a shape tuple here.
            self.target_entropy = -torch.prod(torch.Tensor(out_actor).to(self.device)).item()
            # Optimize log(alpha) for numerical stability, starting at log(alpha).
            self.log_alpha = (torch.zeros(1, requires_grad=True, device=self.device)+np.log(self.alpha)).detach().requires_grad_(True)
            self.alpha_optimizer = Adam([self.log_alpha], lr=lr_actor)
    def act(self, his, obs, noise=0.0):
        """Select an action: stochastic (sampled) when noise > 0, deterministic
        otherwise. Returns the action clamped to [-1, 1], on the CPU."""
        his = his.to(self.device)
        obs = obs.to(self.device)
        if noise > 0.0:
            action, _ = self.actor.sample_normal(his,obs)
        else:
            action, _ = self.actor.forward(his,obs)
        action = action.cpu().clamp(-1, 1)
        # actions.cpu().detach().numpy()[0]
        return action.cpu()
    def act_prob(self, his, obs, noise=0.0):
        """Sample an action and return it (on CPU) together with its log-probability."""
        his = his.to(self.device)
        obs = obs.to(self.device)
        actions, log_probs = self.actor.sample_normal(his,obs)
        # action = action.cpu().clamp(-1, 1)
        # actions.cpu().detach().numpy()[0]
        return actions.cpu(), log_probs
| [
"torch.optim.Adam",
"utilities.OUNoise.OUNoise",
"numpy.log",
"torch.Tensor",
"algorithms.sac.networkforall_sac.Network",
"utilities.utilities.hard_update",
"torch.zeros"
] | [((1861, 1890), 'utilities.OUNoise.OUNoise', 'OUNoise', (['out_actor'], {'scale': '(1.0)'}), '(out_actor, scale=1.0)\n', (1868, 1890), False, 'from utilities.OUNoise import OUNoise\n'), ((2182, 2226), 'utilities.utilities.hard_update', 'hard_update', (['self.target_critic', 'self.critic'], {}), '(self.target_critic, self.critic)\n', (2193, 2226), False, 'from utilities.utilities import hard_update, gumbel_softmax, onehot_from_logits\n'), ((3229, 3264), 'torch.optim.Adam', 'Adam', (['[self.log_alpha]'], {'lr': 'lr_actor'}), '([self.log_alpha], lr=lr_actor)\n', (3233, 3264), False, 'from torch.optim import Adam, AdamW\n'), ((1211, 1346), 'algorithms.sac.networkforall_sac.Network', 'Network', (['in_actor', 'hidden_in_actor', 'hidden_out_actor', 'out_actor', 'rnn_num_layers', 'rnn_hidden_size_actor', 'device'], {'actor': '(True)', 'rnn': 'rnn'}), '(in_actor, hidden_in_actor, hidden_out_actor, out_actor,\n rnn_num_layers, rnn_hidden_size_actor, device, actor=True, rnn=rnn)\n', (1218, 1346), False, 'from algorithms.sac.networkforall_sac import Network\n'), ((1377, 1496), 'algorithms.sac.networkforall_sac.Network', 'Network', (['in_critic', 'hidden_in_critic', 'hidden_out_critic', '(1)', 'rnn_num_layers', 'rnn_hidden_size_critic', 'device'], {'rnn': 'rnn'}), '(in_critic, hidden_in_critic, hidden_out_critic, 1, rnn_num_layers,\n rnn_hidden_size_critic, device, rnn=rnn)\n', (1384, 1496), False, 'from algorithms.sac.networkforall_sac import Network\n'), ((1710, 1829), 'algorithms.sac.networkforall_sac.Network', 'Network', (['in_critic', 'hidden_in_critic', 'hidden_out_critic', '(1)', 'rnn_num_layers', 'rnn_hidden_size_critic', 'device'], {'rnn': 'rnn'}), '(in_critic, hidden_in_critic, hidden_out_critic, 1, rnn_num_layers,\n rnn_hidden_size_critic, device, rnn=rnn)\n', (1717, 1829), False, 'from algorithms.sac.networkforall_sac import Network\n'), ((3089, 3143), 'torch.zeros', 'torch.zeros', (['(1)'], {'requires_grad': '(True)', 'device': 'self.device'}), '(1, 
requires_grad=True, device=self.device)\n', (3100, 3143), False, 'import torch\n'), ((3144, 3162), 'numpy.log', 'np.log', (['self.alpha'], {}), '(self.alpha)\n', (3150, 3162), True, 'import numpy as np\n'), ((3011, 3034), 'torch.Tensor', 'torch.Tensor', (['out_actor'], {}), '(out_actor)\n', (3023, 3034), False, 'import torch\n')] |
import numpy as np
from filterpy.kalman import KalmanFilter
import copy
import pickle
import datetime
#3D output through UDP
import socket
addr=('localhost', 5110)
s=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#The Kalman-Filter
class Filter:
    """Constant-velocity 2-D Kalman filter over the state [x, y, vx, vy],
    observing position (x, y) only."""

    def __init__(self):
        self.kfilter = KalmanFilter(dim_x=4, dim_z=2)
        # State-transition model: each step, position integrates velocity.
        self.kfilter.F = np.array([
            [1, 0, 1, 0],
            [0, 1, 0, 1],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ])
        # Measurement-noise covariance for the (x, y) observation.
        self.kfilter.R = np.array([
            [16, 0],
            [0, 16],
        ])
        # Measurement model: project state [x, y, vx, vy] onto [x, y].
        self.kfilter.H = np.array([
            [1, 0, 0, 0],
            [0, 1, 0, 0],
        ])

    def setstate(self, x, y):
        """Reset the filter to position (x, y) with zero velocity."""
        self.kfilter.x = np.array([[x], [y], [0], [0]])

    def getstate(self):
        """Return a copy of the current state column vector."""
        return np.copy(self.kfilter.x)

    def update(self, x, y):
        """Run one predict/update cycle with measurement (x, y)."""
        self.kfilter.predict()
        self.kfilter.update(np.array([[x], [y]]))
class Tracer:
    """Tracks one target over time through a Kalman Filter.

    Targets are dicts with at least 'x' and 'y' keys; the smoothed history
    is accumulated in ``self.targetlist``.
    """
    # tracerlist = list()
    # history = list()
    def __init__(self, firsttarget:dict):
        # Seed the Kalman filter at the first observed position.
        self.filter = Filter()
        self.filter.setstate(firsttarget['x'],firsttarget['y'])
        self.targetlist = list()
        #firsttarget.filtered_pos=copy.copy(firsttarget.transformed_pos)
        #self.add(self.filter.getstate(), firsttarget.device)
        self.targetlist.append(firsttarget)
        #self.id=len(Tracer.tracerlist)
        # Caller (TracerList) assigns the real id; -1 means "unassigned".
        self.id=-1
    def trianglefix(self):
        # Jitter suppression: look at the last three points and drop the
        # middle one when the end points are close to each other relative to
        # the detour through the middle point (a degenerate "triangle").
        if len(self.targetlist)>3:
            t=self.targetlist[-3:]
            x=[ts['x'] for ts in t]
            y=[ts['y'] for ts in t]
            # d1: endpoints squared distance; d2/d3: legs via the middle point.
            d1=(y[2]-y[0])**2+(x[2]-x[0])**2
            d2=(y[1]-y[0])**2+(x[1]-x[0])**2
            d3=(y[2]-y[1])**2+(x[2]-x[1])**2
            if d1<0.5 and d1<d2+d3+(d2*d3)**0.5 or d1 < 0.2:
                # NOTE(review): list.remove() deletes the first *equal*
                # element, not necessarily index -2 -- confirm targets are
                # unique dict objects.
                self.targetlist.remove(self.targetlist[-2])
    def filt(self, target):
        """Push a new raw measurement through the filter; stores the raw
        coordinates under 'raw_x'/'raw_y' and overwrites 'x'/'y' with the
        filtered estimate."""
        x = target['x']
        y = target['y']
        self.filter.update(x,y)
        state = self.filter.getstate()
        target['raw_x']=x
        target['raw_y']=y
        target['x']=state[0][0]
        target['y']=state[1][0]
        #target.filtered_pos = {'x':state[0][0], 'y':state[1][0]}
        self.targetlist.append(target)
        self.trianglefix()
class TracerList:
    """Associates each incoming target with the nearest existing Tracer
    (in space-time), or spawns a new Tracer when none is close enough."""

    def __init__(self):
        self.tracerlist = list()

    def tracetarget(self, target, maxdistance=1., time_weight=0.04):
        """Route `target` (dict with 'x', 'y', 'time') to the closest tracer
        whose last point lies within `maxdistance`; the elapsed time between
        points contributes `time_weight * dt^2` to the squared distance."""
        best = None
        best_d2 = maxdistance ** 2
        for candidate in self.tracerlist:
            last = candidate.targetlist[-1]
            elapsed = (target['time'] - last['time']).total_seconds()
            d2 = ((target['x'] - last['x']) ** 2
                  + (target['y'] - last['y']) ** 2
                  + elapsed ** 2 * time_weight)
            if d2 < best_d2:
                best_d2 = d2
                best = candidate
        if best is not None:
            # Feed the measurement through that tracer's Kalman filter.
            best.filt(target)
        else:
            # No tracer close enough: start a new track.
            best = Tracer(target)
            best.id = len(self.tracerlist)
            self.tracerlist.append(best)
| [
"numpy.array",
"filterpy.kalman.KalmanFilter",
"numpy.copy",
"socket.socket"
] | [((167, 215), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (180, 215), False, 'import socket\n'), ((305, 335), 'filterpy.kalman.KalmanFilter', 'KalmanFilter', ([], {'dim_x': '(4)', 'dim_z': '(2)'}), '(dim_x=4, dim_z=2)\n', (317, 335), False, 'from filterpy.kalman import KalmanFilter\n'), ((388, 454), 'numpy.array', 'np.array', (['[[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (396, 454), True, 'import numpy as np\n'), ((585, 613), 'numpy.array', 'np.array', (['[[16, 0], [0, 16]]'], {}), '([[16, 0], [0, 16]])\n', (593, 613), True, 'import numpy as np\n'), ((757, 795), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0]])\n', (765, 795), True, 'import numpy as np\n'), ((889, 919), 'numpy.array', 'np.array', (['[[x], [y], [0], [0]]'], {}), '([[x], [y], [0], [0]])\n', (897, 919), True, 'import numpy as np\n'), ((1065, 1088), 'numpy.copy', 'np.copy', (['self.kfilter.x'], {}), '(self.kfilter.x)\n', (1072, 1088), True, 'import numpy as np\n'), ((1176, 1196), 'numpy.array', 'np.array', (['[[x], [y]]'], {}), '([[x], [y]])\n', (1184, 1196), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import random
#xs
# Shared sample grid on [0, 1) in steps of 0.01 (100 points); every basis
# function is evaluated on this grid.
xs = np.arange(0, 1, 0.01)
#bases
def get_base(x0, sig, grid=None):
    """Return a Gaussian basis function centred at `x0` with std-dev `sig`.

    grid: optional sample points; defaults to the module-level grid ``xs``
          (added for testability -- backward compatible).
    """
    g = xs if grid is None else grid
    # BUG FIX: the normal-PDF normalisation is 1/(sig*sqrt(2*pi)); the
    # original used 1/sqrt(2*pi*sig), which mis-scales the amplitude.
    return 1.0 / (sig * np.sqrt(2 * np.pi)) * np.exp(
        -(g - x0) * (g - x0) / (2 * sig * sig)
    )
# B1 = get_base(0.2, 0.02)
# B2 = get_base(0.5, 0.02)
# B3 = get_base(0.9, 0.02)
# B4 = get_base(0.65, 0.02)
# B5 = get_base(0.32, 0.02)
#bases to be used
# Ten Gaussian bases with random centres in [0, 1) and random widths.
bases = []
for _ in range(10):
    x0 = random.random()
    sig = random.uniform(0.01, 0.1)
    bases.append(get_base(x0, sig))
#desired output -- testing purposes!! You should put the result of the expt here!!
# Synthetic target curve: a random non-negative combination of the same bases.
coeffs = [random.random() for _ in range(len(bases))]
Desired = [a * B for a, B in zip(coeffs, bases)]
Desired = sum(Desired)
class Indicator:
    """A candidate solution: a weighted sum of fixed basis functions.

    The genome is the weight vector ``W`` (one weight per basis, clipped to
    [0, 1] after mutation); ``output()`` evaluates the weighted sum.
    """
    def __init__(self, *bases):
        self.bases = [b for b in bases]
        self.W = np.random.random(len(bases))

    def output(self):
        """Return sum_i W[i] * bases[i] (element-wise over the grid)."""
        S = 0.0
        for i in range(len(self.bases)):
            S += self.bases[i] * self.W[i]
        return S

    def calculateCost(self, desired):
        """Mean squared error between output() and `desired`; sets self.cost."""
        guess = self.output()
        SSE = (guess - desired) * (guess - desired)
        #find a better cost function!!
        self.cost = sum(SSE) / len(SSE)

    def mutate(self, rate=0.1):
        """With probability `rate`, nudge one random weight by U(-0.1, 0.1)
        and re-clip all weights into [0, 1]."""
        if np.random.random() < rate:
            index = np.random.randint(len(self.W))
            self.W[index] += random.uniform(-0.1, 0.1)
            self.W = np.clip(self.W, 0.0, 1.0)

    def set_W(self, W):
        # BUG FIX: `W[:]` on a NumPy array returns a *view*, so a clone
        # shared its weight storage with its parent and the clone's in-place
        # mutate() corrupted the parent too. Make a real copy (this also
        # still accepts plain lists).
        self.W = np.array(W, dtype=float)

    def clone(self):
        """Return a new Indicator with the same bases and a copy of W."""
        newI = Indicator(*self.bases)
        newI.set_W(self.W)
        return newI
class PopulationManager:
    """Maintains a population of Indicator candidates and evolves it via
    clone-the-best plus small mutations."""

    def __init__(self, *bases, popsize=10):
        self.popsize = popsize
        self.create_pop(*bases)

    def create_pop(self, *bases):
        """(Re)initialise the population with randomly weighted indicators."""
        self.population = [Indicator(*bases) for _ in range(self.popsize)]

    def NextGeneration(self, desired):
        """Produce the next generation: each child is a mutated clone of the
        current best indicator."""
        for member in self.population:
            member.calculateCost(desired)
        # Normalise the costs so they sum to one across the population.
        total = sum(member.cost for member in self.population)
        for member in self.population:
            member.cost /= total
        offspring = []
        for _ in range(self.popsize):
            child = self.get_BestIndicator(desired).clone()
            child.mutate(0.01)
            offspring.append(child)
        return offspring

    def evolve(self, desired, generations=1):
        """Run `generations` rounds of NextGeneration in place."""
        for _ in range(generations):
            self.population = self.NextGeneration(desired)

    def get_BestIndicator(self, desired):
        """Recompute costs and return the lowest-cost member (first wins ties)."""
        for member in self.population:
            member.calculateCost(desired)
        best = self.population[0]
        for candidate in self.population[1:]:
            if candidate.cost < best.cost:
                best = candidate
        return best
#main
if __name__ == "__main__":
    plt.ion()
    # BUG FIX: `PopulationManager(*bases, 20)` folded 20 into *bases (popsize
    # is keyword-only), so popsize silently stayed 10 AND the integer 20
    # became a bogus extra basis function. Pass popsize by keyword.
    pop = PopulationManager(*bases, popsize=20)
    # pop.evolve(Desired, 5000)
    plt.plot(xs, Desired, label="desired")
    out = pop.get_BestIndicator(Desired).output()
    outplot, = plt.plot(xs, out)
    # Animate 100 generations, redrawing the current best fit each step.
    for i in range(100):
        pop.population = pop.NextGeneration(Desired)
        best = pop.get_BestIndicator(Desired)
        out = best.output()
        outplot.set_ydata(out)
        plt.title("Generation = {}".format(i))
        plt.pause(0.0001)
    #the best weights!
    print(
        best.W
    )
    plt.legend()
    plt.show()
| [
"numpy.clip",
"random.uniform",
"numpy.sqrt",
"numpy.random.random",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"numpy.exp",
"matplotlib.pyplot.ion",
"random.random",
"matplotlib.pyplot.pause",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((80, 101), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.01)'], {}), '(0, 1, 0.01)\n', (89, 101), True, 'import numpy as np\n'), ((456, 471), 'random.random', 'random.random', ([], {}), '()\n', (469, 471), False, 'import random\n'), ((483, 508), 'random.uniform', 'random.uniform', (['(0.01)', '(0.1)'], {}), '(0.01, 0.1)\n', (497, 508), False, 'import random\n'), ((645, 660), 'random.random', 'random.random', ([], {}), '()\n', (658, 660), False, 'import random\n'), ((3726, 3735), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (3733, 3735), True, 'import matplotlib.pyplot as plt\n'), ((3815, 3853), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'Desired'], {'label': '"""desired"""'}), "(xs, Desired, label='desired')\n", (3823, 3853), True, 'import matplotlib.pyplot as plt\n'), ((3921, 3938), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'out'], {}), '(xs, out)\n', (3929, 3938), True, 'import matplotlib.pyplot as plt\n'), ((4268, 4280), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4278, 4280), True, 'import matplotlib.pyplot as plt\n'), ((4286, 4296), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4294, 4296), True, 'import matplotlib.pyplot as plt\n'), ((183, 231), 'numpy.exp', 'np.exp', (['(-(xs - x0) * (xs - x0) / (2 * sig * sig))'], {}), '(-(xs - x0) * (xs - x0) / (2 * sig * sig))\n', (189, 231), True, 'import numpy as np\n'), ((1538, 1563), 'numpy.clip', 'np.clip', (['self.W', '(0.0)', '(1.0)'], {}), '(self.W, 0.0, 1.0)\n', (1545, 1563), True, 'import numpy as np\n'), ((4184, 4201), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (4193, 4201), True, 'import matplotlib.pyplot as plt\n'), ((155, 179), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * sig)'], {}), '(2 * np.pi * sig)\n', (162, 179), True, 'import numpy as np\n'), ((1385, 1403), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1401, 1403), True, 'import numpy as np\n'), ((1494, 1519), 'random.uniform', 'random.uniform', (['(-0.1)', 
'(0.1)'], {}), '(-0.1, 0.1)\n', (1508, 1519), False, 'import random\n')] |
'''Plot grid of particles pixellised using konigcell2d. You first need to
compile `example.c` and `konigcell2d.c` and redirect the printed grid to a
file, whose path you should give to this Python script.
For example:
$> clang example.c konigcell2d.c -lm -O3
$> ./a.out > pixels.csv
$> python plot_example_output.py pixels.csv
'''
import sys
import numpy as np
import konigcell as kc
import plotly.graph_objs as go
# First row of the file holds the grid extents; the rest is the pixel grid.
xmin, xmax, ymin, ymax = np.loadtxt(sys.argv[1], max_rows = 1)
grid = np.loadtxt(sys.argv[1], skiprows = 1)
# Wrap the raw grid together with its physical extents.
pixels = kc.Pixels(grid, (xmin, xmax), (ymin, ymax))
# Render an interactive heatmap.
fig = go.Figure()
fig.add_trace(pixels.heatmap_trace())
fig.show()
| [
"plotly.graph_objs.Figure",
"numpy.loadtxt",
"konigcell.Pixels"
] | [((450, 485), 'numpy.loadtxt', 'np.loadtxt', (['sys.argv[1]'], {'max_rows': '(1)'}), '(sys.argv[1], max_rows=1)\n', (460, 485), True, 'import numpy as np\n'), ((495, 530), 'numpy.loadtxt', 'np.loadtxt', (['sys.argv[1]'], {'skiprows': '(1)'}), '(sys.argv[1], skiprows=1)\n', (505, 530), True, 'import numpy as np\n'), ((543, 586), 'konigcell.Pixels', 'kc.Pixels', (['grid', '(xmin, xmax)', '(ymin, ymax)'], {}), '(grid, (xmin, xmax), (ymin, ymax))\n', (552, 586), True, 'import konigcell as kc\n'), ((594, 605), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (603, 605), True, 'import plotly.graph_objs as go\n')] |
import os
from moviepy.editor import VideoFileClip
from lane_processing_pipeline import *
import numpy as np
from train_car_model import *
from extract_feature_functions import *
import random
class image_processor_class():
    """Runs the vehicle-detection pipeline over every frame of a video clip
    and writes the annotated result to `outputFile`."""

    def __init__(self, clip, outputFile, params, svc, X_scaler):
        videoClip = clip
        # Temporal-smoothing state: raw detections from up to
        # `av_window_limit` recent frames are accumulated in self.bbox.
        self.timeStepCounter = 0
        self.av_window_limit = 20
        self.bbox = []
        self.params = params
        self.svc = svc
        self.X_scaler = X_scaler
        # Process the clip frame by frame and write the annotated video.
        white_clip = videoClip.fl_image(self.process_image)
        white_clip.write_videofile(outputFile, audio=False)

    def process_image(self, img):
        """Detect vehicles in one RGB frame; returns the annotated frame
        (or the untouched frame while the smoothing window is filling)."""
        draw_img = np.copy(img)
        # Raw sliding-window detections for this frame.
        marked_image, heat_img, bboxes = find_car_in_frame(draw_img, self.svc, self.X_scaler, self.params)
        bboxes_final = bboxes
        # Keep only windows that persist across consecutive frames.
        time_heatMap, lbl = self.significance_presence_check(bboxes_final,
                                                            marked_image.shape[0:2])
        # BUG FIX: `not time_heatMap == None` compares an ndarray element-wise
        # with None, so `not` raises "truth value of an array is ambiguous".
        if time_heatMap is not None:
            filtered_image, _ = draw_labeled_bboxes(img, lbl)
            plt.figure(figsize=(12, 6))
            plt.subplot(121)
            plt.imshow(marked_image)
            plt.subplot(122)
            plt.imshow(filtered_image)
            plt.show()
            return filtered_image
        else:
            return img

    def significance_presence_check(self, bbox, imgSize):
        """Accumulate boxes over time. Once more than `av_window_limit`
        frames have been seen, threshold the accumulated heat and return
        (heatmap, labels); before that, returns (None, None)."""
        if self.timeStepCounter > self.av_window_limit:
            heatImg = np.zeros(imgSize)
            # Slide the window: drop the oldest entry, append this frame's.
            if len(self.bbox):
                self.bbox.pop(0)
            self.bbox += bbox
            # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
            heatImg, lbl = add_heat(heatImg, self.bbox, threshold=int(self.av_window_limit * 0.2))
        else:
            # Still filling the smoothing window -- just accumulate.
            self.bbox += bbox
            heatImg = None
            lbl = None
        self.timeStepCounter += 1
        return heatImg, lbl
if __name__ == '__main__':
    #fileList = glob.glob("../*.mp4")
    fileList = [r'project_video.mp4']
    # Feature-extraction parameters shared by training and detection.
    params = get_params()
    # Load a previously trained classifier if present; otherwise train one.
    if os.path.isfile('../car_classifier.p'):
        print('Loading trained model ../car_classifier.p')
        print('To train a new model please delete this file')
        # Use a context manager so the pickle file handle is closed.
        with open('../car_classifier.p', 'rb') as model_file:
            load_quant = pickle.load(model_file)
        svc = load_quant['clf']
        X_scaler = load_quant['X_scaler']
    else:
        # BUG FIX: the original called train_svc(self.params); `self` does
        # not exist at module level and raised NameError.
        svc, X_scaler = train_svc(params)
    # Process every video file in the list.
    for figIdx, fileName in enumerate(fileList):
        inputFile = '../' + fileName
        outputFile = '../output_videos/' + fileName
        print(inputFile)
        # Restrict processing to the 38-50 s segment of the clip.
        clip1 = VideoFileClip(inputFile).subclip(38, 50)
        # Running the constructor processes and writes the whole clip.
        oImageProc = image_processor_class(clip1, outputFile, params, svc, X_scaler)
"numpy.copy",
"os.path.isfile",
"numpy.zeros",
"numpy.int",
"moviepy.editor.VideoFileClip"
] | [((2568, 2605), 'os.path.isfile', 'os.path.isfile', (['"""../car_classifier.p"""'], {}), "('../car_classifier.p')\n", (2582, 2605), False, 'import os\n'), ((928, 940), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (935, 940), True, 'import numpy as np\n'), ((1901, 1918), 'numpy.zeros', 'np.zeros', (['imgSize'], {}), '(imgSize)\n', (1909, 1918), True, 'import numpy as np\n'), ((3175, 3199), 'moviepy.editor.VideoFileClip', 'VideoFileClip', (['inputFile'], {}), '(inputFile)\n', (3188, 3199), False, 'from moviepy.editor import VideoFileClip\n'), ((2079, 2113), 'numpy.int', 'np.int', (['(self.av_window_limit * 0.2)'], {}), '(self.av_window_limit * 0.2)\n', (2085, 2113), True, 'import numpy as np\n')] |
# Copyright (c) 2020, <NAME> (@e-bug).
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import logging
import _pickle as cPickle
import random
import numpy as np
import torch
from torch.utils.data import Dataset
from datasets import load_dataset
logger = logging.getLogger(__name__)
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
def assert_eq(real, expected):
    """Fail with a descriptive message unless ``real == expected``."""
    message = "%s (true) vs %s (expected)" % (real, expected)
    assert real == expected, message
def _load_corpus(ann_dir, lgs):
    """Load one HuggingFace text dataset per language.

    ann_dir: directory holding "<lg>.20180201.txt" Wikipedia dumps.
    lgs: iterable of language codes to load.
    Returns {lg: Dataset} with empty lines filtered out.
    """
    corpora = {}
    for lg in lgs:
        ann_file = os.path.join(ann_dir, "%s.20180201.txt" % lg)
        corpus = load_dataset('text', split="train", data_files=ann_file, cache_dir=os.path.join(ann_dir, 'huggingface'))
        # Drop blank lines so every example has non-empty text.
        corpus = corpus.filter(lambda example: len(example['text']) > 0)
        corpora[lg] = corpus
    return corpora
return corpora
def set_sampling_probs(data, langs, coeff=-1):
    """
    Set the probability of sampling specific languages / language pairs during training.
    """
    # coeff == -1 disables weighted sampling entirely.
    if coeff == -1:
        return
    assert coeff > 0
    assert len(langs) > 0
    # Size-proportional probabilities, tempered by the exponent `coeff`.
    sizes = np.array([1.0 * len(data[lang]) for lang in langs])
    sizes /= sizes.sum()
    tempered = sizes ** coeff
    tempered /= tempered.sum()
    return {lang: p for lang, p in zip(langs, tempered)}
return lg2prob
def shuf_order(langs, lg2prob, lg_sampling_factor=-1, n=3):
    """
    Randomize training order.
    [https://github.com/microsoft/M3P/blob/master/M3P/src/utils.py]
    """
    if not langs:
        return []
    # Uniform sampling when no factor is given, otherwise weighted by lg2prob.
    if lg_sampling_factor == -1:
        weights = None
    else:
        weights = np.array([lg2prob[lg] for lg in langs])
        weights = weights / weights.sum()
    # Sample `n` languages with replacement.
    picks = np.random.choice(len(langs), size=n, p=weights, replace=True)
    return [langs[i] for i in picks]
class WikipediasDataset(Dataset):
    """Monolingual Wikipedia masked-language-model pretraining dataset.

    Every __getitem__ call builds one full batch: `batch_size` masked
    sentences sampled across languages, plus zero-filled placeholder image
    features (text-only pretraining).
    """
    def __init__(
        self,
        dataroot,
        lgs,
        lg_sampling_factor,
        tokenizer,
        batch_size=512,
        padding_index=0,
        max_seq_length=36,
        max_region_num=36,
        num_locs=5,
        add_global_imgfeat=None,
    ):
        super().__init__()
        self.num_locs = num_locs
        # Reserve one extra region slot when a global image feature is used.
        self._max_region_num = max_region_num + int(add_global_imgfeat is not None)
        self._max_seq_length = max_seq_length
        self._tokenizer = tokenizer
        self._padding_index = padding_index
        self._add_global_imgfeat = add_global_imgfeat
        # lgs == ["ALL"] means: use every language that has a dump in dataroot.
        if lgs == ["ALL"]:
            files = glob.glob(os.path.join(dataroot, "*.20180201.txt"))
            self.lgs = []
            for fn in files:
                self.lgs.append(fn.split("/")[-1].split(".")[0])
        else:
            self.lgs = lgs
        self.lg_sampling_factor = lg_sampling_factor
        self.n_sents = batch_size
        self.corpus = _load_corpus(dataroot, self.lgs)
        # Per-language corpus sizes and (optional) sampling probabilities.
        self.lg2lens = {lang: len(self.corpus[lang]) for lang in self.lgs}
        self.lg2prob = set_sampling_probs(self.corpus, self.lgs, coeff=lg_sampling_factor)
    def random_word(self, tokens, tokenizer):
        """BERT-style masking in place: 15% of positions are selected; of
        those, 80% become [MASK], 10% a random token, 10% unchanged.
        Returns (tokens, labels) with -1 labels on unselected positions."""
        output_label = []
        for i, token in enumerate(tokens):
            prob = random.random()
            # mask token with 15% probability
            if prob < 0.15:
                prob /= 0.15
                # 80% randomly change token to mask token
                if prob < 0.8:
                    tokens[i] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
                # 10% randomly change token to random token
                elif prob < 0.9:
                    tokens[i] = np.random.randint(len(tokenizer))
                # -> rest 10% randomly keep current token
                # append current token to output (we will predict these later)
                output_label.append(token)
            else:
                # no masking token (will be ignored by loss function later)
                output_label.append(-1)
        return tokens, output_label
    def tokenize(self, langs): #, min_seq_len=16):
        """Sample and tokenize one sentence per requested language.
        Each entry carries q_token/q_input_mask/q_segment_ids/q_label;
        -1 labels are ignored by the MLM loss.
        """
        # for ix, line in enumerate(self.corpus):
        entries = []
        for lang in langs:
            # Draw a random sentence index for this language.
            ix = np.random.choice(self.lg2lens[lang])
            line = self.corpus[lang][ix]['text']
            # tokenize
            tokens = self._tokenizer.encode(line)
            # add more tokens if len(tokens) < min_len
            # NOTE(review): len(self.corpus) is the number of *languages*
            # (self.corpus is a dict); the loop below advances modulo
            # len(self.corpus[lang]). This looks like it should be
            # len(self.corpus[lang]) here too -- confirm.
            _cur = (ix + 1) % len(self.corpus)
            while len(tokens) < self._max_seq_length-2:
                _cur_tokens = self._tokenizer.encode(self.corpus[lang][_cur]['text'])
                tokens += _cur_tokens
                _cur = (_cur + 1) % len(self.corpus[lang])
            # truncate
            tokens = tokens[:self._max_seq_length - 2]
            tokens, tokens_label = self.random_word(tokens, self._tokenizer)
            tokens = self._tokenizer.build_inputs_with_special_tokens(tokens)
            lm_label_ids = [-1] + tokens_label + [-1]
            segment_ids = [0] * len(tokens)
            input_mask = [1] * len(tokens)
            # assumes the tokenizer adds exactly two special tokens -- verify.
            assert_eq(len(tokens), self._max_seq_length)
            entry = {
                "q_token": tokens,
                "q_input_mask": input_mask,
                "q_segment_ids": segment_ids,
                "q_label": lm_label_ids,
            }
            entries.append(entry)
        return entries
    def tensorize(self, entries):
        # Convert every per-entry list into a torch tensor, in place.
        for entry in entries:
            question = torch.from_numpy(np.array(entry["q_token"]))
            entry["q_token"] = question
            q_input_mask = torch.from_numpy(np.array(entry["q_input_mask"]))
            entry["q_input_mask"] = q_input_mask
            q_segment_ids = torch.from_numpy(np.array(entry["q_segment_ids"]))
            entry["q_segment_ids"] = q_segment_ids
            q_label_ids = torch.from_numpy(np.array(entry["q_label"]))
            entry["q_label"] = q_label_ids
    def __getitem__(self, index=0):
        """Build one text-only batch; `index` is ignored (sampling is random).
        Image features / spatials / masks are zero-filled placeholders."""
        # Image
        # NOTE(review): self._max_region_num already includes the
        # global-image slot (see __init__); adding it again here may
        # double-count it -- confirm.
        max_region_num = self._max_region_num + int(self._add_global_imgfeat is not None)
        features = torch.zeros((self.n_sents, max_region_num, 2048), dtype=torch.float)
        spatials = torch.zeros((self.n_sents, max_region_num, self.num_locs), dtype=torch.float)
        image_mask = torch.zeros((self.n_sents, max_region_num), dtype=torch.long)
        # Text
        langs = shuf_order(self.lgs, self.lg2prob, self.lg_sampling_factor, n=self.n_sents)
        entries = self.tokenize(langs)
        self.tensorize(entries)
        input_ids = torch.stack([entry["q_token"] for entry in entries])
        input_mask = torch.stack([entry["q_input_mask"] for entry in entries])
        segment_ids = torch.stack([entry["q_segment_ids"] for entry in entries])
        lm_label_ids = torch.stack([entry["q_label"] for entry in entries])
        return input_ids, input_mask, segment_ids, lm_label_ids, features, spatials, image_mask
    def sample(self):
        # Convenience alias: draw one random batch.
        return self.__getitem__()
    def __len__(self):
        # NOTE: self.corpus is a dict keyed by language, so this is the
        # number of languages, not the number of sentences.
        return len(self.corpus)
| [
"logging.getLogger",
"numpy.random.choice",
"torch.stack",
"os.path.join",
"numpy.array",
"random.random",
"torch.zeros"
] | [((358, 385), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (375, 385), False, 'import logging\n'), ((1258, 1297), 'numpy.array', 'np.array', (['[(p ** coeff) for p in probs]'], {}), '([(p ** coeff) for p in probs])\n', (1266, 1297), True, 'import numpy as np\n'), ((631, 676), 'os.path.join', 'os.path.join', (['ann_dir', "('%s.20180201.txt' % lg)"], {}), "(ann_dir, '%s.20180201.txt' % lg)\n", (643, 676), False, 'import os\n'), ((1810, 1846), 'numpy.array', 'np.array', (['[lg2prob[k] for k in mono]'], {}), '([lg2prob[k] for k in mono])\n', (1818, 1846), True, 'import numpy as np\n'), ((6463, 6531), 'torch.zeros', 'torch.zeros', (['(self.n_sents, max_region_num, 2048)'], {'dtype': 'torch.float'}), '((self.n_sents, max_region_num, 2048), dtype=torch.float)\n', (6474, 6531), False, 'import torch\n'), ((6551, 6628), 'torch.zeros', 'torch.zeros', (['(self.n_sents, max_region_num, self.num_locs)'], {'dtype': 'torch.float'}), '((self.n_sents, max_region_num, self.num_locs), dtype=torch.float)\n', (6562, 6628), False, 'import torch\n'), ((6650, 6711), 'torch.zeros', 'torch.zeros', (['(self.n_sents, max_region_num)'], {'dtype': 'torch.long'}), '((self.n_sents, max_region_num), dtype=torch.long)\n', (6661, 6711), False, 'import torch\n'), ((6911, 6963), 'torch.stack', 'torch.stack', (["[entry['q_token'] for entry in entries]"], {}), "([entry['q_token'] for entry in entries])\n", (6922, 6963), False, 'import torch\n'), ((6985, 7042), 'torch.stack', 'torch.stack', (["[entry['q_input_mask'] for entry in entries]"], {}), "([entry['q_input_mask'] for entry in entries])\n", (6996, 7042), False, 'import torch\n'), ((7065, 7123), 'torch.stack', 'torch.stack', (["[entry['q_segment_ids'] for entry in entries]"], {}), "([entry['q_segment_ids'] for entry in entries])\n", (7076, 7123), False, 'import torch\n'), ((7147, 7199), 'torch.stack', 'torch.stack', (["[entry['q_label'] for entry in entries]"], {}), "([entry['q_label'] for entry in entries])\n", 
(7158, 7199), False, 'import torch\n'), ((3400, 3415), 'random.random', 'random.random', ([], {}), '()\n', (3413, 3415), False, 'import random\n'), ((4561, 4597), 'numpy.random.choice', 'np.random.choice', (['self.lg2lens[lang]'], {}), '(self.lg2lens[lang])\n', (4577, 4597), True, 'import numpy as np\n'), ((761, 797), 'os.path.join', 'os.path.join', (['ann_dir', '"""huggingface"""'], {}), "(ann_dir, 'huggingface')\n", (773, 797), False, 'import os\n'), ((2753, 2793), 'os.path.join', 'os.path.join', (['dataroot', '"""*.20180201.txt"""'], {}), "(dataroot, '*.20180201.txt')\n", (2765, 2793), False, 'import os\n'), ((5860, 5886), 'numpy.array', 'np.array', (["entry['q_token']"], {}), "(entry['q_token'])\n", (5868, 5886), True, 'import numpy as np\n'), ((5973, 6004), 'numpy.array', 'np.array', (["entry['q_input_mask']"], {}), "(entry['q_input_mask'])\n", (5981, 6004), True, 'import numpy as np\n'), ((6101, 6133), 'numpy.array', 'np.array', (["entry['q_segment_ids']"], {}), "(entry['q_segment_ids'])\n", (6109, 6133), True, 'import numpy as np\n'), ((6230, 6256), 'numpy.array', 'np.array', (["entry['q_label']"], {}), "(entry['q_label'])\n", (6238, 6256), True, 'import numpy as np\n')] |
from torch import nn
import numpy as np
class Flatten(nn.Module):
    """Collapse every non-batch dimension into a single feature axis."""

    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
def gen_fc_dim(cnn_config, feathers):
    """Compute the spatial size left after every conv (stride 1, no padding)
    followed by max-pool (stride 2) stage in `cnn_config`.

    cnn_config: (kernel_sizes, num_filters, pool_sizes); entries may be
    strings, hence the int() casts.
    """
    kernel_sizes = cnn_config[0]
    pool_sizes = cnn_config[2]
    size = feathers
    for layer_idx, ksize in enumerate(kernel_sizes):
        size = (size - int(ksize) + 1 - int(pool_sizes[layer_idx]) + 1) / 2
        size = int(np.ceil(size))
    return size
class CNN(nn.Module):
    """Stack of conv -> ReLU -> max-pool -> dropout stages followed by a
    single linear classification layer."""

    def __init__(self, num_input, ic, num_class, cnn_config):
        super(CNN, self).__init__()
        cnn_ksize, num_filters, max_pool_ksize = cnn_config
        self.cnn_config = cnn_config
        stages = []
        in_channels = ic
        for idd, filter_size in enumerate(cnn_ksize):
            stage = nn.Sequential()
            stage.add_module('conv_out_' + str(idd), nn.Conv2d(in_channels, num_filters[idd], int(filter_size), 1))
            stage.add_module('relu_' + str(idd), nn.ReLU())
            stage.add_module('max_pool_' + str(idd), nn.MaxPool2d(int(max_pool_ksize[idd]), 2))
            stage.add_module('dropout_' + str(idd), nn.Dropout(0.5))
            stages.append(stage)
            in_channels = num_filters[idd]
        self.flatten = Flatten()
        # Remaining spatial size after all conv/pool stages.
        fc_dim = gen_fc_dim(cnn_config, num_input)
        self.logits = nn.Linear(in_channels * fc_dim * fc_dim, num_class)
        self.block_list = nn.ModuleList(stages)

    def forward(self, x, **kwargs):
        for stage in self.block_list:
            x = stage(x)
        return self.logits(self.flatten(x))
| [
"numpy.ceil",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.ModuleList",
"torch.nn.Sequential",
"torch.nn.Linear"
] | [((1268, 1314), 'torch.nn.Linear', 'nn.Linear', (['(ic * feathers * feathers)', 'num_class'], {}), '(ic * feathers * feathers, num_class)\n', (1277, 1314), False, 'from torch import nn\n'), ((1342, 1367), 'torch.nn.ModuleList', 'nn.ModuleList', (['block_list'], {}), '(block_list)\n', (1355, 1367), False, 'from torch import nn\n'), ((361, 378), 'numpy.ceil', 'np.ceil', (['feathers'], {}), '(feathers)\n', (368, 378), True, 'import numpy as np\n'), ((730, 745), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (743, 745), False, 'from torch import nn\n'), ((904, 913), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (911, 913), False, 'from torch import nn\n'), ((1065, 1080), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1075, 1080), False, 'from torch import nn\n')] |
#------------------ Gaussian Pulse propagation through an interface with ABC at left & right boundaries ------------------
#------------------ Reflection & Transmission at the interface -----------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
length = 1
#----- Air----------
length_air = 0.5
eps0 = 8.854e-12
meu0 = 4*np.pi*1e-7
epsr = 1
meur = 1
eps = eps0*epsr
meu = meu0*meur
#------- Dielectric -----
epsrd = 4
epsd = eps0*epsrd
#---- Signal -----------
c = 3e8
freq=3e9
# Phase velocities in the two media; the dielectric (epsr=4) is slower.
vmax = c/np.sqrt(epsr*meur)
vmin = c/np.sqrt(epsrd*meur)
lamda_min = vmin/freq
#------ Cell length and time step---------
# dz resolves the shortest wavelength with 10 cells; dt satisfies the
# 1-D Courant condition dt = dz/vmax.
dz = lamda_min/10
dt = dz/vmax
N_cells = int(length/dz)
#------- Initialise arrays ------------
ex = np.zeros(N_cells)
hy = np.copy(ex)
const_e = np.copy(ex)
ex_abc = np.copy(ex)
#------ Multiplying constants--------
# Cell 100 (= length_air/dz) is the air/dielectric interface.
const_e[0:N_cells] = dt/(eps*dz)
const_e[100:N_cells] = dt/(epsd*dz)
const_h = dt/(meu*dz)
# First-order Mur ABC coefficients for the left (air) and right
# (dielectric) boundaries.
const_abc = (vmax*dt-dz)/(vmax*dt+dz)
const_abc1 = (vmin*dt-dz)/(vmin*dt+dz)
#------ Gaussian pulse parameters------------
Ts = 10*dt # pulse width
t0 = 3*Ts # delay
N_steps = 350 # maximum iteration steps
#************** Iteration loop ****************
for n in range (N_steps):
    time = n*dt
    #------- Gaussian pulse launched (soft source at cell 4) ------------
    pulse = (np.exp(-np.power(((time-t0)/Ts),2)))/dz
    ex[4] = ex[4] + pulse
    #------------------------ compute H -------------------------
    k = np.linspace(0, N_cells-2, N_cells-1, dtype = int)
    hy[k] = hy[k] - const_h*(ex[k+1]-ex[k])
    #------------------------- compute E ------------------------
    k = np.linspace(1, N_cells-2, N_cells-2, dtype = int)
    ex[k] = ex[k] - const_e[k]*(hy[k]-hy[k-1])
    #------------------------- ABC (uses previous-step field) ----------
    ex[0] = ex_abc[1] + const_abc*(ex[1]-ex[0])
    ex[N_cells-1] = ex_abc[N_cells-2] + const_abc1*(ex[N_cells-2]-ex[N_cells-1])
    ex_abc = np.copy(ex)
#------------------------ plot final field snapshot ----------------
plt.plot(np.linspace(1, N_cells, N_cells, dtype = int),ex)
plt.xlim(0,200)
plt.ylim((-110,110))
plt.grid()
plt.show()
| [
"numpy.copy",
"matplotlib.pyplot.grid",
"numpy.sqrt",
"numpy.power",
"numpy.zeros",
"numpy.linspace",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show"
] | [((797, 814), 'numpy.zeros', 'np.zeros', (['N_cells'], {}), '(N_cells)\n', (805, 814), True, 'import numpy as np\n'), ((821, 832), 'numpy.copy', 'np.copy', (['ex'], {}), '(ex)\n', (828, 832), True, 'import numpy as np\n'), ((844, 855), 'numpy.copy', 'np.copy', (['ex'], {}), '(ex)\n', (851, 855), True, 'import numpy as np\n'), ((866, 877), 'numpy.copy', 'np.copy', (['ex'], {}), '(ex)\n', (873, 877), True, 'import numpy as np\n'), ((570, 590), 'numpy.sqrt', 'np.sqrt', (['(epsr * meur)'], {}), '(epsr * meur)\n', (577, 590), True, 'import numpy as np\n'), ((599, 620), 'numpy.sqrt', 'np.sqrt', (['(epsrd * meur)'], {}), '(epsrd * meur)\n', (606, 620), True, 'import numpy as np\n'), ((1613, 1664), 'numpy.linspace', 'np.linspace', (['(0)', '(N_cells - 2)', '(N_cells - 1)'], {'dtype': 'int'}), '(0, N_cells - 2, N_cells - 1, dtype=int)\n', (1624, 1664), True, 'import numpy as np\n'), ((1800, 1851), 'numpy.linspace', 'np.linspace', (['(1)', '(N_cells - 2)', '(N_cells - 2)'], {'dtype': 'int'}), '(1, N_cells - 2, N_cells - 2, dtype=int)\n', (1811, 1851), True, 'import numpy as np\n'), ((2108, 2119), 'numpy.copy', 'np.copy', (['ex'], {}), '(ex)\n', (2115, 2119), True, 'import numpy as np\n'), ((2266, 2282), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(200)'], {}), '(0, 200)\n', (2274, 2282), True, 'import matplotlib.pyplot as plt\n'), ((2287, 2308), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-110, 110)'], {}), '((-110, 110))\n', (2295, 2308), True, 'import matplotlib.pyplot as plt\n'), ((2313, 2323), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2321, 2323), True, 'import matplotlib.pyplot as plt\n'), ((2329, 2339), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2337, 2339), True, 'import matplotlib.pyplot as plt\n'), ((2211, 2254), 'numpy.linspace', 'np.linspace', (['(1)', 'N_cells', 'N_cells'], {'dtype': 'int'}), '(1, N_cells, N_cells, dtype=int)\n', (2222, 2254), True, 'import numpy as np\n'), ((1453, 1482), 'numpy.power', 'np.power', (['((time - 
t0) / Ts)', '(2)'], {}), '((time - t0) / Ts, 2)\n', (1461, 1482), True, 'import numpy as np\n')] |
import pytest
import datetime
from pymapd._loaders import _build_input_rows
from pymapd import _pandas_loaders
from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData
import pandas as pd
import numpy as np
from omnisci.mapd.ttypes import TColumnType
from omnisci.common.ttypes import TTypeInfo
def assert_columnar_equal(result, expected):
    """Assert two sequences of TColumns match element-wise: same null flags
    and same int/real/str column data."""
    for got, want in zip(result, expected):
        np.testing.assert_array_equal(got.nulls, want.nulls)
        np.testing.assert_array_equal(got.data.int_col, want.data.int_col)
        np.testing.assert_array_equal(got.data.real_col, want.data.real_col)
        np.testing.assert_array_equal(got.data.str_col, want.data.str_col)
class TestLoaders:
    def test_build_input_rows(self):
        """_build_input_rows should stringify every value and wrap each input
        tuple as one TStringRow of TStringValues."""
        data = [(1, 'a'), (2, 'b')]
        result = _build_input_rows(data)
        expected = [TStringRow(cols=[TStringValue(str_val='1', is_null=None),
                                  TStringValue(str_val='a', is_null=None)]),
                    TStringRow(cols=[TStringValue(str_val='2', is_null=None),
                                  TStringValue(str_val='b', is_null=None)])]
        assert result == expected
    def test_build_input_rows_with_array(self):
        """List values must be encoded in OmniSci's brace-delimited array
        literal form, e.g. ['c', 'd', 'e'] -> '{c,d,e}'."""
        data = [(1, 'a'), (2, 'b'), (3, ['c', 'd', 'e'])]
        result = _build_input_rows(data)
        expected = [TStringRow(cols=[TStringValue(str_val='1', is_null=None),
                                  TStringValue(str_val='a', is_null=None)]),
                    TStringRow(cols=[TStringValue(str_val='2', is_null=None),
                                  TStringValue(str_val='b', is_null=None)]),
                    TStringRow(cols=[TStringValue(str_val='3', is_null=None),
                                  TStringValue(str_val='{c,d,e}',
                                               is_null=None)])]
        assert result == expected
    def test_build_table_columnar(self):
        """A two-column DataFrame (int, float) should map to one int_col and
        one real_col TColumn, with all-False null flags."""
        from pymapd._pandas_loaders import build_input_columnar
        data = pd.DataFrame({"a": [1, 2, 3], "b": [1.1, 2.2, 3.3]})
        nulls = [False] * 3
        result = build_input_columnar(data, preserve_index=False)
        expected = [
            TColumn(TColumnData(int_col=[1, 2, 3]), nulls=nulls),
            TColumn(TColumnData(real_col=[1.1, 2.2, 3.3]), nulls=nulls)
        ]
        assert_columnar_equal(result[0], expected)
def test_build_table_columnar_pandas(self):
data = pd.DataFrame({
"boolean_": [True, False],
"smallint_": np.array([0, 1], dtype=np.int16),
"int_": np.array([0, 1], dtype=np.int32),
"bigint_": np.array([0, 1], dtype=np.int64),
"float_": np.array([0, 1], dtype=np.float32),
"double_": np.array([0, 1], dtype=np.float64),
"varchar_": ["a", "b"],
"text_": ['a', 'b'],
"time_": [datetime.time(0, 11, 59), datetime.time(13)],
"timestamp_": [pd.Timestamp("2016"), pd.Timestamp("2017")],
"date_": [datetime.date(2016, 1, 1), datetime.date(2017, 1, 1)],
}, columns=['boolean_', 'smallint_', 'int_', 'bigint_', 'float_',
'double_', 'varchar_', 'text_', 'time_', 'timestamp_',
'date_'])
result = _pandas_loaders.build_input_columnar(data,
preserve_index=False)
nulls = [False, False]
expected = [
TColumn(TColumnData(int_col=[True, False]), nulls=nulls),
TColumn(TColumnData(int_col=np.array([0, 1], dtype=np.int16)), nulls=nulls), # noqa
TColumn(TColumnData(int_col=np.array([0, 1], dtype=np.int32)), nulls=nulls), # noqa
TColumn(TColumnData(int_col=np.array([0, 1], dtype=np.int64)), nulls=nulls), # noqa
TColumn(TColumnData(real_col=np.array([0, 1], dtype=np.float32)), nulls=nulls), # noqa
TColumn(TColumnData(real_col=np.array([0, 1], dtype=np.float64)), nulls=nulls), # noqa
TColumn(TColumnData(str_col=['a', 'b']), nulls=nulls),
TColumn(TColumnData(str_col=['a', 'b']), nulls=nulls),
TColumn(TColumnData(int_col=[719, 46800]), nulls=nulls),
TColumn(TColumnData(int_col=[1451606400, 1483228800]), nulls=nulls), # noqa
TColumn(TColumnData(int_col=[1451606400, 1483228800]), nulls=nulls)
]
assert_columnar_equal(result[0], expected)
def test_build_table_columnar_nulls(self):
data = pd.DataFrame({
"boolean_": [True, False, None],
# Currently Pandas does not support storing None or NaN
# in integer columns, so int cols with null
# need to be objects. This means our type detection will be
# unreliable since if there is no number outside the int32
# bounds in a column with nulls then we will be assuming int
"int_": np.array([0, 1, None], dtype=np.object),
"bigint_": np.array([0, 9223372036854775807, None],
dtype=np.object),
"double_": np.array([0, 1, None], dtype=np.float64),
"varchar_": ["a", "b", None],
"text_": ['a', 'b', None],
"time_": [datetime.time(0, 11, 59), datetime.time(13), None],
"timestamp_": [pd.Timestamp("2016"), pd.Timestamp("2017"), None],
"date_": [datetime.date(1001, 1, 1), datetime.date(2017, 1, 1),
None],
}, columns=['boolean_', 'int_', 'bigint_',
'double_', 'varchar_', 'text_', 'time_', 'timestamp_',
'date_'])
result = _pandas_loaders.build_input_columnar(data,
preserve_index=False)
nulls = [False, False, True]
bool_na = -128
int_na = -2147483648
bigint_na = -9223372036854775808
ns_na = -9223372037
double_na = 0
expected = [
TColumn(TColumnData(int_col=[1, 0, bool_na]), nulls=nulls),
TColumn(TColumnData(int_col=np.array([0, 1, int_na], dtype=np.int32)), nulls=nulls), # noqa
TColumn(TColumnData(int_col=np.array([0, 9223372036854775807, bigint_na], dtype=np.int64)), nulls=nulls), # noqa
TColumn(TColumnData(real_col=np.array([0, 1, double_na], dtype=np.float64)), nulls=nulls), # noqa
TColumn(TColumnData(str_col=['a', 'b', '']), nulls=nulls),
TColumn(TColumnData(str_col=['a', 'b', '']), nulls=nulls),
TColumn(TColumnData(int_col=[719, 46800, bigint_na]), nulls=nulls),
TColumn(TColumnData(int_col=[1451606400, 1483228800, ns_na]), nulls=nulls), # noqa
TColumn(TColumnData(int_col=[-30578688000, 1483228800, bigint_na]), nulls=nulls) # noqa
]
assert_columnar_equal(result[0], expected)
def test_build_row_desc(self):
data = pd.DataFrame({
"boolean_": [True, False],
"smallint_": np.array([0, 1], dtype=np.int16),
"int_": np.array([0, 1], dtype=np.int32),
"bigint_": np.array([0, 1], dtype=np.int64),
"float_": np.array([0, 1], dtype=np.float32),
"double_": np.array([0, 1], dtype=np.float64),
"varchar_": ["a", "b"],
"text_": ['a', 'b'],
"time_": [datetime.time(0, 11, 59), datetime.time(13)],
"timestamp_": [pd.Timestamp("2016"), pd.Timestamp("2017")],
"date_": [datetime.date(2016, 1, 1), datetime.date(2017, 1, 1)],
}, columns=['boolean_', 'smallint_', 'int_', 'bigint_', 'float_',
'double_', 'varchar_', 'text_', 'time_', 'timestamp_',
'date_'])
result = _pandas_loaders.build_row_desc(data)
expected = [
TColumnType(col_name='boolean_',
col_type=TTypeInfo(type=10),
is_reserved_keyword=None),
TColumnType(col_name='smallint_',
col_type=TTypeInfo(type=0),
is_reserved_keyword=None),
TColumnType(col_name='int_',
col_type=TTypeInfo(type=1),
is_reserved_keyword=None),
TColumnType(col_name='bigint_',
col_type=TTypeInfo(type=2)),
TColumnType(col_name='float_',
col_type=TTypeInfo(type=3)),
TColumnType(col_name='double_',
col_type=TTypeInfo(type=5)),
TColumnType(col_name='varchar_',
col_type=TTypeInfo(type=6, encoding=4)),
TColumnType(col_name='text_',
col_type=TTypeInfo(type=6, encoding=4)),
TColumnType(col_name='time_',
col_type=TTypeInfo(type=7)),
TColumnType(col_name='timestamp_',
col_type=TTypeInfo(type=8)),
TColumnType(col_name='date_',
col_type=TTypeInfo(type=9))
]
assert result == expected
data.index.name = 'idx'
result = _pandas_loaders.build_row_desc(data, preserve_index=True)
expected.insert(0, TColumnType(col_name='idx',
col_type=TTypeInfo(type=2)))
assert result == expected
def test_create_non_pandas_raises(self):
with pytest.raises(TypeError) as m:
_pandas_loaders.build_row_desc([(1, 'a'), (2, 'b')])
assert m.match('is not supported for type ')
| [
"datetime.time",
"omnisci.common.ttypes.TTypeInfo",
"pymapd._pandas_loaders.build_row_desc",
"numpy.array",
"pytest.raises",
"omnisci.mapd.MapD.TColumnData",
"datetime.date",
"omnisci.mapd.MapD.TStringValue",
"pandas.DataFrame",
"pymapd._pandas_loaders.build_input_columnar",
"pandas.Timestamp",
... | [((426, 473), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['a.nulls', 'b.nulls'], {}), '(a.nulls, b.nulls)\n', (455, 473), True, 'import numpy as np\n'), ((482, 543), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['a.data.int_col', 'b.data.int_col'], {}), '(a.data.int_col, b.data.int_col)\n', (511, 543), True, 'import numpy as np\n'), ((552, 615), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['a.data.real_col', 'b.data.real_col'], {}), '(a.data.real_col, b.data.real_col)\n', (581, 615), True, 'import numpy as np\n'), ((624, 685), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['a.data.str_col', 'b.data.str_col'], {}), '(a.data.str_col, b.data.str_col)\n', (653, 685), True, 'import numpy as np\n'), ((798, 821), 'pymapd._loaders._build_input_rows', '_build_input_rows', (['data'], {}), '(data)\n', (815, 821), False, 'from pymapd._loaders import _build_input_rows\n'), ((1297, 1320), 'pymapd._loaders._build_input_rows', '_build_input_rows', (['data'], {}), '(data)\n', (1314, 1320), False, 'from pymapd._loaders import _build_input_rows\n'), ((2009, 2061), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 2, 3], 'b': [1.1, 2.2, 3.3]}"], {}), "({'a': [1, 2, 3], 'b': [1.1, 2.2, 3.3]})\n", (2021, 2061), True, 'import pandas as pd\n'), ((2107, 2155), 'pymapd._pandas_loaders.build_input_columnar', 'build_input_columnar', (['data'], {'preserve_index': '(False)'}), '(data, preserve_index=False)\n', (2127, 2155), False, 'from pymapd._pandas_loaders import build_input_columnar\n'), ((3264, 3328), 'pymapd._pandas_loaders.build_input_columnar', '_pandas_loaders.build_input_columnar', (['data'], {'preserve_index': '(False)'}), '(data, preserve_index=False)\n', (3300, 3328), False, 'from pymapd import _pandas_loaders\n'), ((5645, 5709), 'pymapd._pandas_loaders.build_input_columnar', '_pandas_loaders.build_input_columnar', (['data'], {'preserve_index': '(False)'}), '(data, 
preserve_index=False)\n', (5681, 5709), False, 'from pymapd import _pandas_loaders\n'), ((7736, 7772), 'pymapd._pandas_loaders.build_row_desc', '_pandas_loaders.build_row_desc', (['data'], {}), '(data)\n', (7766, 7772), False, 'from pymapd import _pandas_loaders\n'), ((9127, 9184), 'pymapd._pandas_loaders.build_row_desc', '_pandas_loaders.build_row_desc', (['data'], {'preserve_index': '(True)'}), '(data, preserve_index=True)\n', (9157, 9184), False, 'from pymapd import _pandas_loaders\n'), ((9402, 9426), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (9415, 9426), False, 'import pytest\n'), ((9445, 9497), 'pymapd._pandas_loaders.build_row_desc', '_pandas_loaders.build_row_desc', (["[(1, 'a'), (2, 'b')]"], {}), "([(1, 'a'), (2, 'b')])\n", (9475, 9497), False, 'from pymapd import _pandas_loaders\n'), ((2197, 2227), 'omnisci.mapd.MapD.TColumnData', 'TColumnData', ([], {'int_col': '[1, 2, 3]'}), '(int_col=[1, 2, 3])\n', (2208, 2227), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((2263, 2300), 'omnisci.mapd.MapD.TColumnData', 'TColumnData', ([], {'real_col': '[1.1, 2.2, 3.3]'}), '(real_col=[1.1, 2.2, 3.3])\n', (2274, 2300), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((2520, 2552), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.int16'}), '([0, 1], dtype=np.int16)\n', (2528, 2552), True, 'import numpy as np\n'), ((2574, 2606), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.int32'}), '([0, 1], dtype=np.int32)\n', (2582, 2606), True, 'import numpy as np\n'), ((2631, 2663), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.int64'}), '([0, 1], dtype=np.int64)\n', (2639, 2663), True, 'import numpy as np\n'), ((2687, 2721), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.float32'}), '([0, 1], dtype=np.float32)\n', (2695, 2721), True, 'import numpy as np\n'), ((2746, 2780), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.float64'}), 
'([0, 1], dtype=np.float64)\n', (2754, 2780), True, 'import numpy as np\n'), ((3456, 3490), 'omnisci.mapd.MapD.TColumnData', 'TColumnData', ([], {'int_col': '[True, False]'}), '(int_col=[True, False])\n', (3467, 3490), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((4017, 4048), 'omnisci.mapd.MapD.TColumnData', 'TColumnData', ([], {'str_col': "['a', 'b']"}), "(str_col=['a', 'b'])\n", (4028, 4048), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((4084, 4115), 'omnisci.mapd.MapD.TColumnData', 'TColumnData', ([], {'str_col': "['a', 'b']"}), "(str_col=['a', 'b'])\n", (4095, 4115), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((4151, 4184), 'omnisci.mapd.MapD.TColumnData', 'TColumnData', ([], {'int_col': '[719, 46800]'}), '(int_col=[719, 46800])\n', (4162, 4184), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((4220, 4265), 'omnisci.mapd.MapD.TColumnData', 'TColumnData', ([], {'int_col': '[1451606400, 1483228800]'}), '(int_col=[1451606400, 1483228800])\n', (4231, 4265), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((4309, 4354), 'omnisci.mapd.MapD.TColumnData', 'TColumnData', ([], {'int_col': '[1451606400, 1483228800]'}), '(int_col=[1451606400, 1483228800])\n', (4320, 4354), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((4914, 4953), 'numpy.array', 'np.array', (['[0, 1, None]'], {'dtype': 'np.object'}), '([0, 1, None], dtype=np.object)\n', (4922, 4953), True, 'import numpy as np\n'), ((4978, 5035), 'numpy.array', 'np.array', (['[0, 9223372036854775807, None]'], {'dtype': 'np.object'}), '([0, 9223372036854775807, None], dtype=np.object)\n', (4986, 5035), True, 'import numpy as np\n'), ((5092, 5132), 'numpy.array', 'np.array', (['[0, 1, None]'], {'dtype': 'np.float64'}), '([0, 1, None], dtype=np.float64)\n', 
(5100, 5132), True, 'import numpy as np\n'), ((5987, 6023), 'omnisci.mapd.MapD.TColumnData', 'TColumnData', ([], {'int_col': '[1, 0, bool_na]'}), '(int_col=[1, 0, bool_na])\n', (5998, 6023), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((6401, 6436), 'omnisci.mapd.MapD.TColumnData', 'TColumnData', ([], {'str_col': "['a', 'b', '']"}), "(str_col=['a', 'b', ''])\n", (6412, 6436), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((6472, 6507), 'omnisci.mapd.MapD.TColumnData', 'TColumnData', ([], {'str_col': "['a', 'b', '']"}), "(str_col=['a', 'b', ''])\n", (6483, 6507), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((6543, 6587), 'omnisci.mapd.MapD.TColumnData', 'TColumnData', ([], {'int_col': '[719, 46800, bigint_na]'}), '(int_col=[719, 46800, bigint_na])\n', (6554, 6587), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((6623, 6675), 'omnisci.mapd.MapD.TColumnData', 'TColumnData', ([], {'int_col': '[1451606400, 1483228800, ns_na]'}), '(int_col=[1451606400, 1483228800, ns_na])\n', (6634, 6675), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((6719, 6777), 'omnisci.mapd.MapD.TColumnData', 'TColumnData', ([], {'int_col': '[-30578688000, 1483228800, bigint_na]'}), '(int_col=[-30578688000, 1483228800, bigint_na])\n', (6730, 6777), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((6992, 7024), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.int16'}), '([0, 1], dtype=np.int16)\n', (7000, 7024), True, 'import numpy as np\n'), ((7046, 7078), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.int32'}), '([0, 1], dtype=np.int32)\n', (7054, 7078), True, 'import numpy as np\n'), ((7103, 7135), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.int64'}), '([0, 1], dtype=np.int64)\n', (7111, 7135), True, 'import numpy 
as np\n'), ((7159, 7193), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.float32'}), '([0, 1], dtype=np.float32)\n', (7167, 7193), True, 'import numpy as np\n'), ((7218, 7252), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.float64'}), '([0, 1], dtype=np.float64)\n', (7226, 7252), True, 'import numpy as np\n'), ((2873, 2897), 'datetime.time', 'datetime.time', (['(0)', '(11)', '(59)'], {}), '(0, 11, 59)\n', (2886, 2897), False, 'import datetime\n'), ((2899, 2916), 'datetime.time', 'datetime.time', (['(13)'], {}), '(13)\n', (2912, 2916), False, 'import datetime\n'), ((2946, 2966), 'pandas.Timestamp', 'pd.Timestamp', (['"""2016"""'], {}), "('2016')\n", (2958, 2966), True, 'import pandas as pd\n'), ((2968, 2988), 'pandas.Timestamp', 'pd.Timestamp', (['"""2017"""'], {}), "('2017')\n", (2980, 2988), True, 'import pandas as pd\n'), ((3013, 3038), 'datetime.date', 'datetime.date', (['(2016)', '(1)', '(1)'], {}), '(2016, 1, 1)\n', (3026, 3038), False, 'import datetime\n'), ((3040, 3065), 'datetime.date', 'datetime.date', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (3053, 3065), False, 'import datetime\n'), ((5237, 5261), 'datetime.time', 'datetime.time', (['(0)', '(11)', '(59)'], {}), '(0, 11, 59)\n', (5250, 5261), False, 'import datetime\n'), ((5263, 5280), 'datetime.time', 'datetime.time', (['(13)'], {}), '(13)\n', (5276, 5280), False, 'import datetime\n'), ((5316, 5336), 'pandas.Timestamp', 'pd.Timestamp', (['"""2016"""'], {}), "('2016')\n", (5328, 5336), True, 'import pandas as pd\n'), ((5338, 5358), 'pandas.Timestamp', 'pd.Timestamp', (['"""2017"""'], {}), "('2017')\n", (5350, 5358), True, 'import pandas as pd\n'), ((5389, 5414), 'datetime.date', 'datetime.date', (['(1001)', '(1)', '(1)'], {}), '(1001, 1, 1)\n', (5402, 5414), False, 'import datetime\n'), ((5416, 5441), 'datetime.date', 'datetime.date', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (5429, 5441), False, 'import datetime\n'), ((7345, 7369), 'datetime.time', 'datetime.time', 
(['(0)', '(11)', '(59)'], {}), '(0, 11, 59)\n', (7358, 7369), False, 'import datetime\n'), ((7371, 7388), 'datetime.time', 'datetime.time', (['(13)'], {}), '(13)\n', (7384, 7388), False, 'import datetime\n'), ((7418, 7438), 'pandas.Timestamp', 'pd.Timestamp', (['"""2016"""'], {}), "('2016')\n", (7430, 7438), True, 'import pandas as pd\n'), ((7440, 7460), 'pandas.Timestamp', 'pd.Timestamp', (['"""2017"""'], {}), "('2017')\n", (7452, 7460), True, 'import pandas as pd\n'), ((7485, 7510), 'datetime.date', 'datetime.date', (['(2016)', '(1)', '(1)'], {}), '(2016, 1, 1)\n', (7498, 7510), False, 'import datetime\n'), ((7512, 7537), 'datetime.date', 'datetime.date', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (7525, 7537), False, 'import datetime\n'), ((7872, 7890), 'omnisci.common.ttypes.TTypeInfo', 'TTypeInfo', ([], {'type': '(10)'}), '(type=10)\n', (7881, 7890), False, 'from omnisci.common.ttypes import TTypeInfo\n'), ((8022, 8039), 'omnisci.common.ttypes.TTypeInfo', 'TTypeInfo', ([], {'type': '(0)'}), '(type=0)\n', (8031, 8039), False, 'from omnisci.common.ttypes import TTypeInfo\n'), ((8166, 8183), 'omnisci.common.ttypes.TTypeInfo', 'TTypeInfo', ([], {'type': '(1)'}), '(type=1)\n', (8175, 8183), False, 'from omnisci.common.ttypes import TTypeInfo\n'), ((8313, 8330), 'omnisci.common.ttypes.TTypeInfo', 'TTypeInfo', ([], {'type': '(2)'}), '(type=2)\n', (8322, 8330), False, 'from omnisci.common.ttypes import TTypeInfo\n'), ((8409, 8426), 'omnisci.common.ttypes.TTypeInfo', 'TTypeInfo', ([], {'type': '(3)'}), '(type=3)\n', (8418, 8426), False, 'from omnisci.common.ttypes import TTypeInfo\n'), ((8506, 8523), 'omnisci.common.ttypes.TTypeInfo', 'TTypeInfo', ([], {'type': '(5)'}), '(type=5)\n', (8515, 8523), False, 'from omnisci.common.ttypes import TTypeInfo\n'), ((8604, 8633), 'omnisci.common.ttypes.TTypeInfo', 'TTypeInfo', ([], {'type': '(6)', 'encoding': '(4)'}), '(type=6, encoding=4)\n', (8613, 8633), False, 'from omnisci.common.ttypes import TTypeInfo\n'), ((8711, 
8740), 'omnisci.common.ttypes.TTypeInfo', 'TTypeInfo', ([], {'type': '(6)', 'encoding': '(4)'}), '(type=6, encoding=4)\n', (8720, 8740), False, 'from omnisci.common.ttypes import TTypeInfo\n'), ((8818, 8835), 'omnisci.common.ttypes.TTypeInfo', 'TTypeInfo', ([], {'type': '(7)'}), '(type=7)\n', (8827, 8835), False, 'from omnisci.common.ttypes import TTypeInfo\n'), ((8918, 8935), 'omnisci.common.ttypes.TTypeInfo', 'TTypeInfo', ([], {'type': '(8)'}), '(type=8)\n', (8927, 8935), False, 'from omnisci.common.ttypes import TTypeInfo\n'), ((9013, 9030), 'omnisci.common.ttypes.TTypeInfo', 'TTypeInfo', ([], {'type': '(9)'}), '(type=9)\n', (9022, 9030), False, 'from omnisci.common.ttypes import TTypeInfo\n'), ((9288, 9305), 'omnisci.common.ttypes.TTypeInfo', 'TTypeInfo', ([], {'type': '(2)'}), '(type=2)\n', (9297, 9305), False, 'from omnisci.common.ttypes import TTypeInfo\n'), ((859, 898), 'omnisci.mapd.MapD.TStringValue', 'TStringValue', ([], {'str_val': '"""1"""', 'is_null': 'None'}), "(str_val='1', is_null=None)\n", (871, 898), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((937, 976), 'omnisci.mapd.MapD.TStringValue', 'TStringValue', ([], {'str_val': '"""a"""', 'is_null': 'None'}), "(str_val='a', is_null=None)\n", (949, 976), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((1017, 1056), 'omnisci.mapd.MapD.TStringValue', 'TStringValue', ([], {'str_val': '"""2"""', 'is_null': 'None'}), "(str_val='2', is_null=None)\n", (1029, 1056), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((1095, 1134), 'omnisci.mapd.MapD.TStringValue', 'TStringValue', ([], {'str_val': '"""b"""', 'is_null': 'None'}), "(str_val='b', is_null=None)\n", (1107, 1134), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((1358, 1397), 'omnisci.mapd.MapD.TStringValue', 'TStringValue', ([], {'str_val': '"""1"""', 'is_null': 'None'}), 
"(str_val='1', is_null=None)\n", (1370, 1397), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((1436, 1475), 'omnisci.mapd.MapD.TStringValue', 'TStringValue', ([], {'str_val': '"""a"""', 'is_null': 'None'}), "(str_val='a', is_null=None)\n", (1448, 1475), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((1516, 1555), 'omnisci.mapd.MapD.TStringValue', 'TStringValue', ([], {'str_val': '"""2"""', 'is_null': 'None'}), "(str_val='2', is_null=None)\n", (1528, 1555), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((1594, 1633), 'omnisci.mapd.MapD.TStringValue', 'TStringValue', ([], {'str_val': '"""b"""', 'is_null': 'None'}), "(str_val='b', is_null=None)\n", (1606, 1633), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((1674, 1713), 'omnisci.mapd.MapD.TStringValue', 'TStringValue', ([], {'str_val': '"""3"""', 'is_null': 'None'}), "(str_val='3', is_null=None)\n", (1686, 1713), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((1752, 1797), 'omnisci.mapd.MapD.TStringValue', 'TStringValue', ([], {'str_val': '"""{c,d,e}"""', 'is_null': 'None'}), "(str_val='{c,d,e}', is_null=None)\n", (1764, 1797), False, 'from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData\n'), ((3546, 3578), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.int16'}), '([0, 1], dtype=np.int16)\n', (3554, 3578), True, 'import numpy as np\n'), ((3643, 3675), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.int32'}), '([0, 1], dtype=np.int32)\n', (3651, 3675), True, 'import numpy as np\n'), ((3740, 3772), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.int64'}), '([0, 1], dtype=np.int64)\n', (3748, 3772), True, 'import numpy as np\n'), ((3838, 3872), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.float32'}), '([0, 1], dtype=np.float32)\n', (3846, 3872), True, 
'import numpy as np\n'), ((3938, 3972), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.float64'}), '([0, 1], dtype=np.float64)\n', (3946, 3972), True, 'import numpy as np\n'), ((6079, 6119), 'numpy.array', 'np.array', (['[0, 1, int_na]'], {'dtype': 'np.int32'}), '([0, 1, int_na], dtype=np.int32)\n', (6087, 6119), True, 'import numpy as np\n'), ((6184, 6245), 'numpy.array', 'np.array', (['[0, 9223372036854775807, bigint_na]'], {'dtype': 'np.int64'}), '([0, 9223372036854775807, bigint_na], dtype=np.int64)\n', (6192, 6245), True, 'import numpy as np\n'), ((6311, 6356), 'numpy.array', 'np.array', (['[0, 1, double_na]'], {'dtype': 'np.float64'}), '([0, 1, double_na], dtype=np.float64)\n', (6319, 6356), True, 'import numpy as np\n')] |
from typing import Any, Dict, Iterable, Tuple, Union
import numpy as np
from cirq import linalg, protocols, value
from cirq._compat import proper_repr
from cirq.ops import raw_types
class MixedUnitaryChannel(raw_types.Gate):
    """A generic mixture that can record the index of its selected operator.
    This type of object is also referred to as a mixed-unitary channel.
    Args:
        mixture: a list of (probability, qubit unitary) pairs
        key: an optional measurement key string for this mixture. Simulations
            which select a single unitary to apply will store the index
            of that unitary in the measurement result list with this key.
        validate: if True, validate that `mixture` describes a valid mixture.
            This validation can be slow; prefer pre-validating if possible.
    """
    def __init__(
        self,
        mixture: Iterable[Tuple[float, np.ndarray]],
        key: Union[str, value.MeasurementKey, None] = None,
        validate: bool = False,
    ):
        mixture = list(mixture)
        if not mixture:
            raise ValueError('MixedUnitaryChannel must have at least one unitary.')
        # Probabilities must form a distribution; approx_eq tolerates
        # floating-point rounding in the sum.
        if not protocols.approx_eq(sum(p[0] for p in mixture), 1):
            raise ValueError('Unitary probabilities must sum to 1.')
        m0 = mixture[0][1]
        # Operators must be square with a power-of-two dimension 2**n.
        num_qubits = np.log2(m0.shape[0])
        if not num_qubits.is_integer() or m0.shape[1] != m0.shape[0]:
            raise ValueError(
                f'Input mixture of shape {m0.shape} does not '
                'represent a square operator over qubits.'
            )
        self._num_qubits = int(num_qubits)
        # All operators must share the first operator's shape; unitarity is
        # only checked when `validate` is set (it is comparatively slow).
        for i, op in enumerate(p[1] for p in mixture):
            if not op.shape == m0.shape:
                raise ValueError(
                    f'Inconsistent unitary shapes: op[0]: {m0.shape}, op[{i}]: {op.shape}'
                )
            if validate and not linalg.is_unitary(op):
                raise ValueError(f'Element {i} of mixture is non-unitary.')
        self._mixture = mixture
        # Normalize a plain string key into a MeasurementKey object.
        if not isinstance(key, value.MeasurementKey) and key is not None:
            key = value.MeasurementKey(key)
        self._key = key
    @staticmethod
    def from_mixture(
        mixture: 'protocols.SupportsMixture', key: Union[str, value.MeasurementKey, None] = None
    ):
        """Creates a copy of a mixture with the given measurement key."""
        return MixedUnitaryChannel(mixture=list(protocols.mixture(mixture)), key=key)
    def __eq__(self, other) -> bool:
        """Equal iff keys, probabilities, and unitaries all match (approx.)."""
        if not isinstance(other, MixedUnitaryChannel):
            return NotImplemented
        if self._key != other._key:
            return False
        # Compare the probability vector first, then the unitaries.
        if not np.allclose(
            [m[0] for m in self._mixture],
            [m[0] for m in other._mixture],
        ):
            return False
        return np.allclose(
            [m[1] for m in self._mixture],
            [m[1] for m in other._mixture],
        )
    def num_qubits(self) -> int:
        return self._num_qubits
    def _mixture_(self):
        # Cirq mixture protocol: returns the (probability, unitary) pairs.
        return self._mixture
    def _measurement_key_name_(self) -> str:
        # Measurement-key protocol; NotImplemented signals "no key set".
        if self._key is None:
            return NotImplemented
        return str(self._key)
    def _measurement_key_obj_(self) -> value.MeasurementKey:
        if self._key is None:
            return NotImplemented
        return self._key
    def _with_measurement_key_mapping_(self, key_map: Dict[str, str]):
        # Rename this channel's key per key_map; unchanged if not mapped.
        if self._key is None:
            return NotImplemented
        if self._key not in key_map:
            return self
        return MixedUnitaryChannel(mixture=self._mixture, key=key_map[str(self._key)])
    def _with_key_path_(self, path: Tuple[str, ...]):
        return MixedUnitaryChannel(
            mixture=self._mixture, key=protocols.with_key_path(self._key, path)
        )
    def __str__(self):
        if self._key is not None:
            return f'MixedUnitaryChannel({self._mixture}, key={self._key})'
        return f'MixedUnitaryChannel({self._mixture})'
    def __repr__(self):
        # proper_repr emits an eval-able representation of each ndarray.
        unitary_tuples = [
            '(' + repr(op[0]) + ', ' + proper_repr(op[1]) + ')' for op in self._mixture
        ]
        args = [f'mixture=[{", ".join(unitary_tuples)}]']
        if self._key is not None:
            args.append(f'key=\'{self._key}\'')
        return f'cirq.MixedUnitaryChannel({", ".join(args)})'
    def _json_dict_(self) -> Dict[str, Any]:
        return protocols.obj_to_dict_helper(self, ['_mixture', '_key'])
    @classmethod
    def _from_json_dict_(cls, _mixture, _key, **kwargs):
        """Reconstructs the channel from its JSON dictionary form."""
        mix_pairs = [(m[0], np.asarray(m[1])) for m in _mixture]
        return cls(mixture=mix_pairs, key=_key)
| [
"cirq.protocols.with_key_path",
"numpy.allclose",
"numpy.asarray",
"cirq.protocols.mixture",
"cirq.value.MeasurementKey",
"numpy.log2",
"cirq._compat.proper_repr",
"cirq.protocols.obj_to_dict_helper",
"cirq.linalg.is_unitary"
] | [((1346, 1366), 'numpy.log2', 'np.log2', (['m0.shape[0]'], {}), '(m0.shape[0])\n', (1353, 1366), True, 'import numpy as np\n'), ((2849, 2923), 'numpy.allclose', 'np.allclose', (['[m[1] for m in self._mixture]', '[m[1] for m in other._mixture]'], {}), '([m[1] for m in self._mixture], [m[1] for m in other._mixture])\n', (2860, 2923), True, 'import numpy as np\n'), ((4438, 4494), 'cirq.protocols.obj_to_dict_helper', 'protocols.obj_to_dict_helper', (['self', "['_mixture', '_key']"], {}), "(self, ['_mixture', '_key'])\n", (4466, 4494), False, 'from cirq import linalg, protocols, value\n'), ((2140, 2165), 'cirq.value.MeasurementKey', 'value.MeasurementKey', (['key'], {}), '(key)\n', (2160, 2165), False, 'from cirq import linalg, protocols, value\n'), ((2698, 2772), 'numpy.allclose', 'np.allclose', (['[m[0] for m in self._mixture]', '[m[0] for m in other._mixture]'], {}), '([m[0] for m in self._mixture], [m[0] for m in other._mixture])\n', (2709, 2772), True, 'import numpy as np\n'), ((3785, 3825), 'cirq.protocols.with_key_path', 'protocols.with_key_path', (['self._key', 'path'], {}), '(self._key, path)\n', (3808, 3825), False, 'from cirq import linalg, protocols, value\n'), ((4598, 4614), 'numpy.asarray', 'np.asarray', (['m[1]'], {}), '(m[1])\n', (4608, 4614), True, 'import numpy as np\n'), ((1917, 1938), 'cirq.linalg.is_unitary', 'linalg.is_unitary', (['op'], {}), '(op)\n', (1934, 1938), False, 'from cirq import linalg, protocols, value\n'), ((2457, 2483), 'cirq.protocols.mixture', 'protocols.mixture', (['mixture'], {}), '(mixture)\n', (2474, 2483), False, 'from cirq import linalg, protocols, value\n'), ((4116, 4134), 'cirq._compat.proper_repr', 'proper_repr', (['op[1]'], {}), '(op[1])\n', (4127, 4134), False, 'from cirq._compat import proper_repr\n')] |
import torch
import torch.nn.functional as F
import numpy as np
import logging
def test(step, dataset_test, filename, n_share, unk_class, G, C1, threshold):
G.eval()
C1.eval()
correct = 0
correct_close = 0
size = 0
class_list = [i for i in range(n_share)]
class_list.append(unk_class)
per_class_num = np.zeros((n_share + 1))
per_class_correct = np.zeros((n_share + 1)).astype(np.float32)
per_class_correct_cls = np.zeros((n_share + 1)).astype(np.float32)
all_pred = []
all_gt = []
for batch_idx, data in enumerate(dataset_test):
with torch.no_grad():
img_t, label_t, path_t = data[0], data[1], './data/images_Y1_train.npy'
img_t, label_t = img_t.cuda(), label_t.cuda()
feat = G(img_t)
out_t = C1(feat)
out_t = F.softmax(out_t)
entr = -torch.sum(out_t * torch.log(out_t), 1).data.cpu().numpy()
pred = out_t.data.max(1)[1]
k = label_t.data.size()[0]
pred_cls = pred.cpu().numpy()
pred = pred.cpu().numpy()
pred_unk = np.where(entr > threshold)
pred[pred_unk[0]] = unk_class
all_gt += list(label_t.data.cpu().numpy())
all_pred += list(pred)
for i, t in enumerate(class_list):
t_ind = np.where(label_t.data.cpu().numpy() == t)
correct_ind = np.where(pred[t_ind[0]] == t)
correct_ind_close = np.where(pred_cls[t_ind[0]] == i)
per_class_correct[i] += float(len(correct_ind[0]))
per_class_correct_cls[i] += float(len(correct_ind_close[0]))
per_class_num[i] += float(len(t_ind[0]))
correct += float(len(correct_ind[0]))
correct_close += float(len(correct_ind_close[0]))
size += k
per_class_acc = per_class_correct / per_class_num
close_p = float(per_class_correct_cls.sum() / per_class_num.sum())
print(
'\nTest set including unknown classes: Accuracy: {}/{} ({:.0f}%) '
'({:.4f}%)\n'.format(
correct, size,
100. * correct / size, float(per_class_acc.mean())))
output = [step, list(per_class_acc), 'per class mean acc %s'%float(per_class_acc.mean()),
float(correct / size), 'closed acc %s'%float(close_p)]
logger = logging.getLogger(__name__)
logging.basicConfig(filename=filename, format="%(message)s")
logger.setLevel(logging.INFO)
print(output)
logger.info(output) | [
"logging.getLogger",
"logging.basicConfig",
"torch.log",
"numpy.where",
"numpy.zeros",
"torch.no_grad",
"torch.nn.functional.softmax"
] | [((334, 355), 'numpy.zeros', 'np.zeros', (['(n_share + 1)'], {}), '(n_share + 1)\n', (342, 355), True, 'import numpy as np\n'), ((2365, 2392), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2382, 2392), False, 'import logging\n'), ((2397, 2457), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'filename', 'format': '"""%(message)s"""'}), "(filename=filename, format='%(message)s')\n", (2416, 2457), False, 'import logging\n'), ((382, 403), 'numpy.zeros', 'np.zeros', (['(n_share + 1)'], {}), '(n_share + 1)\n', (390, 403), True, 'import numpy as np\n'), ((453, 474), 'numpy.zeros', 'np.zeros', (['(n_share + 1)'], {}), '(n_share + 1)\n', (461, 474), True, 'import numpy as np\n'), ((595, 610), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (608, 610), False, 'import torch\n'), ((831, 847), 'torch.nn.functional.softmax', 'F.softmax', (['out_t'], {}), '(out_t)\n', (840, 847), True, 'import torch.nn.functional as F\n'), ((1109, 1135), 'numpy.where', 'np.where', (['(entr > threshold)'], {}), '(entr > threshold)\n', (1117, 1135), True, 'import numpy as np\n'), ((1411, 1440), 'numpy.where', 'np.where', (['(pred[t_ind[0]] == t)'], {}), '(pred[t_ind[0]] == t)\n', (1419, 1440), True, 'import numpy as np\n'), ((1477, 1510), 'numpy.where', 'np.where', (['(pred_cls[t_ind[0]] == i)'], {}), '(pred_cls[t_ind[0]] == i)\n', (1485, 1510), True, 'import numpy as np\n'), ((886, 902), 'torch.log', 'torch.log', (['out_t'], {}), '(out_t)\n', (895, 902), False, 'import torch\n')] |
import gym
from gym import spaces
import numpy as np
from gym_duckietown.simulator import Simulator
class MotionBlurWrapper(Simulator):
    """Simulates motion blur by averaging several rendered sub-frames per action.

    Each external step is split into ``frame_skip`` physics updates; the
    frames rendered along the way (plus the final frame) are blended with
    fixed weights so newer frames dominate the blur.
    """

    def __init__(self, env=None):
        Simulator.__init__(self)
        self.env = env
        self.frame_skip = 3
        # Spread one action over frame_skip physics sub-steps.
        self.env.delta_time = self.env.delta_time / self.frame_skip

    def step(self, action: np.ndarray):
        # Clamp to the valid range; actions could be a Python list,
        # so normalise to a NumPy array as well.
        action = np.array(np.clip(action, -1, 1))
        frames = []
        for _ in range(self.frame_skip):
            frames.append(self.env.render_obs())
            self.env.update_physics(action)
        # Final camera image after the last physics update.
        frames.append(self.env.render_obs())
        # Weighted blend: most recent physics state contributes least here
        # (weights are ordered oldest-to-newest frame).
        obs = np.average(frames, axis=0, weights=[0.8, 0.15, 0.04, 0.01])
        info = self.env.get_agent_info()
        outcome = self.env._compute_done_reward()
        info["Simulator"]["msg"] = outcome.done_why
        return obs, outcome.reward, outcome.done, info
class ResizeWrapper(gym.ObservationWrapper):
    """Resizes observations to ``shape`` (default ``(120, 160, 3)``).

    NOTE: the original implementation used ``scipy.misc.imresize``, which
    was deprecated in SciPy 1.0 and removed in SciPy 1.3, so the wrapper
    crashed on any modern SciPy install.  The resize is now performed with
    ``scipy.ndimage.zoom`` using ``order=1`` (bilinear interpolation) and
    the result is cast back to the declared observation dtype.
    """

    def __init__(self, env=None, shape=(120, 160, 3)):
        super(ResizeWrapper, self).__init__(env)
        self.observation_space.shape = shape
        self.observation_space = spaces.Box(
            self.observation_space.low[0, 0, 0],
            self.observation_space.high[0, 0, 0],
            shape,
            dtype=self.observation_space.dtype,
        )
        self.shape = shape

    def observation(self, observation):
        # Local import keeps this file importable even if SciPy is absent
        # until the wrapper is actually used.
        from scipy.ndimage import zoom

        # Per-axis zoom factors; the channel-axis factor is 1.0 whenever
        # source and target channel counts match, so channels are untouched.
        factors = [float(t) / float(s) for t, s in zip(self.shape, observation.shape)]
        resized = zoom(observation, factors, order=1)  # order=1 -> bilinear
        return resized.astype(self.observation_space.dtype)
class NormalizeWrapper(gym.ObservationWrapper):
    """Rescales observations from ``[low, high]`` into ``[0, 1]`` floats."""

    def __init__(self, env=None):
        super(NormalizeWrapper, self).__init__(env)
        # Cache the scalar bounds of the incoming space before replacing it.
        self.obs_lo = self.observation_space.low[0, 0, 0]
        self.obs_hi = self.observation_space.high[0, 0, 0]
        dims = self.observation_space.shape
        self.observation_space = spaces.Box(0.0, 1.0, dims, dtype=np.float32)

    def observation(self, obs):
        # Already in [0, 1]: pass through untouched.
        if self.obs_lo == 0.0 and self.obs_hi == 1.0:
            return obs
        span = self.obs_hi - self.obs_lo
        return (obs - self.obs_lo) / span
class ImgWrapper(gym.ObservationWrapper):
    """Moves the channel axis first: ``(H, W, C)`` -> ``(C, H, W)``."""

    def __init__(self, env=None):
        super(ImgWrapper, self).__init__(env)
        dims = self.observation_space.shape
        self.observation_space = spaces.Box(
            self.observation_space.low[0, 0, 0],
            self.observation_space.high[0, 0, 0],
            [dims[2], dims[0], dims[1]],
            dtype=self.observation_space.dtype,
        )

    def observation(self, observation):
        # HWC -> CHW, the layout expected by PyTorch-style models.
        return observation.transpose(2, 0, 1)
class DtRewardWrapper(gym.RewardWrapper):
    """Reshapes the simulator reward: softens the crash penalty and
    shifts normal rewards upward."""

    def __init__(self, env):
        super(DtRewardWrapper, self).__init__(env)

    def reward(self, reward):
        # The simulator signals a crash with -1000; soften it to -10.
        if reward == -1000:
            return -10
        # Positive progress gets a +10 bonus, everything else +4.
        if reward > 0:
            return reward + 10
        return reward + 4
# this is needed because at max speed the duckie can't turn anymore
class ActionWrapper(gym.ActionWrapper):
    """Caps forward velocity at 80% of maximum — at full speed the duckie
    can no longer turn, so steering authority is preserved."""

    def __init__(self, env):
        super(ActionWrapper, self).__init__(env)

    def action(self, action):
        # Scale only the velocity component; steering is passed through.
        return [action[0] * 0.8, action[1]]
| [
"numpy.clip",
"numpy.average",
"gym.spaces.Box",
"numpy.array",
"scipy.misc.imresize",
"gym_duckietown.simulator.Simulator.__init__"
] | [((181, 205), 'gym_duckietown.simulator.Simulator.__init__', 'Simulator.__init__', (['self'], {}), '(self)\n', (199, 205), False, 'from gym_duckietown.simulator import Simulator\n'), ((383, 405), 'numpy.clip', 'np.clip', (['action', '(-1)', '(1)'], {}), '(action, -1, 1)\n', (390, 405), True, 'import numpy as np\n'), ((464, 480), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (472, 480), True, 'import numpy as np\n'), ((816, 887), 'numpy.average', 'np.average', (['motion_blur_window'], {'axis': '(0)', 'weights': '[0.8, 0.15, 0.04, 0.01]'}), '(motion_blur_window, axis=0, weights=[0.8, 0.15, 0.04, 0.01])\n', (826, 887), True, 'import numpy as np\n'), ((1294, 1427), 'gym.spaces.Box', 'spaces.Box', (['self.observation_space.low[0, 0, 0]', 'self.observation_space.high[0, 0, 0]', 'shape'], {'dtype': 'self.observation_space.dtype'}), '(self.observation_space.low[0, 0, 0], self.observation_space.high\n [0, 0, 0], shape, dtype=self.observation_space.dtype)\n', (1304, 1427), False, 'from gym import spaces\n'), ((1606, 1639), 'scipy.misc.imresize', 'imresize', (['observation', 'self.shape'], {}), '(observation, self.shape)\n', (1614, 1639), False, 'from scipy.misc import imresize\n'), ((1975, 2024), 'gym.spaces.Box', 'spaces.Box', (['(0.0)', '(1.0)', 'obs_shape'], {'dtype': 'np.float32'}), '(0.0, 1.0, obs_shape, dtype=np.float32)\n', (1985, 2024), False, 'from gym import spaces\n'), ((2424, 2599), 'gym.spaces.Box', 'spaces.Box', (['self.observation_space.low[0, 0, 0]', 'self.observation_space.high[0, 0, 0]', '[obs_shape[2], obs_shape[0], obs_shape[1]]'], {'dtype': 'self.observation_space.dtype'}), '(self.observation_space.low[0, 0, 0], self.observation_space.high\n [0, 0, 0], [obs_shape[2], obs_shape[0], obs_shape[1]], dtype=self.\n observation_space.dtype)\n', (2434, 2599), False, 'from gym import spaces\n')] |
# This file contains auxiliary functions
import numpy as np
import tensorflow as tf
def orthogonal(shape):
    """Returns an orthogonal matrix of the given shape.

    The trailing dimensions are flattened, an SVD of a Gaussian matrix
    yields the orthogonal factor, and the result is reshaped back.
    """
    rows, cols = shape[0], int(np.prod(shape[1:]))
    gaussian = np.random.normal(0.0, 1.0, (rows, cols))
    u, _, v = np.linalg.svd(gaussian, full_matrices=False)
    # Pick whichever SVD factor matches the flattened shape.
    ortho = u if u.shape == (rows, cols) else v
    return ortho.reshape(shape)
def orthogonal_initializer(scale=1.0):
    """Returns an initializer that outputs a scaled orthogonal matrix."""
    def _initializer(shape, dtype=tf.float32, partition_info=None):
        matrix = orthogonal(shape) * scale
        return tf.constant(matrix, dtype)
    return _initializer
def rum_ortho_initializer(scale=1.0):
    """Initializer for RUM cells: fills the two halves of the second axis
    with independent scaled orthogonal matrices."""
    def _initializer(shape, dtype=tf.float32, partition_info=None):
        in_dim = shape[0]
        half = shape[1] // 2  # assumes a RUM
        weights = np.zeros(shape)
        weights[:, :half] = orthogonal([in_dim, half]) * scale
        weights[:, half:2 * half] = orthogonal([in_dim, half]) * scale
        return tf.constant(weights, dtype)
    return _initializer
def layer_norm_all(h, base, num_units, scope):
    """Layer norm applied to `base` concatenated groups at once (faster
    version — e.g. the i, g, j, o gates of an LSTM in one pass).

    `h` is reshaped to [-1, base, num_units] so each group is normalised
    over its own `num_units` axis in parallel.
    """
    with tf.variable_scope(scope):
        grouped = tf.reshape(h, [-1, base, num_units])
        mean = tf.reduce_mean(grouped, [2], keepdims=True)
        variance = tf.reduce_mean(tf.square(grouped - mean), [2], keepdims=True)
        inv_std = tf.rsqrt(variance + tf.constant(1e-3))
        grouped = (grouped - mean) * inv_std
        # reshape back to original
        flat = tf.reshape(grouped, [-1, base * num_units])
    # NOTE: the gain/bias variables live OUTSIDE the scope, matching the
    # original variable naming — do not move them inside.
    alpha = tf.get_variable('layer_norm_alpha', [base * num_units],
                            initializer=tf.constant_initializer(1.0), dtype=tf.float32)
    bias = tf.get_variable('layer_norm_bias', [base * num_units],
                           initializer=tf.constant_initializer(0.0), dtype=tf.float32)
    return (flat * alpha) + bias
def moments_for_layer_norm(x, axes=1, name=None):
    """Returns (mean, regularised std) of `x` over `axes`.

    Despite the name, the second value is sqrt(var + eps), not the raw
    variance.  Output mean/std keep singleton dims for broadcasting.
    From https://github.com/LeavesBreathe/tensorflow_with_latest_papers.
    """
    epsilon = 1e-3  # found this works best.
    axes = axes if isinstance(axes, list) else [axes]
    mean = tf.reduce_mean(x, axes, keepdims=True)
    centered_sq = tf.square(x - mean)
    variance = tf.sqrt(tf.reduce_mean(centered_sq, axes, keepdims=True) + epsilon)
    return mean, variance
def layer_norm(x, scope="layer_norm", alpha_start=1.0, bias_start=0.0):
    """Standard layer normalisation with learned gain (`alpha`) and bias.

    Simplified from
    https://github.com/LeavesBreathe/tensorflow_with_latest_papers.
    """
    with tf.variable_scope(scope):
        width = x.get_shape().as_list()[1]
        alpha = tf.get_variable('alpha', [width],
                                initializer=tf.constant_initializer(alpha_start), dtype=tf.float32)
        bias = tf.get_variable('bias', [width],
                               initializer=tf.constant_initializer(bias_start), dtype=tf.float32)
        mean, variance = moments_for_layer_norm(x)
        return (alpha * (x - mean)) / (variance) + bias
def zoneout(new_h, new_c, h, c, h_keep, c_keep, is_training):
    """Zoneout regularisation for LSTM state: each unit keeps its old
    value with probability (1 - keep), otherwise takes the new value.

    At eval time the masks stay all-ones, so the new state is used as-is.
    """
    mask_c = tf.ones_like(c)
    mask_h = tf.ones_like(h)
    if is_training:
        # dropout scales survivors by 1/keep; multiplying by keep undoes
        # that, leaving a {0, 1} mask.
        mask_c = tf.nn.dropout(mask_c, c_keep) * c_keep
        mask_h = tf.nn.dropout(mask_h, h_keep) * h_keep
    blended_h = new_h * mask_h + (1. - mask_h) * h
    blended_c = new_c * mask_c + (1. - mask_c) * c
    return blended_h, blended_c
def rum_zoneout(new_h, h, h_keep, is_training):
    """Zoneout for RUM cells (hidden state only): keep the old value of
    each unit with probability (1 - h_keep) during training."""
    mask_h = tf.ones_like(h)
    if is_training:
        # Undo dropout's 1/keep scaling to obtain a {0, 1} mask.
        mask_h = tf.nn.dropout(mask_h, h_keep) * h_keep
    return new_h * mask_h + (1. - mask_h) * h
| [
"numpy.random.normal",
"numpy.prod",
"tensorflow.variable_scope",
"tensorflow.reshape",
"tensorflow.rsqrt",
"numpy.zeros",
"tensorflow.constant",
"tensorflow.nn.dropout",
"tensorflow.constant_initializer",
"tensorflow.ones_like",
"numpy.linalg.svd",
"tensorflow.reduce_mean",
"tensorflow.squa... | [((224, 262), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', 'flat_shape'], {}), '(0.0, 1.0, flat_shape)\n', (240, 262), True, 'import numpy as np\n'), ((277, 314), 'numpy.linalg.svd', 'np.linalg.svd', (['a'], {'full_matrices': '(False)'}), '(a, full_matrices=False)\n', (290, 314), True, 'import numpy as np\n'), ((2375, 2413), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x', 'axes'], {'keepdims': '(True)'}), '(x, axes, keepdims=True)\n', (2389, 2413), True, 'import tensorflow as tf\n'), ((3308, 3323), 'tensorflow.ones_like', 'tf.ones_like', (['c'], {}), '(c)\n', (3320, 3323), True, 'import tensorflow as tf\n'), ((3337, 3352), 'tensorflow.ones_like', 'tf.ones_like', (['h'], {}), '(h)\n', (3349, 3352), True, 'import tensorflow as tf\n'), ((3680, 3695), 'tensorflow.ones_like', 'tf.ones_like', (['h'], {}), '(h)\n', (3692, 3695), True, 'import tensorflow as tf\n'), ((196, 214), 'numpy.prod', 'np.prod', (['shape[1:]'], {}), '(shape[1:])\n', (203, 214), True, 'import numpy as np\n'), ((841, 856), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (849, 856), True, 'import numpy as np\n'), ((1004, 1025), 'tensorflow.constant', 'tf.constant', (['t', 'dtype'], {}), '(t, dtype)\n', (1015, 1025), True, 'import tensorflow as tf\n'), ((1285, 1309), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (1302, 1309), True, 'import tensorflow as tf\n'), ((1331, 1367), 'tensorflow.reshape', 'tf.reshape', (['h', '[-1, base, num_units]'], {}), '(h, [-1, base, num_units])\n', (1341, 1367), True, 'import tensorflow as tf\n'), ((1383, 1428), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['h_reshape', '[2]'], {'keepdims': '(True)'}), '(h_reshape, [2], keepdims=True)\n', (1397, 1428), True, 'import tensorflow as tf\n'), ((1525, 1543), 'tensorflow.constant', 'tf.constant', (['(0.001)'], {}), '(0.001)\n', (1536, 1543), True, 'import tensorflow as tf\n'), ((1558, 1581), 'tensorflow.rsqrt', 'tf.rsqrt', (['(var + epsilon)'], 
{}), '(var + epsilon)\n', (1566, 1581), True, 'import tensorflow as tf\n'), ((1675, 1720), 'tensorflow.reshape', 'tf.reshape', (['h_reshape', '[-1, base * num_units]'], {}), '(h_reshape, [-1, base * num_units])\n', (1685, 1720), True, 'import tensorflow as tf\n'), ((2735, 2759), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (2752, 2759), True, 'import tensorflow as tf\n'), ((3391, 3420), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['mask_c', 'c_keep'], {}), '(mask_c, c_keep)\n', (3404, 3420), True, 'import tensorflow as tf\n'), ((3438, 3467), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['mask_h', 'h_keep'], {}), '(mask_h, h_keep)\n', (3451, 3467), True, 'import tensorflow as tf\n'), ((3734, 3763), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['mask_h', 'h_keep'], {}), '(mask_h, h_keep)\n', (3747, 3763), True, 'import tensorflow as tf\n'), ((1458, 1485), 'tensorflow.square', 'tf.square', (['(h_reshape - mean)'], {}), '(h_reshape - mean)\n', (1467, 1485), True, 'import tensorflow as tf\n'), ((1838, 1866), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (1861, 1866), True, 'import tensorflow as tf\n'), ((1999, 2027), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (2022, 2027), True, 'import tensorflow as tf\n'), ((2461, 2480), 'tensorflow.square', 'tf.square', (['(x - mean)'], {}), '(x - mean)\n', (2470, 2480), True, 'import tensorflow as tf\n'), ((2907, 2943), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['alpha_start'], {}), '(alpha_start)\n', (2930, 2943), True, 'import tensorflow as tf\n'), ((3058, 3093), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['bias_start'], {}), '(bias_start)\n', (3081, 3093), True, 'import tensorflow as tf\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.